You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@helix.apache.org by ka...@apache.org on 2014/07/10 19:04:44 UTC
[01/50] [abbrv] git commit: Merge branch 'helix-provisioning' of
https://git-wip-us.apache.org/repos/asf/helix into helix-provisioning
Repository: helix
Updated Branches:
refs/heads/master 884e071f3 -> 713586c42
Merge branch 'helix-provisioning' of https://git-wip-us.apache.org/repos/asf/helix into helix-provisioning
Project: http://git-wip-us.apache.org/repos/asf/helix/repo
Commit: http://git-wip-us.apache.org/repos/asf/helix/commit/48031f36
Tree: http://git-wip-us.apache.org/repos/asf/helix/tree/48031f36
Diff: http://git-wip-us.apache.org/repos/asf/helix/diff/48031f36
Branch: refs/heads/master
Commit: 48031f3670dc53e275a84ea1a3458bf589814693
Parents: df9a3a3 e8620e4
Author: Kishore Gopalakrishna <g....@gmail.com>
Authored: Tue Feb 18 18:25:54 2014 -0800
Committer: Kishore Gopalakrishna <g....@gmail.com>
Committed: Tue Feb 18 18:25:54 2014 -0800
----------------------------------------------------------------------
.../java/org/apache/helix/api/Resource.java | 10 +
.../helix/task/AbstractTaskRebalancer.java | 631 +++++++++++++++++++
.../helix/task/IndependentTaskRebalancer.java | 110 ++++
.../org/apache/helix/task/TaskRebalancer.java | 603 +-----------------
4 files changed, 779 insertions(+), 575 deletions(-)
----------------------------------------------------------------------
[19/50] [abbrv] git commit: Fixing app status report generator
Posted by ka...@apache.org.
Fixing app status report generator
Project: http://git-wip-us.apache.org/repos/asf/helix/repo
Commit: http://git-wip-us.apache.org/repos/asf/helix/commit/c072aca4
Tree: http://git-wip-us.apache.org/repos/asf/helix/tree/c072aca4
Diff: http://git-wip-us.apache.org/repos/asf/helix/diff/c072aca4
Branch: refs/heads/master
Commit: c072aca47327c85a811c886e7782a6bd51c8380f
Parents: 8992aa5
Author: Kishore Gopalakrishna <g....@gmail.com>
Authored: Mon Feb 24 08:53:12 2014 -0800
Committer: Kishore Gopalakrishna <g....@gmail.com>
Committed: Mon Feb 24 08:53:12 2014 -0800
----------------------------------------------------------------------
.../helix/api/accessor/ClusterAccessor.java | 4 +
.../apache/helix/api/config/ResourceConfig.java | 14 ++++
.../helix/provisioning/yarn/AppLauncher.java | 69 +++++++++++-----
.../yarn/AppStatusReportGenerator.java | 27 +++---
.../java/tools/UpdateProvisionerConfig.java | 87 ++++++++++++++++++++
recipes/helloworld-provisioning-yarn/run.sh | 10 +--
.../main/resources/hello_world_app_spec.yaml | 4 +-
7 files changed, 177 insertions(+), 38 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/helix/blob/c072aca4/helix-core/src/main/java/org/apache/helix/api/accessor/ClusterAccessor.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/api/accessor/ClusterAccessor.java b/helix-core/src/main/java/org/apache/helix/api/accessor/ClusterAccessor.java
index cacdf6c..e653338 100644
--- a/helix-core/src/main/java/org/apache/helix/api/accessor/ClusterAccessor.java
+++ b/helix-core/src/main/java/org/apache/helix/api/accessor/ClusterAccessor.java
@@ -728,10 +728,14 @@ public class ClusterAccessor {
BaseDataAccessor<?> baseAccessor = _accessor.getBaseDataAccessor();
if (baseAccessor != null) {
boolean[] existsResults = baseAccessor.exists(paths, 0);
+ int ind =0;
for (boolean exists : existsResults) {
+
if (!exists) {
+ LOG.warn("Path does not exist:"+ paths.get(ind));
return false;
}
+ ind = ind + 1;
}
}
return true;
http://git-wip-us.apache.org/repos/asf/helix/blob/c072aca4/helix-core/src/main/java/org/apache/helix/api/config/ResourceConfig.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/api/config/ResourceConfig.java b/helix-core/src/main/java/org/apache/helix/api/config/ResourceConfig.java
index 0b2df4a..5443236 100644
--- a/helix-core/src/main/java/org/apache/helix/api/config/ResourceConfig.java
+++ b/helix-core/src/main/java/org/apache/helix/api/config/ResourceConfig.java
@@ -180,6 +180,7 @@ public class ResourceConfig {
private enum Fields {
TYPE,
REBALANCER_CONFIG,
+ PROVISIONER_CONFIG,
USER_CONFIG,
BUCKET_SIZE,
BATCH_MESSAGE_MODE
@@ -220,6 +221,16 @@ public class ResourceConfig {
}
/**
+ * Set the provisioner configuration
+ * @param config properties of interest for provisioning
+ * @return Delta
+ */
+ public Delta setProvisionerConfig(ProvisionerConfig config) {
+ _builder.provisionerConfig(config);
+ _updateFields.add(Fields.PROVISIONER_CONFIG);
+ return this;
+ }
+ /**
* Set the user configuration
* @param userConfig user-specified properties
* @return Delta
@@ -272,6 +283,9 @@ public class ResourceConfig {
case REBALANCER_CONFIG:
builder.rebalancerConfig(deltaConfig.getRebalancerConfig());
break;
+ case PROVISIONER_CONFIG:
+ builder.provisionerConfig(deltaConfig.getProvisionerConfig());
+ break;
case USER_CONFIG:
builder.userConfig(deltaConfig.getUserConfig());
break;
http://git-wip-us.apache.org/repos/asf/helix/blob/c072aca4/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppLauncher.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppLauncher.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppLauncher.java
index d2e901f..4b77105 100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppLauncher.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppLauncher.java
@@ -13,6 +13,10 @@ import java.util.List;
import java.util.Map;
import java.util.Vector;
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.GnuParser;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.Options;
import org.apache.commons.compress.archivers.ArchiveStreamFactory;
import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;
@@ -117,7 +121,7 @@ public class AppLauncher {
// Set up the container launch context for the application master
ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);
- LOG.info("Copy App archive file from local filesystem and add to local environment");
+ LOG.info("Copy Application archive file from local filesystem and add to local environment");
// Copy the application master jar to the filesystem
// Create a local resource to point to the destination jar path
FileSystem fs = FileSystem.get(_conf);
@@ -185,10 +189,9 @@ public class AppLauncher {
classPathEnv.append(':');
classPathEnv.append(System.getProperty("java.class.path"));
}
- LOG.info("\n\n Setting the classpath for AppMaster:\n\n" + classPathEnv.toString());
+ LOG.info("\n\n Setting the classpath to launch AppMaster:\n\n" );
// Set the env variables to be setup in the env where the application master will be run
Map<String, String> env = new HashMap<String, String>(_appMasterConfig.getEnv());
- LOG.info("Set the environment for the application master" + env);
env.put("CLASSPATH", classPathEnv.toString());
amContainer.setEnvironment(env);
@@ -197,7 +200,7 @@ public class AppLauncher {
Vector<CharSequence> vargs = new Vector<CharSequence>(30);
// Set java executable command
- LOG.info("Setting up app master command");
+ LOG.info("Setting up app master launch command");
vargs.add(Environment.JAVA_HOME.$() + "/bin/java");
int amMemory = 1024;
// Set Xmx based on am memory size
@@ -265,13 +268,12 @@ public class AppLauncher {
// Set the queue to which this application is to be submitted in the RM
appContext.setQueue(amQueue);
- // Submit the application to the applications manager
- // SubmitApplicationResponse submitResp = applicationsManager.submitApplication(appRequest);
- // Ignore the response as either a valid response object is returned on success
- // or an exception thrown to denote some form of a failure
- LOG.info("Submitting application to ASM");
- yarnClient.submitApplication(appContext);
+ LOG.info("Submitting application to YARN Resource Manager");
+
+ ApplicationId applicationId = yarnClient.submitApplication(appContext);
+
+ LOG.info("Submitted application with applicationId:" + applicationId );
return true;
}
@@ -355,6 +357,8 @@ public class AppLauncher {
*/
public boolean waitUntilDone() {
String prevReport = "";
+ HelixConnection connection = null;
+
while (true) {
try {
// Get application report for the appId we are interested in
@@ -381,15 +385,27 @@ public class AppLauncher {
return false;
}
if (YarnApplicationState.RUNNING == state) {
- HelixConnection connection = new ZkHelixConnection(report.getHost() + ":2181");
- try{
- connection.connect();
- }catch(Exception e){
- LOG.warn("AppMaster started but not yet initialized");
+ if (connection == null) {
+ String hostName = null;
+ int ind = report.getHost().indexOf('/');
+ if (ind > -1) {
+ hostName = report.getHost().substring(ind + 1);
+ } else {
+ hostName = report.getHost();
+ }
+ connection = new ZkHelixConnection(hostName + ":2181");
+
+ try {
+ connection.connect();
+ } catch (Exception e) {
+ LOG.warn("AppMaster started but not yet initialized");
+ connection = null;
+ }
}
- if(connection.isConnected()){
+ if (connection.isConnected()) {
AppStatusReportGenerator generator = new AppStatusReportGenerator();
- String generateReport = generator.generateReport(connection, ClusterId.from(_applicationSpec.getAppName()));
+ ClusterId clusterId = ClusterId.from(_applicationSpec.getAppName());
+ String generateReport = generator.generateReport(connection, clusterId);
LOG.info(generateReport);
}
}
@@ -430,13 +446,24 @@ public class AppLauncher {
}
/**
- * will take the input file and AppSpecFactory class name as input
- * @param args
+ * Launches the application on a YARN cluster. Once launched, it will display (periodically) the status of the containers in the application.
+ * @param args app_spec_provider and app_config_spec
* @throws Exception
*/
public static void main(String[] args) throws Exception {
- ApplicationSpecFactory applicationSpecFactory = HelixYarnUtil.createInstance(args[0]);
- File yamlConfigFile = new File(args[1]);
+
+ Options opts = new Options();
+ opts.addOption(new Option("app_spec_provider",true, "Application Spec Factory Class that will parse the app_config_spec file"));
+ opts.addOption(new Option("app_config_spec",true, "YAML config file that provides the app specifications"));
+ CommandLine cliParser = new GnuParser().parse(opts, args);
+ String appSpecFactoryClass = cliParser.getOptionValue("app_spec_provider");
+ String yamlConfigFileName = cliParser.getOptionValue("app_config_spec");
+
+ ApplicationSpecFactory applicationSpecFactory = HelixYarnUtil.createInstance(appSpecFactoryClass);
+ File yamlConfigFile = new File(yamlConfigFileName);
+ if(!yamlConfigFile.exists()){
+ throw new IllegalArgumentException("YAML app_config_spec file: '"+ yamlConfigFileName + "' does not exist");
+ }
final AppLauncher launcher = new AppLauncher(applicationSpecFactory, yamlConfigFile);
launcher.launch();
Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() {
http://git-wip-us.apache.org/repos/asf/helix/blob/c072aca4/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppStatusReportGenerator.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppStatusReportGenerator.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppStatusReportGenerator.java
index 0443f8a..b083ac9 100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppStatusReportGenerator.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppStatusReportGenerator.java
@@ -17,7 +17,6 @@ import org.apache.helix.api.id.ResourceId;
import org.apache.helix.controller.provisioner.ContainerId;
import org.apache.helix.controller.provisioner.ContainerState;
import org.apache.helix.manager.zk.ZkHelixConnection;
-import org.apache.helix.model.ExternalView;
public class AppStatusReportGenerator {
static String TAB = "\t";
@@ -39,26 +38,30 @@ public class AppStatusReportGenerator {
resource.getExternalView().getStateMap(PartitionId.from(resourceId.stringify() + "_0"));
builder.append(TAB).append("CONTAINER_NAME").append(TAB).append(TAB)
- .append("CONTAINER_STATE").append(TAB).append("SERVICE_STATE").append(TAB).append("CONTAINER_ID").append(NEWLINE);
+ .append("CONTAINER_STATE").append(TAB).append("SERVICE_STATE").append(TAB)
+ .append("CONTAINER_ID").append(NEWLINE);
for (Participant participant : participants.values()) {
// need a better check
if (!participant.getId().stringify().startsWith(resource.getId().stringify())) {
continue;
}
ContainerConfig containerConfig = participant.getContainerConfig();
- ContainerState containerState =ContainerState.UNDEFINED;
+ ContainerState containerState = ContainerState.UNDEFINED;
ContainerId containerId = ContainerId.from("N/A");
if (containerConfig != null) {
containerId = containerConfig.getId();
containerState = containerConfig.getState();
}
- State participantState = serviceStateMap.get(participant.getId());
+ State participantState = null;
+ if (serviceStateMap != null) {
+ participantState = serviceStateMap.get(participant.getId());
+ }
if (participantState == null) {
participantState = State.from("UNKNOWN");
}
- builder.append(TAB).append(participant.getId()).append(TAB)
- .append(containerState).append(TAB).append(participantState).append(TAB).append(TAB).append(containerId);
+ builder.append(TAB).append(participant.getId()).append(TAB).append(containerState)
+ .append(TAB).append(participantState).append(TAB).append(TAB).append(containerId);
builder.append(NEWLINE);
}
@@ -67,13 +70,17 @@ public class AppStatusReportGenerator {
}
- public static void main(String[] args) {
+ public static void main(String[] args) throws InterruptedException {
AppStatusReportGenerator generator = new AppStatusReportGenerator();
ZkHelixConnection connection = new ZkHelixConnection("localhost:2181");
connection.connect();
- String generateReport = generator.generateReport(connection, ClusterId.from("testApp"));
- System.out.println(generateReport);
- connection.disconnect();
+ while (true) {
+ String generateReport = generator.generateReport(connection, ClusterId.from("testApp1"));
+ System.out.println(generateReport);
+ Thread.sleep(10000);
+ connection.createClusterManagementTool().addCluster("testApp1");
+ }
+ // connection.disconnect();
}
}
http://git-wip-us.apache.org/repos/asf/helix/blob/c072aca4/helix-provisioning/src/main/java/tools/UpdateProvisionerConfig.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/tools/UpdateProvisionerConfig.java b/helix-provisioning/src/main/java/tools/UpdateProvisionerConfig.java
new file mode 100644
index 0000000..89ee1c5
--- /dev/null
+++ b/helix-provisioning/src/main/java/tools/UpdateProvisionerConfig.java
@@ -0,0 +1,87 @@
+package tools;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.GnuParser;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.OptionBuilder;
+import org.apache.commons.cli.OptionGroup;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
+import org.apache.helix.HelixConnection;
+import org.apache.helix.api.Resource;
+import org.apache.helix.api.accessor.ResourceAccessor;
+import org.apache.helix.api.config.ResourceConfig;
+import org.apache.helix.api.id.ClusterId;
+import org.apache.helix.api.id.ResourceId;
+import org.apache.helix.manager.zk.ZkHelixConnection;
+import org.apache.helix.provisioning.yarn.YarnProvisionerConfig;
+import org.apache.log4j.Logger;
+/**
+ * Update the provisioner config
+ */
+public class UpdateProvisionerConfig {
+ private static Logger LOG = Logger.getLogger(UpdateProvisionerConfig.class);
+ private static String updateContainerCount = "updateContainerCount";
+ private HelixConnection _connection;
+
+ public UpdateProvisionerConfig(String zkAddress) {
+ _connection = new ZkHelixConnection(zkAddress);
+ _connection.connect();
+ }
+
+ public void setNumContainers(String appName, String serviceName, int numContainers) {
+ ResourceId resourceId = ResourceId.from(serviceName);
+
+ ResourceAccessor resourceAccessor = _connection.createResourceAccessor(ClusterId.from(appName));
+ Resource resource = resourceAccessor.readResource(resourceId);
+ LOG.info("Current provisioner config:"+ resource.getProvisionerConfig());
+
+ ResourceConfig.Delta delta = new ResourceConfig.Delta(resourceId);
+ YarnProvisionerConfig config = new YarnProvisionerConfig(resourceId);
+ config.setNumContainers(numContainers);
+ delta.setProvisionerConfig(config);
+ ResourceConfig updatedResourceConfig = resourceAccessor.updateResource(resourceId, delta);
+ LOG.info("Update provisioner config:"+ updatedResourceConfig.getProvisionerConfig());
+
+ }
+
+ @SuppressWarnings("static-access")
+ public static void main(String[] args) throws ParseException {
+ Option zkServerOption =
+ OptionBuilder.withLongOpt("zookeeperAddress").withDescription("Provide zookeeper address")
+ .create();
+ zkServerOption.setArgs(1);
+ zkServerOption.setRequired(true);
+ zkServerOption.setArgName("zookeeperAddress(Required)");
+
+ OptionGroup group = new OptionGroup();
+ group.setRequired(true);
+
+ // update container count per service
+ Option updateContainerCountOption =
+ OptionBuilder.withLongOpt(updateContainerCount)
+ .withDescription("set the number of containers per service").create();
+ updateContainerCountOption.setArgs(3);
+ updateContainerCountOption.setRequired(false);
+ updateContainerCountOption.setArgName("appName serviceName numContainers");
+
+ group.addOption(updateContainerCountOption);
+
+ Options options = new Options();
+ options.addOption(zkServerOption);
+ options.addOptionGroup(group);
+ CommandLine cliParser = new GnuParser().parse(options, args);
+
+ String zkAddress = cliParser.getOptionValue("zookeeperAddress");
+ UpdateProvisionerConfig updater = new UpdateProvisionerConfig(zkAddress);
+
+ if (cliParser.hasOption(updateContainerCount)) {
+ String appName = cliParser.getOptionValues(updateContainerCount)[0];
+ String serviceName = cliParser.getOptionValues(updateContainerCount)[1];
+ int numContainers = Integer.parseInt(
+ cliParser.getOptionValues(updateContainerCount)[2]);
+ updater.setNumContainers(appName, serviceName, numContainers);
+ }
+
+ }
+}
http://git-wip-us.apache.org/repos/asf/helix/blob/c072aca4/recipes/helloworld-provisioning-yarn/run.sh
----------------------------------------------------------------------
diff --git a/recipes/helloworld-provisioning-yarn/run.sh b/recipes/helloworld-provisioning-yarn/run.sh
index 51d4c35..07448bb 100755
--- a/recipes/helloworld-provisioning-yarn/run.sh
+++ b/recipes/helloworld-provisioning-yarn/run.sh
@@ -1,6 +1,6 @@
-cd ../../../../
-mvn clean install -DskipTests
-cd recipes/provisioning/yarn/helloworld/
+#cd ../../
+#mvn clean install -DskipTests
+#cd recipes/helloworld-provisioning-yarn
mvn clean package -DskipTests
-chmod +x target/helloworld-pkg/bin/app-launcher.sh
-target/helloworld-pkg/bin/app-launcher.sh org.apache.helix.provisioning.yarn.example.HelloWordAppSpecFactory /Users/kgopalak/Documents/projects/incubator-helix/recipes/provisioning/yarn/helloworld/src/main/resources/hello_world_app_spec.yaml
+chmod +x target/helloworld-provisioning-yarn-pkg/bin/app-launcher.sh
+target/helloworld-provisioning-yarn-pkg/bin/app-launcher.sh org.apache.helix.provisioning.yarn.example.HelloWordAppSpecFactory /Users/kgopalak/Documents/projects/incubator-helix/recipes/helloworld-provisioning-yarn/src/main/resources/hello_world_app_spec.yaml
http://git-wip-us.apache.org/repos/asf/helix/blob/c072aca4/recipes/helloworld-provisioning-yarn/src/main/resources/hello_world_app_spec.yaml
----------------------------------------------------------------------
diff --git a/recipes/helloworld-provisioning-yarn/src/main/resources/hello_world_app_spec.yaml b/recipes/helloworld-provisioning-yarn/src/main/resources/hello_world_app_spec.yaml
index d8d1dd2..baaddb5 100644
--- a/recipes/helloworld-provisioning-yarn/src/main/resources/hello_world_app_spec.yaml
+++ b/recipes/helloworld-provisioning-yarn/src/main/resources/hello_world_app_spec.yaml
@@ -3,7 +3,7 @@ appConfig:
config: {
k1: v1
}
-appMasterPackageUri: 'file:///Users/kgopalak/Documents/projects/incubator-helix/recipes/provisioning/yarn/helloworld/target/helloworld-0.7.1-incubating-SNAPSHOT-pkg.tar'
+appMasterPackageUri: 'file:///Users/kgopalak/Documents/projects/incubator-helix/recipes/helloworld-provisioning-yarn/target/helloworld-provisioning-yarn-0.7.1-incubating-SNAPSHOT-pkg.tar'
appName: testApp
serviceConfigMap:
HelloWorld: {
@@ -14,7 +14,7 @@ serviceMainClassMap: {
HelloWorld: org.apache.helix.provisioning.yarn.example.HelloWorldService
}
servicePackageURIMap: {
- HelloWorld: 'file:///Users/kgopalak/Documents/projects/incubator-helix/recipes/provisioning/yarn/helloworld/target/helloworld-0.7.1-incubating-SNAPSHOT-pkg.tar'
+ HelloWorld: 'file:///Users/kgopalak/Documents/projects/incubator-helix/recipes/helloworld-provisioning-yarn/target/helloworld-provisioning-yarn-0.7.1-incubating-SNAPSHOT-pkg.tar'
}
services: [
HelloWorld]
[30/50] [abbrv] git commit: Support running different tasks on each
partition
Posted by ka...@apache.org.
Support running different tasks on each partition
Project: http://git-wip-us.apache.org/repos/asf/helix/repo
Commit: http://git-wip-us.apache.org/repos/asf/helix/commit/e4468121
Tree: http://git-wip-us.apache.org/repos/asf/helix/tree/e4468121
Diff: http://git-wip-us.apache.org/repos/asf/helix/diff/e4468121
Branch: refs/heads/master
Commit: e4468121bef440fe97e6d8ba36639656e3a1e0b9
Parents: 4ea6bce
Author: Kanak Biscuitwala <ka...@apache.org>
Authored: Tue Mar 4 15:54:04 2014 -0800
Committer: Kanak Biscuitwala <ka...@apache.org>
Committed: Tue Mar 4 15:54:04 2014 -0800
----------------------------------------------------------------------
.../java/org/apache/helix/task/TaskConfig.java | 21 ++++++++---
.../java/org/apache/helix/task/TaskDriver.java | 2 ++
.../org/apache/helix/task/TaskStateModel.java | 12 ++++++-
.../java/org/apache/helix/task/TaskUtil.java | 38 ++++++++------------
.../helix/provisioning/tools/TaskManager.java | 11 +++---
.../provisioning/tools/TestTaskManager.java | 20 ++++++++---
6 files changed, 67 insertions(+), 37 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/helix/blob/e4468121/helix-core/src/main/java/org/apache/helix/task/TaskConfig.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/TaskConfig.java b/helix-core/src/main/java/org/apache/helix/task/TaskConfig.java
index be9db79..0287657 100644
--- a/helix-core/src/main/java/org/apache/helix/task/TaskConfig.java
+++ b/helix-core/src/main/java/org/apache/helix/task/TaskConfig.java
@@ -21,6 +21,7 @@ package org.apache.helix.task;
import java.util.ArrayList;
import java.util.Arrays;
+import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
@@ -64,8 +65,8 @@ public class TaskConfig {
public static final String NUM_CONCURRENT_TASKS_PER_INSTANCE = "ConcurrentTasksPerInstance";
/** Support overarching tasks that hang around for a while */
public static final String LONG_LIVED = "LongLived";
- /** Support giving tasks a custom name **/
- public static final String PARTITION_NAME_MAP = "PartitionNameMap";
+ /** Support mapping partition IDs to specific task names **/
+ public static final String TASK_NAME_MAP = "TaskNameMap";
// // Default property values ////
@@ -83,11 +84,12 @@ public class TaskConfig {
private final int _numConcurrentTasksPerInstance;
private final int _maxAttemptsPerPartition;
private final boolean _longLived;
+ private final Map<String, String> _taskNameMap;
private TaskConfig(String workflow, String targetResource, List<Integer> targetPartitions,
Set<String> targetPartitionStates, String command, String commandConfig,
long timeoutPerPartition, int numConcurrentTasksPerInstance, int maxAttemptsPerPartition,
- boolean longLived) {
+ boolean longLived, Map<String, String> taskNameMap) {
_workflow = workflow;
_targetResource = targetResource;
_targetPartitions = targetPartitions;
@@ -98,6 +100,7 @@ public class TaskConfig {
_numConcurrentTasksPerInstance = numConcurrentTasksPerInstance;
_maxAttemptsPerPartition = maxAttemptsPerPartition;
_longLived = longLived;
+ _taskNameMap = taskNameMap;
}
public String getWorkflow() {
@@ -140,6 +143,10 @@ public class TaskConfig {
return _longLived;
}
+ public Map<String, String> getTaskNameMap() {
+ return _taskNameMap;
+ }
+
public Map<String, String> getResourceConfigMap() {
Map<String, String> cfgMap = new HashMap<String, String>();
cfgMap.put(TaskConfig.WORKFLOW_ID, _workflow);
@@ -174,13 +181,14 @@ public class TaskConfig {
private int _numConcurrentTasksPerInstance = DEFAULT_NUM_CONCURRENT_TASKS_PER_INSTANCE;
private int _maxAttemptsPerPartition = DEFAULT_MAX_ATTEMPTS_PER_PARTITION;
private boolean _longLived = false;
+ private Map<String, String> _taskNameMap = Collections.emptyMap();
public TaskConfig build() {
validate();
return new TaskConfig(_workflow, _targetResource, _targetPartitions, _targetPartitionStates,
_command, _commandConfig, _timeoutPerPartition, _numConcurrentTasksPerInstance,
- _maxAttemptsPerPartition, _longLived);
+ _maxAttemptsPerPartition, _longLived, _taskNameMap);
}
/**
@@ -275,6 +283,11 @@ public class TaskConfig {
return this;
}
+ public Builder setTaskNameMap(Map<String, String> taskNameMap) {
+ _taskNameMap = taskNameMap;
+ return this;
+ }
+
private void validate() {
if (_targetResource == null && _targetPartitions == null) {
throw new IllegalArgumentException(String.format(
http://git-wip-us.apache.org/repos/asf/helix/blob/e4468121/helix-core/src/main/java/org/apache/helix/task/TaskDriver.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/TaskDriver.java b/helix-core/src/main/java/org/apache/helix/task/TaskDriver.java
index 17e7542..dd47625 100644
--- a/helix-core/src/main/java/org/apache/helix/task/TaskDriver.java
+++ b/helix-core/src/main/java/org/apache/helix/task/TaskDriver.java
@@ -289,6 +289,7 @@ public class TaskDriver {
}
/** Constructs option group containing options required by all drivable tasks */
+ @SuppressWarnings("static-access")
private static OptionGroup contructGenericRequiredOptionGroup() {
Option zkAddressOption =
OptionBuilder.isRequired().hasArgs(1).withArgName("zkAddress").withLongOpt(ZK_ADDRESS)
@@ -310,6 +311,7 @@ public class TaskDriver {
}
/** Constructs option group containing options required by all drivable tasks */
+ @SuppressWarnings("static-access")
private static OptionGroup constructStartOptionGroup() {
Option workflowFileOption =
OptionBuilder.withLongOpt(WORKFLOW_FILE_OPTION).hasArgs(1).withArgName("workflowFile")
http://git-wip-us.apache.org/repos/asf/helix/blob/e4468121/helix-core/src/main/java/org/apache/helix/task/TaskStateModel.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/TaskStateModel.java b/helix-core/src/main/java/org/apache/helix/task/TaskStateModel.java
index cecf2e8..c399930 100644
--- a/helix-core/src/main/java/org/apache/helix/task/TaskStateModel.java
+++ b/helix-core/src/main/java/org/apache/helix/task/TaskStateModel.java
@@ -25,6 +25,7 @@ import java.util.TimerTask;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
+
import org.apache.helix.HelixManager;
import org.apache.helix.NotificationContext;
import org.apache.helix.model.Message;
@@ -217,7 +218,16 @@ public class TaskStateModel extends StateModel {
private void startTask(Message msg, String taskPartition) {
TaskConfig cfg = TaskUtil.getTaskCfg(_manager, msg.getResourceName());
- TaskFactory taskFactory = _taskFactoryRegistry.get(cfg.getCommand());
+ String command = cfg.getCommand();
+ Map<String, String> taskNameMap = cfg.getTaskNameMap();
+ if (taskNameMap != null && taskNameMap.containsKey(taskPartition)) {
+ // Support a partition-specific override of tasks to run
+ String taskName = taskNameMap.get(taskPartition);
+ if (_taskFactoryRegistry.containsKey(taskName)) {
+ command = taskName;
+ }
+ }
+ TaskFactory taskFactory = _taskFactoryRegistry.get(command);
Task task = taskFactory.createNewTask(cfg.getCommandConfig());
_taskRunner =
http://git-wip-us.apache.org/repos/asf/helix/blob/e4468121/helix-core/src/main/java/org/apache/helix/task/TaskUtil.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/TaskUtil.java b/helix-core/src/main/java/org/apache/helix/task/TaskUtil.java
index c81be5d..0f980b8 100644
--- a/helix-core/src/main/java/org/apache/helix/task/TaskUtil.java
+++ b/helix-core/src/main/java/org/apache/helix/task/TaskUtil.java
@@ -19,12 +19,9 @@ package org.apache.helix.task;
* under the License.
*/
-import com.google.common.base.Joiner;
-import java.util.HashMap;
-import java.util.List;
import java.util.Map;
+
import org.apache.helix.AccessOption;
-import org.apache.helix.ConfigAccessor;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.HelixManager;
import org.apache.helix.PropertyKey;
@@ -34,9 +31,12 @@ import org.apache.helix.api.id.PartitionId;
import org.apache.helix.model.CurrentState;
import org.apache.helix.model.HelixConfigScope;
import org.apache.helix.model.ResourceAssignment;
+import org.apache.helix.model.ResourceConfiguration;
import org.apache.helix.model.builder.HelixConfigScopeBuilder;
import org.apache.log4j.Logger;
+import com.google.common.base.Joiner;
+
/**
* Static utility methods.
*/
@@ -66,16 +66,19 @@ public class TaskUtil {
* otherwise.
*/
public static TaskConfig getTaskCfg(HelixManager manager, String taskResource) {
- Map<String, String> taskCfg = getResourceConfigMap(manager, taskResource);
+ ResourceConfiguration config = getResourceConfig(manager, taskResource);
+ Map<String, String> taskCfg = config.getRecord().getSimpleFields();
TaskConfig.Builder b = TaskConfig.Builder.fromMap(taskCfg);
-
+ if (config.getRecord().getMapFields().containsKey(TaskConfig.TASK_NAME_MAP)) {
+ b.setTaskNameMap(config.getRecord().getMapField(TaskConfig.TASK_NAME_MAP));
+ }
return b.build();
}
public static WorkflowConfig getWorkflowCfg(HelixManager manager, String workflowResource) {
- Map<String, String> workflowCfg = getResourceConfigMap(manager, workflowResource);
+ ResourceConfiguration config = getResourceConfig(manager, workflowResource);
+ Map<String, String> workflowCfg = config.getRecord().getSimpleFields();
WorkflowConfig.Builder b = WorkflowConfig.Builder.fromMap(workflowCfg);
-
return b.build();
}
@@ -155,20 +158,9 @@ public class TaskUtil {
return workflowResource + "_" + taskName;
}
- private static Map<String, String> getResourceConfigMap(HelixManager manager, String resource) {
- HelixConfigScope scope = getResourceConfigScope(manager.getClusterName(), resource);
- ConfigAccessor configAccessor = manager.getConfigAccessor();
-
- Map<String, String> taskCfg = new HashMap<String, String>();
- List<String> cfgKeys = configAccessor.getKeys(scope);
- if (cfgKeys == null || cfgKeys.isEmpty()) {
- return null;
- }
-
- for (String cfgKey : cfgKeys) {
- taskCfg.put(cfgKey, configAccessor.get(scope, cfgKey));
- }
-
- return taskCfg;
+ private static ResourceConfiguration getResourceConfig(HelixManager manager, String resource) {
+ HelixDataAccessor accessor = manager.getHelixDataAccessor();
+ PropertyKey.Builder keyBuilder = accessor.keyBuilder();
+ return accessor.getProperty(keyBuilder.resourceConfig(resource));
}
}
http://git-wip-us.apache.org/repos/asf/helix/blob/e4468121/helix-provisioning/src/main/java/org/apache/helix/provisioning/tools/TaskManager.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/tools/TaskManager.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/tools/TaskManager.java
index 2a80841..2d3f8bb 100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/tools/TaskManager.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/tools/TaskManager.java
@@ -35,6 +35,7 @@ import org.apache.helix.PropertyKey;
import org.apache.helix.ZNRecord;
import org.apache.helix.api.id.ClusterId;
import org.apache.helix.api.id.Id;
+import org.apache.helix.api.id.ResourceId;
import org.apache.helix.manager.zk.HelixConnectionAdaptor;
import org.apache.helix.task.TaskConfig;
import org.apache.helix.task.TaskDriver;
@@ -105,7 +106,8 @@ public class TaskManager {
public void addTaskToQueue(final String taskName, final String queueName) {
HelixDataAccessor accessor = _connection.createDataAccessor(_clusterId);
PropertyKey.Builder keyBuilder = accessor.keyBuilder();
- String configPath = keyBuilder.resourceConfig(queueName + "_" + queueName).getPath();
+ final ResourceId resourceId = ResourceId.from(queueName + "_" + queueName);
+ String configPath = keyBuilder.resourceConfig(resourceId.toString()).getPath();
DataUpdater<ZNRecord> dataUpdater = new DataUpdater<ZNRecord>() {
@Override
public ZNRecord update(ZNRecord currentData) {
@@ -120,12 +122,12 @@ public class TaskManager {
currentId = parts.length;
currentData.setSimpleField(TaskConfig.TARGET_PARTITIONS, current + "," + currentId);
}
- Map<String, String> partitionMap = currentData.getMapField(TaskConfig.PARTITION_NAME_MAP);
+ Map<String, String> partitionMap = currentData.getMapField(TaskConfig.TASK_NAME_MAP);
if (partitionMap == null) {
partitionMap = Maps.newHashMap();
- currentData.setMapField(TaskConfig.PARTITION_NAME_MAP, partitionMap);
+ currentData.setMapField(TaskConfig.TASK_NAME_MAP, partitionMap);
}
- partitionMap.put(String.valueOf(currentId), taskName);
+ partitionMap.put(resourceId.toString() + '_' + currentId, taskName);
return currentData;
}
};
@@ -147,6 +149,5 @@ public class TaskManager {
}
public void shutdownQueue(String queueName) {
-
}
}
http://git-wip-us.apache.org/repos/asf/helix/blob/e4468121/helix-provisioning/src/test/java/org/apache/helix/provisioning/tools/TestTaskManager.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/test/java/org/apache/helix/provisioning/tools/TestTaskManager.java b/helix-provisioning/src/test/java/org/apache/helix/provisioning/tools/TestTaskManager.java
index 7016661..f90ef3a 100644
--- a/helix-provisioning/src/test/java/org/apache/helix/provisioning/tools/TestTaskManager.java
+++ b/helix-provisioning/src/test/java/org/apache/helix/provisioning/tools/TestTaskManager.java
@@ -61,10 +61,16 @@ public class TestTaskManager extends ZkUnitTestBase {
true); // do rebalance
Map<String, TaskFactory> taskFactoryReg = new HashMap<String, TaskFactory>();
- taskFactoryReg.put("myqueue", new TaskFactory() {
+ taskFactoryReg.put("mytask1", new TaskFactory() {
@Override
public Task createNewTask(String config) {
- return new MyTask();
+ return new MyTask(1);
+ }
+ });
+ taskFactoryReg.put("mytask2", new TaskFactory() {
+ @Override
+ public Task createNewTask(String config) {
+ return new MyTask(2);
}
});
MockParticipantManager[] participants = new MockParticipantManager[NUM_PARTICIPANTS];
@@ -88,7 +94,7 @@ public class TestTaskManager extends ZkUnitTestBase {
ClusterId clusterId = ClusterId.from(clusterName);
TaskManager taskManager = new TaskManager(clusterId, connection);
taskManager.createTaskQueue("myqueue", true);
- taskManager.addTaskToQueue("mytask", "myqueue");
+ taskManager.addTaskToQueue("mytask1", "myqueue");
taskManager.addTaskToQueue("mytask2", "myqueue");
controller.syncStop();
@@ -98,13 +104,19 @@ public class TestTaskManager extends ZkUnitTestBase {
}
public static class MyTask implements Task {
+ private final int _id;
+
+ public MyTask(int id) {
+ _id = id;
+ }
+
@Override
public TaskResult run() {
try {
Thread.sleep(10000);
} catch (InterruptedException e) {
}
- System.err.println("task complete");
+ System.err.println("task complete for " + _id);
return new TaskResult(TaskResult.Status.COMPLETED, "");
}
[50/50] [abbrv] git commit: Merge remote-tracking branch
'origin/helix-provisioning'
Posted by ka...@apache.org.
Merge remote-tracking branch 'origin/helix-provisioning'
Project: http://git-wip-us.apache.org/repos/asf/helix/repo
Commit: http://git-wip-us.apache.org/repos/asf/helix/commit/713586c4
Tree: http://git-wip-us.apache.org/repos/asf/helix/tree/713586c4
Diff: http://git-wip-us.apache.org/repos/asf/helix/diff/713586c4
Branch: refs/heads/master
Commit: 713586c4282feb47e411f66d43af60132ec54e64
Parents: 884e071 0f79187
Author: Kanak Biscuitwala <ka...@apache.org>
Authored: Thu Jul 10 10:02:44 2014 -0700
Committer: Kanak Biscuitwala <ka...@apache.org>
Committed: Thu Jul 10 10:02:44 2014 -0700
----------------------------------------------------------------------
helix-core/pom.xml | 4 +
.../java/org/apache/helix/HelixService.java | 8 +-
.../java/org/apache/helix/api/Controller.java | 4 +-
.../java/org/apache/helix/api/Participant.java | 14 +-
.../java/org/apache/helix/api/Resource.java | 34 +-
.../helix/api/accessor/ClusterAccessor.java | 11 +
.../helix/api/accessor/ParticipantAccessor.java | 15 +-
.../helix/api/accessor/ResourceAccessor.java | 14 +-
.../helix/api/config/ContainerConfig.java | 66 ++
.../apache/helix/api/config/ResourceConfig.java | 45 +-
.../controller/GenericHelixController.java | 2 +
.../context/ControllerContextProvider.java | 6 +-
.../controller/provisioner/ContainerId.java | 49 ++
.../provisioner/ContainerProvider.java | 36 ++
.../controller/provisioner/ContainerSpec.java | 65 ++
.../controller/provisioner/ContainerState.java | 34 ++
.../controller/provisioner/Provisioner.java | 42 ++
.../provisioner/ProvisionerConfig.java | 31 +
.../controller/provisioner/ProvisionerRef.java | 100 +++
.../controller/provisioner/ServiceConfig.java | 24 +
.../controller/provisioner/TargetProvider.java | 39 ++
.../provisioner/TargetProviderResponse.java | 68 +++
.../stages/ContainerProvisioningStage.java | 332 ++++++++++
.../stages/ResourceComputationStage.java | 1 +
.../strategy/AutoRebalanceStrategy.java | 4 +-
.../knapsack/AbstractBaseKnapsackSolver.java | 51 ++
.../knapsack/AbstractKnapsackPropagator.java | 123 ++++
.../strategy/knapsack/BaseKnapsackSolver.java | 68 +++
.../strategy/knapsack/KnapsackAssignment.java | 40 ++
.../KnapsackCapacityPropagatorImpl.java | 237 +++++++
.../knapsack/KnapsackGenericSolverImpl.java | 288 +++++++++
.../strategy/knapsack/KnapsackItem.java | 52 ++
.../strategy/knapsack/KnapsackPropagator.java | 80 +++
.../strategy/knapsack/KnapsackSearchNode.java | 81 +++
.../knapsack/KnapsackSearchNodeImpl.java | 96 +++
.../strategy/knapsack/KnapsackSearchPath.java | 58 ++
.../knapsack/KnapsackSearchPathImpl.java | 84 +++
.../strategy/knapsack/KnapsackSolver.java | 79 +++
.../strategy/knapsack/KnapsackSolverImpl.java | 210 +++++++
.../strategy/knapsack/KnapsackState.java | 61 ++
.../strategy/knapsack/KnapsackStateImpl.java | 80 +++
.../helix/healthcheck/DecayAggregationType.java | 3 +-
.../DefaultControllerMessageHandlerFactory.java | 7 +-
...ltParticipantErrorMessageHandlerFactory.java | 7 +-
.../DefaultSchedulerMessageHandlerFactory.java | 7 +-
.../manager/zk/HelixConnectionAdaptor.java | 3 +-
.../apache/helix/manager/zk/ZKHelixAdmin.java | 5 +-
.../helix/manager/zk/ZkBaseDataAccessor.java | 7 +-
.../manager/zk/ZkCacheBaseDataAccessor.java | 5 +-
.../helix/manager/zk/ZkCallbackHandler.java | 31 +-
.../helix/manager/zk/ZkHelixAutoController.java | 4 +-
.../helix/manager/zk/ZkHelixController.java | 13 +-
.../helix/manager/zk/ZkHelixParticipant.java | 4 +-
.../handling/AsyncCallbackService.java | 10 +-
.../handling/HelixStateTransitionHandler.java | 2 +
.../helix/model/ClusterConfiguration.java | 52 +-
.../java/org/apache/helix/model/IdealState.java | 16 +-
.../org/apache/helix/model/InstanceConfig.java | 80 ++-
.../java/org/apache/helix/model/Message.java | 3 +-
.../helix/model/ProvisionerConfigHolder.java | 184 ++++++
.../helix/model/ResourceConfiguration.java | 68 ++-
.../participant/AbstractParticipantService.java | 142 +++++
.../helix/participant/StateMachineEngine.java | 3 +-
.../helix/task/FixedTargetTaskRebalancer.java | 163 +++++
.../helix/task/GenericTaskRebalancer.java | 277 +++++++++
.../java/org/apache/helix/task/JobConfig.java | 381 ++++++++++++
.../java/org/apache/helix/task/JobContext.java | 248 ++++++++
.../main/java/org/apache/helix/task/JobDag.java | 151 +++++
.../org/apache/helix/task/ScheduleConfig.java | 162 +++++
.../java/org/apache/helix/task/TargetState.java | 8 +-
.../apache/helix/task/TaskCallbackContext.java | 67 ++
.../java/org/apache/helix/task/TaskConfig.java | 340 +++--------
.../org/apache/helix/task/TaskConstants.java | 4 +
.../java/org/apache/helix/task/TaskContext.java | 135 ----
.../java/org/apache/helix/task/TaskDag.java | 152 -----
.../java/org/apache/helix/task/TaskDriver.java | 213 +++++--
.../java/org/apache/helix/task/TaskFactory.java | 5 +-
.../org/apache/helix/task/TaskRebalancer.java | 611 ++++++++++++-------
.../java/org/apache/helix/task/TaskRunner.java | 18 +-
.../org/apache/helix/task/TaskStateModel.java | 46 +-
.../helix/task/TaskStateModelFactory.java | 8 +-
.../java/org/apache/helix/task/TaskUtil.java | 334 ++++++++--
.../java/org/apache/helix/task/Workflow.java | 243 +++++---
.../org/apache/helix/task/WorkflowConfig.java | 58 +-
.../org/apache/helix/task/WorkflowContext.java | 73 +--
.../org/apache/helix/task/beans/JobBean.java | 44 ++
.../apache/helix/task/beans/ScheduleBean.java | 32 +
.../org/apache/helix/task/beans/TaskBean.java | 17 +-
.../apache/helix/task/beans/WorkflowBean.java | 7 +-
.../org/apache/helix/tools/ClusterSetup.java | 6 +-
.../helix/tools/StateModelConfigGenerator.java | 54 ++
.../org/apache/helix/util/StatusUpdateUtil.java | 5 +-
.../java/org/apache/helix/ZkTestHelper.java | 6 +-
.../helix/controller/stages/BaseStageTest.java | 2 +-
.../stages/TestMsgSelectionStage.java | 8 +-
.../strategy/TestNewAutoRebalanceStrategy.java | 2 +-
.../helix/healthcheck/TestAlertFireHistory.java | 1 -
.../helix/healthcheck/TestExpandAlert.java | 3 +-
.../helix/healthcheck/TestSimpleAlert.java | 3 +-
.../healthcheck/TestSimpleWildcardAlert.java | 3 +-
.../helix/healthcheck/TestStalenessAlert.java | 3 +-
.../helix/healthcheck/TestWildcardAlert.java | 3 +-
.../TestAddNodeAfterControllerStart.java | 4 +-
.../TestAddStateModelFactoryAfterConnect.java | 4 +-
.../helix/integration/TestAutoRebalance.java | 3 +-
.../integration/TestCleanupExternalView.java | 8 +-
.../helix/integration/TestHelixConnection.java | 8 +-
.../integration/TestLocalContainerProvider.java | 346 +++++++++++
.../TestMessagePartitionStateMismatch.java | 3 +-
.../TestParticipantErrorMessage.java | 9 +-
.../integration/TestResetPartitionState.java | 7 +-
.../helix/integration/TestSharedConnection.java | 12 +-
.../helix/integration/TestStandAloneCMMain.java | 3 +-
.../integration/TestStateTransitionTimeout.java | 3 +-
...dAloneCMTestBaseWithPropertyServerCheck.java | 30 +-
.../manager/ClusterDistributedController.java | 3 +-
.../task/TestIndependentTaskRebalancer.java | 331 ++++++++++
.../integration/task/TestTaskRebalancer.java | 136 +++--
.../task/TestTaskRebalancerStopResume.java | 59 +-
.../apache/helix/integration/task/TestUtil.java | 15 +-
.../integration/task/WorkflowGenerator.java | 74 ++-
.../zk/TestZKPropertyTransferServer.java | 6 +-
.../manager/zk/TestZkHelixAutoController.java | 4 +-
.../helix/manager/zk/TestZkHelixController.java | 6 +-
.../manager/zk/TestZkHelixParticipant.java | 9 +-
.../zk/TestZkManagerFlappingDetection.java | 1 -
.../handling/TestConfigThreadpoolSize.java | 7 +-
.../handling/TestHelixTaskExecutor.java | 9 +-
.../apache/helix/tools/TestHelixAdminCli.java | 3 +-
.../helix/examples/LogicalModelExample.java | 12 +-
helix-provisioning/.gitignore | 16 +
helix-provisioning/DISCLAIMER | 15 +
helix-provisioning/LICENSE | 273 +++++++++
helix-provisioning/NOTICE | 30 +
helix-provisioning/README.md | 35 ++
helix-provisioning/pom.xml | 118 ++++
helix-provisioning/src/assemble/assembly.xml | 60 ++
.../src/main/config/log4j.properties | 31 +
.../apache/helix/provisioning/AppConfig.java | 35 ++
.../helix/provisioning/ApplicationSpec.java | 46 ++
.../provisioning/ApplicationSpecFactory.java | 28 +
.../provisioning/ContainerAskResponse.java | 36 ++
.../provisioning/ContainerLaunchResponse.java | 24 +
.../provisioning/ContainerReleaseResponse.java | 24 +
.../provisioning/ContainerStopResponse.java | 24 +
.../helix/provisioning/HelixYarnUtil.java | 61 ++
.../helix/provisioning/ParticipantLauncher.java | 156 +++++
.../helix/provisioning/ServiceConfig.java | 32 +
.../apache/helix/provisioning/TaskConfig.java | 48 ++
.../StatelessParticipantService.java | 86 +++
.../participant/StatelessServiceStateModel.java | 56 ++
.../StatelessServiceStateModelFactory.java | 39 ++
.../provisioning/tools/ContainerAdmin.java | 116 ++++
.../tools/UpdateProvisionerConfig.java | 106 ++++
.../helix/provisioning/yarn/AppLauncher.java | 580 ++++++++++++++++++
.../provisioning/yarn/AppMasterConfig.java | 130 ++++
.../provisioning/yarn/AppMasterLauncher.java | 213 +++++++
.../yarn/AppStatusReportGenerator.java | 103 ++++
.../provisioning/yarn/FixedTargetProvider.java | 39 ++
.../yarn/GenericApplicationMaster.java | 316 ++++++++++
.../yarn/LaunchContainerRunnable.java | 98 +++
.../provisioning/yarn/NMCallbackHandler.java | 103 ++++
.../provisioning/yarn/RMCallbackHandler.java | 150 +++++
.../provisioning/yarn/YarnProvisioner.java | 411 +++++++++++++
.../yarn/YarnProvisionerConfig.java | 73 +++
.../src/main/resources/sample_application.yaml | 42 ++
helix-provisioning/src/test/conf/testng.xml | 27 +
pom.xml | 6 +
recipes/helloworld-provisioning-yarn/pom.xml | 158 +++++
recipes/helloworld-provisioning-yarn/run.sh | 6 +
.../src/assemble/assembly.xml | 60 ++
.../src/main/config/log4j.properties | 31 +
.../yarn/example/HelloWordAppSpecFactory.java | 48 ++
.../yarn/example/HelloWorldService.java | 56 ++
.../yarn/example/HelloworldAppSpec.java | 167 +++++
.../main/resources/hello_world_app_spec.yaml | 42 ++
.../src/test/conf/testng.xml | 27 +
recipes/jobrunner-yarn/pom.xml | 158 +++++
recipes/jobrunner-yarn/run.sh | 6 +
.../jobrunner-yarn/src/assemble/assembly.xml | 60 ++
.../src/main/config/log4j.properties | 31 +
.../yarn/example/JobRunnerMain.java | 151 +++++
.../helix/provisioning/yarn/example/MyTask.java | 72 +++
.../yarn/example/MyTaskAppSpec.java | 167 +++++
.../yarn/example/MyTaskAppSpecFactory.java | 47 ++
.../yarn/example/MyTaskService.java | 81 +++
.../src/main/resources/dummy_job.yaml | 36 ++
.../src/main/resources/job_runner_app_spec.yaml | 41 ++
recipes/jobrunner-yarn/src/test/conf/testng.xml | 27 +
recipes/pom.xml | 2 +
190 files changed, 12263 insertions(+), 1400 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/helix-core/pom.xml
----------------------------------------------------------------------
diff --cc helix-core/pom.xml
index 7ca41d7,0f1f2b9..a8414f4
--- a/helix-core/pom.xml
+++ b/helix-core/pom.xml
@@@ -213,9 -217,9 +213,13 @@@ under the License
<name>test-util</name>
</program>
<program>
+ <mainClass>org.apache.helix.tools.ZkGrep</mainClass>
+ <name>zkgrep</name>
+ </program>
++ <program>
+ <mainClass>org.apache.helix.task.TaskDriver</mainClass>
+ <name>task-driver</name>
+ </program>
</programs>
</configuration>
</plugin>
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/helix-core/src/main/java/org/apache/helix/api/accessor/ClusterAccessor.java
----------------------------------------------------------------------
diff --cc helix-core/src/main/java/org/apache/helix/api/accessor/ClusterAccessor.java
index a41da5a,e653338..c85dd0b
--- a/helix-core/src/main/java/org/apache/helix/api/accessor/ClusterAccessor.java
+++ b/helix-core/src/main/java/org/apache/helix/api/accessor/ClusterAccessor.java
@@@ -55,7 -54,7 +55,8 @@@ import org.apache.helix.api.id.SessionI
import org.apache.helix.api.id.StateModelDefId;
import org.apache.helix.controller.context.ControllerContext;
import org.apache.helix.controller.context.ControllerContextHolder;
+ import org.apache.helix.controller.provisioner.ProvisionerConfig;
+import org.apache.helix.controller.rebalancer.RebalancerRef;
import org.apache.helix.controller.rebalancer.config.PartitionedRebalancerConfig;
import org.apache.helix.controller.rebalancer.config.RebalancerConfig;
import org.apache.helix.controller.rebalancer.config.RebalancerConfigHolder;
@@@ -768,22 -700,6 +770,27 @@@ public class ClusterAccessor
if (idealState != null) {
_accessor.setProperty(_keyBuilder.idealStates(resourceId.stringify()), idealState);
}
+
+ // Add resource user config
+ if (resource.getUserConfig() != null) {
+ ResourceConfiguration configuration = new ResourceConfiguration(resourceId);
+ configuration.setType(resource.getType());
+ configuration.addNamespacedConfig(resource.getUserConfig());
+ PartitionedRebalancerConfig partitionedConfig = PartitionedRebalancerConfig.from(config);
+ if (idealState == null
+ && (partitionedConfig == null || partitionedConfig.getRebalanceMode() == RebalanceMode.USER_DEFINED)) {
+ // only persist if this is not easily convertible to an ideal state
+ configuration
+ .addNamespacedConfig(new RebalancerConfigHolder(resource.getRebalancerConfig())
+ .toNamespacedConfig());
+ }
++ ProvisionerConfig provisionerConfig = resource.getProvisionerConfig();
++ if (provisionerConfig != null) {
++ configuration.addNamespacedConfig(new ProvisionerConfigHolder(provisionerConfig)
++ .toNamespacedConfig());
++ }
+ _accessor.setProperty(_keyBuilder.resourceConfig(resourceId.stringify()), configuration);
+ }
return true;
}
@@@ -812,10 -728,14 +819,14 @@@
BaseDataAccessor<?> baseAccessor = _accessor.getBaseDataAccessor();
if (baseAccessor != null) {
boolean[] existsResults = baseAccessor.exists(paths, 0);
- int ind =0;
++ int ind = 0;
for (boolean exists : existsResults) {
-
++
if (!exists) {
- LOG.warn("Path does not exist:"+ paths.get(ind));
++ LOG.warn("Path does not exist:" + paths.get(ind));
return false;
}
+ ind = ind + 1;
}
}
return true;
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/helix-core/src/main/java/org/apache/helix/api/accessor/ParticipantAccessor.java
----------------------------------------------------------------------
diff --cc helix-core/src/main/java/org/apache/helix/api/accessor/ParticipantAccessor.java
index d0cc3ba,cf4c549..cb52e91
--- a/helix-core/src/main/java/org/apache/helix/api/accessor/ParticipantAccessor.java
+++ b/helix-core/src/main/java/org/apache/helix/api/accessor/ParticipantAccessor.java
@@@ -43,9 -43,9 +43,10 @@@ import org.apache.helix.api.Resource
import org.apache.helix.api.RunningInstance;
import org.apache.helix.api.Scope;
import org.apache.helix.api.State;
+ import org.apache.helix.api.config.ContainerConfig;
import org.apache.helix.api.config.ParticipantConfig;
import org.apache.helix.api.config.UserConfig;
+import org.apache.helix.api.id.ClusterId;
import org.apache.helix.api.id.MessageId;
import org.apache.helix.api.id.ParticipantId;
import org.apache.helix.api.id.PartitionId;
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/helix-core/src/main/java/org/apache/helix/api/accessor/ResourceAccessor.java
----------------------------------------------------------------------
diff --cc helix-core/src/main/java/org/apache/helix/api/accessor/ResourceAccessor.java
index 310b457,0052871..7dde6ee
--- a/helix-core/src/main/java/org/apache/helix/api/accessor/ResourceAccessor.java
+++ b/helix-core/src/main/java/org/apache/helix/api/accessor/ResourceAccessor.java
@@@ -39,7 -38,7 +39,8 @@@ import org.apache.helix.api.id.ClusterI
import org.apache.helix.api.id.ParticipantId;
import org.apache.helix.api.id.PartitionId;
import org.apache.helix.api.id.ResourceId;
+ import org.apache.helix.controller.provisioner.ProvisionerConfig;
+import org.apache.helix.controller.rebalancer.RebalancerRef;
import org.apache.helix.controller.rebalancer.config.BasicRebalancerConfig;
import org.apache.helix.controller.rebalancer.config.CustomRebalancerConfig;
import org.apache.helix.controller.rebalancer.config.PartitionedRebalancerConfig;
@@@ -274,11 -260,13 +276,17 @@@ public class ResourceAccessor
// only persist if this is not easily convertible to an ideal state
config.addNamespacedConfig(new RebalancerConfigHolder(resourceConfig.getRebalancerConfig())
.toNamespacedConfig());
+ config.setBucketSize(resourceConfig.getBucketSize());
+ config.setBatchMessageMode(resourceConfig.getBatchMessageMode());
+ } else if (userConfig == null) {
+ config = null;
}
+ if (resourceConfig.getProvisionerConfig() != null) {
+ config.addNamespacedConfig(new ProvisionerConfigHolder(resourceConfig.getProvisionerConfig())
+ .toNamespacedConfig());
+ }
+ config.setBucketSize(resourceConfig.getBucketSize());
+ config.setBatchMessageMode(resourceConfig.getBatchMessageMode());
setConfiguration(resourceId, config, resourceConfig.getRebalancerConfig());
return true;
}
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/helix-core/src/main/java/org/apache/helix/controller/GenericHelixController.java
----------------------------------------------------------------------
diff --cc helix-core/src/main/java/org/apache/helix/controller/GenericHelixController.java
index c8b033e,b5fb23e..f9af914
--- a/helix-core/src/main/java/org/apache/helix/controller/GenericHelixController.java
+++ b/helix-core/src/main/java/org/apache/helix/controller/GenericHelixController.java
@@@ -48,10 -45,9 +48,11 @@@ import org.apache.helix.api.id.SessionI
import org.apache.helix.controller.pipeline.Pipeline;
import org.apache.helix.controller.pipeline.PipelineRegistry;
import org.apache.helix.controller.stages.BestPossibleStateCalcStage;
+import org.apache.helix.controller.stages.ClusterDataCache;
import org.apache.helix.controller.stages.ClusterEvent;
+import org.apache.helix.controller.stages.ClusterEventBlockingQueue;
import org.apache.helix.controller.stages.CompatibilityCheckStage;
+ import org.apache.helix.controller.stages.ContainerProvisioningStage;
import org.apache.helix.controller.stages.CurrentStateComputationStage;
import org.apache.helix.controller.stages.ExternalViewComputeStage;
import org.apache.helix.controller.stages.MessageGenerationStage;
@@@ -201,7 -183,7 +202,8 @@@ public class GenericHelixController imp
Pipeline rebalancePipeline = new Pipeline();
rebalancePipeline.addStage(new CompatibilityCheckStage());
rebalancePipeline.addStage(new ResourceComputationStage());
+ rebalancePipeline.addStage(new ResourceValidationStage());
+ rebalancePipeline.addStage(new ContainerProvisioningStage());
rebalancePipeline.addStage(new CurrentStateComputationStage());
rebalancePipeline.addStage(new BestPossibleStateCalcStage());
rebalancePipeline.addStage(new MessageGenerationStage());
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/helix-core/src/main/java/org/apache/helix/controller/provisioner/ContainerId.java
----------------------------------------------------------------------
diff --cc helix-core/src/main/java/org/apache/helix/controller/provisioner/ContainerId.java
index 0000000,49bc0fc..b42d881
mode 000000,100644..100644
--- a/helix-core/src/main/java/org/apache/helix/controller/provisioner/ContainerId.java
+++ b/helix-core/src/main/java/org/apache/helix/controller/provisioner/ContainerId.java
@@@ -1,0 -1,30 +1,49 @@@
+ package org.apache.helix.controller.provisioner;
+
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements. See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership. The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License. You may obtain a copy of the License at
++ *
++ * http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing,
++ * software distributed under the License is distributed on an
++ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++ * KIND, either express or implied. See the License for the
++ * specific language governing permissions and limitations
++ * under the License.
++ */
++
+ import org.apache.helix.api.id.Id;
+
+ public class ContainerId extends Id {
+
+ String id;
+
+ private ContainerId(String containerId) {
+ this.id = containerId;
+ }
+
+ @Override
+ public String stringify() {
+ return id;
+ }
+
+ /**
+ * Get a concrete partition id
+ * @param partitionId string partition identifier
+ * @return PartitionId
+ */
+ public static ContainerId from(String containerId) {
+ if (containerId == null) {
+ return null;
+ }
+ return new ContainerId(containerId);
+ }
+
+ }
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/helix-core/src/main/java/org/apache/helix/controller/provisioner/ContainerSpec.java
----------------------------------------------------------------------
diff --cc helix-core/src/main/java/org/apache/helix/controller/provisioner/ContainerSpec.java
index 0000000,ab3c46a..36ad7f9
mode 000000,100644..100644
--- a/helix-core/src/main/java/org/apache/helix/controller/provisioner/ContainerSpec.java
+++ b/helix-core/src/main/java/org/apache/helix/controller/provisioner/ContainerSpec.java
@@@ -1,0 -1,65 +1,65 @@@
+ package org.apache.helix.controller.provisioner;
+
+ import org.apache.helix.api.id.ParticipantId;
+
+ /*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+ public class ContainerSpec {
+ /**
+ * Some unique id representing the container.
+ */
+ ContainerId _containerId;
-
++
+ int _memory;
+
+ private ParticipantId _participantId;
+
+ public ContainerSpec(ParticipantId _participantId) {
+ this._participantId = _participantId;
+ }
+
+ public ContainerId getContainerId() {
+ return _containerId;
+ }
+
+ @Override
+ public String toString() {
+ return _participantId.toString();
+ }
-
- public void setMemory(int memory){
++
++ public void setMemory(int memory) {
+ _memory = memory;
+ }
+
- public int getMemory(){
++ public int getMemory() {
+ return _memory;
+ }
-
++
+ public static ContainerSpec from(String serialized) {
- //todo
++ // todo
+ return null;
- //return new ContainerSpec(ContainerId.from(serialized));
++ // return new ContainerSpec(ContainerId.from(serialized));
+ }
+
+ public ParticipantId getParticipantId() {
+ return _participantId;
+ }
-
++
+ }
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/helix-core/src/main/java/org/apache/helix/controller/provisioner/ServiceConfig.java
----------------------------------------------------------------------
diff --cc helix-core/src/main/java/org/apache/helix/controller/provisioner/ServiceConfig.java
index 0000000,adccb2c..9370cfd
mode 000000,100644..100644
--- a/helix-core/src/main/java/org/apache/helix/controller/provisioner/ServiceConfig.java
+++ b/helix-core/src/main/java/org/apache/helix/controller/provisioner/ServiceConfig.java
@@@ -1,0 -1,5 +1,24 @@@
+ package org.apache.helix.controller.provisioner;
+
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements. See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership. The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License. You may obtain a copy of the License at
++ *
++ * http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing,
++ * software distributed under the License is distributed on an
++ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++ * KIND, either express or implied. See the License for the
++ * specific language governing permissions and limitations
++ * under the License.
++ */
++
+ public class ServiceConfig {
+
+ }
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/helix-core/src/main/java/org/apache/helix/controller/provisioner/TargetProvider.java
----------------------------------------------------------------------
diff --cc helix-core/src/main/java/org/apache/helix/controller/provisioner/TargetProvider.java
index 0000000,063d008..1e5957b
mode 000000,100644..100644
--- a/helix-core/src/main/java/org/apache/helix/controller/provisioner/TargetProvider.java
+++ b/helix-core/src/main/java/org/apache/helix/controller/provisioner/TargetProvider.java
@@@ -1,0 -1,42 +1,39 @@@
+ package org.apache.helix.controller.provisioner;
+
+ /*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+ import java.util.Collection;
+
-import org.apache.helix.HelixManager;
+ import org.apache.helix.api.Cluster;
+ import org.apache.helix.api.Participant;
-import org.apache.helix.api.config.ResourceConfig;
+ import org.apache.helix.api.id.ResourceId;
+
+ public interface TargetProvider {
+
-
+ /**
+ * @param cluster
+ * @param resourceId ResourceId name of the resource
+ * @param participants
+ * @return
+ */
+ TargetProviderResponse evaluateExistingContainers(Cluster cluster, ResourceId resourceId,
+ Collection<Participant> participants);
+
+ }
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/helix-core/src/main/java/org/apache/helix/controller/stages/ContainerProvisioningStage.java
----------------------------------------------------------------------
diff --cc helix-core/src/main/java/org/apache/helix/controller/stages/ContainerProvisioningStage.java
index 0000000,ae433e0..25645d3
mode 000000,100644..100644
--- a/helix-core/src/main/java/org/apache/helix/controller/stages/ContainerProvisioningStage.java
+++ b/helix-core/src/main/java/org/apache/helix/controller/stages/ContainerProvisioningStage.java
@@@ -1,0 -1,332 +1,332 @@@
+ package org.apache.helix.controller.stages;
+
+ /*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+ import java.util.Collection;
+ import java.util.HashMap;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.UUID;
+
+ import org.apache.helix.HelixAdmin;
+ import org.apache.helix.HelixDataAccessor;
+ import org.apache.helix.HelixManager;
+ import org.apache.helix.PropertyKey;
+ import org.apache.helix.api.Cluster;
+ import org.apache.helix.api.Participant;
+ import org.apache.helix.api.config.ContainerConfig;
+ import org.apache.helix.api.config.ResourceConfig;
+ import org.apache.helix.api.id.ParticipantId;
+ import org.apache.helix.api.id.ResourceId;
+ import org.apache.helix.controller.pipeline.AbstractBaseStage;
+ import org.apache.helix.controller.provisioner.ContainerId;
+ import org.apache.helix.controller.provisioner.ContainerProvider;
+ import org.apache.helix.controller.provisioner.ContainerSpec;
+ import org.apache.helix.controller.provisioner.ContainerState;
+ import org.apache.helix.controller.provisioner.Provisioner;
+ import org.apache.helix.controller.provisioner.ProvisionerConfig;
+ import org.apache.helix.controller.provisioner.ProvisionerRef;
+ import org.apache.helix.controller.provisioner.TargetProvider;
+ import org.apache.helix.controller.provisioner.TargetProviderResponse;
+ import org.apache.helix.model.InstanceConfig;
+ import org.apache.helix.model.Message;
+ import org.apache.helix.model.Message.MessageType;
+ import org.apache.log4j.Logger;
+
+ import com.google.common.util.concurrent.FutureCallback;
+ import com.google.common.util.concurrent.Futures;
+ import com.google.common.util.concurrent.ListenableFuture;
+
+ /**
+ * This stage will manage the container allocation/deallocation needed for a
+ * specific resource.<br/>
+ * It does the following <br/>
+ * From the idealstate, it gets ContainerTargetProvider and ContainerProvider <br/>
+ * ContainerTargetProviderFactory will provide the number of containers needed
+ * for a resource <br/>
+ * ContainerProvider will provide the ability to allocate, deallocate, start,
+ * stop container <br/>
+ */
+ public class ContainerProvisioningStage extends AbstractBaseStage {
+ private static final Logger LOG = Logger.getLogger(ContainerProvisioningStage.class);
+
+ Map<ResourceId, Provisioner> _provisionerMap = new HashMap<ResourceId, Provisioner>();
+ Map<ResourceId, TargetProvider> _targetProviderMap = new HashMap<ResourceId, TargetProvider>();
+ Map<ResourceId, ContainerProvider> _containerProviderMap =
+ new HashMap<ResourceId, ContainerProvider>();
+
+ @Override
+ public void process(ClusterEvent event) throws Exception {
+ final HelixManager helixManager = event.getAttribute("helixmanager");
+ final Map<ResourceId, ResourceConfig> resourceMap =
+ event.getAttribute(AttributeName.RESOURCES.toString());
+ final HelixAdmin helixAdmin = helixManager.getClusterManagmentTool();
+ final HelixDataAccessor accessor = helixManager.getHelixDataAccessor();
+ final PropertyKey.Builder keyBuilder = accessor.keyBuilder();
+ for (ResourceId resourceId : resourceMap.keySet()) {
+ ResourceConfig resourceConfig = resourceMap.get(resourceId);
+ ProvisionerConfig provisionerConfig = resourceConfig.getProvisionerConfig();
+ if (provisionerConfig != null) {
+ Provisioner provisioner;
+ provisioner = _provisionerMap.get(resourceId);
+
+ // instantiate and cache a provisioner if there isn't one already cached
+ if (provisioner == null) {
+ ProvisionerRef provisionerRef = provisionerConfig.getProvisionerRef();
+ if (provisionerRef != null) {
+ provisioner = provisionerRef.getProvisioner();
+ }
+ if (provisioner != null) {
+ provisioner.init(helixManager, resourceConfig);
+ _containerProviderMap.put(resourceId, provisioner.getContainerProvider());
+ _targetProviderMap.put(resourceId, provisioner.getTargetProvider());
+ _provisionerMap.put(resourceId, provisioner);
+ } else {
+ LOG.error("Resource " + resourceId + " does not have a valid provisioner class!");
+ break;
+ }
+ }
+ TargetProvider targetProvider = _targetProviderMap.get(resourceId);
+ ContainerProvider containerProvider = _containerProviderMap.get(resourceId);
- final Cluster cluster = event.getAttribute("ClusterDataCache");
++ final Cluster cluster = event.getAttribute("Cluster");
+ final Collection<Participant> participants = cluster.getParticipantMap().values();
+
+ // If a process died, we need to mark it as DISCONNECTED or if the process is ready, mark as
+ // CONNECTED
+ Map<ParticipantId, Participant> participantMap = cluster.getParticipantMap();
+ for (ParticipantId participantId : participantMap.keySet()) {
+ Participant participant = participantMap.get(participantId);
+ ContainerConfig config = participant.getContainerConfig();
+ if (config != null) {
+ ContainerState containerState = config.getState();
+ if (!participant.isAlive() && ContainerState.CONNECTED.equals(containerState)) {
+ // Need to mark as disconnected if process died
+ LOG.info("Participant " + participantId + " died, marking as DISCONNECTED");
+ updateContainerState(helixAdmin, accessor, keyBuilder, cluster, participantId,
+ ContainerState.DISCONNECTED);
+ } else if (participant.isAlive() && ContainerState.CONNECTING.equals(containerState)) {
+ // Need to mark as connected only when the live instance is visible
+ LOG.info("Participant " + participantId + " is ready, marking as CONNECTED");
+ updateContainerState(helixAdmin, accessor, keyBuilder, cluster, participantId,
+ ContainerState.CONNECTED);
+ } else if (!participant.isAlive() && ContainerState.HALTING.equals(containerState)) {
+ // Need to mark as connected only when the live instance is visible
+ LOG.info("Participant " + participantId + " is has been killed, marking as HALTED");
+ updateContainerState(helixAdmin, accessor, keyBuilder, cluster, participantId,
+ ContainerState.HALTED);
+ }
+ }
+ }
+
+ // Participants registered in helix
+ // Give those participants to targetprovider
+ // Provide the response that contains, new containerspecs, containers to be released,
+ // containers to be stopped
+ // call the respective provisioner to allocate and start the container.
+ // Each container is then started its state is changed from any place.
+ // The target provider is given the state of container and asked for its new state. For each
+ // state there is a corresponding handler function.
+
+ // TargetProvider should be stateless, given the state of cluster and existing participants
+ // it should return the same result
+ final TargetProviderResponse response =
+ targetProvider.evaluateExistingContainers(cluster, resourceId, participants);
+
+ // allocate new containers
+ for (final ContainerSpec spec : response.getContainersToAcquire()) {
+ final ParticipantId participantId = spec.getParticipantId();
+ List<String> instancesInCluster =
+ helixAdmin.getInstancesInCluster(cluster.getId().stringify());
+ if (!instancesInCluster.contains(participantId.stringify())) {
+ // create a new Participant, attach the container spec
+ InstanceConfig instanceConfig = new InstanceConfig(participantId);
+ instanceConfig.setContainerSpec(spec);
+ // create a helix_participant in ACQUIRING state
+ instanceConfig.setContainerState(ContainerState.ACQUIRING);
+ // create the helix participant and add it to cluster
+ helixAdmin.addInstance(cluster.getId().toString(), instanceConfig);
+ }
+ LOG.info("Allocating container for " + participantId);
+ ListenableFuture<ContainerId> future = containerProvider.allocateContainer(spec);
+ FutureCallback<ContainerId> callback = new FutureCallback<ContainerId>() {
+ @Override
+ public void onSuccess(ContainerId containerId) {
+ LOG.info("Container " + containerId + " acquired. Marking " + participantId);
+ InstanceConfig existingInstance =
+ helixAdmin
+ .getInstanceConfig(cluster.getId().toString(), participantId.toString());
+ existingInstance.setContainerId(containerId);
+ existingInstance.setContainerState(ContainerState.ACQUIRED);
+ accessor.updateProperty(keyBuilder.instanceConfig(participantId.toString()),
+ existingInstance);
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ LOG.error("Could not allocate a container for participant " + participantId, t);
+ updateContainerState(helixAdmin, accessor, keyBuilder, cluster, participantId,
+ ContainerState.FAILED);
+ }
+ };
+ safeAddCallback(future, callback);
+ }
+
+ // start new containers
+ for (final Participant participant : response.getContainersToStart()) {
+ final InstanceConfig existingInstance =
+ helixAdmin.getInstanceConfig(cluster.getId().toString(), participant.getId()
+ .toString());
+ final ContainerId containerId = existingInstance.getContainerId();
+ existingInstance.setContainerState(ContainerState.CONNECTING);
+ accessor.updateProperty(keyBuilder.instanceConfig(participant.getId().toString()),
+ existingInstance);
+ // create the helix participant and add it to cluster
+ LOG.info("Starting container " + containerId + " for " + participant.getId());
+ ListenableFuture<Boolean> future =
+ containerProvider.startContainer(containerId, participant);
+ FutureCallback<Boolean> callback = new FutureCallback<Boolean>() {
+ @Override
+ public void onSuccess(Boolean result) {
+ // Do nothing yet, need to wait for live instance
+ LOG.info("Container " + containerId + " started for " + participant.getId());
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ LOG.error("Could not start container" + containerId + "for participant "
+ + participant.getId(), t);
+ updateContainerState(helixAdmin, accessor, keyBuilder, cluster, participant.getId(),
+ ContainerState.FAILED);
+ }
+ };
+ safeAddCallback(future, callback);
+ }
+
+ // release containers
+ for (final Participant participant : response.getContainersToRelease()) {
+ // mark it as finalizing
+ final InstanceConfig existingInstance =
+ helixAdmin.getInstanceConfig(cluster.getId().toString(), participant.getId()
+ .toString());
+ final ContainerId containerId = existingInstance.getContainerId();
+ existingInstance.setContainerState(ContainerState.FINALIZING);
+ accessor.updateProperty(keyBuilder.instanceConfig(participant.getId().toString()),
+ existingInstance);
+ // remove the participant
+ LOG.info("Deallocating container " + containerId + " for " + participant.getId());
+ ListenableFuture<Boolean> future = containerProvider.deallocateContainer(containerId);
+ FutureCallback<Boolean> callback = new FutureCallback<Boolean>() {
+ @Override
+ public void onSuccess(Boolean result) {
+ LOG.info("Container " + containerId + " deallocated. Dropping " + participant.getId());
+ InstanceConfig existingInstance =
+ helixAdmin.getInstanceConfig(cluster.getId().toString(), participant.getId()
+ .toString());
+ helixAdmin.dropInstance(cluster.getId().toString(), existingInstance);
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ LOG.error("Could not deallocate container" + containerId + "for participant "
+ + participant.getId(), t);
+ updateContainerState(helixAdmin, accessor, keyBuilder, cluster, participant.getId(),
+ ContainerState.FAILED);
+ }
+ };
+ safeAddCallback(future, callback);
+ }
+
+ // stop but don't remove
+ for (final Participant participant : response.getContainersToStop()) {
+ // disable the node first
+ final InstanceConfig existingInstance =
+ helixAdmin.getInstanceConfig(cluster.getId().toString(), participant.getId()
+ .toString());
+ final ContainerId containerId = existingInstance.getContainerId();
+ existingInstance.setInstanceEnabled(false);
+ existingInstance.setContainerState(ContainerState.HALTING);
+ accessor.updateProperty(keyBuilder.instanceConfig(participant.getId().toString()),
+ existingInstance);
+ // stop the container
+ LOG.info("Stopping container " + containerId + " for " + participant.getId());
+ ListenableFuture<Boolean> future = containerProvider.stopContainer(containerId);
+ FutureCallback<Boolean> callback = new FutureCallback<Boolean>() {
+ @Override
+ public void onSuccess(Boolean result) {
+ // Don't update the state here, wait for the live instance, but do send a shutdown
+ // message
+ LOG.info("Container " + containerId + " stopped for " + participant.getId());
+ if (participant.isAlive()) {
+ Message message = new Message(MessageType.SHUTDOWN, UUID.randomUUID().toString());
+ message.setTgtName(participant.getId().toString());
+ message.setTgtSessionId(participant.getRunningInstance().getSessionId());
+ message.setMsgId(message.getId());
+ accessor.createProperty(
+ keyBuilder.message(participant.getId().toString(), message.getId()), message);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ LOG.error(
+ "Could not stop container" + containerId + "for participant "
+ + participant.getId(), t);
+ updateContainerState(helixAdmin, accessor, keyBuilder, cluster, participant.getId(),
+ ContainerState.FAILED);
+ }
+ };
+ safeAddCallback(future, callback);
+ }
+ }
+ }
+ }
+
+ /**
+ * Update a participant with a new container state
+ * @param helixAdmin
+ * @param accessor
+ * @param keyBuilder
+ * @param cluster
+ * @param participantId
+ * @param state
+ */
+ private void updateContainerState(HelixAdmin helixAdmin, HelixDataAccessor accessor,
+ PropertyKey.Builder keyBuilder, Cluster cluster, ParticipantId participantId,
+ ContainerState state) {
+ InstanceConfig existingInstance =
+ helixAdmin.getInstanceConfig(cluster.getId().toString(), participantId.toString());
+ existingInstance.setContainerState(state);
+ existingInstance.setInstanceEnabled(state.equals(ContainerState.CONNECTED));
+ accessor.updateProperty(keyBuilder.instanceConfig(participantId.toString()), existingInstance);
+ }
+
+ /**
+ * Add a callback, failing if the add fails
+ * @param future the future to listen on
+ * @param callback the callback to invoke
+ */
+ private <T> void safeAddCallback(ListenableFuture<T> future, FutureCallback<T> callback) {
+ try {
+ Futures.addCallback(future, callback);
+ } catch (Throwable t) {
+ callback.onFailure(t);
+ }
+ }
+ }
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/helix-core/src/main/java/org/apache/helix/controller/stages/ResourceComputationStage.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/helix-core/src/main/java/org/apache/helix/controller/strategy/AutoRebalanceStrategy.java
----------------------------------------------------------------------
diff --cc helix-core/src/main/java/org/apache/helix/controller/strategy/AutoRebalanceStrategy.java
index 5a8de20,bff7e46..09b66c1
--- a/helix-core/src/main/java/org/apache/helix/controller/strategy/AutoRebalanceStrategy.java
+++ b/helix-core/src/main/java/org/apache/helix/controller/strategy/AutoRebalanceStrategy.java
@@@ -510,13 -491,12 +510,13 @@@ public class AutoRebalanceStrategy
* @return The current assignments that do not conform to the preferred assignment
*/
private Map<Replica, Node> computeExistingNonPreferredPlacement(
- Map<String, Map<String, String>> currentMapping) {
+ Map<PartitionId, Map<ParticipantId, State>> currentMapping) {
Map<Replica, Node> existingNonPreferredAssignment = new TreeMap<Replica, Node>();
int count = countStateReplicas();
- for (String partition : currentMapping.keySet()) {
- Map<String, String> nodeStateMap = currentMapping.get(partition);
- for (String nodeId : nodeStateMap.keySet()) {
+ for (PartitionId partition : currentMapping.keySet()) {
+ Map<ParticipantId, State> nodeStateMap = currentMapping.get(partition);
++ nodeStateMap.keySet().retainAll(_nodeMap.keySet());
+ for (ParticipantId nodeId : nodeStateMap.keySet()) {
- nodeStateMap.keySet().retainAll(_nodeMap.keySet());
Node node = _nodeMap.get(nodeId);
boolean skip = false;
for (Replica replica : node.preferred) {
@@@ -580,13 -560,12 +580,13 @@@
* @return Assignments that conform to the preferred placement
*/
private Map<Replica, Node> computeExistingPreferredPlacement(
- final Map<String, Map<String, String>> currentMapping) {
+ final Map<PartitionId, Map<ParticipantId, State>> currentMapping) {
Map<Replica, Node> existingPreferredAssignment = new TreeMap<Replica, Node>();
int count = countStateReplicas();
- for (String partition : currentMapping.keySet()) {
- Map<String, String> nodeStateMap = currentMapping.get(partition);
- for (String nodeId : nodeStateMap.keySet()) {
+ for (PartitionId partition : currentMapping.keySet()) {
+ Map<ParticipantId, State> nodeStateMap = currentMapping.get(partition);
++ nodeStateMap.keySet().retainAll(_nodeMap.keySet());
+ for (ParticipantId nodeId : nodeStateMap.keySet()) {
- nodeStateMap.keySet().retainAll(_nodeMap.keySet());
Node node = _nodeMap.get(nodeId);
node.currentlyAssigned = node.currentlyAssigned + 1;
// check if its in one of the preferred position
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/AbstractBaseKnapsackSolver.java
----------------------------------------------------------------------
diff --cc helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/AbstractBaseKnapsackSolver.java
index 0000000,4d27bd7..a0de0e7
mode 000000,100644..100644
--- a/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/AbstractBaseKnapsackSolver.java
+++ b/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/AbstractBaseKnapsackSolver.java
@@@ -1,0 -1,32 +1,51 @@@
+ package org.apache.helix.controller.strategy.knapsack;
+
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements. See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership. The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License. You may obtain a copy of the License at
++ *
++ * http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing,
++ * software distributed under the License is distributed on an
++ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++ * KIND, either express or implied. See the License for the
++ * specific language governing permissions and limitations
++ * under the License.
++ */
++
+ /**
+ * Common implementation of a knapsack solver<br/>
+ * <br/>
+ * Based on the C++ knapsack solver in Google's or-tools package.
+ */
+ public abstract class AbstractBaseKnapsackSolver implements BaseKnapsackSolver {
+ private final String _solverName;
+
+ /**
+ * Initialize the solver
+ * @param solverName the name of the solver
+ */
+ public AbstractBaseKnapsackSolver(final String solverName) {
+ _solverName = solverName;
+ }
+
+ @Override
+ public long[] getLowerAndUpperBoundWhenItem(int itemId, boolean isItemIn, long lowerBound,
+ long upperBound) {
+ return new long[] {
+ 0L, Long.MAX_VALUE
+ };
+ }
+
+ @Override
+ public String getName() {
+ return _solverName;
+ }
+
+ }
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/AbstractKnapsackPropagator.java
----------------------------------------------------------------------
diff --cc helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/AbstractKnapsackPropagator.java
index 0000000,0663990..6f9de66
mode 000000,100644..100644
--- a/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/AbstractKnapsackPropagator.java
+++ b/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/AbstractKnapsackPropagator.java
@@@ -1,0 -1,104 +1,123 @@@
+ package org.apache.helix.controller.strategy.knapsack;
+
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements. See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership. The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License. You may obtain a copy of the License at
++ *
++ * http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing,
++ * software distributed under the License is distributed on an
++ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++ * KIND, either express or implied. See the License for the
++ * specific language governing permissions and limitations
++ * under the License.
++ */
++
+ import java.util.ArrayList;
+
+ /**
+ * Common implementation of a knapsack constraint satisfier<br/>
+ * <br/>
+ * Based on the C++ knapsack solver in Google's or-tools package.
+ */
+ public abstract class AbstractKnapsackPropagator implements KnapsackPropagator {
+ private ArrayList<KnapsackItem> _items;
+ private long _currentProfit;
+ private long _profitLowerBound;
+ private long _profitUpperBound;
+ private KnapsackState _state;
+
+ /**
+ * Initialize the propagator
+ * @param state the current knapsack state
+ */
+ public AbstractKnapsackPropagator(final KnapsackState state) {
+ _items = new ArrayList<KnapsackItem>();
+ _currentProfit = 0L;
+ _profitLowerBound = 0L;
+ _profitUpperBound = Long.MAX_VALUE;
+ _state = state;
+ }
+
+ @Override
+ public void init(ArrayList<Long> profits, ArrayList<Long> weights) {
+ final int numberOfItems = profits.size();
+ _items.clear();
+ for (int i = 0; i < numberOfItems; i++) {
+ _items.add(new KnapsackItem(i, weights.get(i), profits.get(i)));
+ }
+ _currentProfit = 0;
+ _profitLowerBound = Long.MIN_VALUE;
+ _profitUpperBound = Long.MAX_VALUE;
+ initPropagator();
+ }
+
+ @Override
+ public boolean update(boolean revert, KnapsackAssignment assignment) {
+ if (assignment.isIn) {
+ if (revert) {
+ _currentProfit -= _items.get(assignment.itemId).profit;
+ } else {
+ _currentProfit += _items.get(assignment.itemId).profit;
+ }
+ }
+ return updatePropagator(revert, assignment);
+ }
+
+ @Override
+ public long currentProfit() {
+ return _currentProfit;
+ }
+
+ @Override
+ public long profitLowerBound() {
+ return _profitLowerBound;
+ }
+
+ @Override
+ public long profitUpperBound() {
+ return _profitUpperBound;
+ }
+
+ @Override
+ public void copyCurrentStateToSolution(boolean hasOnePropagator, ArrayList<Boolean> solution) {
+ if (solution == null) {
+ throw new RuntimeException("solution cannot be null!");
+ }
+ for (KnapsackItem item : _items) {
+ final int itemId = item.id;
+ solution.set(itemId, _state.isBound(itemId) && _state.isIn(itemId));
+ }
+ if (hasOnePropagator) {
+ copyCurrentStateToSolutionPropagator(solution);
+ }
+ }
+
+ protected abstract void initPropagator();
+
+ protected abstract boolean updatePropagator(boolean revert, final KnapsackAssignment assignment);
+
+ protected abstract void copyCurrentStateToSolutionPropagator(ArrayList<Boolean> solution);
+
+ protected KnapsackState state() {
+ return _state;
+ }
+
+ protected ArrayList<KnapsackItem> items() {
+ return _items;
+ }
+
+ protected void setProfitLowerBound(long profit) {
+ _profitLowerBound = profit;
+ }
+
+ protected void setProfitUpperBound(long profit) {
+ _profitUpperBound = profit;
+ }
+ }
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/BaseKnapsackSolver.java
----------------------------------------------------------------------
diff --cc helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/BaseKnapsackSolver.java
index 0000000,1d71a22..51221e5
mode 000000,100644..100644
--- a/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/BaseKnapsackSolver.java
+++ b/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/BaseKnapsackSolver.java
@@@ -1,0 -1,49 +1,68 @@@
+ package org.apache.helix.controller.strategy.knapsack;
+
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements. See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership. The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License. You may obtain a copy of the License at
++ *
++ * http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing,
++ * software distributed under the License is distributed on an
++ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++ * KIND, either express or implied. See the License for the
++ * specific language governing permissions and limitations
++ * under the License.
++ */
++
+ import java.util.ArrayList;
+
+ /**
+ * The interface of any multidimensional knapsack solver<br/>
+ * <br/>
+ * Based on the C++ knapsack solver in Google's or-tools package.
+ */
+ public interface BaseKnapsackSolver {
+ /**
+ * Initialize the solver
+ * @param profits profit of adding each item to the knapsack
+ * @param weights cost of adding each item in each dimension
+ * @param capacities maximum weight per dimension
+ */
+ void init(final ArrayList<Long> profits, final ArrayList<ArrayList<Long>> weights,
+ final ArrayList<Long> capacities);
+
+ /**
+ * Compute an upper and lower bound on the knapsack given the assignment state of the knapsack
+ * @param itemId the item id
+ * @param isItemIn true if the item is in the knapsack, false otherwise
+ * @param lowerBound the current lower bound
+ * @param upperBound the current upper bound
+ * @return the new lower and upper bounds
+ */
+ long[] getLowerAndUpperBoundWhenItem(int itemId, boolean isItemIn, long lowerBound,
+ long upperBound);
+
+ /**
+ * Solve the knapsack problem
+ * @return the (approximate) optimal profit
+ */
+ long solve();
+
+ /**
+ * Check if an item is in the final solution
+ * @param itemId the item id
+ * @return true if the item is present, false otherwise
+ */
+ boolean bestSolution(int itemId);
+
+ /**
+ * Get the solver name
+ * @return solver name
+ */
+ String getName();
+ }
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackAssignment.java
----------------------------------------------------------------------
diff --cc helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackAssignment.java
index 0000000,bfd29d7..50f58a7
mode 000000,100644..100644
--- a/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackAssignment.java
+++ b/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackAssignment.java
@@@ -1,0 -1,21 +1,40 @@@
+ package org.apache.helix.controller.strategy.knapsack;
+
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements. See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership. The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License. You may obtain a copy of the License at
++ *
++ * http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing,
++ * software distributed under the License is distributed on an
++ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++ * KIND, either express or implied. See the License for the
++ * specific language governing permissions and limitations
++ * under the License.
++ */
++
+ /**
+ * The assignment of a knapsack item to a knapsack<br/>
+ * <br/>
+ * Based on the C++ knapsack solver in Google's or-tools package.
+ */
+ public class KnapsackAssignment {
+ public int itemId;
+ public boolean isIn;
+
+ /**
+ * Create the assignment
+ * @param itemId the item id
+ * @param isIn true if the item is in the knapsack, false otherwise
+ */
+ public KnapsackAssignment(int itemId, boolean isIn) {
+ this.itemId = itemId;
+ this.isIn = isIn;
+ }
+ }
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackCapacityPropagatorImpl.java
----------------------------------------------------------------------
diff --cc helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackCapacityPropagatorImpl.java
index 0000000,357cc2a..e630b3c
mode 000000,100644..100644
--- a/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackCapacityPropagatorImpl.java
+++ b/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackCapacityPropagatorImpl.java
@@@ -1,0 -1,218 +1,237 @@@
+ package org.apache.helix.controller.strategy.knapsack;
+
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements. See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership. The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License. You may obtain a copy of the License at
++ *
++ * http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing,
++ * software distributed under the License is distributed on an
++ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++ * KIND, either express or implied. See the License for the
++ * specific language governing permissions and limitations
++ * under the License.
++ */
++
+ import java.util.ArrayList;
+ import java.util.Collections;
+ import java.util.Comparator;
+
/**
 * A knapsack propagator that constrains assignments based on knapsack capacity for a given
 * dimension<br/>
 * <br/>
 * Based on the C++ knapsack solver in Google's or-tools package.
 */
public class KnapsackCapacityPropagatorImpl extends AbstractKnapsackPropagator {
  // all 64 bits set; used as a mask in mostSignificantBitsPosition64
  private static final long ALL_BITS_64 = 0xFFFFFFFFFFFFFFFFL;
  // sentinel meaning "no item selected / no break item found"
  private static final int NO_SELECTION = -1;

  // total capacity of this dimension
  private long _capacity;
  // capacity already consumed by items bound into the knapsack
  private long _consumedCapacity;
  // id of the first unbound item that no longer fits greedily (the "break item")
  private int _breakItemId;
  // items sorted by decreasing efficiency (profit/weight); basis of the greedy bound
  private ArrayList<KnapsackItem> _sortedItems;
  // max single-item profit + 1; tie-break value for zero-weight items when sorting
  private long _profitMax;

  /**
   * Initialize the propagator
   * @param state the current knapsack state
   * @param capacity the knapsack capacity for this dimension
   */
  public KnapsackCapacityPropagatorImpl(KnapsackState state, long capacity) {
    super(state);
    _capacity = capacity;
    _consumedCapacity = 0L;
    _breakItemId = NO_SELECTION;
    _sortedItems = new ArrayList<KnapsackItem>();
    _profitMax = 0L;
  }

  /**
   * Compute the profit lower bound (greedy fill of unbound items in decreasing
   * efficiency order) and the upper bound (lower bound plus a fractional-relaxation
   * correction around the break item).
   */
  @Override
  public void computeProfitBounds() {
    setProfitLowerBound(currentProfit());
    _breakItemId = NO_SELECTION;

    long remainingCapacity = _capacity - _consumedCapacity;
    int breakSortedItemId = NO_SELECTION;
    final int numberOfSortedItems = _sortedItems.size();
    for (int sortedId = 0; sortedId < numberOfSortedItems; sortedId++) {
      final KnapsackItem item = _sortedItems.get(sortedId);
      if (!state().isBound(item.id)) {
        // remember the most recent unbound item; it becomes the next search candidate
        _breakItemId = item.id;

        if (remainingCapacity >= item.weight) {
          // item fits entirely: take it greedily for the lower bound
          remainingCapacity -= item.weight;
          setProfitLowerBound(profitLowerBound() + item.profit);
        } else {
          // first item that does not fit: the break item for the upper bound
          breakSortedItemId = sortedId;
          break;
        }
      }
    }
    setProfitUpperBound(profitLowerBound());
    if (breakSortedItemId != NO_SELECTION) {
      // relax integrality around the break item to obtain an admissible upper bound
      final long additionalProfit = getAdditionalProfit(remainingCapacity, breakSortedItemId);
      setProfitUpperBound(profitUpperBound() + additionalProfit);
    }
  }

  /**
   * @return the break item id, i.e. the next undecided item to branch on
   */
  @Override
  public int getNextItemId() {
    return _breakItemId;
  }

  /**
   * Reset consumed capacity and sort all items by decreasing efficiency so the
   * greedy bound computation can scan them in order.
   */
  @Override
  protected void initPropagator() {
    _consumedCapacity = 0L;
    _breakItemId = NO_SELECTION;
    _sortedItems = new ArrayList<KnapsackItem>(items());
    _profitMax = 0L;
    for (KnapsackItem item : _sortedItems) {
      _profitMax = Math.max(_profitMax, item.profit);
    }
    // +1 so zero-weight items sort strictly ahead of every weighted item
    _profitMax++;
    Collections.sort(_sortedItems, new KnapsackItemDecreasingEfficiencyComparator(_profitMax));
  }

  /**
   * Apply (or revert) one assignment against the consumed capacity.
   * @param revert true to undo a previous application of this assignment
   * @param assignment the item decision to apply
   * @return false when applying the assignment overflows the capacity, true otherwise
   */
  @Override
  protected boolean updatePropagator(boolean revert, KnapsackAssignment assignment) {
    if (assignment.isIn) {
      if (revert) {
        _consumedCapacity -= items().get(assignment.itemId).weight;
      } else {
        _consumedCapacity += items().get(assignment.itemId).weight;
        if (_consumedCapacity > _capacity) {
          // infeasible: this branch of the search must be pruned
          return false;
        }
      }
    }
    return true;
  }

  /**
   * Greedily mark unbound items (in decreasing efficiency order) as taken in the
   * solution vector until the remaining capacity is exhausted. Bound items are
   * assumed to be already reflected in the solution.
   * @param solution per-item in/out flags to fill; must not be null
   */
  @Override
  protected void copyCurrentStateToSolutionPropagator(ArrayList<Boolean> solution) {
    if (solution == null) {
      throw new RuntimeException("solution cannot be null!");
    }
    long remainingCapacity = _capacity - _consumedCapacity;
    for (KnapsackItem item : _sortedItems) {
      if (!state().isBound(item.id)) {
        if (remainingCapacity >= item.weight) {
          remainingCapacity -= item.weight;
          solution.set(item.id, true);
        } else {
          // first non-fitting item ends the greedy fill
          return;
        }
      }
    }
  }

  /**
   * Upper-bound correction at the break item: the larger of (a) filling the leftover
   * capacity fractionally with the next item's efficiency, and (b) taking the break
   * item and paying back the overused capacity at the previous item's efficiency.
   * @param remainingCapacity capacity left after the greedy fill
   * @param breakItemId index of the break item in the sorted list (not the item id)
   * @return admissible additional profit for the upper bound
   */
  private long getAdditionalProfit(long remainingCapacity, int breakItemId) {
    final int afterBreakItemId = breakItemId + 1;
    long additionalProfitWhenNoBreakItem = 0L;
    if (afterBreakItemId < _sortedItems.size()) {
      // case (a): break item excluded, fill leftover capacity with the next item's ratio
      final long nextWeight = _sortedItems.get(afterBreakItemId).weight;
      final long nextProfit = _sortedItems.get(afterBreakItemId).profit;
      additionalProfitWhenNoBreakItem =
          upperBoundOfRatio(remainingCapacity, nextProfit, nextWeight);
    }

    final int beforeBreakItemId = breakItemId - 1;
    long additionalProfitWhenBreakItem = 0L;
    if (beforeBreakItemId >= 0) {
      // case (b): break item included, refund the overused capacity at the previous ratio
      final long previousWeight = _sortedItems.get(beforeBreakItemId).weight;
      if (previousWeight != 0) {
        final long previousProfit = _sortedItems.get(beforeBreakItemId).profit;
        final long overusedCapacity = _sortedItems.get(breakItemId).weight - remainingCapacity;
        final long ratio = upperBoundOfRatio(overusedCapacity, previousProfit, previousWeight);

        additionalProfitWhenBreakItem = _sortedItems.get(breakItemId).profit - ratio;
      }
    }

    final long additionalProfit =
        Math.max(additionalProfitWhenNoBreakItem, additionalProfitWhenBreakItem);
    return additionalProfit;
  }

  /**
   * Position of the most significant set bit of n, via binary search on 32/16/8/4/2/1
   * bit halves. Assumes n is treated as a non-negative 64-bit quantity.
   * @param n the value to inspect
   * @return zero-based bit position of the highest set bit (0 when n has no high bits)
   */
  private int mostSignificantBitsPosition64(long n) {
    int b = 0;
    if (0 != (n & (ALL_BITS_64 << (1 << 5)))) {
      b |= (1 << 5);
      n >>= (1 << 5);
    }
    if (0 != (n & (ALL_BITS_64 << (1 << 4)))) {
      b |= (1 << 4);
      n >>= (1 << 4);
    }
    if (0 != (n & (ALL_BITS_64 << (1 << 3)))) {
      b |= (1 << 3);
      n >>= (1 << 3);
    }
    if (0 != (n & (ALL_BITS_64 << (1 << 2)))) {
      b |= (1 << 2);
      n >>= (1 << 2);
    }
    if (0 != (n & (ALL_BITS_64 << (1 << 1)))) {
      b |= (1 << 1);
      n >>= (1 << 1);
    }
    if (0 != (n & (ALL_BITS_64 << (1 << 0)))) {
      b |= (1 << 0);
    }
    return b;
  }

  /**
   * Conservatively detect whether value1 * value2 could overflow a signed long,
   * by summing the positions of their most significant bits.
   */
  private boolean willProductOverflow(long value1, long value2) {
    final int mostSignificantBitsPosition1 = mostSignificantBitsPosition64(value1);
    final int mostSignificantBitsPosition2 = mostSignificantBitsPosition64(value2);
    // 61 rather than 63: deliberately conservative so borderline products fall back to doubles
    final int overflow = 61;
    return mostSignificantBitsPosition1 + mostSignificantBitsPosition2 > overflow;
  }

  /**
   * Compute numerator1 * numerator2 / denominator, falling back to rounded
   * double arithmetic when the exact long product would overflow.
   */
  private long upperBoundOfRatio(long numerator1, long numerator2, long denominator) {
    if (!willProductOverflow(numerator1, numerator2)) {
      final long numerator = numerator1 * numerator2;
      final long result = numerator / denominator;
      return result;
    } else {
      final double ratio = (((double) numerator1) * ((double) numerator2)) / ((double) denominator);
      // round to nearest
      final long result = ((long) Math.floor(ratio + 0.5));
      return result;
    }
  }

  /**
   * A special comparator that orders knapsack items by decreasing efficiency (profit to weight
   * ratio)
   */
  private static class KnapsackItemDecreasingEfficiencyComparator implements
      Comparator<KnapsackItem> {
    // efficiency assigned to zero-weight items, so they always sort first
    private final long _profitMax;

    public KnapsackItemDecreasingEfficiencyComparator(long profitMax) {
      _profitMax = profitMax;
    }

    @Override
    public int compare(KnapsackItem item1, KnapsackItem item2) {
      double eff1 = item1.getEfficiency(_profitMax);
      double eff2 = item2.getEfficiency(_profitMax);
      if (eff1 < eff2) {
        return 1;
      } else if (eff1 > eff2) {
        return -1;
      } else {
        return 0;
      }
    }

  }
}
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackGenericSolverImpl.java
----------------------------------------------------------------------
diff --cc helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackGenericSolverImpl.java
index 0000000,1bf1d3f..dec9b2d
mode 000000,100644..100644
--- a/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackGenericSolverImpl.java
+++ b/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackGenericSolverImpl.java
@@@ -1,0 -1,269 +1,288 @@@
+ package org.apache.helix.controller.strategy.knapsack;
+
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements. See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership. The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License. You may obtain a copy of the License at
++ *
++ * http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing,
++ * software distributed under the License is distributed on an
++ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++ * KIND, either express or implied. See the License for the
++ * specific language governing permissions and limitations
++ * under the License.
++ */
++
+ import java.util.ArrayList;
+ import java.util.Comparator;
+ import java.util.PriorityQueue;
+
/**
 * A generic knapsack solver that supports multiple dimensions<br/>
 * <br/>
 * Based on the C++ knapsack solver in Google's or-tools package.
 */
public class KnapsackGenericSolverImpl extends AbstractBaseKnapsackSolver {
  // index of the propagator whose bounds drive branching by default
  private static final int MASTER_PROPAGATOR_ID = 0;
  // sentinel meaning "no next item to branch on"
  private static final int NO_SELECTION = -1;

  // one capacity propagator per dimension
  private ArrayList<KnapsackPropagator> _propagators;
  // which propagator currently acts as the master (branching driver)
  private int _masterPropagatorId;
  // all nodes created during the branch-and-bound search (kept alive for path walks)
  private ArrayList<KnapsackSearchNode> _searchNodes;
  // shared bound/in-out state of every item
  private KnapsackState _state;
  // best total profit found so far (incumbent)
  private long _bestSolutionProfit;
  // per-item in/out flags of the incumbent solution
  private ArrayList<Boolean> _bestSolution;

  /**
   * Create the solver
   * @param solverName name of the solver
   */
  public KnapsackGenericSolverImpl(String solverName) {
    super(solverName);
    _propagators = new ArrayList<KnapsackPropagator>();
    _masterPropagatorId = MASTER_PROPAGATOR_ID;
    _searchNodes = new ArrayList<KnapsackSearchNode>();
    _state = new KnapsackStateImpl();
    _bestSolutionProfit = 0L;
    _bestSolution = new ArrayList<Boolean>();
  }

  /**
   * Initialize the solver with the problem data: one propagator is created
   * per weight dimension, each sharing the same item state.
   * @param profits profit of each item
   * @param weights per-dimension weights, outer list indexed by dimension
   * @param capacities capacity of each dimension
   */
  @Override
  public void init(ArrayList<Long> profits, ArrayList<ArrayList<Long>> weights,
      ArrayList<Long> capacities) {
    clear();
    final int numberOfItems = profits.size();
    final int numberOfDimensions = weights.size();
    _state.init(numberOfItems);

    _bestSolution.clear();
    for (int i = 0; i < numberOfItems; i++) {
      _bestSolution.add(false);
    }

    for (int i = 0; i < numberOfDimensions; i++) {
      KnapsackPropagator propagator = new KnapsackCapacityPropagatorImpl(_state, capacities.get(i));
      propagator.init(profits, weights.get(i));
      _propagators.add(propagator);
    }
    _masterPropagatorId = MASTER_PROPAGATOR_ID;
  }

  public int getNumberOfItems() {
    return _state.getNumberOfItems();
  }

  /**
   * Probe the profit bounds obtained by fixing one item in or out, then revert
   * the probe so the solver state is unchanged.
   * @param itemId item to fix
   * @param isItemIn whether to fix the item into or out of the knapsack
   * @param lowerBound fallback lower bound
   * @param upperBound fallback upper bound
   * @return {lowerBound, upperBound} after the probe; {0, 0} when fixing fails
   */
  @Override
  public long[] getLowerAndUpperBoundWhenItem(int itemId, boolean isItemIn, long lowerBound,
      long upperBound) {
    long[] result = {
        lowerBound, upperBound
    };
    KnapsackAssignment assignment = new KnapsackAssignment(itemId, isItemIn);
    final boolean fail = !incrementalUpdate(false, assignment);
    if (fail) {
      result[0] = 0L;
      result[1] = 0L;
    } else {
      result[0] =
          (hasOnePropagator()) ? _propagators.get(_masterPropagatorId).profitLowerBound() : 0L;
      result[1] = getAggregatedProfitUpperBound();
    }

    // always revert the probe, even when the forward update failed
    final boolean failRevert = !incrementalUpdate(true, assignment);
    if (failRevert) {
      result[0] = 0L;
      result[1] = 0L;
    }
    return result;
  }

  public void setMasterPropagatorId(int masterPropagatorId) {
    _masterPropagatorId = masterPropagatorId;
  }

  /**
   * Run best-first branch-and-bound: nodes are expanded in decreasing
   * profit-upper-bound order until no node can beat the incumbent.
   * @return the best total profit found
   */
  @Override
  public long solve() {
    _bestSolutionProfit = 0L;
    PriorityQueue<KnapsackSearchNode> searchQueue =
        new PriorityQueue<KnapsackSearchNode>(11,
            new KnapsackSearchNodeInDecreasingUpperBoundComparator());
    // root carries a dummy assignment (NO_SELECTION) so every real node has a parent
    KnapsackAssignment assignment = new KnapsackAssignment(NO_SELECTION, true);
    KnapsackSearchNode rootNode = new KnapsackSearchNodeImpl(null, assignment);
    rootNode.setCurrentProfit(getCurrentProfit());
    rootNode.setProfitUpperBound(getAggregatedProfitUpperBound());
    rootNode.setNextItemId(getNextItemId());
    _searchNodes.add(rootNode);

    if (makeNewNode(rootNode, false)) {
      searchQueue.add(_searchNodes.get(_searchNodes.size() - 1));
    }
    if (makeNewNode(rootNode, true)) {
      searchQueue.add(_searchNodes.get(_searchNodes.size() - 1));
    }

    KnapsackSearchNode currentNode = rootNode;
    while (!searchQueue.isEmpty() && searchQueue.peek().profitUpperBound() > _bestSolutionProfit) {
      KnapsackSearchNode node = searchQueue.poll();

      // TODO: check if equality is enough
      if (node != currentNode) {
        // re-point propagator state from the previous node to the node being expanded
        KnapsackSearchPath path = new KnapsackSearchPathImpl(currentNode, node);
        path.init();
        final boolean noFail = updatePropagators(path);
        currentNode = node;
        if (!noFail) {
          throw new RuntimeException("solver failed to update propagators");
        }
      }

      if (makeNewNode(node, false)) {
        searchQueue.add(_searchNodes.get(_searchNodes.size() - 1));
      }
      if (makeNewNode(node, true)) {
        searchQueue.add(_searchNodes.get(_searchNodes.size() - 1));
      }
    }
    return _bestSolutionProfit;
  }

  /**
   * @param itemId the item id
   * @return true if the item is in the best solution found by {@link #solve()}
   */
  @Override
  public boolean bestSolution(int itemId) {
    return _bestSolution.get(itemId);
  }

  private void clear() {
    _propagators.clear();
    _searchNodes.clear();
  }

  /**
   * Move the propagator state along a search path: revert assignments from the
   * "from" side up to the common ancestor (via), then apply assignments down to "to".
   * @return true when every incremental update succeeded
   */
  private boolean updatePropagators(final KnapsackSearchPath path) {
    boolean noFail = true;
    KnapsackSearchNode node = path.from();
    KnapsackSearchNode via = path.via();
    while (node != via) {
      noFail = incrementalUpdate(true, node.assignment()) && noFail;
      node = node.parent();
    }
    node = path.to();
    while (node != via) {
      noFail = incrementalUpdate(false, node.assignment()) && noFail;
      node = node.parent();
    }
    return noFail;
  }

  /**
   * Apply (or revert) one assignment on the shared state and every propagator.
   * All updates are attempted even after a failure so state stays consistent.
   * @return true when the state and all propagators accepted the update
   */
  private boolean incrementalUpdate(boolean revert, final KnapsackAssignment assignment) {
    boolean noFail = _state.updateState(revert, assignment);
    for (KnapsackPropagator propagator : _propagators) {
      noFail = propagator.update(revert, assignment) && noFail;
    }
    return noFail;
  }

  /**
   * Promote the master propagator's current lower bound to the incumbent
   * solution when it improves on the best profit seen so far.
   */
  private void updateBestSolution() {
    final long profitLowerBound =
        (hasOnePropagator()) ? _propagators.get(_masterPropagatorId).profitLowerBound()
            : _propagators.get(_masterPropagatorId).currentProfit();

    if (_bestSolutionProfit < profitLowerBound) {
      _bestSolutionProfit = profitLowerBound;
      _propagators.get(_masterPropagatorId).copyCurrentStateToSolution(hasOnePropagator(),
          _bestSolution);
    }
  }

  /**
   * Try to branch from a node by fixing its next item in or out. The candidate
   * is evaluated (bounds, incumbent update) and immediately reverted; only
   * candidates that survive pruning are recorded as real search nodes.
   * @param node the node to branch from
   * @param isIn whether the next item is fixed into the knapsack
   * @return true when a new node was added to {@code _searchNodes}
   */
  private boolean makeNewNode(final KnapsackSearchNode node, boolean isIn) {
    if (node.nextItemId() == NO_SELECTION) {
      return false;
    }
    KnapsackAssignment assignment = new KnapsackAssignment(node.nextItemId(), isIn);
    KnapsackSearchNode newNode = new KnapsackSearchNodeImpl(node, assignment);

    KnapsackSearchPath path = new KnapsackSearchPathImpl(node, newNode);
    path.init();
    final boolean noFail = updatePropagators(path);
    if (noFail) {
      newNode.setCurrentProfit(getCurrentProfit());
      newNode.setProfitUpperBound(getAggregatedProfitUpperBound());
      newNode.setNextItemId(getNextItemId());
      updateBestSolution();
    }

    // undo the trial assignment before deciding whether to keep the node
    KnapsackSearchPath revertPath = new KnapsackSearchPathImpl(newNode, node);
    revertPath.init();
    updatePropagators(revertPath);

    if (!noFail || newNode.profitUpperBound() < _bestSolutionProfit) {
      // infeasible or provably dominated by the incumbent: prune
      return false;
    }

    // keep a fresh node (not the trial one) so parent links stay consistent
    KnapsackSearchNode relevantNode = new KnapsackSearchNodeImpl(node, assignment);
    relevantNode.setCurrentProfit(newNode.currentProfit());
    relevantNode.setProfitUpperBound(newNode.profitUpperBound());
    relevantNode.setNextItemId(newNode.nextItemId());
    _searchNodes.add(relevantNode);

    return true;
  }

  /**
   * Tightest upper bound across all dimensions: each propagator recomputes its
   * bounds and the minimum upper bound is taken.
   */
  private long getAggregatedProfitUpperBound() {
    long upperBound = Long.MAX_VALUE;
    for (KnapsackPropagator propagator : _propagators) {
      propagator.computeProfitBounds();
      final long propagatorUpperBound = propagator.profitUpperBound();
      upperBound = Math.min(upperBound, propagatorUpperBound);
    }
    return upperBound;
  }

  private boolean hasOnePropagator() {
    return _propagators.size() == 1;
  }

  private long getCurrentProfit() {
    return _propagators.get(_masterPropagatorId).currentProfit();
  }

  private int getNextItemId() {
    return _propagators.get(_masterPropagatorId).getNextItemId();
  }

  /**
   * A special comparator that orders knapsack search nodes in decreasing potential profit order
   */
  // TODO: check order
  private static class KnapsackSearchNodeInDecreasingUpperBoundComparator implements
      Comparator<KnapsackSearchNode> {
    @Override
    public int compare(KnapsackSearchNode node1, KnapsackSearchNode node2) {
      final long profitUpperBound1 = node1.profitUpperBound();
      final long profitUpperBound2 = node2.profitUpperBound();
      if (profitUpperBound1 == profitUpperBound2) {
        // tie-break on current profit, higher first
        final long currentProfit1 = node1.currentProfit();
        final long currentProfit2 = node2.currentProfit();
        if (currentProfit1 > currentProfit2) {
          return -1;
        } else if (currentProfit1 < currentProfit2) {
          return 1;
        } else {
          return 0;
        }
      }
      if (profitUpperBound1 > profitUpperBound2) {
        return -1;
      } else if (profitUpperBound1 < profitUpperBound2) {
        return 1;
      } else {
        return 0;
      }
    }

  }
}
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackItem.java
----------------------------------------------------------------------
diff --cc helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackItem.java
index 0000000,3996816..70824a9
mode 000000,100644..100644
--- a/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackItem.java
+++ b/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackItem.java
@@@ -1,0 -1,33 +1,52 @@@
+ package org.apache.helix.controller.strategy.knapsack;
+
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements. See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership. The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License. You may obtain a copy of the License at
++ *
++ * http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing,
++ * software distributed under the License is distributed on an
++ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++ * KIND, either express or implied. See the License for the
++ * specific language governing permissions and limitations
++ * under the License.
++ */
++
/**
 * Basic structure of an item in a knapsack: an id plus the item's weight
 * (cost of inclusion in one dimension) and profit (benefit of inclusion).<br/>
 * <br/>
 * Based on the C++ knapsack solver in Google's or-tools package.
 */
public class KnapsackItem {
  // stable identifier of this item
  public final int id;
  // cost of placing the item in the knapsack for one dimension
  public final long weight;
  // benefit of placing the item in the knapsack
  public final long profit;

  /**
   * Initialize the item
   * @param id the item id
   * @param weight the cost to place the item in the knapsack for one dimension
   * @param profit the benefit of placing the item in the knapsack
   */
  public KnapsackItem(int id, long weight, long profit) {
    this.profit = profit;
    this.weight = weight;
    this.id = id;
  }

  /**
   * Get the profit to weight ratio
   * @param profitMax the efficiency to report for weightless items
   * @return the item addition efficiency
   */
  public double getEfficiency(long profitMax) {
    if (weight > 0) {
      return ((double) profit) / ((double) weight);
    }
    // a zero-weight item is "free": treat it as maximally efficient
    return (double) profitMax;
  }
}
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackPropagator.java
----------------------------------------------------------------------
diff --cc helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackPropagator.java
index 0000000,702bf1e..cb3eca7
mode 000000,100644..100644
--- a/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackPropagator.java
+++ b/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackPropagator.java
@@@ -1,0 -1,61 +1,80 @@@
+ package org.apache.helix.controller.strategy.knapsack;
+
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements. See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership. The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License. You may obtain a copy of the License at
++ *
++ * http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing,
++ * software distributed under the License is distributed on an
++ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++ * KIND, either express or implied. See the License for the
++ * specific language governing permissions and limitations
++ * under the License.
++ */
++
+ import java.util.ArrayList;
+
/**
 * Constraint enforcer for a single dimension on a knapsack solution search.
 * One propagator per weight dimension tracks consumed capacity and supplies
 * profit bounds for that dimension.<br/>
 * <br/>
 * Based on the C++ knapsack solver in Google's or-tools package.
 */
public interface KnapsackPropagator {
  /**
   * Initialize the propagator
   * @param profits profits for selecting each item
   * @param weights weights of each item for this dimension
   */
  void init(final ArrayList<Long> profits, final ArrayList<Long> weights);

  /**
   * Update the search with one item assignment
   * @param revert revert the assignment instead of applying it
   * @param assignment the assignment to use for the update
   * @return true if successful, false if failed (e.g. capacity exceeded)
   */
  boolean update(boolean revert, final KnapsackAssignment assignment);

  /**
   * Compute the upper and lower bounds of potential profits for the current
   * partial assignment; results are exposed via {@link #profitLowerBound()}
   * and {@link #profitUpperBound()}
   */
  void computeProfitBounds();

  /**
   * Get the next item to branch on in the search
   * @return item id, or a sentinel when no undecided item remains
   */
  int getNextItemId();

  /**
   * Get the current profit of the search
   * @return current profit
   */
  long currentProfit();

  /**
   * Get the lowest possible profit of the search
   * @return profit lower bound
   */
  long profitLowerBound();

  /**
   * Get the highest possible profit of the search
   * @return profit upper bound
   */
  long profitUpperBound();

  /**
   * Copy the current computed state to the final solution
   * @param hasOnePropagator true if there is only one propagator, i.e. 1 dimension
   * @param solution the solution vector to fill with per-item in/out flags
   */
  void copyCurrentStateToSolution(boolean hasOnePropagator, ArrayList<Boolean> solution);
}
[28/50] [abbrv] git commit: Start changing the task rebalancer to
work at a finer granularity
Posted by ka...@apache.org.
Start changing the task rebalancer to work at a finer granularity
Project: http://git-wip-us.apache.org/repos/asf/helix/repo
Commit: http://git-wip-us.apache.org/repos/asf/helix/commit/080a15ff
Tree: http://git-wip-us.apache.org/repos/asf/helix/tree/080a15ff
Diff: http://git-wip-us.apache.org/repos/asf/helix/diff/080a15ff
Branch: refs/heads/master
Commit: 080a15ff9241f1a7d2c218447ad2569d62ca120d
Parents: 1bc9354
Author: Kanak Biscuitwala <ka...@apache.org>
Authored: Mon Mar 3 18:28:20 2014 -0800
Committer: Kanak Biscuitwala <ka...@apache.org>
Committed: Mon Mar 3 18:28:20 2014 -0800
----------------------------------------------------------------------
.../helix/task/AbstractTaskRebalancer.java | 7 +-
.../helix/task/IndependentTaskRebalancer.java | 32 +++-
.../java/org/apache/helix/task/TaskConfig.java | 35 ++++-
.../helix/provisioning/tools/TaskManager.java | 152 +++++++++++++++++++
.../provisioning/tools/TestTaskManager.java | 115 ++++++++++++++
5 files changed, 331 insertions(+), 10 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/helix/blob/080a15ff/helix-core/src/main/java/org/apache/helix/task/AbstractTaskRebalancer.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/AbstractTaskRebalancer.java b/helix-core/src/main/java/org/apache/helix/task/AbstractTaskRebalancer.java
index 9a9538c..329d02f 100644
--- a/helix-core/src/main/java/org/apache/helix/task/AbstractTaskRebalancer.java
+++ b/helix-core/src/main/java/org/apache/helix/task/AbstractTaskRebalancer.java
@@ -357,7 +357,9 @@ public abstract class AbstractTaskRebalancer implements HelixRebalancer {
}
if (isTaskComplete(taskCtx, allPartitions)) {
- workflowCtx.setTaskState(taskResource, TaskState.COMPLETED);
+ if (!taskCfg.isLongLived()) {
+ workflowCtx.setTaskState(taskResource, TaskState.COMPLETED);
+ }
if (isWorkflowComplete(workflowCtx, workflowConfig)) {
workflowCtx.setWorkflowState(TaskState.COMPLETED);
workflowCtx.setFinishTime(System.currentTimeMillis());
@@ -553,6 +555,9 @@ public abstract class AbstractTaskRebalancer implements HelixRebalancer {
private static List<Integer> getNextPartitions(SortedSet<Integer> candidatePartitions,
Set<Integer> excluded, int n) {
List<Integer> result = new ArrayList<Integer>(n);
+ if (candidatePartitions == null || candidatePartitions.isEmpty()) {
+ return result;
+ }
for (Integer pId : candidatePartitions) {
if (result.size() >= n) {
break;
http://git-wip-us.apache.org/repos/asf/helix/blob/080a15ff/helix-core/src/main/java/org/apache/helix/task/IndependentTaskRebalancer.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/IndependentTaskRebalancer.java b/helix-core/src/main/java/org/apache/helix/task/IndependentTaskRebalancer.java
index 80ec23c..2bc4081 100644
--- a/helix-core/src/main/java/org/apache/helix/task/IndependentTaskRebalancer.java
+++ b/helix-core/src/main/java/org/apache/helix/task/IndependentTaskRebalancer.java
@@ -42,6 +42,7 @@ import org.apache.helix.model.ResourceAssignment;
import com.google.common.base.Function;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
/**
* A task rebalancer that evenly assigns tasks to nodes
@@ -63,11 +64,25 @@ public class IndependentTaskRebalancer extends AbstractTaskRebalancer {
@Override
public Map<String, SortedSet<Integer>> getTaskAssignment(ResourceCurrentState currStateOutput,
ResourceAssignment prevAssignment, Iterable<ParticipantId> instanceList, TaskConfig taskCfg,
- TaskContext taskContext, WorkflowConfig workflowCfg, WorkflowContext workflowCtx,
+ final TaskContext taskContext, WorkflowConfig workflowCfg, WorkflowContext workflowCtx,
Set<Integer> partitionSet, Cluster cluster) {
// Gather input to the full auto rebalancing algorithm
LinkedHashMap<State, Integer> states = new LinkedHashMap<State, Integer>();
states.put(State.from("ONLINE"), 1);
+
+ // Only map partitions whose assignment we care about
+ final Set<TaskPartitionState> honoredStates =
+ Sets.newHashSet(TaskPartitionState.INIT, TaskPartitionState.RUNNING,
+ TaskPartitionState.STOPPED);
+ Set<Integer> filteredPartitionSet = Sets.newHashSet();
+ for (Integer p : partitionSet) {
+ TaskPartitionState state = (taskContext == null) ? null : taskContext.getPartitionState(p);
+ if (state == null || honoredStates.contains(state)) {
+ filteredPartitionSet.add(p);
+ }
+ }
+
+ // Transform from partition id to fully qualified partition name
List<Integer> partitionNums = Lists.newArrayList(partitionSet);
Collections.sort(partitionNums);
final ResourceId resourceId = prevAssignment.getResourceId();
@@ -79,10 +94,21 @@ public class IndependentTaskRebalancer extends AbstractTaskRebalancer {
return PartitionId.from(resourceId, partitionNum.toString());
}
}));
+
+ // Compute the current assignment
Map<PartitionId, Map<ParticipantId, State>> currentMapping = Maps.newHashMap();
for (PartitionId partitionId : currStateOutput.getCurrentStateMappedPartitions(resourceId)) {
- currentMapping.put(partitionId, currStateOutput.getCurrentStateMap(resourceId, partitionId));
- currentMapping.put(partitionId, currStateOutput.getPendingStateMap(resourceId, partitionId));
+ if (!filteredPartitionSet.contains(pId(partitionId.toString()))) {
+ // not computing old partitions
+ continue;
+ }
+ Map<ParticipantId, State> allPreviousDecisionMap = Maps.newHashMap();
+ if (prevAssignment != null) {
+ allPreviousDecisionMap.putAll(prevAssignment.getReplicaMap(partitionId));
+ }
+ allPreviousDecisionMap.putAll(currStateOutput.getCurrentStateMap(resourceId, partitionId));
+ allPreviousDecisionMap.putAll(currStateOutput.getPendingStateMap(resourceId, partitionId));
+ currentMapping.put(partitionId, allPreviousDecisionMap);
}
// Get the assignment keyed on partition
http://git-wip-us.apache.org/repos/asf/helix/blob/080a15ff/helix-core/src/main/java/org/apache/helix/task/TaskConfig.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/TaskConfig.java b/helix-core/src/main/java/org/apache/helix/task/TaskConfig.java
index 2834e85..be9db79 100644
--- a/helix-core/src/main/java/org/apache/helix/task/TaskConfig.java
+++ b/helix-core/src/main/java/org/apache/helix/task/TaskConfig.java
@@ -62,6 +62,10 @@ public class TaskConfig {
public static final String MAX_ATTEMPTS_PER_PARTITION = "MaxAttemptsPerPartition";
/** The number of concurrent tasks that are allowed to run on an instance. */
public static final String NUM_CONCURRENT_TASKS_PER_INSTANCE = "ConcurrentTasksPerInstance";
+ /** Support overarching tasks that hang around for a while */
+ public static final String LONG_LIVED = "LongLived";
+ /** Support giving tasks a custom name **/
+ public static final String PARTITION_NAME_MAP = "PartitionNameMap";
// // Default property values ////
@@ -78,10 +82,12 @@ public class TaskConfig {
private final long _timeoutPerPartition;
private final int _numConcurrentTasksPerInstance;
private final int _maxAttemptsPerPartition;
+ private final boolean _longLived;
private TaskConfig(String workflow, String targetResource, List<Integer> targetPartitions,
Set<String> targetPartitionStates, String command, String commandConfig,
- long timeoutPerPartition, int numConcurrentTasksPerInstance, int maxAttemptsPerPartition) {
+ long timeoutPerPartition, int numConcurrentTasksPerInstance, int maxAttemptsPerPartition,
+ boolean longLived) {
_workflow = workflow;
_targetResource = targetResource;
_targetPartitions = targetPartitions;
@@ -91,6 +97,7 @@ public class TaskConfig {
_timeoutPerPartition = timeoutPerPartition;
_numConcurrentTasksPerInstance = numConcurrentTasksPerInstance;
_maxAttemptsPerPartition = maxAttemptsPerPartition;
+ _longLived = longLived;
}
public String getWorkflow() {
@@ -129,6 +136,10 @@ public class TaskConfig {
return _maxAttemptsPerPartition;
}
+ public boolean isLongLived() {
+ return _longLived;
+ }
+
public Map<String, String> getResourceConfigMap() {
Map<String, String> cfgMap = new HashMap<String, String>();
cfgMap.put(TaskConfig.WORKFLOW_ID, _workflow);
@@ -143,7 +154,9 @@ public class TaskConfig {
}
cfgMap.put(TaskConfig.TIMEOUT_PER_PARTITION, "" + _timeoutPerPartition);
cfgMap.put(TaskConfig.MAX_ATTEMPTS_PER_PARTITION, "" + _maxAttemptsPerPartition);
-
+ cfgMap.put(TaskConfig.LONG_LIVED + "", String.valueOf(_longLived));
+ cfgMap.put(TaskConfig.NUM_CONCURRENT_TASKS_PER_INSTANCE + "",
+ String.valueOf(_numConcurrentTasksPerInstance));
return cfgMap;
}
@@ -160,13 +173,14 @@ public class TaskConfig {
private long _timeoutPerPartition = DEFAULT_TIMEOUT_PER_PARTITION;
private int _numConcurrentTasksPerInstance = DEFAULT_NUM_CONCURRENT_TASKS_PER_INSTANCE;
private int _maxAttemptsPerPartition = DEFAULT_MAX_ATTEMPTS_PER_PARTITION;
+ private boolean _longLived = false;
public TaskConfig build() {
validate();
return new TaskConfig(_workflow, _targetResource, _targetPartitions, _targetPartitionStates,
_command, _commandConfig, _timeoutPerPartition, _numConcurrentTasksPerInstance,
- _maxAttemptsPerPartition);
+ _maxAttemptsPerPartition, _longLived);
}
/**
@@ -205,7 +219,9 @@ public class TaskConfig {
if (cfg.containsKey(MAX_ATTEMPTS_PER_PARTITION)) {
b.setMaxAttemptsPerPartition(Integer.parseInt(cfg.get(MAX_ATTEMPTS_PER_PARTITION)));
}
-
+ if (cfg.containsKey(LONG_LIVED)) {
+ b.setLongLived(Boolean.parseBoolean(cfg.get(LONG_LIVED)));
+ }
return b;
}
@@ -254,8 +270,13 @@ public class TaskConfig {
return this;
}
+ public Builder setLongLived(boolean isLongLived) {
+ _longLived = isLongLived;
+ return this;
+ }
+
private void validate() {
- if (_targetResource == null && (_targetPartitions == null || _targetPartitions.isEmpty())) {
+ if (_targetResource == null && _targetPartitions == null) {
throw new IllegalArgumentException(String.format(
"%s cannot be null without specified partitions", TARGET_RESOURCE));
}
@@ -288,7 +309,9 @@ public class TaskConfig {
String[] vals = csv.split(",");
List<Integer> l = new ArrayList<Integer>();
for (String v : vals) {
- l.add(Integer.parseInt(v));
+ if (v != null && !v.isEmpty()) {
+ l.add(Integer.parseInt(v));
+ }
}
return l;
http://git-wip-us.apache.org/repos/asf/helix/blob/080a15ff/helix-provisioning/src/main/java/org/apache/helix/provisioning/tools/TaskManager.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/tools/TaskManager.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/tools/TaskManager.java
new file mode 100644
index 0000000..2a80841
--- /dev/null
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/tools/TaskManager.java
@@ -0,0 +1,152 @@
+package org.apache.helix.provisioning.tools;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+
+import org.I0Itec.zkclient.DataUpdater;
+import org.apache.helix.AccessOption;
+import org.apache.helix.ClusterMessagingService;
+import org.apache.helix.HelixConnection;
+import org.apache.helix.HelixDataAccessor;
+import org.apache.helix.HelixRole;
+import org.apache.helix.InstanceType;
+import org.apache.helix.PropertyKey;
+import org.apache.helix.ZNRecord;
+import org.apache.helix.api.id.ClusterId;
+import org.apache.helix.api.id.Id;
+import org.apache.helix.manager.zk.HelixConnectionAdaptor;
+import org.apache.helix.task.TaskConfig;
+import org.apache.helix.task.TaskDriver;
+import org.apache.helix.task.Workflow;
+import org.apache.log4j.Logger;
+
+import com.google.common.collect.Maps;
+
+public class TaskManager {
+ private static final Logger LOG = Logger.getLogger(TaskManager.class);
+
+ private final ClusterId _clusterId;
+ private final HelixConnection _connection;
+ private final TaskDriver _driver;
+
+ public TaskManager(final ClusterId clusterId, final HelixConnection connection) {
+ HelixRole dummyRole = new HelixRole() {
+ @Override
+ public HelixConnection getConnection() {
+ return connection;
+ }
+
+ @Override
+ public ClusterId getClusterId() {
+ return clusterId;
+ }
+
+ @Override
+ public Id getId() {
+ return clusterId;
+ }
+
+ @Override
+ public InstanceType getType() {
+ return InstanceType.ADMINISTRATOR;
+ }
+
+ @Override
+ public ClusterMessagingService getMessagingService() {
+ return null;
+ }
+ };
+ _driver = new TaskDriver(new HelixConnectionAdaptor(dummyRole));
+ _clusterId = clusterId;
+ _connection = connection;
+ }
+
+ public boolean createTaskQueue(String queueName, boolean isParallel) {
+ Workflow.Builder builder = new Workflow.Builder(queueName);
+ builder.addConfig(queueName, TaskConfig.COMMAND, queueName);
+ builder.addConfig(queueName, TaskConfig.TARGET_PARTITIONS, "");
+ builder.addConfig(queueName, TaskConfig.COMMAND_CONFIG, "");
+ builder.addConfig(queueName, TaskConfig.LONG_LIVED + "", String.valueOf(true));
+ if (isParallel) {
+ builder
+ .addConfig(queueName, TaskConfig.NUM_CONCURRENT_TASKS_PER_INSTANCE, String.valueOf(10));
+ }
+ Workflow workflow = builder.build();
+ try {
+ _driver.start(workflow);
+ } catch (Exception e) {
+ LOG.error("Failed to start queue " + queueName, e);
+ return false;
+ }
+ return true;
+ }
+
+ public void addTaskToQueue(final String taskName, final String queueName) {
+ HelixDataAccessor accessor = _connection.createDataAccessor(_clusterId);
+ PropertyKey.Builder keyBuilder = accessor.keyBuilder();
+ String configPath = keyBuilder.resourceConfig(queueName + "_" + queueName).getPath();
+ DataUpdater<ZNRecord> dataUpdater = new DataUpdater<ZNRecord>() {
+ @Override
+ public ZNRecord update(ZNRecord currentData) {
+ // Update the partition integers to add one to the end, and have that integer map to the
+ // task name
+ String current = currentData.getSimpleField(TaskConfig.TARGET_PARTITIONS);
+ int currentId = 0;
+ if (current == null || current.isEmpty()) {
+ currentData.setSimpleField(TaskConfig.TARGET_PARTITIONS, String.valueOf(currentId));
+ } else {
+ String[] parts = current.split(",");
+ currentId = parts.length;
+ currentData.setSimpleField(TaskConfig.TARGET_PARTITIONS, current + "," + currentId);
+ }
+ Map<String, String> partitionMap = currentData.getMapField(TaskConfig.PARTITION_NAME_MAP);
+ if (partitionMap == null) {
+ partitionMap = Maps.newHashMap();
+ currentData.setMapField(TaskConfig.PARTITION_NAME_MAP, partitionMap);
+ }
+ partitionMap.put(String.valueOf(currentId), taskName);
+ return currentData;
+ }
+ };
+ List<DataUpdater<ZNRecord>> dataUpdaters = new ArrayList<DataUpdater<ZNRecord>>();
+ dataUpdaters.add(dataUpdater);
+ accessor.updateChildren(Arrays.asList(configPath), dataUpdaters, AccessOption.PERSISTENT);
+
+ // Update the ideal state to trigger a change event
+ DataUpdater<ZNRecord> noOpUpdater = new DataUpdater<ZNRecord>() {
+ @Override
+ public ZNRecord update(ZNRecord currentData) {
+ return currentData;
+ }
+ };
+ String idealStatePath = keyBuilder.idealStates(queueName + "_" + queueName).getPath();
+ dataUpdaters.clear();
+ dataUpdaters.add(noOpUpdater);
+ accessor.updateChildren(Arrays.asList(idealStatePath), dataUpdaters, AccessOption.PERSISTENT);
+ }
+
+ public void shutdownQueue(String queueName) {
+
+ }
+}
http://git-wip-us.apache.org/repos/asf/helix/blob/080a15ff/helix-provisioning/src/test/java/org/apache/helix/provisioning/tools/TestTaskManager.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/test/java/org/apache/helix/provisioning/tools/TestTaskManager.java b/helix-provisioning/src/test/java/org/apache/helix/provisioning/tools/TestTaskManager.java
new file mode 100644
index 0000000..7016661
--- /dev/null
+++ b/helix-provisioning/src/test/java/org/apache/helix/provisioning/tools/TestTaskManager.java
@@ -0,0 +1,115 @@
+package org.apache.helix.provisioning.tools;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.helix.HelixConnection;
+import org.apache.helix.TestHelper;
+import org.apache.helix.ZkUnitTestBase;
+import org.apache.helix.api.id.ClusterId;
+import org.apache.helix.api.id.StateModelDefId;
+import org.apache.helix.integration.TestHelixConnection;
+import org.apache.helix.integration.manager.ClusterControllerManager;
+import org.apache.helix.integration.manager.MockParticipantManager;
+import org.apache.helix.manager.zk.ZkHelixConnection;
+import org.apache.helix.model.IdealState.RebalanceMode;
+import org.apache.helix.task.Task;
+import org.apache.helix.task.TaskFactory;
+import org.apache.helix.task.TaskResult;
+import org.apache.helix.task.TaskStateModelFactory;
+import org.testng.annotations.Test;
+
+public class TestTaskManager extends ZkUnitTestBase {
+ @Test
+ public void testBasic() throws Exception {
+ final int NUM_PARTICIPANTS = 3;
+ final int NUM_PARTITIONS = 1;
+ final int NUM_REPLICAS = 1;
+
+ String className = TestHelper.getTestClassName();
+ String methodName = TestHelper.getTestMethodName();
+ String clusterName = className + "_" + methodName;
+
+ // Set up cluster
+ TestHelper.setupCluster(clusterName, ZK_ADDR, 12918, // participant port
+ "localhost", // participant name prefix
+ "TestService", // resource name prefix
+ 1, // resources
+ NUM_PARTITIONS, // partitions per resource
+ NUM_PARTICIPANTS, // number of nodes
+ NUM_REPLICAS, // replicas
+ "StatelessService", RebalanceMode.FULL_AUTO, // just get everything up
+ true); // do rebalance
+
+ Map<String, TaskFactory> taskFactoryReg = new HashMap<String, TaskFactory>();
+ taskFactoryReg.put("myqueue", new TaskFactory() {
+ @Override
+ public Task createNewTask(String config) {
+ return new MyTask();
+ }
+ });
+ MockParticipantManager[] participants = new MockParticipantManager[NUM_PARTICIPANTS];
+ for (int i = 0; i < participants.length; i++) {
+ String instanceName = "localhost_" + (12918 + i);
+ participants[i] = new MockParticipantManager(ZK_ADDR, clusterName, instanceName);
+ participants[i].getStateMachineEngine()
+ .registerStateModelFactory(StateModelDefId.from("StatelessService"),
+ new TestHelixConnection.MockStateModelFactory());
+ participants[i].getStateMachineEngine().registerStateModelFactory(
+ StateModelDefId.from("Task"), new TaskStateModelFactory(participants[i], taskFactoryReg));
+ participants[i].syncStart();
+ }
+
+ ClusterControllerManager controller =
+ new ClusterControllerManager(ZK_ADDR, clusterName, "controller_1");
+ controller.syncStart();
+
+ HelixConnection connection = new ZkHelixConnection(ZK_ADDR);
+ connection.connect();
+ ClusterId clusterId = ClusterId.from(clusterName);
+ TaskManager taskManager = new TaskManager(clusterId, connection);
+ taskManager.createTaskQueue("myqueue", true);
+ taskManager.addTaskToQueue("mytask", "myqueue");
+ taskManager.addTaskToQueue("mytask2", "myqueue");
+
+ controller.syncStop();
+ for (MockParticipantManager participant : participants) {
+ participant.syncStop();
+ }
+ }
+
+ public static class MyTask implements Task {
+ @Override
+ public TaskResult run() {
+ try {
+ Thread.sleep(10000);
+ } catch (InterruptedException e) {
+ }
+ System.err.println("task complete");
+ return new TaskResult(TaskResult.Status.COMPLETED, "");
+ }
+
+ @Override
+ public void cancel() {
+ }
+ }
+}
[47/50] [abbrv] Merge remote-tracking branch
'origin/helix-provisioning'
Posted by ka...@apache.org.
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/YarnProvisioner.java
----------------------------------------------------------------------
diff --cc helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/YarnProvisioner.java
index 0000000,ce6b1bc..a3bdb0f
mode 000000,100644..100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/YarnProvisioner.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/YarnProvisioner.java
@@@ -1,0 -1,392 +1,411 @@@
+ package org.apache.helix.provisioning.yarn;
+
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements. See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership. The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License. You may obtain a copy of the License at
++ *
++ * http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing,
++ * software distributed under the License is distributed on an
++ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++ * KIND, either express or implied. See the License for the
++ * specific language governing permissions and limitations
++ * under the License.
++ */
++
+ import java.io.File;
+ import java.util.ArrayList;
+ import java.util.Collection;
+ import java.util.HashMap;
+ import java.util.HashSet;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.Set;
+ import java.util.Vector;
+ import java.util.concurrent.Executors;
+
+ import org.apache.commons.logging.Log;
+ import org.apache.commons.logging.LogFactory;
+ import org.apache.hadoop.fs.FileStatus;
+ import org.apache.hadoop.fs.FileSystem;
+ import org.apache.hadoop.fs.Path;
+ import org.apache.hadoop.yarn.api.ApplicationConstants;
+ import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
+ import org.apache.hadoop.yarn.api.records.Container;
+ import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+ import org.apache.hadoop.yarn.api.records.LocalResource;
+ import org.apache.hadoop.yarn.api.records.LocalResourceType;
+ import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
+ import org.apache.hadoop.yarn.api.records.Priority;
+ import org.apache.hadoop.yarn.api.records.Resource;
+ import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;
+ import org.apache.hadoop.yarn.conf.YarnConfiguration;
+ import org.apache.hadoop.yarn.util.ConverterUtils;
+ import org.apache.hadoop.yarn.util.Records;
+ import org.apache.helix.HelixManager;
+ import org.apache.helix.api.Cluster;
+ import org.apache.helix.api.Participant;
+ import org.apache.helix.api.config.ContainerConfig;
+ import org.apache.helix.api.config.ResourceConfig;
+ import org.apache.helix.api.id.ParticipantId;
+ import org.apache.helix.api.id.ResourceId;
+ import org.apache.helix.controller.provisioner.ContainerId;
+ import org.apache.helix.controller.provisioner.ContainerProvider;
+ import org.apache.helix.controller.provisioner.ContainerSpec;
+ import org.apache.helix.controller.provisioner.ContainerState;
+ import org.apache.helix.controller.provisioner.Provisioner;
+ import org.apache.helix.controller.provisioner.TargetProvider;
+ import org.apache.helix.controller.provisioner.TargetProviderResponse;
+ import org.apache.helix.model.InstanceConfig;
+ import org.apache.helix.provisioning.ApplicationSpec;
+ import org.apache.helix.provisioning.ContainerAskResponse;
+ import org.apache.helix.provisioning.ContainerLaunchResponse;
+ import org.apache.helix.provisioning.ContainerReleaseResponse;
+ import org.apache.helix.provisioning.ContainerStopResponse;
+ import org.apache.helix.provisioning.ParticipantLauncher;
+
+ import com.google.common.base.Function;
+ import com.google.common.collect.Lists;
+ import com.google.common.collect.Maps;
+ import com.google.common.util.concurrent.Futures;
+ import com.google.common.util.concurrent.ListenableFuture;
+ import com.google.common.util.concurrent.ListeningExecutorService;
+ import com.google.common.util.concurrent.MoreExecutors;
+
+ public class YarnProvisioner implements Provisioner, TargetProvider, ContainerProvider {
+
+ private static final Log LOG = LogFactory.getLog(YarnProvisioner.class);
+ static GenericApplicationMaster applicationMaster;
+ static ListeningExecutorService service = MoreExecutors.listeningDecorator(Executors
+ .newCachedThreadPool());
+ public static AppMasterConfig applicationMasterConfig;
+ public static ApplicationSpec applicationSpec;
+ Map<ContainerId, Container> allocatedContainersMap = new HashMap<ContainerId, Container>();
+ private HelixManager _helixManager;
+ private ResourceConfig _resourceConfig;
+
+ public YarnProvisioner() {
+
+ }
+
+ @Override
+ public ListenableFuture<ContainerId> allocateContainer(ContainerSpec spec) {
+ ContainerRequest containerAsk = setupContainerAskForRM(spec);
+ ListenableFuture<ContainerAskResponse> requestNewContainer =
+ applicationMaster.acquireContainer(containerAsk);
+ return Futures.transform(requestNewContainer,
+ new Function<ContainerAskResponse, ContainerId>() {
+ @Override
+ public ContainerId apply(ContainerAskResponse containerAskResponse) {
+ ContainerId helixContainerId =
+ ContainerId.from(containerAskResponse.getContainer().getId().toString());
+ allocatedContainersMap.put(helixContainerId, containerAskResponse.getContainer());
+ return helixContainerId;
+ }
+ });
+
+ }
+
+ @Override
+ public ListenableFuture<Boolean> deallocateContainer(final ContainerId containerId) {
+ ListenableFuture<ContainerReleaseResponse> releaseContainer =
+ applicationMaster.releaseContainer(allocatedContainersMap.get(containerId));
+ return Futures.transform(releaseContainer, new Function<ContainerReleaseResponse, Boolean>() {
+ @Override
+ public Boolean apply(ContainerReleaseResponse response) {
+ return response != null;
+ }
+ }, service);
+
+ }
+
+ @Override
+ public ListenableFuture<Boolean> startContainer(final ContainerId containerId,
+ Participant participant) {
+ Container container = allocatedContainersMap.get(containerId);
+ ContainerLaunchContext launchContext;
+ try {
+ launchContext = createLaunchContext(containerId, container, participant);
+ } catch (Exception e) {
+ LOG.error("Exception while creating context to launch container:" + containerId, e);
+ return null;
+ }
+ ListenableFuture<ContainerLaunchResponse> future =
+ applicationMaster.launchContainer(container, launchContext);
+ return Futures.transform(future, new Function<ContainerLaunchResponse, Boolean>() {
+ @Override
+ public Boolean apply(ContainerLaunchResponse response) {
+ return response != null;
+ }
+ }, service);
+ }
+
+ private ContainerLaunchContext createLaunchContext(ContainerId containerId, Container container,
+ Participant participant) throws Exception {
+
+ ContainerLaunchContext participantContainer = Records.newRecord(ContainerLaunchContext.class);
+
+ // Map<String, String> envs = System.getenv();
+ String appName = applicationMasterConfig.getAppName();
+ int appId = applicationMasterConfig.getAppId();
+ String serviceName = _resourceConfig.getId().stringify();
+ String serviceClasspath = applicationMasterConfig.getClassPath(serviceName);
+ String mainClass = applicationMasterConfig.getMainClass(serviceName);
+ String zkAddress = applicationMasterConfig.getZKAddress();
+
+ // set the localresources needed to launch container
+ Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
+
+ LocalResource servicePackageResource = Records.newRecord(LocalResource.class);
+ YarnConfiguration conf = new YarnConfiguration();
+ FileSystem fs;
+ fs = FileSystem.get(conf);
+ String pathSuffix = appName + "/" + appId + "/" + serviceName + ".tar";
+ Path dst = new Path(fs.getHomeDirectory(), pathSuffix);
+ FileStatus destStatus = fs.getFileStatus(dst);
+
+ // Set the type of resource - file or archive
+ // archives are untarred at destination
+ // we don't need the jar file to be untarred for now
+ servicePackageResource.setType(LocalResourceType.ARCHIVE);
+ // Set visibility of the resource
+ // Setting to most private option
+ servicePackageResource.setVisibility(LocalResourceVisibility.APPLICATION);
+ // Set the resource to be copied over
+ servicePackageResource.setResource(ConverterUtils.getYarnUrlFromPath(dst));
+ // Set timestamp and length of file so that the framework
+ // can do basic sanity checks for the local resource
+ // after it has been copied over to ensure it is the same
+ // resource the client intended to use with the application
+ servicePackageResource.setTimestamp(destStatus.getModificationTime());
+ servicePackageResource.setSize(destStatus.getLen());
+ LOG.info("Setting local resource:" + servicePackageResource + " for service" + serviceName);
+ localResources.put(serviceName, servicePackageResource);
+
+ // Set local resource info into app master container launch context
+ participantContainer.setLocalResources(localResources);
+
+ // Set the necessary security tokens as needed
+ // amContainer.setContainerTokens(containerToken);
+
+ // Set the env variables to be setup in the env where the application master will be run
+ LOG.info("Set the environment for the application master");
+ Map<String, String> env = new HashMap<String, String>();
+ env.put(serviceName, dst.getName());
+ // Add AppMaster.jar location to classpath
+ // At some point we should not be required to add
+ // the hadoop specific classpaths to the env.
+ // It should be provided out of the box.
+ // For now setting all required classpaths including
+ // the classpath to "." for the application jar
+ StringBuilder classPathEnv =
+ new StringBuilder(Environment.CLASSPATH.$()).append(File.pathSeparatorChar).append("./*");
+ classPathEnv.append(File.pathSeparatorChar);
+ classPathEnv.append(serviceClasspath);
+ for (String c : conf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
+ YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH)) {
+ classPathEnv.append(File.pathSeparatorChar);
+ classPathEnv.append(c.trim());
+ }
+ classPathEnv.append(File.pathSeparatorChar).append("./log4j.properties");
+ LOG.info("Setting classpath for service:\n" + classPathEnv.toString());
+ env.put("CLASSPATH", classPathEnv.toString());
+
+ participantContainer.setEnvironment(env);
+
+ if (applicationMaster.allTokens != null) {
+ LOG.info("Setting tokens: " + applicationMaster.allTokens);
+ participantContainer.setTokens(applicationMaster.allTokens);
+ }
+
+ // Set the necessary command to execute the application master
+ Vector<CharSequence> vargs = new Vector<CharSequence>(30);
+
+ // Set java executable command
+ LOG.info("Setting up app master command");
+ vargs.add(Environment.JAVA_HOME.$() + "/bin/java");
+ // Set Xmx based on am memory size
+ vargs.add("-Xmx" + 1024 + "m");
+ // Set class name
+ vargs.add(ParticipantLauncher.class.getCanonicalName());
+ // Set params for container participant
+ vargs.add("--zkAddress " + zkAddress);
+ vargs.add("--cluster " + appName);
+ vargs.add("--participantId " + participant.getId().stringify());
+ vargs.add("--participantClass " + mainClass);
+
+ vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/ContainerParticipant.stdout");
+ vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/ContainerParticipant.stderr");
+
+ // Get final commmand
+ StringBuilder command = new StringBuilder();
+ for (CharSequence str : vargs) {
+ command.append(str).append(" ");
+ }
+
+ LOG.info("Completed setting up container launch command " + command.toString()
+ + " with arguments \n" + vargs);
+ List<String> commands = new ArrayList<String>();
+ commands.add(command.toString());
+ participantContainer.setCommands(commands);
+ return participantContainer;
+ }
+
+ @Override
+ public ListenableFuture<Boolean> stopContainer(final ContainerId containerId) {
+ Container container = allocatedContainersMap.get(containerId);
+ ListenableFuture<ContainerStopResponse> future = applicationMaster.stopContainer(container);
+ return Futures.transform(future, new Function<ContainerStopResponse, Boolean>() {
+ @Override
+ public Boolean apply(ContainerStopResponse response) {
+ return response != null;
+ }
+ }, service);
+ }
+
+ @Override
+ public void init(HelixManager helixManager, ResourceConfig resourceConfig) {
+ _helixManager = helixManager;
+ _resourceConfig = resourceConfig;
+ }
+
+ @Override
+ public TargetProviderResponse evaluateExistingContainers(Cluster cluster, ResourceId resourceId,
+ Collection<Participant> participants) {
+ TargetProviderResponse response = new TargetProviderResponse();
+ // ask for two containers at a time
+ List<ContainerSpec> containersToAcquire = Lists.newArrayList();
+ List<Participant> containersToStart = Lists.newArrayList();
+ List<Participant> containersToRelease = Lists.newArrayList();
+ List<Participant> containersToStop = Lists.newArrayList();
+ YarnProvisionerConfig provisionerConfig =
+ (YarnProvisionerConfig) cluster.getConfig().getResourceMap().get(resourceId)
+ .getProvisionerConfig();
+ int targetNumContainers = provisionerConfig.getNumContainers();
+
+ // Any container that is in a state should be put in this set
+ Set<ParticipantId> existingContainersIdSet = new HashSet<ParticipantId>();
+
+ // Cache halted containers to determine which to restart and which to release
+ Map<ParticipantId, Participant> excessHaltedContainers = Maps.newHashMap();
+
+ // Cache participants to ensure that excess participants are stopped
+ Map<ParticipantId, Participant> excessActiveContainers = Maps.newHashMap();
+
+ for (Participant participant : participants) {
+ ContainerConfig containerConfig = participant.getContainerConfig();
+ if (containerConfig != null && containerConfig.getState() != null) {
+ ContainerState state = containerConfig.getState();
+ switch (state) {
+ case ACQUIRING:
+ existingContainersIdSet.add(participant.getId());
+ break;
+ case ACQUIRED:
+ // acquired containers are ready to start
+ existingContainersIdSet.add(participant.getId());
+ containersToStart.add(participant);
+ break;
+ case CONNECTING:
+ existingContainersIdSet.add(participant.getId());
+ break;
+ case CONNECTED:
+ // active containers can be stopped or kept active
+ existingContainersIdSet.add(participant.getId());
+ excessActiveContainers.put(participant.getId(), participant);
+ break;
+ case DISCONNECTED:
+ // disconnected containers must be stopped
+ existingContainersIdSet.add(participant.getId());
+ containersToStop.add(participant);
+ case HALTING:
+ existingContainersIdSet.add(participant.getId());
+ break;
+ case HALTED:
+ // halted containers can be released or restarted
+ existingContainersIdSet.add(participant.getId());
+ excessHaltedContainers.put(participant.getId(), participant);
+ break;
+ case FINALIZING:
+ existingContainersIdSet.add(participant.getId());
+ break;
+ case FINALIZED:
+ break;
+ case FAILED:
+ // remove the failed instance
+ _helixManager.getClusterManagmentTool().dropInstance(cluster.getId().toString(),
+ new InstanceConfig(participant.getId()));
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
+ for (int i = 0; i < targetNumContainers; i++) {
+ ParticipantId participantId = ParticipantId.from(resourceId + "_container_" + (i));
+ excessActiveContainers.remove(participantId); // don't stop this container if active
+ if (excessHaltedContainers.containsKey(participantId)) {
+ // Halted containers can be restarted if necessary
+ // Participant participant = excessHaltedContainers.get(participantId);
+ // containersToStart.add(participant);
+ // excessHaltedContainers.remove(participantId); // don't release this container
+ } else if (!existingContainersIdSet.contains(participantId)) {
+ // Unallocated containers must be allocated
+ ContainerSpec containerSpec = new ContainerSpec(participantId);
+ containerSpec.setMemory(_resourceConfig.getUserConfig().getIntField("memory", 1024));
+ containersToAcquire.add(containerSpec);
+ }
+ }
+
+ // Add all the containers that should be stopped because they fall outside the target range
+ containersToStop.addAll(excessActiveContainers.values());
+
+ // Add halted containers that should not be restarted
+ containersToRelease.addAll(excessHaltedContainers.values());
+
+ response.setContainersToAcquire(containersToAcquire);
+ response.setContainersToStart(containersToStart);
+ response.setContainersToRelease(containersToRelease);
+ response.setContainersToStop(containersToStop);
+ LOG.info("target provider response containers to acquire:" + response.getContainersToAcquire());
+ LOG.info("target provider response containers to start:" + response.getContainersToStart());
+ return response;
+ }
+
+ private ContainerRequest setupContainerAskForRM(ContainerSpec spec) {
+ // setup requirements for hosts
+ // using * as any host will do for the distributed shell app
+ // set the priority for the request
+ Priority pri = Records.newRecord(Priority.class);
+ int requestPriority = 0;
+ // TODO - what is the range for priority? how to decide?
+ pri.setPriority(requestPriority);
+
+ // Set up resource type requirements
+ // For now, only memory is supported so we set memory requirements
+ Resource capability = Records.newRecord(Resource.class);
+ int memory = spec.getMemory();
+ capability.setMemory(memory);
+
+ ContainerRequest request = new ContainerRequest(capability, null, null, pri);
+ LOG.info("Requested container ask: " + request.toString());
+ return request;
+ }
+
+ @Override
+ public ContainerProvider getContainerProvider() {
+ return this;
+ }
+
+ @Override
+ public TargetProvider getTargetProvider() {
+ return this;
+ }
+
+ }
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/YarnProvisionerConfig.java
----------------------------------------------------------------------
diff --cc helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/YarnProvisionerConfig.java
index 0000000,67dd679..69aa2fb
mode 000000,100644..100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/YarnProvisionerConfig.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/YarnProvisionerConfig.java
@@@ -1,0 -1,54 +1,73 @@@
+ package org.apache.helix.provisioning.yarn;
+
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements. See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership. The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License. You may obtain a copy of the License at
++ *
++ * http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing,
++ * software distributed under the License is distributed on an
++ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++ * KIND, either express or implied. See the License for the
++ * specific language governing permissions and limitations
++ * under the License.
++ */
++
+ import org.apache.helix.api.id.ResourceId;
+ import org.apache.helix.controller.provisioner.ProvisionerConfig;
+ import org.apache.helix.controller.provisioner.ProvisionerRef;
+ import org.apache.helix.controller.serializer.DefaultStringSerializer;
+ import org.apache.helix.controller.serializer.StringSerializer;
+ import org.codehaus.jackson.annotate.JsonProperty;
+
+ public class YarnProvisionerConfig implements ProvisionerConfig {
+
+ private ResourceId _resourceId;
+ private Class<? extends StringSerializer> _serializerClass;
+ private ProvisionerRef _provisionerRef;
+ private Integer _numContainers;
+
+ public YarnProvisionerConfig(@JsonProperty("resourceId") ResourceId resourceId) {
+ _resourceId = resourceId;
+ _serializerClass = DefaultStringSerializer.class;
+ _provisionerRef = ProvisionerRef.from(YarnProvisioner.class.getName());
+ }
+
+ public void setNumContainers(int numContainers) {
+ _numContainers = numContainers;
+ }
+
+ public Integer getNumContainers() {
+ return _numContainers;
+ }
+
+ @Override
+ public ResourceId getResourceId() {
+ return _resourceId;
+ }
+
+ @Override
+ public ProvisionerRef getProvisionerRef() {
+ return _provisionerRef;
+ }
+
+ public void setProvisionerRef(ProvisionerRef provisionerRef) {
+ _provisionerRef = provisionerRef;
+ }
+
+ @Override
+ public Class<? extends StringSerializer> getSerializerClass() {
+ return _serializerClass;
+ }
+
+ public void setSerializerClass(Class<? extends StringSerializer> serializerClass) {
+ _serializerClass = serializerClass;
+ }
+
+ }
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/helix-provisioning/src/main/resources/sample_application.yaml
----------------------------------------------------------------------
diff --cc helix-provisioning/src/main/resources/sample_application.yaml
index 0000000,f45faa3..5d6d88f
mode 000000,100644..100644
--- a/helix-provisioning/src/main/resources/sample_application.yaml
+++ b/helix-provisioning/src/main/resources/sample_application.yaml
@@@ -1,0 -1,26 +1,42 @@@
-===
++#
++# Licensed to the Apache Software Foundation (ASF) under one
++# or more contributor license agreements. See the NOTICE file
++# distributed with this work for additional information
++# regarding copyright ownership. The ASF licenses this file
++# to you under the Apache License, Version 2.0 (the
++# "License"); you may not use this file except in compliance
++# with the License. You may obtain a copy of the License at
++#
++# http://www.apache.org/licenses/LICENSE-2.0
++#
++# Unless required by applicable law or agreed to in writing,
++# software distributed under the License is distributed on an
++# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++# KIND, either express or implied. See the License for the
++# specific language governing permissions and limitations
++# under the License.
++#
+ appName: test
+ configs:
+ k1: v1
+ services:
+ - name: myservice
- participantClass: org.apache.helix.myApp.SimpleWebserver
++ participantClass: org.apache.helix.myApp.SimpleWebserver
+ minContainers:3
- maxContainers:3
++ maxContainers:3
+ configs:
- - participantId: myservice_0
++ - participantId: myservice_0
+ port: 9500
- - participantId: myservice_1
- port: 9501
- - participantId: myservice_2
++ - participantId: myservice_1
++ port: 9501
++ - participantId: myservice_2
+ port: 9502
+ resources:
+ - name: distributedLock
+ numPartitions: 6
+ numReplicas: 2
+ rebalanceMode: FULL_AUTO
+ stateModel: OnlineOffline
- tag: myservice
- configs:
++ tag: myservice
++ configs:
+ k1: v1
-
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/pom.xml
----------------------------------------------------------------------
diff --cc pom.xml
index 2675d45,f3d2e61..acb8589
--- a/pom.xml
+++ b/pom.xml
@@@ -197,9 -197,10 +197,10 @@@ under the License
<module>helix-core</module>
<module>helix-admin-webapp</module>
<module>helix-agent</module>
+ <module>helix-provisioning</module>
<module>helix-examples</module>
<module>recipes</module>
- <module>site-releases</module>
+ <module>website</module>
</modules>
<mailingLists>
@@@ -342,6 -352,6 +343,11 @@@
<version>${project.version}</version>
</dependency>
<dependency>
++ <groupId>org.apache.helix</groupId>
++ <artifactId>helix-provisioning</artifactId>
++ <version>${project.version}</version>
++ </dependency>
++ <dependency>
<groupId>org.apache.helix</groupId>
<artifactId>helix-core</artifactId>
<type>test-jar</type>
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/recipes/helloworld-provisioning-yarn/pom.xml
----------------------------------------------------------------------
diff --cc recipes/helloworld-provisioning-yarn/pom.xml
index 0000000,4cef9a7..bf5a89c
mode 000000,100644..100644
--- a/recipes/helloworld-provisioning-yarn/pom.xml
+++ b/recipes/helloworld-provisioning-yarn/pom.xml
@@@ -1,0 -1,159 +1,158 @@@
+ <?xml version="1.0" encoding="UTF-8" ?>
+ <!--
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements. See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership. The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied. See the License for the
+ specific language governing permissions and limitations
+ under the License.
+ -->
+ <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+
+ <parent>
+ <groupId>org.apache.helix.recipes</groupId>
+ <artifactId>recipes</artifactId>
+ <version>0.7.1-incubating-SNAPSHOT</version>
+ </parent>
+
+ <artifactId>helloworld-provisioning-yarn</artifactId>
+ <packaging>bundle</packaging>
+ <name>Apache Helix :: Recipes :: Provisioning :: YARN :: Hello World</name>
+
+ <properties>
+ <osgi.import>
+ org.apache.helix*,
+ org.apache.log4j,
+ *
+ </osgi.import>
+ <osgi.export>org.apache.helix.provisioning.yarn.example*;version="${project.version};-noimport:=true</osgi.export>
+ </properties>
+
+ <dependencies>
+ <dependency>
+ <groupId>org.testng</groupId>
+ <artifactId>testng</artifactId>
+ <version>6.0.1</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.helix</groupId>
+ <artifactId>helix-core</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.helix</groupId>
+ <artifactId>helix-provisioning</artifactId>
- <version>0.7.1-incubating-SNAPSHOT</version>
+ </dependency>
+ <dependency>
+ <groupId>log4j</groupId>
+ <artifactId>log4j</artifactId>
+ <exclusions>
+ <exclusion>
+ <groupId>javax.mail</groupId>
+ <artifactId>mail</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>javax.jms</groupId>
+ <artifactId>jms</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>com.sun.jdmk</groupId>
+ <artifactId>jmxtools</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>com.sun.jmx</groupId>
+ <artifactId>jmxri</artifactId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+ </dependencies>
+ <build>
+ <pluginManagement>
+ <plugins>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>appassembler-maven-plugin</artifactId>
+ <configuration>
+ <!-- Set the target configuration directory to be used in the bin scripts -->
+ <!-- <configurationDirectory>conf</configurationDirectory> -->
+ <!-- Copy the contents from "/src/main/config" to the target configuration
+ directory in the assembled application -->
+ <!-- <copyConfigurationDirectory>true</copyConfigurationDirectory> -->
+ <!-- Include the target configuration directory in the beginning of
+ the classpath declaration in the bin scripts -->
+ <includeConfigurationDirectoryInClasspath>true</includeConfigurationDirectoryInClasspath>
+ <assembleDirectory>${project.build.directory}/${project.artifactId}-pkg</assembleDirectory>
+ <!-- Extra JVM arguments that will be included in the bin scripts -->
+ <extraJvmArguments>-Xms512m -Xmx512m</extraJvmArguments>
+ <!-- Generate bin scripts for windows and unix pr default -->
+ <platforms>
+ <platform>windows</platform>
+ <platform>unix</platform>
+ </platforms>
+ </configuration>
+ <executions>
+ <execution>
+ <phase>package</phase>
+ <goals>
+ <goal>assemble</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.rat</groupId>
+ <artifactId>apache-rat-plugin</artifactId>
+ <configuration>
+ <excludes combine.children="append">
+ </excludes>
+ </configuration>
+ </plugin>
+ </plugins>
+ </pluginManagement>
+ <plugins>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>appassembler-maven-plugin</artifactId>
+ <configuration>
+ <programs>
+ <program>
+ <mainClass>org.apache.helix.provisioning.yarn.Client</mainClass>
+ <name>yarn-job-launcher</name>
+ </program>
+ <program>
+ <mainClass>org.apache.helix.provisioning.yarn.AppLauncher</mainClass>
+ <name>app-launcher</name>
+ </program>
+ </programs>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-assembly-plugin</artifactId>
+ <configuration>
+ <descriptors>
+ <descriptor>src/assemble/assembly.xml</descriptor>
+ </descriptors>
+ </configuration>
+ <executions>
+ <execution>
+ <phase>package</phase>
+ <goals>
+ <goal>single</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+ </project>
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/recipes/helloworld-provisioning-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWordAppSpecFactory.java
----------------------------------------------------------------------
diff --cc recipes/helloworld-provisioning-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWordAppSpecFactory.java
index 0000000,e9163d3..40fed23
mode 000000,100644..100644
--- a/recipes/helloworld-provisioning-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWordAppSpecFactory.java
+++ b/recipes/helloworld-provisioning-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWordAppSpecFactory.java
@@@ -1,0 -1,36 +1,48 @@@
+ package org.apache.helix.provisioning.yarn.example;
+
-import java.io.File;
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements. See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership. The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License. You may obtain a copy of the License at
++ *
++ * http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing,
++ * software distributed under the License is distributed on an
++ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++ * KIND, either express or implied. See the License for the
++ * specific language governing permissions and limitations
++ * under the License.
++ */
++
+ import java.io.InputStream;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.Map;
+
-import org.apache.helix.provisioning.AppConfig;
+ import org.apache.helix.provisioning.ApplicationSpec;
+ import org.apache.helix.provisioning.ApplicationSpecFactory;
-import org.apache.helix.provisioning.yarn.example.HelloWorldService;
+ import org.apache.helix.provisioning.yarn.example.HelloworldAppSpec;
-import org.yaml.snakeyaml.DumperOptions;
+ import org.yaml.snakeyaml.Yaml;
+
+ public class HelloWordAppSpecFactory implements ApplicationSpecFactory {
+
+ @Override
+ public ApplicationSpec fromYaml(InputStream inputstream) {
+ return (ApplicationSpec) new Yaml().load(inputstream);
+ // return data;
+ }
+
+ public static void main(String[] args) {
+
+ Yaml yaml = new Yaml();
+ InputStream resourceAsStream =
+ ClassLoader.getSystemClassLoader().getResourceAsStream("hello_world_app_spec.yaml");
+ HelloworldAppSpec spec = yaml.loadAs(resourceAsStream, HelloworldAppSpec.class);
+ String dump = yaml.dump(spec);
+ System.out.println(dump);
+ System.out.println(spec.getServiceConfig("HelloWorld").getStringField("num_containers", "1"));
+
+ }
+ }
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/recipes/helloworld-provisioning-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldService.java
----------------------------------------------------------------------
diff --cc recipes/helloworld-provisioning-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldService.java
index 0000000,269ae0c..3d604eb
mode 000000,100644..100644
--- a/recipes/helloworld-provisioning-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldService.java
+++ b/recipes/helloworld-provisioning-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldService.java
@@@ -1,0 -1,37 +1,56 @@@
+ package org.apache.helix.provisioning.yarn.example;
+
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements. See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership. The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License. You may obtain a copy of the License at
++ *
++ * http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing,
++ * software distributed under the License is distributed on an
++ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++ * KIND, either express or implied. See the License for the
++ * specific language governing permissions and limitations
++ * under the License.
++ */
++
+ import org.apache.helix.HelixConnection;
+ import org.apache.helix.api.id.ClusterId;
+ import org.apache.helix.api.id.ParticipantId;
+ import org.apache.helix.participant.AbstractParticipantService;
+ import org.apache.helix.provisioning.ServiceConfig;
+ import org.apache.helix.provisioning.participant.StatelessParticipantService;
+ import org.apache.log4j.Logger;
+
+ public class HelloWorldService extends StatelessParticipantService {
+
+ private static Logger LOG = Logger.getLogger(AbstractParticipantService.class);
+
+ static String SERVICE_NAME = "HelloWorld";
+
+ public HelloWorldService(HelixConnection connection, ClusterId clusterId,
+ ParticipantId participantId) {
+ super(connection, clusterId, participantId, SERVICE_NAME);
+ }
+
+ @Override
+ protected void init(ServiceConfig serviceConfig) {
+ LOG.info("Initialized service with config " + serviceConfig);
+ }
+
+ @Override
+ protected void goOnline() {
+ LOG.info("HelloWorld service is told to go online");
+ }
+
+ @Override
+ protected void goOffine() {
+ LOG.info("HelloWorld service is told to go offline");
+ }
+
+ }
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/recipes/helloworld-provisioning-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/HelloworldAppSpec.java
----------------------------------------------------------------------
diff --cc recipes/helloworld-provisioning-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/HelloworldAppSpec.java
index 0000000,4fda91e..e2a63f2
mode 000000,100644..100644
--- a/recipes/helloworld-provisioning-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/HelloworldAppSpec.java
+++ b/recipes/helloworld-provisioning-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/HelloworldAppSpec.java
@@@ -1,0 -1,153 +1,167 @@@
+ package org.apache.helix.provisioning.yarn.example;
+
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements. See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership. The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License. You may obtain a copy of the License at
++ *
++ * http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing,
++ * software distributed under the License is distributed on an
++ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++ * KIND, either express or implied. See the License for the
++ * specific language governing permissions and limitations
++ * under the License.
++ */
++
+ import java.net.URI;
+ import java.net.URISyntaxException;
+ import java.util.List;
+ import java.util.Map;
+
+ import org.apache.helix.api.Scope;
-import org.apache.helix.api.config.ParticipantConfig;
-import org.apache.helix.api.config.ResourceConfig;
-import org.apache.helix.api.config.ResourceConfig.Builder;
-import org.apache.helix.api.config.UserConfig;
-import org.apache.helix.api.id.ParticipantId;
+ import org.apache.helix.api.id.ResourceId;
+ import org.apache.helix.provisioning.AppConfig;
+ import org.apache.helix.provisioning.ApplicationSpec;
+ import org.apache.helix.provisioning.ServiceConfig;
+ import org.apache.helix.provisioning.TaskConfig;
+
+ import com.google.common.collect.Maps;
+
+ public class HelloworldAppSpec implements ApplicationSpec {
+
+ public String _appName;
+
+ public AppConfig _appConfig;
+
+ public List<String> _services;
+
+ private String _appMasterPackageUri;
+
+ private Map<String, String> _servicePackageURIMap;
+
+ private Map<String, String> _serviceMainClassMap;
+
+ private Map<String, ServiceConfig> _serviceConfigMap;
+
+ private List<TaskConfig> _taskConfigs;
+
+ public AppConfig getAppConfig() {
+ return _appConfig;
+ }
+
+ public void setAppConfig(AppConfig appConfig) {
+ _appConfig = appConfig;
+ }
+
+ public String getAppMasterPackageUri() {
+ return _appMasterPackageUri;
+ }
+
+ public void setAppMasterPackageUri(String appMasterPackageUri) {
+ _appMasterPackageUri = appMasterPackageUri;
+ }
+
+ public Map<String, String> getServicePackageURIMap() {
+ return _servicePackageURIMap;
+ }
+
+ public void setServicePackageURIMap(Map<String, String> servicePackageURIMap) {
+ _servicePackageURIMap = servicePackageURIMap;
+ }
+
+ public Map<String, String> getServiceMainClassMap() {
+ return _serviceMainClassMap;
+ }
+
+ public void setServiceMainClassMap(Map<String, String> serviceMainClassMap) {
+ _serviceMainClassMap = serviceMainClassMap;
+ }
+
+ public Map<String, Map<String, String>> getServiceConfigMap() {
+ Map<String,Map<String,String>> map = Maps.newHashMap();
+ for(String service:_serviceConfigMap.keySet()){
+ map.put(service, _serviceConfigMap.get(service).getSimpleFields());
+ }
+ return map;
+ }
+
+ public void setServiceConfigMap(Map<String, Map<String, Object>> map) {
+ _serviceConfigMap = Maps.newHashMap();
+
+ for(String service:map.keySet()){
+ ServiceConfig serviceConfig = new ServiceConfig(Scope.resource(ResourceId.from(service)));
+ Map<String, Object> simpleFields = map.get(service);
+ for(String key:simpleFields.keySet()){
+ serviceConfig.setSimpleField(key, simpleFields.get(key).toString());
+ }
+ _serviceConfigMap.put(service, serviceConfig);
+ }
+ }
+
+ public void setAppName(String appName) {
+ _appName = appName;
+ }
+
+ public void setServices(List<String> services) {
+ _services = services;
+ }
+
+ public void setTaskConfigs(List<TaskConfig> taskConfigs) {
+ _taskConfigs = taskConfigs;
+ }
+
+ @Override
+ public String getAppName() {
+ return _appName;
+ }
+
+ @Override
+ public AppConfig getConfig() {
+ return _appConfig;
+ }
+
+ @Override
+ public List<String> getServices() {
+ return _services;
+ }
+
+ @Override
+ public URI getAppMasterPackage() {
+ try {
+ return new URI(_appMasterPackageUri);
+ } catch (URISyntaxException e) {
+ return null;
+ }
+ }
+
+ @Override
+ public URI getServicePackage(String serviceName) {
+ try {
+ return new URI(_servicePackageURIMap.get(serviceName));
+ } catch (URISyntaxException e) {
+ return null;
+ }
+ }
+
+ @Override
+ public String getServiceMainClass(String service) {
+ return _serviceMainClassMap.get(service);
+ }
+
+ @Override
+ public ServiceConfig getServiceConfig(String serviceName) {
+ return _serviceConfigMap.get(serviceName);
+ }
+
+ @Override
+ public List<TaskConfig> getTaskConfigs() {
+ return _taskConfigs;
+ }
+
+ }
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/recipes/helloworld-provisioning-yarn/src/main/resources/hello_world_app_spec.yaml
----------------------------------------------------------------------
diff --cc recipes/helloworld-provisioning-yarn/src/main/resources/hello_world_app_spec.yaml
index 0000000,baaddb5..761c97e
mode 000000,100755..100755
--- a/recipes/helloworld-provisioning-yarn/src/main/resources/hello_world_app_spec.yaml
+++ b/recipes/helloworld-provisioning-yarn/src/main/resources/hello_world_app_spec.yaml
@@@ -1,0 -1,24 +1,42 @@@
++#
++# Licensed to the Apache Software Foundation (ASF) under one
++# or more contributor license agreements. See the NOTICE file
++# distributed with this work for additional information
++# regarding copyright ownership. The ASF licenses this file
++# to you under the Apache License, Version 2.0 (the
++# "License"); you may not use this file except in compliance
++# with the License. You may obtain a copy of the License at
++#
++# http://www.apache.org/licenses/LICENSE-2.0
++#
++# Unless required by applicable law or agreed to in writing,
++# software distributed under the License is distributed on an
++# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++# KIND, either express or implied. See the License for the
++# specific language governing permissions and limitations
++# under the License.
++#
+ !!org.apache.helix.provisioning.yarn.example.HelloworldAppSpec
+ appConfig:
+ config: {
+ k1: v1
+ }
+ appMasterPackageUri: 'file:///Users/kgopalak/Documents/projects/incubator-helix/recipes/helloworld-provisioning-yarn/target/helloworld-provisioning-yarn-0.7.1-incubating-SNAPSHOT-pkg.tar'
+ appName: testApp
+ serviceConfigMap:
+ HelloWorld: {
+ num_containers: 3,
+ memory: 1024
+ }
+ serviceMainClassMap: {
+ HelloWorld: org.apache.helix.provisioning.yarn.example.HelloWorldService
+ }
+ servicePackageURIMap: {
+ HelloWorld: 'file:///Users/kgopalak/Documents/projects/incubator-helix/recipes/helloworld-provisioning-yarn/target/helloworld-provisioning-yarn-0.7.1-incubating-SNAPSHOT-pkg.tar'
+ }
+ services: [
+ HelloWorld]
+ taskConfigs: null
+
+
+
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/recipes/jobrunner-yarn/pom.xml
----------------------------------------------------------------------
diff --cc recipes/jobrunner-yarn/pom.xml
index 0000000,f067a56..434fd8d
mode 000000,100644..100644
--- a/recipes/jobrunner-yarn/pom.xml
+++ b/recipes/jobrunner-yarn/pom.xml
@@@ -1,0 -1,159 +1,158 @@@
+ <?xml version="1.0" encoding="UTF-8" ?>
+ <!--
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements. See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership. The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied. See the License for the
+ specific language governing permissions and limitations
+ under the License.
+ -->
+ <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+
+ <parent>
+ <groupId>org.apache.helix.recipes</groupId>
+ <artifactId>recipes</artifactId>
+ <version>0.7.1-incubating-SNAPSHOT</version>
+ </parent>
+
+ <artifactId>jobrunner-yarn</artifactId>
+ <packaging>bundle</packaging>
+ <name>Apache Helix :: Recipes :: Provisioning :: YARN :: Job Runner</name>
+
+ <properties>
+ <osgi.import>
+ org.apache.helix*,
+ org.apache.log4j,
+ *
+ </osgi.import>
+ <osgi.export>org.apache.helix.provisioning.yarn.example*;version="${project.version};-noimport:=true</osgi.export>
+ </properties>
+
+ <dependencies>
+ <dependency>
+ <groupId>org.testng</groupId>
+ <artifactId>testng</artifactId>
+ <version>6.0.1</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.helix</groupId>
+ <artifactId>helix-core</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.helix</groupId>
+ <artifactId>helix-provisioning</artifactId>
- <version>0.7.1-incubating-SNAPSHOT</version>
+ </dependency>
+ <dependency>
+ <groupId>log4j</groupId>
+ <artifactId>log4j</artifactId>
+ <exclusions>
+ <exclusion>
+ <groupId>javax.mail</groupId>
+ <artifactId>mail</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>javax.jms</groupId>
+ <artifactId>jms</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>com.sun.jdmk</groupId>
+ <artifactId>jmxtools</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>com.sun.jmx</groupId>
+ <artifactId>jmxri</artifactId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+ </dependencies>
+ <build>
+ <pluginManagement>
+ <plugins>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>appassembler-maven-plugin</artifactId>
+ <configuration>
+ <!-- Set the target configuration directory to be used in the bin scripts -->
+ <!-- <configurationDirectory>conf</configurationDirectory> -->
+ <!-- Copy the contents from "/src/main/config" to the target configuration
+ directory in the assembled application -->
+ <!-- <copyConfigurationDirectory>true</copyConfigurationDirectory> -->
+ <!-- Include the target configuration directory in the beginning of
+ the classpath declaration in the bin scripts -->
+ <includeConfigurationDirectoryInClasspath>true</includeConfigurationDirectoryInClasspath>
+ <assembleDirectory>${project.build.directory}/${project.artifactId}-pkg</assembleDirectory>
+ <!-- Extra JVM arguments that will be included in the bin scripts -->
+ <extraJvmArguments>-Xms512m -Xmx512m</extraJvmArguments>
+ <!-- Generate bin scripts for windows and unix pr default -->
+ <platforms>
+ <platform>windows</platform>
+ <platform>unix</platform>
+ </platforms>
+ </configuration>
+ <executions>
+ <execution>
+ <phase>package</phase>
+ <goals>
+ <goal>assemble</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.rat</groupId>
+ <artifactId>apache-rat-plugin</artifactId>
+ <configuration>
+ <excludes combine.children="append">
+ </excludes>
+ </configuration>
+ </plugin>
+ </plugins>
+ </pluginManagement>
+ <plugins>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>appassembler-maven-plugin</artifactId>
+ <configuration>
+ <programs>
+ <program>
+ <mainClass>org.apache.helix.provisioning.yarn.AppLauncher</mainClass>
+ <name>app-launcher</name>
+ </program>
+ <program>
+ <mainClass>org.apache.helix.provisioning.yarn.example.JobRunnerMain</mainClass>
+ <name>job-runner</name>
+ </program>
+ </programs>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-assembly-plugin</artifactId>
+ <configuration>
+ <descriptors>
+ <descriptor>src/assemble/assembly.xml</descriptor>
+ </descriptors>
+ </configuration>
+ <executions>
+ <execution>
+ <phase>package</phase>
+ <goals>
+ <goal>single</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+ </project>
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/recipes/jobrunner-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/JobRunnerMain.java
----------------------------------------------------------------------
diff --cc recipes/jobrunner-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/JobRunnerMain.java
index 0000000,78266cf..3b92f33
mode 000000,100644..100644
--- a/recipes/jobrunner-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/JobRunnerMain.java
+++ b/recipes/jobrunner-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/JobRunnerMain.java
@@@ -1,0 -1,132 +1,151 @@@
+ package org.apache.helix.provisioning.yarn.example;
+
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements. See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership. The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License. You may obtain a copy of the License at
++ *
++ * http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing,
++ * software distributed under the License is distributed on an
++ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++ * KIND, either express or implied. See the License for the
++ * specific language governing permissions and limitations
++ * under the License.
++ */
++
+ import java.io.File;
+ import java.util.Collection;
+ import java.util.List;
+
+ import org.apache.commons.cli.CommandLine;
+ import org.apache.commons.cli.GnuParser;
+ import org.apache.commons.cli.Option;
+ import org.apache.commons.cli.Options;
+ import org.apache.helix.ClusterMessagingService;
+ import org.apache.helix.HelixConnection;
+ import org.apache.helix.HelixManager;
+ import org.apache.helix.HelixRole;
+ import org.apache.helix.InstanceType;
+ import org.apache.helix.api.Participant;
+ import org.apache.helix.api.RunningInstance;
+ import org.apache.helix.api.accessor.ClusterAccessor;
+ import org.apache.helix.api.config.ContainerConfig;
+ import org.apache.helix.api.id.ClusterId;
+ import org.apache.helix.api.id.Id;
+ import org.apache.helix.manager.zk.HelixConnectionAdaptor;
+ import org.apache.helix.provisioning.ApplicationSpec;
+ import org.apache.helix.provisioning.ApplicationSpecFactory;
+ import org.apache.helix.provisioning.HelixYarnUtil;
+ import org.apache.helix.provisioning.TaskConfig;
+ import org.apache.helix.provisioning.yarn.AppLauncher;
+ import org.apache.helix.task.TaskDriver;
+ import org.apache.helix.task.Workflow;
+
+ public class JobRunnerMain {
+ public static void main(String[] args) throws Exception {
+ Options opts = new Options();
+ opts.addOption(new Option("app_spec_provider", true,
+ "Application Spec Factory Class that will parse the app_config_spec file"));
+ opts.addOption(new Option("app_config_spec", true,
+ "YAML config file that provides the app specifications"));
+ CommandLine cliParser = new GnuParser().parse(opts, args);
+ String appSpecFactoryClass = cliParser.getOptionValue("app_spec_provider");
+ String yamlConfigFileName = cliParser.getOptionValue("app_config_spec");
+
+ ApplicationSpecFactory applicationSpecFactory =
+ HelixYarnUtil.createInstance(appSpecFactoryClass);
+ File yamlConfigFile = new File(yamlConfigFileName);
+ if (!yamlConfigFile.exists()) {
+ throw new IllegalArgumentException("YAML app_config_spec file: '" + yamlConfigFileName
+ + "' does not exist");
+ }
+ final AppLauncher launcher = new AppLauncher(applicationSpecFactory, yamlConfigFile);
+ launcher.launch();
+ Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() {
+
+ @Override
+ public void run() {
+ launcher.cleanup();
+ }
+ }));
+
+ final ApplicationSpec appSpec = launcher.getApplicationSpec();
+
+ // Repeatedly print status
+ final HelixConnection connection = launcher.pollForConnection();
+ final ClusterId clusterId = ClusterId.from(appSpec.getAppName());
+ // TODO: this is a hack -- TaskDriver should accept a connection instead of a manager
+ HelixManager manager = new HelixConnectionAdaptor(new HelixRole() {
+ @Override
+ public HelixConnection getConnection() {
+ return connection;
+ }
+
+ @Override
+ public ClusterId getClusterId() {
+ return clusterId;
+ }
+
+ @Override
+ public Id getId() {
+ return null;
+ }
+
+ @Override
+ public InstanceType getType() {
+ return InstanceType.ADMINISTRATOR;
+ }
+
+ @Override
+ public ClusterMessagingService getMessagingService() {
+ return null;
+ }
+ });
+
+ // Get all submitted jobs
+ String workflow = null;
+ List<TaskConfig> taskConfigs = appSpec.getTaskConfigs();
+ if (taskConfigs != null) {
+ for (TaskConfig taskConfig : taskConfigs) {
+ String yamlFile = taskConfig.getValue("yamlFile");
+ if (yamlFile != null) {
+ Workflow flow = Workflow.parse(new File(yamlFile));
+ workflow = flow.getName();
+ }
+ }
+ }
+
+ // Repeatedly poll for status
+ if (workflow != null) {
+ ClusterAccessor accessor = connection.createClusterAccessor(clusterId);
+ TaskDriver driver = new TaskDriver(manager);
+ while (true) {
+ System.out.println("CONTAINER STATUS");
+ System.out.println("----------------");
+ Collection<Participant> participants = accessor.readParticipants().values();
+ for (Participant participant : participants) {
+ ContainerConfig containerConfig = participant.getContainerConfig();
+ if (containerConfig != null) {
+ System.out.println(participant.getId() + "[" + containerConfig.getId() + "]: "
+ + containerConfig.getState());
+ }
+ if (participant.isAlive()) {
+ RunningInstance runningInstance = participant.getRunningInstance();
+ System.out.println("\tProcess: " + runningInstance.getPid());
+ }
+ }
+ System.out.println("----------------");
+ System.out.println("TASK STATUS");
+ System.out.println("----------------");
+ driver.list(workflow);
+ Thread.sleep(5000);
+ }
+ }
+ }
+ }
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/recipes/jobrunner-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/MyTask.java
----------------------------------------------------------------------
diff --cc recipes/jobrunner-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/MyTask.java
index 0000000,584550d..650bb14
mode 000000,100644..100644
--- a/recipes/jobrunner-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/MyTask.java
+++ b/recipes/jobrunner-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/MyTask.java
@@@ -1,0 -1,53 +1,72 @@@
+ package org.apache.helix.provisioning.yarn.example;
+
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements. See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership. The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License. You may obtain a copy of the License at
++ *
++ * http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing,
++ * software distributed under the License is distributed on an
++ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++ * KIND, either express or implied. See the License for the
++ * specific language governing permissions and limitations
++ * under the License.
++ */
++
+ import org.apache.helix.task.Task;
+ import org.apache.helix.task.TaskCallbackContext;
+ import org.apache.helix.task.TaskResult;
+ import org.apache.log4j.Logger;
+
+ /**
+ * Callbacks for task execution - THIS INTERFACE IS SUBJECT TO CHANGE
+ */
+ public class MyTask implements Task {
+ private static final Logger LOG = Logger.getLogger(MyTask.class);
+ private static final long DEFAULT_DELAY = 60000L;
+ private final long _delay;
+ private volatile boolean _canceled;
+
+ public MyTask(TaskCallbackContext context) {
+ LOG.info("Job config" + context.getJobConfig().getJobConfigMap());
+ if (context.getTaskConfig() != null) {
+ LOG.info("Task config: " + context.getTaskConfig().getConfigMap());
+ }
+ _delay = DEFAULT_DELAY;
+ }
+
+ @Override
+ public TaskResult run() {
+ long expiry = System.currentTimeMillis() + _delay;
+ long timeLeft;
+ while (System.currentTimeMillis() < expiry) {
+ if (_canceled) {
+ timeLeft = expiry - System.currentTimeMillis();
+ return new TaskResult(TaskResult.Status.CANCELED, String.valueOf(timeLeft < 0 ? 0
+ : timeLeft));
+ }
+ sleep(50);
+ }
+ timeLeft = expiry - System.currentTimeMillis();
+ return new TaskResult(TaskResult.Status.COMPLETED, String.valueOf(timeLeft < 0 ? 0 : timeLeft));
+ }
+
+ @Override
+ public void cancel() {
+ _canceled = true;
+ }
+
+ private static void sleep(long d) {
+ try {
+ Thread.sleep(d);
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ }
+ }
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/recipes/jobrunner-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/MyTaskAppSpec.java
----------------------------------------------------------------------
diff --cc recipes/jobrunner-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/MyTaskAppSpec.java
index 0000000,a20994c..50fb3de
mode 000000,100644..100644
--- a/recipes/jobrunner-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/MyTaskAppSpec.java
+++ b/recipes/jobrunner-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/MyTaskAppSpec.java
@@@ -1,0 -1,148 +1,167 @@@
+ package org.apache.helix.provisioning.yarn.example;
+
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements. See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership. The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License. You may obtain a copy of the License at
++ *
++ * http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing,
++ * software distributed under the License is distributed on an
++ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++ * KIND, either express or implied. See the License for the
++ * specific language governing permissions and limitations
++ * under the License.
++ */
++
+ import java.net.URI;
+ import java.net.URISyntaxException;
+ import java.util.List;
+ import java.util.Map;
+
+ import org.apache.helix.api.Scope;
+ import org.apache.helix.api.id.ResourceId;
+ import org.apache.helix.provisioning.AppConfig;
+ import org.apache.helix.provisioning.ApplicationSpec;
+ import org.apache.helix.provisioning.ServiceConfig;
+ import org.apache.helix.provisioning.TaskConfig;
+
+ import com.google.common.collect.Maps;
+
+ public class MyTaskAppSpec implements ApplicationSpec {
+
+ public String _appName;
+
+ public AppConfig _appConfig;
+
+ public List<String> _services;
+
+ private String _appMasterPackageUri;
+
+ private Map<String, String> _servicePackageURIMap;
+
+ private Map<String, String> _serviceMainClassMap;
+
+ private Map<String, ServiceConfig> _serviceConfigMap;
+
+ private List<TaskConfig> _taskConfigs;
+
+ public AppConfig getAppConfig() {
+ return _appConfig;
+ }
+
+ public void setAppConfig(AppConfig appConfig) {
+ _appConfig = appConfig;
+ }
+
+ public String getAppMasterPackageUri() {
+ return _appMasterPackageUri;
+ }
+
+ public void setAppMasterPackageUri(String appMasterPackageUri) {
+ _appMasterPackageUri = appMasterPackageUri;
+ }
+
+ public Map<String, String> getServicePackageURIMap() {
+ return _servicePackageURIMap;
+ }
+
+ public void setServicePackageURIMap(Map<String, String> servicePackageURIMap) {
+ _servicePackageURIMap = servicePackageURIMap;
+ }
+
+ public Map<String, String> getServiceMainClassMap() {
+ return _serviceMainClassMap;
+ }
+
+ public void setServiceMainClassMap(Map<String, String> serviceMainClassMap) {
+ _serviceMainClassMap = serviceMainClassMap;
+ }
+
+ public Map<String, Map<String, String>> getServiceConfigMap() {
+ Map<String, Map<String, String>> map = Maps.newHashMap();
+ for (String service : _serviceConfigMap.keySet()) {
+ map.put(service, _serviceConfigMap.get(service).getSimpleFields());
+ }
+ return map;
+ }
+
+ public void setServiceConfigMap(Map<String, Map<String, Object>> map) {
+ _serviceConfigMap = Maps.newHashMap();
+
+ for (String service : map.keySet()) {
+ ServiceConfig serviceConfig = new ServiceConfig(Scope.resource(ResourceId.from(service)));
+ Map<String, Object> simpleFields = map.get(service);
+ for (String key : simpleFields.keySet()) {
+ serviceConfig.setSimpleField(key, simpleFields.get(key).toString());
+ }
+ _serviceConfigMap.put(service, serviceConfig);
+ }
+ }
+
+ public void setAppName(String appName) {
+ _appName = appName;
+ }
+
+ public void setServices(List<String> services) {
+ _services = services;
+ }
+
+ public void setTaskConfigs(List<TaskConfig> taskConfigs) {
+ _taskConfigs = taskConfigs;
+ }
+
+ @Override
+ public String getAppName() {
+ return _appName;
+ }
+
+ @Override
+ public AppConfig getConfig() {
+ return _appConfig;
+ }
+
+ @Override
+ public List<String> getServices() {
+ return _services;
+ }
+
+ @Override
+ public URI getAppMasterPackage() {
+ try {
+ return new URI(_appMasterPackageUri);
+ } catch (URISyntaxException e) {
+ return null;
+ }
+ }
+
+ @Override
+ public URI getServicePackage(String serviceName) {
+ try {
+ return new URI(_servicePackageURIMap.get(serviceName));
+ } catch (URISyntaxException e) {
+ return null;
+ }
+ }
+
+ @Override
+ public String getServiceMainClass(String service) {
+ return _serviceMainClassMap.get(service);
+ }
+
+ @Override
+ public ServiceConfig getServiceConfig(String serviceName) {
+ return _serviceConfigMap.get(serviceName);
+ }
+
+ @Override
+ public List<TaskConfig> getTaskConfigs() {
+ return _taskConfigs;
+ }
+
+ }
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/recipes/jobrunner-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/MyTaskAppSpecFactory.java
----------------------------------------------------------------------
diff --cc recipes/jobrunner-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/MyTaskAppSpecFactory.java
index 0000000,17601ba..d5f486a
mode 000000,100644..100644
--- a/recipes/jobrunner-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/MyTaskAppSpecFactory.java
+++ b/recipes/jobrunner-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/MyTaskAppSpecFactory.java
@@@ -1,0 -1,28 +1,47 @@@
+ package org.apache.helix.provisioning.yarn.example;
+
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements. See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership. The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License. You may obtain a copy of the License at
++ *
++ * http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing,
++ * software distributed under the License is distributed on an
++ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++ * KIND, either express or implied. See the License for the
++ * specific language governing permissions and limitations
++ * under the License.
++ */
++
+ import java.io.InputStream;
+
+ import org.apache.helix.provisioning.ApplicationSpec;
+ import org.apache.helix.provisioning.ApplicationSpecFactory;
+ import org.yaml.snakeyaml.Yaml;
+
+ public class MyTaskAppSpecFactory implements ApplicationSpecFactory {
+
+ @Override
+ public ApplicationSpec fromYaml(InputStream inputstream) {
+ return (ApplicationSpec) new Yaml().load(inputstream);
+ // return data;
+ }
+
+ public static void main(String[] args) {
+
+ Yaml yaml = new Yaml();
+ InputStream resourceAsStream =
+ ClassLoader.getSystemClassLoader().getResourceAsStream("job_runner_app_spec.yaml");
+ MyTaskAppSpec spec = yaml.loadAs(resourceAsStream, MyTaskAppSpec.class);
+ String dump = yaml.dump(spec);
+ System.out.println(dump);
+ System.out.println(spec.getServiceConfig("JobRunner").getStringField("num_containers", "1"));
+
+ }
+ }
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/recipes/jobrunner-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/MyTaskService.java
----------------------------------------------------------------------
diff --cc recipes/jobrunner-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/MyTaskService.java
index 0000000,22c3ab0..7c50e53
mode 000000,100644..100644
--- a/recipes/jobrunner-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/MyTaskService.java
+++ b/recipes/jobrunner-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/MyTaskService.java
@@@ -1,0 -1,62 +1,81 @@@
+ package org.apache.helix.provisioning.yarn.example;
+
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements. See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership. The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License. You may obtain a copy of the License at
++ *
++ * http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing,
++ * software distributed under the License is distributed on an
++ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++ * KIND, either express or implied. See the License for the
++ * specific language governing permissions and limitations
++ * under the License.
++ */
++
+ import java.util.HashMap;
+ import java.util.Map;
+
+ import org.apache.helix.HelixConnection;
+ import org.apache.helix.HelixManager;
+ import org.apache.helix.api.id.ClusterId;
+ import org.apache.helix.api.id.ParticipantId;
+ import org.apache.helix.api.id.StateModelDefId;
+ import org.apache.helix.manager.zk.HelixConnectionAdaptor;
+ import org.apache.helix.participant.AbstractParticipantService;
+ import org.apache.helix.provisioning.ServiceConfig;
+ import org.apache.helix.provisioning.participant.StatelessParticipantService;
+ import org.apache.helix.task.Task;
+ import org.apache.helix.task.TaskCallbackContext;
+ import org.apache.helix.task.TaskFactory;
+ import org.apache.helix.task.TaskStateModelFactory;
+ import org.apache.log4j.Logger;
+
+ /**
+ * A simple "service" for task callback registration.
+ */
+ public class MyTaskService extends StatelessParticipantService {
+
+ private static Logger LOG = Logger.getLogger(AbstractParticipantService.class);
+
+ static String SERVICE_NAME = "JobRunner";
+
+ public MyTaskService(HelixConnection connection, ClusterId clusterId,
+ ParticipantId participantId) {
+ super(connection, clusterId, participantId, SERVICE_NAME);
+ }
+
+ @Override
+ protected void init(ServiceConfig serviceConfig) {
+ LOG.info("Initialized service with config " + serviceConfig);
+
+ // Register for callbacks for tasks
+ HelixManager manager = new HelixConnectionAdaptor(getParticipant());
+ Map<String, TaskFactory> taskFactoryReg = new HashMap<String, TaskFactory>();
+ taskFactoryReg.put("RunTask", new TaskFactory() {
+ @Override
+ public Task createNewTask(TaskCallbackContext context) {
+ return new MyTask(context);
+ }
+ });
+ getParticipant().getStateMachineEngine().registerStateModelFactory(
+ StateModelDefId.from("Task"), new TaskStateModelFactory(manager, taskFactoryReg));
+ }
+
+ @Override
+ protected void goOnline() {
+ LOG.info("JobRunner service is told to go online");
+ }
+
+ @Override
+ protected void goOffine() {
+ LOG.info("JobRunner service is told to go offline");
+ }
+
+ }
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/recipes/jobrunner-yarn/src/main/resources/dummy_job.yaml
----------------------------------------------------------------------
diff --cc recipes/jobrunner-yarn/src/main/resources/dummy_job.yaml
index 0000000,0187fd1..ff44243
mode 000000,100644..100644
--- a/recipes/jobrunner-yarn/src/main/resources/dummy_job.yaml
+++ b/recipes/jobrunner-yarn/src/main/resources/dummy_job.yaml
@@@ -1,0 -1,18 +1,36 @@@
++#
++# Licensed to the Apache Software Foundation (ASF) under one
++# or more contributor license agreements. See the NOTICE file
++# distributed with this work for additional information
++# regarding copyright ownership. The ASF licenses this file
++# to you under the Apache License, Version 2.0 (the
++# "License"); you may not use this file except in compliance
++# with the License. You may obtain a copy of the License at
++#
++# http://www.apache.org/licenses/LICENSE-2.0
++#
++# Unless required by applicable law or agreed to in writing,
++# software distributed under the License is distributed on an
++# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++# KIND, either express or implied. See the License for the
++# specific language governing permissions and limitations
++# under the License.
++#
+ name: myJob1234
+ jobs:
+ - name: myJob1234
+ command: RunTask
+ jobConfigMap: {
+ k1: "v1",
+ k2: "v2"
+ }
+ tasks:
+ - taskConfigMap: {
+ k3: "v3"
+ }
+ - taskConfigMap: {
+ k4: "v4"
+ }
+ - taskConfigMap: {
+ k5: "v5"
+ }
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/recipes/jobrunner-yarn/src/main/resources/job_runner_app_spec.yaml
----------------------------------------------------------------------
diff --cc recipes/jobrunner-yarn/src/main/resources/job_runner_app_spec.yaml
index 0000000,0945690..83c7edf
mode 000000,100755..100755
--- a/recipes/jobrunner-yarn/src/main/resources/job_runner_app_spec.yaml
+++ b/recipes/jobrunner-yarn/src/main/resources/job_runner_app_spec.yaml
@@@ -1,0 -1,23 +1,41 @@@
++#
++# Licensed to the Apache Software Foundation (ASF) under one
++# or more contributor license agreements. See the NOTICE file
++# distributed with this work for additional information
++# regarding copyright ownership. The ASF licenses this file
++# to you under the Apache License, Version 2.0 (the
++# "License"); you may not use this file except in compliance
++# with the License. You may obtain a copy of the License at
++#
++# http://www.apache.org/licenses/LICENSE-2.0
++#
++# Unless required by applicable law or agreed to in writing,
++# software distributed under the License is distributed on an
++# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++# KIND, either express or implied. See the License for the
++# specific language governing permissions and limitations
++# under the License.
++#
+ !!org.apache.helix.provisioning.yarn.example.MyTaskAppSpec
+ appConfig:
+ config: {
+ k1: v1
+ }
+ appMasterPackageUri: 'file:///Users/kbiscuit/helix/incubator-helix/recipes/jobrunner-yarn/target/jobrunner-yarn-0.7.1-incubating-SNAPSHOT-pkg.tar'
+ appName: testApp
+ serviceConfigMap:
+ JobRunner: {
+ num_containers: 3,
+ memory: 1024
+ }
+ serviceMainClassMap: {
+ JobRunner: org.apache.helix.provisioning.yarn.example.MyTaskService
+ }
+ servicePackageURIMap: {
+ JobRunner: 'file:///Users/kbiscuit/helix/incubator-helix/recipes/jobrunner-yarn/target/jobrunner-yarn-0.7.1-incubating-SNAPSHOT-pkg.tar'
+ }
+ services: [
+ JobRunner]
+ taskConfigs:
+ - name: JobRunnerWorkflow
+ yamlFile: 'file:///Users/kbiscuit/helix/incubator-helix/recipes/jobrunner-yarn/src/main/resources/dummy_job.yaml'
[45/50] [abbrv] git commit: [HELIX-440] One-time scheduling for task
framework
Posted by ka...@apache.org.
[HELIX-440] One-time scheduling for task framework
Project: http://git-wip-us.apache.org/repos/asf/helix/repo
Commit: http://git-wip-us.apache.org/repos/asf/helix/commit/346d8a32
Tree: http://git-wip-us.apache.org/repos/asf/helix/tree/346d8a32
Diff: http://git-wip-us.apache.org/repos/asf/helix/diff/346d8a32
Branch: refs/heads/master
Commit: 346d8a32ed91db9ce182d5cea911769a23654d0b
Parents: 0272e37
Author: Kanak Biscuitwala <ka...@apache.org>
Authored: Thu Jun 5 09:37:31 2014 -0700
Committer: Kanak Biscuitwala <ka...@apache.org>
Committed: Wed Jul 9 09:36:14 2014 -0700
----------------------------------------------------------------------
.../org/apache/helix/task/ScheduleConfig.java | 165 +++++++++++++++++++
.../org/apache/helix/task/TaskRebalancer.java | 74 +++++++++
.../java/org/apache/helix/task/TaskUtil.java | 12 ++
.../java/org/apache/helix/task/Workflow.java | 33 ++++
.../org/apache/helix/task/WorkflowConfig.java | 55 ++++++-
.../apache/helix/task/beans/ScheduleBean.java | 32 ++++
.../apache/helix/task/beans/WorkflowBean.java | 1 +
.../task/TestIndependentTaskRebalancer.java | 34 ++++
8 files changed, 404 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/helix/blob/346d8a32/helix-core/src/main/java/org/apache/helix/task/ScheduleConfig.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/ScheduleConfig.java b/helix-core/src/main/java/org/apache/helix/task/ScheduleConfig.java
new file mode 100644
index 0000000..9e3801e
--- /dev/null
+++ b/helix-core/src/main/java/org/apache/helix/task/ScheduleConfig.java
@@ -0,0 +1,165 @@
+package org.apache.helix.task;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import java.util.Date;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.helix.task.beans.ScheduleBean;
+import org.apache.log4j.Logger;
+
+/**
+ * Configuration for scheduling both one-time and recurring workflows in Helix
+ */
+public class ScheduleConfig {
+ private static final Logger LOG = Logger.getLogger(ScheduleConfig.class);
+
+ /** Enforce that a workflow can recur at most once per minute */
+ private static final long MIN_RECURRENCE_MILLIS = 60 * 1000;
+
+ private final Date _startTime;
+ private final TimeUnit _recurUnit;
+ private final Long _recurInterval;
+
+ private ScheduleConfig(Date startTime, TimeUnit recurUnit, Long recurInterval) {
+ _startTime = startTime;
+ _recurUnit = recurUnit;
+ _recurInterval = recurInterval;
+ }
+
+ /**
+ * When the workflow should be started
+ * @return Date object representing the start time
+ */
+ public Date getStartTime() {
+ return _startTime;
+ }
+
+ /**
+ * The unit of the recurrence interval if this is a recurring workflow
+ * @return the recurrence interval unit, or null if this workflow is a one-time workflow
+ */
+ public TimeUnit getRecurrenceUnit() {
+ return _recurUnit;
+ }
+
+ /**
+ * The magnitude of the recurrence interval if this is a recurring task
+ * @return the recurrence interval magnitude, or null if this workflow is a one-time workflow
+ */
+ public Long getRecurrenceInterval() {
+ return _recurInterval;
+ }
+
+ /**
+ * Check if this workflow is recurring
+ * @return true if recurring, false if one-time
+ */
+ public boolean isRecurring() {
+ return _recurUnit != null && _recurInterval != null;
+ }
+
+ /**
+ * Check if the configured schedule is valid given these constraints:
+ * <ul>
+ * <li>All workflows must have a start time</li>
+ * <li>Recurrence unit and interval must both be present if either is present</li>
+ * <li>Recurring workflows must have a positive interval magnitude</li>
+ * <li>Intervals must be at least one minute</li>
+ * </ul>
+ * @return true if valid, false if invalid
+ */
+ public boolean isValid() {
+ // For now, disallow recurring workflows
+ if (isRecurring()) {
+ LOG.error("Recurring workflows are not currently supported.");
+ return false;
+ }
+
+ // All schedules must have a start time even if they are recurring
+ if (_startTime == null) {
+ LOG.error("All schedules must have a start time!");
+ return false;
+ }
+
+ // Recurrence properties must both either be present or absent
+ if ((_recurUnit == null && _recurInterval != null)
+ || (_recurUnit != null && _recurInterval == null)) {
+ LOG.error("Recurrence interval and unit must either both be present or both be absent");
+ return false;
+ }
+
+ // Only positive recurrence intervals are allowed if present
+ if (_recurInterval != null && _recurInterval <= 0) {
+ LOG.error("Recurrence interval must be positive");
+ return false;
+ }
+
+ // Enforce minimum interval length
+ if (_recurUnit != null) {
+ long converted = _recurUnit.toMillis(_recurInterval);
+ if (converted < MIN_RECURRENCE_MILLIS) {
+ LOG.error("Recurrence must be at least " + MIN_RECURRENCE_MILLIS + " ms");
+ return false;
+ }
+ }
+ return true;
+ }
+
+ /**
+ * Create this configuration from a serialized bean
+ * @param bean flat configuration of the schedule
+ * @return instantiated ScheduleConfig
+ */
+ public static ScheduleConfig from(ScheduleBean bean) {
+ return new ScheduleConfig(bean.startTime, bean.recurUnit, bean.recurInterval);
+ }
+
+ /**
+ * Create a schedule for a workflow that runs once at a specified time
+ * @param startTime the time to start the workflow
+ * @return instantiated ScheduleConfig
+ */
+ public static ScheduleConfig oneTimeDelayedStart(Date startTime) {
+ return new ScheduleConfig(startTime, null, null);
+ }
+
+ /*
+ * Create a schedule for a recurring workflow that should start immediately
+ * @param recurUnit the unit of the recurrence interval
+ * @param recurInterval the magnitude of the recurrence interval
+ * @return instantiated ScheduleConfig
+ * public static ScheduleConfig recurringFromNow(TimeUnit recurUnit, long recurInterval) {
+ * return new ScheduleConfig(new Date(), recurUnit, recurInterval);
+ * }
+ */
+
+ /*
+ * Create a schedule for a recurring workflow that should start at a specific time
+ * @param startTime the time to start the workflow the first time
+ * @param recurUnit the unit of the recurrence interval
+ * @param recurInterval the magnitude of the recurrence interval
+ * @return instantiated ScheduleConfig
+ * public static ScheduleConfig recurringFromDate(Date startTime, TimeUnit recurUnit,
+ * long recurInterval) {
+ * return new ScheduleConfig(startTime, recurUnit, recurInterval);
+ * }
+ */
+}
http://git-wip-us.apache.org/repos/asf/helix/blob/346d8a32/helix-core/src/main/java/org/apache/helix/task/TaskRebalancer.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/TaskRebalancer.java b/helix-core/src/main/java/org/apache/helix/task/TaskRebalancer.java
index 043e7dd..37c8548 100644
--- a/helix-core/src/main/java/org/apache/helix/task/TaskRebalancer.java
+++ b/helix-core/src/main/java/org/apache/helix/task/TaskRebalancer.java
@@ -21,6 +21,7 @@ package org.apache.helix.task;
import java.util.ArrayList;
import java.util.Collection;
+import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
@@ -29,6 +30,9 @@ import java.util.Set;
import java.util.SortedSet;
import java.util.TreeMap;
import java.util.TreeSet;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
import org.apache.helix.AccessOption;
import org.apache.helix.HelixDataAccessor;
@@ -50,6 +54,8 @@ import org.apache.helix.model.ResourceAssignment;
import org.apache.log4j.Logger;
import com.google.common.base.Joiner;
+import com.google.common.collect.BiMap;
+import com.google.common.collect.HashBiMap;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Sets;
@@ -58,6 +64,13 @@ import com.google.common.collect.Sets;
*/
public abstract class TaskRebalancer implements HelixRebalancer {
private static final Logger LOG = Logger.getLogger(TaskRebalancer.class);
+
+ /** Management of already-scheduled workflows across jobs */
+ private static final BiMap<String, Date> SCHEDULED_WORKFLOWS = HashBiMap.create();
+ private static final ScheduledExecutorService SCHEDULED_EXECUTOR = Executors
+ .newSingleThreadScheduledExecutor();
+
+ /** For connection management */
private HelixManager _manager;
/**
@@ -116,6 +129,12 @@ public abstract class TaskRebalancer implements HelixRebalancer {
WorkflowConfig workflowCfg = TaskUtil.getWorkflowCfg(_manager, workflowResource);
WorkflowContext workflowCtx = TaskUtil.getWorkflowContext(_manager, workflowResource);
+ // Check for readiness, and stop processing if it's not ready
+ boolean isReady = scheduleIfNotReady(workflowCfg, workflowResource, resourceName);
+ if (!isReady) {
+ return emptyAssignment(resourceName);
+ }
+
// Initialize workflow context if needed
if (workflowCtx == null) {
workflowCtx = new WorkflowContext(new ZNRecord("WorkflowContext"));
@@ -422,6 +441,43 @@ public abstract class TaskRebalancer implements HelixRebalancer {
}
/**
+ * Check if a workflow is ready to schedule, and schedule a rebalance if it is not
+ * @param workflowCfg the workflow to check
+ * @param workflowResource the Helix resource associated with the workflow
+ * @param jobResource a job from the workflow
+ * @return true if ready, false if not ready
+ */
+ private boolean scheduleIfNotReady(WorkflowConfig workflowCfg, String workflowResource,
+ String jobResource) {
+ // Ignore non-scheduled workflows
+ if (workflowCfg == null || workflowCfg.getScheduleConfig() == null) {
+ return true;
+ }
+
+ // Figure out when this should be run, and if it's ready, then just run it
+ ScheduleConfig scheduleConfig = workflowCfg.getScheduleConfig();
+ Date startTime = scheduleConfig.getStartTime();
+ long delay = startTime.getTime() - new Date().getTime();
+ if (delay <= 0) {
+ SCHEDULED_WORKFLOWS.remove(workflowResource);
+ SCHEDULED_WORKFLOWS.inverse().remove(startTime);
+ return true;
+ }
+
+ // No need to schedule the same runnable at the same time
+ if (SCHEDULED_WORKFLOWS.containsKey(workflowResource)
+ || SCHEDULED_WORKFLOWS.inverse().containsKey(startTime)) {
+ return false;
+ }
+
+ // For workflows not yet scheduled, schedule them and record it
+ RebalanceInvoker rebalanceInvoker = new RebalanceInvoker(_manager, jobResource);
+ SCHEDULED_WORKFLOWS.put(workflowResource, startTime);
+ SCHEDULED_EXECUTOR.schedule(rebalanceInvoker, delay, TimeUnit.MILLISECONDS);
+ return false;
+ }
+
+ /**
* Checks if the job has completed.
* @param ctx The rebalancer context.
* @param allPartitions The set of partitions to check.
@@ -660,4 +716,22 @@ public abstract class TaskRebalancer implements HelixRebalancer {
_state = state;
}
}
+
+ /**
+ * The simplest possible runnable that will trigger a run of the controller pipeline
+ */
+ private static class RebalanceInvoker implements Runnable {
+ private final HelixManager _manager;
+ private final String _resource;
+
+ public RebalanceInvoker(HelixManager manager, String resource) {
+ _manager = manager;
+ _resource = resource;
+ }
+
+ @Override
+ public void run() {
+ TaskUtil.invokeRebalance(_manager, _resource);
+ }
+ }
}
http://git-wip-us.apache.org/repos/asf/helix/blob/346d8a32/helix-core/src/main/java/org/apache/helix/task/TaskUtil.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/TaskUtil.java b/helix-core/src/main/java/org/apache/helix/task/TaskUtil.java
index 96b7e55..43a1741 100644
--- a/helix-core/src/main/java/org/apache/helix/task/TaskUtil.java
+++ b/helix-core/src/main/java/org/apache/helix/task/TaskUtil.java
@@ -36,6 +36,7 @@ import org.apache.helix.api.State;
import org.apache.helix.api.id.PartitionId;
import org.apache.helix.model.CurrentState;
import org.apache.helix.model.HelixConfigScope;
+import org.apache.helix.model.IdealState;
import org.apache.helix.model.ResourceAssignment;
import org.apache.helix.model.builder.HelixConfigScopeBuilder;
import org.apache.log4j.Logger;
@@ -182,6 +183,17 @@ public class TaskUtil {
return Collections.emptyMap();
}
+ /**
+ * Trigger a controller pipeline execution for a given resource.
+ * @param manager Helix connection
+ * @param resource the name of the resource changed to triggering the execution
+ */
+ public static void invokeRebalance(HelixManager manager, String resource) {
+ // The pipeline is idempotent, so touching an ideal state is enough to trigger a pipeline run
+ HelixDataAccessor accessor = manager.getHelixDataAccessor();
+ accessor.updateProperty(accessor.keyBuilder().idealStates(resource), new IdealState(resource));
+ }
+
private static Map<String, String> getResourceConfigMap(HelixManager manager, String resource) {
HelixConfigScope scope = getResourceConfigScope(manager.getClusterName(), resource);
ConfigAccessor configAccessor = manager.getConfigAccessor();
http://git-wip-us.apache.org/repos/asf/helix/blob/346d8a32/helix-core/src/main/java/org/apache/helix/task/Workflow.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/Workflow.java b/helix-core/src/main/java/org/apache/helix/task/Workflow.java
index 70fb82c..fef0274 100644
--- a/helix-core/src/main/java/org/apache/helix/task/Workflow.java
+++ b/helix-core/src/main/java/org/apache/helix/task/Workflow.java
@@ -27,6 +27,7 @@ import java.io.Reader;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.Collection;
+import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -82,12 +83,31 @@ public class Workflow {
return _taskConfigs;
}
+ public WorkflowConfig getWorkflowConfig() {
+ return _workflowConfig;
+ }
+
public Map<String, String> getResourceConfigMap() throws Exception {
Map<String, String> cfgMap = new HashMap<String, String>();
cfgMap.put(WorkflowConfig.DAG, _workflowConfig.getJobDag().toJson());
cfgMap.put(WorkflowConfig.EXPIRY, String.valueOf(_workflowConfig.getExpiry()));
cfgMap.put(WorkflowConfig.TARGET_STATE, _workflowConfig.getTargetState().name());
+ // Populate schedule if present
+ ScheduleConfig scheduleConfig = _workflowConfig.getScheduleConfig();
+ if (scheduleConfig != null) {
+ Date startTime = scheduleConfig.getStartTime();
+ if (startTime != null) {
+ String formattedTime = WorkflowConfig.DEFAULT_DATE_FORMAT.format(startTime);
+ cfgMap.put(WorkflowConfig.START_TIME, formattedTime);
+ }
+ if (scheduleConfig.isRecurring()) {
+ cfgMap.put(WorkflowConfig.RECURRENCE_UNIT, scheduleConfig.getRecurrenceUnit().toString());
+ cfgMap.put(WorkflowConfig.RECURRENCE_INTERVAL, scheduleConfig.getRecurrenceInterval()
+ .toString());
+ }
+ }
+
return cfgMap;
}
@@ -198,6 +218,10 @@ public class Workflow {
}
}
+ if (wf.schedule != null) {
+ builder.setScheduleConfig(ScheduleConfig.from(wf.schedule));
+ }
+
return builder.build();
}
@@ -235,6 +259,7 @@ public class Workflow {
private JobDag _dag;
private Map<String, Map<String, String>> _jobConfigs;
private Map<String, List<TaskConfig>> _taskConfigs;
+ private ScheduleConfig _scheduleConfig;
private long _expiry;
public Builder(String name) {
@@ -291,6 +316,11 @@ public class Workflow {
return this;
}
+ public Builder setScheduleConfig(ScheduleConfig scheduleConfig) {
+ _scheduleConfig = scheduleConfig;
+ return this;
+ }
+
public Builder setExpiry(long expiry) {
_expiry = expiry;
return this;
@@ -309,6 +339,9 @@ public class Workflow {
WorkflowConfig.Builder builder = new WorkflowConfig.Builder();
builder.setTaskDag(_dag);
builder.setTargetState(TargetState.START);
+ if (_scheduleConfig != null) {
+ builder.setScheduleConfig(_scheduleConfig);
+ }
if (_expiry > 0) {
builder.setExpiry(_expiry);
}
http://git-wip-us.apache.org/repos/asf/helix/blob/346d8a32/helix-core/src/main/java/org/apache/helix/task/WorkflowConfig.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/WorkflowConfig.java b/helix-core/src/main/java/org/apache/helix/task/WorkflowConfig.java
index ff4a2a9..a8aff1f 100644
--- a/helix-core/src/main/java/org/apache/helix/task/WorkflowConfig.java
+++ b/helix-core/src/main/java/org/apache/helix/task/WorkflowConfig.java
@@ -19,29 +19,48 @@ package org.apache.helix.task;
* under the License.
*/
+import java.text.ParseException;
+import java.text.SimpleDateFormat;
+import java.util.Date;
import java.util.Map;
+import java.util.TimeZone;
+
+import org.apache.log4j.Logger;
/**
* Provides a typed interface to workflow level configurations. Validates the configurations.
*/
public class WorkflowConfig {
+ private static final Logger LOG = Logger.getLogger(WorkflowConfig.class);
+
/* Config fields */
public static final String DAG = "Dag";
public static final String TARGET_STATE = "TargetState";
public static final String EXPIRY = "Expiry";
+ public static final String START_TIME = "StartTime";
+ public static final String RECURRENCE_UNIT = "RecurrenceUnit";
+ public static final String RECURRENCE_INTERVAL = "RecurrenceInterval";
/* Default values */
public static final long DEFAULT_EXPIRY = 24 * 60 * 60 * 1000;
+ public static final SimpleDateFormat DEFAULT_DATE_FORMAT = new SimpleDateFormat(
+ "MM-dd-yyyy HH:mm:ss");
+ static {
+ DEFAULT_DATE_FORMAT.setTimeZone(TimeZone.getTimeZone("UTC"));
+ }
/* Member variables */
private JobDag _jobDag;
private TargetState _targetState;
private long _expiry;
+ private ScheduleConfig _scheduleConfig;
- private WorkflowConfig(JobDag jobDag, TargetState targetState, long expiry) {
+ private WorkflowConfig(JobDag jobDag, TargetState targetState, long expiry,
+ ScheduleConfig scheduleConfig) {
_jobDag = jobDag;
_targetState = targetState;
_expiry = expiry;
+ _scheduleConfig = scheduleConfig;
}
public JobDag getJobDag() {
@@ -56,10 +75,15 @@ public class WorkflowConfig {
return _expiry;
}
+ public ScheduleConfig getScheduleConfig() {
+ return _scheduleConfig;
+ }
+
public static class Builder {
private JobDag _taskDag = JobDag.EMPTY_DAG;
private TargetState _targetState = TargetState.START;
private long _expiry = DEFAULT_EXPIRY;
+ private ScheduleConfig _scheduleConfig;
public Builder() {
// Nothing to do
@@ -68,7 +92,7 @@ public class WorkflowConfig {
public WorkflowConfig build() {
validate();
- return new WorkflowConfig(_taskDag, _targetState, _expiry);
+ return new WorkflowConfig(_taskDag, _targetState, _expiry, _scheduleConfig);
}
public Builder setTaskDag(JobDag v) {
@@ -86,6 +110,11 @@ public class WorkflowConfig {
return this;
}
+ public Builder setScheduleConfig(ScheduleConfig scheduleConfig) {
+ _scheduleConfig = scheduleConfig;
+ return this;
+ }
+
public static Builder fromMap(Map<String, String> cfg) {
Builder b = new Builder();
@@ -103,6 +132,24 @@ public class WorkflowConfig {
b.setTargetState(TargetState.valueOf(cfg.get(TARGET_STATE)));
}
+ // Parse schedule-specific configs, if they exist
+ Date startTime = null;
+ if (cfg.containsKey(START_TIME)) {
+ try {
+ startTime = DEFAULT_DATE_FORMAT.parse(cfg.get(START_TIME));
+ } catch (ParseException e) {
+ LOG.error("Unparseable date " + cfg.get(START_TIME), e);
+ }
+ }
+ if (cfg.containsKey(RECURRENCE_UNIT) && cfg.containsKey(RECURRENCE_INTERVAL)) {
+ /*
+ * b.setScheduleConfig(ScheduleConfig.recurringFromDate(startTime,
+ * TimeUnit.valueOf(cfg.get(RECURRENCE_UNIT)),
+ * Long.parseLong(cfg.get(RECURRENCE_INTERVAL))));
+ */
+ } else if (startTime != null) {
+ b.setScheduleConfig(ScheduleConfig.oneTimeDelayedStart(startTime));
+ }
return b;
}
@@ -110,6 +157,10 @@ public class WorkflowConfig {
if (_expiry < 0) {
throw new IllegalArgumentException(
String.format("%s has invalid value %s", EXPIRY, _expiry));
+ } else if (_scheduleConfig != null && !_scheduleConfig.isValid()) {
+ throw new IllegalArgumentException(
+ "Scheduler configuration is invalid. The configuration must have a start time if it is "
+ + "one-time, and it must have a positive interval magnitude if it is recurring");
}
}
}
http://git-wip-us.apache.org/repos/asf/helix/blob/346d8a32/helix-core/src/main/java/org/apache/helix/task/beans/ScheduleBean.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/beans/ScheduleBean.java b/helix-core/src/main/java/org/apache/helix/task/beans/ScheduleBean.java
new file mode 100644
index 0000000..9e843f5
--- /dev/null
+++ b/helix-core/src/main/java/org/apache/helix/task/beans/ScheduleBean.java
@@ -0,0 +1,32 @@
+package org.apache.helix.task.beans;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import java.util.Date;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * A bean representing how a workflow can be scheduled in Helix
+ */
+public class ScheduleBean {
+ public Date startTime;
+ public Long recurInterval;
+ public TimeUnit recurUnit;
+}
http://git-wip-us.apache.org/repos/asf/helix/blob/346d8a32/helix-core/src/main/java/org/apache/helix/task/beans/WorkflowBean.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/beans/WorkflowBean.java b/helix-core/src/main/java/org/apache/helix/task/beans/WorkflowBean.java
index 76da4c8..2ea23c7 100644
--- a/helix-core/src/main/java/org/apache/helix/task/beans/WorkflowBean.java
+++ b/helix-core/src/main/java/org/apache/helix/task/beans/WorkflowBean.java
@@ -28,4 +28,5 @@ public class WorkflowBean {
public String name;
public String expiry;
public List<JobBean> jobs;
+ public ScheduleBean schedule;
}
http://git-wip-us.apache.org/repos/asf/helix/blob/346d8a32/helix-core/src/test/java/org/apache/helix/integration/task/TestIndependentTaskRebalancer.java
----------------------------------------------------------------------
diff --git a/helix-core/src/test/java/org/apache/helix/integration/task/TestIndependentTaskRebalancer.java b/helix-core/src/test/java/org/apache/helix/integration/task/TestIndependentTaskRebalancer.java
index 006c3fe..1196f41 100644
--- a/helix-core/src/test/java/org/apache/helix/integration/task/TestIndependentTaskRebalancer.java
+++ b/helix-core/src/test/java/org/apache/helix/integration/task/TestIndependentTaskRebalancer.java
@@ -19,6 +19,7 @@ package org.apache.helix.integration.task;
* under the License.
*/
+import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -35,6 +36,7 @@ import org.apache.helix.integration.manager.MockParticipantManager;
import org.apache.helix.integration.task.TestTaskRebalancerStopResume.ReindexTask;
import org.apache.helix.participant.StateMachineEngine;
import org.apache.helix.task.JobConfig;
+import org.apache.helix.task.ScheduleConfig;
import org.apache.helix.task.Task;
import org.apache.helix.task.TaskCallbackContext;
import org.apache.helix.task.TaskConfig;
@@ -44,7 +46,9 @@ import org.apache.helix.task.TaskResult;
import org.apache.helix.task.TaskResult.Status;
import org.apache.helix.task.TaskState;
import org.apache.helix.task.TaskStateModelFactory;
+import org.apache.helix.task.TaskUtil;
import org.apache.helix.task.Workflow;
+import org.apache.helix.task.WorkflowContext;
import org.apache.helix.tools.ClusterSetup;
import org.testng.Assert;
import org.testng.annotations.BeforeClass;
@@ -246,6 +250,36 @@ public class TestIndependentTaskRebalancer extends ZkIntegrationTestBase {
Assert.assertTrue(_runCounts.values().contains(1));
}
+ @Test
+ public void testOneTimeScheduled() throws Exception {
+ String jobName = TestHelper.getTestMethodName();
+ Workflow.Builder workflowBuilder = new Workflow.Builder(jobName);
+ List<TaskConfig> taskConfigs = Lists.newArrayListWithCapacity(1);
+ Map<String, String> taskConfigMap = Maps.newHashMap();
+ TaskConfig taskConfig1 = new TaskConfig("TaskOne", taskConfigMap, false);
+ taskConfigs.add(taskConfig1);
+ workflowBuilder.addTaskConfigs(jobName, taskConfigs);
+ workflowBuilder.addConfig(jobName, JobConfig.COMMAND, "DummyCommand");
+ Map<String, String> jobConfigMap = Maps.newHashMap();
+ jobConfigMap.put("Timeout", "1000");
+ workflowBuilder.addJobConfigMap(jobName, jobConfigMap);
+ long inFiveSeconds = System.currentTimeMillis() + (5 * 1000);
+ workflowBuilder.setScheduleConfig(ScheduleConfig.oneTimeDelayedStart(new Date(inFiveSeconds)));
+ _driver.start(workflowBuilder.build());
+
+ // Ensure the job completes
+ TestUtil.pollForWorkflowState(_manager, jobName, TaskState.IN_PROGRESS);
+ TestUtil.pollForWorkflowState(_manager, jobName, TaskState.COMPLETED);
+
+ // Ensure that the class was invoked
+ Assert.assertTrue(_invokedClasses.contains(TaskOne.class.getName()));
+
+ // Check that the workflow only started after the start time (with a 1 second buffer)
+ WorkflowContext workflowCtx = TaskUtil.getWorkflowContext(_manager, jobName);
+ long startTime = workflowCtx.getStartTime();
+ Assert.assertTrue((startTime + 1000) >= inFiveSeconds);
+ }
+
private class TaskOne extends ReindexTask {
private final boolean _shouldFail;
private final String _instanceName;
[33/50] [abbrv] Port recent task framework changes
Posted by ka...@apache.org.
http://git-wip-us.apache.org/repos/asf/helix/blob/97ca4de4/helix-core/src/main/java/org/apache/helix/task/TaskConfig.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/TaskConfig.java b/helix-core/src/main/java/org/apache/helix/task/TaskConfig.java
index 0287657..547ba48 100644
--- a/helix-core/src/main/java/org/apache/helix/task/TaskConfig.java
+++ b/helix-core/src/main/java/org/apache/helix/task/TaskConfig.java
@@ -19,315 +19,108 @@ package org.apache.helix.task;
* under the License.
*/
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
+import java.io.IOException;
import java.util.Map;
-import java.util.Set;
+import java.util.UUID;
-import org.apache.helix.task.Workflow.WorkflowEnum;
+import org.apache.helix.task.beans.TaskBean;
+import org.apache.log4j.Logger;
+import org.codehaus.jackson.map.ObjectMapper;
-import com.google.common.base.Joiner;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Maps;
/**
- * Provides a typed interface to task configurations.
+ * Configuration for an individual task to be run as part of a job.
*/
public class TaskConfig {
- // // Property names ////
-
- /** The name of the workflow to which the task belongs. */
- public static final String WORKFLOW_ID = "WorkflowID";
- /** The name of the target resource. */
- public static final String TARGET_RESOURCE = "TargetResource";
- /**
- * The set of the target partition states. The value must be a comma-separated list of partition
- * states.
- */
- public static final String TARGET_PARTITION_STATES = "TargetPartitionStates";
- /**
- * The set of the target partition ids. The value must be a comma-separated list of partition ids.
- */
- public static final String TARGET_PARTITIONS = "TargetPartitions";
- /** The command that is to be run by participants. */
- public static final String COMMAND = "Command";
- /** The command configuration to be used by the task partitions. */
- public static final String COMMAND_CONFIG = "CommandConfig";
- /** The timeout for a task partition. */
- public static final String TIMEOUT_PER_PARTITION = "TimeoutPerPartition";
- /** The maximum number of times the task rebalancer may attempt to execute a task partitions. */
- public static final String MAX_ATTEMPTS_PER_PARTITION = "MaxAttemptsPerPartition";
- /** The number of concurrent tasks that are allowed to run on an instance. */
- public static final String NUM_CONCURRENT_TASKS_PER_INSTANCE = "ConcurrentTasksPerInstance";
- /** Support overarching tasks that hang around for a while */
- public static final String LONG_LIVED = "LongLived";
- /** Support giving mapping partition IDs to specific task names **/
- public static final String TASK_NAME_MAP = "TaskNameMap";
-
- // // Default property values ////
-
- public static final long DEFAULT_TIMEOUT_PER_PARTITION = 60 * 60 * 1000; // 1 hr.
- public static final int DEFAULT_MAX_ATTEMPTS_PER_PARTITION = 10;
- public static final int DEFAULT_NUM_CONCURRENT_TASKS_PER_INSTANCE = 1;
-
- private final String _workflow;
- private final String _targetResource;
- private final List<Integer> _targetPartitions;
- private final Set<String> _targetPartitionStates;
- private final String _command;
- private final String _commandConfig;
- private final long _timeoutPerPartition;
- private final int _numConcurrentTasksPerInstance;
- private final int _maxAttemptsPerPartition;
- private final boolean _longLived;
- private final Map<String, String> _taskNameMap;
-
- private TaskConfig(String workflow, String targetResource, List<Integer> targetPartitions,
- Set<String> targetPartitionStates, String command, String commandConfig,
- long timeoutPerPartition, int numConcurrentTasksPerInstance, int maxAttemptsPerPartition,
- boolean longLived, Map<String, String> taskNameMap) {
- _workflow = workflow;
- _targetResource = targetResource;
- _targetPartitions = targetPartitions;
- _targetPartitionStates = targetPartitionStates;
- _command = command;
- _commandConfig = commandConfig;
- _timeoutPerPartition = timeoutPerPartition;
- _numConcurrentTasksPerInstance = numConcurrentTasksPerInstance;
- _maxAttemptsPerPartition = maxAttemptsPerPartition;
- _longLived = longLived;
- _taskNameMap = taskNameMap;
- }
-
- public String getWorkflow() {
- return _workflow == null ? WorkflowEnum.UNSPECIFIED.name() : _workflow;
- }
-
- public String getTargetResource() {
- return _targetResource;
- }
-
- public List<Integer> getTargetPartitions() {
- return _targetPartitions;
- }
-
- public Set<String> getTargetPartitionStates() {
- return _targetPartitionStates;
+ private enum TaskConfigFields {
+ TASK_ID,
+ TASK_COMMAND
}
- public String getCommand() {
- return _command;
- }
+ private static final Logger LOG = Logger.getLogger(TaskConfig.class);
- public String getCommandConfig() {
- return _commandConfig;
- }
+ private final Map<String, String> _configMap;
- public long getTimeoutPerPartition() {
- return _timeoutPerPartition;
+ /**
+ * Instantiate the task config
+ * @param command the command to invoke for the task
+ * @param configMap configuration to be passed as part of the invocation
+ * @param id existing task ID
+ */
+ public TaskConfig(String command, Map<String, String> configMap, String id) {
+ if (configMap == null) {
+ configMap = Maps.newHashMap();
+ }
+ if (id == null) {
+ id = UUID.randomUUID().toString();
+ }
+ configMap.put(TaskConfigFields.TASK_COMMAND.toString(), command);
+ configMap.put(TaskConfigFields.TASK_ID.toString(), id);
+ _configMap = configMap;
}
- public int getNumConcurrentTasksPerInstance() {
- return _numConcurrentTasksPerInstance;
+ /**
+ * Instantiate the task config
+ * @param command the command to invoke for the task
+ * @param configMap configuration to be passed as part of the invocation
+ */
+ public TaskConfig(String command, Map<String, String> configMap) {
+ this(command, configMap, null);
}
- public int getMaxAttemptsPerPartition() {
- return _maxAttemptsPerPartition;
+ /**
+ * Unique identifier for this task
+ * @return UUID as a string
+ */
+ public String getId() {
+ return _configMap.get(TaskConfigFields.TASK_ID.toString());
}
- public boolean isLongLived() {
- return _longLived;
+ /**
+ * Get the command to invoke for this task
+ * @return string command
+ */
+ public String getCommand() {
+ return _configMap.get(TaskConfigFields.TASK_COMMAND.toString());
}
- public Map<String, String> getTaskNameMap() {
- return _taskNameMap;
+ /**
+ * Get the configuration map for this task's command
+ * @return map of configuration key to value
+ */
+ public Map<String, String> getConfigMap() {
+ return _configMap;
}
- public Map<String, String> getResourceConfigMap() {
- Map<String, String> cfgMap = new HashMap<String, String>();
- cfgMap.put(TaskConfig.WORKFLOW_ID, _workflow);
- cfgMap.put(TaskConfig.COMMAND, _command);
- cfgMap.put(TaskConfig.COMMAND_CONFIG, _commandConfig);
- cfgMap.put(TaskConfig.TARGET_RESOURCE, _targetResource);
- if (_targetPartitionStates != null) {
- cfgMap.put(TaskConfig.TARGET_PARTITION_STATES, Joiner.on(",").join(_targetPartitionStates));
- }
- if (_targetPartitions != null) {
- cfgMap.put(TaskConfig.TARGET_PARTITIONS, Joiner.on(",").join(_targetPartitions));
+ @Override
+ public String toString() {
+ ObjectMapper mapper = new ObjectMapper();
+ try {
+ return mapper.writeValueAsString(this);
+ } catch (IOException e) {
+ LOG.error("Could not serialize TaskConfig", e);
}
- cfgMap.put(TaskConfig.TIMEOUT_PER_PARTITION, "" + _timeoutPerPartition);
- cfgMap.put(TaskConfig.MAX_ATTEMPTS_PER_PARTITION, "" + _maxAttemptsPerPartition);
- cfgMap.put(TaskConfig.LONG_LIVED + "", String.valueOf(_longLived));
- cfgMap.put(TaskConfig.NUM_CONCURRENT_TASKS_PER_INSTANCE + "",
- String.valueOf(_numConcurrentTasksPerInstance));
- return cfgMap;
+ return super.toString();
}
/**
- * A builder for {@link TaskConfig}. Validates the configurations.
+ * Instantiate a typed configuration from a bean
+ * @param bean plain bean describing the task
+ * @return instantiated TaskConfig
*/
- public static class Builder {
- private String _workflow;
- private String _targetResource;
- private List<Integer> _targetPartitions;
- private Set<String> _targetPartitionStates;
- private String _command;
- private String _commandConfig;
- private long _timeoutPerPartition = DEFAULT_TIMEOUT_PER_PARTITION;
- private int _numConcurrentTasksPerInstance = DEFAULT_NUM_CONCURRENT_TASKS_PER_INSTANCE;
- private int _maxAttemptsPerPartition = DEFAULT_MAX_ATTEMPTS_PER_PARTITION;
- private boolean _longLived = false;
- private Map<String, String> _taskNameMap = Collections.emptyMap();
-
- public TaskConfig build() {
- validate();
-
- return new TaskConfig(_workflow, _targetResource, _targetPartitions, _targetPartitionStates,
- _command, _commandConfig, _timeoutPerPartition, _numConcurrentTasksPerInstance,
- _maxAttemptsPerPartition, _longLived, _taskNameMap);
- }
-
- /**
- * Convenience method to build a {@link TaskConfig} from a {@code Map<String, String>}.
- * @param cfg A map of property names to their string representations.
- * @return A {@link Builder}.
- */
- public static Builder fromMap(Map<String, String> cfg) {
- Builder b = new Builder();
- if (cfg.containsKey(WORKFLOW_ID)) {
- b.setWorkflow(cfg.get(WORKFLOW_ID));
- }
- if (cfg.containsKey(TARGET_RESOURCE)) {
- b.setTargetResource(cfg.get(TARGET_RESOURCE));
- }
- if (cfg.containsKey(TARGET_PARTITIONS)) {
- b.setTargetPartitions(csvToIntList(cfg.get(TARGET_PARTITIONS)));
- }
- if (cfg.containsKey(TARGET_PARTITION_STATES)) {
- b.setTargetPartitionStates(new HashSet<String>(Arrays.asList(cfg.get(
- TARGET_PARTITION_STATES).split(","))));
- }
- if (cfg.containsKey(COMMAND)) {
- b.setCommand(cfg.get(COMMAND));
- }
- if (cfg.containsKey(COMMAND_CONFIG)) {
- b.setCommandConfig(cfg.get(COMMAND_CONFIG));
- }
- if (cfg.containsKey(TIMEOUT_PER_PARTITION)) {
- b.setTimeoutPerPartition(Long.parseLong(cfg.get(TIMEOUT_PER_PARTITION)));
- }
- if (cfg.containsKey(NUM_CONCURRENT_TASKS_PER_INSTANCE)) {
- b.setNumConcurrentTasksPerInstance(Integer.parseInt(cfg
- .get(NUM_CONCURRENT_TASKS_PER_INSTANCE)));
- }
- if (cfg.containsKey(MAX_ATTEMPTS_PER_PARTITION)) {
- b.setMaxAttemptsPerPartition(Integer.parseInt(cfg.get(MAX_ATTEMPTS_PER_PARTITION)));
- }
- if (cfg.containsKey(LONG_LIVED)) {
- b.setLongLived(Boolean.parseBoolean(cfg.get(LONG_LIVED)));
- }
- return b;
- }
-
- public Builder setWorkflow(String v) {
- _workflow = v;
- return this;
- }
-
- public Builder setTargetResource(String v) {
- _targetResource = v;
- return this;
- }
-
- public Builder setTargetPartitions(List<Integer> v) {
- _targetPartitions = ImmutableList.copyOf(v);
- return this;
- }
-
- public Builder setTargetPartitionStates(Set<String> v) {
- _targetPartitionStates = ImmutableSet.copyOf(v);
- return this;
- }
-
- public Builder setCommand(String v) {
- _command = v;
- return this;
- }
-
- public Builder setCommandConfig(String v) {
- _commandConfig = v;
- return this;
- }
-
- public Builder setTimeoutPerPartition(long v) {
- _timeoutPerPartition = v;
- return this;
- }
-
- public Builder setNumConcurrentTasksPerInstance(int v) {
- _numConcurrentTasksPerInstance = v;
- return this;
- }
-
- public Builder setMaxAttemptsPerPartition(int v) {
- _maxAttemptsPerPartition = v;
- return this;
- }
-
- public Builder setLongLived(boolean isLongLived) {
- _longLived = isLongLived;
- return this;
- }
-
- public Builder setTaskNameMap(Map<String, String> taskNameMap) {
- _taskNameMap = taskNameMap;
- return this;
- }
-
- private void validate() {
- if (_targetResource == null && _targetPartitions == null) {
- throw new IllegalArgumentException(String.format(
- "%s cannot be null without specified partitions", TARGET_RESOURCE));
- }
- if (_targetResource != null && _targetPartitionStates != null
- && _targetPartitionStates.isEmpty()) {
- throw new IllegalArgumentException(String.format("%s cannot be empty",
- TARGET_PARTITION_STATES));
- }
- if (_command == null) {
- throw new IllegalArgumentException(String.format("%s cannot be null", COMMAND));
- }
- if (_timeoutPerPartition < 0) {
- throw new IllegalArgumentException(String.format("%s has invalid value %s",
- TIMEOUT_PER_PARTITION, _timeoutPerPartition));
- }
- if (_numConcurrentTasksPerInstance < 1) {
- throw new IllegalArgumentException(String.format("%s has invalid value %s",
- NUM_CONCURRENT_TASKS_PER_INSTANCE, _numConcurrentTasksPerInstance));
- }
- if (_maxAttemptsPerPartition < 1) {
- throw new IllegalArgumentException(String.format("%s has invalid value %s",
- MAX_ATTEMPTS_PER_PARTITION, _maxAttemptsPerPartition));
- }
- if (_workflow == null) {
- throw new IllegalArgumentException(String.format("%s cannot be null", WORKFLOW_ID));
- }
- }
-
- private static List<Integer> csvToIntList(String csv) {
- String[] vals = csv.split(",");
- List<Integer> l = new ArrayList<Integer>();
- for (String v : vals) {
- if (v != null && !v.isEmpty()) {
- l.add(Integer.parseInt(v));
- }
- }
+ public static TaskConfig from(TaskBean bean) {
+ return new TaskConfig(bean.command, bean.taskConfigMap);
+ }
- return l;
- }
+ /**
+ * Instantiate a typed configuration from a raw string map
+ * @param rawConfigMap mixed map of configuration and task metadata
+ * @return instantiated TaskConfig
+ */
+ public static TaskConfig from(Map<String, String> rawConfigMap) {
+ String taskId = rawConfigMap.get(TaskConfigFields.TASK_ID.toString());
+ String command = rawConfigMap.get(TaskConfigFields.TASK_COMMAND.toString());
+ return new TaskConfig(command, rawConfigMap, taskId);
}
}
http://git-wip-us.apache.org/repos/asf/helix/blob/97ca4de4/helix-core/src/main/java/org/apache/helix/task/TaskContext.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/TaskContext.java b/helix-core/src/main/java/org/apache/helix/task/TaskContext.java
deleted file mode 100644
index d416a86..0000000
--- a/helix-core/src/main/java/org/apache/helix/task/TaskContext.java
+++ /dev/null
@@ -1,135 +0,0 @@
-package org.apache.helix.task;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-import java.util.Map;
-import java.util.TreeMap;
-import org.apache.helix.HelixProperty;
-import org.apache.helix.ZNRecord;
-
-/**
- * Provides a typed interface to the context information stored by {@link TaskRebalancer} in the
- * Helix property store.
- */
-public class TaskContext extends HelixProperty {
- public static final String START_TIME = "START_TIME";
- public static final String PARTITION_STATE = "STATE";
- public static final String NUM_ATTEMPTS = "NUM_ATTEMPTS";
- public static final String FINISH_TIME = "FINISH_TIME";
-
- public TaskContext(ZNRecord record) {
- super(record);
- }
-
- public void setStartTime(long t) {
- _record.setSimpleField(START_TIME, String.valueOf(t));
- }
-
- public long getStartTime() {
- String tStr = _record.getSimpleField(START_TIME);
- if (tStr == null) {
- return -1;
- }
-
- return Long.parseLong(tStr);
- }
-
- public void setPartitionState(int p, TaskPartitionState s) {
- String pStr = String.valueOf(p);
- Map<String, String> map = _record.getMapField(pStr);
- if (map == null) {
- map = new TreeMap<String, String>();
- _record.setMapField(pStr, map);
- }
- map.put(PARTITION_STATE, s.name());
- }
-
- public TaskPartitionState getPartitionState(int p) {
- Map<String, String> map = _record.getMapField(String.valueOf(p));
- if (map == null) {
- return null;
- }
-
- String str = map.get(PARTITION_STATE);
- if (str != null) {
- return TaskPartitionState.valueOf(str);
- } else {
- return null;
- }
- }
-
- public void setPartitionNumAttempts(int p, int n) {
- String pStr = String.valueOf(p);
- Map<String, String> map = _record.getMapField(pStr);
- if (map == null) {
- map = new TreeMap<String, String>();
- _record.setMapField(pStr, map);
- }
- map.put(NUM_ATTEMPTS, String.valueOf(n));
- }
-
- public int incrementNumAttempts(int pId) {
- int n = this.getPartitionNumAttempts(pId);
- if (n < 0) {
- n = 0;
- }
- n += 1;
- this.setPartitionNumAttempts(pId, n);
- return n;
- }
-
- public int getPartitionNumAttempts(int p) {
- Map<String, String> map = _record.getMapField(String.valueOf(p));
- if (map == null) {
- return -1;
- }
-
- String nStr = map.get(NUM_ATTEMPTS);
- if (nStr == null) {
- return -1;
- }
-
- return Integer.parseInt(nStr);
- }
-
- public void setPartitionFinishTime(int p, long t) {
- String pStr = String.valueOf(p);
- Map<String, String> map = _record.getMapField(pStr);
- if (map == null) {
- map = new TreeMap<String, String>();
- _record.setMapField(pStr, map);
- }
- map.put(FINISH_TIME, String.valueOf(t));
- }
-
- public long getPartitionFinishTime(int p) {
- Map<String, String> map = _record.getMapField(String.valueOf(p));
- if (map == null) {
- return -1;
- }
-
- String tStr = map.get(FINISH_TIME);
- if (tStr == null) {
- return -1;
- }
-
- return Long.parseLong(tStr);
- }
-}
http://git-wip-us.apache.org/repos/asf/helix/blob/97ca4de4/helix-core/src/main/java/org/apache/helix/task/TaskDag.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/TaskDag.java b/helix-core/src/main/java/org/apache/helix/task/TaskDag.java
deleted file mode 100644
index ab5bc62..0000000
--- a/helix-core/src/main/java/org/apache/helix/task/TaskDag.java
+++ /dev/null
@@ -1,152 +0,0 @@
-package org.apache.helix.task;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-import org.codehaus.jackson.annotate.JsonProperty;
-import org.codehaus.jackson.map.ObjectMapper;
-
-import java.util.Collections;
-import java.util.Map;
-import java.util.Set;
-import java.util.TreeMap;
-import java.util.TreeSet;
-
-/**
- * Provides a convenient way to construct, traverse,
- * and validate a task dependency graph
- */
-public class TaskDag {
- @JsonProperty("parentsToChildren")
- private final Map<String, Set<String>> _parentsToChildren;
-
- @JsonProperty("childrenToParents")
- private final Map<String, Set<String>> _childrenToParents;
-
- @JsonProperty("allNodes")
- private final Set<String> _allNodes;
-
- public static final TaskDag EMPTY_DAG = new TaskDag();
-
- public TaskDag() {
- _parentsToChildren = new TreeMap<String, Set<String>>();
- _childrenToParents = new TreeMap<String, Set<String>>();
- _allNodes = new TreeSet<String>();
- }
-
- public void addParentToChild(String parent, String child) {
- if (!_parentsToChildren.containsKey(parent)) {
- _parentsToChildren.put(parent, new TreeSet<String>());
- }
- _parentsToChildren.get(parent).add(child);
-
- if (!_childrenToParents.containsKey(child)) {
- _childrenToParents.put(child, new TreeSet<String>());
- }
- _childrenToParents.get(child).add(parent);
-
- _allNodes.add(parent);
- _allNodes.add(child);
- }
-
- public void addNode(String node) {
- _allNodes.add(node);
- }
-
- public Map<String, Set<String>> getParentsToChildren() {
- return _parentsToChildren;
- }
-
- public Map<String, Set<String>> getChildrenToParents() {
- return _childrenToParents;
- }
-
- public Set<String> getAllNodes() {
- return _allNodes;
- }
-
- public Set<String> getDirectChildren(String node) {
- if (!_parentsToChildren.containsKey(node)) {
- return Collections.emptySet();
- }
- return _parentsToChildren.get(node);
- }
-
- public Set<String> getDirectParents(String node) {
- if (!_childrenToParents.containsKey(node)) {
- return Collections.emptySet();
- }
- return _childrenToParents.get(node);
- }
-
- public String toJson() throws Exception {
- return new ObjectMapper().writeValueAsString(this);
- }
-
- public static TaskDag fromJson(String json) {
- try {
- return new ObjectMapper().readValue(json, TaskDag.class);
- } catch (Exception e) {
- throw new IllegalArgumentException("Unable to parse json " + json + " into task dag");
- }
- }
-
- /**
- * Checks that dag contains no cycles and all nodes are reachable.
- */
- public void validate() {
- Set<String> prevIteration = new TreeSet<String>();
-
- // get all unparented nodes
- for (String node : _allNodes) {
- if (getDirectParents(node).isEmpty()) {
- prevIteration.add(node);
- }
- }
-
- // visit children nodes up to max iteration count, by which point we should have exited
- // naturally
- Set<String> allNodesReached = new TreeSet<String>();
- int iterationCount = 0;
- int maxIterations = _allNodes.size() + 1;
-
- while (!prevIteration.isEmpty() && iterationCount < maxIterations) {
- // construct set of all children reachable from prev iteration
- Set<String> thisIteration = new TreeSet<String>();
- for (String node : prevIteration) {
- thisIteration.addAll(getDirectChildren(node));
- }
-
- allNodesReached.addAll(prevIteration);
- prevIteration = thisIteration;
- iterationCount++;
- }
-
- allNodesReached.addAll(prevIteration);
-
- if (iterationCount >= maxIterations) {
- throw new IllegalArgumentException("DAG invalid: cycles detected");
- }
-
- if (!allNodesReached.containsAll(_allNodes)) {
- throw new IllegalArgumentException("DAG invalid: unreachable nodes found. Reachable set is "
- + allNodesReached);
- }
- }
-}
http://git-wip-us.apache.org/repos/asf/helix/blob/97ca4de4/helix-core/src/main/java/org/apache/helix/task/TaskDriver.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/TaskDriver.java b/helix-core/src/main/java/org/apache/helix/task/TaskDriver.java
index dd47625..ada2f99 100644
--- a/helix-core/src/main/java/org/apache/helix/task/TaskDriver.java
+++ b/helix-core/src/main/java/org/apache/helix/task/TaskDriver.java
@@ -20,8 +20,8 @@ package org.apache.helix.task;
*/
import java.io.File;
-import java.util.ArrayList;
import java.util.Arrays;
+import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
@@ -41,11 +41,13 @@ import org.apache.helix.HelixManager;
import org.apache.helix.HelixManagerFactory;
import org.apache.helix.HelixProperty;
import org.apache.helix.InstanceType;
-import org.apache.helix.controller.rebalancer.HelixRebalancer;
+import org.apache.helix.PropertyKey;
import org.apache.helix.model.IdealState;
import org.apache.helix.model.builder.CustomModeISBuilder;
import org.apache.log4j.Logger;
+import com.beust.jcommander.internal.Lists;
+
/**
* CLI for scheduling/canceling workflows
*/
@@ -152,67 +154,77 @@ public class TaskDriver {
flow.getResourceConfigMap());
// then schedule tasks
- for (String task : flow.getTaskConfigs().keySet()) {
- scheduleTask(task, TaskConfig.Builder.fromMap(flow.getTaskConfigs().get(task)).build());
+ for (String job : flow.getJobConfigs().keySet()) {
+ JobConfig.Builder builder = JobConfig.Builder.fromMap(flow.getJobConfigs().get(job));
+ if (flow.getTaskConfigs() != null && flow.getTaskConfigs().containsKey(job)) {
+ builder.addTaskConfigs(flow.getTaskConfigs().get(job));
+ }
+ scheduleJob(job, builder.build());
}
}
- /** Posts new task to cluster */
- private void scheduleTask(String taskResource, TaskConfig taskConfig) throws Exception {
- // Set up task resource based on partitions provided, or from target resource
- int numPartitions;
- List<Integer> partitions = taskConfig.getTargetPartitions();
- String targetResource = taskConfig.getTargetResource();
- if (partitions != null && !partitions.isEmpty()) {
- numPartitions = partitions.size();
- } else if (targetResource != null) {
- numPartitions =
- _admin.getResourceIdealState(_clusterName, taskConfig.getTargetResource())
- .getPartitionSet().size();
- } else {
- numPartitions = 0;
+ /** Posts new job to cluster */
+ private void scheduleJob(String jobResource, JobConfig jobConfig) throws Exception {
+ // Set up job resource based on partitions from target resource
+ int numIndependentTasks = jobConfig.getTaskConfigMap().size();
+ int numPartitions =
+ (numIndependentTasks > 0) ? numIndependentTasks : _admin
+ .getResourceIdealState(_clusterName, jobConfig.getTargetResource()).getPartitionSet()
+ .size();
+ _admin.addResource(_clusterName, jobResource, numPartitions, TaskConstants.STATE_MODEL_NAME);
+
+ // Set the job configuration
+ HelixDataAccessor accessor = _manager.getHelixDataAccessor();
+ PropertyKey.Builder keyBuilder = accessor.keyBuilder();
+ HelixProperty resourceConfig = new HelixProperty(jobResource);
+ resourceConfig.getRecord().getSimpleFields().putAll(jobConfig.getResourceConfigMap());
+ Map<String, TaskConfig> taskConfigMap = jobConfig.getTaskConfigMap();
+ if (taskConfigMap != null) {
+ for (TaskConfig taskConfig : taskConfigMap.values()) {
+ resourceConfig.getRecord().setMapField(taskConfig.getId(), taskConfig.getConfigMap());
+ }
}
- _admin.addResource(_clusterName, taskResource, numPartitions, TaskConstants.STATE_MODEL_NAME);
- _admin.setConfig(TaskUtil.getResourceConfigScope(_clusterName, taskResource),
- taskConfig.getResourceConfigMap());
+ accessor.setProperty(keyBuilder.resourceConfig(jobResource), resourceConfig);
// Push out new ideal state based on number of target partitions
- CustomModeISBuilder builder = new CustomModeISBuilder(taskResource);
+ CustomModeISBuilder builder = new CustomModeISBuilder(jobResource);
builder.setRebalancerMode(IdealState.RebalanceMode.USER_DEFINED);
builder.setNumReplica(1);
builder.setNumPartitions(numPartitions);
builder.setStateModel(TaskConstants.STATE_MODEL_NAME);
for (int i = 0; i < numPartitions; i++) {
- builder.add(taskResource + "_" + i);
+ builder.add(jobResource + "_" + i);
}
IdealState is = builder.build();
- Class<? extends HelixRebalancer> rebalancerClass =
- (targetResource != null) ? TaskRebalancer.class : IndependentTaskRebalancer.class;
- is.setRebalancerClassName(rebalancerClass.getName());
- _admin.setResourceIdealState(_clusterName, taskResource, is);
+ if (taskConfigMap != null && !taskConfigMap.isEmpty()) {
+ is.setRebalancerClassName(GenericTaskRebalancer.class.getName());
+ } else {
+ is.setRebalancerClassName(FixedTargetTaskRebalancer.class.getName());
+ }
+ _admin.setResourceIdealState(_clusterName, jobResource, is);
}
- /** Public method to resume a task/workflow */
+ /** Public method to resume a job/workflow */
public void resume(String resource) {
setTaskTargetState(resource, TargetState.START);
}
- /** Public method to stop a task/workflow */
+ /** Public method to stop a job/workflow */
public void stop(String resource) {
setTaskTargetState(resource, TargetState.STOP);
}
- /** Public method to delete a task/workflow */
+ /** Public method to delete a job/workflow */
public void delete(String resource) {
setTaskTargetState(resource, TargetState.DELETE);
}
/** Helper function to change target state for a given task */
- private void setTaskTargetState(String taskResource, TargetState state) {
+ private void setTaskTargetState(String jobResource, TargetState state) {
HelixDataAccessor accessor = _manager.getHelixDataAccessor();
- HelixProperty p = new HelixProperty(taskResource);
+ HelixProperty p = new HelixProperty(jobResource);
p.getRecord().setSimpleField(WorkflowConfig.TARGET_STATE, state.name());
- accessor.updateProperty(accessor.keyBuilder().resourceConfig(taskResource), p);
+ accessor.updateProperty(accessor.keyBuilder().resourceConfig(jobResource), p);
invokeRebalance();
}
@@ -222,34 +234,24 @@ public class TaskDriver {
WorkflowContext wCtx = TaskUtil.getWorkflowContext(_manager, resource);
LOG.info("Workflow " + resource + " consists of the following tasks: "
- + wCfg.getTaskDag().getAllNodes());
+ + wCfg.getJobDag().getAllNodes());
LOG.info("Current state of workflow is " + wCtx.getWorkflowState().name());
- LOG.info("Task states are: ");
+ LOG.info("Job states are: ");
LOG.info("-------");
- for (String task : wCfg.getTaskDag().getAllNodes()) {
- LOG.info("Task " + task + " is " + wCtx.getTaskState(task));
+ for (String job : wCfg.getJobDag().getAllNodes()) {
+ LOG.info("Task " + job + " is " + wCtx.getJobState(job));
// fetch task information
- TaskContext tCtx = TaskUtil.getTaskContext(_manager, task);
- TaskConfig tCfg = TaskUtil.getTaskCfg(_manager, task);
+ JobContext jCtx = TaskUtil.getJobContext(_manager, job);
// calculate taskPartitions
- List<Integer> partitions;
- if (tCfg.getTargetPartitions() != null) {
- partitions = tCfg.getTargetPartitions();
- } else {
- partitions = new ArrayList<Integer>();
- for (String pStr : _admin.getResourceIdealState(_clusterName, tCfg.getTargetResource())
- .getPartitionSet()) {
- partitions
- .add(Integer.parseInt(pStr.substring(pStr.lastIndexOf("_") + 1, pStr.length())));
- }
- }
+ List<Integer> partitions = Lists.newArrayList(jCtx.getPartitionSet());
+ Collections.sort(partitions);
// group partitions by status
Map<TaskPartitionState, Integer> statusCount = new TreeMap<TaskPartitionState, Integer>();
for (Integer i : partitions) {
- TaskPartitionState s = tCtx.getPartitionState(i);
+ TaskPartitionState s = jCtx.getPartitionState(i);
if (!statusCount.containsKey(s)) {
statusCount.put(s, 0);
}
@@ -288,20 +290,26 @@ public class TaskDriver {
return options;
}
- /** Constructs option group containing options required by all drivable tasks */
+ /** Constructs option group containing options required by all drivable jobs */
@SuppressWarnings("static-access")
private static OptionGroup contructGenericRequiredOptionGroup() {
Option zkAddressOption =
- OptionBuilder.isRequired().hasArgs(1).withArgName("zkAddress").withLongOpt(ZK_ADDRESS)
- .withDescription("ZK address managing target cluster").create();
+ OptionBuilder.isRequired().withLongOpt(ZK_ADDRESS)
+ .withDescription("ZK address managing cluster").create();
+ zkAddressOption.setArgs(1);
+ zkAddressOption.setArgName("zkAddress");
Option clusterNameOption =
- OptionBuilder.isRequired().hasArgs(1).withArgName("clusterName")
- .withLongOpt(CLUSTER_NAME_OPTION).withDescription("Target cluster name").create();
+ OptionBuilder.isRequired().withLongOpt(CLUSTER_NAME_OPTION).withDescription("Cluster name")
+ .create();
+ clusterNameOption.setArgs(1);
+ clusterNameOption.setArgName("clusterName");
Option taskResourceOption =
- OptionBuilder.isRequired().hasArgs(1).withArgName("resourceName")
- .withLongOpt(RESOURCE_OPTION).withDescription("Target workflow or task").create();
+ OptionBuilder.isRequired().withLongOpt(RESOURCE_OPTION)
+ .withDescription("Workflow or job name").create();
+ taskResourceOption.setArgs(1);
+ taskResourceOption.setArgName("resourceName");
OptionGroup group = new OptionGroup();
group.addOption(zkAddressOption);
@@ -310,12 +318,14 @@ public class TaskDriver {
return group;
}
- /** Constructs option group containing options required by all drivable tasks */
- @SuppressWarnings("static-access")
+ /** Constructs option group containing options required by all drivable jobs */
private static OptionGroup constructStartOptionGroup() {
+ @SuppressWarnings("static-access")
Option workflowFileOption =
- OptionBuilder.withLongOpt(WORKFLOW_FILE_OPTION).hasArgs(1).withArgName("workflowFile")
+ OptionBuilder.withLongOpt(WORKFLOW_FILE_OPTION)
.withDescription("Local file describing workflow").create();
+ workflowFileOption.setArgs(1);
+ workflowFileOption.setArgName("workflowFile");
OptionGroup group = new OptionGroup();
group.addOption(workflowFileOption);
http://git-wip-us.apache.org/repos/asf/helix/blob/97ca4de4/helix-core/src/main/java/org/apache/helix/task/TaskFactory.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/TaskFactory.java b/helix-core/src/main/java/org/apache/helix/task/TaskFactory.java
index 0cbf24c..31fddc7 100644
--- a/helix-core/src/main/java/org/apache/helix/task/TaskFactory.java
+++ b/helix-core/src/main/java/org/apache/helix/task/TaskFactory.java
@@ -19,14 +19,15 @@ package org.apache.helix.task;
* under the License.
*/
+
/**
* A factory for {@link Task} objects.
*/
public interface TaskFactory {
/**
* Returns a {@link Task} instance.
- * @param config Configuration information for the task.
+ * @param context Contextual information for the task, including task and job configurations
* @return A {@link Task} instance.
*/
- Task createNewTask(String config);
+ Task createNewTask(TaskCallbackContext context);
}
http://git-wip-us.apache.org/repos/asf/helix/blob/97ca4de4/helix-core/src/main/java/org/apache/helix/task/TaskRebalancer.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/TaskRebalancer.java b/helix-core/src/main/java/org/apache/helix/task/TaskRebalancer.java
index 7b93b82..829f0c4 100644
--- a/helix-core/src/main/java/org/apache/helix/task/TaskRebalancer.java
+++ b/helix-core/src/main/java/org/apache/helix/task/TaskRebalancer.java
@@ -19,112 +19,585 @@ package org.apache.helix.task;
* under the License.
*/
-import java.util.Collections;
+import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
+import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.SortedSet;
+import java.util.TreeMap;
import java.util.TreeSet;
+import org.apache.helix.AccessOption;
+import org.apache.helix.HelixDataAccessor;
+import org.apache.helix.HelixManager;
+import org.apache.helix.PropertyKey;
+import org.apache.helix.ZNRecord;
import org.apache.helix.api.Cluster;
import org.apache.helix.api.Resource;
import org.apache.helix.api.State;
import org.apache.helix.api.id.ParticipantId;
import org.apache.helix.api.id.PartitionId;
import org.apache.helix.api.id.ResourceId;
+import org.apache.helix.controller.context.ControllerContextProvider;
+import org.apache.helix.controller.rebalancer.HelixRebalancer;
+import org.apache.helix.controller.rebalancer.config.RebalancerConfig;
import org.apache.helix.controller.stages.ResourceCurrentState;
import org.apache.helix.model.IdealState;
import org.apache.helix.model.ResourceAssignment;
+import org.apache.log4j.Logger;
+
+import com.google.common.base.Joiner;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Sets;
/**
- * Custom rebalancer implementation for the {@code Task} state model. Tasks are assigned to
- * instances hosting target resource partitions in target states
+ * Custom rebalancer implementation for the {@code Task} state model.
*/
-public class TaskRebalancer extends AbstractTaskRebalancer {
+public abstract class TaskRebalancer implements HelixRebalancer {
+ private static final Logger LOG = Logger.getLogger(TaskRebalancer.class);
+ private HelixManager _manager;
+
+ /**
+ * Get all the partitions that should be created by this task
+ * @param jobCfg the task configuration
+ * @param jobCtx the task context
+ * @param workflowCfg the workflow configuration
+ * @param workflowCtx the workflow context
+ * @param cache cluster snapshot
+ * @return set of partition numbers
+ */
+ public abstract Set<Integer> getAllTaskPartitions(JobConfig jobCfg, JobContext jobCtx,
+ WorkflowConfig workflowCfg, WorkflowContext workflowCtx, Cluster cache);
+
+ /**
+ * Compute an assignment of tasks to instances
+ * @param currStateOutput the current state of the instances
+ * @param prevAssignment the previous task partition assignment
+ * @param instanceList the instances
+ * @param jobCfg the task configuration
+ * @param taskCtx the task context
+ * @param workflowCfg the workflow configuration
+ * @param workflowCtx the workflow context
+ * @param partitionSet the partitions to assign
+ * @param cache cluster snapshot
+ * @return map of instances to set of partition numbers
+ */
+ public abstract Map<ParticipantId, SortedSet<Integer>> getTaskAssignment(
+ ResourceCurrentState currStateOutput, ResourceAssignment prevAssignment,
+ Iterable<ParticipantId> instanceList, JobConfig jobCfg, JobContext jobContext,
+ WorkflowConfig workflowCfg, WorkflowContext workflowCtx, Set<Integer> partitionSet,
+ Cluster cache);
+
@Override
- public Set<Integer> getAllTaskPartitions(TaskConfig taskCfg, WorkflowConfig workflowCfg,
- WorkflowContext workflowCtx, Cluster cluster) {
- return getAllTaskPartitions(getTgtIdealState(taskCfg, cluster), taskCfg);
+ public void init(HelixManager manager, ControllerContextProvider contextProvider) {
+ _manager = manager;
}
@Override
- public Map<String, SortedSet<Integer>> getTaskAssignment(ResourceCurrentState currStateOutput,
- ResourceAssignment prevAssignment, Iterable<ParticipantId> instanceList, TaskConfig taskCfg,
- TaskContext taskCtx, WorkflowConfig workflowCfg, WorkflowContext workflowCtx,
- Set<Integer> partitionSet, Cluster cluster) {
- IdealState tgtIs = getTgtIdealState(taskCfg, cluster);
- if (tgtIs == null) {
- return Collections.emptyMap();
+ public ResourceAssignment computeResourceMapping(RebalancerConfig rebalancerConfig,
+ ResourceAssignment prevAssignment, Cluster cluster, ResourceCurrentState currentState) {
+ IdealState taskIs = cluster.getResource(rebalancerConfig.getResourceId()).getIdealState();
+ return computeBestPossiblePartitionState(cluster, taskIs,
+ cluster.getResource(rebalancerConfig.getResourceId()), currentState);
+ }
+
+ public ResourceAssignment computeBestPossiblePartitionState(Cluster clusterData,
+ IdealState taskIs, Resource resource, ResourceCurrentState currStateOutput) {
+ final String resourceName = resource.getId().toString();
+
+ // Fetch job configuration
+ JobConfig jobCfg = TaskUtil.getJobCfg(_manager, resourceName);
+ String workflowResource = jobCfg.getWorkflow();
+
+ // Fetch workflow configuration and context
+ WorkflowConfig workflowCfg = TaskUtil.getWorkflowCfg(_manager, workflowResource);
+ WorkflowContext workflowCtx = TaskUtil.getWorkflowContext(_manager, workflowResource);
+
+ // Initialize workflow context if needed
+ if (workflowCtx == null) {
+ workflowCtx = new WorkflowContext(new ZNRecord("WorkflowContext"));
+ workflowCtx.setStartTime(System.currentTimeMillis());
+ }
+
+ // Check parent dependencies
+ for (String parent : workflowCfg.getJobDag().getDirectParents(resourceName)) {
+ if (workflowCtx.getJobState(parent) == null
+ || !workflowCtx.getJobState(parent).equals(TaskState.COMPLETED)) {
+ return emptyAssignment(resourceName);
+ }
+ }
+
+ // Clean up if workflow marked for deletion
+ TargetState targetState = workflowCfg.getTargetState();
+ if (targetState == TargetState.DELETE) {
+ cleanup(_manager, resourceName, workflowCfg, workflowResource);
+ return emptyAssignment(resourceName);
+ }
+
+ // Check if this workflow has been finished past its expiry.
+ if (workflowCtx.getFinishTime() != WorkflowContext.UNFINISHED
+ && workflowCtx.getFinishTime() + workflowCfg.getExpiry() <= System.currentTimeMillis()) {
+ markForDeletion(_manager, workflowResource);
+ cleanup(_manager, resourceName, workflowCfg, workflowResource);
+ return emptyAssignment(resourceName);
+ }
+
+ // Fetch any existing context information from the property store.
+ JobContext jobCtx = TaskUtil.getJobContext(_manager, resourceName);
+ if (jobCtx == null) {
+ jobCtx = new JobContext(new ZNRecord("TaskContext"));
+ jobCtx.setStartTime(System.currentTimeMillis());
+ }
+
+ // The job is already in a final state (completed/failed).
+ if (workflowCtx.getJobState(resourceName) == TaskState.FAILED
+ || workflowCtx.getJobState(resourceName) == TaskState.COMPLETED) {
+ return emptyAssignment(resourceName);
+ }
+
+ ResourceAssignment prevAssignment = TaskUtil.getPrevResourceAssignment(_manager, resourceName);
+ if (prevAssignment == null) {
+ prevAssignment = new ResourceAssignment(ResourceId.from(resourceName));
+ }
+
+ // Will contain the list of partitions that must be explicitly dropped from the ideal state that
+ // is stored in zk.
+ // Fetch the previous resource assignment from the property store. This is required because of
+ // HELIX-230.
+ Set<Integer> partitionsToDrop = new TreeSet<Integer>();
+
+ ResourceAssignment newAssignment =
+ computeResourceMapping(resourceName, workflowCfg, jobCfg, prevAssignment, clusterData
+ .getLiveParticipantMap().keySet(), currStateOutput, workflowCtx, jobCtx,
+ partitionsToDrop, clusterData);
+
+ if (!partitionsToDrop.isEmpty()) {
+ for (Integer pId : partitionsToDrop) {
+ taskIs.getRecord().getMapFields().remove(pName(resourceName, pId));
+ }
+ HelixDataAccessor accessor = _manager.getHelixDataAccessor();
+ PropertyKey propertyKey = accessor.keyBuilder().idealStates(resourceName);
+ accessor.setProperty(propertyKey, taskIs);
+ }
+
+ // Update rebalancer context, previous ideal state.
+ TaskUtil.setJobContext(_manager, resourceName, jobCtx);
+ TaskUtil.setWorkflowContext(_manager, workflowResource, workflowCtx);
+ TaskUtil.setPrevResourceAssignment(_manager, resourceName, newAssignment);
+
+ return newAssignment;
+ }
+
+ private ResourceAssignment computeResourceMapping(String jobResource,
+ WorkflowConfig workflowConfig, JobConfig jobCfg, ResourceAssignment prevAssignment,
+ Iterable<ParticipantId> liveInstances, ResourceCurrentState currStateOutput,
+ WorkflowContext workflowCtx, JobContext jobCtx, Set<Integer> partitionsToDropFromIs,
+ Cluster cache) {
+ TargetState jobTgtState = workflowConfig.getTargetState();
+
+ // Update running status in workflow context
+ if (jobTgtState == TargetState.STOP) {
+ workflowCtx.setJobState(jobResource, TaskState.STOPPED);
+ // Workflow has been stopped if all jobs are stopped
+ if (isWorkflowStopped(workflowCtx, workflowConfig)) {
+ workflowCtx.setWorkflowState(TaskState.STOPPED);
+ }
+ } else {
+ workflowCtx.setJobState(jobResource, TaskState.IN_PROGRESS);
+ // Workflow is in progress if any task is in progress
+ workflowCtx.setWorkflowState(TaskState.IN_PROGRESS);
+ }
+
+ // Used to keep track of tasks that have already been assigned to instances.
+ Set<Integer> assignedPartitions = new HashSet<Integer>();
+
+ // Keeps a mapping of (partition) -> (instance, state)
+ Map<Integer, PartitionAssignment> paMap = new TreeMap<Integer, PartitionAssignment>();
+
+ // Process all the current assignments of tasks.
+ Set<Integer> allPartitions =
+ getAllTaskPartitions(jobCfg, jobCtx, workflowConfig, workflowCtx, cache);
+ Map<ParticipantId, SortedSet<Integer>> taskAssignments =
+ getTaskPartitionAssignments(liveInstances, prevAssignment, allPartitions);
+ for (ParticipantId instance : taskAssignments.keySet()) {
+ Set<Integer> pSet = taskAssignments.get(instance);
+ // Used to keep track of partitions that are in one of the final states: COMPLETED, TIMED_OUT,
+ // TASK_ERROR, ERROR.
+ Set<Integer> donePartitions = new TreeSet<Integer>();
+ for (int pId : pSet) {
+ final String pName = pName(jobResource, pId);
+
+ // Check for pending state transitions on this (partition, instance).
+ State pendingState =
+ currStateOutput.getPendingState(ResourceId.from(jobResource), PartitionId.from(pName),
+ instance);
+ if (pendingState != null) {
+ // There is a pending state transition for this (partition, instance). Just copy forward
+ // the state
+ // assignment from the previous ideal state.
+ Map<ParticipantId, State> stateMap =
+ prevAssignment.getReplicaMap(PartitionId.from(pName));
+ if (stateMap != null) {
+ State prevState = stateMap.get(instance);
+ paMap.put(pId, new PartitionAssignment(instance.toString(), prevState.toString()));
+ assignedPartitions.add(pId);
+ LOG.debug(String
+ .format(
+ "Task partition %s has a pending state transition on instance %s. Using the previous ideal state which was %s.",
+ pName, instance, prevState));
+ }
+
+ continue;
+ }
+
+ State currHelixState =
+ currStateOutput.getCurrentState(ResourceId.from(jobResource), PartitionId.from(pName),
+ instance);
+ TaskPartitionState currState =
+ (currHelixState != null) ? TaskPartitionState.valueOf(currHelixState.toString()) : null;
+
+ // Process any requested state transitions.
+ State requestedStateStr =
+ currStateOutput.getRequestedState(ResourceId.from(jobResource),
+ PartitionId.from(pName), instance);
+ if (requestedStateStr != null && !requestedStateStr.toString().isEmpty()) {
+ TaskPartitionState requestedState =
+ TaskPartitionState.valueOf(requestedStateStr.toString());
+ if (requestedState.equals(currState)) {
+ LOG.warn(String.format(
+ "Requested state %s is the same as the current state for instance %s.",
+ requestedState, instance));
+ }
+
+ paMap.put(pId, new PartitionAssignment(instance.toString(), requestedState.name()));
+ assignedPartitions.add(pId);
+ LOG.debug(String.format(
+ "Instance %s requested a state transition to %s for partition %s.", instance,
+ requestedState, pName));
+ continue;
+ }
+
+ switch (currState) {
+ case RUNNING:
+ case STOPPED: {
+ TaskPartitionState nextState;
+ if (jobTgtState == TargetState.START) {
+ nextState = TaskPartitionState.RUNNING;
+ } else {
+ nextState = TaskPartitionState.STOPPED;
+ }
+
+ paMap.put(pId, new PartitionAssignment(instance.toString(), nextState.name()));
+ assignedPartitions.add(pId);
+ LOG.debug(String.format("Setting task partition %s state to %s on instance %s.", pName,
+ nextState, instance));
+ }
+ break;
+ case COMPLETED: {
+ // The task has completed on this partition. Mark as such in the context object.
+ donePartitions.add(pId);
+ LOG.debug(String
+ .format(
+ "Task partition %s has completed with state %s. Marking as such in rebalancer context.",
+ pName, currState));
+ partitionsToDropFromIs.add(pId);
+ markPartitionCompleted(jobCtx, pId);
+ }
+ break;
+ case TIMED_OUT:
+ case TASK_ERROR:
+ case ERROR: {
+ donePartitions.add(pId); // The task may be rescheduled on a different instance.
+ LOG.debug(String.format(
+ "Task partition %s has error state %s. Marking as such in rebalancer context.",
+ pName, currState));
+ markPartitionError(jobCtx, pId, currState);
+ // The error policy is to fail the task as soon a single partition fails for a specified
+ // maximum number of
+ // attempts.
+ if (jobCtx.getPartitionNumAttempts(pId) >= jobCfg.getMaxAttemptsPerTask()) {
+ workflowCtx.setJobState(jobResource, TaskState.FAILED);
+ workflowCtx.setWorkflowState(TaskState.FAILED);
+ addAllPartitions(allPartitions, partitionsToDropFromIs);
+ return emptyAssignment(jobResource);
+ }
+ }
+ break;
+ case INIT:
+ case DROPPED: {
+ // currState in [INIT, DROPPED]. Do nothing, the partition is eligible to be reassigned.
+ donePartitions.add(pId);
+ LOG.debug(String.format(
+ "Task partition %s has state %s. It will be dropped from the current ideal state.",
+ pName, currState));
+ }
+ break;
+ default:
+ throw new AssertionError("Unknown enum symbol: " + currState);
+ }
+ }
+
+ // Remove the set of task partitions that are completed or in one of the error states.
+ pSet.removeAll(donePartitions);
+ }
+
+ if (isJobComplete(jobCtx, allPartitions)) {
+ workflowCtx.setJobState(jobResource, TaskState.COMPLETED);
+ if (isWorkflowComplete(workflowCtx, workflowConfig)) {
+ workflowCtx.setWorkflowState(TaskState.COMPLETED);
+ workflowCtx.setFinishTime(System.currentTimeMillis());
+ }
+ }
+
+ // Make additional task assignments if needed.
+ if (jobTgtState == TargetState.START) {
+ // Contains the set of task partitions that must be excluded from consideration when making
+ // any new assignments.
+ // This includes all completed, failed, already assigned partitions.
+ Set<Integer> excludeSet = Sets.newTreeSet(assignedPartitions);
+ addCompletedPartitions(excludeSet, jobCtx, allPartitions);
+ // Get instance->[partition, ...] mappings for the target resource.
+ Map<ParticipantId, SortedSet<Integer>> tgtPartitionAssignments =
+ getTaskAssignment(currStateOutput, prevAssignment, liveInstances, jobCfg, jobCtx,
+ workflowConfig, workflowCtx, allPartitions, cache);
+ for (Map.Entry<ParticipantId, SortedSet<Integer>> entry : taskAssignments.entrySet()) {
+ ParticipantId instance = entry.getKey();
+ if (!tgtPartitionAssignments.containsKey(instance)) {
+ continue;
+ }
+ // Contains the set of task partitions currently assigned to the instance.
+ Set<Integer> pSet = entry.getValue();
+ int numToAssign = jobCfg.getNumConcurrentTasksPerInstance() - pSet.size();
+ if (numToAssign > 0) {
+ List<Integer> nextPartitions =
+ getNextPartitions(tgtPartitionAssignments.get(instance), excludeSet, numToAssign);
+ for (Integer pId : nextPartitions) {
+ String pName = pName(jobResource, pId);
+ paMap.put(pId,
+ new PartitionAssignment(instance.toString(), TaskPartitionState.RUNNING.name()));
+ excludeSet.add(pId);
+ LOG.debug(String.format("Setting task partition %s state to %s on instance %s.", pName,
+ TaskPartitionState.RUNNING, instance));
+ }
+ }
+ }
}
- Set<String> tgtStates = taskCfg.getTargetPartitionStates();
- return getTgtPartitionAssignment(currStateOutput, instanceList, tgtIs, tgtStates, partitionSet);
+
+ // Construct a ResourceAssignment object from the map of partition assignments.
+ ResourceAssignment ra = new ResourceAssignment(ResourceId.from(jobResource));
+ for (Map.Entry<Integer, PartitionAssignment> e : paMap.entrySet()) {
+ PartitionAssignment pa = e.getValue();
+ ra.addReplicaMap(PartitionId.from(pName(jobResource, e.getKey())),
+ ImmutableMap.of(ParticipantId.from(pa._instance), State.from(pa._state)));
+ }
+
+ return ra;
}
/**
- * Gets the ideal state of the target resource of this task
- * @param taskCfg task config containing target resource id
- * @param cluster snapshot of the cluster containing the task and target resource
- * @return target resource ideal state, or null
+ * Checks if the job has completed.
+ * @param ctx The rebalancer context.
+ * @param allPartitions The set of partitions to check.
+ * @return true if all task partitions have been marked with status
+ * {@link TaskPartitionState#COMPLETED} in the rebalancer
+ * context, false otherwise.
*/
- private static IdealState getTgtIdealState(TaskConfig taskCfg, Cluster cluster) {
- ResourceId tgtResourceId = ResourceId.from(taskCfg.getTargetResource());
- Resource resource = cluster.getResource(tgtResourceId);
- return resource != null ? resource.getIdealState() : null;
+ private static boolean isJobComplete(JobContext ctx, Set<Integer> allPartitions) {
+ for (Integer pId : allPartitions) {
+ TaskPartitionState state = ctx.getPartitionState(pId);
+ if (state != TaskPartitionState.COMPLETED) {
+ return false;
+ }
+ }
+ return true;
}
/**
- * Returns the set of all partition ids for a task.
- * <p/>
- * If a set of partition ids was explicitly specified in the config, that is used. Otherwise, we
- * use the list of all partition ids from the target resource.
+ * Checks if the workflow has completed.
+ * @param ctx Workflow context containing job states
+ * @param cfg Workflow config containing set of jobs
+ * @return true if all jobs are {@link TaskState#COMPLETED}, false otherwise.
*/
- private static Set<Integer> getAllTaskPartitions(IdealState tgtResourceIs, TaskConfig taskCfg) {
- if (tgtResourceIs == null) {
- return null;
+ private static boolean isWorkflowComplete(WorkflowContext ctx, WorkflowConfig cfg) {
+ for (String job : cfg.getJobDag().getAllNodes()) {
+ if (ctx.getJobState(job) != TaskState.COMPLETED) {
+ return false;
+ }
}
- Set<Integer> taskPartitions = new HashSet<Integer>();
- if (taskCfg.getTargetPartitions() != null) {
- for (Integer pId : taskCfg.getTargetPartitions()) {
- taskPartitions.add(pId);
+ return true;
+ }
+
+ /**
+ * Checks if the workflow has been stopped.
+ * @param ctx Workflow context containing job states
+ * @param cfg Workflow config containing set of jobs
+ * @return true if all jobs are {@link TaskState#STOPPED}, false otherwise.
+ */
+ private static boolean isWorkflowStopped(WorkflowContext ctx, WorkflowConfig cfg) {
+ for (String job : cfg.getJobDag().getAllNodes()) {
+ if (ctx.getJobState(job) != TaskState.STOPPED && ctx.getJobState(job) != null) {
+ return false;
}
- } else {
- for (String pName : tgtResourceIs.getPartitionSet()) {
- taskPartitions.add(pId(pName));
+ }
+ return true;
+ }
+
+ private static void markForDeletion(HelixManager mgr, String resourceName) {
+ mgr.getConfigAccessor().set(
+ TaskUtil.getResourceConfigScope(mgr.getClusterName(), resourceName),
+ WorkflowConfig.TARGET_STATE, TargetState.DELETE.name());
+ }
+
+ /**
+ * Cleans up all Helix state associated with this job, wiping workflow-level information if this
+ * is the last remaining job in its workflow.
+ */
+ private static void cleanup(HelixManager mgr, String resourceName, WorkflowConfig cfg,
+ String workflowResource) {
+ HelixDataAccessor accessor = mgr.getHelixDataAccessor();
+ // Delete resource configs.
+ PropertyKey cfgKey = getConfigPropertyKey(accessor, resourceName);
+ if (!accessor.removeProperty(cfgKey)) {
+ throw new RuntimeException(
+ String
+ .format(
+ "Error occurred while trying to clean up task %s. Failed to remove node %s from Helix. Aborting further clean up steps.",
+ resourceName, cfgKey));
+ }
+ // Delete property store information for this resource.
+ String propStoreKey = getRebalancerPropStoreKey(resourceName);
+ if (!mgr.getHelixPropertyStore().remove(propStoreKey, AccessOption.PERSISTENT)) {
+ throw new RuntimeException(
+ String
+ .format(
+ "Error occurred while trying to clean up task %s. Failed to remove node %s from Helix. Aborting further clean up steps.",
+ resourceName, propStoreKey));
+ }
+ // Finally, delete the ideal state itself.
+ PropertyKey isKey = getISPropertyKey(accessor, resourceName);
+ if (!accessor.removeProperty(isKey)) {
+ throw new RuntimeException(String.format(
+ "Error occurred while trying to clean up task %s. Failed to remove node %s from Helix.",
+ resourceName, isKey));
+ }
+ LOG.info(String.format("Successfully cleaned up task resource %s.", resourceName));
+
+ boolean lastInWorkflow = true;
+ for (String job : cfg.getJobDag().getAllNodes()) {
+ // check if property store information or resource configs exist for this job
+ if (mgr.getHelixPropertyStore().exists(getRebalancerPropStoreKey(job),
+ AccessOption.PERSISTENT)
+ || accessor.getProperty(getConfigPropertyKey(accessor, job)) != null
+ || accessor.getProperty(getISPropertyKey(accessor, job)) != null) {
+ lastInWorkflow = false;
+ }
+ }
+
+ // clean up job-level info if this was the last in workflow
+ if (lastInWorkflow) {
+ // delete workflow config
+ PropertyKey workflowCfgKey = getConfigPropertyKey(accessor, workflowResource);
+ if (!accessor.removeProperty(workflowCfgKey)) {
+ throw new RuntimeException(
+ String
+ .format(
+ "Error occurred while trying to clean up workflow %s. Failed to remove node %s from Helix. Aborting further clean up steps.",
+ workflowResource, workflowCfgKey));
+ }
+ // Delete property store information for this workflow
+ String workflowPropStoreKey = getRebalancerPropStoreKey(workflowResource);
+ if (!mgr.getHelixPropertyStore().remove(workflowPropStoreKey, AccessOption.PERSISTENT)) {
+ throw new RuntimeException(
+ String
+ .format(
+ "Error occurred while trying to clean up workflow %s. Failed to remove node %s from Helix. Aborting further clean up steps.",
+ workflowResource, workflowPropStoreKey));
+ }
+ }
+
+ }
+
+ private static String getRebalancerPropStoreKey(String resource) {
+ return Joiner.on("/").join(TaskConstants.REBALANCER_CONTEXT_ROOT, resource);
+ }
+
+ private static PropertyKey getISPropertyKey(HelixDataAccessor accessor, String resource) {
+ return accessor.keyBuilder().idealStates(resource);
+ }
+
+ private static PropertyKey getConfigPropertyKey(HelixDataAccessor accessor, String resource) {
+ return accessor.keyBuilder().resourceConfig(resource);
+ }
+
+ private static void addAllPartitions(Set<Integer> toAdd, Set<Integer> destination) {
+ for (Integer pId : toAdd) {
+ destination.add(pId);
+ }
+ }
+
+ private static ResourceAssignment emptyAssignment(String name) {
+ return new ResourceAssignment(ResourceId.from(name));
+ }
+
+ private static void addCompletedPartitions(Set<Integer> set, JobContext ctx,
+ Iterable<Integer> pIds) {
+ for (Integer pId : pIds) {
+ TaskPartitionState state = ctx.getPartitionState(pId);
+ if (state == TaskPartitionState.COMPLETED) {
+ set.add(pId);
}
}
- return taskPartitions;
+ }
+
+ private static List<Integer> getNextPartitions(SortedSet<Integer> candidatePartitions,
+ Set<Integer> excluded, int n) {
+ List<Integer> result = new ArrayList<Integer>();
+ for (Integer pId : candidatePartitions) {
+ if (result.size() >= n) {
+ break;
+ }
+
+ if (!excluded.contains(pId)) {
+ result.add(pId);
+ }
+ }
+
+ return result;
+ }
+
+ private static void markPartitionCompleted(JobContext ctx, int pId) {
+ ctx.setPartitionState(pId, TaskPartitionState.COMPLETED);
+ ctx.setPartitionFinishTime(pId, System.currentTimeMillis());
+ ctx.incrementNumAttempts(pId);
+ }
+
+ private static void markPartitionError(JobContext ctx, int pId, TaskPartitionState state) {
+ ctx.setPartitionState(pId, state);
+ ctx.setPartitionFinishTime(pId, System.currentTimeMillis());
+ ctx.incrementNumAttempts(pId);
}
/**
- * Get partition assignments for the target resource, but only for the partitions of interest.
- * @param currStateOutput The current state of the instances in the cluster.
- * @param instanceList The set of instances.
- * @param tgtIs The ideal state of the target resource.
- * @param tgtStates Only partitions in this set of states will be considered. If null, partitions
- * do not need to
- * be in any specific state to be considered.
- * @param includeSet The set of partitions to consider.
- * @return A map of instance vs set of partition ids assigned to that instance.
+ * Return the assignment of task partitions per instance.
*/
- private static Map<String, SortedSet<Integer>> getTgtPartitionAssignment(
- ResourceCurrentState currStateOutput, Iterable<ParticipantId> instanceList, IdealState tgtIs,
- Set<String> tgtStates, Set<Integer> includeSet) {
- Map<String, SortedSet<Integer>> result = new HashMap<String, SortedSet<Integer>>();
+ private static Map<ParticipantId, SortedSet<Integer>> getTaskPartitionAssignments(
+ Iterable<ParticipantId> instanceList, ResourceAssignment assignment, Set<Integer> includeSet) {
+ Map<ParticipantId, SortedSet<Integer>> result =
+ new HashMap<ParticipantId, SortedSet<Integer>>();
for (ParticipantId instance : instanceList) {
- result.put(instance.stringify(), new TreeSet<Integer>());
+ result.put(instance, new TreeSet<Integer>());
}
- for (String pName : tgtIs.getPartitionSet()) {
- int pId = pId(pName);
+ for (PartitionId partition : assignment.getMappedPartitionIds()) {
+ int pId = pId(partition.toString());
if (includeSet.contains(pId)) {
- for (ParticipantId instance : instanceList) {
- State s =
- currStateOutput.getCurrentState(ResourceId.from(tgtIs.getResourceName()),
- PartitionId.from(pName), instance);
- String state = (s == null ? null : s.toString());
- if (tgtStates == null || tgtStates.contains(state)) {
- result.get(instance).add(pId);
+ Map<ParticipantId, State> replicaMap = assignment.getReplicaMap(partition);
+ for (ParticipantId instance : replicaMap.keySet()) {
+ SortedSet<Integer> pList = result.get(instance);
+ if (pList != null) {
+ pList.add(pId);
}
}
}
@@ -132,4 +605,32 @@ public class TaskRebalancer extends AbstractTaskRebalancer {
return result;
}
+
+ /**
+ * Computes the partition name given the resource name and partition id.
+ */
+ protected static String pName(String resource, int pId) {
+ return resource + "_" + pId;
+ }
+
+ /**
+ * Extracts the partition id from the given partition name.
+ */
+ protected static int pId(String pName) {
+ String[] tokens = pName.split("_");
+ return Integer.valueOf(tokens[tokens.length - 1]);
+ }
+
+ /**
+ * An (instance, state) pair.
+ */
+ private static class PartitionAssignment {
+ private final String _instance;
+ private final String _state;
+
+ private PartitionAssignment(String instance, String state) {
+ _instance = instance;
+ _state = state;
+ }
+ }
}
http://git-wip-us.apache.org/repos/asf/helix/blob/97ca4de4/helix-core/src/main/java/org/apache/helix/task/TaskRunner.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/TaskRunner.java b/helix-core/src/main/java/org/apache/helix/task/TaskRunner.java
index 92976b0..cd909ed 100644
--- a/helix-core/src/main/java/org/apache/helix/task/TaskRunner.java
+++ b/helix-core/src/main/java/org/apache/helix/task/TaskRunner.java
@@ -29,7 +29,6 @@ import org.apache.log4j.Logger;
*/
public class TaskRunner implements Runnable {
private static final Logger LOG = Logger.getLogger(TaskRunner.class);
-
private final StateModel _taskStateModel;
private final HelixManager _manager;
private final String _taskName;
http://git-wip-us.apache.org/repos/asf/helix/blob/97ca4de4/helix-core/src/main/java/org/apache/helix/task/TaskStateModel.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/TaskStateModel.java b/helix-core/src/main/java/org/apache/helix/task/TaskStateModel.java
index c399930..a44a8cb 100644
--- a/helix-core/src/main/java/org/apache/helix/task/TaskStateModel.java
+++ b/helix-core/src/main/java/org/apache/helix/task/TaskStateModel.java
@@ -34,12 +34,7 @@ import org.apache.helix.participant.statemachine.StateModelInfo;
import org.apache.helix.participant.statemachine.Transition;
import org.apache.log4j.Logger;
-/**
- * task state model
- */
-@StateModelInfo(states = {
- "INIT", "RUNNING", "STOPPED", "COMPLETED", "TIMED_OUT", "TASK_ERROR", "DROPPED"
-}, initialState = "INIT")
+@StateModelInfo(states = "{'NOT USED BY HELIX'}", initialState = "INIT")
public class TaskStateModel extends StateModel {
private static final Logger LOG = Logger.getLogger(TaskStateModel.class);
private final HelixManager _manager;
@@ -217,19 +212,37 @@ public class TaskStateModel extends StateModel {
}
private void startTask(Message msg, String taskPartition) {
- TaskConfig cfg = TaskUtil.getTaskCfg(_manager, msg.getResourceName());
+ JobConfig cfg = TaskUtil.getJobCfg(_manager, msg.getResourceName());
+ TaskConfig taskConfig = null;
String command = cfg.getCommand();
- Map<String, String> taskNameMap = cfg.getTaskNameMap();
- if (taskNameMap != null && taskNameMap.containsKey(taskPartition)) {
- // Support a partition-specific override of tasks to run
- String taskName = taskNameMap.get(taskPartition);
- if (_taskFactoryRegistry.containsKey(taskName)) {
- command = taskName;
+
+ // Get a task-specific command if specified
+ JobContext ctx = TaskUtil.getJobContext(_manager, msg.getResourceName());
+ int pId = Integer.parseInt(taskPartition.substring(taskPartition.lastIndexOf('_') + 1));
+ if (ctx.getTaskIdForPartition(pId) != null) {
+ taskConfig = cfg.getTaskConfig(ctx.getTaskIdForPartition(pId));
+ if (taskConfig != null) {
+ if (taskConfig.getCommand() != null) {
+ command = taskConfig.getCommand();
+ }
}
}
+
+ // Populate a task callback context
+ TaskCallbackContext callbackContext = new TaskCallbackContext();
+ callbackContext.setManager(_manager);
+ callbackContext.setJobConfig(cfg);
+ callbackContext.setTaskConfig(taskConfig);
+
+ // Create a task instance with this command
+ if (command == null || _taskFactoryRegistry == null
+ || !_taskFactoryRegistry.containsKey(command)) {
+ throw new IllegalStateException("No callback implemented for task " + command);
+ }
TaskFactory taskFactory = _taskFactoryRegistry.get(command);
- Task task = taskFactory.createNewTask(cfg.getCommandConfig());
+ Task task = taskFactory.createNewTask(callbackContext);
+ // Submit the task for execution
_taskRunner =
new TaskRunner(this, task, msg.getResourceName(), taskPartition, msg.getTgtName(),
_manager, msg.getTgtSessionId());
@@ -244,6 +257,6 @@ public class TaskStateModel extends StateModel {
_taskRunner.timeout();
}
}
- }, cfg.getTimeoutPerPartition());
+ }, cfg.getTimeoutPerTask());
}
}
http://git-wip-us.apache.org/repos/asf/helix/blob/97ca4de4/helix-core/src/main/java/org/apache/helix/task/TaskUtil.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/TaskUtil.java b/helix-core/src/main/java/org/apache/helix/task/TaskUtil.java
index 0f980b8..96b7e55 100644
--- a/helix-core/src/main/java/org/apache/helix/task/TaskUtil.java
+++ b/helix-core/src/main/java/org/apache/helix/task/TaskUtil.java
@@ -19,11 +19,17 @@ package org.apache.helix.task;
* under the License.
*/
+import java.io.IOException;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
import java.util.Map;
import org.apache.helix.AccessOption;
+import org.apache.helix.ConfigAccessor;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.HelixManager;
+import org.apache.helix.HelixProperty;
import org.apache.helix.PropertyKey;
import org.apache.helix.ZNRecord;
import org.apache.helix.api.State;
@@ -31,54 +37,48 @@ import org.apache.helix.api.id.PartitionId;
import org.apache.helix.model.CurrentState;
import org.apache.helix.model.HelixConfigScope;
import org.apache.helix.model.ResourceAssignment;
-import org.apache.helix.model.ResourceConfiguration;
import org.apache.helix.model.builder.HelixConfigScopeBuilder;
import org.apache.log4j.Logger;
+import org.codehaus.jackson.map.ObjectMapper;
+import org.codehaus.jackson.type.TypeReference;
import com.google.common.base.Joiner;
+import com.google.common.collect.Maps;
/**
* Static utility methods.
*/
public class TaskUtil {
private static final Logger LOG = Logger.getLogger(TaskUtil.class);
-
- enum TaskUtilEnum {
- CONTEXT_NODE("Context"),
- PREV_RA_NODE("PreviousResourceAssignment");
-
- final String _value;
-
- private TaskUtilEnum(String value) {
- _value = value;
- }
-
- public String value() {
- return _value;
- }
- }
+ private static final String CONTEXT_NODE = "Context";
+ private static final String PREV_RA_NODE = "PreviousResourceAssignment";
/**
- * Parses task resource configurations in Helix into a {@link TaskConfig} object.
+ * Parses job resource configurations in Helix into a {@link JobConfig} object.
* @param manager HelixManager object used to connect to Helix.
- * @param taskResource The name of the task resource.
- * @return A {@link TaskConfig} object if Helix contains valid configurations for the task, null
+ * @param jobResource The name of the job resource.
+ * @return A {@link JobConfig} object if Helix contains valid configurations for the job, null
* otherwise.
*/
- public static TaskConfig getTaskCfg(HelixManager manager, String taskResource) {
- ResourceConfiguration config = getResourceConfig(manager, taskResource);
- Map<String, String> taskCfg = config.getRecord().getSimpleFields();
- TaskConfig.Builder b = TaskConfig.Builder.fromMap(taskCfg);
- if (config.getRecord().getMapFields().containsKey(TaskConfig.TASK_NAME_MAP)) {
- b.setTaskNameMap(config.getRecord().getMapField(TaskConfig.TASK_NAME_MAP));
+ public static JobConfig getJobCfg(HelixManager manager, String jobResource) {
+ HelixProperty jobResourceConfig = getResourceConfig(manager, jobResource);
+ JobConfig.Builder b =
+ JobConfig.Builder.fromMap(jobResourceConfig.getRecord().getSimpleFields());
+ Map<String, Map<String, String>> rawTaskConfigMap =
+ jobResourceConfig.getRecord().getMapFields();
+ Map<String, TaskConfig> taskConfigMap = Maps.newHashMap();
+ for (Map<String, String> rawTaskConfig : rawTaskConfigMap.values()) {
+ TaskConfig taskConfig = TaskConfig.from(rawTaskConfig);
+ taskConfigMap.put(taskConfig.getId(), taskConfig);
}
+ b.addTaskConfigMap(taskConfigMap);
return b.build();
}
public static WorkflowConfig getWorkflowCfg(HelixManager manager, String workflowResource) {
- ResourceConfiguration config = getResourceConfig(manager, workflowResource);
- Map<String, String> workflowCfg = config.getRecord().getSimpleFields();
+ Map<String, String> workflowCfg = getResourceConfigMap(manager, workflowResource);
WorkflowConfig.Builder b = WorkflowConfig.Builder.fromMap(workflowCfg);
+
return b.build();
}
@@ -109,56 +109,97 @@ public class TaskUtil {
String resourceName) {
ZNRecord r =
manager.getHelixPropertyStore().get(
- Joiner.on("/").join(TaskConstants.REBALANCER_CONTEXT_ROOT, resourceName,
- TaskUtilEnum.PREV_RA_NODE.value()), null, AccessOption.PERSISTENT);
+ Joiner.on("/").join(TaskConstants.REBALANCER_CONTEXT_ROOT, resourceName, PREV_RA_NODE),
+ null, AccessOption.PERSISTENT);
return r != null ? new ResourceAssignment(r) : null;
}
public static void setPrevResourceAssignment(HelixManager manager, String resourceName,
ResourceAssignment ra) {
manager.getHelixPropertyStore().set(
- Joiner.on("/").join(TaskConstants.REBALANCER_CONTEXT_ROOT, resourceName,
- TaskUtilEnum.PREV_RA_NODE.value()), ra.getRecord(), AccessOption.PERSISTENT);
+ Joiner.on("/").join(TaskConstants.REBALANCER_CONTEXT_ROOT, resourceName, PREV_RA_NODE),
+ ra.getRecord(), AccessOption.PERSISTENT);
}
- public static TaskContext getTaskContext(HelixManager manager, String taskResource) {
+ public static JobContext getJobContext(HelixManager manager, String jobResource) {
ZNRecord r =
manager.getHelixPropertyStore().get(
- Joiner.on("/").join(TaskConstants.REBALANCER_CONTEXT_ROOT, taskResource,
- TaskUtilEnum.CONTEXT_NODE.value()), null, AccessOption.PERSISTENT);
- return r != null ? new TaskContext(r) : null;
+ Joiner.on("/").join(TaskConstants.REBALANCER_CONTEXT_ROOT, jobResource, CONTEXT_NODE),
+ null, AccessOption.PERSISTENT);
+ return r != null ? new JobContext(r) : null;
}
- public static void setTaskContext(HelixManager manager, String taskResource, TaskContext ctx) {
+ public static void setJobContext(HelixManager manager, String jobResource, JobContext ctx) {
manager.getHelixPropertyStore().set(
- Joiner.on("/").join(TaskConstants.REBALANCER_CONTEXT_ROOT, taskResource,
- TaskUtilEnum.CONTEXT_NODE.value()), ctx.getRecord(), AccessOption.PERSISTENT);
+ Joiner.on("/").join(TaskConstants.REBALANCER_CONTEXT_ROOT, jobResource, CONTEXT_NODE),
+ ctx.getRecord(), AccessOption.PERSISTENT);
}
public static WorkflowContext getWorkflowContext(HelixManager manager, String workflowResource) {
ZNRecord r =
manager.getHelixPropertyStore().get(
Joiner.on("/").join(TaskConstants.REBALANCER_CONTEXT_ROOT, workflowResource,
- TaskUtilEnum.CONTEXT_NODE.value()), null, AccessOption.PERSISTENT);
+ CONTEXT_NODE), null, AccessOption.PERSISTENT);
return r != null ? new WorkflowContext(r) : null;
}
public static void setWorkflowContext(HelixManager manager, String workflowResource,
WorkflowContext ctx) {
manager.getHelixPropertyStore().set(
- Joiner.on("/").join(TaskConstants.REBALANCER_CONTEXT_ROOT, workflowResource,
- TaskUtilEnum.CONTEXT_NODE.value()), ctx.getRecord(), AccessOption.PERSISTENT);
+ Joiner.on("/").join(TaskConstants.REBALANCER_CONTEXT_ROOT, workflowResource, CONTEXT_NODE),
+ ctx.getRecord(), AccessOption.PERSISTENT);
+ }
+
+ public static String getNamespacedJobName(String singleJobWorkflow) {
+ return getNamespacedJobName(singleJobWorkflow, singleJobWorkflow);
}
- public static String getNamespacedTaskName(String singleTaskWorkflow) {
- return getNamespacedTaskName(singleTaskWorkflow, singleTaskWorkflow);
+ public static String getNamespacedJobName(String workflowResource, String jobName) {
+ return workflowResource + "_" + jobName;
}
- public static String getNamespacedTaskName(String workflowResource, String taskName) {
- return workflowResource + "_" + taskName;
+ public static String serializeJobConfigMap(Map<String, String> commandConfig) {
+ ObjectMapper mapper = new ObjectMapper();
+ try {
+ String serializedMap = mapper.writeValueAsString(commandConfig);
+ return serializedMap;
+ } catch (IOException e) {
+ LOG.error("Error serializing " + commandConfig, e);
+ }
+ return null;
+ }
+
+ public static Map<String, String> deserializeJobConfigMap(String commandConfig) {
+ ObjectMapper mapper = new ObjectMapper();
+ try {
+ Map<String, String> commandConfigMap =
+ mapper.readValue(commandConfig, new TypeReference<HashMap<String, String>>() {
+ });
+ return commandConfigMap;
+ } catch (IOException e) {
+ LOG.error("Error deserializing " + commandConfig, e);
+ }
+ return Collections.emptyMap();
+ }
+
+ private static Map<String, String> getResourceConfigMap(HelixManager manager, String resource) {
+ HelixConfigScope scope = getResourceConfigScope(manager.getClusterName(), resource);
+ ConfigAccessor configAccessor = manager.getConfigAccessor();
+
+ Map<String, String> taskCfg = new HashMap<String, String>();
+ List<String> cfgKeys = configAccessor.getKeys(scope);
+ if (cfgKeys == null || cfgKeys.isEmpty()) {
+ return null;
+ }
+
+ for (String cfgKey : cfgKeys) {
+ taskCfg.put(cfgKey, configAccessor.get(scope, cfgKey));
+ }
+
+ return getResourceConfig(manager, resource).getRecord().getSimpleFields();
}
- private static ResourceConfiguration getResourceConfig(HelixManager manager, String resource) {
+ private static HelixProperty getResourceConfig(HelixManager manager, String resource) {
HelixDataAccessor accessor = manager.getHelixDataAccessor();
PropertyKey.Builder keyBuilder = accessor.keyBuilder();
return accessor.getProperty(keyBuilder.resourceConfig(resource));
[14/50] [abbrv] git commit: Moved yarn hello world to recipes,
nonworking
Posted by ka...@apache.org.
Moved yarn hello world to recipes, nonworking
Project: http://git-wip-us.apache.org/repos/asf/helix/repo
Commit: http://git-wip-us.apache.org/repos/asf/helix/commit/71187176
Tree: http://git-wip-us.apache.org/repos/asf/helix/tree/71187176
Diff: http://git-wip-us.apache.org/repos/asf/helix/diff/71187176
Branch: refs/heads/master
Commit: 7118717674fce0303e7c0ed2fbc8828c1a5c25de
Parents: c22cdd9
Author: Kanak Biscuitwala <ka...@apache.org>
Authored: Fri Feb 21 19:08:51 2014 -0800
Committer: Kanak Biscuitwala <ka...@apache.org>
Committed: Fri Feb 21 19:08:51 2014 -0800
----------------------------------------------------------------------
.../yarn/example/HelloWordAppSpecFactory.java | 90 -----------
.../yarn/example/HelloWorldService.java | 40 -----
.../yarn/example/HelloWorldStateModel.java | 29 ----
.../example/HelloWorldStateModelFactory.java | 12 --
.../yarn/example/HelloworldAppSpec.java | 138 ----------------
.../main/resources/hello_world_app_spec.yaml | 24 ---
recipes/pom.xml | 1 +
recipes/provisioning/pom.xml | 50 ++++++
recipes/provisioning/yarn/helloworld/pom.xml | 159 +++++++++++++++++++
.../yarn/helloworld/src/assemble/assembly.xml | 60 +++++++
.../helloworld/src/main/config/log4j.properties | 31 ++++
.../yarn/example/HelloWordAppSpecFactory.java | 92 +++++++++++
.../yarn/example/HelloWorldService.java | 41 +++++
.../yarn/example/HelloWorldStateModel.java | 29 ++++
.../example/HelloWorldStateModelFactory.java | 13 ++
.../yarn/example/HelloworldAppSpec.java | 138 ++++++++++++++++
.../main/resources/hello_world_app_spec.yaml | 24 +++
.../yarn/helloworld/src/test/conf/testng.xml | 27 ++++
recipes/provisioning/yarn/pom.xml | 50 ++++++
19 files changed, 715 insertions(+), 333 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/helix/blob/71187176/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWordAppSpecFactory.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWordAppSpecFactory.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWordAppSpecFactory.java
deleted file mode 100644
index f9f1980..0000000
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWordAppSpecFactory.java
+++ /dev/null
@@ -1,90 +0,0 @@
-package org.apache.helix.provisioning.yarn.example;
-
-import java.io.File;
-import java.io.InputStream;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.Map;
-
-import org.apache.helix.provisioning.yarn.AppConfig;
-import org.apache.helix.provisioning.yarn.ApplicationSpec;
-import org.apache.helix.provisioning.yarn.ApplicationSpecFactory;
-import org.yaml.snakeyaml.DumperOptions;
-import org.yaml.snakeyaml.Yaml;
-
-public class HelloWordAppSpecFactory implements ApplicationSpecFactory {
-
- static HelloworldAppSpec data;
-
- static {
- HelloworldAppSpec data = new HelloworldAppSpec();
- AppConfig appConfig = new AppConfig();
- appConfig.setValue("k1", "v1");
- data.setAppConfig(appConfig);
- data.setAppName("testApp");
- data.setAppMasterPackageUri(
- "/Users/kgopalak/Documents/projects/incubator-helix/helix-provisioning/helix-provisioning-0.7.1-incubating-SNAPSHOT-pkg.tar");
- HashMap<String, Map<String, String>> serviceConfigMap = new HashMap<String, Map<String, String>>();
- serviceConfigMap.put("HelloWorld", new HashMap<String, String>());
- serviceConfigMap.get("HelloWorld").put("k1", "v1");
- data.setServiceConfigMap(serviceConfigMap);
- HashMap<String, String> serviceMainClassMap = new HashMap<String, String>();
- serviceMainClassMap.put("HelloWorld", HelloWorldService.class.getCanonicalName());
- data.setServiceMainClassMap(serviceMainClassMap);
- HashMap<String, String> servicePackageURIMap = new HashMap<String, String>();
- servicePackageURIMap
- .put(
- "HelloWorld",
- "/Users/kgopalak/Documents/projects/incubator-helix/helix-provisioning/helix-provisioning-0.7.1-incubating-SNAPSHOT-pkg.tar");
- data.setServicePackageURIMap(servicePackageURIMap);
- data.setServices(Arrays.asList(new String[] {
- "HelloWorld"
- })); }
-
- @Override
- public ApplicationSpec fromYaml(InputStream inputstream) {
- return (ApplicationSpec) new Yaml().load(inputstream);
- // return data;
- }
-
- public static void main(String[] args) {
- DumperOptions options = new DumperOptions();
- options.setPrettyFlow(true);
-
- Yaml yaml = new Yaml(options);
- HelloworldAppSpec data = new HelloworldAppSpec();
- AppConfig appConfig = new AppConfig();
- appConfig.setValue("k1", "v1");
- data.setAppConfig(appConfig);
- data.setAppName("testApp");
- data.setAppMasterPackageUri(
- "/Users/kgopalak/Documents/projects/incubator-helix/helix-provisioning/helix-provisioning-0.7.1-incubating-SNAPSHOT-pkg.tar");
- HashMap<String, Map<String, String>> serviceConfigMap = new HashMap<String, Map<String, String>>();
- serviceConfigMap.put("HelloWorld", new HashMap<String, String>());
- serviceConfigMap.get("HelloWorld").put("k1", "v1");
- data.setServiceConfigMap(serviceConfigMap);
- HashMap<String, String> serviceMainClassMap = new HashMap<String, String>();
- serviceMainClassMap.put("HelloWorld", HelloWorldService.class.getCanonicalName());
- data.setServiceMainClassMap(serviceMainClassMap);
- HashMap<String, String> servicePackageURIMap = new HashMap<String, String>();
- servicePackageURIMap
- .put(
- "HelloWorld",
- "/Users/kgopalak/Documents/projects/incubator-helix/helix-provisioning/helix-provisioning-0.7.1-incubating-SNAPSHOT-pkg.tar");
- data.setServicePackageURIMap(servicePackageURIMap);
- data.setServices(Arrays.asList(new String[] {
- "HelloWorld"
- }));
- String dump = yaml.dump(data);
- System.out.println(dump);
-
- InputStream resourceAsStream = ClassLoader.getSystemClassLoader().getResourceAsStream("hello_world_app_spec.yaml");
- HelloworldAppSpec load = yaml.loadAs(resourceAsStream,HelloworldAppSpec.class);
- String dumpnew = yaml.dump(load);
- System.out.println(dumpnew.equals(dump));
-
- System.out.println("==================================");
- System.out.println(dumpnew);
-
- }
-}
http://git-wip-us.apache.org/repos/asf/helix/blob/71187176/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldService.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldService.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldService.java
deleted file mode 100644
index f65fd5d..0000000
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldService.java
+++ /dev/null
@@ -1,40 +0,0 @@
-package org.apache.helix.provisioning.yarn.example;
-
-import org.apache.helix.HelixConnection;
-import org.apache.helix.api.accessor.ResourceAccessor;
-import org.apache.helix.api.config.UserConfig;
-import org.apache.helix.api.id.ClusterId;
-import org.apache.helix.api.id.ParticipantId;
-import org.apache.helix.api.id.ResourceId;
-import org.apache.helix.api.id.StateModelDefId;
-import org.apache.helix.manager.zk.AbstractParticipantService;
-import org.apache.log4j.Logger;
-
-public class HelloWorldService extends AbstractParticipantService {
-
- private static Logger LOG = Logger.getLogger(AbstractParticipantService.class);
-
- static String SERVICE_NAME = "HelloWorld";
-
- public HelloWorldService(HelixConnection connection, ClusterId clusterId,
- ParticipantId participantId) {
- super(connection, clusterId, participantId);
- }
-
- /**
- * init method to setup appropriate call back handlers.
- */
- @Override
- public void init() {
- ClusterId clusterId = getClusterId();
- ResourceAccessor resourceAccessor = getConnection().createResourceAccessor(clusterId);
- UserConfig serviceConfig = resourceAccessor.readUserConfig(ResourceId.from(SERVICE_NAME));
- LOG.info("Starting service:" + SERVICE_NAME + " with configuration:" + serviceConfig);
-
- HelloWorldStateModelFactory stateModelFactory = new HelloWorldStateModelFactory();
- getParticipant().getStateMachineEngine().registerStateModelFactory(
- StateModelDefId.from("StatelessService"), stateModelFactory);
-
- }
-
-}
http://git-wip-us.apache.org/repos/asf/helix/blob/71187176/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldStateModel.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldStateModel.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldStateModel.java
deleted file mode 100644
index 95f66e3..0000000
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldStateModel.java
+++ /dev/null
@@ -1,29 +0,0 @@
-package org.apache.helix.provisioning.yarn.example;
-
-import org.apache.helix.NotificationContext;
-import org.apache.helix.api.id.PartitionId;
-import org.apache.helix.model.Message;
-import org.apache.helix.participant.statemachine.StateModel;
-import org.apache.helix.participant.statemachine.StateModelInfo;
-import org.apache.helix.participant.statemachine.Transition;
-
-@StateModelInfo(initialState = "OFFLINE", states = { "OFFLINE", "ONLINE",
- "ERROR" })
-public class HelloWorldStateModel extends StateModel {
-
- public HelloWorldStateModel(PartitionId partitionId) {
- // TODO Auto-generated constructor stub
- }
-
- @Transition(to = "ONLINE", from = "OFFLINE")
- public void onBecomeOnlineFromOffline(Message message,
- NotificationContext context) throws Exception {
- System.out.println("Started HelloWorld service");
- }
-
- @Transition(to = "OFFLINE", from = "ONLINE")
- public void onBecomeOfflineFromOnline(Message message,
- NotificationContext context) throws InterruptedException {
- System.out.println("Stopped HelloWorld service");
- }
-}
http://git-wip-us.apache.org/repos/asf/helix/blob/71187176/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldStateModelFactory.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldStateModelFactory.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldStateModelFactory.java
deleted file mode 100644
index 850cc19..0000000
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldStateModelFactory.java
+++ /dev/null
@@ -1,12 +0,0 @@
-package org.apache.helix.provisioning.yarn.example;
-
-import org.apache.helix.api.id.PartitionId;
-import org.apache.helix.participant.statemachine.HelixStateModelFactory;
-import org.apache.helix.participant.statemachine.StateModel;
-
-public class HelloWorldStateModelFactory extends HelixStateModelFactory<StateModel> {
- @Override
- public StateModel createNewStateModel(PartitionId partitionId) {
- return new HelloWorldStateModel(partitionId);
- }
-}
http://git-wip-us.apache.org/repos/asf/helix/blob/71187176/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/example/HelloworldAppSpec.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/example/HelloworldAppSpec.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/example/HelloworldAppSpec.java
deleted file mode 100644
index e22c7b2..0000000
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/example/HelloworldAppSpec.java
+++ /dev/null
@@ -1,138 +0,0 @@
-package org.apache.helix.provisioning.yarn.example;
-
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.helix.api.Scope;
-import org.apache.helix.api.config.ParticipantConfig;
-import org.apache.helix.api.config.ResourceConfig;
-import org.apache.helix.api.config.ResourceConfig.Builder;
-import org.apache.helix.api.config.UserConfig;
-import org.apache.helix.api.id.ParticipantId;
-import org.apache.helix.api.id.ResourceId;
-import org.apache.helix.provisioning.yarn.AppConfig;
-import org.apache.helix.provisioning.yarn.ApplicationSpec;
-import org.apache.helix.provisioning.yarn.ServiceConfig;
-import org.apache.helix.provisioning.yarn.TaskConfig;
-
-public class HelloworldAppSpec implements ApplicationSpec {
-
- public String _appName;
-
- public AppConfig _appConfig;
-
- public List<String> _services;
-
- private String _appMasterPackageUri;
-
- private Map<String, String> _servicePackageURIMap;
-
- private Map<String, String> _serviceMainClassMap;
-
- private Map<String, Map<String, String>> _serviceConfigMap;
-
- private List<TaskConfig> _taskConfigs;
-
- public AppConfig getAppConfig() {
- return _appConfig;
- }
-
- public void setAppConfig(AppConfig appConfig) {
- _appConfig = appConfig;
- }
-
- public String getAppMasterPackageUri() {
- return _appMasterPackageUri;
- }
-
- public void setAppMasterPackageUri(String appMasterPackageUri) {
- _appMasterPackageUri = appMasterPackageUri;
- }
-
- public Map<String, String> getServicePackageURIMap() {
- return _servicePackageURIMap;
- }
-
- public void setServicePackageURIMap(Map<String, String> servicePackageURIMap) {
- _servicePackageURIMap = servicePackageURIMap;
- }
-
- public Map<String, String> getServiceMainClassMap() {
- return _serviceMainClassMap;
- }
-
- public void setServiceMainClassMap(Map<String, String> serviceMainClassMap) {
- _serviceMainClassMap = serviceMainClassMap;
- }
-
- public Map<String, Map<String, String>> getServiceConfigMap() {
- return _serviceConfigMap;
- }
-
- public void setServiceConfigMap(Map<String, Map<String, String>> serviceConfigMap) {
- _serviceConfigMap = serviceConfigMap;
- }
-
- public void setAppName(String appName) {
- _appName = appName;
- }
-
- public void setServices(List<String> services) {
- _services = services;
- }
-
- public void setTaskConfigs(List<TaskConfig> taskConfigs) {
- _taskConfigs = taskConfigs;
- }
-
- @Override
- public String getAppName() {
- return _appName;
- }
-
- @Override
- public AppConfig getConfig() {
- return _appConfig;
- }
-
- @Override
- public List<String> getServices() {
- return _services;
- }
-
- @Override
- public URI getAppMasterPackage() {
- try {
- return new URI(_appMasterPackageUri);
- } catch (URISyntaxException e) {
- return null;
- }
- }
-
- @Override
- public URI getServicePackage(String serviceName) {
- try {
- return new URI(_servicePackageURIMap.get(serviceName));
- } catch (URISyntaxException e) {
- return null;
- }
- }
-
- @Override
- public String getServiceMainClass(String service) {
- return _serviceMainClassMap.get(service);
- }
-
- @Override
- public ServiceConfig getServiceConfig(String serviceName) {
- return new ServiceConfig(Scope.resource(ResourceId.from(serviceName)));
- }
-
- @Override
- public List<TaskConfig> getTaskConfigs() {
- return _taskConfigs;
- }
-
-}
http://git-wip-us.apache.org/repos/asf/helix/blob/71187176/helix-provisioning/src/main/resources/hello_world_app_spec.yaml
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/resources/hello_world_app_spec.yaml b/helix-provisioning/src/main/resources/hello_world_app_spec.yaml
deleted file mode 100644
index 1d4f1b7..0000000
--- a/helix-provisioning/src/main/resources/hello_world_app_spec.yaml
+++ /dev/null
@@ -1,24 +0,0 @@
-!!org.apache.helix.provisioning.yarn.example.HelloworldAppSpec
-appConfig:
- config: {
- k1: v1
- }
-appMasterPackageUri: 'file:///Users/kgopalak/Documents/projects/incubator-helix/helix-provisioning/target/helix-provisioning-0.7.1-incubating-SNAPSHOT-pkg.tar'
-appName: testApp
-serviceConfigMap:
- HelloWorld: {
- num_containers: 3,
- memory: 1024
- }
-serviceMainClassMap: {
- HelloWorld: org.apache.helix.provisioning.yarn.example.HelloWorldService
-}
-servicePackageURIMap: {
- HelloWorld: 'file:///Users/kgopalak/Documents/projects/incubator-helix/helix-provisioning/target/helix-provisioning-0.7.1-incubating-SNAPSHOT-pkg.tar'
-}
-services: [
- HelloWorld]
-taskConfigs: null
-
-
-
http://git-wip-us.apache.org/repos/asf/helix/blob/71187176/recipes/pom.xml
----------------------------------------------------------------------
diff --git a/recipes/pom.xml b/recipes/pom.xml
index 70dd2bd..7d9952a 100644
--- a/recipes/pom.xml
+++ b/recipes/pom.xml
@@ -36,6 +36,7 @@ under the License.
<module>user-defined-rebalancer</module>
<module>task-execution</module>
<module>service-discovery</module>
+ <module>provisioning</module>
</modules>
<build>
http://git-wip-us.apache.org/repos/asf/helix/blob/71187176/recipes/provisioning/pom.xml
----------------------------------------------------------------------
diff --git a/recipes/provisioning/pom.xml b/recipes/provisioning/pom.xml
new file mode 100644
index 0000000..dc5277b
--- /dev/null
+++ b/recipes/provisioning/pom.xml
@@ -0,0 +1,50 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied. See the License for the
+specific language governing permissions and limitations
+under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <groupId>org.apache.helix.recipes</groupId>
+ <artifactId>recipes</artifactId>
+ <version>0.7.1-incubating-SNAPSHOT</version>
+ </parent>
+ <groupId>org.apache.helix.recipes.provisioning</groupId>
+ <artifactId>provisioning</artifactId>
+ <packaging>pom</packaging>
+ <name>Apache Helix :: Recipes :: Provisioning</name>
+
+ <modules>
+ <module>yarn</module>
+ </modules>
+
+ <build>
+ <pluginManagement>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-deploy-plugin</artifactId>
+ <configuration>
+ <skip>true</skip>
+ </configuration>
+ </plugin>
+ </plugins>
+ </pluginManagement>
+ </build>
+
+</project>
http://git-wip-us.apache.org/repos/asf/helix/blob/71187176/recipes/provisioning/yarn/helloworld/pom.xml
----------------------------------------------------------------------
diff --git a/recipes/provisioning/yarn/helloworld/pom.xml b/recipes/provisioning/yarn/helloworld/pom.xml
new file mode 100644
index 0000000..bc6aca2
--- /dev/null
+++ b/recipes/provisioning/yarn/helloworld/pom.xml
@@ -0,0 +1,159 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied. See the License for the
+specific language governing permissions and limitations
+under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+
+ <parent>
+ <groupId>org.apache.helix.recipes.provisioning.yarn</groupId>
+ <artifactId>yarn</artifactId>
+ <version>0.7.1-incubating-SNAPSHOT</version>
+ </parent>
+
+ <artifactId>helloworld</artifactId>
+ <packaging>bundle</packaging>
+ <name>Apache Helix :: Recipes :: Provisioning :: YARN :: Hello World</name>
+
+ <properties>
+ <osgi.import>
+ org.apache.helix*,
+ org.apache.log4j,
+ *
+ </osgi.import>
+ <osgi.export>org.apache.helix.recipes.provisioning.yarn.helloworld*;version="${project.version};-noimport:=true</osgi.export>
+ </properties>
+
+ <dependencies>
+ <dependency>
+ <groupId>org.testng</groupId>
+ <artifactId>testng</artifactId>
+ <version>6.0.1</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.helix</groupId>
+ <artifactId>helix-core</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.helix</groupId>
+ <artifactId>helix-provisioning</artifactId>
+ <version>0.7.1-incubating-SNAPSHOT</version>
+ </dependency>
+ <dependency>
+ <groupId>log4j</groupId>
+ <artifactId>log4j</artifactId>
+ <exclusions>
+ <exclusion>
+ <groupId>javax.mail</groupId>
+ <artifactId>mail</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>javax.jms</groupId>
+ <artifactId>jms</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>com.sun.jdmk</groupId>
+ <artifactId>jmxtools</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>com.sun.jmx</groupId>
+ <artifactId>jmxri</artifactId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+ </dependencies>
+ <build>
+ <pluginManagement>
+ <plugins>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>appassembler-maven-plugin</artifactId>
+ <configuration>
+ <!-- Set the target configuration directory to be used in the bin scripts -->
+ <!-- <configurationDirectory>conf</configurationDirectory> -->
+ <!-- Copy the contents from "/src/main/config" to the target configuration
+ directory in the assembled application -->
+ <!-- <copyConfigurationDirectory>true</copyConfigurationDirectory> -->
+ <!-- Include the target configuration directory in the beginning of
+ the classpath declaration in the bin scripts -->
+ <includeConfigurationDirectoryInClasspath>true</includeConfigurationDirectoryInClasspath>
+ <assembleDirectory>${project.build.directory}/${project.artifactId}-pkg</assembleDirectory>
+ <!-- Extra JVM arguments that will be included in the bin scripts -->
+ <extraJvmArguments>-Xms512m -Xmx512m</extraJvmArguments>
+ <!-- Generate bin scripts for windows and unix pr default -->
+ <platforms>
+ <platform>windows</platform>
+ <platform>unix</platform>
+ </platforms>
+ </configuration>
+ <executions>
+ <execution>
+ <phase>package</phase>
+ <goals>
+ <goal>assemble</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.rat</groupId>
+ <artifactId>apache-rat-plugin</artifactId>
+ <configuration>
+ <excludes combine.children="append">
+ </excludes>
+ </configuration>
+ </plugin>
+ </plugins>
+ </pluginManagement>
+ <plugins>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>appassembler-maven-plugin</artifactId>
+ <configuration>
+ <programs>
+ <program>
+ <mainClass>org.apache.helix.provisioning.yarn.Client</mainClass>
+ <name>yarn-job-launcher</name>
+ </program>
+ <program>
+ <mainClass>org.apache.helix.provisioning.yarn.AppLauncher</mainClass>
+ <name>app-launcher</name>
+ </program>
+ </programs>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-assembly-plugin</artifactId>
+ <configuration>
+ <descriptors>
+ <descriptor>src/assemble/assembly.xml</descriptor>
+ </descriptors>
+ </configuration>
+ <executions>
+ <execution>
+ <phase>package</phase>
+ <goals>
+ <goal>single</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+</project>
http://git-wip-us.apache.org/repos/asf/helix/blob/71187176/recipes/provisioning/yarn/helloworld/src/assemble/assembly.xml
----------------------------------------------------------------------
diff --git a/recipes/provisioning/yarn/helloworld/src/assemble/assembly.xml b/recipes/provisioning/yarn/helloworld/src/assemble/assembly.xml
new file mode 100644
index 0000000..c2d08a1
--- /dev/null
+++ b/recipes/provisioning/yarn/helloworld/src/assemble/assembly.xml
@@ -0,0 +1,60 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied. See the License for the
+specific language governing permissions and limitations
+under the License.
+-->
+<assembly>
+ <id>pkg</id>
+ <formats>
+ <format>tar</format>
+ </formats>
+ <fileSets>
+ <fileSet>
+ <directory>${project.build.directory}/${project.artifactId}-pkg/bin</directory>
+ <outputDirectory>bin</outputDirectory>
+ <lineEnding>unix</lineEnding>
+ <fileMode>0755</fileMode>
+ <directoryMode>0755</directoryMode>
+ </fileSet>
+ <fileSet>
+ <directory>${project.build.directory}/${project.artifactId}-pkg/repo/</directory>
+ <outputDirectory>repo</outputDirectory>
+ <fileMode>0755</fileMode>
+ <directoryMode>0755</directoryMode>
+ <excludes>
+ <exclude>**/*.xml</exclude>
+ </excludes>
+ </fileSet>
+ <fileSet>
+ <directory>${project.build.directory}/${project.artifactId}-pkg/conf</directory>
+ <outputDirectory>conf</outputDirectory>
+ <lineEnding>unix</lineEnding>
+ <fileMode>0755</fileMode>
+ <directoryMode>0755</directoryMode>
+ </fileSet>
+ <fileSet>
+ <directory>${project.basedir}</directory>
+ <outputDirectory>/</outputDirectory>
+ <includes>
+ <include>LICENSE</include>
+ <include>NOTICE</include>
+ <include>DISCLAIMER</include>
+ </includes>
+ <fileMode>0755</fileMode>
+ </fileSet>
+ </fileSets>
+</assembly>
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/helix/blob/71187176/recipes/provisioning/yarn/helloworld/src/main/config/log4j.properties
----------------------------------------------------------------------
diff --git a/recipes/provisioning/yarn/helloworld/src/main/config/log4j.properties b/recipes/provisioning/yarn/helloworld/src/main/config/log4j.properties
new file mode 100644
index 0000000..91fac03
--- /dev/null
+++ b/recipes/provisioning/yarn/helloworld/src/main/config/log4j.properties
@@ -0,0 +1,31 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+##
+
+# Set root logger level to DEBUG and its only appender to A1.
+log4j.rootLogger=DEBUG,A1
+
+# A1 is set to be a ConsoleAppender.
+log4j.appender.A1=org.apache.log4j.ConsoleAppender
+
+# A1 uses PatternLayout.
+log4j.appender.A1.layout=org.apache.log4j.PatternLayout
+log4j.appender.A1.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n
+
+log4j.logger.org.I0Itec=ERROR
+log4j.logger.org.apache=ERROR
http://git-wip-us.apache.org/repos/asf/helix/blob/71187176/recipes/provisioning/yarn/helloworld/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWordAppSpecFactory.java
----------------------------------------------------------------------
diff --git a/recipes/provisioning/yarn/helloworld/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWordAppSpecFactory.java b/recipes/provisioning/yarn/helloworld/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWordAppSpecFactory.java
new file mode 100644
index 0000000..2e1ad41
--- /dev/null
+++ b/recipes/provisioning/yarn/helloworld/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWordAppSpecFactory.java
@@ -0,0 +1,92 @@
+package org.apache.helix.provisioning.yarn.example;
+
+import java.io.File;
+import java.io.InputStream;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.helix.provisioning.yarn.AppConfig;
+import org.apache.helix.provisioning.yarn.ApplicationSpec;
+import org.apache.helix.provisioning.yarn.ApplicationSpecFactory;
+import org.apache.helix.provisioning.yarn.example.HelloWorldService;
+import org.apache.helix.provisioning.yarn.example.HelloworldAppSpec;
+import org.yaml.snakeyaml.DumperOptions;
+import org.yaml.snakeyaml.Yaml;
+
+public class HelloWordAppSpecFactory implements ApplicationSpecFactory {
+
+ static HelloworldAppSpec data;
+
+ static {
+ HelloworldAppSpec data = new HelloworldAppSpec();
+ AppConfig appConfig = new AppConfig();
+ appConfig.setValue("k1", "v1");
+ data.setAppConfig(appConfig);
+ data.setAppName("testApp");
+ data.setAppMasterPackageUri(
+ "/Users/kgopalak/Documents/projects/incubator-helix/helix-provisioning/helix-provisioning-0.7.1-incubating-SNAPSHOT-pkg.tar");
+ HashMap<String, Map<String, String>> serviceConfigMap = new HashMap<String, Map<String, String>>();
+ serviceConfigMap.put("HelloWorld", new HashMap<String, String>());
+ serviceConfigMap.get("HelloWorld").put("k1", "v1");
+ data.setServiceConfigMap(serviceConfigMap);
+ HashMap<String, String> serviceMainClassMap = new HashMap<String, String>();
+ serviceMainClassMap.put("HelloWorld", HelloWorldService.class.getCanonicalName());
+ data.setServiceMainClassMap(serviceMainClassMap);
+ HashMap<String, String> servicePackageURIMap = new HashMap<String, String>();
+ servicePackageURIMap
+ .put(
+ "HelloWorld",
+ "/Users/kgopalak/Documents/projects/incubator-helix/helix-provisioning/helix-provisioning-0.7.1-incubating-SNAPSHOT-pkg.tar");
+ data.setServicePackageURIMap(servicePackageURIMap);
+ data.setServices(Arrays.asList(new String[] {
+ "HelloWorld"
+ })); }
+
+ @Override
+ public ApplicationSpec fromYaml(InputStream inputstream) {
+ return (ApplicationSpec) new Yaml().load(inputstream);
+ // return data;
+ }
+
+ public static void main(String[] args) {
+ DumperOptions options = new DumperOptions();
+ options.setPrettyFlow(true);
+
+ Yaml yaml = new Yaml(options);
+ HelloworldAppSpec data = new HelloworldAppSpec();
+ AppConfig appConfig = new AppConfig();
+ appConfig.setValue("k1", "v1");
+ data.setAppConfig(appConfig);
+ data.setAppName("testApp");
+ data.setAppMasterPackageUri(
+ "/Users/kgopalak/Documents/projects/incubator-helix/helix-provisioning/helix-provisioning-0.7.1-incubating-SNAPSHOT-pkg.tar");
+ HashMap<String, Map<String, String>> serviceConfigMap = new HashMap<String, Map<String, String>>();
+ serviceConfigMap.put("HelloWorld", new HashMap<String, String>());
+ serviceConfigMap.get("HelloWorld").put("k1", "v1");
+ data.setServiceConfigMap(serviceConfigMap);
+ HashMap<String, String> serviceMainClassMap = new HashMap<String, String>();
+ serviceMainClassMap.put("HelloWorld", HelloWorldService.class.getCanonicalName());
+ data.setServiceMainClassMap(serviceMainClassMap);
+ HashMap<String, String> servicePackageURIMap = new HashMap<String, String>();
+ servicePackageURIMap
+ .put(
+ "HelloWorld",
+ "/Users/kgopalak/Documents/projects/incubator-helix/helix-provisioning/helix-provisioning-0.7.1-incubating-SNAPSHOT-pkg.tar");
+ data.setServicePackageURIMap(servicePackageURIMap);
+ data.setServices(Arrays.asList(new String[] {
+ "HelloWorld"
+ }));
+ String dump = yaml.dump(data);
+ System.out.println(dump);
+
+ InputStream resourceAsStream = ClassLoader.getSystemClassLoader().getResourceAsStream("hello_world_app_spec.yaml");
+ HelloworldAppSpec load = yaml.loadAs(resourceAsStream,HelloworldAppSpec.class);
+ String dumpnew = yaml.dump(load);
+ System.out.println(dumpnew.equals(dump));
+
+ System.out.println("==================================");
+ System.out.println(dumpnew);
+
+ }
+}
http://git-wip-us.apache.org/repos/asf/helix/blob/71187176/recipes/provisioning/yarn/helloworld/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldService.java
----------------------------------------------------------------------
diff --git a/recipes/provisioning/yarn/helloworld/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldService.java b/recipes/provisioning/yarn/helloworld/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldService.java
new file mode 100644
index 0000000..8999817
--- /dev/null
+++ b/recipes/provisioning/yarn/helloworld/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldService.java
@@ -0,0 +1,41 @@
+package org.apache.helix.provisioning.yarn.example;
+
+import org.apache.helix.HelixConnection;
+import org.apache.helix.api.accessor.ResourceAccessor;
+import org.apache.helix.api.config.UserConfig;
+import org.apache.helix.api.id.ClusterId;
+import org.apache.helix.api.id.ParticipantId;
+import org.apache.helix.api.id.ResourceId;
+import org.apache.helix.api.id.StateModelDefId;
+import org.apache.helix.manager.zk.AbstractParticipantService;
+import org.apache.helix.provisioning.yarn.example.HelloWorldStateModelFactory;
+import org.apache.log4j.Logger;
+
+public class HelloWorldService extends AbstractParticipantService {
+
+ private static Logger LOG = Logger.getLogger(AbstractParticipantService.class);
+
+ static String SERVICE_NAME = "HelloWorld";
+
+ public HelloWorldService(HelixConnection connection, ClusterId clusterId,
+ ParticipantId participantId) {
+ super(connection, clusterId, participantId);
+ }
+
+ /**
+ * init method to setup appropriate call back handlers.
+ */
+ @Override
+ public void init() {
+ ClusterId clusterId = getClusterId();
+ ResourceAccessor resourceAccessor = getConnection().createResourceAccessor(clusterId);
+ UserConfig serviceConfig = resourceAccessor.readUserConfig(ResourceId.from(SERVICE_NAME));
+ LOG.info("Starting service:" + SERVICE_NAME + " with configuration:" + serviceConfig);
+
+ HelloWorldStateModelFactory stateModelFactory = new HelloWorldStateModelFactory();
+ getParticipant().getStateMachineEngine().registerStateModelFactory(
+ StateModelDefId.from("StatelessService"), stateModelFactory);
+
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/helix/blob/71187176/recipes/provisioning/yarn/helloworld/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldStateModel.java
----------------------------------------------------------------------
diff --git a/recipes/provisioning/yarn/helloworld/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldStateModel.java b/recipes/provisioning/yarn/helloworld/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldStateModel.java
new file mode 100644
index 0000000..95f66e3
--- /dev/null
+++ b/recipes/provisioning/yarn/helloworld/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldStateModel.java
@@ -0,0 +1,29 @@
+package org.apache.helix.provisioning.yarn.example;
+
+import org.apache.helix.NotificationContext;
+import org.apache.helix.api.id.PartitionId;
+import org.apache.helix.model.Message;
+import org.apache.helix.participant.statemachine.StateModel;
+import org.apache.helix.participant.statemachine.StateModelInfo;
+import org.apache.helix.participant.statemachine.Transition;
+
+@StateModelInfo(initialState = "OFFLINE", states = { "OFFLINE", "ONLINE",
+ "ERROR" })
+public class HelloWorldStateModel extends StateModel {
+
+ public HelloWorldStateModel(PartitionId partitionId) {
+ // TODO Auto-generated constructor stub
+ }
+
+ @Transition(to = "ONLINE", from = "OFFLINE")
+ public void onBecomeOnlineFromOffline(Message message,
+ NotificationContext context) throws Exception {
+ System.out.println("Started HelloWorld service");
+ }
+
+ @Transition(to = "OFFLINE", from = "ONLINE")
+ public void onBecomeOfflineFromOnline(Message message,
+ NotificationContext context) throws InterruptedException {
+ System.out.println("Stopped HelloWorld service");
+ }
+}
http://git-wip-us.apache.org/repos/asf/helix/blob/71187176/recipes/provisioning/yarn/helloworld/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldStateModelFactory.java
----------------------------------------------------------------------
diff --git a/recipes/provisioning/yarn/helloworld/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldStateModelFactory.java b/recipes/provisioning/yarn/helloworld/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldStateModelFactory.java
new file mode 100644
index 0000000..2766f6d
--- /dev/null
+++ b/recipes/provisioning/yarn/helloworld/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldStateModelFactory.java
@@ -0,0 +1,13 @@
+package org.apache.helix.provisioning.yarn.example;
+
+import org.apache.helix.api.id.PartitionId;
+import org.apache.helix.participant.statemachine.HelixStateModelFactory;
+import org.apache.helix.participant.statemachine.StateModel;
+import org.apache.helix.provisioning.yarn.example.HelloWorldStateModel;
+
+public class HelloWorldStateModelFactory extends HelixStateModelFactory<StateModel> {
+ @Override
+ public StateModel createNewStateModel(PartitionId partitionId) {
+ return new HelloWorldStateModel(partitionId);
+ }
+}
http://git-wip-us.apache.org/repos/asf/helix/blob/71187176/recipes/provisioning/yarn/helloworld/src/main/java/org/apache/helix/provisioning/yarn/example/HelloworldAppSpec.java
----------------------------------------------------------------------
diff --git a/recipes/provisioning/yarn/helloworld/src/main/java/org/apache/helix/provisioning/yarn/example/HelloworldAppSpec.java b/recipes/provisioning/yarn/helloworld/src/main/java/org/apache/helix/provisioning/yarn/example/HelloworldAppSpec.java
new file mode 100644
index 0000000..e22c7b2
--- /dev/null
+++ b/recipes/provisioning/yarn/helloworld/src/main/java/org/apache/helix/provisioning/yarn/example/HelloworldAppSpec.java
@@ -0,0 +1,138 @@
+package org.apache.helix.provisioning.yarn.example;
+
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.helix.api.Scope;
+import org.apache.helix.api.config.ParticipantConfig;
+import org.apache.helix.api.config.ResourceConfig;
+import org.apache.helix.api.config.ResourceConfig.Builder;
+import org.apache.helix.api.config.UserConfig;
+import org.apache.helix.api.id.ParticipantId;
+import org.apache.helix.api.id.ResourceId;
+import org.apache.helix.provisioning.yarn.AppConfig;
+import org.apache.helix.provisioning.yarn.ApplicationSpec;
+import org.apache.helix.provisioning.yarn.ServiceConfig;
+import org.apache.helix.provisioning.yarn.TaskConfig;
+
+public class HelloworldAppSpec implements ApplicationSpec {
+
+ public String _appName;
+
+ public AppConfig _appConfig;
+
+ public List<String> _services;
+
+ private String _appMasterPackageUri;
+
+ private Map<String, String> _servicePackageURIMap;
+
+ private Map<String, String> _serviceMainClassMap;
+
+ private Map<String, Map<String, String>> _serviceConfigMap;
+
+ private List<TaskConfig> _taskConfigs;
+
+ public AppConfig getAppConfig() {
+ return _appConfig;
+ }
+
+ public void setAppConfig(AppConfig appConfig) {
+ _appConfig = appConfig;
+ }
+
+ public String getAppMasterPackageUri() {
+ return _appMasterPackageUri;
+ }
+
+ public void setAppMasterPackageUri(String appMasterPackageUri) {
+ _appMasterPackageUri = appMasterPackageUri;
+ }
+
+ public Map<String, String> getServicePackageURIMap() {
+ return _servicePackageURIMap;
+ }
+
+ public void setServicePackageURIMap(Map<String, String> servicePackageURIMap) {
+ _servicePackageURIMap = servicePackageURIMap;
+ }
+
+ public Map<String, String> getServiceMainClassMap() {
+ return _serviceMainClassMap;
+ }
+
+ public void setServiceMainClassMap(Map<String, String> serviceMainClassMap) {
+ _serviceMainClassMap = serviceMainClassMap;
+ }
+
+ public Map<String, Map<String, String>> getServiceConfigMap() {
+ return _serviceConfigMap;
+ }
+
+ public void setServiceConfigMap(Map<String, Map<String, String>> serviceConfigMap) {
+ _serviceConfigMap = serviceConfigMap;
+ }
+
+ public void setAppName(String appName) {
+ _appName = appName;
+ }
+
+ public void setServices(List<String> services) {
+ _services = services;
+ }
+
+ public void setTaskConfigs(List<TaskConfig> taskConfigs) {
+ _taskConfigs = taskConfigs;
+ }
+
+ @Override
+ public String getAppName() {
+ return _appName;
+ }
+
+ @Override
+ public AppConfig getConfig() {
+ return _appConfig;
+ }
+
+ @Override
+ public List<String> getServices() {
+ return _services;
+ }
+
+ @Override
+ public URI getAppMasterPackage() {
+ try {
+ return new URI(_appMasterPackageUri);
+ } catch (URISyntaxException e) {
+ return null;
+ }
+ }
+
+ @Override
+ public URI getServicePackage(String serviceName) {
+ try {
+ return new URI(_servicePackageURIMap.get(serviceName));
+ } catch (URISyntaxException e) {
+ return null;
+ }
+ }
+
+ @Override
+ public String getServiceMainClass(String service) {
+ return _serviceMainClassMap.get(service);
+ }
+
+ @Override
+ public ServiceConfig getServiceConfig(String serviceName) {
+ return new ServiceConfig(Scope.resource(ResourceId.from(serviceName)));
+ }
+
+ @Override
+ public List<TaskConfig> getTaskConfigs() {
+ return _taskConfigs;
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/helix/blob/71187176/recipes/provisioning/yarn/helloworld/src/main/resources/hello_world_app_spec.yaml
----------------------------------------------------------------------
diff --git a/recipes/provisioning/yarn/helloworld/src/main/resources/hello_world_app_spec.yaml b/recipes/provisioning/yarn/helloworld/src/main/resources/hello_world_app_spec.yaml
new file mode 100644
index 0000000..535bece
--- /dev/null
+++ b/recipes/provisioning/yarn/helloworld/src/main/resources/hello_world_app_spec.yaml
@@ -0,0 +1,24 @@
+!!org.apache.helix.provisioning.yarn.example.HelloworldAppSpec
+appConfig:
+ config: {
+ k1: v1
+ }
+appMasterPackageUri: 'file:///Users/kbiscuit/helix/incubator-helix/recipes/provisioning/yarn/helloworld/target/helloworld-0.7.1-incubating-SNAPSHOT-pkg.tar'
+appName: testApp
+serviceConfigMap:
+ HelloWorld: {
+ num_containers: 3,
+ memory: 1024
+ }
+serviceMainClassMap: {
+ HelloWorld: org.apache.helix.provisioning.yarn.example.HelloWorldService
+}
+servicePackageURIMap: {
+ HelloWorld: 'file:///Users/kbiscuit/helix/incubator-helix/recipes/provisioning/yarn/helloworld/target/helloworld-0.7.1-incubating-SNAPSHOT-pkg.tar'
+}
+services: [
+ HelloWorld]
+taskConfigs: null
+
+
+
http://git-wip-us.apache.org/repos/asf/helix/blob/71187176/recipes/provisioning/yarn/helloworld/src/test/conf/testng.xml
----------------------------------------------------------------------
diff --git a/recipes/provisioning/yarn/helloworld/src/test/conf/testng.xml b/recipes/provisioning/yarn/helloworld/src/test/conf/testng.xml
new file mode 100644
index 0000000..37bccf3
--- /dev/null
+++ b/recipes/provisioning/yarn/helloworld/src/test/conf/testng.xml
@@ -0,0 +1,27 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied. See the License for the
+specific language governing permissions and limitations
+under the License.
+-->
+<!DOCTYPE suite SYSTEM "http://testng.org/testng-1.0.dtd">
+<suite name="Suite" parallel="none">
+ <test name="Test" preserve-order="false">
+ <packages>
+ <package name="org.apache.helix.agent"/>
+ </packages>
+ </test>
+</suite>
http://git-wip-us.apache.org/repos/asf/helix/blob/71187176/recipes/provisioning/yarn/pom.xml
----------------------------------------------------------------------
diff --git a/recipes/provisioning/yarn/pom.xml b/recipes/provisioning/yarn/pom.xml
new file mode 100644
index 0000000..d557b2b
--- /dev/null
+++ b/recipes/provisioning/yarn/pom.xml
@@ -0,0 +1,50 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied. See the License for the
+specific language governing permissions and limitations
+under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <groupId>org.apache.helix.recipes.provisioning</groupId>
+ <artifactId>provisioning</artifactId>
+ <version>0.7.1-incubating-SNAPSHOT</version>
+ </parent>
+ <groupId>org.apache.helix.recipes.provisioning.yarn</groupId>
+ <artifactId>yarn</artifactId>
+ <packaging>pom</packaging>
+ <name>Apache Helix :: Recipes :: Provisioning :: YARN</name>
+
+ <modules>
+ <module>helloworld</module>
+ </modules>
+
+ <build>
+ <pluginManagement>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-deploy-plugin</artifactId>
+ <configuration>
+ <skip>true</skip>
+ </configuration>
+ </plugin>
+ </plugins>
+ </pluginManagement>
+ </build>
+
+</project>
[49/50] [abbrv] Merge remote-tracking branch
'origin/helix-provisioning'
Posted by ka...@apache.org.
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackSearchNode.java
----------------------------------------------------------------------
diff --cc helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackSearchNode.java
index 0000000,1ac8061..1ae635b
mode 000000,100644..100644
--- a/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackSearchNode.java
+++ b/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackSearchNode.java
@@@ -1,0 -1,62 +1,81 @@@
+ package org.apache.helix.controller.strategy.knapsack;
+
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements. See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership. The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License. You may obtain a copy of the License at
++ *
++ * http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing,
++ * software distributed under the License is distributed on an
++ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++ * KIND, either express or implied. See the License for the
++ * specific language governing permissions and limitations
++ * under the License.
++ */
++
+ /**
+ * Description of a knapsack element during the search process<br/>
+ * <br/>
+ * Based on the C++ knapsack solver in Google's or-tools package.
+ */
+ public interface KnapsackSearchNode {
+ /**
+ * Depth of the node in this search
+ * @return node depth
+ */
+ int depth();
+
+ /**
+ * The parent node in this search
+ * @return the node's immediate parent
+ */
+ KnapsackSearchNode parent();
+
+ /**
+ * The current node assignment
+ * @return KnapsackAssignment instance
+ */
+ KnapsackAssignment assignment();
+
+ /**
+ * The current profit with this node and search
+ * @return current profit
+ */
+ long currentProfit();
+
+ /**
+ * Set the current profit with this node and search
+ * @param profit current profit
+ */
+ void setCurrentProfit(long profit);
+
+ /**
+ * The maximum possible profit with this node and search
+ * @return profit upper bound
+ */
+ long profitUpperBound();
+
+ /**
+ * Set the maximum possible profit with this node and search
+ * @param profit profit upper bound
+ */
+ void setProfitUpperBound(long profit);
+
+ /**
+ * The next item given this node and search
+ * @return next item id
+ */
+ int nextItemId();
+
+ /**
+ * Set the next item given this node and search
+ * @param id next item id
+ */
+ void setNextItemId(int id);
+ }
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackSearchNodeImpl.java
----------------------------------------------------------------------
diff --cc helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackSearchNodeImpl.java
index 0000000,ea9cb98..aeba786
mode 000000,100644..100644
--- a/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackSearchNodeImpl.java
+++ b/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackSearchNodeImpl.java
@@@ -1,0 -1,77 +1,96 @@@
+ package org.apache.helix.controller.strategy.knapsack;
+
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements. See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership. The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License. You may obtain a copy of the License at
++ *
++ * http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing,
++ * software distributed under the License is distributed on an
++ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++ * KIND, either express or implied. See the License for the
++ * specific language governing permissions and limitations
++ * under the License.
++ */
++
+ /**
+ * Implementation of {@link KnapsackSearchNode}<br/>
+ * <br/>
+ * Based on the C++ knapsack solver in Google's or-tools package.
+ */
+ public class KnapsackSearchNodeImpl implements KnapsackSearchNode {
+ private static final int NO_SELECTION = -1;
+
+ private int _depth;
+ private KnapsackSearchNode _parent;
+ private KnapsackAssignment _assignment;
+ private long _currentProfit;
+ private long _profitUpperBound;
+ private int _nextItemId;
+
+ /**
+ * Initialize a search node
+ * @param parent the node's parent
+ * @param assignment the node's assignment
+ */
+ public KnapsackSearchNodeImpl(final KnapsackSearchNode parent, final KnapsackAssignment assignment) {
+ _depth = (parent == null) ? 0 : parent.depth() + 1;
+ _parent = parent;
+ _assignment = assignment;
+ _currentProfit = 0L;
+ _profitUpperBound = Long.MAX_VALUE;
+ _nextItemId = NO_SELECTION;
+ }
+
+ @Override
+ public int depth() {
+ return _depth;
+ }
+
+ @Override
+ public KnapsackSearchNode parent() {
+ return _parent;
+ }
+
+ @Override
+ public KnapsackAssignment assignment() {
+ return _assignment;
+ }
+
+ @Override
+ public long currentProfit() {
+ return _currentProfit;
+ }
+
+ @Override
+ public void setCurrentProfit(long profit) {
+ _currentProfit = profit;
+ }
+
+ @Override
+ public long profitUpperBound() {
+ return _profitUpperBound;
+ }
+
+ @Override
+ public void setProfitUpperBound(long profit) {
+ _profitUpperBound = profit;
+ }
+
+ @Override
+ public int nextItemId() {
+ return _nextItemId;
+ }
+
+ @Override
+ public void setNextItemId(int id) {
+ _nextItemId = id;
+ }
+
+ }
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackSearchPath.java
----------------------------------------------------------------------
diff --cc helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackSearchPath.java
index 0000000,d977143..012e9c0
mode 000000,100644..100644
--- a/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackSearchPath.java
+++ b/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackSearchPath.java
@@@ -1,0 -1,39 +1,58 @@@
+ package org.apache.helix.controller.strategy.knapsack;
+
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements. See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership. The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License. You may obtain a copy of the License at
++ *
++ * http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing,
++ * software distributed under the License is distributed on an
++ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++ * KIND, either express or implied. See the License for the
++ * specific language governing permissions and limitations
++ * under the License.
++ */
++
+ /**
+ * Construction of the path between search nodes in a knapsack<br/>
+ * <br/>
+ * Based on the C++ knapsack solver in Google's or-tools package.
+ */
+ public interface KnapsackSearchPath {
+ /**
+ * Initialize the path
+ */
+ void init();
+
+ /**
+ * Get the source node
+ * @return starting KnapsackSearchNode
+ */
+ KnapsackSearchNode from();
+
+ /**
+ * Get the intermediate node
+ * @return KnapsackSearchNode between source and destination
+ */
+ KnapsackSearchNode via();
+
+ /**
+ * Get the destination node
+ * @return terminating KnapsackSearchNode
+ */
+ KnapsackSearchNode to();
+
+ /**
+ * Get an ancestor of a given search node
+ * @param node the search node
+ * @param depth the depth of the ancestor
+ * @return the ancestor node
+ */
+ KnapsackSearchNode moveUpToDepth(final KnapsackSearchNode node, int depth);
+ }
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackSearchPathImpl.java
----------------------------------------------------------------------
diff --cc helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackSearchPathImpl.java
index 0000000,06a9ec7..1e02768
mode 000000,100644..100644
--- a/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackSearchPathImpl.java
+++ b/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackSearchPathImpl.java
@@@ -1,0 -1,65 +1,84 @@@
+ package org.apache.helix.controller.strategy.knapsack;
+
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements. See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership. The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License. You may obtain a copy of the License at
++ *
++ * http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing,
++ * software distributed under the License is distributed on an
++ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++ * KIND, either express or implied. See the License for the
++ * specific language governing permissions and limitations
++ * under the License.
++ */
++
+ /**
+ * Implementation of {@link KnapsackSearchPath}<br/>
+ * <br/>
+ * Based on the C++ knapsack solver in Google's or-tools package.
+ */
+ public class KnapsackSearchPathImpl implements KnapsackSearchPath {
+ private KnapsackSearchNode _from;
+ private KnapsackSearchNode _via;
+ private KnapsackSearchNode _to;
+
+ /**
+ * Create a search path between nodes in a knapsack
+ * @param from the source node
+ * @param to the destination node
+ */
+ public KnapsackSearchPathImpl(final KnapsackSearchNode from, final KnapsackSearchNode to) {
+ _from = from;
+ _via = null;
+ _to = to;
+ }
+
+ @Override
+ public void init() {
+ KnapsackSearchNode nodeFrom = moveUpToDepth(_from, _to.depth());
+ KnapsackSearchNode nodeTo = moveUpToDepth(_to, _from.depth());
+ if (nodeFrom.depth() != nodeTo.depth()) {
+ throw new RuntimeException("to and from depths do not match!");
+ }
+
+ // Find common parent
+ // TODO: check if basic equality is enough
+ while (nodeFrom != nodeTo) {
+ nodeFrom = nodeFrom.parent();
+ nodeTo = nodeTo.parent();
+ }
+ _via = nodeFrom;
+ }
+
+ @Override
+ public KnapsackSearchNode from() {
+ return _from;
+ }
+
+ @Override
+ public KnapsackSearchNode via() {
+ return _via;
+ }
+
+ @Override
+ public KnapsackSearchNode to() {
+ return _to;
+ }
+
+ @Override
+ public KnapsackSearchNode moveUpToDepth(KnapsackSearchNode node, int depth) {
+ KnapsackSearchNode currentNode = node;
+ while (currentNode.depth() > depth) {
+ currentNode = currentNode.parent();
+ }
+ return currentNode;
+ }
+
+ }
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackSolver.java
----------------------------------------------------------------------
diff --cc helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackSolver.java
index 0000000,832a470..624082b
mode 000000,100644..100644
--- a/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackSolver.java
+++ b/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackSolver.java
@@@ -1,0 -1,60 +1,79 @@@
+ package org.apache.helix.controller.strategy.knapsack;
+
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements. See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership. The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License. You may obtain a copy of the License at
++ *
++ * http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing,
++ * software distributed under the License is distributed on an
++ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++ * KIND, either express or implied. See the License for the
++ * specific language governing permissions and limitations
++ * under the License.
++ */
++
+ import java.util.ArrayList;
+
+ /**
+ * Interface for a factory of multidimensional 0-1 knapsack solvers that support reductions<br/>
+ * <br/>
+ * Based on the C++ knapsack solver in Google's or-tools package.
+ */
+ public interface KnapsackSolver {
+ /**
+ * Collection of supported algorithms
+ */
+ enum SolverType {
+ /**
+ * A solver that uses the branch-and-bound technique, supports multiple dimensions
+ */
+ KNAPSACK_MULTIDIMENSION_BRANCH_AND_BOUND_SOLVER
+ }
+
+ /**
+ * Initialize the solver
+ * @param profits profit for each element if selected
+ * @param weights cost of each element in each dimension
+ * @param capacities maximum total weight in each dimension
+ */
+ void init(final ArrayList<Long> profits, final ArrayList<ArrayList<Long>> weights,
+ final ArrayList<Long> capacities);
+
+ /**
+ * Solve the knapsack problem
+ * @return the approximated optimal weight
+ */
+ long solve();
+
+ /**
+ * Check if an element was selected in the optimal solution
+ * @param itemId the index of the element to check
+ * @return true if the item is present, false otherwise
+ */
+ boolean bestSolutionContains(int itemId);
+
+ /**
+ * Get the name of this solver
+ * @return solver name
+ */
+ String getName();
+
+ /**
+ * Check if a reduction should be used to prune paths early on
+ * @return true if reduction enabled, false otherwise
+ */
+ boolean useReduction();
+
+ /**
+ * Set whether a reduction should be used to prune paths early on
+ * @param useReduction true to enable, false to disable
+ */
+ void setUseReduction(boolean useReduction);
+ }
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackSolverImpl.java
----------------------------------------------------------------------
diff --cc helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackSolverImpl.java
index 0000000,eeab0b1..2d521f0
mode 000000,100644..100644
--- a/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackSolverImpl.java
+++ b/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackSolverImpl.java
@@@ -1,0 -1,191 +1,210 @@@
+ package org.apache.helix.controller.strategy.knapsack;
+
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements. See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership. The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License. You may obtain a copy of the License at
++ *
++ * http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing,
++ * software distributed under the License is distributed on an
++ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++ * KIND, either express or implied. See the License for the
++ * specific language governing permissions and limitations
++ * under the License.
++ */
++
+ import java.util.ArrayList;
+
+ /**
+ * Implementation of {@link KnapsackSolver}<br/>
+ * <br/>
+ * Based on the C++ knapsack solver in Google's or-tools package.
+ */
+ public class KnapsackSolverImpl implements KnapsackSolver {
+ private final BaseKnapsackSolver _solver;
+ private final ArrayList<Boolean> _knownValue;
+ private final ArrayList<Boolean> _bestSolution;
+ private final ArrayList<Integer> _mappingReducedItemId;
+ private boolean _isProblemSolved;
+ private long _additionalProfit;
+ private boolean _useReduction;
+
+ /**
+ * Initialize a generic knapsack solver
+ * @param solverName the name of the solver
+ */
+ public KnapsackSolverImpl(String solverName) {
+ _solver = new KnapsackGenericSolverImpl(solverName);
+ _knownValue = new ArrayList<Boolean>();
+ _bestSolution = new ArrayList<Boolean>();
+ _mappingReducedItemId = new ArrayList<Integer>();
+ _isProblemSolved = false;
+ _additionalProfit = 0L;
+ _useReduction = true;
+ }
+
+ /**
+ * Initialize a specified knapsack solver
+ * @param solverType the type of solver
+ * @param solverName the name of the solver
+ */
+ public KnapsackSolverImpl(SolverType solverType, String solverName) {
+ _knownValue = new ArrayList<Boolean>();
+ _bestSolution = new ArrayList<Boolean>();
+ _mappingReducedItemId = new ArrayList<Integer>();
+ _isProblemSolved = false;
+ _additionalProfit = 0L;
+ _useReduction = true;
+ BaseKnapsackSolver solver = null;
+ switch (solverType) {
+ case KNAPSACK_MULTIDIMENSION_BRANCH_AND_BOUND_SOLVER:
+ solver = new KnapsackGenericSolverImpl(solverName);
+ break;
+ default:
+ throw new RuntimeException("Solver " + solverType + " not supported");
+ }
+ _solver = solver;
+ }
+
+ @Override
+ public void init(ArrayList<Long> profits, ArrayList<ArrayList<Long>> weights,
+ ArrayList<Long> capacities) {
+ _additionalProfit = 0L;
+ _isProblemSolved = false;
+ _solver.init(profits, weights, capacities);
+ if (_useReduction) {
+ final int numItems = profits.size();
+ final int numReducedItems = reduceProblem(numItems);
+
+ if (numReducedItems > 0) {
+ computeAdditionalProfit(profits);
+ }
+
+ if (numReducedItems > 0 && numReducedItems < numItems) {
+ initReducedProblem(profits, weights, capacities);
+ }
+ }
+ }
+
+ @Override
+ public long solve() {
+ return _additionalProfit + ((_isProblemSolved) ? 0 : _solver.solve());
+ }
+
+ @Override
+ public boolean bestSolutionContains(int itemId) {
+ final int mappedItemId = (_useReduction) ? _mappingReducedItemId.get(itemId) : itemId;
+ return (_useReduction && _knownValue.get(itemId)) ? _bestSolution.get(itemId) : _solver
+ .bestSolution(mappedItemId);
+ }
+
+ @Override
+ public String getName() {
+ return _solver.getName();
+ }
+
+ @Override
+ public boolean useReduction() {
+ return _useReduction;
+ }
+
+ @Override
+ public void setUseReduction(boolean useReduction) {
+ _useReduction = useReduction;
+ }
+
+ private int reduceProblem(int numItems) {
+ _knownValue.clear();
+ _bestSolution.clear();
+ _mappingReducedItemId.clear();
+ ArrayList<Long> j0UpperBounds = new ArrayList<Long>();
+ ArrayList<Long> j1UpperBounds = new ArrayList<Long>();
+ for (int i = 0; i < numItems; i++) {
+ _knownValue.add(false);
+ _bestSolution.add(false);
+ _mappingReducedItemId.add(i);
+ j0UpperBounds.add(Long.MAX_VALUE);
+ j1UpperBounds.add(Long.MAX_VALUE);
+ }
+ _additionalProfit = 0L;
+ long bestLowerBound = 0L;
+ for (int itemId = 0; itemId < numItems; itemId++) {
+ long upperBound = 0L;
+ long lowerBound = Long.MAX_VALUE;
+ long[] bounds = _solver.getLowerAndUpperBoundWhenItem(itemId, false, upperBound, lowerBound);
+ lowerBound = bounds[0];
+ upperBound = bounds[1];
+ j1UpperBounds.set(itemId, upperBound);
+ bestLowerBound = Math.max(bestLowerBound, lowerBound);
+ bounds = _solver.getLowerAndUpperBoundWhenItem(itemId, true, upperBound, lowerBound);
+ lowerBound = bounds[0];
+ upperBound = bounds[1];
+ j0UpperBounds.set(itemId, upperBound);
+ bestLowerBound = Math.max(bestLowerBound, lowerBound);
+ }
+
+ int numReducedItems = 0;
+ for (int itemId = 0; itemId < numItems; itemId++) {
+ if (bestLowerBound > j0UpperBounds.get(itemId)) {
+ _knownValue.set(itemId, true);
+ _bestSolution.set(itemId, false);
+ numReducedItems++;
+ } else if (bestLowerBound > j1UpperBounds.get(itemId)) {
+ _knownValue.set(itemId, true);
+ _bestSolution.set(itemId, true);
+ numReducedItems++;
+ }
+ }
+ _isProblemSolved = numReducedItems == numItems;
+ return numReducedItems;
+ }
+
+ private void computeAdditionalProfit(final ArrayList<Long> profits) {
+ final int numItems = profits.size();
+ _additionalProfit = 0L;
+ for (int itemId = 0; itemId < numItems; itemId++) {
+ if (_knownValue.get(itemId) && _bestSolution.get(itemId)) {
+ _additionalProfit += profits.get(itemId);
+ }
+ }
+ }
+
+ private void initReducedProblem(final ArrayList<Long> profits,
+ final ArrayList<ArrayList<Long>> weights, final ArrayList<Long> capacities) {
+ final int numItems = profits.size();
+ final int numDimensions = capacities.size();
+
+ ArrayList<Long> reducedProfits = new ArrayList<Long>();
+ for (int itemId = 0; itemId < numItems; itemId++) {
+ if (!_knownValue.get(itemId)) {
+ _mappingReducedItemId.set(itemId, reducedProfits.size());
+ reducedProfits.add(profits.get(itemId));
+ }
+ }
+
+ ArrayList<ArrayList<Long>> reducedWeights = new ArrayList<ArrayList<Long>>();
+ ArrayList<Long> reducedCapacities = new ArrayList<Long>(capacities);
+ for (int dim = 0; dim < numDimensions; dim++) {
+ final ArrayList<Long> oneDimensionWeights = weights.get(dim);
+ ArrayList<Long> oneDimensionReducedWeights = new ArrayList<Long>();
+ for (int itemId = 0; itemId < numItems; itemId++) {
+ if (_knownValue.get(itemId)) {
+ if (_bestSolution.get(itemId)) {
+ reducedCapacities
+ .set(dim, reducedCapacities.get(dim) - oneDimensionWeights.get(itemId));
+ }
+ } else {
+ oneDimensionReducedWeights.add(oneDimensionWeights.get(itemId));
+ }
+ }
+ reducedWeights.add(oneDimensionReducedWeights);
+ }
+ _solver.init(reducedProfits, reducedWeights, reducedCapacities);
+ }
+ }
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackState.java
----------------------------------------------------------------------
diff --cc helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackState.java
index 0000000,66713eb..c07ebf7
mode 000000,100644..100644
--- a/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackState.java
+++ b/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackState.java
@@@ -1,0 -1,42 +1,61 @@@
+ package org.apache.helix.controller.strategy.knapsack;
+
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements. See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership. The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License. You may obtain a copy of the License at
++ *
++ * http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing,
++ * software distributed under the License is distributed on an
++ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++ * KIND, either express or implied. See the License for the
++ * specific language governing permissions and limitations
++ * under the License.
++ */
++
+ /**
+ * The current state of the knapsack<br/>
+ * <br/>
+ * Based on the C++ knapsack solver in Google's or-tools package.
+ */
+ public interface KnapsackState {
+ /**
+ * Initialize the knapsack with the number of items
+ * @param numberOfItems the number of items
+ */
+ void init(int numberOfItems);
+
+ /**
+ * Update this state with an assignment
+ * @param revert true to revert to the previous state, false otherwise
+ * @param assignment the assignment that was made
+ * @return true on success, false on failure
+ */
+ boolean updateState(boolean revert, final KnapsackAssignment assignment);
+
+ /**
+ * Get the current number of items in the knapsack
+ * @return number of items
+ */
+ int getNumberOfItems();
+
+ /**
+ * Check if an item is currently bound to the knapsack
+ * @param id the item id
+ * @return true if bound, false otherwise
+ */
+ boolean isBound(int id);
+
+ /**
+ * Check if an item is currently in the knapsack
+ * @param id the item id
+ * @return true if inside, false otherwise
+ */
+ boolean isIn(int id);
+ }
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackStateImpl.java
----------------------------------------------------------------------
diff --cc helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackStateImpl.java
index 0000000,8e86872..1fcd797
mode 000000,100644..100644
--- a/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackStateImpl.java
+++ b/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackStateImpl.java
@@@ -1,0 -1,61 +1,80 @@@
+ package org.apache.helix.controller.strategy.knapsack;
+
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements. See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership. The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License. You may obtain a copy of the License at
++ *
++ * http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing,
++ * software distributed under the License is distributed on an
++ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++ * KIND, either express or implied. See the License for the
++ * specific language governing permissions and limitations
++ * under the License.
++ */
++
+ import java.util.ArrayList;
+
+ /**
+ * Implementation of {@link KnapsackState}<br/>
+ * <br/>
+ * Based on the C++ knapsack solver in Google's or-tools package.
+ */
+ public class KnapsackStateImpl implements KnapsackState {
+ private ArrayList<Boolean> _isBound;
+ private ArrayList<Boolean> _isIn;
+
+ /**
+ * Initialize the knapsack state
+ */
+ public KnapsackStateImpl() {
+ _isBound = new ArrayList<Boolean>();
+ _isIn = new ArrayList<Boolean>();
+ }
+
+ @Override
+ public void init(int numberOfItems) {
+ _isBound.clear();
+ _isIn.clear();
+ for (int i = 0; i < numberOfItems; i++) {
+ _isBound.add(false);
+ _isIn.add(false);
+ }
+ }
+
+ @Override
+ public boolean updateState(boolean revert, KnapsackAssignment assignment) {
+ if (revert) {
+ _isBound.set(assignment.itemId, false);
+ } else {
+ if (_isBound.get(assignment.itemId) && _isIn.get(assignment.itemId) != assignment.isIn) {
+ return false;
+ }
+ _isBound.set(assignment.itemId, true);
+ _isIn.set(assignment.itemId, assignment.isIn);
+ }
+ return true;
+ }
+
+ @Override
+ public int getNumberOfItems() {
+ return _isBound.size();
+ }
+
+ @Override
+ public boolean isBound(int id) {
+ return _isBound.get(id);
+ }
+
+ @Override
+ public boolean isIn(int id) {
+ return _isIn.get(id);
+ }
+
+ }
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/helix-core/src/main/java/org/apache/helix/manager/zk/ZKHelixAdmin.java
----------------------------------------------------------------------
diff --cc helix-core/src/main/java/org/apache/helix/manager/zk/ZKHelixAdmin.java
index 6e30074,39ea1c5..6dc5541
--- a/helix-core/src/main/java/org/apache/helix/manager/zk/ZKHelixAdmin.java
+++ b/helix-core/src/main/java/org/apache/helix/manager/zk/ZKHelixAdmin.java
@@@ -164,7 -164,7 +164,7 @@@ public class ZKHelixAdmin implements He
PropertyPathConfig.getPath(PropertyType.CONFIGS, clusterName,
ConfigScopeProperty.PARTICIPANT.toString(), instanceName);
if (!_zkClient.exists(instanceConfigPath)) {
-- throw new HelixException("instance" + instanceName + " does not exist in cluster "
++ throw new HelixException("instance " + instanceName + " does not exist in cluster "
+ clusterName);
}
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/helix-core/src/main/java/org/apache/helix/manager/zk/ZkHelixParticipant.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/helix-core/src/main/java/org/apache/helix/model/ClusterConfiguration.java
----------------------------------------------------------------------
diff --cc helix-core/src/main/java/org/apache/helix/model/ClusterConfiguration.java
index 5e2daa6,63f5776..a7a7088
--- a/helix-core/src/main/java/org/apache/helix/model/ClusterConfiguration.java
+++ b/helix-core/src/main/java/org/apache/helix/model/ClusterConfiguration.java
@@@ -30,11 -19,19 +19,24 @@@ package org.apache.helix.model
* under the License.
*/
++import java.util.Map;
++
+ import org.apache.helix.HelixProperty;
+ import org.apache.helix.ZNRecord;
+ import org.apache.helix.api.config.NamespacedConfig;
+ import org.apache.helix.api.config.UserConfig;
+ import org.apache.helix.api.id.ClusterId;
+ import org.apache.helix.manager.zk.ZKHelixManager;
+ import org.apache.log4j.Logger;
+
++import com.google.common.collect.Maps;
++
/**
* Persisted configuration properties for a cluster
*/
public class ClusterConfiguration extends HelixProperty {
+ private static final String IDEAL_STATE_RULE_PREFIX = "IdealStateRule";
+ private static final Logger LOG = Logger.getLogger(ClusterConfiguration.class);
/**
* Instantiate for an id
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/helix-core/src/main/java/org/apache/helix/model/IdealState.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/helix-core/src/main/java/org/apache/helix/model/ResourceConfiguration.java
----------------------------------------------------------------------
diff --cc helix-core/src/main/java/org/apache/helix/model/ResourceConfiguration.java
index aae58a8,46d7ed7..452ca65
--- a/helix-core/src/main/java/org/apache/helix/model/ResourceConfiguration.java
+++ b/helix-core/src/main/java/org/apache/helix/model/ResourceConfiguration.java
@@@ -1,17 -1,19 +1,5 @@@
package org.apache.helix.model;
--import org.apache.helix.HelixProperty;
--import org.apache.helix.ZNRecord;
--import org.apache.helix.api.config.NamespacedConfig;
--import org.apache.helix.api.config.ResourceConfig.ResourceType;
--import org.apache.helix.api.config.UserConfig;
--import org.apache.helix.api.id.ResourceId;
- import org.apache.helix.controller.rebalancer.config.RebalancerConfig;
- import org.apache.helix.controller.rebalancer.config.RebalancerConfigHolder;
-
- import com.google.common.base.Enums;
- import com.google.common.base.Optional;
-
-import org.apache.helix.controller.provisioner.ProvisionerConfig;
-import org.apache.helix.controller.rebalancer.config.RebalancerConfig;
-import org.apache.helix.controller.rebalancer.config.RebalancerConfigHolder;
-import org.apache.log4j.Logger;
-
-import com.google.common.base.Enums;
-import com.google.common.base.Optional;
-
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
@@@ -31,6 -33,6 +19,20 @@@
* under the License.
*/
++import org.apache.helix.HelixProperty;
++import org.apache.helix.ZNRecord;
++import org.apache.helix.api.config.NamespacedConfig;
++import org.apache.helix.api.config.ResourceConfig.ResourceType;
++import org.apache.helix.api.config.UserConfig;
++import org.apache.helix.api.id.ResourceId;
++import org.apache.helix.controller.provisioner.ProvisionerConfig;
++import org.apache.helix.controller.rebalancer.config.RebalancerConfig;
++import org.apache.helix.controller.rebalancer.config.RebalancerConfigHolder;
++import org.apache.log4j.Logger;
++
++import com.google.common.base.Enums;
++import com.google.common.base.Optional;
++
/**
* Persisted configuration properties for a resource
*/
@@@ -114,12 -122,12 +122,22 @@@ public class ResourceConfiguration exte
}
/**
+ * Check if this resource config has a rebalancer config
+ * @return true if a rebalancer config is attached, false otherwise
+ */
+ public boolean hasRebalancerConfig() {
+ return _record.getSimpleFields().containsKey(
+ RebalancerConfigHolder.class.getSimpleName() + NamespacedConfig.PREFIX_CHAR
+ + RebalancerConfigHolder.Fields.REBALANCER_CONFIG);
+ }
++
++ /**
+ * Get a ProvisionerConfig, if available
+ * @param clazz the class to cast to
+ * @return ProvisionerConfig, or null
+ */
+ public <T extends ProvisionerConfig> T getProvisionerConfig(Class<T> clazz) {
+ ProvisionerConfigHolder configHolder = new ProvisionerConfigHolder(this);
+ return configHolder.getProvisionerConfig(clazz);
+ }
}
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/helix-core/src/test/java/org/apache/helix/integration/TestHelixConnection.java
----------------------------------------------------------------------
diff --cc helix-core/src/test/java/org/apache/helix/integration/TestHelixConnection.java
index 318ab66,9b772f2..d0c9bb8
--- a/helix-core/src/test/java/org/apache/helix/integration/TestHelixConnection.java
+++ b/helix-core/src/test/java/org/apache/helix/integration/TestHelixConnection.java
@@@ -141,8 -141,7 +141,8 @@@ public class TestHelixConnection extend
participant.getStateMachineEngine().registerStateModelFactory(
StateModelDefId.from("MasterSlave"), new MockStateModelFactory());
- participant.startAsync();
+ participant.start();
+ Thread.sleep(1000);
// verify
final HelixDataAccessor accessor = connection.createDataAccessor(clusterId);
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/helix-core/src/test/java/org/apache/helix/integration/TestLocalContainerProvider.java
----------------------------------------------------------------------
diff --cc helix-core/src/test/java/org/apache/helix/integration/TestLocalContainerProvider.java
index 0000000,f4153cc..f27ce79
mode 000000,100644..100644
--- a/helix-core/src/test/java/org/apache/helix/integration/TestLocalContainerProvider.java
+++ b/helix-core/src/test/java/org/apache/helix/integration/TestLocalContainerProvider.java
@@@ -1,0 -1,340 +1,346 @@@
+ package org.apache.helix.integration;
+
+ /*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+ import java.util.Collection;
+ import java.util.Date;
+ import java.util.List;
+ import java.util.Map;
++import java.util.concurrent.CountDownLatch;
++import java.util.concurrent.TimeUnit;
+
+ import org.apache.helix.HelixConnection;
+ import org.apache.helix.HelixController;
+ import org.apache.helix.HelixManager;
+ import org.apache.helix.HelixParticipant;
+ import org.apache.helix.TestHelper;
+ import org.apache.helix.ZkUnitTestBase;
+ import org.apache.helix.api.Cluster;
+ import org.apache.helix.api.Participant;
+ import org.apache.helix.api.accessor.ClusterAccessor;
+ import org.apache.helix.api.config.ClusterConfig;
+ import org.apache.helix.api.config.ContainerConfig;
+ import org.apache.helix.api.config.ResourceConfig;
+ import org.apache.helix.api.id.ClusterId;
+ import org.apache.helix.api.id.ControllerId;
+ import org.apache.helix.api.id.ParticipantId;
+ import org.apache.helix.api.id.ResourceId;
+ import org.apache.helix.api.id.StateModelDefId;
+ import org.apache.helix.controller.provisioner.ContainerId;
+ import org.apache.helix.controller.provisioner.ContainerProvider;
+ import org.apache.helix.controller.provisioner.ContainerSpec;
+ import org.apache.helix.controller.provisioner.ContainerState;
+ import org.apache.helix.controller.provisioner.Provisioner;
+ import org.apache.helix.controller.provisioner.ProvisionerConfig;
+ import org.apache.helix.controller.provisioner.ProvisionerRef;
+ import org.apache.helix.controller.provisioner.TargetProvider;
+ import org.apache.helix.controller.provisioner.TargetProviderResponse;
+ import org.apache.helix.controller.rebalancer.config.FullAutoRebalancerConfig;
+ import org.apache.helix.controller.rebalancer.config.RebalancerConfig;
+ import org.apache.helix.controller.serializer.DefaultStringSerializer;
+ import org.apache.helix.controller.serializer.StringSerializer;
+ import org.apache.helix.manager.zk.ZkHelixConnection;
+ import org.apache.helix.model.StateModelDefinition;
+ import org.apache.helix.tools.StateModelConfigGenerator;
+ import org.codehaus.jackson.annotate.JsonProperty;
+ import org.testng.Assert;
+ import org.testng.annotations.Test;
+
+ import com.google.common.collect.Lists;
+ import com.google.common.collect.Maps;
+ import com.google.common.util.concurrent.AbstractService;
+ import com.google.common.util.concurrent.ListenableFuture;
+ import com.google.common.util.concurrent.SettableFuture;
+
+ public class TestLocalContainerProvider extends ZkUnitTestBase {
+ private static final int MAX_PARTICIPANTS = 10;
+ static String clusterName = null;
+ static String resourceName = null;
+ static int allocated = 0;
+ static int started = 0;
+ static int stopped = 0;
+ static int deallocated = 0;
+ static HelixConnection connection = null;
++ static CountDownLatch latch = new CountDownLatch(MAX_PARTICIPANTS);
+
+ @Test
+ public void testBasic() throws Exception {
+ final int NUM_PARTITIONS = 4;
+ final int NUM_REPLICAS = 2;
+ resourceName = "TestDB0";
+
+ String className = TestHelper.getTestClassName();
+ String methodName = TestHelper.getTestMethodName();
+ clusterName = className + "_" + methodName;
+ System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
+
+ allocated = 0;
+ started = 0;
+ stopped = 0;
+ deallocated = 0;
+
+ // connect
+ connection = new ZkHelixConnection(ZK_ADDR);
+ connection.connect();
+
+ // create the cluster
+ ClusterId clusterId = ClusterId.from(clusterName);
+ ClusterAccessor clusterAccessor = connection.createClusterAccessor(clusterId);
+ StateModelDefinition masterSlave =
+ new StateModelDefinition(StateModelConfigGenerator.generateConfigForMasterSlave());
+ clusterAccessor.createCluster(new ClusterConfig.Builder(clusterId).addStateModelDefinition(
+ masterSlave).build());
+
+ // add the resource with the local provisioner
+ ResourceId resourceId = ResourceId.from(resourceName);
+ ProvisionerConfig provisionerConfig = new LocalProvisionerConfig(resourceId);
+ RebalancerConfig rebalancerConfig =
+ new FullAutoRebalancerConfig.Builder(resourceId).addPartitions(NUM_PARTITIONS)
+ .replicaCount(NUM_REPLICAS).stateModelDefId(masterSlave.getStateModelDefId()).build();
+ clusterAccessor.addResourceToCluster(new ResourceConfig.Builder(ResourceId.from(resourceName))
+ .provisionerConfig(provisionerConfig).rebalancerConfig(rebalancerConfig).build());
+
+ // start controller
+ ControllerId controllerId = ControllerId.from("controller1");
+ HelixController controller = connection.createController(clusterId, controllerId);
+ controller.start();
+
- Thread.sleep(10000);
++ latch.await(10000, TimeUnit.MILLISECONDS);
+
+ // clean up
+ controller.stop();
+ connection.disconnect();
+
+ Assert.assertEquals(allocated, MAX_PARTICIPANTS);
+ Assert.assertEquals(started, MAX_PARTICIPANTS);
+ Assert.assertEquals(stopped, MAX_PARTICIPANTS);
+ Assert.assertEquals(deallocated, MAX_PARTICIPANTS);
+ }
+
+ /**
+ * Use Guava's service to wrap a participant lifecycle
+ */
+ public static class ParticipantService extends AbstractService {
+ private final ClusterId _clusterId;
+ private final ParticipantId _participantId;
+ private HelixParticipant _participant;
+
+ public ParticipantService(ClusterId clusterId, ParticipantId participantId) {
+ // TODO: probably should pass a connection in here
+ _clusterId = clusterId;
+ _participantId = participantId;
+ }
+
+ @Override
+ protected void doStart() {
+ _participant = connection.createParticipant(_clusterId, _participantId);
+ _participant.getStateMachineEngine().registerStateModelFactory(
+ StateModelDefId.from("MasterSlave"), new TestHelixConnection.MockStateModelFactory());
+ _participant.start();
+ notifyStarted();
+ }
+
+ @Override
+ protected void doStop() {
+ _participant.stop();
+ notifyStopped();
+ }
+
+ }
+
+ /**
+ * Bare-bones ProvisionerConfig
+ */
+ public static class LocalProvisionerConfig implements ProvisionerConfig {
+ private ResourceId _resourceId;
+ private Class<? extends StringSerializer> _serializerClass;
+ private ProvisionerRef _provisionerRef;
+
+ public LocalProvisionerConfig(@JsonProperty("resourceId") ResourceId resourceId) {
+ _resourceId = resourceId;
+ _serializerClass = DefaultStringSerializer.class;
+ _provisionerRef = ProvisionerRef.from(LocalProvisioner.class.getName());
+ }
+
+ @Override
+ public ResourceId getResourceId() {
+ return _resourceId;
+ }
+
+ @Override
+ public ProvisionerRef getProvisionerRef() {
+ return _provisionerRef;
+ }
+
+ public void setProvisionerRef(ProvisionerRef provisionerRef) {
+ _provisionerRef = provisionerRef;
+ }
+
+ @Override
+ public Class<? extends StringSerializer> getSerializerClass() {
+ return _serializerClass;
+ }
+
+ public void setSerializerClass(Class<? extends StringSerializer> serializerClass) {
+ _serializerClass = serializerClass;
+ }
+ }
+
+ /**
+ * Provisioner that will start and stop participants locally
+ */
+ public static class LocalProvisioner implements Provisioner, TargetProvider, ContainerProvider {
+ private HelixManager _helixManager;
+ private ClusterId _clusterId;
+ private int _askCount;
+ private Map<ContainerId, ContainerState> _states;
+ private Map<ContainerId, ParticipantId> _containerParticipants;
+ private Map<ContainerId, ParticipantService> _participants;
+
+ @Override
+ public void init(HelixManager helixManager, ResourceConfig resourceConfig) {
+ // TODO: would be nice to have a HelixConnection instead of a HelixManager
+ _helixManager = helixManager;
+ _clusterId = ClusterId.from(_helixManager.getClusterName());
+ _askCount = 0;
+ _states = Maps.newHashMap();
+ _containerParticipants = Maps.newHashMap();
+ _participants = Maps.newHashMap();
+ }
+
+ @Override
+ public ListenableFuture<ContainerId> allocateContainer(ContainerSpec spec) {
+ // allocation is a no-op
- ContainerId containerId = spec.getContainerId();
++ ContainerId containerId = ContainerId.from(spec.getParticipantId().toString());
+ _states.put(containerId, ContainerState.ACQUIRED);
++ _containerParticipants.put(containerId, spec.getParticipantId());
+ allocated++;
+ SettableFuture<ContainerId> future = SettableFuture.create();
+ future.set(containerId);
+ return future;
+ }
+
+ @Override
+ public ListenableFuture<Boolean> deallocateContainer(ContainerId containerId) {
+ // deallocation is a no-op
+ _states.put(containerId, ContainerState.FINALIZED);
+ deallocated++;
++ latch.countDown();
+ SettableFuture<Boolean> future = SettableFuture.create();
+ future.set(true);
+ return future;
+ }
+
+ @Override
+ public ListenableFuture<Boolean> startContainer(ContainerId containerId, Participant participant) {
+ ParticipantService participantService =
+ new ParticipantService(_clusterId, _containerParticipants.get(containerId));
+ participantService.startAsync();
+ participantService.awaitRunning();
+ _participants.put(containerId, participantService);
+ _states.put(containerId, ContainerState.CONNECTED);
+ started++;
+ SettableFuture<Boolean> future = SettableFuture.create();
+ future.set(true);
+ return future;
+ }
+
+ @Override
+ public ListenableFuture<Boolean> stopContainer(ContainerId containerId) {
+ ParticipantService participant = _participants.get(containerId);
+ participant.stopAsync();
+ participant.awaitTerminated();
+ _states.put(containerId, ContainerState.HALTED);
+ stopped++;
+ SettableFuture<Boolean> future = SettableFuture.create();
+ future.set(true);
+ return future;
+ }
+
+ @Override
+ public TargetProviderResponse evaluateExistingContainers(Cluster cluster,
+ ResourceId resourceId, Collection<Participant> participants) {
+ TargetProviderResponse response = new TargetProviderResponse();
+ // ask for two containers at a time
+ List<ContainerSpec> containersToAcquire = Lists.newArrayList();
+ boolean asked = false;
+ if (_askCount < MAX_PARTICIPANTS) {
+ containersToAcquire.add(new ContainerSpec(ParticipantId.from("container" + _askCount)));
- containersToAcquire.add(new ContainerSpec(ParticipantId.from("container" + (_askCount + 1))));
++ containersToAcquire
++ .add(new ContainerSpec(ParticipantId.from("container" + (_askCount + 1))));
+ asked = true;
+ }
+ List<Participant> containersToStart = Lists.newArrayList();
+ List<Participant> containersToStop = Lists.newArrayList();
+ List<Participant> containersToRelease = Lists.newArrayList();
+ int stopCount = 0;
+ for (Participant participant : participants) {
+ ContainerConfig containerConfig = participant.getContainerConfig();
+ if (containerConfig != null && containerConfig.getState() != null) {
+ ContainerState state = containerConfig.getState();
+ switch (state) {
+ case ACQUIRED:
+ // acquired containers are ready to start
+ containersToStart.add(participant);
+ break;
+ case CONNECTED:
+ // stop at most two active at a time, wait for everything to be up first
+ if (stopCount < 2 && _askCount >= MAX_PARTICIPANTS) {
+ containersToStop.add(participant);
+ stopCount++;
+ }
+ break;
+ case HALTED:
+ // halted containers can be released
+ containersToRelease.add(participant);
+ break;
+ default:
+ break;
+ }
+ ContainerId containerId = containerConfig.getId();
+ if (containerId != null) {
+ _containerParticipants.put(containerId, participant.getId());
+ _states.put(containerId, state);
+ }
+ }
+ }
+ // update acquire request count
+ if (asked) {
+ _askCount += 2;
+ }
+ // set the response
+ response.setContainersToAcquire(containersToAcquire);
+ response.setContainersToStart(containersToStart);
+ response.setContainersToStop(containersToStop);
+ response.setContainersToRelease(containersToRelease);
+ return response;
+ }
+
+ @Override
+ public ContainerProvider getContainerProvider() {
+ return this;
+ }
+
+ @Override
+ public TargetProvider getTargetProvider() {
+ return this;
+ }
+ }
+ }
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/helix-core/src/test/java/org/apache/helix/integration/TestSharedConnection.java
----------------------------------------------------------------------
diff --cc helix-core/src/test/java/org/apache/helix/integration/TestSharedConnection.java
index bf89cdb,0000000..0aef00e
mode 100644,000000..100644
--- a/helix-core/src/test/java/org/apache/helix/integration/TestSharedConnection.java
+++ b/helix-core/src/test/java/org/apache/helix/integration/TestSharedConnection.java
@@@ -1,199 -1,0 +1,199 @@@
+package org.apache.helix.integration;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import java.util.Date;
+import java.util.Map;
+
+import org.apache.helix.HelixAdmin;
+import org.apache.helix.HelixConnection;
+import org.apache.helix.HelixController;
+import org.apache.helix.HelixParticipant;
+import org.apache.helix.TestHelper;
+import org.apache.helix.ZkUnitTestBase;
+import org.apache.helix.api.State;
+import org.apache.helix.api.id.ClusterId;
+import org.apache.helix.api.id.ControllerId;
+import org.apache.helix.api.id.ParticipantId;
+import org.apache.helix.api.id.PartitionId;
+import org.apache.helix.api.id.StateModelDefId;
+import org.apache.helix.manager.zk.HelixConnectionAdaptor;
+import org.apache.helix.manager.zk.ZkHelixConnection;
+import org.apache.helix.manager.zk.ZkHelixLeaderElection;
+import org.apache.helix.model.IdealState;
+import org.apache.helix.model.IdealState.RebalanceMode;
+import org.apache.helix.tools.ClusterStateVerifier;
+import org.apache.helix.tools.ClusterStateVerifier.BestPossAndExtViewZkVerifier;
+import org.testng.Assert;
+import org.testng.annotations.Test;
+
+/**
+ * Ensure that the external view is able to update properly when participants share a connection.
+ */
+public class TestSharedConnection extends ZkUnitTestBase {
+ /**
+ * Ensure that the external view is able to update properly when participants share a connection.
+ */
+ @Test
+ public void testSharedParticipantConnection() throws Exception {
+ final int NUM_PARTICIPANTS = 2;
+ final int NUM_PARTITIONS = 4;
+ final int NUM_REPLICAS = 2;
+ final String RESOURCE_NAME = "TestDB0";
+
+ String className = TestHelper.getTestClassName();
+ String methodName = TestHelper.getTestMethodName();
+ String clusterName = className + "_" + methodName;
+ System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
+
+ // Set up cluster
+ TestHelper.setupCluster(clusterName, ZK_ADDR, 12918, // participant port
+ "localhost", // participant name prefix
+ "TestDB", // resource name prefix
+ 1, // resources
+ NUM_PARTITIONS, // partitions per resource
+ NUM_PARTICIPANTS, // number of nodes
+ NUM_REPLICAS, // replicas
+ "OnlineOffline", RebalanceMode.CUSTOMIZED, true); // do rebalance
+
+ // Connect
+ HelixConnection connection = new ZkHelixConnection(ZK_ADDR);
+ connection.connect();
+
+ // Start some participants
+ HelixParticipant[] participants = new HelixParticipant[NUM_PARTICIPANTS];
+ for (int i = 0; i < NUM_PARTICIPANTS; i++) {
+ participants[i] =
+ connection.createParticipant(ClusterId.from(clusterName),
+ ParticipantId.from("localhost_" + (12918 + i)));
+ participants[i].getStateMachineEngine().registerStateModelFactory(
+ StateModelDefId.from("OnlineOffline"), new TestHelixConnection.MockStateModelFactory());
- participants[i].startAsync();
++ participants[i].start();
+ }
+
+ // Start the controller
+ HelixController controller =
+ connection.createController(ClusterId.from(clusterName), ControllerId.from("controller"));
- controller.startAsync();
++ controller.start();
+ Thread.sleep(500);
+
+ // Verify balanced cluster
+ boolean result =
+ ClusterStateVerifier.verifyByZkCallback(new BestPossAndExtViewZkVerifier(ZK_ADDR,
+ clusterName));
+ Assert.assertTrue(result);
+
+ // Drop a partition from the first participant
+ HelixAdmin admin = connection.createClusterManagementTool();
+ IdealState idealState = admin.getResourceIdealState(clusterName, RESOURCE_NAME);
+ Map<ParticipantId, State> participantStateMap =
+ idealState.getParticipantStateMap(PartitionId.from(RESOURCE_NAME + "_0"));
+ participantStateMap.remove(ParticipantId.from("localhost_12918"));
+ idealState.setParticipantStateMap(PartitionId.from(RESOURCE_NAME + "_0"), participantStateMap);
+ admin.setResourceIdealState(clusterName, RESOURCE_NAME, idealState);
+ Thread.sleep(1000);
+
+ // Verify balanced cluster
+ result =
+ ClusterStateVerifier.verifyByZkCallback(new BestPossAndExtViewZkVerifier(ZK_ADDR,
+ clusterName));
+ Assert.assertTrue(result);
+
+ // Drop a partition from the second participant
+ participantStateMap = idealState.getParticipantStateMap(PartitionId.from(RESOURCE_NAME + "_1"));
+ participantStateMap.remove(ParticipantId.from("localhost_12919"));
+ idealState.setParticipantStateMap(PartitionId.from(RESOURCE_NAME + "_1"), participantStateMap);
+ admin.setResourceIdealState(clusterName, RESOURCE_NAME, idealState);
+ Thread.sleep(1000);
+
+ // Verify balanced cluster
+ result =
+ ClusterStateVerifier.verifyByZkCallback(new BestPossAndExtViewZkVerifier(ZK_ADDR,
+ clusterName));
+ Assert.assertTrue(result);
+
+ // Clean up
- controller.stopAsync();
++ controller.stop();
+ for (HelixParticipant participant : participants) {
- participant.stopAsync();
++ participant.stop();
+ }
+ admin.dropCluster(clusterName);
+ System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
+ }
+
+ /**
+ * Ensure that only one controller with a shared connection thinks it's leader
+ */
+ @Test
+ public void testSharedControllerConnection() throws Exception {
+ final int NUM_PARTICIPANTS = 2;
+ final int NUM_PARTITIONS = 4;
+ final int NUM_REPLICAS = 2;
+ final int NUM_CONTROLLERS = 2;
+
+ String className = TestHelper.getTestClassName();
+ String methodName = TestHelper.getTestMethodName();
+ String clusterName = className + "_" + methodName;
+ System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
+
+ // Set up cluster
+ TestHelper.setupCluster(clusterName, ZK_ADDR, 12918, // participant port
+ "localhost", // participant name prefix
+ "TestDB", // resource name prefix
+ 1, // resources
+ NUM_PARTITIONS, // partitions per resource
+ NUM_PARTICIPANTS, // number of nodes
+ NUM_REPLICAS, // replicas
+ "OnlineOffline", RebalanceMode.CUSTOMIZED, true); // do rebalance
+
+ // Connect
+ HelixConnection connection = new ZkHelixConnection(ZK_ADDR);
+ connection.connect();
+
+ // Create a couple controllers
+ HelixController[] controllers = new HelixController[NUM_CONTROLLERS];
+ for (int i = 0; i < NUM_CONTROLLERS; i++) {
+ controllers[i] =
+ connection.createController(ClusterId.from(clusterName),
+ ControllerId.from("controller_" + i));
- controllers[i].startAsync();
++ controllers[i].start();
+ }
+ Thread.sleep(1000);
+
+ // Now verify that exactly one is leader
+ int leaderCount = 0;
+ for (HelixController controller : controllers) {
+ HelixConnectionAdaptor adaptor = new HelixConnectionAdaptor(controller);
+ boolean result = ZkHelixLeaderElection.tryUpdateController(adaptor);
+ if (result) {
+ leaderCount++;
+ }
+ }
+ Assert.assertEquals(leaderCount, 1);
+
+ // Clean up
+ for (HelixController controller : controllers) {
- controller.stopAsync();
++ controller.stop();
+ }
+ HelixAdmin admin = connection.createClusterManagementTool();
+ admin.dropCluster(clusterName);
+ System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
+ }
+}
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/helix-provisioning/README.md
----------------------------------------------------------------------
diff --cc helix-provisioning/README.md
index 0000000,77a4e81..30e160d
mode 000000,100644..100644
--- a/helix-provisioning/README.md
+++ b/helix-provisioning/README.md
@@@ -1,0 -1,16 +1,35 @@@
-Checkout helix provisioning branch
++<!---
++Licensed to the Apache Software Foundation (ASF) under one
++or more contributor license agreements. See the NOTICE file
++distributed with this work for additional information
++regarding copyright ownership. The ASF licenses this file
++to you under the Apache License, Version 2.0 (the
++"License"); you may not use this file except in compliance
++with the License. You may obtain a copy of the License at
++
++ http://www.apache.org/licenses/LICENSE-2.0
++
++Unless required by applicable law or agreed to in writing,
++software distributed under the License is distributed on an
++"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++KIND, either express or implied. See the License for the
++specific language governing permissions and limitations
++under the License.
++-->
++
++checkout helix provisioning branch
+ cd helix
+ mvn clean package -DskipTests
+ cd helix-provisioning
+
+
-yyDownload and install YARN start all services (datanode, resourcemanage, nodemanager, jobHistoryServer(optional))
++Download and install YARN start all services (datanode, resourcemanage, nodemanager, jobHistoryServer(optional))
+
+ Will post the instructions to get a local YARN cluster.
+
+ target/helix-provisioning-pkg/bin/app-launcher.sh org.apache.helix.provisioning.yarn.example.HelloWordAppSpecFactory /Users/kgopalak/Documents/projects/incubator-helix/helix-provisioning/src/main/resources/hello_world_app_spec.yaml
+
+
+
+
+
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/helix-provisioning/src/main/java/org/apache/helix/provisioning/AppConfig.java
----------------------------------------------------------------------
diff --cc helix-provisioning/src/main/java/org/apache/helix/provisioning/AppConfig.java
index 0000000,a51db1c..81a9a2c
mode 000000,100644..100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/AppConfig.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/AppConfig.java
@@@ -1,0 -1,17 +1,35 @@@
+ package org.apache.helix.provisioning;
+
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements. See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership. The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License. You may obtain a copy of the License at
++ *
++ * http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing,
++ * software distributed under the License is distributed on an
++ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++ * KIND, either express or implied. See the License for the
++ * specific language governing permissions and limitations
++ * under the License.
++ */
++
+ import java.util.HashMap;
+ import java.util.Map;
+
-
+ public class AppConfig {
- public Map<String, String> config = new HashMap<String, String>();
-
- public String getValue(String key) {
- return (config != null ? config.get(key) : null);
- }
-
- public void setValue(String key, String value){
- config.put(key, value);
- }
++ public Map<String, String> config = new HashMap<String, String>();
++
++ public String getValue(String key) {
++ return (config != null ? config.get(key) : null);
++ }
++
++ public void setValue(String key, String value) {
++ config.put(key, value);
++ }
+ }
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/helix-provisioning/src/main/java/org/apache/helix/provisioning/ApplicationSpec.java
----------------------------------------------------------------------
diff --cc helix-provisioning/src/main/java/org/apache/helix/provisioning/ApplicationSpec.java
index 0000000,f7454d2..e50cfb4
mode 000000,100644..100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/ApplicationSpec.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/ApplicationSpec.java
@@@ -1,0 -1,29 +1,46 @@@
+ package org.apache.helix.provisioning;
+
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements. See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership. The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License. You may obtain a copy of the License at
++ *
++ * http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing,
++ * software distributed under the License is distributed on an
++ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++ * KIND, either express or implied. See the License for the
++ * specific language governing permissions and limitations
++ * under the License.
++ */
++
+ import java.net.URI;
+ import java.util.List;
+
-
-
+ public interface ApplicationSpec {
+ /**
+ * Returns the name of the application
+ * @return
+ */
+ String getAppName();
+
+ AppConfig getConfig();
+
+ List<String> getServices();
+
+ URI getAppMasterPackage();
-
++
+ URI getServicePackage(String serviceName);
-
++
+ String getServiceMainClass(String service);
+
+ ServiceConfig getServiceConfig(String serviceName);
+
+ List<TaskConfig> getTaskConfigs();
+
+ }
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/helix-provisioning/src/main/java/org/apache/helix/provisioning/ApplicationSpecFactory.java
----------------------------------------------------------------------
diff --cc helix-provisioning/src/main/java/org/apache/helix/provisioning/ApplicationSpecFactory.java
index 0000000,0c524f2..866e10e
mode 000000,100644..100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/ApplicationSpecFactory.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/ApplicationSpecFactory.java
@@@ -1,0 -1,9 +1,28 @@@
+ package org.apache.helix.provisioning;
+
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements. See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership. The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License. You may obtain a copy of the License at
++ *
++ * http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing,
++ * software distributed under the License is distributed on an
++ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++ * KIND, either express or implied. See the License for the
++ * specific language governing permissions and limitations
++ * under the License.
++ */
++
+ import java.io.InputStream;
+
+ public interface ApplicationSpecFactory {
-
++
+ ApplicationSpec fromYaml(InputStream yamlFile);
+
+ }
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/helix-provisioning/src/main/java/org/apache/helix/provisioning/ContainerAskResponse.java
----------------------------------------------------------------------
diff --cc helix-provisioning/src/main/java/org/apache/helix/provisioning/ContainerAskResponse.java
index 0000000,18f66d2..1a2b8aa
mode 000000,100644..100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/ContainerAskResponse.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/ContainerAskResponse.java
@@@ -1,0 -1,17 +1,36 @@@
+ package org.apache.helix.provisioning;
+
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements. See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership. The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License. You may obtain a copy of the License at
++ *
++ * http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing,
++ * software distributed under the License is distributed on an
++ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++ * KIND, either express or implied. See the License for the
++ * specific language governing permissions and limitations
++ * under the License.
++ */
++
+ import org.apache.hadoop.yarn.api.records.Container;
+
+ public class ContainerAskResponse {
-
++
+ Container container;
+
+ public Container getContainer() {
+ return container;
+ }
+
+ public void setContainer(Container container) {
+ this.container = container;
+ }
-
++
+ }
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/helix-provisioning/src/main/java/org/apache/helix/provisioning/ContainerLaunchResponse.java
----------------------------------------------------------------------
diff --cc helix-provisioning/src/main/java/org/apache/helix/provisioning/ContainerLaunchResponse.java
index 0000000,ea6ef12..48ae3f1
mode 000000,100644..100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/ContainerLaunchResponse.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/ContainerLaunchResponse.java
@@@ -1,0 -1,5 +1,24 @@@
+ package org.apache.helix.provisioning;
+
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements. See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership. The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License. You may obtain a copy of the License at
++ *
++ * http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing,
++ * software distributed under the License is distributed on an
++ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++ * KIND, either express or implied. See the License for the
++ * specific language governing permissions and limitations
++ * under the License.
++ */
++
+ public class ContainerLaunchResponse {
+
+ }
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/helix-provisioning/src/main/java/org/apache/helix/provisioning/ContainerReleaseResponse.java
----------------------------------------------------------------------
diff --cc helix-provisioning/src/main/java/org/apache/helix/provisioning/ContainerReleaseResponse.java
index 0000000,e4a5dc4..dc5289c
mode 000000,100644..100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/ContainerReleaseResponse.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/ContainerReleaseResponse.java
@@@ -1,0 -1,5 +1,24 @@@
+ package org.apache.helix.provisioning;
+
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements. See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership. The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License. You may obtain a copy of the License at
++ *
++ * http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing,
++ * software distributed under the License is distributed on an
++ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++ * KIND, either express or implied. See the License for the
++ * specific language governing permissions and limitations
++ * under the License.
++ */
++
+ public class ContainerReleaseResponse {
+
+ }
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/helix-provisioning/src/main/java/org/apache/helix/provisioning/ContainerStopResponse.java
----------------------------------------------------------------------
diff --cc helix-provisioning/src/main/java/org/apache/helix/provisioning/ContainerStopResponse.java
index 0000000,d8c8a46..31f90a4
mode 000000,100644..100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/ContainerStopResponse.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/ContainerStopResponse.java
@@@ -1,0 -1,5 +1,24 @@@
+ package org.apache.helix.provisioning;
+
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements. See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership. The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License. You may obtain a copy of the License at
++ *
++ * http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing,
++ * software distributed under the License is distributed on an
++ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++ * KIND, either express or implied. See the License for the
++ * specific language governing permissions and limitations
++ * under the License.
++ */
++
+ public class ContainerStopResponse {
+
+ }
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/helix-provisioning/src/main/java/org/apache/helix/provisioning/HelixYarnUtil.java
----------------------------------------------------------------------
diff --cc helix-provisioning/src/main/java/org/apache/helix/provisioning/HelixYarnUtil.java
index 0000000,80ac16b..dc71f94
mode 000000,100644..100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/HelixYarnUtil.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/HelixYarnUtil.java
@@@ -1,0 -1,42 +1,61 @@@
+ package org.apache.helix.provisioning;
+
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements. See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership. The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License. You may obtain a copy of the License at
++ *
++ * http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing,
++ * software distributed under the License is distributed on an
++ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++ * KIND, either express or implied. See the License for the
++ * specific language governing permissions and limitations
++ * under the License.
++ */
++
+ import org.apache.log4j.Logger;
+
+ public class HelixYarnUtil {
+ private static Logger LOG = Logger.getLogger(HelixYarnUtil.class);
+
+ @SuppressWarnings("unchecked")
+ public static <T extends ApplicationSpecFactory> T createInstance(String className) {
+ Class<ApplicationSpecFactory> factoryClazz = null;
+ {
+ try {
+ factoryClazz =
+ (Class<ApplicationSpecFactory>) Thread.currentThread().getContextClassLoader()
+ .loadClass(className);
+ } catch (ClassNotFoundException e) {
+ try {
+ factoryClazz =
+ (Class<ApplicationSpecFactory>) ClassLoader.getSystemClassLoader().loadClass(
+ className);
+ } catch (ClassNotFoundException e1) {
+ try {
+ factoryClazz = (Class<ApplicationSpecFactory>) Class.forName(className);
+ } catch (ClassNotFoundException e2) {
+
+ }
+ }
+ }
+ }
+ System.out.println(System.getProperty("java.class.path"));
+ if (factoryClazz == null) {
+ LOG.error("Unable to find class:" + className);
+ }
+ ApplicationSpecFactory factory = null;
+ try {
+ factory = factoryClazz.newInstance();
+ } catch (Exception e) {
+ LOG.error("Unable to create instance of class: " + className, e);
+ }
+ return (T) factory;
+ }
+ }
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/helix-provisioning/src/main/java/org/apache/helix/provisioning/ParticipantLauncher.java
----------------------------------------------------------------------
diff --cc helix-provisioning/src/main/java/org/apache/helix/provisioning/ParticipantLauncher.java
index 0000000,59a9eb5..6bbe9ad
mode 000000,100644..100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/ParticipantLauncher.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/ParticipantLauncher.java
@@@ -1,0 -1,137 +1,156 @@@
+ package org.apache.helix.provisioning;
+
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements. See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership. The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License. You may obtain a copy of the License at
++ *
++ * http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing,
++ * software distributed under the License is distributed on an
++ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++ * KIND, either express or implied. See the License for the
++ * specific language governing permissions and limitations
++ * under the License.
++ */
++
+ import java.util.Arrays;
+ import java.util.concurrent.TimeUnit;
+
+ import org.apache.commons.cli.CommandLine;
+ import org.apache.commons.cli.GnuParser;
+ import org.apache.commons.cli.Options;
+ import org.apache.helix.HelixConnection;
+ import org.apache.helix.NotificationContext;
+ import org.apache.helix.api.id.ClusterId;
+ import org.apache.helix.api.id.ParticipantId;
+ import org.apache.helix.manager.zk.ZkHelixConnection;
+ import org.apache.helix.messaging.handling.HelixTaskResult;
+ import org.apache.helix.messaging.handling.MessageHandler;
+ import org.apache.helix.messaging.handling.MessageHandlerFactory;
+ import org.apache.helix.model.Message;
+ import org.apache.helix.model.Message.MessageType;
+ import org.apache.helix.participant.AbstractParticipantService;
+ import org.apache.log4j.Logger;
+
+ /**
+ * Main class that invokes the Participant Api
+ */
+ public class ParticipantLauncher {
+ private static Logger LOG = Logger.getLogger(ParticipantLauncher.class);
+
+ public static void main(String[] args) {
+
+ System.out.println("Starting Helix Participant: " + Arrays.toString(args));
+ Options opts;
+ opts = new Options();
+ opts.addOption("cluster", true, "Cluster name, default app name");
+ opts.addOption("participantId", true, "Participant Id");
+ opts.addOption("zkAddress", true, "Zookeeper address");
+ opts.addOption("participantClass", true, "Participant service class");
+ try {
+ CommandLine cliParser = new GnuParser().parse(opts, args);
+ String zkAddress = cliParser.getOptionValue("zkAddress");
+ final HelixConnection connection = new ZkHelixConnection(zkAddress);
+ connection.connect();
+ ClusterId clusterId = ClusterId.from(cliParser.getOptionValue("cluster"));
+ ParticipantId participantId = ParticipantId.from(cliParser.getOptionValue("participantId"));
+ String participantClass = cliParser.getOptionValue("participantClass");
+ @SuppressWarnings("unchecked")
+ Class<? extends AbstractParticipantService> clazz =
+ (Class<? extends AbstractParticipantService>) Class.forName(participantClass);
+ final AbstractParticipantService containerParticipant =
+ clazz.getConstructor(HelixConnection.class, ClusterId.class, ParticipantId.class)
+ .newInstance(connection, clusterId, participantId);
+ containerParticipant.startAsync();
+ containerParticipant.awaitRunning(60, TimeUnit.SECONDS);
+ containerParticipant
+ .getParticipant()
+ .getMessagingService()
+ .registerMessageHandlerFactory(MessageType.SHUTDOWN.toString(),
+ new ShutdownMessageHandlerFactory(containerParticipant, connection));
+ Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() {
+ @Override
+ public void run() {
+ LOG.info("Received a shutdown signal. Stopping participant");
+ containerParticipant.stopAsync();
+ containerParticipant.awaitTerminated();
+ connection.disconnect();
+ }
+ }) {
+
+ });
+ Thread.currentThread().join();
+ } catch (Exception e) {
+ e.printStackTrace();
+ System.out.println("Failed to start Helix participant" + e);
+ // System.exit(1);
+ }
+ try {
+ Thread.currentThread().join();
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+
+ }
+
+ public static class ShutdownMessageHandlerFactory implements MessageHandlerFactory {
+ private final AbstractParticipantService _service;
+ private final HelixConnection _connection;
+
+ public ShutdownMessageHandlerFactory(AbstractParticipantService service,
+ HelixConnection connection) {
+ _service = service;
+ _connection = connection;
+ }
+
+ @Override
+ public MessageHandler createHandler(Message message, NotificationContext context) {
+ return new ShutdownMessageHandler(_service, _connection, message, context);
+ }
+
+ @Override
+ public String getMessageType() {
+ return MessageType.SHUTDOWN.toString();
+ }
+
+ @Override
+ public void reset() {
+ }
+
+ }
+
+ public static class ShutdownMessageHandler extends MessageHandler {
+ private final AbstractParticipantService _service;
+ private final HelixConnection _connection;
+
+ public ShutdownMessageHandler(AbstractParticipantService service, HelixConnection connection,
+ Message message, NotificationContext context) {
+ super(message, context);
+ _service = service;
+ _connection = connection;
+ }
+
+ @Override
+ public HelixTaskResult handleMessage() throws InterruptedException {
+ LOG.info("Received a shutdown message. Trying to shut down.");
+ _service.stopAsync();
+ _service.awaitTerminated();
+ _connection.disconnect();
+ LOG.info("Shutdown complete. Process exiting gracefully");
+ System.exit(0);
+ return null;
+ }
+
+ @Override
+ public void onError(Exception e, ErrorCode code, ErrorType type) {
+ LOG.error("Shutdown message error", e);
+ }
+
+ }
+ }
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/helix-provisioning/src/main/java/org/apache/helix/provisioning/ServiceConfig.java
----------------------------------------------------------------------
diff --cc helix-provisioning/src/main/java/org/apache/helix/provisioning/ServiceConfig.java
index 0000000,55ca0ae..f2f8189
mode 000000,100644..100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/ServiceConfig.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/ServiceConfig.java
@@@ -1,0 -1,16 +1,32 @@@
+ package org.apache.helix.provisioning;
+
-import java.util.HashMap;
-import java.util.Map;
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements. See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership. The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License. You may obtain a copy of the License at
++ *
++ * http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing,
++ * software distributed under the License is distributed on an
++ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++ * KIND, either express or implied. See the License for the
++ * specific language governing permissions and limitations
++ * under the License.
++ */
+
+ import org.apache.helix.api.Scope;
+ import org.apache.helix.api.config.UserConfig;
+ import org.apache.helix.api.id.ResourceId;
+
-public class ServiceConfig extends UserConfig{
-
- public ServiceConfig(Scope<ResourceId> scope) {
- super(scope);
++public class ServiceConfig extends UserConfig {
++
++ public ServiceConfig(Scope<ResourceId> scope) {
++ super(scope);
+ }
-
++
+ }
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/helix-provisioning/src/main/java/org/apache/helix/provisioning/TaskConfig.java
----------------------------------------------------------------------
diff --cc helix-provisioning/src/main/java/org/apache/helix/provisioning/TaskConfig.java
index 0000000,442d074..3e3b767
mode 000000,100644..100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/TaskConfig.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/TaskConfig.java
@@@ -1,0 -1,29 +1,48 @@@
+ package org.apache.helix.provisioning;
+
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements. See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership. The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License. You may obtain a copy of the License at
++ *
++ * http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing,
++ * software distributed under the License is distributed on an
++ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++ * KIND, either express or implied. See the License for the
++ * specific language governing permissions and limitations
++ * under the License.
++ */
++
+ import java.net.URI;
+ import java.net.URISyntaxException;
+ import java.util.HashMap;
+ import java.util.Map;
+
+ import org.apache.log4j.Logger;
+
+ public class TaskConfig {
+ private static final Logger LOG = Logger.getLogger(TaskConfig.class);
+
+ public Map<String, String> config = new HashMap<String, String>();
+ public String yamlFile;
+ public String name;
+
+ public URI getYamlURI() {
+ try {
+ return yamlFile != null ? new URI(yamlFile) : null;
+ } catch (URISyntaxException e) {
+ LOG.error("Error parsing URI for task config", e);
+ }
+ return null;
+ }
+
+ public String getValue(String key) {
+ return (config != null ? config.get(key) : null);
+ }
+ }
[25/50] [abbrv] git commit: Merge branch 'helix-provisioning' of
https://git-wip-us.apache.org/repos/asf/helix into helix-provisioning
Posted by ka...@apache.org.
Merge branch 'helix-provisioning' of https://git-wip-us.apache.org/repos/asf/helix into helix-provisioning
Project: http://git-wip-us.apache.org/repos/asf/helix/repo
Commit: http://git-wip-us.apache.org/repos/asf/helix/commit/c9031860
Tree: http://git-wip-us.apache.org/repos/asf/helix/tree/c9031860
Diff: http://git-wip-us.apache.org/repos/asf/helix/diff/c9031860
Branch: refs/heads/master
Commit: c9031860a9f6e175b9bd0de3441287db96eb8b8d
Parents: 7d5bd78 c0a25f6
Author: Kishore Gopalakrishna <g....@gmail.com>
Authored: Mon Feb 24 17:49:52 2014 -0800
Committer: Kishore Gopalakrishna <g....@gmail.com>
Committed: Mon Feb 24 17:49:52 2014 -0800
----------------------------------------------------------------------
----------------------------------------------------------------------
[11/50] [abbrv] git commit: Set future for release response when it
comes
Posted by ka...@apache.org.
Set future for release response when it comes
Project: http://git-wip-us.apache.org/repos/asf/helix/repo
Commit: http://git-wip-us.apache.org/repos/asf/helix/commit/64e15314
Tree: http://git-wip-us.apache.org/repos/asf/helix/tree/64e15314
Diff: http://git-wip-us.apache.org/repos/asf/helix/diff/64e15314
Branch: refs/heads/master
Commit: 64e153144c1b5602808a3f99f950f14465ca1e17
Parents: 1862834
Author: Kanak Biscuitwala <ka...@apache.org>
Authored: Fri Feb 21 12:40:03 2014 -0800
Committer: Kanak Biscuitwala <ka...@apache.org>
Committed: Fri Feb 21 12:40:03 2014 -0800
----------------------------------------------------------------------
.../helix/controller/stages/ContainerProvisioningStage.java | 8 ++++++++
.../apache/helix/provisioning/yarn/NMCallbackHandler.java | 5 ++---
.../apache/helix/provisioning/yarn/RMCallbackHandler.java | 4 ++--
.../org/apache/helix/provisioning/yarn/YarnProvisioner.java | 9 ++++-----
4 files changed, 16 insertions(+), 10 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/helix/blob/64e15314/helix-core/src/main/java/org/apache/helix/controller/stages/ContainerProvisioningStage.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/controller/stages/ContainerProvisioningStage.java b/helix-core/src/main/java/org/apache/helix/controller/stages/ContainerProvisioningStage.java
index 5cccd68..f258525 100644
--- a/helix-core/src/main/java/org/apache/helix/controller/stages/ContainerProvisioningStage.java
+++ b/helix-core/src/main/java/org/apache/helix/controller/stages/ContainerProvisioningStage.java
@@ -155,10 +155,12 @@ public class ContainerProvisioningStage extends AbstractBaseStage {
// create the helix participant and add it to cluster
helixAdmin.addInstance(cluster.getId().toString(), instanceConfig);
}
+ LOG.info("Allocating container for " + participantId);
ListenableFuture<ContainerId> future = containerProvider.allocateContainer(spec);
FutureCallback<ContainerId> callback = new FutureCallback<ContainerId>() {
@Override
public void onSuccess(ContainerId containerId) {
+ LOG.info("Container " + containerId + " acquired. Marking " + participantId);
InstanceConfig existingInstance =
helixAdmin
.getInstanceConfig(cluster.getId().toString(), participantId.toString());
@@ -188,12 +190,14 @@ public class ContainerProvisioningStage extends AbstractBaseStage {
accessor.updateProperty(keyBuilder.instanceConfig(participant.getId().toString()),
existingInstance);
// create the helix participant and add it to cluster
+ LOG.info("Starting container " + containerId + " for " + participant.getId());
ListenableFuture<Boolean> future =
containerProvider.startContainer(containerId, participant);
FutureCallback<Boolean> callback = new FutureCallback<Boolean>() {
@Override
public void onSuccess(Boolean result) {
// Do nothing yet, need to wait for live instance
+ LOG.info("Container " + containerId + " started for " + participant.getId());
}
@Override
@@ -218,10 +222,12 @@ public class ContainerProvisioningStage extends AbstractBaseStage {
accessor.updateProperty(keyBuilder.instanceConfig(participant.getId().toString()),
existingInstance);
// remove the participant
+ LOG.info("Deallocating container " + containerId + " for " + participant.getId());
ListenableFuture<Boolean> future = containerProvider.deallocateContainer(containerId);
FutureCallback<Boolean> callback = new FutureCallback<Boolean>() {
@Override
public void onSuccess(Boolean result) {
+ LOG.info("Container " + containerId + " deallocated. Dropping " + participant.getId());
InstanceConfig existingInstance =
helixAdmin.getInstanceConfig(cluster.getId().toString(), participant.getId()
.toString());
@@ -251,10 +257,12 @@ public class ContainerProvisioningStage extends AbstractBaseStage {
accessor.updateProperty(keyBuilder.instanceConfig(participant.getId().toString()),
existingInstance);
// stop the container
+ LOG.info("Stopping container " + containerId + " for " + participant.getId());
ListenableFuture<Boolean> future = containerProvider.stopContainer(containerId);
FutureCallback<Boolean> callback = new FutureCallback<Boolean>() {
@Override
public void onSuccess(Boolean result) {
+ LOG.info("Container " + containerId + " stopped. Marking " + participant.getId());
updateContainerState(helixAdmin, accessor, keyBuilder, cluster, participant.getId(),
ContainerState.HALTED);
}
http://git-wip-us.apache.org/repos/asf/helix/blob/64e15314/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/NMCallbackHandler.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/NMCallbackHandler.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/NMCallbackHandler.java
index da6c01f..1566c28 100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/NMCallbackHandler.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/NMCallbackHandler.java
@@ -8,7 +8,6 @@ import java.util.concurrent.ConcurrentMap;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
-import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;
import org.apache.hadoop.yarn.client.api.async.NMClientAsync;
import org.apache.log4j.Logger;
@@ -39,7 +38,7 @@ class NMCallbackHandler implements NMClientAsync.CallbackHandler {
applicationMaster.nmClientAsync.getContainerStatusAsync(containerId, container.getNodeId());
}
SettableFuture<ContainerStopResponse> settableFuture =
- applicationMaster.containerStopMap.get(containerId);
+ applicationMaster.containerStopMap.remove(containerId);
ContainerStopResponse value = new ContainerStopResponse();
settableFuture.set(value);
containers.remove(containerId);
@@ -59,7 +58,7 @@ class NMCallbackHandler implements NMClientAsync.CallbackHandler {
applicationMaster.nmClientAsync.getContainerStatusAsync(containerId, container.getNodeId());
}
SettableFuture<ContainerLaunchResponse> settableFuture =
- applicationMaster.containerLaunchResponseMap.get(containerId);
+ applicationMaster.containerLaunchResponseMap.remove(containerId);
ContainerLaunchResponse value = new ContainerLaunchResponse();
settableFuture.set(value);
}
http://git-wip-us.apache.org/repos/asf/helix/blob/64e15314/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/RMCallbackHandler.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/RMCallbackHandler.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/RMCallbackHandler.java
index dae28a8..fe2c854 100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/RMCallbackHandler.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/RMCallbackHandler.java
@@ -43,13 +43,13 @@ class RMCallbackHandler implements AMRMClientAsync.CallbackHandler {
// non complete containers should not be here
assert (containerStatus.getState() == ContainerState.COMPLETE);
SettableFuture<ContainerStopResponse> stopResponseFuture =
- _genericApplicationMaster.containerStopMap.get(containerStatus.getContainerId());
+ _genericApplicationMaster.containerStopMap.remove(containerStatus.getContainerId());
if (stopResponseFuture != null) {
ContainerStopResponse value = new ContainerStopResponse();
stopResponseFuture.set(value);
} else {
SettableFuture<ContainerReleaseResponse> releaseResponseFuture =
- _genericApplicationMaster.containerReleaseMap.get(containerStatus.getContainerId());
+ _genericApplicationMaster.containerReleaseMap.remove(containerStatus.getContainerId());
if (releaseResponseFuture != null) {
ContainerReleaseResponse value = new ContainerReleaseResponse();
releaseResponseFuture.set(value);
http://git-wip-us.apache.org/repos/asf/helix/blob/64e15314/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/YarnProvisioner.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/YarnProvisioner.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/YarnProvisioner.java
index 2eedfd0..2d6e306 100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/YarnProvisioner.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/YarnProvisioner.java
@@ -33,7 +33,6 @@ import org.apache.helix.HelixManager;
import org.apache.helix.api.Cluster;
import org.apache.helix.api.Participant;
import org.apache.helix.api.config.ContainerConfig;
-import org.apache.helix.api.config.ParticipantConfig;
import org.apache.helix.api.config.ResourceConfig;
import org.apache.helix.api.id.ParticipantId;
import org.apache.helix.api.id.ResourceId;
@@ -210,7 +209,7 @@ public class YarnProvisioner implements Provisioner, TargetProvider, ContainerPr
vargs.add("--cluster " + appName);
vargs.add("--participantId " + participant.getId().stringify());
vargs.add("--participantClass " + mainClass);
-
+
vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/ContainerParticipant.stdout");
vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/ContainerParticipant.stderr");
@@ -323,9 +322,9 @@ public class YarnProvisioner implements Provisioner, TargetProvider, ContainerPr
excessActiveContainers.remove(participantId); // don't stop this container if active
if (excessHaltedContainers.containsKey(participantId)) {
// Halted containers can be restarted if necessary
- Participant participant = excessHaltedContainers.get(participantId);
- //containersToStart.add(participant);
- //excessHaltedContainers.remove(participantId); // don't release this container
+ // Participant participant = excessHaltedContainers.get(participantId);
+ // containersToStart.add(participant);
+ // excessHaltedContainers.remove(participantId); // don't release this container
} else if (!existingContainersIdSet.contains(participantId)) {
// Unallocated containers must be allocated
ContainerSpec containerSpec = new ContainerSpec(participantId);
[21/50] [abbrv] git commit: Adding tool to update a container count
for a given service
Posted by ka...@apache.org.
Adding tool to update a container count for a given service
Project: http://git-wip-us.apache.org/repos/asf/helix/repo
Commit: http://git-wip-us.apache.org/repos/asf/helix/commit/2339465d
Tree: http://git-wip-us.apache.org/repos/asf/helix/tree/2339465d
Diff: http://git-wip-us.apache.org/repos/asf/helix/diff/2339465d
Branch: refs/heads/master
Commit: 2339465ddc934862b71425953a64cc476f1e63c5
Parents: 224c7ea
Author: Kishore Gopalakrishna <g....@gmail.com>
Authored: Mon Feb 24 13:30:39 2014 -0800
Committer: Kishore Gopalakrishna <g....@gmail.com>
Committed: Mon Feb 24 13:30:39 2014 -0800
----------------------------------------------------------------------
helix-provisioning/pom.xml | 8 +-
.../tools/UpdateProvisionerConfig.java | 87 ++++++++++++++++++++
.../java/tools/UpdateProvisionerConfig.java | 87 --------------------
.../yarn/example/HelloWordAppSpecFactory.java | 67 ++-------------
4 files changed, 96 insertions(+), 153 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/helix/blob/2339465d/helix-provisioning/pom.xml
----------------------------------------------------------------------
diff --git a/helix-provisioning/pom.xml b/helix-provisioning/pom.xml
index 3ba7d39..4a2e523 100644
--- a/helix-provisioning/pom.xml
+++ b/helix-provisioning/pom.xml
@@ -81,14 +81,14 @@ under the License.
<artifactId>appassembler-maven-plugin</artifactId>
<configuration>
<programs>
- <program>
- <mainClass>org.apache.helix.provisioning.yarn.Client</mainClass>
- <name>yarn-job-launcher</name>
- </program>
<program>
<mainClass>org.apache.helix.provisioning.yarn.AppLauncher</mainClass>
<name>app-launcher</name>
</program>
+ <program>
+ <mainClass>org.apache.helix.provisioning.tools.UpdateProvisionerConfig</mainClass>
+ <name>update-provisioner-config</name>
+ </program>
</programs>
</configuration>
</plugin>
http://git-wip-us.apache.org/repos/asf/helix/blob/2339465d/helix-provisioning/src/main/java/org/apache/helix/provisioning/tools/UpdateProvisionerConfig.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/tools/UpdateProvisionerConfig.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/tools/UpdateProvisionerConfig.java
new file mode 100644
index 0000000..f3cce42
--- /dev/null
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/tools/UpdateProvisionerConfig.java
@@ -0,0 +1,87 @@
+package org.apache.helix.provisioning.tools;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.GnuParser;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.OptionBuilder;
+import org.apache.commons.cli.OptionGroup;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
+import org.apache.helix.HelixConnection;
+import org.apache.helix.api.Resource;
+import org.apache.helix.api.accessor.ResourceAccessor;
+import org.apache.helix.api.config.ResourceConfig;
+import org.apache.helix.api.id.ClusterId;
+import org.apache.helix.api.id.ResourceId;
+import org.apache.helix.manager.zk.ZkHelixConnection;
+import org.apache.helix.provisioning.yarn.YarnProvisionerConfig;
+import org.apache.log4j.Logger;
+/**
+ * Update the provisioner config
+ */
+public class UpdateProvisionerConfig {
+ private static Logger LOG = Logger.getLogger(UpdateProvisionerConfig.class);
+ private static String updateContainerCount = "updateContainerCount";
+ private HelixConnection _connection;
+
+ public UpdateProvisionerConfig(String zkAddress) {
+ _connection = new ZkHelixConnection(zkAddress);
+ _connection.connect();
+ }
+
+ public void setNumContainers(String appName, String serviceName, int numContainers) {
+ ResourceId resourceId = ResourceId.from(serviceName);
+
+ ResourceAccessor resourceAccessor = _connection.createResourceAccessor(ClusterId.from(appName));
+ Resource resource = resourceAccessor.readResource(resourceId);
+ LOG.info("Current provisioner config:"+ resource.getProvisionerConfig());
+
+ ResourceConfig.Delta delta = new ResourceConfig.Delta(resourceId);
+ YarnProvisionerConfig config = new YarnProvisionerConfig(resourceId);
+ config.setNumContainers(numContainers);
+ delta.setProvisionerConfig(config);
+ ResourceConfig updatedResourceConfig = resourceAccessor.updateResource(resourceId, delta);
+ LOG.info("Update provisioner config:"+ updatedResourceConfig.getProvisionerConfig());
+
+ }
+
+ @SuppressWarnings("static-access")
+ public static void main(String[] args) throws ParseException {
+ Option zkServerOption =
+ OptionBuilder.withLongOpt("zookeeperAddress").withDescription("Provide zookeeper address")
+ .create();
+ zkServerOption.setArgs(1);
+ zkServerOption.setRequired(true);
+ zkServerOption.setArgName("zookeeperAddress(Required)");
+
+ OptionGroup group = new OptionGroup();
+ group.setRequired(true);
+
+ // update container count per service
+ Option updateContainerCountOption =
+ OptionBuilder.withLongOpt(updateContainerCount)
+ .withDescription("appName serviceName numContainers").create();
+ updateContainerCountOption.setArgs(3);
+ updateContainerCountOption.setRequired(false);
+ updateContainerCountOption.setArgName("appName serviceName numContainers");
+
+ group.addOption(updateContainerCountOption);
+
+ Options options = new Options();
+ options.addOption(zkServerOption);
+ options.addOptionGroup(group);
+ CommandLine cliParser = new GnuParser().parse(options, args);
+
+ String zkAddress = cliParser.getOptionValue("zookeeperAddress");
+ UpdateProvisionerConfig updater = new UpdateProvisionerConfig(zkAddress);
+
+ if (cliParser.hasOption(updateContainerCount)) {
+ String appName = cliParser.getOptionValues(updateContainerCount)[0];
+ String serviceName = cliParser.getOptionValues(updateContainerCount)[1];
+ int numContainers = Integer.parseInt(
+ cliParser.getOptionValues(updateContainerCount)[2]);
+ updater.setNumContainers(appName, serviceName, numContainers);
+ }
+
+ }
+}
http://git-wip-us.apache.org/repos/asf/helix/blob/2339465d/helix-provisioning/src/main/java/tools/UpdateProvisionerConfig.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/tools/UpdateProvisionerConfig.java b/helix-provisioning/src/main/java/tools/UpdateProvisionerConfig.java
deleted file mode 100644
index 89ee1c5..0000000
--- a/helix-provisioning/src/main/java/tools/UpdateProvisionerConfig.java
+++ /dev/null
@@ -1,87 +0,0 @@
-package tools;
-
-import org.apache.commons.cli.CommandLine;
-import org.apache.commons.cli.GnuParser;
-import org.apache.commons.cli.Option;
-import org.apache.commons.cli.OptionBuilder;
-import org.apache.commons.cli.OptionGroup;
-import org.apache.commons.cli.Options;
-import org.apache.commons.cli.ParseException;
-import org.apache.helix.HelixConnection;
-import org.apache.helix.api.Resource;
-import org.apache.helix.api.accessor.ResourceAccessor;
-import org.apache.helix.api.config.ResourceConfig;
-import org.apache.helix.api.id.ClusterId;
-import org.apache.helix.api.id.ResourceId;
-import org.apache.helix.manager.zk.ZkHelixConnection;
-import org.apache.helix.provisioning.yarn.YarnProvisionerConfig;
-import org.apache.log4j.Logger;
-/**
- * Update the provisioner config
- */
-public class UpdateProvisionerConfig {
- private static Logger LOG = Logger.getLogger(UpdateProvisionerConfig.class);
- private static String updateContainerCount = "updateContainerCount";
- private HelixConnection _connection;
-
- public UpdateProvisionerConfig(String zkAddress) {
- _connection = new ZkHelixConnection(zkAddress);
- _connection.connect();
- }
-
- public void setNumContainers(String appName, String serviceName, int numContainers) {
- ResourceId resourceId = ResourceId.from(serviceName);
-
- ResourceAccessor resourceAccessor = _connection.createResourceAccessor(ClusterId.from(appName));
- Resource resource = resourceAccessor.readResource(resourceId);
- LOG.info("Current provisioner config:"+ resource.getProvisionerConfig());
-
- ResourceConfig.Delta delta = new ResourceConfig.Delta(resourceId);
- YarnProvisionerConfig config = new YarnProvisionerConfig(resourceId);
- config.setNumContainers(numContainers);
- delta.setProvisionerConfig(config);
- ResourceConfig updatedResourceConfig = resourceAccessor.updateResource(resourceId, delta);
- LOG.info("Update provisioner config:"+ updatedResourceConfig.getProvisionerConfig());
-
- }
-
- @SuppressWarnings("static-access")
- public static void main(String[] args) throws ParseException {
- Option zkServerOption =
- OptionBuilder.withLongOpt("zookeeperAddress").withDescription("Provide zookeeper address")
- .create();
- zkServerOption.setArgs(1);
- zkServerOption.setRequired(true);
- zkServerOption.setArgName("zookeeperAddress(Required)");
-
- OptionGroup group = new OptionGroup();
- group.setRequired(true);
-
- // update container count per service
- Option updateContainerCountOption =
- OptionBuilder.withLongOpt(updateContainerCount)
- .withDescription("set the number of containers per service").create();
- updateContainerCountOption.setArgs(3);
- updateContainerCountOption.setRequired(false);
- updateContainerCountOption.setArgName("appName serviceName numContainers");
-
- group.addOption(updateContainerCountOption);
-
- Options options = new Options();
- options.addOption(zkServerOption);
- options.addOptionGroup(group);
- CommandLine cliParser = new GnuParser().parse(options, args);
-
- String zkAddress = cliParser.getOptionValue("zookeeperAddress");
- UpdateProvisionerConfig updater = new UpdateProvisionerConfig(zkAddress);
-
- if (cliParser.hasOption(updateContainerCount)) {
- String appName = cliParser.getOptionValues(updateContainerCount)[0];
- String serviceName = cliParser.getOptionValues(updateContainerCount)[1];
- int numContainers = Integer.parseInt(
- cliParser.getOptionValues(updateContainerCount)[2]);
- updater.setNumContainers(appName, serviceName, numContainers);
- }
-
- }
-}
http://git-wip-us.apache.org/repos/asf/helix/blob/2339465d/recipes/helloworld-provisioning-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWordAppSpecFactory.java
----------------------------------------------------------------------
diff --git a/recipes/helloworld-provisioning-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWordAppSpecFactory.java b/recipes/helloworld-provisioning-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWordAppSpecFactory.java
index 03c1341..20591cf 100644
--- a/recipes/helloworld-provisioning-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWordAppSpecFactory.java
+++ b/recipes/helloworld-provisioning-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWordAppSpecFactory.java
@@ -16,33 +16,6 @@ import org.yaml.snakeyaml.Yaml;
public class HelloWordAppSpecFactory implements ApplicationSpecFactory {
- static HelloworldAppSpec data;
-
- static {
- HelloworldAppSpec data = new HelloworldAppSpec();
- AppConfig appConfig = new AppConfig();
- appConfig.setValue("k1", "v1");
- data.setAppConfig(appConfig);
- data.setAppName("testApp");
- data.setAppMasterPackageUri(
- "/Users/kgopalak/Documents/projects/incubator-helix/helix-provisioning/helix-provisioning-0.7.1-incubating-SNAPSHOT-pkg.tar");
- HashMap<String, Map<String, String>> serviceConfigMap = new HashMap<String, Map<String, String>>();
- serviceConfigMap.put("HelloWorld", new HashMap<String, String>());
- serviceConfigMap.get("HelloWorld").put("k1", "v1");
- data.setServiceConfigMap(serviceConfigMap);
- HashMap<String, String> serviceMainClassMap = new HashMap<String, String>();
- serviceMainClassMap.put("HelloWorld", HelloWorldService.class.getCanonicalName());
- data.setServiceMainClassMap(serviceMainClassMap);
- HashMap<String, String> servicePackageURIMap = new HashMap<String, String>();
- servicePackageURIMap
- .put(
- "HelloWorld",
- "/Users/kgopalak/Documents/projects/incubator-helix/helix-provisioning/helix-provisioning-0.7.1-incubating-SNAPSHOT-pkg.tar");
- data.setServicePackageURIMap(servicePackageURIMap);
- data.setServices(Arrays.asList(new String[] {
- "HelloWorld"
- })); }
-
@Override
public ApplicationSpec fromYaml(InputStream inputstream) {
return (ApplicationSpec) new Yaml().load(inputstream);
@@ -50,43 +23,13 @@ public class HelloWordAppSpecFactory implements ApplicationSpecFactory {
}
public static void main(String[] args) {
- DumperOptions options = new DumperOptions();
- options.setPrettyFlow(true);
- Yaml yaml = new Yaml(options);
- HelloworldAppSpec data = new HelloworldAppSpec();
- AppConfig appConfig = new AppConfig();
- appConfig.setValue("k1", "v1");
- data.setAppConfig(appConfig);
- data.setAppName("testApp");
- data.setAppMasterPackageUri(
- "/Users/kgopalak/Documents/projects/incubator-helix/helix-provisioning/helix-provisioning-0.7.1-incubating-SNAPSHOT-pkg.tar");
- HashMap<String, Map<String, String>> serviceConfigMap = new HashMap<String, Map<String, String>>();
- serviceConfigMap.put("HelloWorld", new HashMap<String, String>());
- serviceConfigMap.get("HelloWorld").put("k1", "v1");
- data.setServiceConfigMap(serviceConfigMap);
- HashMap<String, String> serviceMainClassMap = new HashMap<String, String>();
- serviceMainClassMap.put("HelloWorld", HelloWorldService.class.getCanonicalName());
- data.setServiceMainClassMap(serviceMainClassMap);
- HashMap<String, String> servicePackageURIMap = new HashMap<String, String>();
- servicePackageURIMap
- .put(
- "HelloWorld",
- "/Users/kgopalak/Documents/projects/incubator-helix/helix-provisioning/helix-provisioning-0.7.1-incubating-SNAPSHOT-pkg.tar");
- data.setServicePackageURIMap(servicePackageURIMap);
- data.setServices(Arrays.asList(new String[] {
- "HelloWorld"
- }));
- String dump = yaml.dump(data);
+ Yaml yaml = new Yaml();
+ InputStream resourceAsStream =
+ ClassLoader.getSystemClassLoader().getResourceAsStream("hello_world_app_spec.yaml");
+ HelloworldAppSpec spec = yaml.loadAs(resourceAsStream, HelloworldAppSpec.class);
+ String dump = yaml.dump(spec);
System.out.println(dump);
- InputStream resourceAsStream = ClassLoader.getSystemClassLoader().getResourceAsStream("hello_world_app_spec.yaml");
- HelloworldAppSpec load = yaml.loadAs(resourceAsStream,HelloworldAppSpec.class);
- String dumpnew = yaml.dump(load);
- System.out.println(dumpnew.equals(dump));
-
- System.out.println("==================================");
- System.out.println(dumpnew);
-
}
}
[07/50] [abbrv] git commit: Almost complete working example of
Helloworld
Posted by ka...@apache.org.
Almost complete working example of Helloworld
Project: http://git-wip-us.apache.org/repos/asf/helix/repo
Commit: http://git-wip-us.apache.org/repos/asf/helix/commit/970770ac
Tree: http://git-wip-us.apache.org/repos/asf/helix/tree/970770ac
Diff: http://git-wip-us.apache.org/repos/asf/helix/diff/970770ac
Branch: refs/heads/master
Commit: 970770acf5c2cf6d267ebb112534c1a22c63a4bb
Parents: 8b19cfc
Author: Kishore Gopalakrishna <g....@gmail.com>
Authored: Thu Feb 20 23:40:07 2014 -0800
Committer: Kishore Gopalakrishna <g....@gmail.com>
Committed: Thu Feb 20 23:40:07 2014 -0800
----------------------------------------------------------------------
.../provisioning/yarn/NMCallbackHandler.java | 27 +++++++++++---------
1 file changed, 15 insertions(+), 12 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/helix/blob/970770ac/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/NMCallbackHandler.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/NMCallbackHandler.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/NMCallbackHandler.java
index 3735e7a..da6c01f 100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/NMCallbackHandler.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/NMCallbackHandler.java
@@ -10,6 +10,7 @@ import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;
import org.apache.hadoop.yarn.client.api.async.NMClientAsync;
+import org.apache.log4j.Logger;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.util.concurrent.SettableFuture;
@@ -17,6 +18,7 @@ import com.google.common.util.concurrent.SettableFuture;
@VisibleForTesting
class NMCallbackHandler implements NMClientAsync.CallbackHandler {
+ private Logger LOG = Logger.getLogger(NMCallbackHandler.class);
private ConcurrentMap<ContainerId, Container> containers =
new ConcurrentHashMap<ContainerId, Container>();
private final GenericApplicationMaster applicationMaster;
@@ -31,25 +33,26 @@ class NMCallbackHandler implements NMClientAsync.CallbackHandler {
@Override
public void onContainerStopped(ContainerId containerId) {
- if (GenericApplicationMaster.LOG.isDebugEnabled()) {
- GenericApplicationMaster.LOG.debug("Succeeded to stop Container " + containerId);
+ LOG.info("Succeeded to stop Container " + containerId);
+ Container container = containers.get(containerId);
+ if (container != null) {
+ applicationMaster.nmClientAsync.getContainerStatusAsync(containerId, container.getNodeId());
}
+ SettableFuture<ContainerStopResponse> settableFuture =
+ applicationMaster.containerStopMap.get(containerId);
+ ContainerStopResponse value = new ContainerStopResponse();
+ settableFuture.set(value);
containers.remove(containerId);
}
@Override
public void onContainerStatusReceived(ContainerId containerId, ContainerStatus containerStatus) {
- if (GenericApplicationMaster.LOG.isDebugEnabled()) {
- GenericApplicationMaster.LOG.debug("Container Status: id=" + containerId + ", status="
- + containerStatus);
- }
+ LOG.info("Container Status: id=" + containerId + ", status=" + containerStatus);
}
@Override
public void onContainerStarted(ContainerId containerId, Map<String, ByteBuffer> allServiceResponse) {
- if (GenericApplicationMaster.LOG.isDebugEnabled()) {
- GenericApplicationMaster.LOG.debug("Succeeded to start Container " + containerId);
- }
+ LOG.debug("Succeeded to start Container " + containerId);
Container container = containers.get(containerId);
if (container != null) {
@@ -63,18 +66,18 @@ class NMCallbackHandler implements NMClientAsync.CallbackHandler {
@Override
public void onStartContainerError(ContainerId containerId, Throwable t) {
- GenericApplicationMaster.LOG.error("Failed to start Container " + containerId);
+ LOG.error("Failed to start Container " + containerId);
containers.remove(containerId);
}
@Override
public void onGetContainerStatusError(ContainerId containerId, Throwable t) {
- GenericApplicationMaster.LOG.error("Failed to query the status of Container " + containerId);
+ LOG.error("Failed to query the status of Container " + containerId);
}
@Override
public void onStopContainerError(ContainerId containerId, Throwable t) {
- GenericApplicationMaster.LOG.error("Failed to stop Container " + containerId);
+ LOG.error("Failed to stop Container " + containerId);
containers.remove(containerId);
}
}
[05/50] [abbrv] git commit: Made container states more consistent,
changed yarn target provider logic
Posted by ka...@apache.org.
Made container states more consistent, changed yarn target provider logic
Project: http://git-wip-us.apache.org/repos/asf/helix/repo
Commit: http://git-wip-us.apache.org/repos/asf/helix/commit/57b4b180
Tree: http://git-wip-us.apache.org/repos/asf/helix/tree/57b4b180
Diff: http://git-wip-us.apache.org/repos/asf/helix/diff/57b4b180
Branch: refs/heads/master
Commit: 57b4b180e0c0b7f3ae0c21191af1f72bca61732f
Parents: cb6aa4f
Author: Kanak Biscuitwala <ka...@apache.org>
Authored: Wed Feb 19 18:58:00 2014 -0800
Committer: Kanak Biscuitwala <ka...@apache.org>
Committed: Wed Feb 19 18:58:00 2014 -0800
----------------------------------------------------------------------
.../controller/provisioner/ContainerState.java | 5 +-
.../stages/ContainerProvisioningStage.java | 9 ++-
.../integration/TestLocalContainerProvider.java | 4 +-
.../provisioning/yarn/YarnProvisioner.java | 83 ++++++++++++--------
4 files changed, 61 insertions(+), 40 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/helix/blob/57b4b180/helix-core/src/main/java/org/apache/helix/controller/provisioner/ContainerState.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/controller/provisioner/ContainerState.java b/helix-core/src/main/java/org/apache/helix/controller/provisioner/ContainerState.java
index cf4b736..449f636 100644
--- a/helix-core/src/main/java/org/apache/helix/controller/provisioner/ContainerState.java
+++ b/helix-core/src/main/java/org/apache/helix/controller/provisioner/ContainerState.java
@@ -23,8 +23,9 @@ public enum ContainerState {
ACQUIRING,
ACQUIRED,
CONNECTING,
- ACTIVE,
- TEARDOWN,
+ CONNECTED,
+ DISCONNECTED,
+ HALTING,
HALTED,
FINALIZING,
FINALIZED,
http://git-wip-us.apache.org/repos/asf/helix/blob/57b4b180/helix-core/src/main/java/org/apache/helix/controller/stages/ContainerProvisioningStage.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/controller/stages/ContainerProvisioningStage.java b/helix-core/src/main/java/org/apache/helix/controller/stages/ContainerProvisioningStage.java
index 48166bf..42c8218 100644
--- a/helix-core/src/main/java/org/apache/helix/controller/stages/ContainerProvisioningStage.java
+++ b/helix-core/src/main/java/org/apache/helix/controller/stages/ContainerProvisioningStage.java
@@ -22,7 +22,6 @@ package org.apache.helix.controller.stages;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
-import java.util.UUID;
import org.apache.helix.HelixAdmin;
import org.apache.helix.HelixDataAccessor;
@@ -166,12 +165,13 @@ public class ContainerProvisioningStage extends AbstractBaseStage {
accessor.updateProperty(keyBuilder.instanceConfig(participant.getId().toString()),
existingInstance);
// create the helix participant and add it to cluster
- ListenableFuture<Boolean> future = containerProvider.startContainer(containerId, participant);
+ ListenableFuture<Boolean> future =
+ containerProvider.startContainer(containerId, participant);
FutureCallback<Boolean> callback = new FutureCallback<Boolean>() {
@Override
public void onSuccess(Boolean result) {
updateContainerState(helixAdmin, accessor, keyBuilder, cluster, participant.getId(),
- ContainerState.ACTIVE);
+ ContainerState.CONNECTED);
}
@Override
@@ -225,7 +225,7 @@ public class ContainerProvisioningStage extends AbstractBaseStage {
.toString());
final ContainerId containerId = existingInstance.getContainerId();
existingInstance.setInstanceEnabled(false);
- existingInstance.setContainerState(ContainerState.TEARDOWN);
+ existingInstance.setContainerState(ContainerState.HALTING);
accessor.updateProperty(keyBuilder.instanceConfig(participant.getId().toString()),
existingInstance);
// stop the container
@@ -267,6 +267,7 @@ public class ContainerProvisioningStage extends AbstractBaseStage {
InstanceConfig existingInstance =
helixAdmin.getInstanceConfig(cluster.getId().toString(), participantId.toString());
existingInstance.setContainerState(state);
+ existingInstance.setInstanceEnabled(state.equals(ContainerState.CONNECTED));
accessor.updateProperty(keyBuilder.instanceConfig(participantId.toString()), existingInstance);
}
http://git-wip-us.apache.org/repos/asf/helix/blob/57b4b180/helix-core/src/test/java/org/apache/helix/integration/TestLocalContainerProvider.java
----------------------------------------------------------------------
diff --git a/helix-core/src/test/java/org/apache/helix/integration/TestLocalContainerProvider.java b/helix-core/src/test/java/org/apache/helix/integration/TestLocalContainerProvider.java
index 0e4c803..0f7be64 100644
--- a/helix-core/src/test/java/org/apache/helix/integration/TestLocalContainerProvider.java
+++ b/helix-core/src/test/java/org/apache/helix/integration/TestLocalContainerProvider.java
@@ -250,7 +250,7 @@ public class TestLocalContainerProvider extends ZkUnitTestBase {
participantService.startAsync();
participantService.awaitRunning();
_participants.put(containerId, participantService);
- _states.put(containerId, ContainerState.ACTIVE);
+ _states.put(containerId, ContainerState.CONNECTED);
started++;
SettableFuture<Boolean> future = SettableFuture.create();
future.set(true);
@@ -294,7 +294,7 @@ public class TestLocalContainerProvider extends ZkUnitTestBase {
// acquired containers are ready to start
containersToStart.add(participant);
break;
- case ACTIVE:
+ case CONNECTED:
// stop at most two active at a time, wait for everything to be up first
if (stopCount < 2 && _askCount >= MAX_PARTICIPANTS) {
containersToStop.add(participant);
http://git-wip-us.apache.org/repos/asf/helix/blob/57b4b180/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/YarnProvisioner.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/YarnProvisioner.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/YarnProvisioner.java
index 4fcc219..daac87b 100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/YarnProvisioner.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/YarnProvisioner.java
@@ -1,10 +1,6 @@
package org.apache.helix.provisioning.yarn;
import java.io.File;
-import java.io.FileInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
@@ -13,22 +9,13 @@ import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.Vector;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Future;
import java.util.concurrent.Executors;
-import org.apache.commons.compress.archivers.ArchiveStreamFactory;
-import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
-import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.DataOutputBuffer;
-import org.apache.hadoop.security.Credentials;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
import org.apache.hadoop.yarn.api.records.Container;
@@ -55,13 +42,13 @@ import org.apache.helix.controller.provisioner.ContainerProvider;
import org.apache.helix.controller.provisioner.ContainerSpec;
import org.apache.helix.controller.provisioner.ContainerState;
import org.apache.helix.controller.provisioner.Provisioner;
-import org.apache.helix.controller.provisioner.ProvisionerConfig;
import org.apache.helix.controller.provisioner.TargetProvider;
import org.apache.helix.controller.provisioner.TargetProviderResponse;
import org.apache.helix.model.InstanceConfig;
-import com.google.common.collect.Lists;
import com.google.common.base.Function;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
@@ -274,39 +261,57 @@ public class YarnProvisioner implements Provisioner, TargetProvider, ContainerPr
.getProvisionerConfig();
int targetNumContainers = provisionerConfig.getNumContainers();
+ // Any container that is in a state should be put in this set
Set<ContainerId> existingContainersIdSet = new HashSet<ContainerId>();
-
+
+ // Cache halted containers to determine which to restart and which to release
+ Map<ContainerId, Participant> excessHaltedContainers = Maps.newHashMap();
+
+ // Cache participants to ensure that excess participants are stopped
+ Map<ContainerId, Participant> excessActiveContainers = Maps.newHashMap();
for (Participant participant : participants) {
ContainerConfig containerConfig = participant.getContainerConfig();
if (containerConfig != null && containerConfig.getState() != null) {
ContainerState state = containerConfig.getState();
switch (state) {
+ case ACQUIRING:
+ existingContainersIdSet.add(containerConfig.getId());
+ break;
case ACQUIRED:
// acquired containers are ready to start
+ existingContainersIdSet.add(containerConfig.getId());
containersToStart.add(participant);
break;
- case ACTIVE:
+ case CONNECTING:
existingContainersIdSet.add(containerConfig.getId());
break;
- case HALTED:
- // halted containers can be released
- containersToRelease.add(participant);
+ case CONNECTED:
+ // active containers can be stopped or kept active
+ existingContainersIdSet.add(containerConfig.getId());
+ excessActiveContainers.put(containerConfig.getId(), participant);
break;
- case ACQUIRING:
+ case DISCONNECTED:
+ // disconnected containers must be stopped
+ existingContainersIdSet.add(containerConfig.getId());
+ containersToStop.add(participant);
+ case HALTING:
existingContainersIdSet.add(containerConfig.getId());
break;
- case CONNECTING:
+ case HALTED:
+ // halted containers can be released or restarted
+ existingContainersIdSet.add(containerConfig.getId());
+ excessHaltedContainers.put(containerConfig.getId(), participant);
break;
- case FAILED:
- //remove the failed instance
- _helixManager.getClusterManagmentTool().dropInstance(cluster.getId().toString(), new InstanceConfig(participant.getId()));
+ case FINALIZING:
+ existingContainersIdSet.add(containerConfig.getId());
break;
case FINALIZED:
break;
- case FINALIZING:
- break;
- case TEARDOWN:
+ case FAILED:
+ // remove the failed instance
+ _helixManager.getClusterManagmentTool().dropInstance(cluster.getId().toString(),
+ new InstanceConfig(participant.getId()));
break;
default:
break;
@@ -318,18 +323,32 @@ public class YarnProvisioner implements Provisioner, TargetProvider, ContainerPr
}
}
}
-
+
for (int i = 0; i < targetNumContainers; i++) {
ContainerId containerId = ContainerId.from(resourceId + "_container_" + (i));
- if(!existingContainersIdSet.contains(containerId)){
+ excessActiveContainers.remove(containerId); // don't stop this container if active
+ if (excessHaltedContainers.containsKey(containerId)) {
+ // Halted containers can be restarted if necessary
+ Participant participant = excessHaltedContainers.get(containerId);
+ containersToStart.add(participant);
+ excessHaltedContainers.remove(containerId); // don't release this container
+ } else if (!existingContainersIdSet.contains(containerId)) {
+ // Unallocated containers must be allocated
ContainerSpec containerSpec = new ContainerSpec(containerId);
ParticipantId participantId = ParticipantId.from(containerId.stringify());
- ParticipantConfig participantConfig = applicationSpec.getParticipantConfig(resourceId.stringify(), participantId);
+ ParticipantConfig participantConfig =
+ applicationSpec.getParticipantConfig(resourceId.stringify(), participantId);
containerSpec.setMemory(participantConfig.getUserConfig().getIntField("memory", 1024));
containersToAcquire.add(containerSpec);
}
}
-
+
+ // Add all the containers that should be stopped because they fall outside the target range
+ containersToStop.addAll(excessActiveContainers.values());
+
+ // Add halted containers that should not be restarted
+ containersToRelease.addAll(excessHaltedContainers.values());
+
response.setContainersToAcquire(containersToAcquire);
response.setContainersToStart(containersToStart);
response.setContainersToRelease(containersToRelease);
[34/50] [abbrv] git commit: Port recent task framework changes
Posted by ka...@apache.org.
Port recent task framework changes
Project: http://git-wip-us.apache.org/repos/asf/helix/repo
Commit: http://git-wip-us.apache.org/repos/asf/helix/commit/97ca4de4
Tree: http://git-wip-us.apache.org/repos/asf/helix/tree/97ca4de4
Diff: http://git-wip-us.apache.org/repos/asf/helix/diff/97ca4de4
Branch: refs/heads/master
Commit: 97ca4de4af522ec8dd8ab2e63b30a34d2a455d34
Parents: 8f0b7e4
Author: Kanak Biscuitwala <ka...@apache.org>
Authored: Wed Apr 30 11:39:23 2014 -0700
Committer: Kanak Biscuitwala <ka...@apache.org>
Committed: Wed Apr 30 11:39:23 2014 -0700
----------------------------------------------------------------------
.../handling/HelixStateTransitionHandler.java | 2 +
.../helix/task/AbstractTaskRebalancer.java | 639 -------------------
.../helix/task/FixedTargetTaskRebalancer.java | 162 +++++
.../helix/task/GenericTaskRebalancer.java | 196 ++++++
.../helix/task/IndependentTaskRebalancer.java | 137 ----
.../java/org/apache/helix/task/JobConfig.java | 334 ++++++++++
.../java/org/apache/helix/task/JobContext.java | 227 +++++++
.../main/java/org/apache/helix/task/JobDag.java | 151 +++++
.../java/org/apache/helix/task/TargetState.java | 8 +-
.../apache/helix/task/TaskCallbackContext.java | 67 ++
.../java/org/apache/helix/task/TaskConfig.java | 359 +++--------
.../java/org/apache/helix/task/TaskContext.java | 135 ----
.../java/org/apache/helix/task/TaskDag.java | 152 -----
.../java/org/apache/helix/task/TaskDriver.java | 132 ++--
.../java/org/apache/helix/task/TaskFactory.java | 5 +-
.../org/apache/helix/task/TaskRebalancer.java | 625 ++++++++++++++++--
.../java/org/apache/helix/task/TaskRunner.java | 1 -
.../org/apache/helix/task/TaskStateModel.java | 43 +-
.../java/org/apache/helix/task/TaskUtil.java | 133 ++--
.../java/org/apache/helix/task/Workflow.java | 175 +++--
.../org/apache/helix/task/WorkflowConfig.java | 20 +-
.../org/apache/helix/task/WorkflowContext.java | 64 +-
.../org/apache/helix/task/beans/JobBean.java | 42 ++
.../org/apache/helix/task/beans/TaskBean.java | 16 +-
.../apache/helix/task/beans/WorkflowBean.java | 2 +-
.../task/TestIndependentTaskRebalancer.java | 171 +++++
.../integration/task/TestTaskRebalancer.java | 152 ++---
.../task/TestTaskRebalancerStopResume.java | 43 +-
.../apache/helix/integration/task/TestUtil.java | 15 +-
.../integration/task/WorkflowGenerator.java | 75 ++-
.../helix/provisioning/tools/TaskManager.java | 247 -------
.../provisioning/tools/TestTaskManager.java | 149 -----
32 files changed, 2478 insertions(+), 2201 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/helix/blob/97ca4de4/helix-core/src/main/java/org/apache/helix/messaging/handling/HelixStateTransitionHandler.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/messaging/handling/HelixStateTransitionHandler.java b/helix-core/src/main/java/org/apache/helix/messaging/handling/HelixStateTransitionHandler.java
index 55d8965..1bb6506 100644
--- a/helix-core/src/main/java/org/apache/helix/messaging/handling/HelixStateTransitionHandler.java
+++ b/helix-core/src/main/java/org/apache/helix/messaging/handling/HelixStateTransitionHandler.java
@@ -58,6 +58,8 @@ import org.apache.log4j.Logger;
public class HelixStateTransitionHandler extends MessageHandler {
public static class HelixStateMismatchException extends Exception {
+ private static final long serialVersionUID = -7669959598697794766L;
+
public HelixStateMismatchException(String info) {
super(info);
}
http://git-wip-us.apache.org/repos/asf/helix/blob/97ca4de4/helix-core/src/main/java/org/apache/helix/task/AbstractTaskRebalancer.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/AbstractTaskRebalancer.java b/helix-core/src/main/java/org/apache/helix/task/AbstractTaskRebalancer.java
deleted file mode 100644
index f733fb5..0000000
--- a/helix-core/src/main/java/org/apache/helix/task/AbstractTaskRebalancer.java
+++ /dev/null
@@ -1,639 +0,0 @@
-package org.apache.helix.task;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.SortedSet;
-import java.util.TreeMap;
-import java.util.TreeSet;
-
-import org.apache.helix.AccessOption;
-import org.apache.helix.HelixDataAccessor;
-import org.apache.helix.HelixManager;
-import org.apache.helix.PropertyKey;
-import org.apache.helix.ZNRecord;
-import org.apache.helix.api.Cluster;
-import org.apache.helix.api.State;
-import org.apache.helix.api.accessor.ResourceAccessor;
-import org.apache.helix.api.id.ParticipantId;
-import org.apache.helix.api.id.PartitionId;
-import org.apache.helix.api.id.ResourceId;
-import org.apache.helix.controller.context.ControllerContextProvider;
-import org.apache.helix.controller.rebalancer.HelixRebalancer;
-import org.apache.helix.controller.rebalancer.config.BasicRebalancerConfig;
-import org.apache.helix.controller.rebalancer.config.PartitionedRebalancerConfig;
-import org.apache.helix.controller.rebalancer.config.RebalancerConfig;
-import org.apache.helix.controller.stages.ResourceCurrentState;
-import org.apache.helix.model.IdealState;
-import org.apache.helix.model.ResourceAssignment;
-import org.apache.log4j.Logger;
-
-import com.google.common.base.Joiner;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Sets;
-
-/**
- * Custom rebalancer implementation for the {@code Task} state model. Abstract task rebalancer with
- * a pluggable assignment policy.
- */
-public abstract class AbstractTaskRebalancer implements HelixRebalancer {
- private static final Logger LOG = Logger.getLogger(AbstractTaskRebalancer.class);
- private HelixManager _manager;
-
- /**
- * Get all the partitions that should be created by this task
- * @param taskCfg the task configuration
- * @param workflowCfg the workflow configuration
- * @param workflowCtx the workflow context
- * @param cluster cluster snapshot
- * @return set of partition numbers
- */
- public abstract Set<Integer> getAllTaskPartitions(TaskConfig taskCfg, WorkflowConfig workflowCfg,
- WorkflowContext workflowCtx, Cluster cluster);
-
- /**
- * Compute an assignment of tasks to instances
- * @param currStateOutput the current state of the instances
- * @param prevAssignment the previous task partition assignment
- * @param instanceList the instances
- * @param taskCfg the task configuration
- * @param taskCtx the task context
- * @param workflowCfg the workflow configuration
- * @param workflowCtx the workflow context
- * @param partitionSet the partitions to assign
- * @param cluster cluster snapshot
- * @return map of instances to set of partition numbers
- */
- public abstract Map<String, SortedSet<Integer>> getTaskAssignment(
- ResourceCurrentState currStateOutput, ResourceAssignment prevAssignment,
- Iterable<ParticipantId> instanceList, TaskConfig taskCfg, TaskContext taskContext,
- WorkflowConfig workflowCfg, WorkflowContext workflowCtx, Set<Integer> partitionSet,
- Cluster cluster);
-
- @Override
- public void init(HelixManager helixManager, ControllerContextProvider contextProvider) {
- _manager = helixManager;
- }
-
- @Override
- public ResourceAssignment computeResourceMapping(RebalancerConfig rebalancerConfig,
- ResourceAssignment helixPrevAssignment, Cluster cluster, ResourceCurrentState currentState) {
- final ResourceId resourceId = rebalancerConfig.getResourceId();
- final String resourceName = resourceId.stringify();
-
- // Fetch task configuration
- TaskConfig taskCfg = TaskUtil.getTaskCfg(_manager, resourceName);
- String workflowResource = taskCfg.getWorkflow();
-
- // Fetch workflow configuration and context
- WorkflowConfig workflowCfg = TaskUtil.getWorkflowCfg(_manager, workflowResource);
- WorkflowContext workflowCtx = TaskUtil.getWorkflowContext(_manager, workflowResource);
-
- // Initialize workflow context if needed
- if (workflowCtx == null) {
- workflowCtx = new WorkflowContext(new ZNRecord("WorkflowContext"));
- workflowCtx.setStartTime(System.currentTimeMillis());
- }
-
- // Check parent dependencies
- for (String parent : workflowCfg.getTaskDag().getDirectParents(resourceName)) {
- if (workflowCtx.getTaskState(parent) == null
- || !workflowCtx.getTaskState(parent).equals(TaskState.COMPLETED)) {
- return emptyAssignment(resourceId);
- }
- }
-
- // Clean up if workflow marked for deletion
- TargetState targetState = workflowCfg.getTargetState();
- if (targetState == TargetState.DELETE) {
- cleanup(_manager, resourceName, workflowCfg, workflowResource);
- return emptyAssignment(resourceId);
- }
-
- // Check if this workflow has been finished past its expiry.
- if (workflowCtx.getFinishTime() != WorkflowContext.UNFINISHED
- && workflowCtx.getFinishTime() + workflowCfg.getExpiry() <= System.currentTimeMillis()) {
- markForDeletion(_manager, workflowResource);
- cleanup(_manager, resourceName, workflowCfg, workflowResource);
- return emptyAssignment(resourceId);
- }
-
- // Fetch any existing context information from the property store.
- TaskContext taskCtx = TaskUtil.getTaskContext(_manager, resourceName);
- if (taskCtx == null) {
- taskCtx = new TaskContext(new ZNRecord("TaskContext"));
- taskCtx.setStartTime(System.currentTimeMillis());
- }
-
- // The task is already in a final state (completed/failed).
- if (workflowCtx.getTaskState(resourceName) == TaskState.FAILED
- || workflowCtx.getTaskState(resourceName) == TaskState.COMPLETED) {
- return emptyAssignment(resourceId);
- }
-
- // Will contain the list of partitions that must be explicitly dropped from the ideal state that
- // is stored in zk.
- // Fetch the previous resource assignment from the property store. This is required because of
- // HELIX-230.
- Set<Integer> partitionsToDrop = new TreeSet<Integer>();
- Set<ParticipantId> liveInstances = cluster.getLiveParticipantMap().keySet();
- ResourceAssignment prevAssignment = TaskUtil.getPrevResourceAssignment(_manager, resourceName);
- if (prevAssignment == null) {
- prevAssignment = new ResourceAssignment(resourceId);
- }
- ResourceAssignment newAssignment =
- computeResourceMapping(resourceName, workflowCfg, taskCfg, prevAssignment, liveInstances,
- currentState, workflowCtx, taskCtx, partitionsToDrop, cluster);
-
- PartitionedRebalancerConfig userConfig =
- BasicRebalancerConfig.convert(rebalancerConfig, PartitionedRebalancerConfig.class);
- if (!partitionsToDrop.isEmpty()) {
- for (Integer pId : partitionsToDrop) {
- userConfig.getPartitionMap().remove(PartitionId.from(pName(resourceName, pId)));
- }
- HelixDataAccessor accessor = _manager.getHelixDataAccessor();
- PropertyKey propertyKey = accessor.keyBuilder().idealStates(resourceName);
-
- IdealState taskIs =
- ResourceAccessor.rebalancerConfigToIdealState(rebalancerConfig,
- cluster.getResource(resourceId).getBucketSize(), cluster.getResource(resourceId)
- .getBatchMessageMode());
- accessor.setProperty(propertyKey, taskIs);
- }
-
- // Update rebalancer context, previous ideal state.
- TaskUtil.setTaskContext(_manager, resourceName, taskCtx);
- TaskUtil.setWorkflowContext(_manager, workflowResource, workflowCtx);
- TaskUtil.setPrevResourceAssignment(_manager, resourceName, newAssignment);
-
- return newAssignment;
- }
-
- private ResourceAssignment computeResourceMapping(String taskResource,
- WorkflowConfig workflowConfig, TaskConfig taskCfg, ResourceAssignment prevAssignment,
- Iterable<ParticipantId> liveInstances, ResourceCurrentState currStateOutput,
- WorkflowContext workflowCtx, TaskContext taskCtx, Set<Integer> partitionsToDropFromIs,
- Cluster cluster) {
- TargetState taskTgtState = workflowConfig.getTargetState();
-
- // Update running status in workflow context
- if (taskTgtState == TargetState.STOP) {
- workflowCtx.setTaskState(taskResource, TaskState.STOPPED);
- // Workflow has been stopped if all tasks are stopped
- if (isWorkflowStopped(workflowCtx, workflowConfig)) {
- workflowCtx.setWorkflowState(TaskState.STOPPED);
- }
- } else {
- workflowCtx.setTaskState(taskResource, TaskState.IN_PROGRESS);
- // Workflow is in progress if any task is in progress
- workflowCtx.setWorkflowState(TaskState.IN_PROGRESS);
- }
-
- // Used to keep track of task partitions that have already been assigned to instances.
- Set<Integer> assignedPartitions = new HashSet<Integer>();
-
- // Keeps a mapping of (partition) -> (instance, state)
- Map<Integer, PartitionAssignment> paMap = new TreeMap<Integer, PartitionAssignment>();
-
- // Process all the current assignments of task partitions.
- Set<Integer> allPartitions =
- getAllTaskPartitions(taskCfg, workflowConfig, workflowCtx, cluster);
- Map<String, SortedSet<Integer>> taskAssignments =
- getTaskPartitionAssignments(liveInstances, prevAssignment, allPartitions);
- for (String instance : taskAssignments.keySet()) {
- Set<Integer> pSet = taskAssignments.get(instance);
- // Used to keep track of partitions that are in one of the final states: COMPLETED, TIMED_OUT,
- // TASK_ERROR, ERROR.
- Set<Integer> donePartitions = new TreeSet<Integer>();
- for (int pId : pSet) {
- final String pName = pName(taskResource, pId);
-
- // Check for pending state transitions on this (partition, instance).
- State s =
- currStateOutput.getPendingState(ResourceId.from(taskResource), PartitionId.from(pName),
- ParticipantId.from(instance));
- String pendingState = (s == null ? null : s.toString());
- if (pendingState != null) {
- // There is a pending state transition for this (partition, instance). Just copy forward
- // the state assignment from the previous ideal state.
- Map<ParticipantId, State> stateMap =
- prevAssignment.getReplicaMap(PartitionId.from(pName));
- if (stateMap != null) {
- State prevState = stateMap.get(ParticipantId.from(instance));
- paMap.put(pId, new PartitionAssignment(instance, prevState.toString()));
- assignedPartitions.add(pId);
- if (LOG.isDebugEnabled()) {
- LOG.debug(String
- .format("Task partition %s has a pending state transition on instance %s."
- + " Using the previous ideal state which was %s.", pName, instance, prevState));
- }
- }
-
- continue;
- }
-
- // Current state is either present or dropped
- State currentState =
- currStateOutput.getCurrentState(ResourceId.from(taskResource), PartitionId.from(pName),
- ParticipantId.from(instance));
- String currentStateStr =
- currentState != null ? currentState.toString() : TaskPartitionState.DROPPED.toString();
- TaskPartitionState currState = TaskPartitionState.valueOf(currentStateStr);
-
- // Process any requested state transitions.
- State reqS =
- currStateOutput.getRequestedState(ResourceId.from(taskResource),
- PartitionId.from(pName), ParticipantId.from(instance));
- String requestedStateStr = (reqS == null ? null : reqS.toString());
- if (requestedStateStr != null && !requestedStateStr.isEmpty()) {
- TaskPartitionState requestedState = TaskPartitionState.valueOf(requestedStateStr);
- if (requestedState.equals(currState)) {
- LOG.warn(String.format(
- "Requested state %s is the same as the current state for instance %s.",
- requestedState, instance));
- }
-
- paMap.put(pId, new PartitionAssignment(instance, requestedState.name()));
- assignedPartitions.add(pId);
- if (LOG.isDebugEnabled()) {
- LOG.debug(String.format(
- "Instance %s requested a state transition to %s for partition %s.", instance,
- requestedState, pName));
- }
- continue;
- }
-
- switch (currState) {
- case RUNNING:
- case STOPPED: {
- TaskPartitionState nextState;
- if (taskTgtState == TargetState.START) {
- nextState = TaskPartitionState.RUNNING;
- } else {
- nextState = TaskPartitionState.STOPPED;
- }
-
- paMap.put(pId, new PartitionAssignment(instance, nextState.name()));
- assignedPartitions.add(pId);
- if (LOG.isDebugEnabled()) {
- LOG.debug(String.format("Setting task partition %s state to %s on instance %s.", pName,
- nextState, instance));
- }
- }
- break;
- case COMPLETED: {
- // The task has completed on this partition. Mark as such in the context object.
- donePartitions.add(pId);
- if (LOG.isDebugEnabled()) {
- LOG.debug(String
- .format(
- "Task partition %s has completed with state %s. Marking as such in rebalancer context.",
- pName, currState));
- }
- partitionsToDropFromIs.add(pId);
- markPartitionCompleted(taskCtx, pId);
- }
- break;
- case TIMED_OUT:
- case TASK_ERROR:
- case ERROR: {
- donePartitions.add(pId); // The task may be rescheduled on a different instance.
- if (LOG.isDebugEnabled()) {
- LOG.debug(String.format(
- "Task partition %s has error state %s. Marking as such in rebalancer context.",
- pName, currState));
- }
- markPartitionError(taskCtx, pId, currState);
- // The error policy is to fail the task as soon a single partition fails for a specified
- // maximum number of attempts.
- if (taskCtx.getPartitionNumAttempts(pId) >= taskCfg.getMaxAttemptsPerPartition()) {
- workflowCtx.setTaskState(taskResource, TaskState.FAILED);
- workflowCtx.setWorkflowState(TaskState.FAILED);
- partitionsToDropFromIs.addAll(allPartitions);
- return emptyAssignment(ResourceId.from(taskResource));
- }
- }
- break;
- case INIT:
- case DROPPED: {
- // currState in [INIT, DROPPED]. Do nothing, the partition is eligible to be reassigned.
- donePartitions.add(pId);
- if (LOG.isDebugEnabled()) {
- LOG.debug(String.format(
- "Task partition %s has state %s. It will be dropped from the current ideal state.",
- pName, currState));
- }
- }
- break;
- default:
- throw new AssertionError("Unknown enum symbol: " + currState);
- }
- }
-
- // Remove the set of task partitions that are completed or in one of the error states.
- pSet.removeAll(donePartitions);
- }
-
- if (isTaskComplete(taskCtx, allPartitions)) {
- if (!taskCfg.isLongLived()) {
- workflowCtx.setTaskState(taskResource, TaskState.COMPLETED);
- }
- if (isWorkflowComplete(workflowCtx, workflowConfig)) {
- workflowCtx.setWorkflowState(TaskState.COMPLETED);
- workflowCtx.setFinishTime(System.currentTimeMillis());
- }
- }
-
- // Make additional task assignments if needed.
- if (taskTgtState == TargetState.START) {
- // Contains the set of task partitions that must be excluded from consideration when making
- // any new assignments.
- // This includes all completed, failed, already assigned partitions.
- Set<Integer> excludeSet = Sets.newTreeSet(assignedPartitions);
- addCompletedPartitions(excludeSet, taskCtx, allPartitions);
- // Get instance->[partition, ...] mappings for the target resource.
- Map<String, SortedSet<Integer>> tgtPartitionAssignments =
- getTaskAssignment(currStateOutput, prevAssignment, liveInstances, taskCfg, taskCtx,
- workflowConfig, workflowCtx, allPartitions, cluster);
- for (Map.Entry<String, SortedSet<Integer>> entry : taskAssignments.entrySet()) {
- String instance = entry.getKey();
- // Contains the set of task partitions currently assigned to the instance.
- Set<Integer> pSet = entry.getValue();
- int numToAssign = taskCfg.getNumConcurrentTasksPerInstance() - pSet.size();
- if (numToAssign > 0) {
- List<Integer> nextPartitions =
- getNextPartitions(tgtPartitionAssignments.get(instance), excludeSet, numToAssign);
- for (Integer pId : nextPartitions) {
- String pName = pName(taskResource, pId);
- paMap.put(pId, new PartitionAssignment(instance, TaskPartitionState.RUNNING.name()));
- excludeSet.add(pId);
- if (LOG.isDebugEnabled()) {
- LOG.debug(String.format("Setting task partition %s state to %s on instance %s.",
- pName, TaskPartitionState.RUNNING, instance));
- }
- }
- }
- }
- }
-
- // Construct a ResourceAssignment object from the map of partition assignments.
- ResourceAssignment ra = new ResourceAssignment(ResourceId.from(taskResource));
- for (Map.Entry<Integer, PartitionAssignment> e : paMap.entrySet()) {
- PartitionAssignment pa = e.getValue();
- ra.addReplicaMap(PartitionId.from(pName(taskResource, e.getKey())),
- ImmutableMap.of(ParticipantId.from(pa._instance), State.from(pa._state)));
- }
-
- return ra;
- }
-
- /**
- * Checks if the task has completed.
- * @param ctx The rebalancer context.
- * @param allPartitions The set of partitions to check.
- * @return true if all task partitions have been marked with status
- * {@link TaskPartitionState#COMPLETED} in the rebalancer
- * context, false otherwise.
- */
- private static boolean isTaskComplete(TaskContext ctx, Set<Integer> allPartitions) {
- for (Integer pId : allPartitions) {
- TaskPartitionState state = ctx.getPartitionState(pId);
- if (state != TaskPartitionState.COMPLETED) {
- return false;
- }
- }
- return true;
- }
-
- /**
- * Checks if the workflow has completed.
- * @param ctx Workflow context containing task states
- * @param cfg Workflow config containing set of tasks
- * @return returns true if all tasks are {@link TaskState#COMPLETED}, false otherwise.
- */
- private static boolean isWorkflowComplete(WorkflowContext ctx, WorkflowConfig cfg) {
- for (String task : cfg.getTaskDag().getAllNodes()) {
- if (ctx.getTaskState(task) != TaskState.COMPLETED) {
- return false;
- }
- }
- return true;
- }
-
- /**
- * Checks if the workflow has been stopped.
- * @param ctx Workflow context containing task states
- * @param cfg Workflow config containing set of tasks
- * @return returns true if all tasks are {@link TaskState#STOPPED}, false otherwise.
- */
- private static boolean isWorkflowStopped(WorkflowContext ctx, WorkflowConfig cfg) {
- for (String task : cfg.getTaskDag().getAllNodes()) {
- if (ctx.getTaskState(task) != TaskState.STOPPED && ctx.getTaskState(task) != null) {
- return false;
- }
- }
- return true;
- }
-
- private static void markForDeletion(HelixManager mgr, String resourceName) {
- mgr.getConfigAccessor().set(
- TaskUtil.getResourceConfigScope(mgr.getClusterName(), resourceName),
- WorkflowConfig.TARGET_STATE, TargetState.DELETE.name());
- }
-
- /**
- * Cleans up all Helix state associated with this task, wiping workflow-level information if this
- * is the last remaining task in its workflow.
- */
- private static void cleanup(HelixManager mgr, String resourceName, WorkflowConfig cfg,
- String workflowResource) {
- HelixDataAccessor accessor = mgr.getHelixDataAccessor();
- // Delete resource configs.
- PropertyKey cfgKey = getConfigPropertyKey(accessor, resourceName);
- if (!accessor.removeProperty(cfgKey)) {
- throw new RuntimeException(String.format("Error occurred while trying to clean up task %s. "
- + "Failed to remove node %s from Helix. Aborting further clean up steps.", resourceName,
- cfgKey));
- }
- // Delete property store information for this resource.
- String propStoreKey = getRebalancerPropStoreKey(resourceName);
- if (!mgr.getHelixPropertyStore().remove(propStoreKey, AccessOption.PERSISTENT)) {
- throw new RuntimeException(String.format("Error occurred while trying to clean up task %s. "
- + "Failed to remove node %s from Helix. Aborting further clean up steps.", resourceName,
- propStoreKey));
- }
- // Finally, delete the ideal state itself.
- PropertyKey isKey = getISPropertyKey(accessor, resourceName);
- if (!accessor.removeProperty(isKey)) {
- throw new RuntimeException(String.format(
- "Error occurred while trying to clean up task %s. Failed to remove node %s from Helix.",
- resourceName, isKey));
- }
- LOG.info(String.format("Successfully cleaned up task resource %s.", resourceName));
-
- boolean lastInWorkflow = true;
- for (String task : cfg.getTaskDag().getAllNodes()) {
- // check if property store information or resource configs exist for this task
- if (mgr.getHelixPropertyStore().exists(getRebalancerPropStoreKey(task),
- AccessOption.PERSISTENT)
- || accessor.getProperty(getConfigPropertyKey(accessor, task)) != null
- || accessor.getProperty(getISPropertyKey(accessor, task)) != null) {
- lastInWorkflow = false;
- }
- }
-
- // clean up task-level info if this was the last in workflow
- if (lastInWorkflow) {
- // delete workflow config
- PropertyKey workflowCfgKey = getConfigPropertyKey(accessor, workflowResource);
- if (!accessor.removeProperty(workflowCfgKey)) {
- throw new RuntimeException(String.format(
- "Error occurred while trying to clean up workflow %s. "
- + "Failed to remove node %s from Helix. Aborting further clean up steps.",
- workflowResource, workflowCfgKey));
- }
- // Delete property store information for this workflow
- String workflowPropStoreKey = getRebalancerPropStoreKey(workflowResource);
- if (!mgr.getHelixPropertyStore().remove(workflowPropStoreKey, AccessOption.PERSISTENT)) {
- throw new RuntimeException(String.format(
- "Error occurred while trying to clean up workflow %s. "
- + "Failed to remove node %s from Helix. Aborting further clean up steps.",
- workflowResource, workflowPropStoreKey));
- }
- }
-
- }
-
- private static String getRebalancerPropStoreKey(String resource) {
- return Joiner.on("/").join(TaskConstants.REBALANCER_CONTEXT_ROOT, resource);
- }
-
- private static PropertyKey getISPropertyKey(HelixDataAccessor accessor, String resource) {
- return accessor.keyBuilder().idealStates(resource);
- }
-
- private static PropertyKey getConfigPropertyKey(HelixDataAccessor accessor, String resource) {
- return accessor.keyBuilder().resourceConfig(resource);
- }
-
- private static ResourceAssignment emptyAssignment(ResourceId resourceId) {
- return new ResourceAssignment(resourceId);
- }
-
- private static void addCompletedPartitions(Set<Integer> set, TaskContext ctx,
- Iterable<Integer> pIds) {
- for (Integer pId : pIds) {
- TaskPartitionState state = ctx.getPartitionState(pId);
- if (state == TaskPartitionState.COMPLETED) {
- set.add(pId);
- }
- }
- }
-
- private static List<Integer> getNextPartitions(SortedSet<Integer> candidatePartitions,
- Set<Integer> excluded, int n) {
- List<Integer> result = new ArrayList<Integer>();
- if (candidatePartitions == null || candidatePartitions.isEmpty()) {
- return result;
- }
- for (Integer pId : candidatePartitions) {
- if (result.size() >= n) {
- break;
- }
-
- if (!excluded.contains(pId)) {
- result.add(pId);
- }
- }
-
- return result;
- }
-
- private static void markPartitionCompleted(TaskContext ctx, int pId) {
- ctx.setPartitionState(pId, TaskPartitionState.COMPLETED);
- ctx.setPartitionFinishTime(pId, System.currentTimeMillis());
- ctx.incrementNumAttempts(pId);
- }
-
- private static void markPartitionError(TaskContext ctx, int pId, TaskPartitionState state) {
- ctx.setPartitionState(pId, state);
- ctx.setPartitionFinishTime(pId, System.currentTimeMillis());
- ctx.incrementNumAttempts(pId);
- }
-
- /**
- * Return the assignment of task partitions per instance.
- */
- private static Map<String, SortedSet<Integer>> getTaskPartitionAssignments(
- Iterable<ParticipantId> instanceList, ResourceAssignment assignment, Set<Integer> includeSet) {
- Map<String, SortedSet<Integer>> result = new HashMap<String, SortedSet<Integer>>();
- for (ParticipantId instance : instanceList) {
- result.put(instance.stringify(), new TreeSet<Integer>());
- }
-
- for (PartitionId partitionId : assignment.getMappedPartitionIds()) {
- int pId = pId(partitionId.stringify());
- if (includeSet.contains(pId)) {
- Map<ParticipantId, State> replicaMap = assignment.getReplicaMap(partitionId);
- for (ParticipantId instance : replicaMap.keySet()) {
- SortedSet<Integer> pList = result.get(instance.stringify());
- if (pList != null) {
- pList.add(pId);
- }
- }
- }
- }
-
- return result;
- }
-
- /**
- * Computes the partition name given the resource name and partition id.
- */
- protected static String pName(String resource, int pId) {
- return resource + "_" + pId;
- }
-
- /**
- * Extracts the partition id from the given partition name.
- */
- protected static int pId(String pName) {
- String[] tokens = pName.split("_");
- return Integer.valueOf(tokens[tokens.length - 1]);
- }
-
- /**
- * An (instance, state) pair.
- */
- private static class PartitionAssignment {
- private final String _instance;
- private final String _state;
-
- private PartitionAssignment(String instance, String state) {
- _instance = instance;
- _state = state;
- }
- }
-}
http://git-wip-us.apache.org/repos/asf/helix/blob/97ca4de4/helix-core/src/main/java/org/apache/helix/task/FixedTargetTaskRebalancer.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/FixedTargetTaskRebalancer.java b/helix-core/src/main/java/org/apache/helix/task/FixedTargetTaskRebalancer.java
new file mode 100644
index 0000000..d1329ee
--- /dev/null
+++ b/helix-core/src/main/java/org/apache/helix/task/FixedTargetTaskRebalancer.java
@@ -0,0 +1,162 @@
+package org.apache.helix.task;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import org.apache.helix.api.Cluster;
+import org.apache.helix.api.Resource;
+import org.apache.helix.api.State;
+import org.apache.helix.api.id.ParticipantId;
+import org.apache.helix.api.id.PartitionId;
+import org.apache.helix.api.id.ResourceId;
+import org.apache.helix.controller.stages.ResourceCurrentState;
+import org.apache.helix.model.IdealState;
+import org.apache.helix.model.ResourceAssignment;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
+
+/**
+ * A rebalancer for when a task group must be assigned according to partitions/states on a target
+ * resource. Here, tasks are colocated according to where a resource's partitions are, as well as
+ * (if desired) only where those partitions are in a given state.
+ */
+public class FixedTargetTaskRebalancer extends TaskRebalancer {
+
+ @Override
+ public Set<Integer> getAllTaskPartitions(JobConfig jobCfg, JobContext jobCtx,
+ WorkflowConfig workflowCfg, WorkflowContext workflowCtx, Cluster cache) {
+ return getAllTaskPartitions(getTgtIdealState(jobCfg, cache), jobCfg, jobCtx);
+ }
+
+ @Override
+ public Map<ParticipantId, SortedSet<Integer>> getTaskAssignment(
+ ResourceCurrentState currStateOutput, ResourceAssignment prevAssignment,
+ Iterable<ParticipantId> instanceList, JobConfig jobCfg, JobContext jobContext,
+ WorkflowConfig workflowCfg, WorkflowContext workflowCtx, Set<Integer> partitionSet,
+ Cluster cache) {
+ IdealState tgtIs = getTgtIdealState(jobCfg, cache);
+ if (tgtIs == null) {
+ return Collections.emptyMap();
+ }
+ Set<String> tgtStates = jobCfg.getTargetPartitionStates();
+ return getTgtPartitionAssignment(currStateOutput, instanceList, tgtIs, tgtStates, partitionSet,
+ jobContext);
+ }
+
+ /**
+ * Gets the ideal state of the target resource of this job
+ * @param jobCfg job config containing target resource id
+ * @param cluster snapshot of the cluster containing the task and target resource
+ * @return target resource ideal state, or null
+ */
+ private static IdealState getTgtIdealState(JobConfig jobCfg, Cluster cache) {
+ String tgtResourceId = jobCfg.getTargetResource();
+ Resource resource = cache.getResource(ResourceId.from(tgtResourceId));
+ return resource.getIdealState();
+ }
+
+ /**
+ * Returns the set of all partition ids for a job.
+ * <p/>
+ * If a set of partition ids was explicitly specified in the config, that is used. Otherwise, we
+ * use the list of all partition ids from the target resource.
+ */
+ private static Set<Integer> getAllTaskPartitions(IdealState tgtResourceIs, JobConfig jobCfg,
+ JobContext taskCtx) {
+ if (tgtResourceIs == null) {
+ return null;
+ }
+ Map<String, List<Integer>> currentTargets = taskCtx.getPartitionsByTarget();
+ SortedSet<String> targetPartitions = Sets.newTreeSet();
+ if (jobCfg.getTargetPartitions() != null) {
+ targetPartitions.addAll(jobCfg.getTargetPartitions());
+ } else {
+ targetPartitions.addAll(tgtResourceIs.getPartitionSet());
+ }
+
+ Set<Integer> taskPartitions = Sets.newTreeSet();
+ for (String pName : targetPartitions) {
+ taskPartitions.addAll(getPartitionsForTargetPartition(pName, currentTargets, taskCtx));
+ }
+ return taskPartitions;
+ }
+
+ private static List<Integer> getPartitionsForTargetPartition(String targetPartition,
+ Map<String, List<Integer>> currentTargets, JobContext jobCtx) {
+ if (!currentTargets.containsKey(targetPartition)) {
+ int nextId = jobCtx.getPartitionSet().size();
+ jobCtx.setPartitionTarget(nextId, targetPartition);
+ return Lists.newArrayList(nextId);
+ } else {
+ return currentTargets.get(targetPartition);
+ }
+ }
+
+ /**
+ * Get partition assignments for the target resource, but only for the partitions of interest.
+ * @param currStateOutput The current state of the instances in the cluster.
+ * @param instanceList The set of instances.
+ * @param tgtIs The ideal state of the target resource.
+ * @param tgtStates Only partitions in this set of states will be considered. If null, partitions
+ * do not need to
+ * be in any specific state to be considered.
+ * @param includeSet The set of partitions to consider.
+ * @return A map of instance vs set of partition ids assigned to that instance.
+ */
+ private static Map<ParticipantId, SortedSet<Integer>> getTgtPartitionAssignment(
+ ResourceCurrentState currStateOutput, Iterable<ParticipantId> instanceList, IdealState tgtIs,
+ Set<String> tgtStates, Set<Integer> includeSet, JobContext jobCtx) {
+ Map<ParticipantId, SortedSet<Integer>> result =
+ new HashMap<ParticipantId, SortedSet<Integer>>();
+ for (ParticipantId instance : instanceList) {
+ result.put(instance, new TreeSet<Integer>());
+ }
+
+ Map<String, List<Integer>> partitionsByTarget = jobCtx.getPartitionsByTarget();
+ for (String pName : tgtIs.getPartitionSet()) {
+ List<Integer> partitions = partitionsByTarget.get(pName);
+ if (partitions == null || partitions.size() < 1) {
+ continue;
+ }
+ int pId = partitions.get(0);
+ if (includeSet.contains(pId)) {
+ for (ParticipantId instance : instanceList) {
+ State s =
+ currStateOutput.getCurrentState(ResourceId.from(tgtIs.getResourceName()),
+ PartitionId.from(pName), instance);
+ String state = (s == null ? null : s.toString());
+ if (tgtStates == null || tgtStates.contains(state)) {
+ result.get(instance).add(pId);
+ }
+ }
+ }
+ }
+
+ return result;
+ }
+}
http://git-wip-us.apache.org/repos/asf/helix/blob/97ca4de4/helix-core/src/main/java/org/apache/helix/task/GenericTaskRebalancer.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/GenericTaskRebalancer.java b/helix-core/src/main/java/org/apache/helix/task/GenericTaskRebalancer.java
new file mode 100644
index 0000000..8b5a258
--- /dev/null
+++ b/helix-core/src/main/java/org/apache/helix/task/GenericTaskRebalancer.java
@@ -0,0 +1,196 @@
+package org.apache.helix.task;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import org.apache.helix.ZNRecord;
+import org.apache.helix.api.Cluster;
+import org.apache.helix.api.Resource;
+import org.apache.helix.api.State;
+import org.apache.helix.api.id.ParticipantId;
+import org.apache.helix.api.id.PartitionId;
+import org.apache.helix.api.id.ResourceId;
+import org.apache.helix.controller.stages.ResourceCurrentState;
+import org.apache.helix.controller.strategy.AutoRebalanceStrategy;
+import org.apache.helix.model.IdealState;
+import org.apache.helix.model.ResourceAssignment;
+
+import com.google.common.base.Function;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+
+/**
+ * This class does an assignment based on an automatic rebalancing strategy, rather than requiring
+ * assignment to target partitions and states of another resource
+ */
+public class GenericTaskRebalancer extends TaskRebalancer {
+ @Override
+ public Set<Integer> getAllTaskPartitions(JobConfig jobCfg, JobContext jobCtx,
+ WorkflowConfig workflowCfg, WorkflowContext workflowCtx, Cluster cache) {
+ Map<String, TaskConfig> taskMap = jobCfg.getTaskConfigMap();
+ Map<String, Integer> taskIdMap = jobCtx.getTaskIdPartitionMap();
+ for (TaskConfig taskCfg : taskMap.values()) {
+ String taskId = taskCfg.getId();
+ int nextPartition = jobCtx.getPartitionSet().size();
+ if (!taskIdMap.containsKey(taskId)) {
+ jobCtx.setTaskIdForPartition(nextPartition, taskId);
+ }
+ }
+ return jobCtx.getPartitionSet();
+ }
+
+ @Override
+ public Map<ParticipantId, SortedSet<Integer>> getTaskAssignment(
+ ResourceCurrentState currStateOutput, ResourceAssignment prevAssignment,
+ Iterable<ParticipantId> instanceList, JobConfig jobCfg, final JobContext jobContext,
+ WorkflowConfig workflowCfg, WorkflowContext workflowCtx, Set<Integer> partitionSet,
+ Cluster cache) {
+ // Gather input to the full auto rebalancing algorithm
+ LinkedHashMap<State, Integer> states = new LinkedHashMap<State, Integer>();
+ states.put(State.from("ONLINE"), 1);
+
+ // Only map partitions whose assignment we care about
+ final Set<TaskPartitionState> honoredStates =
+ Sets.newHashSet(TaskPartitionState.INIT, TaskPartitionState.RUNNING,
+ TaskPartitionState.STOPPED);
+ Set<Integer> filteredPartitionSet = Sets.newHashSet();
+ for (Integer p : partitionSet) {
+ TaskPartitionState state = (jobContext == null) ? null : jobContext.getPartitionState(p);
+ if (state == null || honoredStates.contains(state)) {
+ filteredPartitionSet.add(p);
+ }
+ }
+
+ // Transform from partition id to fully qualified partition name
+ List<Integer> partitionNums = Lists.newArrayList(partitionSet);
+ Collections.sort(partitionNums);
+ final ResourceId resourceId = prevAssignment.getResourceId();
+ List<PartitionId> partitions =
+ new ArrayList<PartitionId>(Lists.transform(partitionNums,
+ new Function<Integer, PartitionId>() {
+ @Override
+ public PartitionId apply(Integer partitionNum) {
+ return PartitionId.from(resourceId + "_" + partitionNum);
+ }
+ }));
+
+ // Compute the current assignment
+ Map<PartitionId, Map<ParticipantId, State>> currentMapping = Maps.newHashMap();
+ for (PartitionId partition : currStateOutput.getCurrentStateMappedPartitions(resourceId)) {
+ if (!filteredPartitionSet.contains(pId(partition.toString()))) {
+ // not computing old partitions
+ continue;
+ }
+ Map<ParticipantId, State> allPreviousDecisionMap = Maps.newHashMap();
+ if (prevAssignment != null) {
+ allPreviousDecisionMap.putAll(prevAssignment.getReplicaMap(partition));
+ }
+ allPreviousDecisionMap.putAll(currStateOutput.getCurrentStateMap(resourceId, partition));
+ allPreviousDecisionMap.putAll(currStateOutput.getPendingStateMap(resourceId, partition));
+ currentMapping.put(partition, allPreviousDecisionMap);
+ }
+
+ // Get the assignment keyed on partition
+ AutoRebalanceStrategy strategy =
+ new AutoRebalanceStrategy(resourceId, partitions, states, Integer.MAX_VALUE,
+ new AutoRebalanceStrategy.DefaultPlacementScheme());
+ List<ParticipantId> allNodes =
+ Lists.newArrayList(getEligibleInstances(jobCfg, currStateOutput, instanceList, cache));
+ Collections.sort(allNodes);
+ ZNRecord record = strategy.typedComputePartitionAssignment(allNodes, currentMapping, allNodes);
+ Map<String, List<String>> preferenceLists = record.getListFields();
+
+ // Convert to an assignment keyed on participant
+ Map<ParticipantId, SortedSet<Integer>> taskAssignment = Maps.newHashMap();
+ for (Map.Entry<String, List<String>> e : preferenceLists.entrySet()) {
+ String partitionName = e.getKey();
+ partitionName = String.valueOf(pId(partitionName));
+ List<String> preferenceList = e.getValue();
+ for (String participantName : preferenceList) {
+ ParticipantId participantId = ParticipantId.from(participantName);
+ if (!taskAssignment.containsKey(participantId)) {
+ taskAssignment.put(participantId, new TreeSet<Integer>());
+ }
+ taskAssignment.get(participantId).add(Integer.valueOf(partitionName));
+ }
+ }
+ return taskAssignment;
+ }
+
+ /**
+ * Filter a list of instances based on targeted resource policies
+ * @param jobCfg the job configuration
+ * @param currStateOutput the current state of all instances in the cluster
+ * @param instanceList valid instances
+ * @param cache current snapshot of the cluster
+ * @return a set of instances that can be assigned to
+ */
+ private Set<ParticipantId> getEligibleInstances(JobConfig jobCfg,
+ ResourceCurrentState currStateOutput, Iterable<ParticipantId> instanceList, Cluster cache) {
+ // No target resource means any instance is available
+ Set<ParticipantId> allInstances = Sets.newHashSet(instanceList);
+ String targetResource = jobCfg.getTargetResource();
+ if (targetResource == null) {
+ return allInstances;
+ }
+
+ // Bad ideal state means don't assign
+ Resource resource = cache.getResource(ResourceId.from(targetResource));
+ IdealState idealState = (resource != null) ? resource.getIdealState() : null;
+ if (idealState == null) {
+ return Collections.emptySet();
+ }
+
+ // Get the partitions on the target resource to use
+ Set<String> partitions = idealState.getPartitionSet();
+ List<String> targetPartitions = jobCfg.getTargetPartitions();
+ if (targetPartitions != null && !targetPartitions.isEmpty()) {
+ partitions.retainAll(targetPartitions);
+ }
+
+ // Based on state matches, add eligible instances
+ Set<ParticipantId> eligibleInstances = Sets.newHashSet();
+ Set<String> targetStates = jobCfg.getTargetPartitionStates();
+ for (String partition : partitions) {
+ Map<ParticipantId, State> stateMap =
+ currStateOutput.getCurrentStateMap(ResourceId.from(targetResource),
+ PartitionId.from(partition));
+ for (Map.Entry<ParticipantId, State> e : stateMap.entrySet()) {
+ ParticipantId instanceName = e.getKey();
+ State state = e.getValue();
+ if (targetStates == null || targetStates.isEmpty()
+ || targetStates.contains(state.toString())) {
+ eligibleInstances.add(instanceName);
+ }
+ }
+ }
+ allInstances.retainAll(eligibleInstances);
+ return allInstances;
+ }
+}
http://git-wip-us.apache.org/repos/asf/helix/blob/97ca4de4/helix-core/src/main/java/org/apache/helix/task/IndependentTaskRebalancer.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/IndependentTaskRebalancer.java b/helix-core/src/main/java/org/apache/helix/task/IndependentTaskRebalancer.java
deleted file mode 100644
index 2bc4081..0000000
--- a/helix-core/src/main/java/org/apache/helix/task/IndependentTaskRebalancer.java
+++ /dev/null
@@ -1,137 +0,0 @@
-package org.apache.helix.task;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.SortedSet;
-import java.util.TreeSet;
-
-import org.apache.helix.ZNRecord;
-import org.apache.helix.api.Cluster;
-import org.apache.helix.api.State;
-import org.apache.helix.api.id.ParticipantId;
-import org.apache.helix.api.id.PartitionId;
-import org.apache.helix.api.id.ResourceId;
-import org.apache.helix.controller.stages.ResourceCurrentState;
-import org.apache.helix.controller.strategy.AutoRebalanceStrategy;
-import org.apache.helix.model.ResourceAssignment;
-
-import com.google.common.base.Function;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-import com.google.common.collect.Sets;
-
-/**
- * A task rebalancer that evenly assigns tasks to nodes
- */
-public class IndependentTaskRebalancer extends AbstractTaskRebalancer {
-
- @Override
- public Set<Integer> getAllTaskPartitions(TaskConfig taskCfg, WorkflowConfig workflowCfg,
- WorkflowContext workflowCtx, Cluster cluster) {
- Set<Integer> taskPartitions = new HashSet<Integer>();
- if (taskCfg.getTargetPartitions() != null) {
- for (Integer pId : taskCfg.getTargetPartitions()) {
- taskPartitions.add(pId);
- }
- }
- return taskPartitions;
- }
-
- @Override
- public Map<String, SortedSet<Integer>> getTaskAssignment(ResourceCurrentState currStateOutput,
- ResourceAssignment prevAssignment, Iterable<ParticipantId> instanceList, TaskConfig taskCfg,
- final TaskContext taskContext, WorkflowConfig workflowCfg, WorkflowContext workflowCtx,
- Set<Integer> partitionSet, Cluster cluster) {
- // Gather input to the full auto rebalancing algorithm
- LinkedHashMap<State, Integer> states = new LinkedHashMap<State, Integer>();
- states.put(State.from("ONLINE"), 1);
-
- // Only map partitions whose assignment we care about
- final Set<TaskPartitionState> honoredStates =
- Sets.newHashSet(TaskPartitionState.INIT, TaskPartitionState.RUNNING,
- TaskPartitionState.STOPPED);
- Set<Integer> filteredPartitionSet = Sets.newHashSet();
- for (Integer p : partitionSet) {
- TaskPartitionState state = (taskContext == null) ? null : taskContext.getPartitionState(p);
- if (state == null || honoredStates.contains(state)) {
- filteredPartitionSet.add(p);
- }
- }
-
- // Transform from partition id to fully qualified partition name
- List<Integer> partitionNums = Lists.newArrayList(partitionSet);
- Collections.sort(partitionNums);
- final ResourceId resourceId = prevAssignment.getResourceId();
- List<PartitionId> partitions =
- new ArrayList<PartitionId>(Lists.transform(partitionNums,
- new Function<Integer, PartitionId>() {
- @Override
- public PartitionId apply(Integer partitionNum) {
- return PartitionId.from(resourceId, partitionNum.toString());
- }
- }));
-
- // Compute the current assignment
- Map<PartitionId, Map<ParticipantId, State>> currentMapping = Maps.newHashMap();
- for (PartitionId partitionId : currStateOutput.getCurrentStateMappedPartitions(resourceId)) {
- if (!filteredPartitionSet.contains(pId(partitionId.toString()))) {
- // not computing old partitions
- continue;
- }
- Map<ParticipantId, State> allPreviousDecisionMap = Maps.newHashMap();
- if (prevAssignment != null) {
- allPreviousDecisionMap.putAll(prevAssignment.getReplicaMap(partitionId));
- }
- allPreviousDecisionMap.putAll(currStateOutput.getCurrentStateMap(resourceId, partitionId));
- allPreviousDecisionMap.putAll(currStateOutput.getPendingStateMap(resourceId, partitionId));
- currentMapping.put(partitionId, allPreviousDecisionMap);
- }
-
- // Get the assignment keyed on partition
- AutoRebalanceStrategy strategy =
- new AutoRebalanceStrategy(resourceId, partitions, states, Integer.MAX_VALUE,
- new AutoRebalanceStrategy.DefaultPlacementScheme());
- List<ParticipantId> allNodes = Lists.newArrayList(instanceList);
- ZNRecord record = strategy.typedComputePartitionAssignment(allNodes, currentMapping, allNodes);
- Map<String, List<String>> preferenceLists = record.getListFields();
-
- // Convert to an assignment keyed on participant
- Map<String, SortedSet<Integer>> taskAssignment = Maps.newHashMap();
- for (Map.Entry<String, List<String>> e : preferenceLists.entrySet()) {
- String partitionName = e.getKey();
- partitionName = String.valueOf(pId(partitionName));
- List<String> preferenceList = e.getValue();
- for (String participantName : preferenceList) {
- if (!taskAssignment.containsKey(participantName)) {
- taskAssignment.put(participantName, new TreeSet<Integer>());
- }
- taskAssignment.get(participantName).add(Integer.valueOf(partitionName));
- }
- }
- return taskAssignment;
- }
-}
http://git-wip-us.apache.org/repos/asf/helix/blob/97ca4de4/helix-core/src/main/java/org/apache/helix/task/JobConfig.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/JobConfig.java b/helix-core/src/main/java/org/apache/helix/task/JobConfig.java
new file mode 100644
index 0000000..90e3cfc
--- /dev/null
+++ b/helix-core/src/main/java/org/apache/helix/task/JobConfig.java
@@ -0,0 +1,334 @@
+package org.apache.helix.task;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import com.google.common.base.Joiner;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Maps;
+
+/**
+ * Provides a typed interface to job configurations.
+ */
+public class JobConfig {
+ // // Property names ////
+
+ /** The name of the workflow to which the job belongs. */
+ public static final String WORKFLOW_ID = "WorkflowID";
+ /** The assignment strategy of this job */
+ public static final String ASSIGNMENT_STRATEGY = "AssignmentStrategy";
+ /** The name of the target resource. */
+ public static final String TARGET_RESOURCE = "TargetResource";
+ /**
+ * The set of the target partition states. The value must be a comma-separated list of partition
+ * states.
+ */
+ public static final String TARGET_PARTITION_STATES = "TargetPartitionStates";
+ /**
+ * The set of the target partition ids. The value must be a comma-separated list of partition ids.
+ */
+ public static final String TARGET_PARTITIONS = "TargetPartitions";
+ /** The command that is to be run by participants in the case of identical tasks. */
+ public static final String COMMAND = "Command";
+ /** The command configuration to be used by the tasks. */
+ public static final String JOB_CONFIG_MAP = "JobConfig";
+ /** The timeout for a task. */
+ public static final String TIMEOUT_PER_TASK = "TimeoutPerPartition";
+ /** The maximum number of times the task rebalancer may attempt to execute a task. */
+ public static final String MAX_ATTEMPTS_PER_TASK = "MaxAttemptsPerTask";
+ /** The number of concurrent tasks that are allowed to run on an instance. */
+ public static final String NUM_CONCURRENT_TASKS_PER_INSTANCE = "ConcurrentTasksPerInstance";
+
+ /** The individual task configurations, if any **/
+ public static final String TASK_CONFIGS = "TaskConfigs";
+
+ // // Default property values ////
+
+ public static final long DEFAULT_TIMEOUT_PER_TASK = 60 * 60 * 1000; // 1 hr.
+ public static final int DEFAULT_MAX_ATTEMPTS_PER_TASK = 10;
+ public static final int DEFAULT_NUM_CONCURRENT_TASKS_PER_INSTANCE = 1;
+
+ private final String _workflow;
+ private final String _targetResource;
+ private final List<String> _targetPartitions;
+ private final Set<String> _targetPartitionStates;
+ private final String _command;
+ private final Map<String, String> _jobConfigMap;
+ private final long _timeoutPerTask;
+ private final int _numConcurrentTasksPerInstance;
+ private final int _maxAttemptsPerTask;
+ private final Map<String, TaskConfig> _taskConfigMap;
+
+ private JobConfig(String workflow, String targetResource, List<String> targetPartitions,
+ Set<String> targetPartitionStates, String command, Map<String, String> jobConfigMap,
+ long timeoutPerTask, int numConcurrentTasksPerInstance, int maxAttemptsPerTask,
+ Map<String, TaskConfig> taskConfigMap) {
+ _workflow = workflow;
+ _targetResource = targetResource;
+ _targetPartitions = targetPartitions;
+ _targetPartitionStates = targetPartitionStates;
+ _command = command;
+ _jobConfigMap = jobConfigMap;
+ _timeoutPerTask = timeoutPerTask;
+ _numConcurrentTasksPerInstance = numConcurrentTasksPerInstance;
+ _maxAttemptsPerTask = maxAttemptsPerTask;
+ if (taskConfigMap != null) {
+ _taskConfigMap = taskConfigMap;
+ } else {
+ _taskConfigMap = Collections.emptyMap();
+ }
+ }
+
+ public String getWorkflow() {
+ return _workflow == null ? Workflow.UNSPECIFIED : _workflow;
+ }
+
+ public String getTargetResource() {
+ return _targetResource;
+ }
+
+ public List<String> getTargetPartitions() {
+ return _targetPartitions;
+ }
+
+ public Set<String> getTargetPartitionStates() {
+ return _targetPartitionStates;
+ }
+
+ public String getCommand() {
+ return _command;
+ }
+
+ public Map<String, String> getJobConfigMap() {
+ return _jobConfigMap;
+ }
+
+ public long getTimeoutPerTask() {
+ return _timeoutPerTask;
+ }
+
+ public int getNumConcurrentTasksPerInstance() {
+ return _numConcurrentTasksPerInstance;
+ }
+
+ public int getMaxAttemptsPerTask() {
+ return _maxAttemptsPerTask;
+ }
+
+ public Map<String, TaskConfig> getTaskConfigMap() {
+ return _taskConfigMap;
+ }
+
+ public TaskConfig getTaskConfig(String id) {
+ return _taskConfigMap.get(id);
+ }
+
+ public Map<String, String> getResourceConfigMap() {
+ Map<String, String> cfgMap = new HashMap<String, String>();
+ cfgMap.put(JobConfig.WORKFLOW_ID, _workflow);
+ if (_command != null) {
+ cfgMap.put(JobConfig.COMMAND, _command);
+ }
+ if (_jobConfigMap != null) {
+ String serializedConfig = TaskUtil.serializeJobConfigMap(_jobConfigMap);
+ if (serializedConfig != null) {
+ cfgMap.put(JobConfig.JOB_CONFIG_MAP, serializedConfig);
+ }
+ }
+ if (_targetResource != null) {
+ cfgMap.put(JobConfig.TARGET_RESOURCE, _targetResource);
+ }
+ if (_targetPartitionStates != null) {
+ cfgMap.put(JobConfig.TARGET_PARTITION_STATES, Joiner.on(",").join(_targetPartitionStates));
+ }
+ if (_targetPartitions != null) {
+ cfgMap.put(JobConfig.TARGET_PARTITIONS, Joiner.on(",").join(_targetPartitions));
+ }
+ cfgMap.put(JobConfig.TIMEOUT_PER_TASK, "" + _timeoutPerTask);
+ cfgMap.put(JobConfig.MAX_ATTEMPTS_PER_TASK, "" + _maxAttemptsPerTask);
+ return cfgMap;
+ }
+
+ /**
+ * A builder for {@link JobConfig}. Validates the configurations.
+ */
+ public static class Builder {
+ private String _workflow;
+ private String _targetResource;
+ private List<String> _targetPartitions;
+ private Set<String> _targetPartitionStates;
+ private String _command;
+ private Map<String, String> _commandConfig;
+ private Map<String, TaskConfig> _taskConfigMap = Maps.newHashMap();
+ private long _timeoutPerTask = DEFAULT_TIMEOUT_PER_TASK;
+ private int _numConcurrentTasksPerInstance = DEFAULT_NUM_CONCURRENT_TASKS_PER_INSTANCE;
+ private int _maxAttemptsPerTask = DEFAULT_MAX_ATTEMPTS_PER_TASK;
+
+ public JobConfig build() {
+ validate();
+
+ return new JobConfig(_workflow, _targetResource, _targetPartitions, _targetPartitionStates,
+ _command, _commandConfig, _timeoutPerTask, _numConcurrentTasksPerInstance,
+ _maxAttemptsPerTask, _taskConfigMap);
+ }
+
+ /**
+ * Convenience method to build a {@link JobConfig} from a {@code Map<String, String>}.
+ * @param cfg A map of property names to their string representations.
+ * @return A {@link Builder}.
+ */
+ public static Builder fromMap(Map<String, String> cfg) {
+ Builder b = new Builder();
+ if (cfg.containsKey(WORKFLOW_ID)) {
+ b.setWorkflow(cfg.get(WORKFLOW_ID));
+ }
+ if (cfg.containsKey(TARGET_RESOURCE)) {
+ b.setTargetResource(cfg.get(TARGET_RESOURCE));
+ }
+ if (cfg.containsKey(TARGET_PARTITIONS)) {
+ b.setTargetPartitions(csvToStringList(cfg.get(TARGET_PARTITIONS)));
+ }
+ if (cfg.containsKey(TARGET_PARTITION_STATES)) {
+ b.setTargetPartitionStates(new HashSet<String>(Arrays.asList(cfg.get(
+ TARGET_PARTITION_STATES).split(","))));
+ }
+ if (cfg.containsKey(COMMAND)) {
+ b.setCommand(cfg.get(COMMAND));
+ }
+ if (cfg.containsKey(JOB_CONFIG_MAP)) {
+ Map<String, String> commandConfigMap =
+ TaskUtil.deserializeJobConfigMap(cfg.get(JOB_CONFIG_MAP));
+ b.setJobConfigMap(commandConfigMap);
+ }
+ if (cfg.containsKey(TIMEOUT_PER_TASK)) {
+ b.setTimeoutPerTask(Long.parseLong(cfg.get(TIMEOUT_PER_TASK)));
+ }
+ if (cfg.containsKey(NUM_CONCURRENT_TASKS_PER_INSTANCE)) {
+ b.setNumConcurrentTasksPerInstance(Integer.parseInt(cfg
+ .get(NUM_CONCURRENT_TASKS_PER_INSTANCE)));
+ }
+ if (cfg.containsKey(MAX_ATTEMPTS_PER_TASK)) {
+ b.setMaxAttemptsPerTask(Integer.parseInt(cfg.get(MAX_ATTEMPTS_PER_TASK)));
+ }
+ return b;
+ }
+
+ public Builder setWorkflow(String v) {
+ _workflow = v;
+ return this;
+ }
+
+ public Builder setTargetResource(String v) {
+ _targetResource = v;
+ return this;
+ }
+
+ public Builder setTargetPartitions(List<String> v) {
+ _targetPartitions = ImmutableList.copyOf(v);
+ return this;
+ }
+
+ public Builder setTargetPartitionStates(Set<String> v) {
+ _targetPartitionStates = ImmutableSet.copyOf(v);
+ return this;
+ }
+
+ public Builder setCommand(String v) {
+ _command = v;
+ return this;
+ }
+
+ public Builder setJobConfigMap(Map<String, String> v) {
+ _commandConfig = v;
+ return this;
+ }
+
+ public Builder setTimeoutPerTask(long v) {
+ _timeoutPerTask = v;
+ return this;
+ }
+
+ public Builder setNumConcurrentTasksPerInstance(int v) {
+ _numConcurrentTasksPerInstance = v;
+ return this;
+ }
+
+ public Builder setMaxAttemptsPerTask(int v) {
+ _maxAttemptsPerTask = v;
+ return this;
+ }
+
+ public Builder addTaskConfigs(List<TaskConfig> taskConfigs) {
+ if (taskConfigs != null) {
+ for (TaskConfig taskConfig : taskConfigs) {
+ _taskConfigMap.put(taskConfig.getId(), taskConfig);
+ }
+ }
+ return this;
+ }
+
+ public Builder addTaskConfigMap(Map<String, TaskConfig> taskConfigMap) {
+ _taskConfigMap.putAll(taskConfigMap);
+ return this;
+ }
+
+ private void validate() {
+ if (_taskConfigMap.isEmpty() && _targetResource == null) {
+ throw new IllegalArgumentException(String.format("%s cannot be null", TARGET_RESOURCE));
+ }
+ if (_taskConfigMap.isEmpty() && _targetPartitionStates != null
+ && _targetPartitionStates.isEmpty()) {
+ throw new IllegalArgumentException(String.format("%s cannot be an empty set",
+ TARGET_PARTITION_STATES));
+ }
+ if (_taskConfigMap.isEmpty() && _command == null) {
+ throw new IllegalArgumentException(String.format("%s cannot be null", COMMAND));
+ }
+ if (_timeoutPerTask < 0) {
+ throw new IllegalArgumentException(String.format("%s has invalid value %s",
+ TIMEOUT_PER_TASK, _timeoutPerTask));
+ }
+ if (_numConcurrentTasksPerInstance < 1) {
+ throw new IllegalArgumentException(String.format("%s has invalid value %s",
+ NUM_CONCURRENT_TASKS_PER_INSTANCE, _numConcurrentTasksPerInstance));
+ }
+ if (_maxAttemptsPerTask < 1) {
+ throw new IllegalArgumentException(String.format("%s has invalid value %s",
+ MAX_ATTEMPTS_PER_TASK, _maxAttemptsPerTask));
+ }
+ if (_workflow == null) {
+ throw new IllegalArgumentException(String.format("%s cannot be null", WORKFLOW_ID));
+ }
+ }
+
+ private static List<String> csvToStringList(String csv) {
+ String[] vals = csv.split(",");
+ return Arrays.asList(vals);
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/helix/blob/97ca4de4/helix-core/src/main/java/org/apache/helix/task/JobContext.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/JobContext.java b/helix-core/src/main/java/org/apache/helix/task/JobContext.java
new file mode 100644
index 0000000..7742c67
--- /dev/null
+++ b/helix-core/src/main/java/org/apache/helix/task/JobContext.java
@@ -0,0 +1,227 @@
+package org.apache.helix.task;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+
+import org.apache.helix.HelixProperty;
+import org.apache.helix.ZNRecord;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+
+/**
+ * Provides a typed interface to the context information stored by {@link TaskRebalancer} in the
+ * Helix property store.
+ */
+public class JobContext extends HelixProperty {
+ private enum ContextProperties {
+ START_TIME,
+ STATE,
+ NUM_ATTEMPTS,
+ FINISH_TIME,
+ TARGET,
+ TASK_ID
+ }
+
+ public JobContext(ZNRecord record) {
+ super(record);
+ }
+
+ public void setStartTime(long t) {
+ _record.setSimpleField(ContextProperties.START_TIME.toString(), String.valueOf(t));
+ }
+
+ public long getStartTime() {
+ String tStr = _record.getSimpleField(ContextProperties.START_TIME.toString());
+ if (tStr == null) {
+ return -1;
+ }
+
+ return Long.parseLong(tStr);
+ }
+
+ public void setPartitionState(int p, TaskPartitionState s) {
+ String pStr = String.valueOf(p);
+ Map<String, String> map = _record.getMapField(pStr);
+ if (map == null) {
+ map = new TreeMap<String, String>();
+ _record.setMapField(pStr, map);
+ }
+ map.put(ContextProperties.STATE.toString(), s.name());
+ }
+
+ public TaskPartitionState getPartitionState(int p) {
+ Map<String, String> map = _record.getMapField(String.valueOf(p));
+ if (map == null) {
+ return null;
+ }
+
+ String str = map.get(ContextProperties.STATE.toString());
+ if (str != null) {
+ return TaskPartitionState.valueOf(str);
+ } else {
+ return null;
+ }
+ }
+
+ public void setPartitionNumAttempts(int p, int n) {
+ String pStr = String.valueOf(p);
+ Map<String, String> map = _record.getMapField(pStr);
+ if (map == null) {
+ map = new TreeMap<String, String>();
+ _record.setMapField(pStr, map);
+ }
+ map.put(ContextProperties.NUM_ATTEMPTS.toString(), String.valueOf(n));
+ }
+
+ public int incrementNumAttempts(int pId) {
+ int n = this.getPartitionNumAttempts(pId);
+ if (n < 0) {
+ n = 0;
+ }
+ n += 1;
+ this.setPartitionNumAttempts(pId, n);
+ return n;
+ }
+
+ public int getPartitionNumAttempts(int p) {
+ Map<String, String> map = _record.getMapField(String.valueOf(p));
+ if (map == null) {
+ return -1;
+ }
+
+ String nStr = map.get(ContextProperties.NUM_ATTEMPTS.toString());
+ if (nStr == null) {
+ return -1;
+ }
+
+ return Integer.parseInt(nStr);
+ }
+
+ public void setPartitionFinishTime(int p, long t) {
+ String pStr = String.valueOf(p);
+ Map<String, String> map = _record.getMapField(pStr);
+ if (map == null) {
+ map = new TreeMap<String, String>();
+ _record.setMapField(pStr, map);
+ }
+ map.put(ContextProperties.FINISH_TIME.toString(), String.valueOf(t));
+ }
+
+ public long getPartitionFinishTime(int p) {
+ Map<String, String> map = _record.getMapField(String.valueOf(p));
+ if (map == null) {
+ return -1;
+ }
+
+ String tStr = map.get(ContextProperties.FINISH_TIME.toString());
+ if (tStr == null) {
+ return -1;
+ }
+
+ return Long.parseLong(tStr);
+ }
+
+ public void setPartitionTarget(int p, String targetPName) {
+ String pStr = String.valueOf(p);
+ Map<String, String> map = _record.getMapField(pStr);
+ if (map == null) {
+ map = new TreeMap<String, String>();
+ _record.setMapField(pStr, map);
+ }
+ map.put(ContextProperties.TARGET.toString(), targetPName);
+ }
+
+ public String getTargetForPartition(int p) {
+ String pStr = String.valueOf(p);
+ Map<String, String> map = _record.getMapField(pStr);
+ if (map == null) {
+ return null;
+ } else {
+ return map.get(ContextProperties.TARGET.toString());
+ }
+ }
+
+ public Map<String, List<Integer>> getPartitionsByTarget() {
+ Map<String, List<Integer>> result = Maps.newHashMap();
+ for (Map.Entry<String, Map<String, String>> mapField : _record.getMapFields().entrySet()) {
+ Integer pId = Integer.parseInt(mapField.getKey());
+ Map<String, String> map = mapField.getValue();
+ String target = map.get(ContextProperties.TARGET.toString());
+ if (target != null) {
+ List<Integer> partitions;
+ if (!result.containsKey(target)) {
+ partitions = Lists.newArrayList();
+ result.put(target, partitions);
+ } else {
+ partitions = result.get(target);
+ }
+ partitions.add(pId);
+ }
+ }
+ return result;
+ }
+
+ public Set<Integer> getPartitionSet() {
+ Set<Integer> partitions = Sets.newHashSet();
+ for (String pName : _record.getMapFields().keySet()) {
+ partitions.add(Integer.valueOf(pName));
+ }
+ return partitions;
+ }
+
+ public void setTaskIdForPartition(int p, String taskId) {
+ String pStr = String.valueOf(p);
+ Map<String, String> map = _record.getMapField(pStr);
+ if (map == null) {
+ map = new TreeMap<String, String>();
+ _record.setMapField(pStr, map);
+ }
+ map.put(ContextProperties.TASK_ID.toString(), taskId);
+ }
+
+ public String getTaskIdForPartition(int p) {
+ String pStr = String.valueOf(p);
+ Map<String, String> map = _record.getMapField(pStr);
+ if (map == null) {
+ return null;
+ } else {
+ return map.get(ContextProperties.TASK_ID.toString());
+ }
+ }
+
+ public Map<String, Integer> getTaskIdPartitionMap() {
+ Map<String, Integer> partitionMap = new HashMap<String, Integer>();
+ for (Map.Entry<String, Map<String, String>> mapField : _record.getMapFields().entrySet()) {
+ Integer pId = Integer.parseInt(mapField.getKey());
+ Map<String, String> map = mapField.getValue();
+ if (map.containsKey(ContextProperties.TASK_ID.toString())) {
+ partitionMap.put(map.get(ContextProperties.TASK_ID.toString()), pId);
+ }
+ }
+ return partitionMap;
+ }
+}
http://git-wip-us.apache.org/repos/asf/helix/blob/97ca4de4/helix-core/src/main/java/org/apache/helix/task/JobDag.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/JobDag.java b/helix-core/src/main/java/org/apache/helix/task/JobDag.java
new file mode 100644
index 0000000..18a721e
--- /dev/null
+++ b/helix-core/src/main/java/org/apache/helix/task/JobDag.java
@@ -0,0 +1,151 @@
+package org.apache.helix.task;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+import java.util.TreeSet;
+
+import org.codehaus.jackson.annotate.JsonProperty;
+import org.codehaus.jackson.map.ObjectMapper;
+
+/**
+ * Provides a convenient way to construct, traverse,
+ * and validate a job dependency graph
+ */
+public class JobDag {
+ @JsonProperty("parentsToChildren")
+ private Map<String, Set<String>> _parentsToChildren;
+
+ @JsonProperty("childrenToParents")
+ private Map<String, Set<String>> _childrenToParents;
+
+ @JsonProperty("allNodes")
+ private Set<String> _allNodes;
+
+ public static final JobDag EMPTY_DAG = new JobDag();
+
+ public JobDag() {
+ _parentsToChildren = new TreeMap<String, Set<String>>();
+ _childrenToParents = new TreeMap<String, Set<String>>();
+ _allNodes = new TreeSet<String>();
+ }
+
+ public void addParentToChild(String parent, String child) {
+ if (!_parentsToChildren.containsKey(parent)) {
+ _parentsToChildren.put(parent, new TreeSet<String>());
+ }
+ _parentsToChildren.get(parent).add(child);
+
+ if (!_childrenToParents.containsKey(child)) {
+ _childrenToParents.put(child, new TreeSet<String>());
+ }
+ _childrenToParents.get(child).add(parent);
+
+ _allNodes.add(parent);
+ _allNodes.add(child);
+ }
+
+ public void addNode(String node) {
+ _allNodes.add(node);
+ }
+
+ public Map<String, Set<String>> getParentsToChildren() {
+ return _parentsToChildren;
+ }
+
+ public Map<String, Set<String>> getChildrenToParents() {
+ return _childrenToParents;
+ }
+
+ public Set<String> getAllNodes() {
+ return _allNodes;
+ }
+
+ public Set<String> getDirectChildren(String node) {
+ if (!_parentsToChildren.containsKey(node)) {
+ return new TreeSet<String>();
+ }
+ return _parentsToChildren.get(node);
+ }
+
+ public Set<String> getDirectParents(String node) {
+ if (!_childrenToParents.containsKey(node)) {
+ return new TreeSet<String>();
+ }
+ return _childrenToParents.get(node);
+ }
+
+ public String toJson() throws Exception {
+ return new ObjectMapper().writeValueAsString(this);
+ }
+
+ public static JobDag fromJson(String json) {
+ try {
+ return new ObjectMapper().readValue(json, JobDag.class);
+ } catch (Exception e) {
+ throw new IllegalArgumentException("Unable to parse json " + json + " into job dag");
+ }
+ }
+
+ /**
+ * Checks that dag contains no cycles and all nodes are reachable.
+ */
+ public void validate() {
+ Set<String> prevIteration = new TreeSet<String>();
+
+ // get all unparented nodes
+ for (String node : _allNodes) {
+ if (getDirectParents(node).isEmpty()) {
+ prevIteration.add(node);
+ }
+ }
+
+ // visit children nodes up to max iteration count, by which point we should have exited
+ // naturally
+ Set<String> allNodesReached = new TreeSet<String>();
+ int iterationCount = 0;
+ int maxIterations = _allNodes.size() + 1;
+
+ while (!prevIteration.isEmpty() && iterationCount < maxIterations) {
+ // construct set of all children reachable from prev iteration
+ Set<String> thisIteration = new TreeSet<String>();
+ for (String node : prevIteration) {
+ thisIteration.addAll(getDirectChildren(node));
+ }
+
+ allNodesReached.addAll(prevIteration);
+ prevIteration = thisIteration;
+ iterationCount++;
+ }
+
+ allNodesReached.addAll(prevIteration);
+
+ if (iterationCount >= maxIterations) {
+ throw new IllegalArgumentException("DAG invalid: cycles detected");
+ }
+
+ if (!allNodesReached.containsAll(_allNodes)) {
+ throw new IllegalArgumentException("DAG invalid: unreachable nodes found. Reachable set is "
+ + allNodesReached);
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/helix/blob/97ca4de4/helix-core/src/main/java/org/apache/helix/task/TargetState.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/TargetState.java b/helix-core/src/main/java/org/apache/helix/task/TargetState.java
index 0551d6c..4285e67 100644
--- a/helix-core/src/main/java/org/apache/helix/task/TargetState.java
+++ b/helix-core/src/main/java/org/apache/helix/task/TargetState.java
@@ -20,20 +20,20 @@ package org.apache.helix.task;
*/
/**
- * Enumeration of target states for a task.
+ * Enumeration of target states for a job.
*/
public enum TargetState {
/**
- * Indicates that the rebalancer must start/resume the task.
+ * Indicates that the rebalancer must start/resume the job.
*/
START,
/**
- * Indicates that the rebalancer should stop any running task partitions and cease doing any
+ * Indicates that the rebalancer should stop any running tasks and cease doing any
* further task assignments.
*/
STOP,
/**
- * Indicates that the rebalancer must delete this task.
+ * Indicates that the rebalancer must delete this job.
*/
DELETE
}
http://git-wip-us.apache.org/repos/asf/helix/blob/97ca4de4/helix-core/src/main/java/org/apache/helix/task/TaskCallbackContext.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/TaskCallbackContext.java b/helix-core/src/main/java/org/apache/helix/task/TaskCallbackContext.java
new file mode 100644
index 0000000..124ec12
--- /dev/null
+++ b/helix-core/src/main/java/org/apache/helix/task/TaskCallbackContext.java
@@ -0,0 +1,67 @@
+package org.apache.helix.task;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import org.apache.helix.HelixManager;
+
+/**
+ * A wrapper for all information about a task and the job of which it is a part.
+ */
+public class TaskCallbackContext {
+ private HelixManager _manager;
+ private TaskConfig _taskConfig;
+ private JobConfig _jobConfig;
+
+ void setManager(HelixManager manager) {
+ _manager = manager;
+ }
+
+ void setTaskConfig(TaskConfig taskConfig) {
+ _taskConfig = taskConfig;
+ }
+
+ void setJobConfig(JobConfig jobConfig) {
+ _jobConfig = jobConfig;
+ }
+
+ /**
+ * Get an active Helix connection
+ * @return HelixManager instance
+ */
+ public HelixManager getManager() {
+ return _manager;
+ }
+
+ /**
+ * Get task-specific configuration properties
+ * @return TaskConfig instance
+ */
+ public TaskConfig getTaskConfig() {
+ return _taskConfig;
+ }
+
+ /**
+ * Get job-specific configuration properties
+ * @return JobConfig instance
+ */
+ public JobConfig getJobConfig() {
+ return _jobConfig;
+ }
+}
[03/50] [abbrv] git commit: Works with hello world spec.yaml
Posted by ka...@apache.org.
Works with hello world spec.yaml
Project: http://git-wip-us.apache.org/repos/asf/helix/repo
Commit: http://git-wip-us.apache.org/repos/asf/helix/commit/d1e7ca60
Tree: http://git-wip-us.apache.org/repos/asf/helix/tree/d1e7ca60
Diff: http://git-wip-us.apache.org/repos/asf/helix/diff/d1e7ca60
Branch: refs/heads/master
Commit: d1e7ca604de7cefd2effa65f43765902448b0820
Parents: 5a1391e
Author: Kishore Gopalakrishna <g....@gmail.com>
Authored: Wed Feb 19 10:29:48 2014 -0800
Committer: Kishore Gopalakrishna <g....@gmail.com>
Committed: Wed Feb 19 10:29:48 2014 -0800
----------------------------------------------------------------------
.../provisioner/ContainerProvider.java | 1 -
.../integration/TestLocalContainerProvider.java | 5 --
.../helix/provisioning/yarn/AppLauncher.java | 2 -
.../provisioning/yarn/YarnProvisioner.java | 10 +--
.../yarn/example/HelloWordAppSpecFactory.java | 85 ++++++++++++--------
.../yarn/example/HelloworldAppSpec.java | 69 ++++++++++++++--
.../main/resources/hello_world_app_spec.yaml | 20 ++---
7 files changed, 126 insertions(+), 66 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/helix/blob/d1e7ca60/helix-core/src/main/java/org/apache/helix/controller/provisioner/ContainerProvider.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/controller/provisioner/ContainerProvider.java b/helix-core/src/main/java/org/apache/helix/controller/provisioner/ContainerProvider.java
index a95abe0..a6fd791 100644
--- a/helix-core/src/main/java/org/apache/helix/controller/provisioner/ContainerProvider.java
+++ b/helix-core/src/main/java/org/apache/helix/controller/provisioner/ContainerProvider.java
@@ -33,5 +33,4 @@ public interface ContainerProvider {
ListenableFuture<Boolean> stopContainer(ContainerId containerId);
- ContainerState getContainerState(ContainerId containerId);
}
http://git-wip-us.apache.org/repos/asf/helix/blob/d1e7ca60/helix-core/src/test/java/org/apache/helix/integration/TestLocalContainerProvider.java
----------------------------------------------------------------------
diff --git a/helix-core/src/test/java/org/apache/helix/integration/TestLocalContainerProvider.java b/helix-core/src/test/java/org/apache/helix/integration/TestLocalContainerProvider.java
index 8eb5f56..0e4c803 100644
--- a/helix-core/src/test/java/org/apache/helix/integration/TestLocalContainerProvider.java
+++ b/helix-core/src/test/java/org/apache/helix/integration/TestLocalContainerProvider.java
@@ -270,11 +270,6 @@ public class TestLocalContainerProvider extends ZkUnitTestBase {
}
@Override
- public ContainerState getContainerState(ContainerId containerId) {
- return _states.get(containerId);
- }
-
- @Override
public TargetProviderResponse evaluateExistingContainers(Cluster cluster,
ResourceId resourceId, Collection<Participant> participants) {
TargetProviderResponse response = new TargetProviderResponse();
http://git-wip-us.apache.org/repos/asf/helix/blob/d1e7ca60/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppLauncher.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppLauncher.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppLauncher.java
index 409a195..d06ae67 100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppLauncher.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppLauncher.java
@@ -367,7 +367,5 @@ public class AppLauncher {
AppLauncher launcher = new AppLauncher(applicationSpecFactory, yamlConfigFile);
launcher.launch();
launcher.waitUntilDone();
-
}
-
}
http://git-wip-us.apache.org/repos/asf/helix/blob/d1e7ca60/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/YarnProvisioner.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/YarnProvisioner.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/YarnProvisioner.java
index 61b1d70..477023b 100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/YarnProvisioner.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/YarnProvisioner.java
@@ -73,6 +73,9 @@ public class YarnProvisioner implements Provisioner, TargetProvider, ContainerPr
Map<ContainerId, Container> allocatedContainersMap = new HashMap<ContainerId, Container>();
private HelixManager _helixManager;
private ResourceConfig _resourceConfig;
+ public YarnProvisioner(){
+
+ }
@Override
public ListenableFuture<ContainerId> allocateContainer(ContainerSpec spec) {
@@ -93,7 +96,7 @@ public class YarnProvisioner implements Provisioner, TargetProvider, ContainerPr
}
@Override
- public ListenableFuture<Boolean> deallocateContainer(ContainerId containerId) {
+ public ListenableFuture<Boolean> deallocateContainer(final ContainerId containerId) {
ListenableFuture<ContainerReleaseResponse> releaseContainer =
applicationMaster.releaseContainer(allocatedContainersMap.get(containerId));
return Futures.transform(releaseContainer, new Function<ContainerReleaseResponse, Boolean>() {
@@ -243,11 +246,6 @@ public class YarnProvisioner implements Provisioner, TargetProvider, ContainerPr
}
@Override
- public ContainerState getContainerState(ContainerId containerId) {
- return null;
- }
-
- @Override
public void init(HelixManager helixManager, ResourceConfig resourceConfig) {
_helixManager = helixManager;
_resourceConfig = resourceConfig;
http://git-wip-us.apache.org/repos/asf/helix/blob/d1e7ca60/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWordAppSpecFactory.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWordAppSpecFactory.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWordAppSpecFactory.java
index 1746fe6..f9f1980 100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWordAppSpecFactory.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWordAppSpecFactory.java
@@ -17,31 +17,34 @@ public class HelloWordAppSpecFactory implements ApplicationSpecFactory {
static HelloworldAppSpec data;
static {
- data = new HelloworldAppSpec();
- data._appConfig = new AppConfig();
- data._appConfig.setValue("k1", "v1");
- data._appName = "testApp";
- data._appMasterPackageUri =
- new File("/Users/kgopalak/Documents/projects/incubator-helix/helix-provisioning/target/helix-provisioning-0.7.1-incubating-SNAPSHOT-pkg.tar").toURI().toString();
- data._serviceConfigMap = new HashMap<String, Map<String, String>>();
- data._serviceConfigMap.put("HelloWorld", new HashMap<String, String>());
- data._serviceConfigMap.get("HelloWorld").put("k1", "v1");
- data._serviceMainClassMap = new HashMap<String, String>();
- data._serviceMainClassMap.put("HelloWorld", HelloWorldService.class.getCanonicalName());
- data._servicePackageURIMap = new HashMap<String, String>();
- data._servicePackageURIMap
+ HelloworldAppSpec data = new HelloworldAppSpec();
+ AppConfig appConfig = new AppConfig();
+ appConfig.setValue("k1", "v1");
+ data.setAppConfig(appConfig);
+ data.setAppName("testApp");
+ data.setAppMasterPackageUri(
+ "/Users/kgopalak/Documents/projects/incubator-helix/helix-provisioning/helix-provisioning-0.7.1-incubating-SNAPSHOT-pkg.tar");
+ HashMap<String, Map<String, String>> serviceConfigMap = new HashMap<String, Map<String, String>>();
+ serviceConfigMap.put("HelloWorld", new HashMap<String, String>());
+ serviceConfigMap.get("HelloWorld").put("k1", "v1");
+ data.setServiceConfigMap(serviceConfigMap);
+ HashMap<String, String> serviceMainClassMap = new HashMap<String, String>();
+ serviceMainClassMap.put("HelloWorld", HelloWorldService.class.getCanonicalName());
+ data.setServiceMainClassMap(serviceMainClassMap);
+ HashMap<String, String> servicePackageURIMap = new HashMap<String, String>();
+ servicePackageURIMap
.put(
"HelloWorld",
- new File("/Users/kgopalak/Documents/projects/incubator-helix/helix-provisioning/target/helix-provisioning-0.7.1-incubating-SNAPSHOT-pkg.tar").toURI().toString());
- data._services = Arrays.asList(new String[] {
+ "/Users/kgopalak/Documents/projects/incubator-helix/helix-provisioning/helix-provisioning-0.7.1-incubating-SNAPSHOT-pkg.tar");
+ data.setServicePackageURIMap(servicePackageURIMap);
+ data.setServices(Arrays.asList(new String[] {
"HelloWorld"
- });
-
- }
+ })); }
@Override
- public ApplicationSpec fromYaml(InputStream yamlFile) {
- return data;
+ public ApplicationSpec fromYaml(InputStream inputstream) {
+ return (ApplicationSpec) new Yaml().load(inputstream);
+ // return data;
}
public static void main(String[] args) {
@@ -50,26 +53,38 @@ public class HelloWordAppSpecFactory implements ApplicationSpecFactory {
Yaml yaml = new Yaml(options);
HelloworldAppSpec data = new HelloworldAppSpec();
- data._appConfig = new AppConfig();
- data._appConfig.setValue("k1", "v1");
- data._appName = "testApp";
- data._appMasterPackageUri =
- "/Users/kgopalak/Documents/projects/incubator-helix/helix-provisioning/helix-provisioning-0.7.1-incubating-SNAPSHOT-pkg.tar";
- data._serviceConfigMap = new HashMap<String, Map<String, String>>();
- data._serviceConfigMap.put("HelloWorld", new HashMap<String, String>());
- data._serviceConfigMap.get("HelloWorld").put("k1", "v1");
- data._serviceMainClassMap = new HashMap<String, String>();
- data._serviceMainClassMap.put("HelloWorld", HelloWorldService.class.getCanonicalName());
- data._servicePackageURIMap = new HashMap<String, String>();
- data._servicePackageURIMap
+ AppConfig appConfig = new AppConfig();
+ appConfig.setValue("k1", "v1");
+ data.setAppConfig(appConfig);
+ data.setAppName("testApp");
+ data.setAppMasterPackageUri(
+ "/Users/kgopalak/Documents/projects/incubator-helix/helix-provisioning/helix-provisioning-0.7.1-incubating-SNAPSHOT-pkg.tar");
+ HashMap<String, Map<String, String>> serviceConfigMap = new HashMap<String, Map<String, String>>();
+ serviceConfigMap.put("HelloWorld", new HashMap<String, String>());
+ serviceConfigMap.get("HelloWorld").put("k1", "v1");
+ data.setServiceConfigMap(serviceConfigMap);
+ HashMap<String, String> serviceMainClassMap = new HashMap<String, String>();
+ serviceMainClassMap.put("HelloWorld", HelloWorldService.class.getCanonicalName());
+ data.setServiceMainClassMap(serviceMainClassMap);
+ HashMap<String, String> servicePackageURIMap = new HashMap<String, String>();
+ servicePackageURIMap
.put(
"HelloWorld",
"/Users/kgopalak/Documents/projects/incubator-helix/helix-provisioning/helix-provisioning-0.7.1-incubating-SNAPSHOT-pkg.tar");
- data._services = Arrays.asList(new String[] {
+ data.setServicePackageURIMap(servicePackageURIMap);
+ data.setServices(Arrays.asList(new String[] {
"HelloWorld"
- });
+ }));
String dump = yaml.dump(data);
System.out.println(dump);
- }
+ InputStream resourceAsStream = ClassLoader.getSystemClassLoader().getResourceAsStream("hello_world_app_spec.yaml");
+ HelloworldAppSpec load = yaml.loadAs(resourceAsStream,HelloworldAppSpec.class);
+ String dumpnew = yaml.dump(load);
+ System.out.println(dumpnew.equals(dump));
+
+ System.out.println("==================================");
+ System.out.println(dumpnew);
+
+ }
}
http://git-wip-us.apache.org/repos/asf/helix/blob/d1e7ca60/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/example/HelloworldAppSpec.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/example/HelloworldAppSpec.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/example/HelloworldAppSpec.java
index 4bd3caa..2e4cd75 100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/example/HelloworldAppSpec.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/example/HelloworldAppSpec.java
@@ -15,21 +15,74 @@ import org.apache.helix.provisioning.yarn.TaskConfig;
public class HelloworldAppSpec implements ApplicationSpec {
- public String _appName;
+ private String _appName;
- public AppConfig _appConfig;
+ private AppConfig _appConfig;
- public List<String> _services;
+ private List<String> _services;
- public String _appMasterPackageUri;
+ private String _appMasterPackageUri;
+
+ private Map<String, String> _servicePackageURIMap;
- public Map<String, String> _servicePackageURIMap;
+ private Map<String, String> _serviceMainClassMap;
- public Map<String, String> _serviceMainClassMap;
+ private Map<String,Map<String,String>> _serviceConfigMap;
- public Map<String,Map<String,String>> _serviceConfigMap;
+ private List<TaskConfig> _taskConfigs;
+
+ public AppConfig getAppConfig() {
+ return _appConfig;
+ }
+
+ public void setAppConfig(AppConfig appConfig) {
+ _appConfig = appConfig;
+ }
+
+ public String getAppMasterPackageUri() {
+ return _appMasterPackageUri;
+ }
+
+ public void setAppMasterPackageUri(String appMasterPackageUri) {
+ _appMasterPackageUri = appMasterPackageUri;
+ }
+
+ public Map<String, String> getServicePackageURIMap() {
+ return _servicePackageURIMap;
+ }
+
+ public void setServicePackageURIMap(Map<String, String> servicePackageURIMap) {
+ _servicePackageURIMap = servicePackageURIMap;
+ }
+
+ public Map<String, String> getServiceMainClassMap() {
+ return _serviceMainClassMap;
+ }
+
+ public void setServiceMainClassMap(Map<String, String> serviceMainClassMap) {
+ _serviceMainClassMap = serviceMainClassMap;
+ }
+
+ public Map<String, Map<String, String>> getServiceConfigMap() {
+ return _serviceConfigMap;
+ }
+
+ public void setServiceConfigMap(Map<String, Map<String, String>> serviceConfigMap) {
+ _serviceConfigMap = serviceConfigMap;
+ }
+
+ public void setAppName(String appName) {
+ _appName = appName;
+ }
+
+ public void setServices(List<String> services) {
+ _services = services;
+ }
+
+ public void setTaskConfigs(List<TaskConfig> taskConfigs) {
+ _taskConfigs = taskConfigs;
+ }
- public List<TaskConfig> _taskConfigs;
@Override
public String getAppName() {
return _appName;
http://git-wip-us.apache.org/repos/asf/helix/blob/d1e7ca60/helix-provisioning/src/main/resources/hello_world_app_spec.yaml
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/resources/hello_world_app_spec.yaml b/helix-provisioning/src/main/resources/hello_world_app_spec.yaml
index 1f0bc70..648104a 100644
--- a/helix-provisioning/src/main/resources/hello_world_app_spec.yaml
+++ b/helix-provisioning/src/main/resources/hello_world_app_spec.yaml
@@ -1,21 +1,23 @@
!!org.apache.helix.provisioning.yarn.example.HelloworldAppSpec
-_appConfig:
+appConfig:
config: {
k1: v1
}
-_appMasterPackageUri: null
-_appName: testApp
-_serviceConfigMap:
+appMasterPackageUri: 'file:///Users/kgopalak/Documents/projects/incubator-helix/helix-provisioning/target/helix-provisioning-0.7.1-incubating-SNAPSHOT-pkg.tar'
+appName: testApp
+serviceConfigMap:
HelloWorld: {
k1: v1
}
-_serviceMainClassMap: {
+serviceMainClassMap: {
HelloWorld: org.apache.helix.provisioning.yarn.example.HelloWorldService
}
-_servicePackageURIMap: {
- HelloWorld: /Users/kgopalak/Documents/projects/incubator-helix/helix-provisioning/helix-provisioning-0.7.1-incubating-SNAPSHOT-pkg.tar
+servicePackageURIMap: {
+ HelloWorld: 'file:///Users/kgopalak/Documents/projects/incubator-helix/helix-provisioning/target/helix-provisioning-0.7.1-incubating-SNAPSHOT-pkg.tar'
}
-_services: [
+services: [
HelloWorld]
-_taskConfigs: null
+taskConfigs: null
+
+
[13/50] [abbrv] git commit: Shutdown message
Posted by ka...@apache.org.
Shutdown message
Project: http://git-wip-us.apache.org/repos/asf/helix/repo
Commit: http://git-wip-us.apache.org/repos/asf/helix/commit/c22cdd98
Tree: http://git-wip-us.apache.org/repos/asf/helix/tree/c22cdd98
Diff: http://git-wip-us.apache.org/repos/asf/helix/diff/c22cdd98
Branch: refs/heads/master
Commit: c22cdd98a51595d796925a682fcead7f0a90f881
Parents: b3dacb7
Author: Kanak Biscuitwala <ka...@apache.org>
Authored: Fri Feb 21 18:13:47 2014 -0800
Committer: Kanak Biscuitwala <ka...@apache.org>
Committed: Fri Feb 21 18:13:47 2014 -0800
----------------------------------------------------------------------
.../stages/ContainerProvisioningStage.java | 22 +++++-
.../manager/zk/AbstractParticipantService.java | 2 +-
.../java/org/apache/helix/model/Message.java | 3 +-
.../org/apache/helix/tools/ClusterSetup.java | 2 +
.../provisioning/yarn/ParticipantLauncher.java | 81 +++++++++++++++++++-
5 files changed, 102 insertions(+), 8 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/helix/blob/c22cdd98/helix-core/src/main/java/org/apache/helix/controller/stages/ContainerProvisioningStage.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/controller/stages/ContainerProvisioningStage.java b/helix-core/src/main/java/org/apache/helix/controller/stages/ContainerProvisioningStage.java
index f258525..bc3e0c6 100644
--- a/helix-core/src/main/java/org/apache/helix/controller/stages/ContainerProvisioningStage.java
+++ b/helix-core/src/main/java/org/apache/helix/controller/stages/ContainerProvisioningStage.java
@@ -23,6 +23,7 @@ import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
+import java.util.UUID;
import org.apache.helix.HelixAdmin;
import org.apache.helix.HelixDataAccessor;
@@ -45,6 +46,8 @@ import org.apache.helix.controller.provisioner.ProvisionerRef;
import org.apache.helix.controller.provisioner.TargetProvider;
import org.apache.helix.controller.provisioner.TargetProviderResponse;
import org.apache.helix.model.InstanceConfig;
+import org.apache.helix.model.Message;
+import org.apache.helix.model.Message.MessageType;
import org.apache.log4j.Logger;
import com.google.common.util.concurrent.FutureCallback;
@@ -123,6 +126,11 @@ public class ContainerProvisioningStage extends AbstractBaseStage {
LOG.info("Participant " + participantId + " is ready, marking as CONNECTED");
updateContainerState(helixAdmin, accessor, keyBuilder, cluster, participantId,
ContainerState.CONNECTED);
+ } else if (!participant.isAlive() && ContainerState.HALTING.equals(containerState)) {
+ // Mark as halted only once the live instance is gone
+ LOG.info("Participant " + participantId + " has been killed, marking as HALTED");
+ updateContainerState(helixAdmin, accessor, keyBuilder, cluster, participantId,
+ ContainerState.HALTED);
}
}
}
@@ -262,9 +270,17 @@ public class ContainerProvisioningStage extends AbstractBaseStage {
FutureCallback<Boolean> callback = new FutureCallback<Boolean>() {
@Override
public void onSuccess(Boolean result) {
- LOG.info("Container " + containerId + " stopped. Marking " + participant.getId());
- updateContainerState(helixAdmin, accessor, keyBuilder, cluster, participant.getId(),
- ContainerState.HALTED);
+ // Don't update the state here, wait for the live instance, but do send a shutdown
+ // message
+ LOG.info("Container " + containerId + " stopped for " + participant.getId());
+ if (participant.isAlive()) {
+ Message message = new Message(MessageType.SHUTDOWN, UUID.randomUUID().toString());
+ message.setTgtName(participant.getId().toString());
+ message.setTgtSessionId("*");
+ message.setMsgId(message.getId());
+ accessor.createProperty(
+ keyBuilder.message(participant.getId().toString(), message.getId()), message);
+ }
}
@Override
http://git-wip-us.apache.org/repos/asf/helix/blob/c22cdd98/helix-core/src/main/java/org/apache/helix/manager/zk/AbstractParticipantService.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/manager/zk/AbstractParticipantService.java b/helix-core/src/main/java/org/apache/helix/manager/zk/AbstractParticipantService.java
index f515092..49a7159 100644
--- a/helix-core/src/main/java/org/apache/helix/manager/zk/AbstractParticipantService.java
+++ b/helix-core/src/main/java/org/apache/helix/manager/zk/AbstractParticipantService.java
@@ -111,7 +111,7 @@ public abstract class AbstractParticipantService extends AbstractService {
* Get an instantiated participant instance.
* @return HelixParticipant
*/
- protected HelixParticipant getParticipant() {
+ public HelixParticipant getParticipant() {
return _participant;
}
http://git-wip-us.apache.org/repos/asf/helix/blob/c22cdd98/helix-core/src/main/java/org/apache/helix/model/Message.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/model/Message.java b/helix-core/src/main/java/org/apache/helix/model/Message.java
index d465a80..dcd77d9 100644
--- a/helix-core/src/main/java/org/apache/helix/model/Message.java
+++ b/helix-core/src/main/java/org/apache/helix/model/Message.java
@@ -60,7 +60,8 @@ public class Message extends HelixProperty {
CONTROLLER_MSG,
TASK_REPLY,
NO_OP,
- PARTICIPANT_ERROR_REPORT
+ PARTICIPANT_ERROR_REPORT,
+ SHUTDOWN
};
/**
http://git-wip-us.apache.org/repos/asf/helix/blob/c22cdd98/helix-core/src/main/java/org/apache/helix/tools/ClusterSetup.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/tools/ClusterSetup.java b/helix-core/src/main/java/org/apache/helix/tools/ClusterSetup.java
index 0247846..d97a853 100644
--- a/helix-core/src/main/java/org/apache/helix/tools/ClusterSetup.java
+++ b/helix-core/src/main/java/org/apache/helix/tools/ClusterSetup.java
@@ -166,6 +166,8 @@ public class ClusterSetup {
addStateModelDef(clusterName, "Task",
new StateModelDefinition(StateModelConfigGenerator.generateConfigForTaskStateModel()));
+ addStateModelDef(clusterName, "StatelessService", new StateModelDefinition(
+ StateModelConfigGenerator.generateConfigForStatelessService()));
}
public void activateCluster(String clusterName, String grandCluster, boolean enable) {
http://git-wip-us.apache.org/repos/asf/helix/blob/c22cdd98/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/ParticipantLauncher.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/ParticipantLauncher.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/ParticipantLauncher.java
index e9b6795..1a21a71 100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/ParticipantLauncher.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/ParticipantLauncher.java
@@ -7,13 +7,19 @@ import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.Options;
import org.apache.helix.HelixConnection;
+import org.apache.helix.NotificationContext;
import org.apache.helix.api.id.ClusterId;
import org.apache.helix.api.id.ParticipantId;
import org.apache.helix.manager.zk.AbstractParticipantService;
import org.apache.helix.manager.zk.ZkHelixConnection;
+import org.apache.helix.messaging.handling.HelixTaskResult;
+import org.apache.helix.messaging.handling.MessageHandler;
+import org.apache.helix.messaging.handling.MessageHandlerFactory;
+import org.apache.helix.model.Message;
+import org.apache.helix.model.Message.MessageType;
import org.apache.log4j.Logger;
+
/**
- *
* Main class that invokes the Participant Api
*/
public class ParticipantLauncher {
@@ -31,7 +37,7 @@ public class ParticipantLauncher {
try {
CommandLine cliParser = new GnuParser().parse(opts, args);
String zkAddress = cliParser.getOptionValue("zkAddress");
- HelixConnection connection = new ZkHelixConnection(zkAddress);
+ final HelixConnection connection = new ZkHelixConnection(zkAddress);
connection.connect();
ClusterId clusterId = ClusterId.from(cliParser.getOptionValue("cluster"));
ParticipantId participantId = ParticipantId.from(cliParser.getOptionValue("participantId"));
@@ -39,11 +45,27 @@ public class ParticipantLauncher {
@SuppressWarnings("unchecked")
Class<? extends AbstractParticipantService> clazz =
(Class<? extends AbstractParticipantService>) Class.forName(participantClass);
- AbstractParticipantService containerParticipant =
+ final AbstractParticipantService containerParticipant =
clazz.getConstructor(HelixConnection.class, ClusterId.class, ParticipantId.class)
.newInstance(connection, clusterId, participantId);
containerParticipant.startAsync();
containerParticipant.awaitRunning(60, TimeUnit.SECONDS);
+ containerParticipant
+ .getParticipant()
+ .getMessagingService()
+ .registerMessageHandlerFactory(MessageType.SHUTDOWN.toString(),
+ new ShutdownMessageHandlerFactory(containerParticipant, connection));
+ Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() {
+ @Override
+ public void run() {
+ LOG.info("Received a shutdown signal. Stopping participant");
+ containerParticipant.stopAsync();
+ containerParticipant.awaitTerminated();
+ connection.disconnect();
+ }
+ }) {
+
+ });
Thread.currentThread().join();
} catch (Exception e) {
e.printStackTrace();
@@ -57,4 +79,57 @@ public class ParticipantLauncher {
}
}
+
+ public static class ShutdownMessageHandlerFactory implements MessageHandlerFactory {
+ private final AbstractParticipantService _service;
+ private final HelixConnection _connection;
+
+ public ShutdownMessageHandlerFactory(AbstractParticipantService service,
+ HelixConnection connection) {
+ _service = service;
+ _connection = connection;
+ }
+
+ @Override
+ public MessageHandler createHandler(Message message, NotificationContext context) {
+ return new ShutdownMessageHandler(_service, _connection, message, context);
+ }
+
+ @Override
+ public String getMessageType() {
+ return MessageType.SHUTDOWN.toString();
+ }
+
+ @Override
+ public void reset() {
+ }
+
+ }
+
+ public static class ShutdownMessageHandler extends MessageHandler {
+ private final AbstractParticipantService _service;
+ private final HelixConnection _connection;
+
+ public ShutdownMessageHandler(AbstractParticipantService service, HelixConnection connection,
+ Message message, NotificationContext context) {
+ super(message, context);
+ _service = service;
+ _connection = connection;
+ }
+
+ @Override
+ public HelixTaskResult handleMessage() throws InterruptedException {
+ LOG.info("Received a shutdown message. Trying to shut down.");
+ _service.stopAsync();
+ _service.awaitTerminated();
+ _connection.disconnect();
+ System.exit(1);
+ return null;
+ }
+
+ @Override
+ public void onError(Exception e, ErrorCode code, ErrorType type) {
+ }
+
+ }
}
[17/50] [abbrv] Moving packages around
Posted by ka...@apache.org.
http://git-wip-us.apache.org/repos/asf/helix/blob/8992aa5a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/Client.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/Client.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/Client.java
deleted file mode 100644
index 500df9c..0000000
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/Client.java
+++ /dev/null
@@ -1,627 +0,0 @@
-package org.apache.helix.provisioning.yarn;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Vector;
-
-import org.apache.commons.cli.CommandLine;
-import org.apache.commons.cli.GnuParser;
-import org.apache.commons.cli.HelpFormatter;
-import org.apache.commons.cli.Options;
-import org.apache.commons.cli.ParseException;
-import org.apache.commons.compress.archivers.ArchiveStreamFactory;
-import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
-import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.DataOutputBuffer;
-import org.apache.hadoop.security.Credentials;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
-import org.apache.hadoop.yarn.api.ApplicationConstants;
-import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
-import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.api.records.ApplicationReport;
-import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
-import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
-import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
-import org.apache.hadoop.yarn.api.records.LocalResource;
-import org.apache.hadoop.yarn.api.records.LocalResourceType;
-import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
-import org.apache.hadoop.yarn.api.records.NodeReport;
-import org.apache.hadoop.yarn.api.records.NodeState;
-import org.apache.hadoop.yarn.api.records.Priority;
-import org.apache.hadoop.yarn.api.records.QueueACL;
-import org.apache.hadoop.yarn.api.records.QueueInfo;
-import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
-import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.api.records.YarnApplicationState;
-import org.apache.hadoop.yarn.api.records.YarnClusterMetrics;
-import org.apache.hadoop.yarn.client.api.YarnClient;
-import org.apache.hadoop.yarn.client.api.YarnClientApplication;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.exceptions.YarnException;
-import org.apache.hadoop.yarn.util.ConverterUtils;
-import org.apache.hadoop.yarn.util.Records;
-
-/**
- * Client for Distributed Shell application submission to YARN.
- * <p>
- * The distributed shell client allows an application master to be launched that in turn would run
- * the provided shell command on a set of containers.
- * </p>
- * <p>
- * This client is meant to act as an example on how to write yarn-based applications.
- * </p>
- * <p>
- * To submit an application, a client first needs to connect to the <code>ResourceManager</code> aka
- * ApplicationsManager or ASM via the {@link ApplicationClientProtocol}. The
- * {@link ApplicationClientProtocol} provides a way for the client to get access to cluster
- * information and to request for a new {@link ApplicationId}.
- * <p>
- * <p>
- * For the actual job submission, the client first has to create an
- * {@link ApplicationSubmissionContext}. The {@link ApplicationSubmissionContext} defines the
- * application details such as {@link ApplicationId} and application name, the priority assigned to
- * the application and the queue to which this application needs to be assigned. In addition to
- * this, the {@link ApplicationSubmissionContext} also defines the {@link ContainerLaunchContext}
- * which describes the <code>Container</code> with which the {@link ApplicationMaster} is launched.
- * </p>
- * <p>
- * The {@link ContainerLaunchContext} in this scenario defines the resources to be allocated for the
- * {@link ApplicationMaster}'s container, the local resources (jars, configuration files) to be made
- * available and the environment to be set for the {@link ApplicationMaster} and the commands to be
- * executed to run the {@link ApplicationMaster}.
- * <p>
- * <p>
- * Using the {@link ApplicationSubmissionContext}, the client submits the application to the
- * <code>ResourceManager</code> and then monitors the application by requesting the
- * <code>ResourceManager</code> for an {@link ApplicationReport} at regular time intervals. In case
- * of the application taking too long, the client kills the application by submitting a
- * {@link KillApplicationRequest} to the <code>ResourceManager</code>.
- * </p>
- */
-@InterfaceAudience.Public
-@InterfaceStability.Unstable
-public class Client {
-
- private static final Log LOG = LogFactory.getLog(Client.class);
-
- // Configuration
- private Configuration conf;
- private YarnClient yarnClient;
- // Application master specific info to register a new Application with RM/ASM
- private String appName = "";
- // App master priority
- private int amPriority = 0;
- // Queue for App master
- private String amQueue = "";
- // Amt. of memory resource to request for to run the App Master
- private int amMemory = 1024;
-
- // Application master jar file
- private String appMasterArchive = "";
- // Main class to invoke application master
- private final String appMasterMainClass;
-
- private String appSpecFile = "";
-
- // No. of containers in which helix participants will be started
- private int numContainers = 1;
-
- // log4j.properties file
- // if available, add to local resources and set into classpath
- private String log4jPropFile = "";
-
- // Start time for client
- private final long clientStartTime = System.currentTimeMillis();
- // Timeout threshold for client. Kill app after time interval expires.
- private long clientTimeout = 600000;
-
- // Debug flag
- boolean debugFlag = false;
-
- // Command line options
- private Options opts;
-
- /**
- */
- public Client(Configuration conf) throws Exception {
- this("org.apache.helix.provisioning.yarn.HelixYarnApplicationMasterMain", conf);
- }
-
- Client(String appMasterMainClass, Configuration conf) {
- this.conf = conf;
- this.appMasterMainClass = appMasterMainClass;
- yarnClient = YarnClient.createYarnClient();
- yarnClient.init(conf);
- opts = new Options();
- opts.addOption("appName", true, "Application Name.");
- opts.addOption("priority", true, "Application Priority. Default 0");
- opts.addOption("queue", true, "RM Queue in which this application is to be submitted");
- opts.addOption("master_memory", true,
- "Amount of memory in MB to be requested to run the application master");
- opts.addOption("archive", true, "Archive file containing the app code");
- opts.addOption("appSpec", true, "Application specification");
- opts.addOption("num_containers", true,
- "No. of containers on which Helix Participants will be launched");
- opts.addOption("log_properties", true, "log4j.properties file");
- opts.addOption("debug", false, "Dump out debug information");
- opts.addOption("help", false, "Print usage");
-
- }
-
- /**
- */
- public Client() throws Exception {
- this(new YarnConfiguration());
- }
-
- /**
- * Helper function to print out usage
- */
- private void printUsage() {
- new HelpFormatter().printHelp("Client", opts);
- }
-
- /**
- * Parse command line options
- * @param args Parsed command line options
- * @return Whether the init was successful to run the client
- * @throws ParseException
- */
- public boolean init(String[] args) throws ParseException {
-
- CommandLine cliParser = new GnuParser().parse(opts, args);
-
- if (args.length == 0) {
- throw new IllegalArgumentException("No args specified for client to initialize");
- }
-
- if (cliParser.hasOption("help")) {
- printUsage();
- return false;
- }
-
- if (cliParser.hasOption("debug")) {
- debugFlag = true;
- }
-
- appName = cliParser.getOptionValue("appName");
- amPriority = Integer.parseInt(cliParser.getOptionValue("priority", "0"));
- amQueue = cliParser.getOptionValue("queue", "default");
- amMemory = Integer.parseInt(cliParser.getOptionValue("master_memory", "1024"));
-
- if (amMemory < 0) {
- throw new IllegalArgumentException(
- "Invalid memory specified for application master, exiting." + " Specified memory="
- + amMemory);
- }
-
- if (!cliParser.hasOption("archive")) {
- throw new IllegalArgumentException("No archive file specified for application master");
- }
-
- appMasterArchive = cliParser.getOptionValue("archive");
-
- numContainers = Integer.parseInt(cliParser.getOptionValue("num_containers", "4"));
-
- log4jPropFile = cliParser.getOptionValue("log_properties", "");
-
- return true;
- }
-
- /**
- * Main run function for the client
- * @return true if application completed successfully
- * @throws IOException
- * @throws YarnException
- */
- public boolean run() throws IOException, YarnException {
-
- LOG.info("Running Client");
- yarnClient.start();
-
- YarnClusterMetrics clusterMetrics = yarnClient.getYarnClusterMetrics();
- LOG.info("Got Cluster metric info from ASM" + ", numNodeManagers="
- + clusterMetrics.getNumNodeManagers());
-
- List<NodeReport> clusterNodeReports = yarnClient.getNodeReports(NodeState.RUNNING);
- LOG.info("Got Cluster node info from ASM");
- for (NodeReport node : clusterNodeReports) {
- LOG.info("Got node report from ASM for" + ", nodeId=" + node.getNodeId() + ", nodeAddress"
- + node.getHttpAddress() + ", nodeRackName" + node.getRackName() + ", nodeNumContainers"
- + node.getNumContainers());
- }
-
- QueueInfo queueInfo = yarnClient.getQueueInfo(this.amQueue);
- LOG.info("Queue info" + ", queueName=" + queueInfo.getQueueName() + ", queueCurrentCapacity="
- + queueInfo.getCurrentCapacity() + ", queueMaxCapacity=" + queueInfo.getMaximumCapacity()
- + ", queueApplicationCount=" + queueInfo.getApplications().size()
- + ", queueChildQueueCount=" + queueInfo.getChildQueues().size());
-
- List<QueueUserACLInfo> listAclInfo = yarnClient.getQueueAclsInfo();
- for (QueueUserACLInfo aclInfo : listAclInfo) {
- for (QueueACL userAcl : aclInfo.getUserAcls()) {
- LOG.info("User ACL Info for Queue" + ", queueName=" + aclInfo.getQueueName() + ", userAcl="
- + userAcl.name());
- }
- }
-
- // Get a new application id
- YarnClientApplication app = yarnClient.createApplication();
- GetNewApplicationResponse appResponse = app.getNewApplicationResponse();
- // TODO get min/max resource capabilities from RM and change memory ask if needed
- // If we do not have min/max, we may not be able to correctly request
- // the required resources from the RM for the app master
- // Memory ask has to be a multiple of min and less than max.
- // Dump out information about cluster capability as seen by the resource manager
- int maxMem = appResponse.getMaximumResourceCapability().getMemory();
- LOG.info("Max mem capabililty of resources in this cluster " + maxMem);
-
- // A resource ask cannot exceed the max.
- if (amMemory > maxMem) {
- LOG.info("AM memory specified above max threshold of cluster. Using max value."
- + ", specified=" + amMemory + ", max=" + maxMem);
- amMemory = maxMem;
- }
-
- // set the application name
- ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
- ApplicationId appId = appContext.getApplicationId();
- appContext.setApplicationName(appName);
-
- // Set up the container launch context for the application master
- ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);
-
- // set local resources for the application master
- // local files or archives as needed
- // In this scenario, the jar file for the application master is part of the local resources
- Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
-
- LOG.info("Copy App archive file from local filesystem and add to local environment");
- // Copy the application master jar to the filesystem
- // Create a local resource to point to the destination jar path
- FileSystem fs = FileSystem.get(conf);
- Path src = new Path(appMasterArchive);
- String pathSuffix = appName + "/" + appId.getId() + "/app-pkg.tar";
- Path dst = new Path(fs.getHomeDirectory(), pathSuffix);
- fs.copyFromLocalFile(false, true, src, dst);
- FileStatus destStatus = fs.getFileStatus(dst);
- LocalResource amJarRsrc = Records.newRecord(LocalResource.class);
-
- // Set the type of resource - file or archive
- // archives are untarred at destination
- // we don't need the jar file to be untarred for now
- amJarRsrc.setType(LocalResourceType.ARCHIVE);
- // Set visibility of the resource
- // Setting to most private option
- amJarRsrc.setVisibility(LocalResourceVisibility.APPLICATION);
- // Set the resource to be copied over
- amJarRsrc.setResource(ConverterUtils.getYarnUrlFromPath(dst));
- // Set timestamp and length of file so that the framework
- // can do basic sanity checks for the local resource
- // after it has been copied over to ensure it is the same
- // resource the client intended to use with the application
- amJarRsrc.setTimestamp(destStatus.getModificationTime());
- amJarRsrc.setSize(destStatus.getLen());
- localResources.put("app-pkg", amJarRsrc);
-
- Path localAppSpec = new Path(appSpecFile);
- pathSuffix = appName + "/" + appId.getId() + "/app-spec.yaml";
- Path dstAppSpec = new Path(fs.getHomeDirectory(), pathSuffix);
- fs.copyFromLocalFile(false, true, localAppSpec, dstAppSpec);
- destStatus = fs.getFileStatus(dst);
- LocalResource appSpecResource = Records.newRecord(LocalResource.class);
-
- appSpecResource.setType(LocalResourceType.FILE);
- appSpecResource.setVisibility(LocalResourceVisibility.APPLICATION);
- appSpecResource.setResource(ConverterUtils.getYarnUrlFromPath(dstAppSpec));
- appSpecResource.setTimestamp(destStatus.getModificationTime());
- appSpecResource.setSize(destStatus.getLen());
- localResources.put("app-spec", appSpecResource);
-
- // Set the log4j properties if needed
- if (!log4jPropFile.isEmpty()) {
- Path log4jSrc = new Path(log4jPropFile);
- Path log4jDst = new Path(fs.getHomeDirectory(), "log4j.props");
- fs.copyFromLocalFile(false, true, log4jSrc, log4jDst);
- FileStatus log4jFileStatus = fs.getFileStatus(log4jDst);
- LocalResource log4jRsrc = Records.newRecord(LocalResource.class);
- log4jRsrc.setType(LocalResourceType.FILE);
- log4jRsrc.setVisibility(LocalResourceVisibility.APPLICATION);
- log4jRsrc.setResource(ConverterUtils.getYarnUrlFromURI(log4jDst.toUri()));
- log4jRsrc.setTimestamp(log4jFileStatus.getModificationTime());
- log4jRsrc.setSize(log4jFileStatus.getLen());
- localResources.put("log4j.properties", log4jRsrc);
- }
-
- // Set local resource info into app master container launch context
- amContainer.setLocalResources(localResources);
-
- // Set the necessary security tokens as needed
- // amContainer.setContainerTokens(containerToken);
-
- // Add AppMaster.jar location to classpath
- // At some point we should not be required to add
- // the hadoop specific classpaths to the env.
- // It should be provided out of the box.
- // For now setting all required classpaths including
- // the classpath to "." for the application jar
- StringBuilder classPathEnv =
- new StringBuilder(Environment.CLASSPATH.$()).append(File.pathSeparatorChar).append("./*");
- StringBuilder appClassPathEnv = new StringBuilder();
- // put the jar files under the archive in the classpath
- try {
- final InputStream is = new FileInputStream(appMasterArchive);
- final TarArchiveInputStream debInputStream =
- (TarArchiveInputStream) new ArchiveStreamFactory().createArchiveInputStream("tar", is);
- TarArchiveEntry entry = null;
- while ((entry = (TarArchiveEntry) debInputStream.getNextEntry()) != null) {
- if (entry.isFile()) {
- appClassPathEnv.append(File.pathSeparatorChar);
- appClassPathEnv.append("./app-pkg/" + entry.getName());
- }
- }
- debInputStream.close();
-
- } catch (Exception e) {
- LOG.error("Unable to read archive file:" + appMasterArchive, e);
- }
- classPathEnv.append(appClassPathEnv);
- for (String c : conf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
- YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH)) {
- classPathEnv.append(File.pathSeparatorChar);
- classPathEnv.append(c.trim());
- }
- classPathEnv.append(File.pathSeparatorChar).append("./log4j.properties");
-
- // add the runtime classpath needed for tests to work
- if (conf.getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)) {
- classPathEnv.append(':');
- classPathEnv.append(System.getProperty("java.class.path"));
- }
- System.out.println("classpath" + classPathEnv.toString());
- // Set the env variables to be setup in the env where the application master will be run
- LOG.info("Set the environment for the application master");
- Map<String, String> env = new HashMap<String, String>();
- env.put("app_pkg_path", fs.getHomeDirectory() + "/" + appName + "/" + appId.getId()
- + "/app-pkg.tar");
- env.put("appName", appName);
- env.put("appId", "" + appId.getId());
- env.put("CLASSPATH", classPathEnv.toString());
- env.put("appClasspath", appClassPathEnv.toString());
- env.put("containerParticipantMainClass",
- "org.apache.helix.provisioning.yarn.ParticipantLauncher");
- amContainer.setEnvironment(env);
-
- // Set the necessary command to execute the application master
- Vector<CharSequence> vargs = new Vector<CharSequence>(30);
-
- // Set java executable command
- LOG.info("Setting up app master command");
- vargs.add(Environment.JAVA_HOME.$() + "/bin/java");
- // Set Xmx based on am memory size
- vargs.add("-Xmx" + amMemory + "m");
- // Set class name
- vargs.add(appMasterMainClass);
- // Set params for Application Master
- vargs.add("--num_containers " + String.valueOf(numContainers));
-
- if (debugFlag) {
- vargs.add("--debug");
- }
-
- vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stdout");
- vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stderr");
-
- // Get final commmand
- StringBuilder command = new StringBuilder();
- for (CharSequence str : vargs) {
- command.append(str).append(" ");
- }
-
- LOG.info("Completed setting up app master command " + command.toString());
- List<String> commands = new ArrayList<String>();
- commands.add(command.toString());
- amContainer.setCommands(commands);
-
- // Set up resource type requirements
- // For now, only memory is supported so we set memory requirements
- Resource capability = Records.newRecord(Resource.class);
- capability.setMemory(amMemory);
- appContext.setResource(capability);
-
- // Service data is a binary blob that can be passed to the application
- // Not needed in this scenario
- // amContainer.setServiceData(serviceData);
-
- // Setup security tokens
- if (UserGroupInformation.isSecurityEnabled()) {
- Credentials credentials = new Credentials();
- String tokenRenewer = conf.get(YarnConfiguration.RM_PRINCIPAL);
- if (tokenRenewer == null || tokenRenewer.length() == 0) {
- throw new IOException("Can't get Master Kerberos principal for the RM to use as renewer");
- }
-
- // For now, only getting tokens for the default file-system.
- final Token<?> tokens[] = fs.addDelegationTokens(tokenRenewer, credentials);
- if (tokens != null) {
- for (Token<?> token : tokens) {
- LOG.info("Got dt for " + fs.getUri() + "; " + token);
- }
- }
- DataOutputBuffer dob = new DataOutputBuffer();
- credentials.writeTokenStorageToStream(dob);
- ByteBuffer fsTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
- amContainer.setTokens(fsTokens);
- }
-
- appContext.setAMContainerSpec(amContainer);
-
- // Set the priority for the application master
- Priority pri = Records.newRecord(Priority.class);
- // TODO - what is the range for priority? how to decide?
- pri.setPriority(amPriority);
- appContext.setPriority(pri);
-
- // Set the queue to which this application is to be submitted in the RM
- appContext.setQueue(amQueue);
-
- // Submit the application to the applications manager
- // SubmitApplicationResponse submitResp = applicationsManager.submitApplication(appRequest);
- // Ignore the response as either a valid response object is returned on success
- // or an exception thrown to denote some form of a failure
- LOG.info("Submitting application to ASM");
-
- yarnClient.submitApplication(appContext);
-
- // TODO
- // Try submitting the same request again
- // app submission failure?
-
- // Monitor the application
- return monitorApplication(appId);
-
- }
-
- /**
- * Monitor the submitted application for completion.
- * Kill application if time expires.
- * @param appId Application Id of application to be monitored
- * @return true if application completed successfully
- * @throws YarnException
- * @throws IOException
- */
- private boolean monitorApplication(ApplicationId appId) throws YarnException, IOException {
-
- while (true) {
-
- // Check app status every 10 second.
- try {
- Thread.sleep(10000);
- } catch (InterruptedException e) {
- LOG.debug("Thread sleep in monitoring loop interrupted");
- }
-
- // Get application report for the appId we are interested in
- ApplicationReport report = yarnClient.getApplicationReport(appId);
-
- LOG.info("Got application report from ASM for" + ", appId=" + appId.getId()
- + ", clientToAMToken=" + report.getClientToAMToken() + ", appDiagnostics="
- + report.getDiagnostics() + ", appMasterHost=" + report.getHost() + ", appQueue="
- + report.getQueue() + ", appMasterRpcPort=" + report.getRpcPort() + ", appStartTime="
- + report.getStartTime() + ", yarnAppState=" + report.getYarnApplicationState().toString()
- + ", distributedFinalState=" + report.getFinalApplicationStatus().toString()
- + ", appTrackingUrl=" + report.getTrackingUrl() + ", appUser=" + report.getUser());
-
- YarnApplicationState state = report.getYarnApplicationState();
- FinalApplicationStatus dsStatus = report.getFinalApplicationStatus();
- if (YarnApplicationState.FINISHED == state) {
- if (FinalApplicationStatus.SUCCEEDED == dsStatus) {
- LOG.info("Application has completed successfully. Breaking monitoring loop");
- return true;
- } else {
- LOG.info("Application did finished unsuccessfully." + " YarnState=" + state.toString()
- + ", DSFinalStatus=" + dsStatus.toString() + ". Breaking monitoring loop");
- return false;
- }
- } else if (YarnApplicationState.KILLED == state || YarnApplicationState.FAILED == state) {
- LOG.info("Application did not finish." + " YarnState=" + state.toString()
- + ", DSFinalStatus=" + dsStatus.toString() + ". Breaking monitoring loop");
- return false;
- }
-
- /*
- * if (System.currentTimeMillis() > (clientStartTime + clientTimeout)) {
- * LOG.info("Reached client specified timeout for application. Killing application");
- * forceKillApplication(appId);
- * return false;
- * }
- */
- }
-
- }
-
- /**
- * Kill a submitted application by sending a call to the ASM
- * @param appId Application Id to be killed.
- * @throws YarnException
- * @throws IOException
- */
- private void forceKillApplication(ApplicationId appId) throws YarnException, IOException {
- // TODO clarify whether multiple jobs with the same app id can be submitted and be running at
- // the same time.
- // If yes, can we kill a particular attempt only?
-
- // Response can be ignored as it is non-null on success or
- // throws an exception in case of failures
- // yarnClient.killApplication(appId);
- }
-
- /**
- * @param args Command line arguments
- */
- public static void main(String[] args) {
- boolean result = false;
- try {
- Client client = new Client();
- LOG.info("Initializing Client");
- try {
- boolean doRun = client.init(args);
- if (!doRun) {
- System.exit(0);
- }
- } catch (IllegalArgumentException e) {
- System.err.println(e.getLocalizedMessage());
- client.printUsage();
- System.exit(-1);
- }
- result = client.run();
- } catch (Throwable t) {
- LOG.fatal("Error running CLient", t);
- System.exit(1);
- }
- if (result) {
- LOG.info("Application completed successfully");
- System.exit(0);
- }
- LOG.error("Application failed to complete successfully");
- System.exit(2);
- }
-
-}
http://git-wip-us.apache.org/repos/asf/helix/blob/8992aa5a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/ContainerAskResponse.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/ContainerAskResponse.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/ContainerAskResponse.java
deleted file mode 100644
index c570932..0000000
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/ContainerAskResponse.java
+++ /dev/null
@@ -1,17 +0,0 @@
-package org.apache.helix.provisioning.yarn;
-
-import org.apache.hadoop.yarn.api.records.Container;
-
-public class ContainerAskResponse {
-
- Container container;
-
- public Container getContainer() {
- return container;
- }
-
- public void setContainer(Container container) {
- this.container = container;
- }
-
-}
http://git-wip-us.apache.org/repos/asf/helix/blob/8992aa5a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/ContainerLaunchResponse.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/ContainerLaunchResponse.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/ContainerLaunchResponse.java
deleted file mode 100644
index c91cb93..0000000
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/ContainerLaunchResponse.java
+++ /dev/null
@@ -1,5 +0,0 @@
-package org.apache.helix.provisioning.yarn;
-
-public class ContainerLaunchResponse {
-
-}
http://git-wip-us.apache.org/repos/asf/helix/blob/8992aa5a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/ContainerReleaseResponse.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/ContainerReleaseResponse.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/ContainerReleaseResponse.java
deleted file mode 100644
index 77d50ba..0000000
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/ContainerReleaseResponse.java
+++ /dev/null
@@ -1,5 +0,0 @@
-package org.apache.helix.provisioning.yarn;
-
-public class ContainerReleaseResponse {
-
-}
http://git-wip-us.apache.org/repos/asf/helix/blob/8992aa5a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/ContainerStopResponse.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/ContainerStopResponse.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/ContainerStopResponse.java
deleted file mode 100644
index 4c0022a..0000000
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/ContainerStopResponse.java
+++ /dev/null
@@ -1,5 +0,0 @@
-package org.apache.helix.provisioning.yarn;
-
-public class ContainerStopResponse {
-
-}
http://git-wip-us.apache.org/repos/asf/helix/blob/8992aa5a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/DSConstants.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/DSConstants.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/DSConstants.java
deleted file mode 100644
index a9fdf3d..0000000
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/DSConstants.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.helix.provisioning.yarn;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-/**
- * Constants used in both Client and Application Master
- */
-@InterfaceAudience.Public
-@InterfaceStability.Unstable
-public class DSConstants {
-
- /**
- * Environment key name pointing to the shell script's location
- */
- public static final String DISTRIBUTEDSHELLSCRIPTLOCATION = "DISTRIBUTEDSHELLSCRIPTLOCATION";
-
- /**
- * Environment key name denoting the file timestamp for the shell script.
- * Used to validate the local resource.
- */
- public static final String DISTRIBUTEDSHELLSCRIPTTIMESTAMP = "DISTRIBUTEDSHELLSCRIPTTIMESTAMP";
-
- /**
- * Environment key name denoting the file content length for the shell script.
- * Used to validate the local resource.
- */
- public static final String DISTRIBUTEDSHELLSCRIPTLEN = "DISTRIBUTEDSHELLSCRIPTLEN";
-}
http://git-wip-us.apache.org/repos/asf/helix/blob/8992aa5a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/FixedTargetProvider.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/FixedTargetProvider.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/FixedTargetProvider.java
new file mode 100644
index 0000000..83ad461
--- /dev/null
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/FixedTargetProvider.java
@@ -0,0 +1,20 @@
+package org.apache.helix.provisioning.yarn;
+
+import java.util.Collection;
+
+import org.apache.helix.api.Cluster;
+import org.apache.helix.api.Participant;
+import org.apache.helix.api.id.ResourceId;
+import org.apache.helix.controller.provisioner.TargetProvider;
+import org.apache.helix.controller.provisioner.TargetProviderResponse;
+
+public class FixedTargetProvider implements TargetProvider {
+
+ @Override
+ public TargetProviderResponse evaluateExistingContainers(Cluster cluster, ResourceId resourceId,
+ Collection<Participant> participants) {
+ // TODO Auto-generated method stub
+ return null;
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/helix/blob/8992aa5a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/GenericApplicationMaster.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/GenericApplicationMaster.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/GenericApplicationMaster.java
index 79eb402..346af4b 100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/GenericApplicationMaster.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/GenericApplicationMaster.java
@@ -56,6 +56,10 @@ import org.apache.hadoop.yarn.client.api.async.impl.NMClientAsyncImpl;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
+import org.apache.helix.provisioning.ContainerAskResponse;
+import org.apache.helix.provisioning.ContainerLaunchResponse;
+import org.apache.helix.provisioning.ContainerReleaseResponse;
+import org.apache.helix.provisioning.ContainerStopResponse;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Joiner;
http://git-wip-us.apache.org/repos/asf/helix/blob/8992aa5a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/HelixYarnApplicationMasterMain.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/HelixYarnApplicationMasterMain.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/HelixYarnApplicationMasterMain.java
deleted file mode 100644
index 5884a35..0000000
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/HelixYarnApplicationMasterMain.java
+++ /dev/null
@@ -1,159 +0,0 @@
-package org.apache.helix.provisioning.yarn;
-
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.Map;
-
-import org.I0Itec.zkclient.IDefaultNameSpace;
-import org.I0Itec.zkclient.ZkClient;
-import org.I0Itec.zkclient.ZkServer;
-import org.apache.commons.cli.CommandLine;
-import org.apache.commons.cli.GnuParser;
-import org.apache.commons.cli.Options;
-import org.apache.commons.cli.ParseException;
-import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
-import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
-import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.exceptions.YarnException;
-import org.apache.hadoop.yarn.util.ConverterUtils;
-import org.apache.helix.HelixController;
-import org.apache.helix.api.accessor.ClusterAccessor;
-import org.apache.helix.api.config.ClusterConfig;
-import org.apache.helix.api.config.ResourceConfig;
-import org.apache.helix.api.id.ClusterId;
-import org.apache.helix.api.id.ControllerId;
-import org.apache.helix.api.id.ResourceId;
-import org.apache.helix.controller.provisioner.ProvisionerConfig;
-import org.apache.helix.controller.rebalancer.config.FullAutoRebalancerConfig;
-import org.apache.helix.controller.rebalancer.config.RebalancerConfig;
-import org.apache.helix.manager.zk.ZkHelixConnection;
-import org.apache.helix.model.StateModelDefinition;
-import org.apache.helix.tools.StateModelConfigGenerator;
-import org.apache.log4j.Logger;
-
-/**
- * This will <br/>
- * <ul>
- * <li>start zookeeper automatically</li>
- * <li>create the cluster</li>
- * <li>set up resource(s)</li>
- * <li>start helix controller</li>
- * </ul>
- */
-public class HelixYarnApplicationMasterMain {
- public static Logger LOG = Logger.getLogger(HelixYarnApplicationMasterMain.class);
-
- @SuppressWarnings("unchecked")
- public static void main(String[] args) throws Exception{
- Map<String, String> env = System.getenv();
- LOG.info("Starting app master with the following environment variables");
- for (String key : env.keySet()) {
- LOG.info(key + "\t\t=" + env.get(key));
- }
- int numContainers = 1;
-
- Options opts;
- opts = new Options();
- opts.addOption("num_containers", true, "Number of containers");
- try {
- CommandLine cliParser = new GnuParser().parse(opts, args);
- numContainers = Integer.parseInt(cliParser.getOptionValue("num_containers"));
- } catch (Exception e) {
- LOG.error("Error parsing input arguments" + Arrays.toString(args), e);
- }
-
- // START ZOOKEEPER
- String dataDir = "dataDir";
- String logDir = "logDir";
- IDefaultNameSpace defaultNameSpace = new IDefaultNameSpace() {
-
- @Override
- public void createDefaultNameSpace(ZkClient zkClient) {
-
- }
- };
- try {
- FileUtils.deleteDirectory(new File(dataDir));
- FileUtils.deleteDirectory(new File(logDir));
- } catch (IOException e) {
- LOG.error(e);
- }
-
- final ZkServer server = new ZkServer(dataDir, logDir, defaultNameSpace);
- server.start();
-
- // start
- AppMasterConfig appMasterConfig = new AppMasterConfig();
- String containerIdStr = appMasterConfig.getContainerId();
- ContainerId containerId = ConverterUtils.toContainerId(containerIdStr);
- ApplicationAttemptId appAttemptID = containerId.getApplicationAttemptId();
-
- String configFile = AppMasterConfig.AppEnvironment.APP_SPEC_FILE.toString();
- String className = appMasterConfig.getApplicationSpecFactory();
-
- GenericApplicationMaster genericApplicationMaster = new GenericApplicationMaster(appAttemptID);
- try {
- genericApplicationMaster.start();
- } catch (Exception e) {
- LOG.error("Unable to start application master: ", e);
- }
- ApplicationSpecFactory factory = HelixYarnUtil.createInstance(className);
- YarnProvisioner.applicationMaster = genericApplicationMaster;
- YarnProvisioner.applicationMasterConfig = appMasterConfig;
- ApplicationSpec applicationSpec = factory.fromYaml(new FileInputStream(configFile));
- YarnProvisioner.applicationSpec = applicationSpec;
- String zkAddress = appMasterConfig.getZKAddress();
- String clusterName = appMasterConfig.getAppName();
-
- // CREATE CLUSTER and setup the resources
- // connect
- ZkHelixConnection connection = new ZkHelixConnection(zkAddress);
- connection.connect();
-
- // create the cluster
- ClusterId clusterId = ClusterId.from(clusterName);
- ClusterAccessor clusterAccessor = connection.createClusterAccessor(clusterId);
- StateModelDefinition statelessService =
- new StateModelDefinition(StateModelConfigGenerator.generateConfigForStatelessService());
- clusterAccessor.createCluster(new ClusterConfig.Builder(clusterId).addStateModelDefinition(
- statelessService).build());
- for (String service : applicationSpec.getServices()) {
- String resourceName = service;
- // add the resource with the local provisioner
- ResourceId resourceId = ResourceId.from(resourceName);
- YarnProvisionerConfig provisionerConfig = new YarnProvisionerConfig(resourceId);
- ServiceConfig serviceConfig = applicationSpec.getServiceConfig(resourceName);
- provisionerConfig.setNumContainers(serviceConfig.getIntField("num_containers", 1));
- serviceConfig.setSimpleField("service_name", service);
- FullAutoRebalancerConfig.Builder rebalancerConfigBuilder =
- new FullAutoRebalancerConfig.Builder(resourceId);
- RebalancerConfig rebalancerConfig =
- rebalancerConfigBuilder.stateModelDefId(statelessService.getStateModelDefId())//
- .build();
- ResourceConfig.Builder resourceConfigBuilder =
- new ResourceConfig.Builder(ResourceId.from(resourceName));
- ResourceConfig resourceConfig = resourceConfigBuilder.provisionerConfig(provisionerConfig) //
- .rebalancerConfig(rebalancerConfig) //
- .userConfig(serviceConfig) //
- .build();
- clusterAccessor.addResourceToCluster(resourceConfig);
- }
- // start controller
- ControllerId controllerId = ControllerId.from("controller1");
- HelixController controller = connection.createController(clusterId, controllerId);
- controller.start();
-
- Thread shutdownhook = new Thread(new Runnable() {
- @Override
- public void run() {
- server.shutdown();
- }
- });
- Runtime.getRuntime().addShutdownHook(shutdownhook);
- Thread.sleep(10000);
-
- }
-}
http://git-wip-us.apache.org/repos/asf/helix/blob/8992aa5a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/HelixYarnUtil.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/HelixYarnUtil.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/HelixYarnUtil.java
deleted file mode 100644
index ad606ba..0000000
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/HelixYarnUtil.java
+++ /dev/null
@@ -1,42 +0,0 @@
-package org.apache.helix.provisioning.yarn;
-
-import org.apache.log4j.Logger;
-
-public class HelixYarnUtil {
- private static Logger LOG = Logger.getLogger(HelixYarnUtil.class);
-
- @SuppressWarnings("unchecked")
- public static <T extends ApplicationSpecFactory> T createInstance(String className) {
- Class<ApplicationSpecFactory> factoryClazz = null;
- {
- try {
- factoryClazz =
- (Class<ApplicationSpecFactory>) Thread.currentThread().getContextClassLoader()
- .loadClass(className);
- } catch (ClassNotFoundException e) {
- try {
- factoryClazz =
- (Class<ApplicationSpecFactory>) ClassLoader.getSystemClassLoader().loadClass(
- className);
- } catch (ClassNotFoundException e1) {
- try {
- factoryClazz = (Class<ApplicationSpecFactory>) Class.forName(className);
- } catch (ClassNotFoundException e2) {
-
- }
- }
- }
- }
- System.out.println(System.getProperty("java.class.path"));
- if (factoryClazz == null) {
- LOG.error("Unable to find class:" + className);
- }
- ApplicationSpecFactory factory = null;
- try {
- factory = factoryClazz.newInstance();
- } catch (Exception e) {
- LOG.error("Unable to create instance of class: " + className, e);
- }
- return (T) factory;
- }
-}
http://git-wip-us.apache.org/repos/asf/helix/blob/8992aa5a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/NMCallbackHandler.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/NMCallbackHandler.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/NMCallbackHandler.java
index 1566c28..f7c3a9f 100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/NMCallbackHandler.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/NMCallbackHandler.java
@@ -9,6 +9,8 @@ import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.client.api.async.NMClientAsync;
+import org.apache.helix.provisioning.ContainerLaunchResponse;
+import org.apache.helix.provisioning.ContainerStopResponse;
import org.apache.log4j.Logger;
import com.google.common.annotations.VisibleForTesting;
http://git-wip-us.apache.org/repos/asf/helix/blob/8992aa5a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/ParticipantLauncher.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/ParticipantLauncher.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/ParticipantLauncher.java
deleted file mode 100644
index 1a21a71..0000000
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/ParticipantLauncher.java
+++ /dev/null
@@ -1,135 +0,0 @@
-package org.apache.helix.provisioning.yarn;
-
-import java.util.Arrays;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.commons.cli.CommandLine;
-import org.apache.commons.cli.GnuParser;
-import org.apache.commons.cli.Options;
-import org.apache.helix.HelixConnection;
-import org.apache.helix.NotificationContext;
-import org.apache.helix.api.id.ClusterId;
-import org.apache.helix.api.id.ParticipantId;
-import org.apache.helix.manager.zk.AbstractParticipantService;
-import org.apache.helix.manager.zk.ZkHelixConnection;
-import org.apache.helix.messaging.handling.HelixTaskResult;
-import org.apache.helix.messaging.handling.MessageHandler;
-import org.apache.helix.messaging.handling.MessageHandlerFactory;
-import org.apache.helix.model.Message;
-import org.apache.helix.model.Message.MessageType;
-import org.apache.log4j.Logger;
-
-/**
- * Main class that invokes the Participant Api
- */
-public class ParticipantLauncher {
- private static Logger LOG = Logger.getLogger(ParticipantLauncher.class);
-
- public static void main(String[] args) {
-
- System.out.println("Starting Helix Participant: " + Arrays.toString(args));
- Options opts;
- opts = new Options();
- opts.addOption("cluster", true, "Cluster name, default app name");
- opts.addOption("participantId", true, "Participant Id");
- opts.addOption("zkAddress", true, "Zookeeper address");
- opts.addOption("participantClass", true, "Participant service class");
- try {
- CommandLine cliParser = new GnuParser().parse(opts, args);
- String zkAddress = cliParser.getOptionValue("zkAddress");
- final HelixConnection connection = new ZkHelixConnection(zkAddress);
- connection.connect();
- ClusterId clusterId = ClusterId.from(cliParser.getOptionValue("cluster"));
- ParticipantId participantId = ParticipantId.from(cliParser.getOptionValue("participantId"));
- String participantClass = cliParser.getOptionValue("participantClass");
- @SuppressWarnings("unchecked")
- Class<? extends AbstractParticipantService> clazz =
- (Class<? extends AbstractParticipantService>) Class.forName(participantClass);
- final AbstractParticipantService containerParticipant =
- clazz.getConstructor(HelixConnection.class, ClusterId.class, ParticipantId.class)
- .newInstance(connection, clusterId, participantId);
- containerParticipant.startAsync();
- containerParticipant.awaitRunning(60, TimeUnit.SECONDS);
- containerParticipant
- .getParticipant()
- .getMessagingService()
- .registerMessageHandlerFactory(MessageType.SHUTDOWN.toString(),
- new ShutdownMessageHandlerFactory(containerParticipant, connection));
- Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() {
- @Override
- public void run() {
- LOG.info("Received a shutdown signal. Stopping participant");
- containerParticipant.stopAsync();
- containerParticipant.awaitTerminated();
- connection.disconnect();
- }
- }) {
-
- });
- Thread.currentThread().join();
- } catch (Exception e) {
- e.printStackTrace();
- System.out.println("Failed to start Helix participant" + e);
- // System.exit(1);
- }
- try {
- Thread.currentThread().join();
- } catch (InterruptedException e) {
- e.printStackTrace();
- }
-
- }
-
- public static class ShutdownMessageHandlerFactory implements MessageHandlerFactory {
- private final AbstractParticipantService _service;
- private final HelixConnection _connection;
-
- public ShutdownMessageHandlerFactory(AbstractParticipantService service,
- HelixConnection connection) {
- _service = service;
- _connection = connection;
- }
-
- @Override
- public MessageHandler createHandler(Message message, NotificationContext context) {
- return new ShutdownMessageHandler(_service, _connection, message, context);
- }
-
- @Override
- public String getMessageType() {
- return MessageType.SHUTDOWN.toString();
- }
-
- @Override
- public void reset() {
- }
-
- }
-
- public static class ShutdownMessageHandler extends MessageHandler {
- private final AbstractParticipantService _service;
- private final HelixConnection _connection;
-
- public ShutdownMessageHandler(AbstractParticipantService service, HelixConnection connection,
- Message message, NotificationContext context) {
- super(message, context);
- _service = service;
- _connection = connection;
- }
-
- @Override
- public HelixTaskResult handleMessage() throws InterruptedException {
- LOG.info("Received a shutdown message. Trying to shut down.");
- _service.stopAsync();
- _service.awaitTerminated();
- _connection.disconnect();
- System.exit(1);
- return null;
- }
-
- @Override
- public void onError(Exception e, ErrorCode code, ErrorType type) {
- }
-
- }
-}
http://git-wip-us.apache.org/repos/asf/helix/blob/8992aa5a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/RMCallbackHandler.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/RMCallbackHandler.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/RMCallbackHandler.java
index 8612d3a..ced1431 100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/RMCallbackHandler.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/RMCallbackHandler.java
@@ -11,6 +11,9 @@ import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;
import org.apache.hadoop.yarn.client.api.async.AMRMClientAsync;
+import org.apache.helix.provisioning.ContainerAskResponse;
+import org.apache.helix.provisioning.ContainerReleaseResponse;
+import org.apache.helix.provisioning.ContainerStopResponse;
import com.google.common.util.concurrent.SettableFuture;
http://git-wip-us.apache.org/repos/asf/helix/blob/8992aa5a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/ServiceConfig.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/ServiceConfig.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/ServiceConfig.java
deleted file mode 100644
index 87b5f12..0000000
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/ServiceConfig.java
+++ /dev/null
@@ -1,17 +0,0 @@
-package org.apache.helix.provisioning.yarn;
-
-import java.util.HashMap;
-import java.util.Map;
-
-import org.apache.helix.api.Scope;
-import org.apache.helix.api.config.UserConfig;
-import org.apache.helix.api.id.ResourceId;
-
-public class ServiceConfig extends UserConfig{
- public Map<String, String> config = new HashMap<String, String>();
-
- public ServiceConfig(Scope<ResourceId> scope) {
- super(scope);
- }
-
-}
http://git-wip-us.apache.org/repos/asf/helix/blob/8992aa5a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/TaskConfig.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/TaskConfig.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/TaskConfig.java
deleted file mode 100644
index 0b500a9..0000000
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/TaskConfig.java
+++ /dev/null
@@ -1,13 +0,0 @@
-package org.apache.helix.provisioning.yarn;
-
-import java.util.HashMap;
-import java.util.Map;
-
-
-public class TaskConfig {
- public Map<String, String> config = new HashMap<String, String>();
-
- public String getValue(String key) {
- return (config != null ? config.get(key) : null);
- }
-}
http://git-wip-us.apache.org/repos/asf/helix/blob/8992aa5a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/YarnProvisioner.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/YarnProvisioner.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/YarnProvisioner.java
index 2d6e306..833efa5 100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/YarnProvisioner.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/YarnProvisioner.java
@@ -44,6 +44,12 @@ import org.apache.helix.controller.provisioner.Provisioner;
import org.apache.helix.controller.provisioner.TargetProvider;
import org.apache.helix.controller.provisioner.TargetProviderResponse;
import org.apache.helix.model.InstanceConfig;
+import org.apache.helix.provisioning.ApplicationSpec;
+import org.apache.helix.provisioning.ContainerAskResponse;
+import org.apache.helix.provisioning.ContainerLaunchResponse;
+import org.apache.helix.provisioning.ContainerReleaseResponse;
+import org.apache.helix.provisioning.ContainerStopResponse;
+import org.apache.helix.provisioning.ParticipantLauncher;
import com.google.common.base.Function;
import com.google.common.collect.Lists;
http://git-wip-us.apache.org/repos/asf/helix/blob/8992aa5a/recipes/helloworld-provisioning-yarn/pom.xml
----------------------------------------------------------------------
diff --git a/recipes/helloworld-provisioning-yarn/pom.xml b/recipes/helloworld-provisioning-yarn/pom.xml
new file mode 100644
index 0000000..4cef9a7
--- /dev/null
+++ b/recipes/helloworld-provisioning-yarn/pom.xml
@@ -0,0 +1,159 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied. See the License for the
+specific language governing permissions and limitations
+under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+
+ <parent>
+ <groupId>org.apache.helix.recipes</groupId>
+ <artifactId>recipes</artifactId>
+ <version>0.7.1-incubating-SNAPSHOT</version>
+ </parent>
+
+ <artifactId>helloworld-provisioning-yarn</artifactId>
+ <packaging>bundle</packaging>
+ <name>Apache Helix :: Recipes :: Provisioning :: YARN :: Hello World</name>
+
+ <properties>
+ <osgi.import>
+ org.apache.helix*,
+ org.apache.log4j,
+ *
+ </osgi.import>
+ <osgi.export>org.apache.helix.provisioning.yarn.example*;version="${project.version};-noimport:=true</osgi.export>
+ </properties>
+
+ <dependencies>
+ <dependency>
+ <groupId>org.testng</groupId>
+ <artifactId>testng</artifactId>
+ <version>6.0.1</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.helix</groupId>
+ <artifactId>helix-core</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.helix</groupId>
+ <artifactId>helix-provisioning</artifactId>
+ <version>0.7.1-incubating-SNAPSHOT</version>
+ </dependency>
+ <dependency>
+ <groupId>log4j</groupId>
+ <artifactId>log4j</artifactId>
+ <exclusions>
+ <exclusion>
+ <groupId>javax.mail</groupId>
+ <artifactId>mail</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>javax.jms</groupId>
+ <artifactId>jms</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>com.sun.jdmk</groupId>
+ <artifactId>jmxtools</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>com.sun.jmx</groupId>
+ <artifactId>jmxri</artifactId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+ </dependencies>
+ <build>
+ <pluginManagement>
+ <plugins>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>appassembler-maven-plugin</artifactId>
+ <configuration>
+ <!-- Set the target configuration directory to be used in the bin scripts -->
+ <!-- <configurationDirectory>conf</configurationDirectory> -->
+ <!-- Copy the contents from "/src/main/config" to the target configuration
+ directory in the assembled application -->
+ <!-- <copyConfigurationDirectory>true</copyConfigurationDirectory> -->
+ <!-- Include the target configuration directory in the beginning of
+ the classpath declaration in the bin scripts -->
+ <includeConfigurationDirectoryInClasspath>true</includeConfigurationDirectoryInClasspath>
+ <assembleDirectory>${project.build.directory}/${project.artifactId}-pkg</assembleDirectory>
+ <!-- Extra JVM arguments that will be included in the bin scripts -->
+ <extraJvmArguments>-Xms512m -Xmx512m</extraJvmArguments>
+ <!-- Generate bin scripts for windows and unix pr default -->
+ <platforms>
+ <platform>windows</platform>
+ <platform>unix</platform>
+ </platforms>
+ </configuration>
+ <executions>
+ <execution>
+ <phase>package</phase>
+ <goals>
+ <goal>assemble</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.rat</groupId>
+ <artifactId>apache-rat-plugin</artifactId>
+ <configuration>
+ <excludes combine.children="append">
+ </excludes>
+ </configuration>
+ </plugin>
+ </plugins>
+ </pluginManagement>
+ <plugins>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>appassembler-maven-plugin</artifactId>
+ <configuration>
+ <programs>
+ <program>
+ <mainClass>org.apache.helix.provisioning.yarn.Client</mainClass>
+ <name>yarn-job-launcher</name>
+ </program>
+ <program>
+ <mainClass>org.apache.helix.provisioning.yarn.AppLauncher</mainClass>
+ <name>app-launcher</name>
+ </program>
+ </programs>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-assembly-plugin</artifactId>
+ <configuration>
+ <descriptors>
+ <descriptor>src/assemble/assembly.xml</descriptor>
+ </descriptors>
+ </configuration>
+ <executions>
+ <execution>
+ <phase>package</phase>
+ <goals>
+ <goal>single</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+</project>
http://git-wip-us.apache.org/repos/asf/helix/blob/8992aa5a/recipes/helloworld-provisioning-yarn/run.sh
----------------------------------------------------------------------
diff --git a/recipes/helloworld-provisioning-yarn/run.sh b/recipes/helloworld-provisioning-yarn/run.sh
new file mode 100755
index 0000000..51d4c35
--- /dev/null
+++ b/recipes/helloworld-provisioning-yarn/run.sh
@@ -0,0 +1,6 @@
+cd ../../../../
+mvn clean install -DskipTests
+cd recipes/provisioning/yarn/helloworld/
+mvn clean package -DskipTests
+chmod +x target/helloworld-pkg/bin/app-launcher.sh
+target/helloworld-pkg/bin/app-launcher.sh org.apache.helix.provisioning.yarn.example.HelloWordAppSpecFactory /Users/kgopalak/Documents/projects/incubator-helix/recipes/provisioning/yarn/helloworld/src/main/resources/hello_world_app_spec.yaml
http://git-wip-us.apache.org/repos/asf/helix/blob/8992aa5a/recipes/helloworld-provisioning-yarn/src/assemble/assembly.xml
----------------------------------------------------------------------
diff --git a/recipes/helloworld-provisioning-yarn/src/assemble/assembly.xml b/recipes/helloworld-provisioning-yarn/src/assemble/assembly.xml
new file mode 100644
index 0000000..c2d08a1
--- /dev/null
+++ b/recipes/helloworld-provisioning-yarn/src/assemble/assembly.xml
@@ -0,0 +1,60 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied. See the License for the
+specific language governing permissions and limitations
+under the License.
+-->
+<assembly>
+ <id>pkg</id>
+ <formats>
+ <format>tar</format>
+ </formats>
+ <fileSets>
+ <fileSet>
+ <directory>${project.build.directory}/${project.artifactId}-pkg/bin</directory>
+ <outputDirectory>bin</outputDirectory>
+ <lineEnding>unix</lineEnding>
+ <fileMode>0755</fileMode>
+ <directoryMode>0755</directoryMode>
+ </fileSet>
+ <fileSet>
+ <directory>${project.build.directory}/${project.artifactId}-pkg/repo/</directory>
+ <outputDirectory>repo</outputDirectory>
+ <fileMode>0755</fileMode>
+ <directoryMode>0755</directoryMode>
+ <excludes>
+ <exclude>**/*.xml</exclude>
+ </excludes>
+ </fileSet>
+ <fileSet>
+ <directory>${project.build.directory}/${project.artifactId}-pkg/conf</directory>
+ <outputDirectory>conf</outputDirectory>
+ <lineEnding>unix</lineEnding>
+ <fileMode>0755</fileMode>
+ <directoryMode>0755</directoryMode>
+ </fileSet>
+ <fileSet>
+ <directory>${project.basedir}</directory>
+ <outputDirectory>/</outputDirectory>
+ <includes>
+ <include>LICENSE</include>
+ <include>NOTICE</include>
+ <include>DISCLAIMER</include>
+ </includes>
+ <fileMode>0755</fileMode>
+ </fileSet>
+ </fileSets>
+</assembly>
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/helix/blob/8992aa5a/recipes/helloworld-provisioning-yarn/src/main/config/log4j.properties
----------------------------------------------------------------------
diff --git a/recipes/helloworld-provisioning-yarn/src/main/config/log4j.properties b/recipes/helloworld-provisioning-yarn/src/main/config/log4j.properties
new file mode 100644
index 0000000..91fac03
--- /dev/null
+++ b/recipes/helloworld-provisioning-yarn/src/main/config/log4j.properties
@@ -0,0 +1,31 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+##
+
+# Set root logger level to DEBUG and its only appender to A1.
+log4j.rootLogger=DEBUG,A1
+
+# A1 is set to be a ConsoleAppender.
+log4j.appender.A1=org.apache.log4j.ConsoleAppender
+
+# A1 uses PatternLayout.
+log4j.appender.A1.layout=org.apache.log4j.PatternLayout
+log4j.appender.A1.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n
+
+log4j.logger.org.I0Itec=ERROR
+log4j.logger.org.apache=ERROR
http://git-wip-us.apache.org/repos/asf/helix/blob/8992aa5a/recipes/helloworld-provisioning-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWordAppSpecFactory.java
----------------------------------------------------------------------
diff --git a/recipes/helloworld-provisioning-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWordAppSpecFactory.java b/recipes/helloworld-provisioning-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWordAppSpecFactory.java
new file mode 100644
index 0000000..03c1341
--- /dev/null
+++ b/recipes/helloworld-provisioning-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWordAppSpecFactory.java
@@ -0,0 +1,92 @@
+package org.apache.helix.provisioning.yarn.example;
+
+import java.io.File;
+import java.io.InputStream;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.helix.provisioning.AppConfig;
+import org.apache.helix.provisioning.ApplicationSpec;
+import org.apache.helix.provisioning.ApplicationSpecFactory;
+import org.apache.helix.provisioning.yarn.example.HelloWorldService;
+import org.apache.helix.provisioning.yarn.example.HelloworldAppSpec;
+import org.yaml.snakeyaml.DumperOptions;
+import org.yaml.snakeyaml.Yaml;
+
+public class HelloWordAppSpecFactory implements ApplicationSpecFactory {
+
+ static HelloworldAppSpec data;
+
+ static {
+ HelloworldAppSpec data = new HelloworldAppSpec();
+ AppConfig appConfig = new AppConfig();
+ appConfig.setValue("k1", "v1");
+ data.setAppConfig(appConfig);
+ data.setAppName("testApp");
+ data.setAppMasterPackageUri(
+ "/Users/kgopalak/Documents/projects/incubator-helix/helix-provisioning/helix-provisioning-0.7.1-incubating-SNAPSHOT-pkg.tar");
+ HashMap<String, Map<String, String>> serviceConfigMap = new HashMap<String, Map<String, String>>();
+ serviceConfigMap.put("HelloWorld", new HashMap<String, String>());
+ serviceConfigMap.get("HelloWorld").put("k1", "v1");
+ data.setServiceConfigMap(serviceConfigMap);
+ HashMap<String, String> serviceMainClassMap = new HashMap<String, String>();
+ serviceMainClassMap.put("HelloWorld", HelloWorldService.class.getCanonicalName());
+ data.setServiceMainClassMap(serviceMainClassMap);
+ HashMap<String, String> servicePackageURIMap = new HashMap<String, String>();
+ servicePackageURIMap
+ .put(
+ "HelloWorld",
+ "/Users/kgopalak/Documents/projects/incubator-helix/helix-provisioning/helix-provisioning-0.7.1-incubating-SNAPSHOT-pkg.tar");
+ data.setServicePackageURIMap(servicePackageURIMap);
+ data.setServices(Arrays.asList(new String[] {
+ "HelloWorld"
+ })); }
+
+ @Override
+ public ApplicationSpec fromYaml(InputStream inputstream) {
+ return (ApplicationSpec) new Yaml().load(inputstream);
+ // return data;
+ }
+
+ public static void main(String[] args) {
+ DumperOptions options = new DumperOptions();
+ options.setPrettyFlow(true);
+
+ Yaml yaml = new Yaml(options);
+ HelloworldAppSpec data = new HelloworldAppSpec();
+ AppConfig appConfig = new AppConfig();
+ appConfig.setValue("k1", "v1");
+ data.setAppConfig(appConfig);
+ data.setAppName("testApp");
+ data.setAppMasterPackageUri(
+ "/Users/kgopalak/Documents/projects/incubator-helix/helix-provisioning/helix-provisioning-0.7.1-incubating-SNAPSHOT-pkg.tar");
+ HashMap<String, Map<String, String>> serviceConfigMap = new HashMap<String, Map<String, String>>();
+ serviceConfigMap.put("HelloWorld", new HashMap<String, String>());
+ serviceConfigMap.get("HelloWorld").put("k1", "v1");
+ data.setServiceConfigMap(serviceConfigMap);
+ HashMap<String, String> serviceMainClassMap = new HashMap<String, String>();
+ serviceMainClassMap.put("HelloWorld", HelloWorldService.class.getCanonicalName());
+ data.setServiceMainClassMap(serviceMainClassMap);
+ HashMap<String, String> servicePackageURIMap = new HashMap<String, String>();
+ servicePackageURIMap
+ .put(
+ "HelloWorld",
+ "/Users/kgopalak/Documents/projects/incubator-helix/helix-provisioning/helix-provisioning-0.7.1-incubating-SNAPSHOT-pkg.tar");
+ data.setServicePackageURIMap(servicePackageURIMap);
+ data.setServices(Arrays.asList(new String[] {
+ "HelloWorld"
+ }));
+ String dump = yaml.dump(data);
+ System.out.println(dump);
+
+ InputStream resourceAsStream = ClassLoader.getSystemClassLoader().getResourceAsStream("hello_world_app_spec.yaml");
+ HelloworldAppSpec load = yaml.loadAs(resourceAsStream,HelloworldAppSpec.class);
+ String dumpnew = yaml.dump(load);
+ System.out.println(dumpnew.equals(dump));
+
+ System.out.println("==================================");
+ System.out.println(dumpnew);
+
+ }
+}
http://git-wip-us.apache.org/repos/asf/helix/blob/8992aa5a/recipes/helloworld-provisioning-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldService.java
----------------------------------------------------------------------
diff --git a/recipes/helloworld-provisioning-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldService.java b/recipes/helloworld-provisioning-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldService.java
new file mode 100644
index 0000000..8999817
--- /dev/null
+++ b/recipes/helloworld-provisioning-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldService.java
@@ -0,0 +1,41 @@
+package org.apache.helix.provisioning.yarn.example;
+
+import org.apache.helix.HelixConnection;
+import org.apache.helix.api.accessor.ResourceAccessor;
+import org.apache.helix.api.config.UserConfig;
+import org.apache.helix.api.id.ClusterId;
+import org.apache.helix.api.id.ParticipantId;
+import org.apache.helix.api.id.ResourceId;
+import org.apache.helix.api.id.StateModelDefId;
+import org.apache.helix.manager.zk.AbstractParticipantService;
+import org.apache.helix.provisioning.yarn.example.HelloWorldStateModelFactory;
+import org.apache.log4j.Logger;
+
+public class HelloWorldService extends AbstractParticipantService {
+
+ private static Logger LOG = Logger.getLogger(AbstractParticipantService.class);
+
+ static String SERVICE_NAME = "HelloWorld";
+
+ public HelloWorldService(HelixConnection connection, ClusterId clusterId,
+ ParticipantId participantId) {
+ super(connection, clusterId, participantId);
+ }
+
+ /**
+ * init method to setup appropriate call back handlers.
+ */
+ @Override
+ public void init() {
+ ClusterId clusterId = getClusterId();
+ ResourceAccessor resourceAccessor = getConnection().createResourceAccessor(clusterId);
+ UserConfig serviceConfig = resourceAccessor.readUserConfig(ResourceId.from(SERVICE_NAME));
+ LOG.info("Starting service:" + SERVICE_NAME + " with configuration:" + serviceConfig);
+
+ HelloWorldStateModelFactory stateModelFactory = new HelloWorldStateModelFactory();
+ getParticipant().getStateMachineEngine().registerStateModelFactory(
+ StateModelDefId.from("StatelessService"), stateModelFactory);
+
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/helix/blob/8992aa5a/recipes/helloworld-provisioning-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldStateModel.java
----------------------------------------------------------------------
diff --git a/recipes/helloworld-provisioning-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldStateModel.java b/recipes/helloworld-provisioning-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldStateModel.java
new file mode 100644
index 0000000..078d847
--- /dev/null
+++ b/recipes/helloworld-provisioning-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldStateModel.java
@@ -0,0 +1,33 @@
+package org.apache.helix.provisioning.yarn.example;
+
+import org.apache.helix.NotificationContext;
+import org.apache.helix.api.id.PartitionId;
+import org.apache.helix.model.Message;
+import org.apache.helix.participant.statemachine.StateModel;
+import org.apache.helix.participant.statemachine.StateModelInfo;
+import org.apache.helix.participant.statemachine.Transition;
+import org.apache.log4j.Logger;
+
+@StateModelInfo(initialState = "OFFLINE", states = {
+ "OFFLINE", "ONLINE", "ERROR"
+})
+public class HelloWorldStateModel extends StateModel {
+
+ private static Logger LOG = Logger.getLogger(HelloWorldStateModel.class);
+
+ public HelloWorldStateModel(PartitionId partitionId) {
+ // ignore the partitionId
+ }
+
+ @Transition(to = "ONLINE", from = "OFFLINE")
+ public void onBecomeOnlineFromOffline(Message message, NotificationContext context)
+ throws Exception {
+ LOG.info("Started HelloWorld service");
+ }
+
+ @Transition(to = "OFFLINE", from = "ONLINE")
+ public void onBecomeOfflineFromOnline(Message message, NotificationContext context)
+ throws InterruptedException {
+ LOG.info("Stopped HelloWorld service");
+ }
+}
http://git-wip-us.apache.org/repos/asf/helix/blob/8992aa5a/recipes/helloworld-provisioning-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldStateModelFactory.java
----------------------------------------------------------------------
diff --git a/recipes/helloworld-provisioning-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldStateModelFactory.java b/recipes/helloworld-provisioning-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldStateModelFactory.java
new file mode 100644
index 0000000..2766f6d
--- /dev/null
+++ b/recipes/helloworld-provisioning-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldStateModelFactory.java
@@ -0,0 +1,13 @@
+package org.apache.helix.provisioning.yarn.example;
+
+import org.apache.helix.api.id.PartitionId;
+import org.apache.helix.participant.statemachine.HelixStateModelFactory;
+import org.apache.helix.participant.statemachine.StateModel;
+import org.apache.helix.provisioning.yarn.example.HelloWorldStateModel;
+
+public class HelloWorldStateModelFactory extends HelixStateModelFactory<StateModel> {
+ @Override
+ public StateModel createNewStateModel(PartitionId partitionId) {
+ return new HelloWorldStateModel(partitionId);
+ }
+}
http://git-wip-us.apache.org/repos/asf/helix/blob/8992aa5a/recipes/helloworld-provisioning-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/HelloworldAppSpec.java
----------------------------------------------------------------------
diff --git a/recipes/helloworld-provisioning-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/HelloworldAppSpec.java b/recipes/helloworld-provisioning-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/HelloworldAppSpec.java
new file mode 100644
index 0000000..588c84c
--- /dev/null
+++ b/recipes/helloworld-provisioning-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/HelloworldAppSpec.java
@@ -0,0 +1,138 @@
+package org.apache.helix.provisioning.yarn.example;
+
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.helix.api.Scope;
+import org.apache.helix.api.config.ParticipantConfig;
+import org.apache.helix.api.config.ResourceConfig;
+import org.apache.helix.api.config.ResourceConfig.Builder;
+import org.apache.helix.api.config.UserConfig;
+import org.apache.helix.api.id.ParticipantId;
+import org.apache.helix.api.id.ResourceId;
+import org.apache.helix.provisioning.AppConfig;
+import org.apache.helix.provisioning.ApplicationSpec;
+import org.apache.helix.provisioning.ServiceConfig;
+import org.apache.helix.provisioning.TaskConfig;
+
+public class HelloworldAppSpec implements ApplicationSpec {
+
+ public String _appName;
+
+ public AppConfig _appConfig;
+
+ public List<String> _services;
+
+ private String _appMasterPackageUri;
+
+ private Map<String, String> _servicePackageURIMap;
+
+ private Map<String, String> _serviceMainClassMap;
+
+ private Map<String, Map<String, String>> _serviceConfigMap;
+
+ private List<TaskConfig> _taskConfigs;
+
+ public AppConfig getAppConfig() {
+ return _appConfig;
+ }
+
+ public void setAppConfig(AppConfig appConfig) {
+ _appConfig = appConfig;
+ }
+
+ public String getAppMasterPackageUri() {
+ return _appMasterPackageUri;
+ }
+
+ public void setAppMasterPackageUri(String appMasterPackageUri) {
+ _appMasterPackageUri = appMasterPackageUri;
+ }
+
+ public Map<String, String> getServicePackageURIMap() {
+ return _servicePackageURIMap;
+ }
+
+ public void setServicePackageURIMap(Map<String, String> servicePackageURIMap) {
+ _servicePackageURIMap = servicePackageURIMap;
+ }
+
+ public Map<String, String> getServiceMainClassMap() {
+ return _serviceMainClassMap;
+ }
+
+ public void setServiceMainClassMap(Map<String, String> serviceMainClassMap) {
+ _serviceMainClassMap = serviceMainClassMap;
+ }
+
+ public Map<String, Map<String, String>> getServiceConfigMap() {
+ return _serviceConfigMap;
+ }
+
+ public void setServiceConfigMap(Map<String, Map<String, String>> serviceConfigMap) {
+ _serviceConfigMap = serviceConfigMap;
+ }
+
+ public void setAppName(String appName) {
+ _appName = appName;
+ }
+
+ public void setServices(List<String> services) {
+ _services = services;
+ }
+
+ public void setTaskConfigs(List<TaskConfig> taskConfigs) {
+ _taskConfigs = taskConfigs;
+ }
+
+ @Override
+ public String getAppName() {
+ return _appName;
+ }
+
+ @Override
+ public AppConfig getConfig() {
+ return _appConfig;
+ }
+
+ @Override
+ public List<String> getServices() {
+ return _services;
+ }
+
+ @Override
+ public URI getAppMasterPackage() {
+ try {
+ return new URI(_appMasterPackageUri);
+ } catch (URISyntaxException e) {
+ return null;
+ }
+ }
+
+ @Override
+ public URI getServicePackage(String serviceName) {
+ try {
+ return new URI(_servicePackageURIMap.get(serviceName));
+ } catch (URISyntaxException e) {
+ return null;
+ }
+ }
+
+ @Override
+ public String getServiceMainClass(String service) {
+ return _serviceMainClassMap.get(service);
+ }
+
+ @Override
+ public ServiceConfig getServiceConfig(String serviceName) {
+ return new ServiceConfig(Scope.resource(ResourceId.from(serviceName)));
+ }
+
+ @Override
+ public List<TaskConfig> getTaskConfigs() {
+ return _taskConfigs;
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/helix/blob/8992aa5a/recipes/helloworld-provisioning-yarn/src/main/resources/hello_world_app_spec.yaml
----------------------------------------------------------------------
diff --git a/recipes/helloworld-provisioning-yarn/src/main/resources/hello_world_app_spec.yaml b/recipes/helloworld-provisioning-yarn/src/main/resources/hello_world_app_spec.yaml
new file mode 100644
index 0000000..d8d1dd2
--- /dev/null
+++ b/recipes/helloworld-provisioning-yarn/src/main/resources/hello_world_app_spec.yaml
@@ -0,0 +1,24 @@
+!!org.apache.helix.provisioning.yarn.example.HelloworldAppSpec
+appConfig:
+ config: {
+ k1: v1
+ }
+appMasterPackageUri: 'file:///Users/kgopalak/Documents/projects/incubator-helix/recipes/provisioning/yarn/helloworld/target/helloworld-0.7.1-incubating-SNAPSHOT-pkg.tar'
+appName: testApp
+serviceConfigMap:
+ HelloWorld: {
+ num_containers: 3,
+ memory: 1024
+ }
+serviceMainClassMap: {
+ HelloWorld: org.apache.helix.provisioning.yarn.example.HelloWorldService
+}
+servicePackageURIMap: {
+ HelloWorld: 'file:///Users/kgopalak/Documents/projects/incubator-helix/recipes/provisioning/yarn/helloworld/target/helloworld-0.7.1-incubating-SNAPSHOT-pkg.tar'
+}
+services: [
+ HelloWorld]
+taskConfigs: null
+
+
+
http://git-wip-us.apache.org/repos/asf/helix/blob/8992aa5a/recipes/helloworld-provisioning-yarn/src/test/conf/testng.xml
----------------------------------------------------------------------
diff --git a/recipes/helloworld-provisioning-yarn/src/test/conf/testng.xml b/recipes/helloworld-provisioning-yarn/src/test/conf/testng.xml
new file mode 100644
index 0000000..37bccf3
--- /dev/null
+++ b/recipes/helloworld-provisioning-yarn/src/test/conf/testng.xml
@@ -0,0 +1,27 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied. See the License for the
+specific language governing permissions and limitations
+under the License.
+-->
+<!DOCTYPE suite SYSTEM "http://testng.org/testng-1.0.dtd">
+<suite name="Suite" parallel="none">
+ <test name="Test" preserve-order="false">
+ <packages>
+ <package name="org.apache.helix.agent"/>
+ </packages>
+ </test>
+</suite>
http://git-wip-us.apache.org/repos/asf/helix/blob/8992aa5a/recipes/pom.xml
----------------------------------------------------------------------
diff --git a/recipes/pom.xml b/recipes/pom.xml
index 7d9952a..5d137c2 100644
--- a/recipes/pom.xml
+++ b/recipes/pom.xml
@@ -36,7 +36,7 @@ under the License.
<module>user-defined-rebalancer</module>
<module>task-execution</module>
<module>service-discovery</module>
- <module>provisioning</module>
+ <module>helloworld-provisioning-yarn</module>
</modules>
<build>
http://git-wip-us.apache.org/repos/asf/helix/blob/8992aa5a/recipes/provisioning/pom.xml
----------------------------------------------------------------------
diff --git a/recipes/provisioning/pom.xml b/recipes/provisioning/pom.xml
deleted file mode 100644
index dc5277b..0000000
--- a/recipes/provisioning/pom.xml
+++ /dev/null
@@ -1,50 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing,
-software distributed under the License is distributed on an
-"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-KIND, either express or implied. See the License for the
-specific language governing permissions and limitations
-under the License.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
- <modelVersion>4.0.0</modelVersion>
- <parent>
- <groupId>org.apache.helix.recipes</groupId>
- <artifactId>recipes</artifactId>
- <version>0.7.1-incubating-SNAPSHOT</version>
- </parent>
- <groupId>org.apache.helix.recipes.provisioning</groupId>
- <artifactId>provisioning</artifactId>
- <packaging>pom</packaging>
- <name>Apache Helix :: Recipes :: Provisioning</name>
-
- <modules>
- <module>yarn</module>
- </modules>
-
- <build>
- <pluginManagement>
- <plugins>
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-deploy-plugin</artifactId>
- <configuration>
- <skip>true</skip>
- </configuration>
- </plugin>
- </plugins>
- </pluginManagement>
- </build>
-
-</project>
[38/50] [abbrv] git commit: fix some issues with the task recipe
Posted by ka...@apache.org.
fix some issues with the task recipe
Project: http://git-wip-us.apache.org/repos/asf/helix/repo
Commit: http://git-wip-us.apache.org/repos/asf/helix/commit/0a1694b8
Tree: http://git-wip-us.apache.org/repos/asf/helix/tree/0a1694b8
Diff: http://git-wip-us.apache.org/repos/asf/helix/diff/0a1694b8
Branch: refs/heads/master
Commit: 0a1694b88a4238856bab6a281b5a53fd058be094
Parents: 9a2b729
Author: Kanak Biscuitwala <ka...@apache.org>
Authored: Mon Jun 30 09:50:47 2014 -0700
Committer: Kanak Biscuitwala <ka...@apache.org>
Committed: Mon Jun 30 09:50:47 2014 -0700
----------------------------------------------------------------------
helix-core/pom.xml | 4 ++++
helix-core/src/main/java/org/apache/helix/task/TaskDriver.java | 1 +
recipes/jobrunner-yarn/run.sh | 6 +++---
3 files changed, 8 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/helix/blob/0a1694b8/helix-core/pom.xml
----------------------------------------------------------------------
diff --git a/helix-core/pom.xml b/helix-core/pom.xml
index f30abac..0f1f2b9 100644
--- a/helix-core/pom.xml
+++ b/helix-core/pom.xml
@@ -216,6 +216,10 @@ under the License.
<mainClass>org.apache.helix.tools.IntegrationTestUtil</mainClass>
<name>test-util</name>
</program>
+ <program>
+ <mainClass>org.apache.helix.task.TaskDriver</mainClass>
+ <name>task-driver</name>
+ </program>
</programs>
</configuration>
</plugin>
http://git-wip-us.apache.org/repos/asf/helix/blob/0a1694b8/helix-core/src/main/java/org/apache/helix/task/TaskDriver.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/TaskDriver.java b/helix-core/src/main/java/org/apache/helix/task/TaskDriver.java
index c8f0d08..d5e9101 100644
--- a/helix-core/src/main/java/org/apache/helix/task/TaskDriver.java
+++ b/helix-core/src/main/java/org/apache/helix/task/TaskDriver.java
@@ -129,6 +129,7 @@ public class TaskDriver {
break;
case list:
driver.list(resource);
+ break;
default:
throw new IllegalArgumentException("Unknown command " + args[0]);
}
http://git-wip-us.apache.org/repos/asf/helix/blob/0a1694b8/recipes/jobrunner-yarn/run.sh
----------------------------------------------------------------------
diff --git a/recipes/jobrunner-yarn/run.sh b/recipes/jobrunner-yarn/run.sh
index 07448bb..a8f4f7c 100755
--- a/recipes/jobrunner-yarn/run.sh
+++ b/recipes/jobrunner-yarn/run.sh
@@ -1,6 +1,6 @@
#cd ../../
#mvn clean install -DskipTests
#cd recipes/helloworld-provisioning-yarn
-mvn clean package -DskipTests
-chmod +x target/helloworld-provisioning-yarn-pkg/bin/app-launcher.sh
-target/helloworld-provisioning-yarn/pkg/bin/app-launcher.sh org.apache.helix.provisioning.yarn.example.HelloWordAppSpecFactory /Users/kgopalak/Documents/projects/incubator-helix/recipes/helloworld-provisioning-yarn/src/main/resources/hello_world_app_spec.yaml
+#mvn install package -DskipTests
+chmod +x target/jobrunner-yarn-pkg/bin/app-launcher.sh
+target/jobrunner-yarn-pkg/bin/app-launcher.sh --app_spec_provider org.apache.helix.provisioning.yarn.example.MyTaskAppSpecFactory --app_config_spec /Users/kbiscuit/helix/incubator-helix/recipes/jobrunner-yarn/src/main/resources/job_runner_app_spec.yaml
[42/50] [abbrv] git commit: Task framework recipe runs on distributed
YARN
Posted by ka...@apache.org.
Task framework recipe runs on distributed YARN
Project: http://git-wip-us.apache.org/repos/asf/helix/repo
Commit: http://git-wip-us.apache.org/repos/asf/helix/commit/feaea562
Tree: http://git-wip-us.apache.org/repos/asf/helix/tree/feaea562
Diff: http://git-wip-us.apache.org/repos/asf/helix/diff/feaea562
Branch: refs/heads/master
Commit: feaea562f2b52ebad5cfd6aba92864cd411a582f
Parents: 99f5ff7
Author: Kanak Biscuitwala <ka...@apache.org>
Authored: Thu Jul 3 16:47:50 2014 -0700
Committer: Kanak Biscuitwala <ka...@apache.org>
Committed: Thu Jul 3 16:47:50 2014 -0700
----------------------------------------------------------------------
.../helix/model/ClusterConfiguration.java | 31 +++++++++------
.../java/org/apache/helix/model/IdealState.java | 16 +++++---
.../org/apache/helix/model/InstanceConfig.java | 41 ++++++++++++--------
.../helix/model/ResourceConfiguration.java | 31 +++++++++------
.../java/org/apache/helix/task/Workflow.java | 16 ++++++++
.../apache/helix/provisioning/TaskConfig.java | 17 ++++++++
.../helix/provisioning/yarn/AppLauncher.java | 23 ++++++++++-
.../provisioning/yarn/AppMasterConfig.java | 17 ++++++--
.../provisioning/yarn/AppMasterLauncher.java | 28 +++++++++++--
.../src/main/resources/job_runner_app_spec.yaml | 8 +---
10 files changed, 167 insertions(+), 61 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/helix/blob/feaea562/helix-core/src/main/java/org/apache/helix/model/ClusterConfiguration.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/model/ClusterConfiguration.java b/helix-core/src/main/java/org/apache/helix/model/ClusterConfiguration.java
index 1e9c205..63f5776 100644
--- a/helix-core/src/main/java/org/apache/helix/model/ClusterConfiguration.java
+++ b/helix-core/src/main/java/org/apache/helix/model/ClusterConfiguration.java
@@ -25,11 +25,14 @@ import org.apache.helix.api.config.NamespacedConfig;
import org.apache.helix.api.config.UserConfig;
import org.apache.helix.api.id.ClusterId;
import org.apache.helix.manager.zk.ZKHelixManager;
+import org.apache.log4j.Logger;
/**
* Persisted configuration properties for a cluster
*/
public class ClusterConfiguration extends HelixProperty {
+ private static final Logger LOG = Logger.getLogger(ClusterConfiguration.class);
+
/**
* Instantiate for an id
* @param id cluster id
@@ -76,21 +79,25 @@ public class ClusterConfiguration extends HelixProperty {
*/
public UserConfig getUserConfig() {
UserConfig userConfig = UserConfig.from(this);
- for (String simpleField : _record.getSimpleFields().keySet()) {
- if (!simpleField.contains(NamespacedConfig.PREFIX_CHAR + "")
- && !simpleField.equals(ZKHelixManager.ALLOW_PARTICIPANT_AUTO_JOIN)) {
- userConfig.setSimpleField(simpleField, _record.getSimpleField(simpleField));
+ try {
+ for (String simpleField : _record.getSimpleFields().keySet()) {
+ if (!simpleField.contains(NamespacedConfig.PREFIX_CHAR + "")
+ && !simpleField.equals(ZKHelixManager.ALLOW_PARTICIPANT_AUTO_JOIN)) {
+ userConfig.setSimpleField(simpleField, _record.getSimpleField(simpleField));
+ }
}
- }
- for (String listField : _record.getListFields().keySet()) {
- if (!listField.contains(NamespacedConfig.PREFIX_CHAR + "")) {
- userConfig.setListField(listField, _record.getListField(listField));
+ for (String listField : _record.getListFields().keySet()) {
+ if (!listField.contains(NamespacedConfig.PREFIX_CHAR + "")) {
+ userConfig.setListField(listField, _record.getListField(listField));
+ }
}
- }
- for (String mapField : _record.getMapFields().keySet()) {
- if (!mapField.contains(NamespacedConfig.PREFIX_CHAR + "")) {
- userConfig.setMapField(mapField, _record.getMapField(mapField));
+ for (String mapField : _record.getMapFields().keySet()) {
+ if (!mapField.contains(NamespacedConfig.PREFIX_CHAR + "")) {
+ userConfig.setMapField(mapField, _record.getMapField(mapField));
+ }
}
+ } catch (NoSuchMethodError e) {
+ LOG.error("Could not parse ClusterConfiguration", e);
}
return userConfig;
}
http://git-wip-us.apache.org/repos/asf/helix/blob/feaea562/helix-core/src/main/java/org/apache/helix/model/IdealState.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/model/IdealState.java b/helix-core/src/main/java/org/apache/helix/model/IdealState.java
index 173e251..cc8fc4b 100644
--- a/helix-core/src/main/java/org/apache/helix/model/IdealState.java
+++ b/helix-core/src/main/java/org/apache/helix/model/IdealState.java
@@ -58,6 +58,8 @@ import com.google.common.collect.Sets;
* The ideal states of all partitions in a resource
*/
public class IdealState extends HelixProperty {
+ private static final Logger LOG = Logger.getLogger(IdealState.class);
+
/**
* Properties that are persisted and are queryable for an ideal state
*/
@@ -760,12 +762,16 @@ public class IdealState extends HelixProperty {
* @param userConfig the user config to update
*/
public void updateUserConfig(UserConfig userConfig) {
- for (String simpleField : _record.getSimpleFields().keySet()) {
- Optional<IdealStateProperty> enumField =
- Enums.getIfPresent(IdealStateProperty.class, simpleField);
- if (!simpleField.contains(NamespacedConfig.PREFIX_CHAR + "") && !enumField.isPresent()) {
- userConfig.setSimpleField(simpleField, _record.getSimpleField(simpleField));
+ try {
+ for (String simpleField : _record.getSimpleFields().keySet()) {
+ Optional<IdealStateProperty> enumField =
+ Enums.getIfPresent(IdealStateProperty.class, simpleField);
+ if (!simpleField.contains(NamespacedConfig.PREFIX_CHAR + "") && !enumField.isPresent()) {
+ userConfig.setSimpleField(simpleField, _record.getSimpleField(simpleField));
+ }
}
+ } catch (NoSuchMethodError e) {
+ LOG.error("Could not update user config", e);
}
}
http://git-wip-us.apache.org/repos/asf/helix/blob/feaea562/helix-core/src/main/java/org/apache/helix/model/InstanceConfig.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/model/InstanceConfig.java b/helix-core/src/main/java/org/apache/helix/model/InstanceConfig.java
index 5f27b05..2dde23e 100644
--- a/helix-core/src/main/java/org/apache/helix/model/InstanceConfig.java
+++ b/helix-core/src/main/java/org/apache/helix/model/InstanceConfig.java
@@ -34,6 +34,7 @@ import org.apache.helix.api.id.PartitionId;
import org.apache.helix.controller.provisioner.ContainerId;
import org.apache.helix.controller.provisioner.ContainerSpec;
import org.apache.helix.controller.provisioner.ContainerState;
+import org.apache.log4j.Logger;
import com.google.common.base.Enums;
import com.google.common.base.Optional;
@@ -42,6 +43,8 @@ import com.google.common.base.Optional;
* Instance configurations
*/
public class InstanceConfig extends HelixProperty {
+ private static final Logger LOG = Logger.getLogger(InstanceConfig.class);
+
/**
* Configurable characteristics of an instance
*/
@@ -279,26 +282,30 @@ public class InstanceConfig extends HelixProperty {
*/
public UserConfig getUserConfig() {
UserConfig userConfig = UserConfig.from(this);
- for (String simpleField : _record.getSimpleFields().keySet()) {
- Optional<InstanceConfigProperty> enumField =
- Enums.getIfPresent(InstanceConfigProperty.class, simpleField);
- if (!simpleField.contains(NamespacedConfig.PREFIX_CHAR + "") && !enumField.isPresent()) {
- userConfig.setSimpleField(simpleField, _record.getSimpleField(simpleField));
+ try {
+ for (String simpleField : _record.getSimpleFields().keySet()) {
+ Optional<InstanceConfigProperty> enumField =
+ Enums.getIfPresent(InstanceConfigProperty.class, simpleField);
+ if (!simpleField.contains(NamespacedConfig.PREFIX_CHAR + "") && !enumField.isPresent()) {
+ userConfig.setSimpleField(simpleField, _record.getSimpleField(simpleField));
+ }
}
- }
- for (String listField : _record.getListFields().keySet()) {
- Optional<InstanceConfigProperty> enumField =
- Enums.getIfPresent(InstanceConfigProperty.class, listField);
- if (!listField.contains(NamespacedConfig.PREFIX_CHAR + "") && !enumField.isPresent()) {
- userConfig.setListField(listField, _record.getListField(listField));
+ for (String listField : _record.getListFields().keySet()) {
+ Optional<InstanceConfigProperty> enumField =
+ Enums.getIfPresent(InstanceConfigProperty.class, listField);
+ if (!listField.contains(NamespacedConfig.PREFIX_CHAR + "") && !enumField.isPresent()) {
+ userConfig.setListField(listField, _record.getListField(listField));
+ }
}
- }
- for (String mapField : _record.getMapFields().keySet()) {
- Optional<InstanceConfigProperty> enumField =
- Enums.getIfPresent(InstanceConfigProperty.class, mapField);
- if (!mapField.contains(NamespacedConfig.PREFIX_CHAR + "") && !enumField.isPresent()) {
- userConfig.setMapField(mapField, _record.getMapField(mapField));
+ for (String mapField : _record.getMapFields().keySet()) {
+ Optional<InstanceConfigProperty> enumField =
+ Enums.getIfPresent(InstanceConfigProperty.class, mapField);
+ if (!mapField.contains(NamespacedConfig.PREFIX_CHAR + "") && !enumField.isPresent()) {
+ userConfig.setMapField(mapField, _record.getMapField(mapField));
+ }
}
+ } catch (NoSuchMethodError e) {
+ LOG.error("Could not parse InstanceConfig", e);
}
return userConfig;
}
http://git-wip-us.apache.org/repos/asf/helix/blob/feaea562/helix-core/src/main/java/org/apache/helix/model/ResourceConfiguration.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/model/ResourceConfiguration.java b/helix-core/src/main/java/org/apache/helix/model/ResourceConfiguration.java
index 65762cf..46d7ed7 100644
--- a/helix-core/src/main/java/org/apache/helix/model/ResourceConfiguration.java
+++ b/helix-core/src/main/java/org/apache/helix/model/ResourceConfiguration.java
@@ -9,6 +9,7 @@ import org.apache.helix.api.id.ResourceId;
import org.apache.helix.controller.provisioner.ProvisionerConfig;
import org.apache.helix.controller.rebalancer.config.RebalancerConfig;
import org.apache.helix.controller.rebalancer.config.RebalancerConfigHolder;
+import org.apache.log4j.Logger;
import com.google.common.base.Enums;
import com.google.common.base.Optional;
@@ -36,6 +37,8 @@ import com.google.common.base.Optional;
* Persisted configuration properties for a resource
*/
public class ResourceConfiguration extends HelixProperty {
+ private static final Logger LOG = Logger.getLogger(ResourceConfiguration.class);
+
public enum Fields {
TYPE
}
@@ -86,21 +89,25 @@ public class ResourceConfiguration extends HelixProperty {
*/
public UserConfig getUserConfig() {
UserConfig userConfig = UserConfig.from(this);
- for (String simpleField : _record.getSimpleFields().keySet()) {
- Optional<Fields> enumField = Enums.getIfPresent(Fields.class, simpleField);
- if (!simpleField.contains(NamespacedConfig.PREFIX_CHAR + "") && !enumField.isPresent()) {
- userConfig.setSimpleField(simpleField, _record.getSimpleField(simpleField));
+ try {
+ for (String simpleField : _record.getSimpleFields().keySet()) {
+ Optional<Fields> enumField = Enums.getIfPresent(Fields.class, simpleField);
+ if (!simpleField.contains(NamespacedConfig.PREFIX_CHAR + "") && !enumField.isPresent()) {
+ userConfig.setSimpleField(simpleField, _record.getSimpleField(simpleField));
+ }
}
- }
- for (String listField : _record.getListFields().keySet()) {
- if (!listField.contains(NamespacedConfig.PREFIX_CHAR + "")) {
- userConfig.setListField(listField, _record.getListField(listField));
+ for (String listField : _record.getListFields().keySet()) {
+ if (!listField.contains(NamespacedConfig.PREFIX_CHAR + "")) {
+ userConfig.setListField(listField, _record.getListField(listField));
+ }
}
- }
- for (String mapField : _record.getMapFields().keySet()) {
- if (!mapField.contains(NamespacedConfig.PREFIX_CHAR + "")) {
- userConfig.setMapField(mapField, _record.getMapField(mapField));
+ for (String mapField : _record.getMapFields().keySet()) {
+ if (!mapField.contains(NamespacedConfig.PREFIX_CHAR + "")) {
+ userConfig.setMapField(mapField, _record.getMapField(mapField));
+ }
}
+ } catch (NoSuchMethodError e) {
+ LOG.error("Could not parse ResourceConfiguration", e);
}
return userConfig;
}
http://git-wip-us.apache.org/repos/asf/helix/blob/feaea562/helix-core/src/main/java/org/apache/helix/task/Workflow.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/Workflow.java b/helix-core/src/main/java/org/apache/helix/task/Workflow.java
index 383180e..1a41e06 100644
--- a/helix-core/src/main/java/org/apache/helix/task/Workflow.java
+++ b/helix-core/src/main/java/org/apache/helix/task/Workflow.java
@@ -22,6 +22,7 @@ package org.apache.helix.task;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
+import java.io.InputStream;
import java.io.Reader;
import java.io.StringReader;
import java.util.ArrayList;
@@ -132,10 +133,25 @@ public class Workflow {
return parse(new StringReader(yaml));
}
+ /**
+ * Read a workflow from an open input stream
+ * @param inputStream the stream
+ * @return Workflow
+ */
+ public static Workflow parse(InputStream inputStream) {
+ Yaml yaml = new Yaml(new Constructor(WorkflowBean.class));
+ WorkflowBean wf = (WorkflowBean) yaml.load(inputStream);
+ return parse(wf);
+ }
+
/** Helper function to parse workflow from a generic {@link Reader} */
private static Workflow parse(Reader reader) throws Exception {
Yaml yaml = new Yaml(new Constructor(WorkflowBean.class));
WorkflowBean wf = (WorkflowBean) yaml.load(reader);
+ return parse(wf);
+ }
+
+ private static Workflow parse(WorkflowBean wf) {
Builder builder = new Builder(wf.name);
for (JobBean job : wf.jobs) {
http://git-wip-us.apache.org/repos/asf/helix/blob/feaea562/helix-provisioning/src/main/java/org/apache/helix/provisioning/TaskConfig.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/TaskConfig.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/TaskConfig.java
index 283538d..442d074 100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/TaskConfig.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/TaskConfig.java
@@ -1,10 +1,27 @@
package org.apache.helix.provisioning;
+import java.net.URI;
+import java.net.URISyntaxException;
import java.util.HashMap;
import java.util.Map;
+import org.apache.log4j.Logger;
+
public class TaskConfig {
+ private static final Logger LOG = Logger.getLogger(TaskConfig.class);
+
public Map<String, String> config = new HashMap<String, String>();
+ public String yamlFile;
+ public String name;
+
+ public URI getYamlURI() {
+ try {
+ return yamlFile != null ? new URI(yamlFile) : null;
+ } catch (URISyntaxException e) {
+ LOG.error("Error parsing URI for task config", e);
+ }
+ return null;
+ }
public String getValue(String key) {
return (config != null ? config.get(key) : null);
http://git-wip-us.apache.org/repos/asf/helix/blob/feaea562/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppLauncher.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppLauncher.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppLauncher.java
index 76b7877..2db4afb 100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppLauncher.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppLauncher.java
@@ -53,6 +53,7 @@ import org.apache.helix.manager.zk.ZkHelixConnection;
import org.apache.helix.provisioning.ApplicationSpec;
import org.apache.helix.provisioning.ApplicationSpecFactory;
import org.apache.helix.provisioning.HelixYarnUtil;
+import org.apache.helix.provisioning.TaskConfig;
/**
* Main class to launch the job.
@@ -151,6 +152,19 @@ public class AppLauncher {
_appMasterConfig.setMainClass(name, serviceMainClass);
}
}
+
+ // Get YAML files describing all workflows to immediately start
+ Map<String, URI> workflowFiles = new HashMap<String, URI>();
+ List<TaskConfig> taskConfigs = _applicationSpec.getTaskConfigs();
+ if (taskConfigs != null) {
+ for (TaskConfig taskConfig : taskConfigs) {
+ URI configUri = taskConfig.getYamlURI();
+ if (taskConfig.name != null && configUri != null) {
+ workflowFiles.put(taskConfig.name, taskConfig.getYamlURI());
+ }
+ }
+ }
+
// set local resources for the application master
// local files or archives as needed
// In this scenario, the jar file for the application master is part of the local resources
@@ -163,6 +177,13 @@ public class AppLauncher {
hdfsDest.get(AppMasterConfig.AppEnvironment.APP_SPEC_FILE.toString()));
localResources.put(AppMasterConfig.AppEnvironment.APP_MASTER_PKG.toString(), appMasterPkg);
localResources.put(AppMasterConfig.AppEnvironment.APP_SPEC_FILE.toString(), appSpecFile);
+ for (String name : workflowFiles.keySet()) {
+ URI uri = workflowFiles.get(name);
+ Path dst = copyToHDFS(fs, name, uri);
+ LocalResource taskLocalResource = setupLocalResource(fs, dst);
+ localResources.put(AppMasterConfig.AppEnvironment.TASK_CONFIG_FILE.toString() + "_" + name,
+ taskLocalResource);
+ }
// Set local resource info into app master container launch context
amContainer.setLocalResources(localResources);
@@ -393,7 +414,7 @@ public class AppLauncher {
prevReport = reportMessage;
Thread.sleep(10000);
} catch (Exception e) {
- LOG.error("Exception while getting info ");
+ LOG.error("Exception while getting info ", e);
break;
}
}
http://git-wip-us.apache.org/repos/asf/helix/blob/feaea562/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppMasterConfig.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppMasterConfig.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppMasterConfig.java
index 9dcabc2..38a0dd1 100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppMasterConfig.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppMasterConfig.java
@@ -19,7 +19,8 @@ public class AppMasterConfig {
APP_SPEC_FILE("APP_SPEC_FILE"),
APP_NAME("APP_NAME"),
APP_ID("APP_ID"),
- APP_SPEC_FACTORY("APP_SPEC_FACTORY");
+ APP_SPEC_FACTORY("APP_SPEC_FACTORY"),
+ TASK_CONFIG_FILE("TASK_CONFIG_FILE");
String _name;
private AppEnvironment(String name) {
@@ -37,8 +38,8 @@ public class AppMasterConfig {
private String get(String key) {
String value = (_envs.containsKey(key)) ? _envs.get(key) : System.getenv().get(key);
- LOG.info("Returning value:"+ value +" for key:'"+ key + "'");
-
+ LOG.info("Returning value:" + value + " for key:'" + key + "'");
+
return value;
}
@@ -83,6 +84,14 @@ public class AppMasterConfig {
_envs.put(serviceName + "_classpath", classpath);
}
+ public void setTaskConfigFile(String configName, String path) {
+ _envs.put(AppEnvironment.TASK_CONFIG_FILE.toString() + "_" + configName, path);
+ }
+
+ public String getTaskConfigFile(String configName) {
+ return get(AppEnvironment.TASK_CONFIG_FILE.toString() + "_" + configName);
+ }
+
public String getApplicationSpecConfigFile() {
return get(AppEnvironment.APP_SPEC_FILE.toString());
}
@@ -97,6 +106,6 @@ public class AppMasterConfig {
}
public void setMainClass(String serviceName, String serviceMainClass) {
- _envs.put(serviceName + "_mainClass", serviceMainClass);
+ _envs.put(serviceName + "_mainClass", serviceMainClass);
}
}
http://git-wip-us.apache.org/repos/asf/helix/blob/feaea562/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppMasterLauncher.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppMasterLauncher.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppMasterLauncher.java
index 523fee0..e7a0f61 100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppMasterLauncher.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppMasterLauncher.java
@@ -3,6 +3,8 @@ package org.apache.helix.provisioning.yarn;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
+import java.io.InputStream;
+import java.net.URI;
import java.util.List;
import java.util.Map;
@@ -11,8 +13,12 @@ import org.I0Itec.zkclient.ZkClient;
import org.I0Itec.zkclient.ZkServer;
import org.apache.commons.cli.Options;
import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.helix.HelixController;
import org.apache.helix.api.accessor.ClusterAccessor;
@@ -150,11 +156,16 @@ public class AppMasterLauncher {
// Start any pre-specified jobs
List<TaskConfig> taskConfigs = applicationSpec.getTaskConfigs();
if (taskConfigs != null) {
+ YarnConfiguration conf = new YarnConfiguration();
+ FileSystem fs;
+ fs = FileSystem.get(conf);
for (TaskConfig taskConfig : taskConfigs) {
- String yamlFile = taskConfig.getValue("yamlFile");
- if (yamlFile != null) {
- File file = new File(yamlFile);
- Workflow workflow = Workflow.parse(file);
+ URI yamlUri = taskConfig.getYamlURI();
+ if (yamlUri != null && taskConfig.name != null) {
+ InputStream is =
+ readFromHDFS(fs, taskConfig.name, yamlUri, applicationSpec,
+ appAttemptID.getApplicationId());
+ Workflow workflow = Workflow.parse(is);
TaskDriver taskDriver = new TaskDriver(new HelixConnectionAdaptor(controller));
taskDriver.start(workflow);
}
@@ -171,4 +182,13 @@ public class AppMasterLauncher {
Thread.sleep(10000);
}
+
+ private static InputStream readFromHDFS(FileSystem fs, String name, URI uri,
+ ApplicationSpec appSpec, ApplicationId appId) throws Exception {
+ // will throw exception if the file name is without extension
+ String extension = uri.getPath().substring(uri.getPath().lastIndexOf(".") + 1);
+ String pathSuffix = appSpec.getAppName() + "/" + appId.getId() + "/" + name + "." + extension;
+ Path dst = new Path(fs.getHomeDirectory(), pathSuffix);
+ return fs.open(dst).getWrappedStream();
+ }
}
http://git-wip-us.apache.org/repos/asf/helix/blob/feaea562/recipes/jobrunner-yarn/src/main/resources/job_runner_app_spec.yaml
----------------------------------------------------------------------
diff --git a/recipes/jobrunner-yarn/src/main/resources/job_runner_app_spec.yaml b/recipes/jobrunner-yarn/src/main/resources/job_runner_app_spec.yaml
index ad62ffc..0945690 100755
--- a/recipes/jobrunner-yarn/src/main/resources/job_runner_app_spec.yaml
+++ b/recipes/jobrunner-yarn/src/main/resources/job_runner_app_spec.yaml
@@ -19,9 +19,5 @@ servicePackageURIMap: {
services: [
JobRunner]
taskConfigs:
- - config: {
- yamlFile: '/Users/kbiscuit/helix/incubator-helix/recipes/jobrunner-yarn/src/main/resources/dummy_job.yaml'
- }
-
-
-
+ - name: JobRunnerWorkflow
+ yamlFile: 'file:///Users/kbiscuit/helix/incubator-helix/recipes/jobrunner-yarn/src/main/resources/dummy_job.yaml'
[24/50] [abbrv] git commit: Adding container admin to start/stop
participants
Posted by ka...@apache.org.
Adding container admin to start/stop participants
Project: http://git-wip-us.apache.org/repos/asf/helix/repo
Commit: http://git-wip-us.apache.org/repos/asf/helix/commit/7d5bd789
Tree: http://git-wip-us.apache.org/repos/asf/helix/tree/7d5bd789
Diff: http://git-wip-us.apache.org/repos/asf/helix/diff/7d5bd789
Branch: refs/heads/master
Commit: 7d5bd789f37eb807f28d0e00810920b699b3d080
Parents: 2339465
Author: Kishore Gopalakrishna <g....@gmail.com>
Authored: Mon Feb 24 17:49:29 2014 -0800
Committer: Kishore Gopalakrishna <g....@gmail.com>
Committed: Mon Feb 24 17:49:29 2014 -0800
----------------------------------------------------------------------
.../stages/ContainerProvisioningStage.java | 2 +-
helix-provisioning/pom.xml | 4 +
.../provisioning/tools/ContainerAdmin.java | 98 ++++++++++++++++++++
3 files changed, 103 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/helix/blob/7d5bd789/helix-core/src/main/java/org/apache/helix/controller/stages/ContainerProvisioningStage.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/controller/stages/ContainerProvisioningStage.java b/helix-core/src/main/java/org/apache/helix/controller/stages/ContainerProvisioningStage.java
index bc3e0c6..ae433e0 100644
--- a/helix-core/src/main/java/org/apache/helix/controller/stages/ContainerProvisioningStage.java
+++ b/helix-core/src/main/java/org/apache/helix/controller/stages/ContainerProvisioningStage.java
@@ -276,7 +276,7 @@ public class ContainerProvisioningStage extends AbstractBaseStage {
if (participant.isAlive()) {
Message message = new Message(MessageType.SHUTDOWN, UUID.randomUUID().toString());
message.setTgtName(participant.getId().toString());
- message.setTgtSessionId("*");
+ message.setTgtSessionId(participant.getRunningInstance().getSessionId());
message.setMsgId(message.getId());
accessor.createProperty(
keyBuilder.message(participant.getId().toString(), message.getId()), message);
http://git-wip-us.apache.org/repos/asf/helix/blob/7d5bd789/helix-provisioning/pom.xml
----------------------------------------------------------------------
diff --git a/helix-provisioning/pom.xml b/helix-provisioning/pom.xml
index 4a2e523..410c2be 100644
--- a/helix-provisioning/pom.xml
+++ b/helix-provisioning/pom.xml
@@ -89,6 +89,10 @@ under the License.
<mainClass>org.apache.helix.provisioning.tools.UpdateProvisionerConfig</mainClass>
<name>update-provisioner-config</name>
</program>
+ <program>
+ <mainClass>org.apache.helix.provisioning.tools.ContainerAdmin</mainClass>
+ <name>container-admin</name>
+ </program>
</programs>
</configuration>
</plugin>
http://git-wip-us.apache.org/repos/asf/helix/blob/7d5bd789/helix-provisioning/src/main/java/org/apache/helix/provisioning/tools/ContainerAdmin.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/tools/ContainerAdmin.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/tools/ContainerAdmin.java
new file mode 100644
index 0000000..8154996
--- /dev/null
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/tools/ContainerAdmin.java
@@ -0,0 +1,98 @@
+package org.apache.helix.provisioning.tools;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.UUID;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.GnuParser;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.OptionBuilder;
+import org.apache.commons.cli.OptionGroup;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
+import org.apache.helix.HelixConnection;
+import org.apache.helix.api.Participant;
+import org.apache.helix.api.accessor.ParticipantAccessor;
+import org.apache.helix.api.id.ClusterId;
+import org.apache.helix.api.id.MessageId;
+import org.apache.helix.api.id.ParticipantId;
+import org.apache.helix.manager.zk.ZkHelixConnection;
+import org.apache.helix.model.Message;
+import org.apache.helix.model.Message.MessageType;
+import org.apache.log4j.Logger;
+
+/**
+ * Command-line tool for administering running containers: sends a SHUTDOWN
+ * message to a participant and blocks until the participant is no longer alive.
+ */
+public class ContainerAdmin {
+
+ private static Logger LOG = Logger.getLogger(ContainerAdmin.class);
+ private static String stopContainer = "stopContainer";
+ private HelixConnection _connection;
+
+ public ContainerAdmin(String zkAddress) {
+ _connection = new ZkHelixConnection(zkAddress);
+ _connection.connect();
+ }
+
+ public void stopContainer(String appName, String participantName) throws Exception {
+ ClusterId clusterId = ClusterId.from(appName);
+ ParticipantAccessor participantAccessor = _connection.createParticipantAccessor(clusterId);
+ ParticipantId participantId = ParticipantId.from(participantName);
+ Participant participant = participantAccessor.readParticipant(participantId);
+ if (participant != null && participant.isAlive()) {
+ Message message = new Message(MessageType.SHUTDOWN, UUID.randomUUID().toString());
+ message.setTgtName(participant.getId().toString());
+ message.setTgtSessionId(participant.getRunningInstance().getSessionId());
+ message.setMsgId(message.getId());
+ Map<MessageId, Message> msgMap = new HashMap<MessageId, Message>();
+ msgMap.put(MessageId.from(message.getId()), message);
+ participantAccessor.insertMessagesToParticipant(participantId, msgMap);
+ do {
+ participant = participantAccessor.readParticipant(participantId);
+ Thread.sleep(1000);
+ LOG.info("Waiting for container:" + participantName + " to shutdown");
+ } while (participant!=null && participant.isAlive());
+ }
+
+ }
+
+ @SuppressWarnings("static-access")
+ public static void main(String[] args) throws Exception {
+ Option zkServerOption =
+ OptionBuilder.withLongOpt("zookeeperAddress").withDescription("Provide zookeeper address")
+ .create();
+ zkServerOption.setArgs(1);
+ zkServerOption.setRequired(true);
+ zkServerOption.setArgName("zookeeperAddress(Required)");
+
+ OptionGroup group = new OptionGroup();
+ group.setRequired(true);
+
+ // option to stop a specific container (args: appName participantName)
+ Option stopContainerOption =
+ OptionBuilder.withLongOpt(stopContainer).withDescription("appName participantName")
+ .create();
+ stopContainerOption.setArgs(2);
+ stopContainerOption.setRequired(false);
+ stopContainerOption.setArgName("appName participantName");
+
+ group.addOption(stopContainerOption);
+
+ Options options = new Options();
+ options.addOption(zkServerOption);
+ options.addOptionGroup(group);
+ CommandLine cliParser = new GnuParser().parse(options, args);
+
+ String zkAddress = cliParser.getOptionValue("zookeeperAddress");
+ ContainerAdmin admin = new ContainerAdmin(zkAddress);
+
+ if (cliParser.hasOption(stopContainer)) {
+ String appName = cliParser.getOptionValues(stopContainer)[0];
+ String participantName = cliParser.getOptionValues(stopContainer)[1];
+ admin.stopContainer(appName, participantName);
+ }
+ }
+}
[26/50] [abbrv] git commit: Minor fixes to read the configuration
from app spec yaml
Posted by ka...@apache.org.
Minor fixes to read the configuration from app spec yaml
Project: http://git-wip-us.apache.org/repos/asf/helix/repo
Commit: http://git-wip-us.apache.org/repos/asf/helix/commit/b97cfb45
Tree: http://git-wip-us.apache.org/repos/asf/helix/tree/b97cfb45
Diff: http://git-wip-us.apache.org/repos/asf/helix/diff/b97cfb45
Branch: refs/heads/master
Commit: b97cfb451037479745628cb3b15e2f4e35de7e11
Parents: c903186
Author: Kishore Gopalakrishna <g....@gmail.com>
Authored: Tue Feb 25 17:40:32 2014 -0800
Committer: Kishore Gopalakrishna <g....@gmail.com>
Committed: Tue Feb 25 17:40:32 2014 -0800
----------------------------------------------------------------------
.../helix/provisioning/ServiceConfig.java | 1 -
.../yarn/example/HelloWordAppSpecFactory.java | 1 +
.../yarn/example/HelloworldAppSpec.java | 25 ++++++++++++++++----
.../main/resources/hello_world_app_spec.yaml | 0
4 files changed, 21 insertions(+), 6 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/helix/blob/b97cfb45/helix-provisioning/src/main/java/org/apache/helix/provisioning/ServiceConfig.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/ServiceConfig.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/ServiceConfig.java
index 262344b..55ca0ae 100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/ServiceConfig.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/ServiceConfig.java
@@ -8,7 +8,6 @@ import org.apache.helix.api.config.UserConfig;
import org.apache.helix.api.id.ResourceId;
public class ServiceConfig extends UserConfig{
- public Map<String, String> config = new HashMap<String, String>();
public ServiceConfig(Scope<ResourceId> scope) {
super(scope);
http://git-wip-us.apache.org/repos/asf/helix/blob/b97cfb45/recipes/helloworld-provisioning-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWordAppSpecFactory.java
----------------------------------------------------------------------
diff --git a/recipes/helloworld-provisioning-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWordAppSpecFactory.java b/recipes/helloworld-provisioning-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWordAppSpecFactory.java
index 20591cf..e9163d3 100644
--- a/recipes/helloworld-provisioning-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWordAppSpecFactory.java
+++ b/recipes/helloworld-provisioning-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWordAppSpecFactory.java
@@ -30,6 +30,7 @@ public class HelloWordAppSpecFactory implements ApplicationSpecFactory {
HelloworldAppSpec spec = yaml.loadAs(resourceAsStream, HelloworldAppSpec.class);
String dump = yaml.dump(spec);
System.out.println(dump);
+ System.out.println(spec.getServiceConfig("HelloWorld").getStringField("num_containers", "1"));
}
}
http://git-wip-us.apache.org/repos/asf/helix/blob/b97cfb45/recipes/helloworld-provisioning-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/HelloworldAppSpec.java
----------------------------------------------------------------------
diff --git a/recipes/helloworld-provisioning-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/HelloworldAppSpec.java b/recipes/helloworld-provisioning-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/HelloworldAppSpec.java
index 588c84c..4fda91e 100644
--- a/recipes/helloworld-provisioning-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/HelloworldAppSpec.java
+++ b/recipes/helloworld-provisioning-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/HelloworldAppSpec.java
@@ -17,6 +17,8 @@ import org.apache.helix.provisioning.ApplicationSpec;
import org.apache.helix.provisioning.ServiceConfig;
import org.apache.helix.provisioning.TaskConfig;
+import com.google.common.collect.Maps;
+
public class HelloworldAppSpec implements ApplicationSpec {
public String _appName;
@@ -31,7 +33,7 @@ public class HelloworldAppSpec implements ApplicationSpec {
private Map<String, String> _serviceMainClassMap;
- private Map<String, Map<String, String>> _serviceConfigMap;
+ private Map<String, ServiceConfig> _serviceConfigMap;
private List<TaskConfig> _taskConfigs;
@@ -68,11 +70,24 @@ public class HelloworldAppSpec implements ApplicationSpec {
}
public Map<String, Map<String, String>> getServiceConfigMap() {
- return _serviceConfigMap;
+ Map<String,Map<String,String>> map = Maps.newHashMap();
+ for(String service:_serviceConfigMap.keySet()){
+ map.put(service, _serviceConfigMap.get(service).getSimpleFields());
+ }
+ return map;
}
- public void setServiceConfigMap(Map<String, Map<String, String>> serviceConfigMap) {
- _serviceConfigMap = serviceConfigMap;
+ public void setServiceConfigMap(Map<String, Map<String, Object>> map) {
+ _serviceConfigMap = Maps.newHashMap();
+
+ for(String service:map.keySet()){
+ ServiceConfig serviceConfig = new ServiceConfig(Scope.resource(ResourceId.from(service)));
+ Map<String, Object> simpleFields = map.get(service);
+ for(String key:simpleFields.keySet()){
+ serviceConfig.setSimpleField(key, simpleFields.get(key).toString());
+ }
+ _serviceConfigMap.put(service, serviceConfig);
+ }
}
public void setAppName(String appName) {
@@ -127,7 +142,7 @@ public class HelloworldAppSpec implements ApplicationSpec {
@Override
public ServiceConfig getServiceConfig(String serviceName) {
- return new ServiceConfig(Scope.resource(ResourceId.from(serviceName)));
+ return _serviceConfigMap.get(serviceName);
}
@Override
http://git-wip-us.apache.org/repos/asf/helix/blob/b97cfb45/recipes/helloworld-provisioning-yarn/src/main/resources/hello_world_app_spec.yaml
----------------------------------------------------------------------
diff --git a/recipes/helloworld-provisioning-yarn/src/main/resources/hello_world_app_spec.yaml b/recipes/helloworld-provisioning-yarn/src/main/resources/hello_world_app_spec.yaml
old mode 100644
new mode 100755
[23/50] [abbrv] git commit: Merge branch 'helix-provisioning' of
https://git-wip-us.apache.org/repos/asf/helix into helix-provisioning
Posted by ka...@apache.org.
Merge branch 'helix-provisioning' of https://git-wip-us.apache.org/repos/asf/helix into helix-provisioning
Project: http://git-wip-us.apache.org/repos/asf/helix/repo
Commit: http://git-wip-us.apache.org/repos/asf/helix/commit/c0a25f6c
Tree: http://git-wip-us.apache.org/repos/asf/helix/tree/c0a25f6c
Diff: http://git-wip-us.apache.org/repos/asf/helix/diff/c0a25f6c
Branch: refs/heads/master
Commit: c0a25f6c2d065dbaad83d7d0643025e44877b128
Parents: 2ab31dd 2339465
Author: Kanak Biscuitwala <ka...@apache.org>
Authored: Mon Feb 24 15:30:36 2014 -0800
Committer: Kanak Biscuitwala <ka...@apache.org>
Committed: Mon Feb 24 15:30:36 2014 -0800
----------------------------------------------------------------------
helix-provisioning/pom.xml | 8 +-
.../tools/UpdateProvisionerConfig.java | 87 ++++++++++++++++++++
.../java/tools/UpdateProvisionerConfig.java | 87 --------------------
.../yarn/example/HelloWordAppSpecFactory.java | 67 ++-------------
4 files changed, 96 insertions(+), 153 deletions(-)
----------------------------------------------------------------------
[46/50] [abbrv] git commit: [HELIX-416] Support recurring scheduled
tasks
Posted by ka...@apache.org.
[HELIX-416] Support recurring scheduled tasks
Project: http://git-wip-us.apache.org/repos/asf/helix/repo
Commit: http://git-wip-us.apache.org/repos/asf/helix/commit/0f79187d
Tree: http://git-wip-us.apache.org/repos/asf/helix/tree/0f79187d
Diff: http://git-wip-us.apache.org/repos/asf/helix/diff/0f79187d
Branch: refs/heads/master
Commit: 0f79187d31c8769a668ba81f8bcc5e5831c659da
Parents: 346d8a3
Author: Kanak Biscuitwala <ka...@apache.org>
Authored: Mon Jun 23 13:59:59 2014 -0700
Committer: Kanak Biscuitwala <ka...@apache.org>
Committed: Wed Jul 9 09:48:57 2014 -0700
----------------------------------------------------------------------
.../org/apache/helix/task/ScheduleConfig.java | 29 ++-
.../org/apache/helix/task/TaskConstants.java | 4 +
.../java/org/apache/helix/task/TaskDriver.java | 36 +++-
.../org/apache/helix/task/TaskRebalancer.java | 134 +++++++++---
.../java/org/apache/helix/task/TaskUtil.java | 211 +++++++++++++++++++
.../java/org/apache/helix/task/Workflow.java | 4 +-
.../org/apache/helix/task/WorkflowConfig.java | 39 +---
.../org/apache/helix/task/WorkflowContext.java | 9 +
.../apache/helix/task/beans/WorkflowBean.java | 4 +-
9 files changed, 392 insertions(+), 78 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/helix/blob/0f79187d/helix-core/src/main/java/org/apache/helix/task/ScheduleConfig.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/ScheduleConfig.java b/helix-core/src/main/java/org/apache/helix/task/ScheduleConfig.java
index 9e3801e..b123793 100644
--- a/helix-core/src/main/java/org/apache/helix/task/ScheduleConfig.java
+++ b/helix-core/src/main/java/org/apache/helix/task/ScheduleConfig.java
@@ -87,12 +87,6 @@ public class ScheduleConfig {
* @return true if valid, false if invalid
*/
public boolean isValid() {
- // For now, disallow recurring workflows
- if (isRecurring()) {
- LOG.error("Recurring workflows are not currently supported.");
- return false;
- }
-
// All schedules must have a start time even if they are recurring
if (_startTime == null) {
LOG.error("All schedules must have a start time!");
@@ -141,25 +135,28 @@ public class ScheduleConfig {
return new ScheduleConfig(startTime, null, null);
}
- /*
+ /**
* Create a schedule for a recurring workflow that should start immediately
* @param recurUnit the unit of the recurrence interval
* @param recurInterval the magnitude of the recurrence interval
* @return instantiated ScheduleConfig
- * public static ScheduleConfig recurringFromNow(TimeUnit recurUnit, long recurInterval) {
- * return new ScheduleConfig(new Date(), recurUnit, recurInterval);
- * }
*/
+ public static ScheduleConfig recurringFromNow(TimeUnit recurUnit, long recurInterval) {
+ return new ScheduleConfig(new Date(), recurUnit, recurInterval);
+ }
- /*
+ /**
* Create a schedule for a recurring workflow that should start at a specific time
- * @param startTime the time to start the workflow the first time
+ * @param startTime the time to start the workflow the first time, or null if now
* @param recurUnit the unit of the recurrence interval
* @param recurInterval the magnitude of the recurrence interval
* @return instantiated ScheduleConfig
- * public static ScheduleConfig recurringFromDate(Date startTime, TimeUnit recurUnit,
- * long recurInterval) {
- * return new ScheduleConfig(startTime, recurUnit, recurInterval);
- * }
*/
+ public static ScheduleConfig recurringFromDate(Date startTime, TimeUnit recurUnit,
+ long recurInterval) {
+ if (startTime == null) {
+ startTime = new Date();
+ }
+ return new ScheduleConfig(startTime, recurUnit, recurInterval);
+ }
}
http://git-wip-us.apache.org/repos/asf/helix/blob/0f79187d/helix-core/src/main/java/org/apache/helix/task/TaskConstants.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/TaskConstants.java b/helix-core/src/main/java/org/apache/helix/task/TaskConstants.java
index 305323d..34008d6 100644
--- a/helix-core/src/main/java/org/apache/helix/task/TaskConstants.java
+++ b/helix-core/src/main/java/org/apache/helix/task/TaskConstants.java
@@ -39,4 +39,8 @@ public class TaskConstants {
* The root property store path at which the {@link TaskRebalancer} stores context information.
*/
public static final String REBALANCER_CONTEXT_ROOT = "/TaskRebalancer";
+ /**
+ * Resource prefix for scheduled workflows
+ */
+ public static final String SCHEDULED = "SCHEDULED";
}
http://git-wip-us.apache.org/repos/asf/helix/blob/0f79187d/helix-core/src/main/java/org/apache/helix/task/TaskDriver.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/TaskDriver.java b/helix-core/src/main/java/org/apache/helix/task/TaskDriver.java
index d5e9101..0610c01 100644
--- a/helix-core/src/main/java/org/apache/helix/task/TaskDriver.java
+++ b/helix-core/src/main/java/org/apache/helix/task/TaskDriver.java
@@ -25,6 +25,7 @@ import java.util.Collections;
import java.util.List;
import java.util.Map;
+import org.I0Itec.zkclient.DataUpdater;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.GnuParser;
@@ -34,6 +35,7 @@ import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.OptionGroup;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
+import org.apache.helix.AccessOption;
import org.apache.helix.HelixAdmin;
import org.apache.helix.HelixDataAccessor;
import org.apache.helix.HelixManager;
@@ -41,6 +43,7 @@ import org.apache.helix.HelixManagerFactory;
import org.apache.helix.HelixProperty;
import org.apache.helix.InstanceType;
import org.apache.helix.PropertyKey;
+import org.apache.helix.ZNRecord;
import org.apache.helix.model.IdealState;
import org.apache.helix.model.builder.CustomModeISBuilder;
import org.apache.log4j.Logger;
@@ -221,11 +224,38 @@ public class TaskDriver {
/** Helper function to change target state for a given task */
private void setTaskTargetState(String jobResource, TargetState state) {
+ setSingleTaskTargetState(jobResource, state);
+
+ // For recurring schedules, child workflows must also be handled
HelixDataAccessor accessor = _manager.getHelixDataAccessor();
- HelixProperty p = new HelixProperty(jobResource);
- p.getRecord().setSimpleField(WorkflowConfig.TARGET_STATE, state.name());
- accessor.updateProperty(accessor.keyBuilder().resourceConfig(jobResource), p);
+ List<String> resources = accessor.getChildNames(accessor.keyBuilder().resourceConfigs());
+ for (String resource : resources) {
+ String prefix = resource + "_" + TaskConstants.SCHEDULED;
+ if (resource.startsWith(prefix)) {
+ setSingleTaskTargetState(resource, state);
+ }
+ }
+ }
+ /** Helper function to change target state for a given task */
+ private void setSingleTaskTargetState(String jobResource, final TargetState state) {
+ HelixDataAccessor accessor = _manager.getHelixDataAccessor();
+ DataUpdater<ZNRecord> updater = new DataUpdater<ZNRecord>() {
+ @Override
+ public ZNRecord update(ZNRecord currentData) {
+ // Only update target state for non-completed workflows
+ String finishTime = currentData.getSimpleField(WorkflowContext.FINISH_TIME);
+ if (finishTime == null || finishTime.equals(WorkflowContext.UNFINISHED)) {
+ currentData.setSimpleField(WorkflowConfig.TARGET_STATE, state.name());
+ }
+ return currentData;
+ }
+ };
+ List<DataUpdater<ZNRecord>> updaters = Lists.newArrayList();
+ updaters.add(updater);
+ List<String> paths = Lists.newArrayList();
+ paths.add(accessor.keyBuilder().resourceConfig(jobResource).getPath());
+ accessor.updateChildren(paths, updaters, AccessOption.PERSISTENT);
invokeRebalance();
}
http://git-wip-us.apache.org/repos/asf/helix/blob/0f79187d/helix-core/src/main/java/org/apache/helix/task/TaskRebalancer.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/TaskRebalancer.java b/helix-core/src/main/java/org/apache/helix/task/TaskRebalancer.java
index 37c8548..0e11d21 100644
--- a/helix-core/src/main/java/org/apache/helix/task/TaskRebalancer.java
+++ b/helix-core/src/main/java/org/apache/helix/task/TaskRebalancer.java
@@ -36,6 +36,7 @@ import java.util.concurrent.TimeUnit;
import org.apache.helix.AccessOption;
import org.apache.helix.HelixDataAccessor;
+import org.apache.helix.HelixDefinedState;
import org.apache.helix.HelixManager;
import org.apache.helix.PropertyKey;
import org.apache.helix.ZNRecord;
@@ -57,6 +58,7 @@ import com.google.common.base.Joiner;
import com.google.common.collect.BiMap;
import com.google.common.collect.HashBiMap;
import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
/**
@@ -65,12 +67,12 @@ import com.google.common.collect.Sets;
public abstract class TaskRebalancer implements HelixRebalancer {
private static final Logger LOG = Logger.getLogger(TaskRebalancer.class);
- /** Management of already-scheduled workflows across jobs */
+ // Management of already-scheduled workflows across jobs
private static final BiMap<String, Date> SCHEDULED_WORKFLOWS = HashBiMap.create();
private static final ScheduledExecutorService SCHEDULED_EXECUTOR = Executors
.newSingleThreadScheduledExecutor();
- /** For connection management */
+ // For connection management
private HelixManager _manager;
/**
@@ -129,12 +131,6 @@ public abstract class TaskRebalancer implements HelixRebalancer {
WorkflowConfig workflowCfg = TaskUtil.getWorkflowCfg(_manager, workflowResource);
WorkflowContext workflowCtx = TaskUtil.getWorkflowContext(_manager, workflowResource);
- // Check for readiness, and stop processing if it's not ready
- boolean isReady = scheduleIfNotReady(workflowCfg, workflowResource, resourceName);
- if (!isReady) {
- return emptyAssignment(resourceName);
- }
-
// Initialize workflow context if needed
if (workflowCtx == null) {
workflowCtx = new WorkflowContext(new ZNRecord("WorkflowContext"));
@@ -145,7 +141,7 @@ public abstract class TaskRebalancer implements HelixRebalancer {
for (String parent : workflowCfg.getJobDag().getDirectParents(resourceName)) {
if (workflowCtx.getJobState(parent) == null
|| !workflowCtx.getJobState(parent).equals(TaskState.COMPLETED)) {
- return emptyAssignment(resourceName);
+ return emptyAssignment(resourceName, currStateOutput);
}
}
@@ -153,7 +149,7 @@ public abstract class TaskRebalancer implements HelixRebalancer {
TargetState targetState = workflowCfg.getTargetState();
if (targetState == TargetState.DELETE) {
cleanup(_manager, resourceName, workflowCfg, workflowResource);
- return emptyAssignment(resourceName);
+ return emptyAssignment(resourceName, currStateOutput);
}
// Check if this workflow has been finished past its expiry.
@@ -161,7 +157,7 @@ public abstract class TaskRebalancer implements HelixRebalancer {
&& workflowCtx.getFinishTime() + workflowCfg.getExpiry() <= System.currentTimeMillis()) {
markForDeletion(_manager, workflowResource);
cleanup(_manager, resourceName, workflowCfg, workflowResource);
- return emptyAssignment(resourceName);
+ return emptyAssignment(resourceName, currStateOutput);
}
// Fetch any existing context information from the property store.
@@ -174,9 +170,17 @@ public abstract class TaskRebalancer implements HelixRebalancer {
// The job is already in a final state (completed/failed).
if (workflowCtx.getJobState(resourceName) == TaskState.FAILED
|| workflowCtx.getJobState(resourceName) == TaskState.COMPLETED) {
- return emptyAssignment(resourceName);
+ return emptyAssignment(resourceName, currStateOutput);
+ }
+
+ // Check for readiness, and stop processing if it's not ready
+ boolean isReady =
+ scheduleIfNotReady(workflowCfg, workflowCtx, workflowResource, resourceName, clusterData);
+ if (!isReady) {
+ return emptyAssignment(resourceName, currStateOutput);
}
+ // Grab the old assignment, or an empty one if it doesn't exist
ResourceAssignment prevAssignment = TaskUtil.getPrevResourceAssignment(_manager, resourceName);
if (prevAssignment == null) {
prevAssignment = new ResourceAssignment(ResourceId.from(resourceName));
@@ -359,8 +363,9 @@ public abstract class TaskRebalancer implements HelixRebalancer {
if (!successOptional) {
workflowCtx.setJobState(jobResource, TaskState.FAILED);
workflowCtx.setWorkflowState(TaskState.FAILED);
+ workflowCtx.setFinishTime(System.currentTimeMillis());
addAllPartitions(allPartitions, partitionsToDropFromIs);
- return emptyAssignment(jobResource);
+ return emptyAssignment(jobResource, currStateOutput);
} else {
skippedPartitions.add(pId);
partitionsToDropFromIs.add(pId);
@@ -443,12 +448,14 @@ public abstract class TaskRebalancer implements HelixRebalancer {
/**
* Check if a workflow is ready to schedule, and schedule a rebalance if it is not
* @param workflowCfg the workflow to check
+ * @param workflowCtx the current workflow context
* @param workflowResource the Helix resource associated with the workflow
* @param jobResource a job from the workflow
+ * @param cache the current snapshot of the cluster
* @return true if ready, false if not ready
*/
- private boolean scheduleIfNotReady(WorkflowConfig workflowCfg, String workflowResource,
- String jobResource) {
+ private boolean scheduleIfNotReady(WorkflowConfig workflowCfg, WorkflowContext workflowCtx,
+ String workflowResource, String jobResource, Cluster cache) {
// Ignore non-scheduled workflows
if (workflowCfg == null || workflowCfg.getScheduleConfig() == null) {
return true;
@@ -457,11 +464,66 @@ public abstract class TaskRebalancer implements HelixRebalancer {
// Figure out when this should be run, and if it's ready, then just run it
ScheduleConfig scheduleConfig = workflowCfg.getScheduleConfig();
Date startTime = scheduleConfig.getStartTime();
- long delay = startTime.getTime() - new Date().getTime();
- if (delay <= 0) {
- SCHEDULED_WORKFLOWS.remove(workflowResource);
- SCHEDULED_WORKFLOWS.inverse().remove(startTime);
- return true;
+ long currentTime = new Date().getTime();
+ long delayFromStart = startTime.getTime() - currentTime;
+
+ if (delayFromStart <= 0) {
+ // Remove any timers that are past-time for this workflow
+ Date scheduledTime = SCHEDULED_WORKFLOWS.get(workflowResource);
+ if (scheduledTime != null && currentTime > scheduledTime.getTime()) {
+ SCHEDULED_WORKFLOWS.remove(workflowResource);
+ }
+
+ // Recurring workflows are just templates that spawn new workflows
+ if (scheduleConfig.isRecurring()) {
+ // Skip scheduling this workflow if it's not in a start state
+ if (!workflowCfg.getTargetState().equals(TargetState.START)) {
+ return false;
+ }
+
+ // Skip scheduling this workflow again if the previous run (if any) is still active
+ String lastScheduled = workflowCtx.getLastScheduledSingleWorkflow();
+ if (lastScheduled != null) {
+ WorkflowContext lastWorkflowCtx = TaskUtil.getWorkflowContext(_manager, lastScheduled);
+ if (lastWorkflowCtx == null
+ || lastWorkflowCtx.getFinishTime() == WorkflowContext.UNFINISHED) {
+ return false;
+ }
+ }
+
+ // Figure out how many jumps are needed, thus the time to schedule the next workflow
+ // The negative of the delay is the amount of time past the start time
+ long period =
+ scheduleConfig.getRecurrenceUnit().toMillis(scheduleConfig.getRecurrenceInterval());
+ long offsetMultiplier = (-delayFromStart) / period;
+ long timeToSchedule = period * offsetMultiplier + startTime.getTime();
+
+ // Now clone the workflow if this clone has not yet been created
+ String newWorkflowName =
+ workflowResource + "_" + TaskConstants.SCHEDULED + "_" + offsetMultiplier;
+ if (lastScheduled == null || !lastScheduled.equals(newWorkflowName)) {
+ Workflow clonedWf =
+ TaskUtil.cloneWorkflow(_manager, workflowResource, newWorkflowName, new Date(
+ timeToSchedule));
+ TaskDriver driver = new TaskDriver(_manager);
+ try {
+ // Start the cloned workflow
+ driver.start(clonedWf);
+ } catch (Exception e) {
+ LOG.error("Failed to schedule cloned workflow " + newWorkflowName, e);
+ }
+ // Persist workflow start regardless of success to avoid retrying and failing
+ workflowCtx.setLastScheduledSingleWorkflow(newWorkflowName);
+ TaskUtil.setWorkflowContext(_manager, workflowResource, workflowCtx);
+ }
+
+ // Change the time to trigger the pipeline to that of the next run
+ startTime = new Date(timeToSchedule + period);
+ delayFromStart = startTime.getTime() - System.currentTimeMillis();
+ } else {
+ // This is a one-time workflow and is ready
+ return true;
+ }
}
// No need to schedule the same runnable at the same time
@@ -470,11 +532,22 @@ public abstract class TaskRebalancer implements HelixRebalancer {
return false;
}
+ scheduleRebalance(workflowResource, jobResource, startTime, delayFromStart);
+ return false;
+ }
+
+ private void scheduleRebalance(String workflowResource, String jobResource, Date startTime,
+ long delayFromStart) {
+ // No need to schedule the same runnable at the same time
+ if (SCHEDULED_WORKFLOWS.containsKey(workflowResource)
+ || SCHEDULED_WORKFLOWS.inverse().containsKey(startTime)) {
+ return;
+ }
+
// For workflows not yet scheduled, schedule them and record it
RebalanceInvoker rebalanceInvoker = new RebalanceInvoker(_manager, jobResource);
SCHEDULED_WORKFLOWS.put(workflowResource, startTime);
- SCHEDULED_EXECUTOR.schedule(rebalanceInvoker, delay, TimeUnit.MILLISECONDS);
- return false;
+ SCHEDULED_EXECUTOR.schedule(rebalanceInvoker, delayFromStart, TimeUnit.MILLISECONDS);
}
/**
@@ -620,8 +693,21 @@ public abstract class TaskRebalancer implements HelixRebalancer {
}
}
- private static ResourceAssignment emptyAssignment(String name) {
- return new ResourceAssignment(ResourceId.from(name));
+ private static ResourceAssignment emptyAssignment(String name,
+ ResourceCurrentState currStateOutput) {
+ ResourceId resourceId = ResourceId.from(name);
+ ResourceAssignment assignment = new ResourceAssignment(resourceId);
+ Set<PartitionId> partitions = currStateOutput.getCurrentStateMappedPartitions(resourceId);
+ for (PartitionId partition : partitions) {
+ Map<ParticipantId, State> currentStateMap =
+ currStateOutput.getCurrentStateMap(resourceId, partition);
+ Map<ParticipantId, State> replicaMap = Maps.newHashMap();
+ for (ParticipantId participantId : currentStateMap.keySet()) {
+ replicaMap.put(participantId, State.from(HelixDefinedState.DROPPED));
+ }
+ assignment.addReplicaMap(partition, replicaMap);
+ }
+ return assignment;
}
private static void addCompletedPartitions(Set<Integer> set, JobContext ctx,
http://git-wip-us.apache.org/repos/asf/helix/blob/0f79187d/helix-core/src/main/java/org/apache/helix/task/TaskUtil.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/TaskUtil.java b/helix-core/src/main/java/org/apache/helix/task/TaskUtil.java
index 43a1741..b8582b1 100644
--- a/helix-core/src/main/java/org/apache/helix/task/TaskUtil.java
+++ b/helix-core/src/main/java/org/apache/helix/task/TaskUtil.java
@@ -20,10 +20,14 @@ package org.apache.helix.task;
*/
import java.io.IOException;
+import java.text.ParseException;
import java.util.Collections;
+import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
import org.apache.helix.AccessOption;
import org.apache.helix.ConfigAccessor;
@@ -44,6 +48,7 @@ import org.codehaus.jackson.map.ObjectMapper;
import org.codehaus.jackson.type.TypeReference;
import com.google.common.base.Joiner;
+import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
/**
@@ -63,6 +68,9 @@ public class TaskUtil {
*/
public static JobConfig getJobCfg(HelixManager manager, String jobResource) {
HelixProperty jobResourceConfig = getResourceConfig(manager, jobResource);
+ if (jobResourceConfig == null) {
+ return null;
+ }
JobConfig.Builder b =
JobConfig.Builder.fromMap(jobResourceConfig.getRecord().getSimpleFields());
Map<String, Map<String, String>> rawTaskConfigMap =
@@ -76,13 +84,33 @@ public class TaskUtil {
return b.build();
}
+ /**
+ * Parses workflow resource configurations in Helix into a {@link WorkflowConfig} object.
+ * @param manager Helix manager object used to connect to Helix.
+ * @param workflowResource The name of the workflow resource.
+ * @return A {@link WorkflowConfig} object if Helix contains valid configurations for the
+ * workflow, null otherwise.
+ */
public static WorkflowConfig getWorkflowCfg(HelixManager manager, String workflowResource) {
Map<String, String> workflowCfg = getResourceConfigMap(manager, workflowResource);
+ if (workflowCfg == null) {
+ return null;
+ }
WorkflowConfig.Builder b = WorkflowConfig.Builder.fromMap(workflowCfg);
return b.build();
}
+ /**
+ * Request a state change for a specific task.
+ * @param accessor connected Helix data accessor
+ * @param instance the instance serving the task
+ * @param sessionId the current session of the instance
+ * @param resource the job name
+ * @param partition the task partition name
+ * @param state the requested state
+ * @return true if the request was persisted, false otherwise
+ */
public static boolean setRequestedState(HelixDataAccessor accessor, String instance,
String sessionId, String resource, String partition, TaskPartitionState state) {
LOG.debug(String.format("Requesting a state transition to %s for partition %s.", state,
@@ -101,11 +129,23 @@ public class TaskUtil {
}
}
+ /**
+ * Get a Helix configuration scope at a resource (i.e. job and workflow) level
+ * @param clusterName the cluster containing the resource
+ * @param resource the resource name
+ * @return instantiated {@link HelixConfigScope}
+ */
public static HelixConfigScope getResourceConfigScope(String clusterName, String resource) {
return new HelixConfigScopeBuilder(HelixConfigScope.ConfigScopeProperty.RESOURCE)
.forCluster(clusterName).forResource(resource).build();
}
+ /**
+ * Get the last task assignment for a given job
+ * @param manager a connection to Helix
+ * @param resourceName the name of the job
+ * @return {@link ResourceAssignment} instance, or null if no assignment is available
+ */
public static ResourceAssignment getPrevResourceAssignment(HelixManager manager,
String resourceName) {
ZNRecord r =
@@ -115,6 +155,12 @@ public class TaskUtil {
return r != null ? new ResourceAssignment(r) : null;
}
+ /**
+ * Set the last task assignment for a given job
+ * @param manager a connection to Helix
+ * @param resourceName the name of the job
+ * @param ra {@link ResourceAssignment} containing the task assignment
+ */
public static void setPrevResourceAssignment(HelixManager manager, String resourceName,
ResourceAssignment ra) {
manager.getHelixPropertyStore().set(
@@ -122,6 +168,12 @@ public class TaskUtil {
ra.getRecord(), AccessOption.PERSISTENT);
}
+ /**
+ * Get the runtime context of a single job
+ * @param manager a connection to Helix
+ * @param jobResource the name of the job
+ * @return the {@link JobContext}, or null if none is available
+ */
public static JobContext getJobContext(HelixManager manager, String jobResource) {
ZNRecord r =
manager.getHelixPropertyStore().get(
@@ -130,12 +182,24 @@ public class TaskUtil {
return r != null ? new JobContext(r) : null;
}
+ /**
+ * Set the runtime context of a single job
+ * @param manager a connection to Helix
+ * @param jobResource the name of the job
+ * @param ctx the up-to-date {@link JobContext} for the job
+ */
public static void setJobContext(HelixManager manager, String jobResource, JobContext ctx) {
manager.getHelixPropertyStore().set(
Joiner.on("/").join(TaskConstants.REBALANCER_CONTEXT_ROOT, jobResource, CONTEXT_NODE),
ctx.getRecord(), AccessOption.PERSISTENT);
}
+ /**
+ * Get the runtime context of a single workflow
+ * @param manager a connection to Helix
+ * @param workflowResource the name of the workflow
+ * @return the {@link WorkflowContext}, or null if none is available
+ */
public static WorkflowContext getWorkflowContext(HelixManager manager, String workflowResource) {
ZNRecord r =
manager.getHelixPropertyStore().get(
@@ -144,6 +208,12 @@ public class TaskUtil {
return r != null ? new WorkflowContext(r) : null;
}
+ /**
+ * Set the runtime context of a single workflow
+ * @param manager a connection to Helix
+ * @param workflowResource the name of the workflow
+ * @param ctx the up-to-date {@link WorkflowContext} for the workflow
+ */
public static void setWorkflowContext(HelixManager manager, String workflowResource,
WorkflowContext ctx) {
manager.getHelixPropertyStore().set(
@@ -151,14 +221,45 @@ public class TaskUtil {
ctx.getRecord(), AccessOption.PERSISTENT);
}
+ /**
+ * Get a workflow-qualified job name for a single-job workflow
+ * @param singleJobWorkflow the name of the single-job workflow
+ * @return The namespaced job name, which is just singleJobWorkflow_singleJobWorkflow
+ */
public static String getNamespacedJobName(String singleJobWorkflow) {
return getNamespacedJobName(singleJobWorkflow, singleJobWorkflow);
}
+ /**
+ * Get a workflow-qualified job name for a job in that workflow
+ * @param workflowResource the name of the workflow
+ * @param jobName the un-namespaced name of the job
+ * @return The namespaced job name, which is just workflowResource_jobName
+ */
public static String getNamespacedJobName(String workflowResource, String jobName) {
return workflowResource + "_" + jobName;
}
+ /**
+ * Remove the workflow namespace from the job name
+ * @param workflowResource the name of the workflow that owns the job
+ * @param jobName the namespaced job name
+ * @return the denamespaced job name, or the same job name if it is already denamespaced
+ */
+ public static String getDenamespacedJobName(String workflowResource, String jobName) {
+ if (jobName.contains(workflowResource)) {
+ // skip the entire length of the workflow name plus the underscore
+ return jobName.substring(jobName.indexOf(workflowResource) + workflowResource.length() + 1);
+ } else {
+ return jobName;
+ }
+ }
+
+ /**
+ * Serialize a map of job-level configurations as a single string
+ * @param commandConfig map of job config key to config value
+ * @return serialized string
+ */
public static String serializeJobConfigMap(Map<String, String> commandConfig) {
ObjectMapper mapper = new ObjectMapper();
try {
@@ -170,6 +271,11 @@ public class TaskUtil {
return null;
}
+ /**
+ * Deserialize a single string into a map of job-level configurations
+ * @param commandConfig the serialized job config map
+ * @return a map of job config key to config value
+ */
public static Map<String, String> deserializeJobConfigMap(String commandConfig) {
ObjectMapper mapper = new ObjectMapper();
try {
@@ -194,6 +300,111 @@ public class TaskUtil {
accessor.updateProperty(accessor.keyBuilder().idealStates(resource), new IdealState(resource));
}
+ /**
+ * Get a ScheduleConfig from a workflow config string map
+ * @param cfg the string map
+ * @return a ScheduleConfig if one exists, otherwise null
+ */
+ public static ScheduleConfig parseScheduleFromConfigMap(Map<String, String> cfg) {
+ // Parse schedule-specific configs, if they exist
+ Date startTime = null;
+ if (cfg.containsKey(WorkflowConfig.START_TIME)) {
+ try {
+ startTime = WorkflowConfig.DEFAULT_DATE_FORMAT.parse(cfg.get(WorkflowConfig.START_TIME));
+ } catch (ParseException e) {
+ LOG.error("Unparseable date " + cfg.get(WorkflowConfig.START_TIME), e);
+ return null;
+ }
+ }
+ if (cfg.containsKey(WorkflowConfig.RECURRENCE_UNIT)
+ && cfg.containsKey(WorkflowConfig.RECURRENCE_INTERVAL)) {
+ return ScheduleConfig.recurringFromDate(startTime,
+ TimeUnit.valueOf(cfg.get(WorkflowConfig.RECURRENCE_UNIT)),
+ Long.parseLong(cfg.get(WorkflowConfig.RECURRENCE_INTERVAL)));
+ } else if (startTime != null) {
+ return ScheduleConfig.oneTimeDelayedStart(startTime);
+ }
+ return null;
+ }
+
+ /**
+ * Create a new workflow based on an existing one
+ * @param manager connection to Helix
+ * @param origWorkflowName the name of the existing workflow
+ * @param newWorkflowName the name of the new workflow
+ * @param newStartTime a provided start time that deviates from the desired start time
+ * @return the cloned workflow, or null if there was a problem cloning the existing one
+ */
+ public static Workflow cloneWorkflow(HelixManager manager, String origWorkflowName,
+ String newWorkflowName, Date newStartTime) {
+ // Read all resources, including the workflow and jobs of interest
+ HelixDataAccessor accessor = manager.getHelixDataAccessor();
+ PropertyKey.Builder keyBuilder = accessor.keyBuilder();
+ Map<String, HelixProperty> resourceConfigMap =
+ accessor.getChildValuesMap(keyBuilder.resourceConfigs());
+ if (!resourceConfigMap.containsKey(origWorkflowName)) {
+ LOG.error("No such workflow named " + origWorkflowName);
+ return null;
+ }
+ if (resourceConfigMap.containsKey(newWorkflowName)) {
+ LOG.error("Workflow with name " + newWorkflowName + " already exists!");
+ return null;
+ }
+
+ // Create a new workflow with a new name
+ HelixProperty workflowConfig = resourceConfigMap.get(origWorkflowName);
+ Map<String, String> wfSimpleFields = workflowConfig.getRecord().getSimpleFields();
+ JobDag jobDag = JobDag.fromJson(wfSimpleFields.get(WorkflowConfig.DAG));
+ Map<String, Set<String>> parentsToChildren = jobDag.getParentsToChildren();
+ Workflow.Builder builder = new Workflow.Builder(newWorkflowName);
+
+ // Set the workflow expiry
+ builder.setExpiry(Long.parseLong(wfSimpleFields.get(WorkflowConfig.EXPIRY)));
+
+ // Set the schedule, if applicable
+ ScheduleConfig scheduleConfig;
+ if (newStartTime != null) {
+ scheduleConfig = ScheduleConfig.oneTimeDelayedStart(newStartTime);
+ } else {
+ scheduleConfig = parseScheduleFromConfigMap(wfSimpleFields);
+ }
+ if (scheduleConfig != null) {
+ builder.setScheduleConfig(scheduleConfig);
+ }
+
+ // Add each job back as long as the original exists
+ Set<String> namespacedJobs = jobDag.getAllNodes();
+ for (String namespacedJob : namespacedJobs) {
+ if (resourceConfigMap.containsKey(namespacedJob)) {
+ // Copy over job-level and task-level configs
+ String job = getDenamespacedJobName(origWorkflowName, namespacedJob);
+ HelixProperty jobConfig = resourceConfigMap.get(namespacedJob);
+ Map<String, String> jobSimpleFields = jobConfig.getRecord().getSimpleFields();
+ jobSimpleFields.put(JobConfig.WORKFLOW_ID, newWorkflowName); // overwrite workflow name
+ for (Map.Entry<String, String> e : jobSimpleFields.entrySet()) {
+ builder.addConfig(job, e.getKey(), e.getValue());
+ }
+ Map<String, Map<String, String>> rawTaskConfigMap = jobConfig.getRecord().getMapFields();
+ List<TaskConfig> taskConfigs = Lists.newLinkedList();
+ for (Map<String, String> rawTaskConfig : rawTaskConfigMap.values()) {
+ TaskConfig taskConfig = TaskConfig.from(rawTaskConfig);
+ taskConfigs.add(taskConfig);
+ }
+ builder.addTaskConfigs(job, taskConfigs);
+
+ // Add dag dependencies
+ Set<String> children = parentsToChildren.get(namespacedJob);
+ if (children != null) {
+ for (String namespacedChild : children) {
+ String child = getDenamespacedJobName(origWorkflowName, namespacedChild);
+ builder.addParentChildDependency(job, child);
+ }
+ }
+ }
+ }
+ return builder.build();
+ }
+
private static Map<String, String> getResourceConfigMap(HelixManager manager, String resource) {
HelixConfigScope scope = getResourceConfigScope(manager.getClusterName(), resource);
ConfigAccessor configAccessor = manager.getConfigAccessor();
http://git-wip-us.apache.org/repos/asf/helix/blob/0f79187d/helix-core/src/main/java/org/apache/helix/task/Workflow.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/Workflow.java b/helix-core/src/main/java/org/apache/helix/task/Workflow.java
index fef0274..320c020 100644
--- a/helix-core/src/main/java/org/apache/helix/task/Workflow.java
+++ b/helix-core/src/main/java/org/apache/helix/task/Workflow.java
@@ -221,6 +221,7 @@ public class Workflow {
if (wf.schedule != null) {
builder.setScheduleConfig(ScheduleConfig.from(wf.schedule));
}
+ builder.setExpiry(wf.expiry);
return builder.build();
}
@@ -267,7 +268,7 @@ public class Workflow {
_dag = new JobDag();
_jobConfigs = new TreeMap<String, Map<String, String>>();
_taskConfigs = new TreeMap<String, List<TaskConfig>>();
- _expiry = -1;
+ _expiry = WorkflowConfig.DEFAULT_EXPIRY;
}
public Builder addConfig(String job, String key, String val) {
@@ -345,7 +346,6 @@ public class Workflow {
if (_expiry > 0) {
builder.setExpiry(_expiry);
}
-
return new Workflow(_name, builder.build(), _jobConfigs, _taskConfigs); // calls validate
// internally
}
http://git-wip-us.apache.org/repos/asf/helix/blob/0f79187d/helix-core/src/main/java/org/apache/helix/task/WorkflowConfig.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/WorkflowConfig.java b/helix-core/src/main/java/org/apache/helix/task/WorkflowConfig.java
index a8aff1f..782c375 100644
--- a/helix-core/src/main/java/org/apache/helix/task/WorkflowConfig.java
+++ b/helix-core/src/main/java/org/apache/helix/task/WorkflowConfig.java
@@ -19,20 +19,14 @@ package org.apache.helix.task;
* under the License.
*/
-import java.text.ParseException;
import java.text.SimpleDateFormat;
-import java.util.Date;
import java.util.Map;
import java.util.TimeZone;
-import org.apache.log4j.Logger;
-
/**
* Provides a typed interface to workflow level configurations. Validates the configurations.
*/
public class WorkflowConfig {
- private static final Logger LOG = Logger.getLogger(WorkflowConfig.class);
-
/* Config fields */
public static final String DAG = "Dag";
public static final String TARGET_STATE = "TargetState";
@@ -50,10 +44,10 @@ public class WorkflowConfig {
}
/* Member variables */
- private JobDag _jobDag;
- private TargetState _targetState;
- private long _expiry;
- private ScheduleConfig _scheduleConfig;
+ private final JobDag _jobDag;
+ private final TargetState _targetState;
+ private final long _expiry;
+ private final ScheduleConfig _scheduleConfig;
private WorkflowConfig(JobDag jobDag, TargetState targetState, long expiry,
ScheduleConfig scheduleConfig) {
@@ -85,10 +79,6 @@ public class WorkflowConfig {
private long _expiry = DEFAULT_EXPIRY;
private ScheduleConfig _scheduleConfig;
- public Builder() {
- // Nothing to do
- }
-
public WorkflowConfig build() {
validate();
@@ -117,11 +107,9 @@ public class WorkflowConfig {
public static Builder fromMap(Map<String, String> cfg) {
Builder b = new Builder();
-
if (cfg == null) {
return b;
}
-
if (cfg.containsKey(EXPIRY)) {
b.setExpiry(Long.parseLong(cfg.get(EXPIRY)));
}
@@ -133,22 +121,9 @@ public class WorkflowConfig {
}
// Parse schedule-specific configs, if they exist
- Date startTime = null;
- if (cfg.containsKey(START_TIME)) {
- try {
- startTime = DEFAULT_DATE_FORMAT.parse(cfg.get(START_TIME));
- } catch (ParseException e) {
- LOG.error("Unparseable date " + cfg.get(START_TIME), e);
- }
- }
- if (cfg.containsKey(RECURRENCE_UNIT) && cfg.containsKey(RECURRENCE_INTERVAL)) {
- /*
- * b.setScheduleConfig(ScheduleConfig.recurringFromDate(startTime,
- * TimeUnit.valueOf(cfg.get(RECURRENCE_UNIT)),
- * Long.parseLong(cfg.get(RECURRENCE_INTERVAL))));
- */
- } else if (startTime != null) {
- b.setScheduleConfig(ScheduleConfig.oneTimeDelayedStart(startTime));
+ ScheduleConfig scheduleConfig = TaskUtil.parseScheduleFromConfigMap(cfg);
+ if (scheduleConfig != null) {
+ b.setScheduleConfig(scheduleConfig);
}
return b;
}
http://git-wip-us.apache.org/repos/asf/helix/blob/0f79187d/helix-core/src/main/java/org/apache/helix/task/WorkflowContext.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/WorkflowContext.java b/helix-core/src/main/java/org/apache/helix/task/WorkflowContext.java
index 4feda1b..6ad71a1 100644
--- a/helix-core/src/main/java/org/apache/helix/task/WorkflowContext.java
+++ b/helix-core/src/main/java/org/apache/helix/task/WorkflowContext.java
@@ -34,6 +34,7 @@ public class WorkflowContext extends HelixProperty {
public static final String START_TIME = "START_TIME";
public static final String FINISH_TIME = "FINISH_TIME";
public static final String TASK_STATES = "TASK_STATES";
+ public static final String LAST_SCHEDULED_WORKFLOW = "LAST_SCHEDULED_WORKFLOW";
public static final int UNFINISHED = -1;
public WorkflowContext(ZNRecord record) {
@@ -106,4 +107,12 @@ public class WorkflowContext extends HelixProperty {
return Long.parseLong(tStr);
}
+
+ public void setLastScheduledSingleWorkflow(String wf) {
+ _record.setSimpleField(LAST_SCHEDULED_WORKFLOW, wf);
+ }
+
+ public String getLastScheduledSingleWorkflow() {
+ return _record.getSimpleField(LAST_SCHEDULED_WORKFLOW);
+ }
}
http://git-wip-us.apache.org/repos/asf/helix/blob/0f79187d/helix-core/src/main/java/org/apache/helix/task/beans/WorkflowBean.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/beans/WorkflowBean.java b/helix-core/src/main/java/org/apache/helix/task/beans/WorkflowBean.java
index 2ea23c7..a59e818 100644
--- a/helix-core/src/main/java/org/apache/helix/task/beans/WorkflowBean.java
+++ b/helix-core/src/main/java/org/apache/helix/task/beans/WorkflowBean.java
@@ -21,12 +21,14 @@ package org.apache.helix.task.beans;
import java.util.List;
+import org.apache.helix.task.WorkflowConfig;
+
/**
* Bean class used for parsing workflow definitions from YAML.
*/
public class WorkflowBean {
public String name;
- public String expiry;
public List<JobBean> jobs;
public ScheduleBean schedule;
+ public long expiry = WorkflowConfig.DEFAULT_EXPIRY;
}
[40/50] [abbrv] git commit: Fix YARN hello world run script
Posted by ka...@apache.org.
Fix YARN hello world run script
Project: http://git-wip-us.apache.org/repos/asf/helix/repo
Commit: http://git-wip-us.apache.org/repos/asf/helix/commit/d2209f72
Tree: http://git-wip-us.apache.org/repos/asf/helix/tree/d2209f72
Diff: http://git-wip-us.apache.org/repos/asf/helix/diff/d2209f72
Branch: refs/heads/master
Commit: d2209f726b5e4dc4644b6db9926c9b114e3c3a05
Parents: a486466
Author: Kanak Biscuitwala <ka...@apache.org>
Authored: Wed Jul 2 16:04:11 2014 -0700
Committer: Kanak Biscuitwala <ka...@apache.org>
Committed: Wed Jul 2 16:04:11 2014 -0700
----------------------------------------------------------------------
recipes/helloworld-provisioning-yarn/run.sh | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/helix/blob/d2209f72/recipes/helloworld-provisioning-yarn/run.sh
----------------------------------------------------------------------
diff --git a/recipes/helloworld-provisioning-yarn/run.sh b/recipes/helloworld-provisioning-yarn/run.sh
index 07448bb..ae53daf 100755
--- a/recipes/helloworld-provisioning-yarn/run.sh
+++ b/recipes/helloworld-provisioning-yarn/run.sh
@@ -1,6 +1,6 @@
#cd ../../
#mvn clean install -DskipTests
#cd recipes/helloworld-provisioning-yarn
-mvn clean package -DskipTests
+#mvn clean package -DskipTests
chmod +x target/helloworld-provisioning-yarn-pkg/bin/app-launcher.sh
-target/helloworld-provisioning-yarn/pkg/bin/app-launcher.sh org.apache.helix.provisioning.yarn.example.HelloWordAppSpecFactory /Users/kgopalak/Documents/projects/incubator-helix/recipes/helloworld-provisioning-yarn/src/main/resources/hello_world_app_spec.yaml
+target/helloworld-provisioning-yarn-pkg/bin/app-launcher.sh org.apache.helix.provisioning.yarn.example.HelloWordAppSpecFactory /Users/kgopalak/Documents/projects/incubator-helix/recipes/helloworld-provisioning-yarn/src/main/resources/hello_world_app_spec.yaml
[10/50] [abbrv] git commit: Tests with independent task rebalancer
now pass
Posted by ka...@apache.org.
Tests with independent task rebalancer now pass
Project: http://git-wip-us.apache.org/repos/asf/helix/repo
Commit: http://git-wip-us.apache.org/repos/asf/helix/commit/18628345
Tree: http://git-wip-us.apache.org/repos/asf/helix/tree/18628345
Diff: http://git-wip-us.apache.org/repos/asf/helix/diff/18628345
Branch: refs/heads/master
Commit: 186283457b92894eb03349fa264a5c4dcb7dc7d9
Parents: 2709b07
Author: Kanak Biscuitwala <ka...@apache.org>
Authored: Fri Feb 21 12:33:49 2014 -0800
Committer: Kanak Biscuitwala <ka...@apache.org>
Committed: Fri Feb 21 12:33:49 2014 -0800
----------------------------------------------------------------------
.../helix/task/AbstractTaskRebalancer.java | 11 ++-
.../helix/task/IndependentTaskRebalancer.java | 5 +-
.../java/org/apache/helix/task/TaskConfig.java | 21 ++++--
.../java/org/apache/helix/task/TaskDriver.java | 23 ++++--
.../integration/task/TestTaskRebalancer.java | 77 ++++++++++++++++++--
.../integration/task/WorkflowGenerator.java | 15 ++--
6 files changed, 120 insertions(+), 32 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/helix/blob/18628345/helix-core/src/main/java/org/apache/helix/task/AbstractTaskRebalancer.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/AbstractTaskRebalancer.java b/helix-core/src/main/java/org/apache/helix/task/AbstractTaskRebalancer.java
index fa4c1e5..9a9538c 100644
--- a/helix-core/src/main/java/org/apache/helix/task/AbstractTaskRebalancer.java
+++ b/helix-core/src/main/java/org/apache/helix/task/AbstractTaskRebalancer.java
@@ -254,10 +254,13 @@ public abstract class AbstractTaskRebalancer implements HelixRebalancer {
continue;
}
- TaskPartitionState currState =
- TaskPartitionState.valueOf(currStateOutput.getCurrentState(
- ResourceId.from(taskResource), PartitionId.from(pName),
- ParticipantId.from(instance)).toString());
+ // Current state is either present or dropped
+ State currentState =
+ currStateOutput.getCurrentState(ResourceId.from(taskResource), PartitionId.from(pName),
+ ParticipantId.from(instance));
+ String currentStateStr =
+ currentState != null ? currentState.toString() : TaskPartitionState.DROPPED.toString();
+ TaskPartitionState currState = TaskPartitionState.valueOf(currentStateStr);
// Process any requested state transitions.
State reqS =
http://git-wip-us.apache.org/repos/asf/helix/blob/18628345/helix-core/src/main/java/org/apache/helix/task/IndependentTaskRebalancer.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/IndependentTaskRebalancer.java b/helix-core/src/main/java/org/apache/helix/task/IndependentTaskRebalancer.java
index 71ac912..80ec23c 100644
--- a/helix-core/src/main/java/org/apache/helix/task/IndependentTaskRebalancer.java
+++ b/helix-core/src/main/java/org/apache/helix/task/IndependentTaskRebalancer.java
@@ -70,15 +70,15 @@ public class IndependentTaskRebalancer extends AbstractTaskRebalancer {
states.put(State.from("ONLINE"), 1);
List<Integer> partitionNums = Lists.newArrayList(partitionSet);
Collections.sort(partitionNums);
+ final ResourceId resourceId = prevAssignment.getResourceId();
List<PartitionId> partitions =
new ArrayList<PartitionId>(Lists.transform(partitionNums,
new Function<Integer, PartitionId>() {
@Override
public PartitionId apply(Integer partitionNum) {
- return PartitionId.from(partitionNum.toString());
+ return PartitionId.from(resourceId, partitionNum.toString());
}
}));
- ResourceId resourceId = prevAssignment.getResourceId();
Map<PartitionId, Map<ParticipantId, State>> currentMapping = Maps.newHashMap();
for (PartitionId partitionId : currStateOutput.getCurrentStateMappedPartitions(resourceId)) {
currentMapping.put(partitionId, currStateOutput.getCurrentStateMap(resourceId, partitionId));
@@ -97,6 +97,7 @@ public class IndependentTaskRebalancer extends AbstractTaskRebalancer {
Map<String, SortedSet<Integer>> taskAssignment = Maps.newHashMap();
for (Map.Entry<String, List<String>> e : preferenceLists.entrySet()) {
String partitionName = e.getKey();
+ partitionName = String.valueOf(pId(partitionName));
List<String> preferenceList = e.getValue();
for (String participantName : preferenceList) {
if (!taskAssignment.containsKey(participantName)) {
http://git-wip-us.apache.org/repos/asf/helix/blob/18628345/helix-core/src/main/java/org/apache/helix/task/TaskConfig.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/TaskConfig.java b/helix-core/src/main/java/org/apache/helix/task/TaskConfig.java
index 8d1c4bb..2834e85 100644
--- a/helix-core/src/main/java/org/apache/helix/task/TaskConfig.java
+++ b/helix-core/src/main/java/org/apache/helix/task/TaskConfig.java
@@ -19,9 +19,6 @@ package org.apache.helix.task;
* under the License.
*/
-import com.google.common.base.Joiner;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableSet;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
@@ -32,6 +29,10 @@ import java.util.Set;
import org.apache.helix.task.Workflow.WorkflowEnum;
+import com.google.common.base.Joiner;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableSet;
+
/**
* Provides a typed interface to task configurations.
*/
@@ -134,7 +135,9 @@ public class TaskConfig {
cfgMap.put(TaskConfig.COMMAND, _command);
cfgMap.put(TaskConfig.COMMAND_CONFIG, _commandConfig);
cfgMap.put(TaskConfig.TARGET_RESOURCE, _targetResource);
- cfgMap.put(TaskConfig.TARGET_PARTITION_STATES, Joiner.on(",").join(_targetPartitionStates));
+ if (_targetPartitionStates != null) {
+ cfgMap.put(TaskConfig.TARGET_PARTITION_STATES, Joiner.on(",").join(_targetPartitionStates));
+ }
if (_targetPartitions != null) {
cfgMap.put(TaskConfig.TARGET_PARTITIONS, Joiner.on(",").join(_targetPartitions));
}
@@ -252,11 +255,13 @@ public class TaskConfig {
}
private void validate() {
- if (_targetResource == null) {
- throw new IllegalArgumentException(String.format("%s cannot be null", TARGET_RESOURCE));
+ if (_targetResource == null && (_targetPartitions == null || _targetPartitions.isEmpty())) {
+ throw new IllegalArgumentException(String.format(
+ "%s cannot be null without specified partitions", TARGET_RESOURCE));
}
- if (_targetPartitionStates != null && _targetPartitionStates.isEmpty()) {
- throw new IllegalArgumentException(String.format("%s cannot be an empty set",
+ if (_targetResource != null && _targetPartitionStates != null
+ && _targetPartitionStates.isEmpty()) {
+ throw new IllegalArgumentException(String.format("%s cannot be empty",
TARGET_PARTITION_STATES));
}
if (_command == null) {
http://git-wip-us.apache.org/repos/asf/helix/blob/18628345/helix-core/src/main/java/org/apache/helix/task/TaskDriver.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/TaskDriver.java b/helix-core/src/main/java/org/apache/helix/task/TaskDriver.java
index 5189fb7..17e7542 100644
--- a/helix-core/src/main/java/org/apache/helix/task/TaskDriver.java
+++ b/helix-core/src/main/java/org/apache/helix/task/TaskDriver.java
@@ -25,6 +25,7 @@ import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
+
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.GnuParser;
@@ -40,6 +41,7 @@ import org.apache.helix.HelixManager;
import org.apache.helix.HelixManagerFactory;
import org.apache.helix.HelixProperty;
import org.apache.helix.InstanceType;
+import org.apache.helix.controller.rebalancer.HelixRebalancer;
import org.apache.helix.model.IdealState;
import org.apache.helix.model.builder.CustomModeISBuilder;
import org.apache.log4j.Logger;
@@ -157,10 +159,19 @@ public class TaskDriver {
/** Posts new task to cluster */
private void scheduleTask(String taskResource, TaskConfig taskConfig) throws Exception {
- // Set up task resource based on partitions from target resource
- int numPartitions =
- _admin.getResourceIdealState(_clusterName, taskConfig.getTargetResource())
- .getPartitionSet().size();
+ // Set up task resource based on partitions provided, or from target resource
+ int numPartitions;
+ List<Integer> partitions = taskConfig.getTargetPartitions();
+ String targetResource = taskConfig.getTargetResource();
+ if (partitions != null && !partitions.isEmpty()) {
+ numPartitions = partitions.size();
+ } else if (targetResource != null) {
+ numPartitions =
+ _admin.getResourceIdealState(_clusterName, taskConfig.getTargetResource())
+ .getPartitionSet().size();
+ } else {
+ numPartitions = 0;
+ }
_admin.addResource(_clusterName, taskResource, numPartitions, TaskConstants.STATE_MODEL_NAME);
_admin.setConfig(TaskUtil.getResourceConfigScope(_clusterName, taskResource),
taskConfig.getResourceConfigMap());
@@ -175,7 +186,9 @@ public class TaskDriver {
builder.add(taskResource + "_" + i);
}
IdealState is = builder.build();
- is.setRebalancerClassName(TaskRebalancer.class.getName());
+ Class<? extends HelixRebalancer> rebalancerClass =
+ (targetResource != null) ? TaskRebalancer.class : IndependentTaskRebalancer.class;
+ is.setRebalancerClassName(rebalancerClass.getName());
_admin.setResourceIdealState(_clusterName, taskResource, is);
}
http://git-wip-us.apache.org/repos/asf/helix/blob/18628345/helix-core/src/test/java/org/apache/helix/integration/task/TestTaskRebalancer.java
----------------------------------------------------------------------
diff --git a/helix-core/src/test/java/org/apache/helix/integration/task/TestTaskRebalancer.java b/helix-core/src/test/java/org/apache/helix/integration/task/TestTaskRebalancer.java
index e9127a1..c221d96 100644
--- a/helix-core/src/test/java/org/apache/helix/integration/task/TestTaskRebalancer.java
+++ b/helix-core/src/test/java/org/apache/helix/integration/task/TestTaskRebalancer.java
@@ -19,20 +19,33 @@ package org.apache.helix.integration.task;
* under the License.
*/
-import com.google.common.base.Joiner;
-import com.google.common.collect.ImmutableList;
-
import java.util.HashMap;
-import java.util.Iterator;
import java.util.Map;
-
-import org.apache.helix.*;
-import org.apache.helix.controller.HelixControllerMain;
+import java.util.TreeMap;
+
+import org.apache.helix.AccessOption;
+import org.apache.helix.HelixDataAccessor;
+import org.apache.helix.HelixManager;
+import org.apache.helix.HelixManagerFactory;
+import org.apache.helix.InstanceType;
+import org.apache.helix.PropertyKey;
import org.apache.helix.integration.ZkIntegrationTestBase;
import org.apache.helix.integration.manager.ClusterControllerManager;
import org.apache.helix.integration.manager.MockParticipantManager;
import org.apache.helix.participant.StateMachineEngine;
-import org.apache.helix.task.*;
+import org.apache.helix.task.Task;
+import org.apache.helix.task.TaskConfig;
+import org.apache.helix.task.TaskConstants;
+import org.apache.helix.task.TaskContext;
+import org.apache.helix.task.TaskDriver;
+import org.apache.helix.task.TaskFactory;
+import org.apache.helix.task.TaskPartitionState;
+import org.apache.helix.task.TaskResult;
+import org.apache.helix.task.TaskState;
+import org.apache.helix.task.TaskStateModelFactory;
+import org.apache.helix.task.TaskUtil;
+import org.apache.helix.task.Workflow;
+import org.apache.helix.task.WorkflowContext;
import org.apache.helix.tools.ClusterSetup;
import org.apache.helix.tools.ClusterStateVerifier;
import org.testng.Assert;
@@ -40,6 +53,9 @@ import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
+import com.google.common.base.Joiner;
+import com.google.common.collect.ImmutableList;
+
public class TestTaskRebalancer extends ZkIntegrationTestBase {
private static final int n = 5;
private static final int START_PORT = 12918;
@@ -265,6 +281,51 @@ public class TestTaskRebalancer extends ZkIntegrationTestBase {
Assert.assertEquals(maxAttempts, 2);
}
+ @Test
+ public void testIndependentTask() throws Exception {
+ final String taskResource = "independentTask";
+ Map<String, String> config = new TreeMap<String, String>();
+ config.put("TargetPartitions", "0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19");
+ config.put("Command", "Reindex");
+ config.put("CommandConfig", String.valueOf(200));
+ config.put("TimeoutPerPartition", String.valueOf(10 * 1000));
+ Workflow flow =
+ WorkflowGenerator.generateSingleTaskWorkflowBuilder(taskResource, config).build();
+ _driver.start(flow);
+
+ // Wait for task completion
+ TestUtil.pollForWorkflowState(_manager, taskResource, TaskState.COMPLETED);
+
+ // Ensure all partitions are completed individually
+ TaskContext ctx =
+ TaskUtil.getTaskContext(_manager, TaskUtil.getNamespacedTaskName(taskResource));
+ for (int i = 0; i < NUM_PARTITIONS; i++) {
+ Assert.assertEquals(ctx.getPartitionState(i), TaskPartitionState.COMPLETED);
+ Assert.assertEquals(ctx.getPartitionNumAttempts(i), 1);
+ }
+ }
+
+ @Test
+ public void testIndependentRepeatedWorkflow() throws Exception {
+ final String workflowName = "independentTaskWorkflow";
+ Map<String, String> config = new TreeMap<String, String>();
+ config.put("TargetPartitions", "0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19");
+ config.put("Command", "Reindex");
+ config.put("CommandConfig", String.valueOf(200));
+ config.put("TimeoutPerPartition", String.valueOf(10 * 1000));
+ Workflow flow =
+ WorkflowGenerator.generateRepeatedTaskWorkflowBuilder(workflowName, config).build();
+ new TaskDriver(_manager).start(flow);
+
+ // Wait until the task completes
+ TestUtil.pollForWorkflowState(_manager, workflowName, TaskState.COMPLETED);
+
+ // Assert completion for all tasks within two minutes
+ for (String task : flow.getTaskConfigs().keySet()) {
+ TestUtil.pollForTaskState(_manager, workflowName, task, TaskState.COMPLETED);
+ }
+ }
+
private static class ReindexTask implements Task {
private final long _delay;
private volatile boolean _canceled;
http://git-wip-us.apache.org/repos/asf/helix/blob/18628345/helix-core/src/test/java/org/apache/helix/integration/task/WorkflowGenerator.java
----------------------------------------------------------------------
diff --git a/helix-core/src/test/java/org/apache/helix/integration/task/WorkflowGenerator.java b/helix-core/src/test/java/org/apache/helix/integration/task/WorkflowGenerator.java
index 0d7251a..653d88a 100644
--- a/helix-core/src/test/java/org/apache/helix/integration/task/WorkflowGenerator.java
+++ b/helix-core/src/test/java/org/apache/helix/integration/task/WorkflowGenerator.java
@@ -19,12 +19,12 @@ package org.apache.helix.integration.task;
* under the License.
*/
-import org.apache.helix.task.Workflow;
-
import java.util.Collections;
import java.util.Map;
import java.util.TreeMap;
+import org.apache.helix.task.Workflow;
+
/**
* Convenience class for generating various test workflows
*/
@@ -72,12 +72,17 @@ public class WorkflowGenerator {
}
public static Workflow.Builder generateDefaultRepeatedTaskWorkflowBuilder(String workflowName) {
+ return generateRepeatedTaskWorkflowBuilder(workflowName, DEFAULT_TASK_CONFIG);
+ }
+
+ public static Workflow.Builder generateRepeatedTaskWorkflowBuilder(String workflowName,
+ Map<String, String> config) {
Workflow.Builder builder = new Workflow.Builder(workflowName);
builder.addParentChildDependency(TASK_NAME_1, TASK_NAME_2);
- for (String key : DEFAULT_TASK_CONFIG.keySet()) {
- builder.addConfig(TASK_NAME_1, key, DEFAULT_TASK_CONFIG.get(key));
- builder.addConfig(TASK_NAME_2, key, DEFAULT_TASK_CONFIG.get(key));
+ for (String key : config.keySet()) {
+ builder.addConfig(TASK_NAME_1, key, config.get(key));
+ builder.addConfig(TASK_NAME_2, key, config.get(key));
}
return builder;
[06/50] [abbrv] git commit: Almost complete working example of
Helloworld
Posted by ka...@apache.org.
Almost complete working example of Helloworld
Project: http://git-wip-us.apache.org/repos/asf/helix/repo
Commit: http://git-wip-us.apache.org/repos/asf/helix/commit/8b19cfc7
Tree: http://git-wip-us.apache.org/repos/asf/helix/tree/8b19cfc7
Diff: http://git-wip-us.apache.org/repos/asf/helix/diff/8b19cfc7
Branch: refs/heads/master
Commit: 8b19cfc77b0ddd6bc90dcb034cfbd9b983ff2932
Parents: 57b4b18
Author: Kishore Gopalakrishna <g....@gmail.com>
Authored: Thu Feb 20 22:08:18 2014 -0800
Committer: Kishore Gopalakrishna <g....@gmail.com>
Committed: Thu Feb 20 22:08:18 2014 -0800
----------------------------------------------------------------------
.../controller/provisioner/ContainerSpec.java | 19 ++++--
.../stages/ContainerProvisioningStage.java | 23 ++++---
.../manager/zk/AbstractParticipantService.java | 68 ++++++++++++++-----
.../integration/TestLocalContainerProvider.java | 4 +-
.../provisioning/yarn/ApplicationSpec.java | 4 +-
.../yarn/HelixYarnApplicationMasterMain.java | 40 ++++++-----
.../helix/provisioning/yarn/ServiceConfig.java | 14 ++--
.../yarn/YamlApplicationSpecFactory.java | 70 --------------------
.../provisioning/yarn/YarnProvisioner.java | 53 ++++++---------
.../yarn/example/HelloWorldService.java | 40 +++++++----
.../yarn/example/HelloworldAppSpec.java | 23 +++----
.../main/resources/hello_world_app_spec.yaml | 3 +-
12 files changed, 177 insertions(+), 184 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/helix/blob/8b19cfc7/helix-core/src/main/java/org/apache/helix/controller/provisioner/ContainerSpec.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/controller/provisioner/ContainerSpec.java b/helix-core/src/main/java/org/apache/helix/controller/provisioner/ContainerSpec.java
index 4d3a521..ab3c46a 100644
--- a/helix-core/src/main/java/org/apache/helix/controller/provisioner/ContainerSpec.java
+++ b/helix-core/src/main/java/org/apache/helix/controller/provisioner/ContainerSpec.java
@@ -1,5 +1,7 @@
package org.apache.helix.controller.provisioner;
+import org.apache.helix.api.id.ParticipantId;
+
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
@@ -27,8 +29,10 @@ public class ContainerSpec {
int _memory;
- public ContainerSpec(ContainerId containerId) {
- this._containerId = containerId;
+ private ParticipantId _participantId;
+
+ public ContainerSpec(ParticipantId _participantId) {
+ this._participantId = _participantId;
}
public ContainerId getContainerId() {
@@ -37,7 +41,7 @@ public class ContainerSpec {
@Override
public String toString() {
- return _containerId.toString();
+ return _participantId.toString();
}
public void setMemory(int memory){
@@ -49,6 +53,13 @@ public class ContainerSpec {
}
public static ContainerSpec from(String serialized) {
- return new ContainerSpec(ContainerId.from(serialized));
+ //todo
+ return null;
+ //return new ContainerSpec(ContainerId.from(serialized));
}
+
+ public ParticipantId getParticipantId() {
+ return _participantId;
+ }
+
}
http://git-wip-us.apache.org/repos/asf/helix/blob/8b19cfc7/helix-core/src/main/java/org/apache/helix/controller/stages/ContainerProvisioningStage.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/controller/stages/ContainerProvisioningStage.java b/helix-core/src/main/java/org/apache/helix/controller/stages/ContainerProvisioningStage.java
index 42c8218..f7105d1 100644
--- a/helix-core/src/main/java/org/apache/helix/controller/stages/ContainerProvisioningStage.java
+++ b/helix-core/src/main/java/org/apache/helix/controller/stages/ContainerProvisioningStage.java
@@ -21,6 +21,7 @@ package org.apache.helix.controller.stages;
import java.util.Collection;
import java.util.HashMap;
+import java.util.List;
import java.util.Map;
import org.apache.helix.HelixAdmin;
@@ -121,16 +122,17 @@ public class ContainerProvisioningStage extends AbstractBaseStage {
// allocate new containers
for (final ContainerSpec spec : response.getContainersToAcquire()) {
- // random participant id
- final ParticipantId participantId = ParticipantId.from(spec.getContainerId().stringify());
- // create a new Participant, attach the container spec
- InstanceConfig instanceConfig = new InstanceConfig(participantId);
- instanceConfig.setContainerSpec(spec);
- // create a helix_participant in ACQUIRING state
- instanceConfig.setContainerState(ContainerState.ACQUIRING);
- // create the helix participant and add it to cluster
- helixAdmin.addInstance(cluster.getId().toString(), instanceConfig);
-
+ final ParticipantId participantId = spec.getParticipantId();
+ List<String> instancesInCluster = helixAdmin.getInstancesInCluster(cluster.getId().stringify());
+ if (!instancesInCluster.contains(participantId.stringify())) {
+ // create a new Participant, attach the container spec
+ InstanceConfig instanceConfig = new InstanceConfig(participantId);
+ instanceConfig.setContainerSpec(spec);
+ // create a helix_participant in ACQUIRING state
+ instanceConfig.setContainerState(ContainerState.ACQUIRING);
+ // create the helix participant and add it to cluster
+ helixAdmin.addInstance(cluster.getId().toString(), instanceConfig);
+ }
ListenableFuture<ContainerId> future = containerProvider.allocateContainer(spec);
FutureCallback<ContainerId> callback = new FutureCallback<ContainerId>() {
@Override
@@ -160,7 +162,6 @@ public class ContainerProvisioningStage extends AbstractBaseStage {
helixAdmin.getInstanceConfig(cluster.getId().toString(), participant.getId()
.toString());
final ContainerId containerId = existingInstance.getContainerId();
- existingInstance.setContainerId(containerId);
existingInstance.setContainerState(ContainerState.CONNECTING);
accessor.updateProperty(keyBuilder.instanceConfig(participant.getId().toString()),
existingInstance);
http://git-wip-us.apache.org/repos/asf/helix/blob/8b19cfc7/helix-core/src/main/java/org/apache/helix/manager/zk/AbstractParticipantService.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/manager/zk/AbstractParticipantService.java b/helix-core/src/main/java/org/apache/helix/manager/zk/AbstractParticipantService.java
index 2e5eafa..f515092 100644
--- a/helix-core/src/main/java/org/apache/helix/manager/zk/AbstractParticipantService.java
+++ b/helix-core/src/main/java/org/apache/helix/manager/zk/AbstractParticipantService.java
@@ -35,6 +35,7 @@ public abstract class AbstractParticipantService extends AbstractService {
private final ParticipantId _participantId;
private HelixParticipant _participant;
private HelixConnection _connection;
+ boolean initialized;
/**
* Initialize the service.
@@ -50,20 +51,22 @@ public abstract class AbstractParticipantService extends AbstractService {
}
@Override
- protected void doStart() {
+ protected final void doStart() {
_participant = _connection.createParticipant(_clusterId, _participantId);
// add a preconnect callback
_participant.addPreConnectCallback(new PreConnectCallback() {
@Override
public void onPreConnect() {
- onPreJoinCluster();
+ if (initialized) {
+ onReconnect();
+ } else {
+ init();
+ initialized = true;
+ }
}
});
- // register state machine and other initialization
- init();
-
// start and notify
if (!_connection.isConnected()) {
_connection.connect();
@@ -73,34 +76,67 @@ public abstract class AbstractParticipantService extends AbstractService {
}
@Override
- protected void doStop() {
+ protected final void doStop() {
_participant.stop();
notifyStopped();
}
/**
- * Initialize the participant. For example, here is where you can register a state machine: <br/>
+ * Invoked when the connection to ZooKeeper is re-established. A typical scenario is a long
+ * GC pause that causes the node to disconnect from the cluster and then reconnect.
+ * NOTE: When the service disconnects, all of its state is reset to the initial state.
+ */
+ protected void onReconnect() {
+ // default implementation does nothing.
+ }
+
+ /**
+ * Initialize the participant. For example, here is where you can
+ * <ul>
+ * <li>Read the configuration of the cluster, resource, and node</li>
+ * <li>Register a state machine: <br/>
* <br/>
* <code>
* HelixParticipant participant = getParticipant();
* participant.getStateMachineEngine().registerStateModelFactory(stateModelDefId, factory);
* </code><br/>
* <br/>
- * This code is called prior to starting the participant.
+ * </li>
+ * </ul>
+ * This code is called after connecting to zookeeper but before creating the liveinstance.
*/
- public abstract void init();
-
- /**
- * Complete any tasks that require a live Helix connection. This function is called before the
- * participant declares itself ready to receive state transitions.
- */
- public abstract void onPreJoinCluster();
+ protected abstract void init();
/**
* Get an instantiated participant instance.
* @return HelixParticipant
*/
- public HelixParticipant getParticipant() {
+ protected HelixParticipant getParticipant() {
return _participant;
}
+
+ /**
+ * @return ClusterId
+ * @see {@link ClusterId}
+ */
+ public ClusterId getClusterId() {
+ return _clusterId;
+ }
+
+ /**
+ * @see {@link ParticipantId}
+ * @return ParticipantId
+ */
+ public ParticipantId getParticipantId() {
+ return _participantId;
+ }
+
+ /**
+ * @see {@link HelixConnection}
+ * @return HelixConnection
+ */
+ public HelixConnection getConnection() {
+ return _connection;
+ }
+
}
http://git-wip-us.apache.org/repos/asf/helix/blob/8b19cfc7/helix-core/src/test/java/org/apache/helix/integration/TestLocalContainerProvider.java
----------------------------------------------------------------------
diff --git a/helix-core/src/test/java/org/apache/helix/integration/TestLocalContainerProvider.java b/helix-core/src/test/java/org/apache/helix/integration/TestLocalContainerProvider.java
index 0f7be64..f4153cc 100644
--- a/helix-core/src/test/java/org/apache/helix/integration/TestLocalContainerProvider.java
+++ b/helix-core/src/test/java/org/apache/helix/integration/TestLocalContainerProvider.java
@@ -277,8 +277,8 @@ public class TestLocalContainerProvider extends ZkUnitTestBase {
List<ContainerSpec> containersToAcquire = Lists.newArrayList();
boolean asked = false;
if (_askCount < MAX_PARTICIPANTS) {
- containersToAcquire.add(new ContainerSpec(ContainerId.from("container" + _askCount)));
- containersToAcquire.add(new ContainerSpec(ContainerId.from("container" + (_askCount + 1))));
+ containersToAcquire.add(new ContainerSpec(ParticipantId.from("container" + _askCount)));
+ containersToAcquire.add(new ContainerSpec(ParticipantId.from("container" + (_askCount + 1))));
asked = true;
}
List<Participant> containersToStart = Lists.newArrayList();
http://git-wip-us.apache.org/repos/asf/helix/blob/8b19cfc7/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/ApplicationSpec.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/ApplicationSpec.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/ApplicationSpec.java
index e104578..285d036 100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/ApplicationSpec.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/ApplicationSpec.java
@@ -3,8 +3,6 @@ package org.apache.helix.provisioning.yarn;
import java.net.URI;
import java.util.List;
-import org.apache.helix.api.config.ParticipantConfig;
-import org.apache.helix.api.id.ParticipantId;
public interface ApplicationSpec {
/**
@@ -23,7 +21,7 @@ public interface ApplicationSpec {
String getServiceMainClass(String service);
- ParticipantConfig getParticipantConfig(String serviceName, ParticipantId participantId);
+ ServiceConfig getServiceConfig(String serviceName);
List<TaskConfig> getTaskConfigs();
http://git-wip-us.apache.org/repos/asf/helix/blob/8b19cfc7/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/HelixYarnApplicationMasterMain.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/HelixYarnApplicationMasterMain.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/HelixYarnApplicationMasterMain.java
index 058b384..33183c7 100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/HelixYarnApplicationMasterMain.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/HelixYarnApplicationMasterMain.java
@@ -46,7 +46,7 @@ public class HelixYarnApplicationMasterMain {
public static void main(String[] args) throws Exception {
Map<String, String> env = System.getenv();
LOG.info("Starting app master with the following environment variables");
- for(String key: env.keySet()){
+ for (String key : env.keySet()) {
LOG.info(key + "\t\t=" + env.get(key));
}
int numContainers = 1;
@@ -93,11 +93,11 @@ public class HelixYarnApplicationMasterMain {
YarnProvisioner.applicationMaster = genericApplicationMaster;
YarnProvisioner.applicationMasterConfig = appMasterConfig;
- YarnProvisioner.applicationSpec = factory.fromYaml(new FileInputStream(configFile));
+ ApplicationSpec applicationSpec = factory.fromYaml(new FileInputStream(configFile));
+ YarnProvisioner.applicationSpec = applicationSpec;
String zkAddress = appMasterConfig.getZKAddress();
String clusterName = appMasterConfig.getAppName();
-
- String resourceName = "HelloWorld";
+
// CREATE CLUSTER and setup the resources
// connect
ZkHelixConnection connection = new ZkHelixConnection(zkAddress);
@@ -110,17 +110,27 @@ public class HelixYarnApplicationMasterMain {
new StateModelDefinition(StateModelConfigGenerator.generateConfigForStatelessService());
clusterAccessor.createCluster(new ClusterConfig.Builder(clusterId).addStateModelDefinition(
statelessService).build());
-
- // add the resource with the local provisioner
- ResourceId resourceId = ResourceId.from(resourceName);
- YarnProvisionerConfig provisionerConfig = new YarnProvisionerConfig(resourceId);
- provisionerConfig.setNumContainers(numContainers);
- RebalancerConfig rebalancerConfig =
- new FullAutoRebalancerConfig.Builder(resourceId).stateModelDefId(
- statelessService.getStateModelDefId()).build();
- clusterAccessor.addResourceToCluster(new ResourceConfig.Builder(ResourceId.from(resourceName))
- .provisionerConfig(provisionerConfig).rebalancerConfig(rebalancerConfig).build());
-
+ for (String service : applicationSpec.getServices()) {
+ String resourceName = service;
+ // add the resource with the local provisioner
+ ResourceId resourceId = ResourceId.from(resourceName);
+ YarnProvisionerConfig provisionerConfig = new YarnProvisionerConfig(resourceId);
+ ServiceConfig serviceConfig = applicationSpec.getServiceConfig(resourceName);
+ provisionerConfig.setNumContainers(serviceConfig.getIntField("num_containers", 1));
+ serviceConfig.setSimpleField("service_name", service);
+ FullAutoRebalancerConfig.Builder rebalancerConfigBuilder =
+ new FullAutoRebalancerConfig.Builder(resourceId);
+ RebalancerConfig rebalancerConfig =
+ rebalancerConfigBuilder.stateModelDefId(statelessService.getStateModelDefId())//
+ .build();
+ ResourceConfig.Builder resourceConfigBuilder =
+ new ResourceConfig.Builder(ResourceId.from(resourceName));
+ ResourceConfig resourceConfig = resourceConfigBuilder.provisionerConfig(provisionerConfig) //
+ .rebalancerConfig(rebalancerConfig) //
+ .userConfig(serviceConfig) //
+ .build();
+ clusterAccessor.addResourceToCluster(resourceConfig);
+ }
// start controller
ControllerId controllerId = ControllerId.from("controller1");
HelixController controller = connection.createController(clusterId, controllerId);
http://git-wip-us.apache.org/repos/asf/helix/blob/8b19cfc7/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/ServiceConfig.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/ServiceConfig.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/ServiceConfig.java
index 4d9173e..87b5f12 100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/ServiceConfig.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/ServiceConfig.java
@@ -3,11 +3,15 @@ package org.apache.helix.provisioning.yarn;
import java.util.HashMap;
import java.util.Map;
-public class ServiceConfig {
+import org.apache.helix.api.Scope;
+import org.apache.helix.api.config.UserConfig;
+import org.apache.helix.api.id.ResourceId;
+
+public class ServiceConfig extends UserConfig{
public Map<String, String> config = new HashMap<String, String>();
- public String getValue(String key) {
- return (config != null ? config.get(key) : null);
- }
-
+ public ServiceConfig(Scope<ResourceId> scope) {
+ super(scope);
+ }
+
}
http://git-wip-us.apache.org/repos/asf/helix/blob/8b19cfc7/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/YamlApplicationSpecFactory.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/YamlApplicationSpecFactory.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/YamlApplicationSpecFactory.java
deleted file mode 100644
index e87a5c2..0000000
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/YamlApplicationSpecFactory.java
+++ /dev/null
@@ -1,70 +0,0 @@
-package org.apache.helix.provisioning.yarn;
-
-import java.io.InputStream;
-import java.net.URI;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.helix.api.config.ParticipantConfig;
-import org.apache.helix.api.id.ParticipantId;
-import org.yaml.snakeyaml.Yaml;
-
-class DefaultApplicationSpec implements ApplicationSpec {
- public String appName;
- public Integer minContainers;
- public Integer maxContainers;
-
- public AppConfig appConfig;
-
- public List<String> services;
- public Map<String, ServiceConfig> serviceConfigMap;
-
- @Override
- public String getAppName() {
- return appName;
- }
-
- @Override
- public AppConfig getConfig() {
- return appConfig;
- }
-
- @Override
- public List<String> getServices() {
- return services;
- }
-
- @Override
- public URI getServicePackage(String serviceName) {
- return null;
- }
-
- @Override
- public ParticipantConfig getParticipantConfig(String serviceName, ParticipantId participantId) {
- return null;
- }
-
- @Override
- public List<TaskConfig> getTaskConfigs() {
- return null;
- }
-
- @Override
- public URI getAppMasterPackage() {
- // TODO Auto-generated method stub
- return null;
- }
-
- @Override
- public String getServiceMainClass(String service) {
- // TODO Auto-generated method stub
- return null;
- }
-}
-
-public class YamlApplicationSpecFactory {
- ApplicationSpec fromYaml(InputStream input) {
- Yaml yaml = new Yaml();
- return yaml.loadAs(input, DefaultApplicationSpec.class);
- }
-}
http://git-wip-us.apache.org/repos/asf/helix/blob/8b19cfc7/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/YarnProvisioner.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/YarnProvisioner.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/YarnProvisioner.java
index daac87b..8fd308e 100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/YarnProvisioner.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/YarnProvisioner.java
@@ -210,8 +210,7 @@ public class YarnProvisioner implements Provisioner, TargetProvider, ContainerPr
vargs.add("--cluster " + appName);
vargs.add("--participantId " + participant.getId().stringify());
vargs.add("--participantClass " + mainClass);
- ;
-
+
vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/ContainerParticipant.stdout");
vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/ContainerParticipant.stderr");
@@ -262,13 +261,13 @@ public class YarnProvisioner implements Provisioner, TargetProvider, ContainerPr
int targetNumContainers = provisionerConfig.getNumContainers();
// Any container that is in a state should be put in this set
- Set<ContainerId> existingContainersIdSet = new HashSet<ContainerId>();
+ Set<ParticipantId> existingContainersIdSet = new HashSet<ParticipantId>();
// Cache halted containers to determine which to restart and which to release
- Map<ContainerId, Participant> excessHaltedContainers = Maps.newHashMap();
+ Map<ParticipantId, Participant> excessHaltedContainers = Maps.newHashMap();
// Cache participants to ensure that excess participants are stopped
- Map<ContainerId, Participant> excessActiveContainers = Maps.newHashMap();
+ Map<ParticipantId, Participant> excessActiveContainers = Maps.newHashMap();
for (Participant participant : participants) {
ContainerConfig containerConfig = participant.getContainerConfig();
@@ -276,35 +275,35 @@ public class YarnProvisioner implements Provisioner, TargetProvider, ContainerPr
ContainerState state = containerConfig.getState();
switch (state) {
case ACQUIRING:
- existingContainersIdSet.add(containerConfig.getId());
+ existingContainersIdSet.add(participant.getId());
break;
case ACQUIRED:
// acquired containers are ready to start
- existingContainersIdSet.add(containerConfig.getId());
+ existingContainersIdSet.add(participant.getId());
containersToStart.add(participant);
break;
case CONNECTING:
- existingContainersIdSet.add(containerConfig.getId());
+ existingContainersIdSet.add(participant.getId());
break;
case CONNECTED:
// active containers can be stopped or kept active
- existingContainersIdSet.add(containerConfig.getId());
- excessActiveContainers.put(containerConfig.getId(), participant);
+ existingContainersIdSet.add(participant.getId());
+ excessActiveContainers.put(participant.getId(), participant);
break;
case DISCONNECTED:
// disconnected containers must be stopped
- existingContainersIdSet.add(containerConfig.getId());
+ existingContainersIdSet.add(participant.getId());
containersToStop.add(participant);
case HALTING:
- existingContainersIdSet.add(containerConfig.getId());
+ existingContainersIdSet.add(participant.getId());
break;
case HALTED:
// halted containers can be released or restarted
- existingContainersIdSet.add(containerConfig.getId());
- excessHaltedContainers.put(containerConfig.getId(), participant);
+ existingContainersIdSet.add(participant.getId());
+ excessHaltedContainers.put(participant.getId(), participant);
break;
case FINALIZING:
- existingContainersIdSet.add(containerConfig.getId());
+ existingContainersIdSet.add(participant.getId());
break;
case FINALIZED:
break;
@@ -316,29 +315,21 @@ public class YarnProvisioner implements Provisioner, TargetProvider, ContainerPr
default:
break;
}
- ContainerId containerId = containerConfig.getId();
- if (containerId != null) {
- // _containerParticipants.put(containerId, participant.getId());
- // _states.put(containerId, state);
- }
}
}
for (int i = 0; i < targetNumContainers; i++) {
- ContainerId containerId = ContainerId.from(resourceId + "_container_" + (i));
- excessActiveContainers.remove(containerId); // don't stop this container if active
- if (excessHaltedContainers.containsKey(containerId)) {
+ ParticipantId participantId = ParticipantId.from(resourceId + "_container_" + (i));
+ excessActiveContainers.remove(participantId); // don't stop this container if active
+ if (excessHaltedContainers.containsKey(participantId)) {
// Halted containers can be restarted if necessary
- Participant participant = excessHaltedContainers.get(containerId);
+ Participant participant = excessHaltedContainers.get(participantId);
containersToStart.add(participant);
- excessHaltedContainers.remove(containerId); // don't release this container
- } else if (!existingContainersIdSet.contains(containerId)) {
+ excessHaltedContainers.remove(participantId); // don't release this container
+ } else if (!existingContainersIdSet.contains(participantId)) {
// Unallocated containers must be allocated
- ContainerSpec containerSpec = new ContainerSpec(containerId);
- ParticipantId participantId = ParticipantId.from(containerId.stringify());
- ParticipantConfig participantConfig =
- applicationSpec.getParticipantConfig(resourceId.stringify(), participantId);
- containerSpec.setMemory(participantConfig.getUserConfig().getIntField("memory", 1024));
+ ContainerSpec containerSpec = new ContainerSpec(participantId);
+ containerSpec.setMemory(_resourceConfig.getUserConfig().getIntField("memory", 1024));
containersToAcquire.add(containerSpec);
}
}
http://git-wip-us.apache.org/repos/asf/helix/blob/8b19cfc7/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldService.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldService.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldService.java
index 614be36..f65fd5d 100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldService.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldService.java
@@ -1,28 +1,40 @@
package org.apache.helix.provisioning.yarn.example;
import org.apache.helix.HelixConnection;
+import org.apache.helix.api.accessor.ResourceAccessor;
+import org.apache.helix.api.config.UserConfig;
import org.apache.helix.api.id.ClusterId;
import org.apache.helix.api.id.ParticipantId;
+import org.apache.helix.api.id.ResourceId;
import org.apache.helix.api.id.StateModelDefId;
import org.apache.helix.manager.zk.AbstractParticipantService;
-
+import org.apache.log4j.Logger;
public class HelloWorldService extends AbstractParticipantService {
- public HelloWorldService(HelixConnection connection, ClusterId clusterId,
- ParticipantId participantId) {
- super(connection, clusterId, participantId);
- }
-
- @Override
- public void init() {
- HelloWorldStateModelFactory stateModelFactory = new HelloWorldStateModelFactory();
- getParticipant().getStateMachineEngine().registerStateModelFactory(StateModelDefId.from("StatelessService"), stateModelFactory);
- }
+ private static Logger LOG = Logger.getLogger(AbstractParticipantService.class);
+
+ static String SERVICE_NAME = "HelloWorld";
+
+ public HelloWorldService(HelixConnection connection, ClusterId clusterId,
+ ParticipantId participantId) {
+ super(connection, clusterId, participantId);
+ }
+ /**
+ * init method to setup appropriate call back handlers.
+ */
@Override
- public void onPreJoinCluster() {
- //this will be invoked prior to
+ public void init() {
+ ClusterId clusterId = getClusterId();
+ ResourceAccessor resourceAccessor = getConnection().createResourceAccessor(clusterId);
+ UserConfig serviceConfig = resourceAccessor.readUserConfig(ResourceId.from(SERVICE_NAME));
+ LOG.info("Starting service:" + SERVICE_NAME + " with configuration:" + serviceConfig);
+
+ HelloWorldStateModelFactory stateModelFactory = new HelloWorldStateModelFactory();
+ getParticipant().getStateMachineEngine().registerStateModelFactory(
+ StateModelDefId.from("StatelessService"), stateModelFactory);
+
}
-}
+}
http://git-wip-us.apache.org/repos/asf/helix/blob/8b19cfc7/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/example/HelloworldAppSpec.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/example/HelloworldAppSpec.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/example/HelloworldAppSpec.java
index 2e4cd75..e22c7b2 100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/example/HelloworldAppSpec.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/example/HelloworldAppSpec.java
@@ -7,27 +7,31 @@ import java.util.Map;
import org.apache.helix.api.Scope;
import org.apache.helix.api.config.ParticipantConfig;
+import org.apache.helix.api.config.ResourceConfig;
+import org.apache.helix.api.config.ResourceConfig.Builder;
import org.apache.helix.api.config.UserConfig;
import org.apache.helix.api.id.ParticipantId;
+import org.apache.helix.api.id.ResourceId;
import org.apache.helix.provisioning.yarn.AppConfig;
import org.apache.helix.provisioning.yarn.ApplicationSpec;
+import org.apache.helix.provisioning.yarn.ServiceConfig;
import org.apache.helix.provisioning.yarn.TaskConfig;
public class HelloworldAppSpec implements ApplicationSpec {
- private String _appName;
+ public String _appName;
- private AppConfig _appConfig;
+ public AppConfig _appConfig;
- private List<String> _services;
+ public List<String> _services;
private String _appMasterPackageUri;
-
+
private Map<String, String> _servicePackageURIMap;
private Map<String, String> _serviceMainClassMap;
- private Map<String,Map<String,String>> _serviceConfigMap;
+ private Map<String, Map<String, String>> _serviceConfigMap;
private List<TaskConfig> _taskConfigs;
@@ -122,13 +126,8 @@ public class HelloworldAppSpec implements ApplicationSpec {
}
@Override
- public ParticipantConfig getParticipantConfig(String serviceName, ParticipantId participantId) {
- ParticipantConfig.Builder builder = new ParticipantConfig.Builder(participantId);
- Scope<ParticipantId> scope = Scope.participant(participantId);
- UserConfig userConfig = new UserConfig(scope);
- Map<String, String> map = _serviceConfigMap.get(serviceName);
- userConfig.setSimpleFields(map);
- return builder.addTag(serviceName).userConfig(userConfig ).build();
+ public ServiceConfig getServiceConfig(String serviceName) {
+ return new ServiceConfig(Scope.resource(ResourceId.from(serviceName)));
}
@Override
http://git-wip-us.apache.org/repos/asf/helix/blob/8b19cfc7/helix-provisioning/src/main/resources/hello_world_app_spec.yaml
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/resources/hello_world_app_spec.yaml b/helix-provisioning/src/main/resources/hello_world_app_spec.yaml
index 648104a..1d4f1b7 100644
--- a/helix-provisioning/src/main/resources/hello_world_app_spec.yaml
+++ b/helix-provisioning/src/main/resources/hello_world_app_spec.yaml
@@ -7,7 +7,8 @@ appMasterPackageUri: 'file:///Users/kgopalak/Documents/projects/incubator-helix/
appName: testApp
serviceConfigMap:
HelloWorld: {
- k1: v1
+ num_containers: 3,
+ memory: 1024
}
serviceMainClassMap: {
HelloWorld: org.apache.helix.provisioning.yarn.example.HelloWorldService
[08/50] [abbrv] git commit: Check live instance to decide when to
mark as connected or disconnected
Posted by ka...@apache.org.
Check live instance to decide when to mark as connected or disconnected
Project: http://git-wip-us.apache.org/repos/asf/helix/repo
Commit: http://git-wip-us.apache.org/repos/asf/helix/commit/0037b745
Tree: http://git-wip-us.apache.org/repos/asf/helix/tree/0037b745
Diff: http://git-wip-us.apache.org/repos/asf/helix/diff/0037b745
Branch: refs/heads/master
Commit: 0037b745aba1ff947d99f33b5683c86b2aacff0f
Parents: 970770a
Author: Kanak Biscuitwala <ka...@apache.org>
Authored: Thu Feb 20 23:41:53 2014 -0800
Committer: Kanak Biscuitwala <ka...@apache.org>
Committed: Thu Feb 20 23:41:53 2014 -0800
----------------------------------------------------------------------
.../stages/ContainerProvisioningStage.java | 29 +++++++++++++++++---
1 file changed, 25 insertions(+), 4 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/helix/blob/0037b745/helix-core/src/main/java/org/apache/helix/controller/stages/ContainerProvisioningStage.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/controller/stages/ContainerProvisioningStage.java b/helix-core/src/main/java/org/apache/helix/controller/stages/ContainerProvisioningStage.java
index f7105d1..5cccd68 100644
--- a/helix-core/src/main/java/org/apache/helix/controller/stages/ContainerProvisioningStage.java
+++ b/helix-core/src/main/java/org/apache/helix/controller/stages/ContainerProvisioningStage.java
@@ -30,6 +30,7 @@ import org.apache.helix.HelixManager;
import org.apache.helix.PropertyKey;
import org.apache.helix.api.Cluster;
import org.apache.helix.api.Participant;
+import org.apache.helix.api.config.ContainerConfig;
import org.apache.helix.api.config.ResourceConfig;
import org.apache.helix.api.id.ParticipantId;
import org.apache.helix.api.id.ResourceId;
@@ -104,7 +105,27 @@ public class ContainerProvisioningStage extends AbstractBaseStage {
final Cluster cluster = event.getAttribute("ClusterDataCache");
final Collection<Participant> participants = cluster.getParticipantMap().values();
- // TODO: if a process died, we need to mark it as stopped or something
+ // If a process died, we need to mark it as DISCONNECTED or if the process is ready, mark as
+ // CONNECTED
+ Map<ParticipantId, Participant> participantMap = cluster.getParticipantMap();
+ for (ParticipantId participantId : participantMap.keySet()) {
+ Participant participant = participantMap.get(participantId);
+ ContainerConfig config = participant.getContainerConfig();
+ if (config != null) {
+ ContainerState containerState = config.getState();
+ if (!participant.isAlive() && ContainerState.CONNECTED.equals(containerState)) {
+ // Need to mark as disconnected if process died
+ LOG.info("Participant " + participantId + " died, marking as DISCONNECTED");
+ updateContainerState(helixAdmin, accessor, keyBuilder, cluster, participantId,
+ ContainerState.DISCONNECTED);
+ } else if (participant.isAlive() && ContainerState.CONNECTING.equals(containerState)) {
+ // Need to mark as connected only when the live instance is visible
+ LOG.info("Participant " + participantId + " is ready, marking as CONNECTED");
+ updateContainerState(helixAdmin, accessor, keyBuilder, cluster, participantId,
+ ContainerState.CONNECTED);
+ }
+ }
+ }
// Participants registered in helix
// Give those participants to targetprovider
@@ -123,7 +144,8 @@ public class ContainerProvisioningStage extends AbstractBaseStage {
// allocate new containers
for (final ContainerSpec spec : response.getContainersToAcquire()) {
final ParticipantId participantId = spec.getParticipantId();
- List<String> instancesInCluster = helixAdmin.getInstancesInCluster(cluster.getId().stringify());
+ List<String> instancesInCluster =
+ helixAdmin.getInstancesInCluster(cluster.getId().stringify());
if (!instancesInCluster.contains(participantId.stringify())) {
// create a new Participant, attach the container spec
InstanceConfig instanceConfig = new InstanceConfig(participantId);
@@ -171,8 +193,7 @@ public class ContainerProvisioningStage extends AbstractBaseStage {
FutureCallback<Boolean> callback = new FutureCallback<Boolean>() {
@Override
public void onSuccess(Boolean result) {
- updateContainerState(helixAdmin, accessor, keyBuilder, cluster, participant.getId(),
- ContainerState.CONNECTED);
+ // Do nothing yet, need to wait for live instance
}
@Override
[09/50] [abbrv] git commit: Handling stop/release requests
Posted by ka...@apache.org.
Handling stop/release requests
Project: http://git-wip-us.apache.org/repos/asf/helix/repo
Commit: http://git-wip-us.apache.org/repos/asf/helix/commit/2709b07c
Tree: http://git-wip-us.apache.org/repos/asf/helix/tree/2709b07c
Diff: http://git-wip-us.apache.org/repos/asf/helix/diff/2709b07c
Branch: refs/heads/master
Commit: 2709b07c26d509ba71b1c83ae9c37dc07a46aa8c
Parents: 0037b74
Author: Kishore Gopalakrishna <g....@gmail.com>
Authored: Fri Feb 21 10:21:58 2014 -0800
Committer: Kishore Gopalakrishna <g....@gmail.com>
Committed: Fri Feb 21 10:21:58 2014 -0800
----------------------------------------------------------------------
.../helix/provisioning/yarn/AppLauncher.java | 67 +++++++++++++++++++-
.../yarn/GenericApplicationMaster.java | 9 ++-
.../provisioning/yarn/RMCallbackHandler.java | 43 +++++++++----
.../provisioning/yarn/YarnProvisioner.java | 4 +-
4 files changed, 103 insertions(+), 20 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/helix/blob/2709b07c/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppLauncher.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppLauncher.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppLauncher.java
index d06ae67..3bba018 100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppLauncher.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppLauncher.java
@@ -29,16 +29,20 @@ import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse;
import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.LocalResourceType;
import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.client.api.YarnClientApplication;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.hadoop.yarn.util.Records;
@@ -345,10 +349,36 @@ public class AppLauncher {
* @return true if successfully completed, it will print status every X seconds
*/
public boolean waitUntilDone() {
+ String prevReport = "";
while (true) {
try {
+ // Get application report for the appId we are interested in
+ ApplicationReport report = yarnClient.getApplicationReport(_appId);
+
+ String reportMessage = generateReport(report);
+ if (!reportMessage.equals(prevReport)) {
+ LOG.info(reportMessage);
+ }
+ YarnApplicationState state = report.getYarnApplicationState();
+ FinalApplicationStatus dsStatus = report.getFinalApplicationStatus();
+ if (YarnApplicationState.FINISHED == state) {
+ if (FinalApplicationStatus.SUCCEEDED == dsStatus) {
+ LOG.info("Application has completed successfully. Breaking monitoring loop");
+ return true;
+ } else {
+ LOG.info("Application did finished unsuccessfully." + " YarnState=" + state.toString()
+ + ", DSFinalStatus=" + dsStatus.toString() + ". Breaking monitoring loop");
+ return false;
+ }
+ } else if (YarnApplicationState.KILLED == state || YarnApplicationState.FAILED == state) {
+ LOG.info("Application did not finish." + " YarnState=" + state.toString()
+ + ", DSFinalStatus=" + dsStatus.toString() + ". Breaking monitoring loop");
+ return false;
+ }
+ prevReport = reportMessage;
Thread.sleep(10000);
- } catch (InterruptedException e) {
+ } catch (Exception e) {
+ LOG.error("Exception while getting info ");
break;
}
}
@@ -356,6 +386,31 @@ public class AppLauncher {
}
/**
+ * TODO: kill the app only in dev mode. In prod, it's OK for the app to continue running if the
+ * launcher dies after launching.
+ */
+
+ private String generateReport(ApplicationReport report) {
+ return "Got application report from ASM for" + ", appId=" + _appId.getId()
+ + ", clientToAMToken=" + report.getClientToAMToken() + ", appDiagnostics="
+ + report.getDiagnostics() + ", appMasterHost=" + report.getHost() + ", appQueue="
+ + report.getQueue() + ", appMasterRpcPort=" + report.getRpcPort() + ", appStartTime="
+ + report.getStartTime() + ", yarnAppState=" + report.getYarnApplicationState().toString()
+ + ", distributedFinalState=" + report.getFinalApplicationStatus().toString()
+ + ", appTrackingUrl=" + report.getTrackingUrl() + ", appUser=" + report.getUser();
+ }
+
+ protected void cleanup() {
+ LOG.info("Cleaning up");
+ try {
+ ApplicationReport applicationReport = yarnClient.getApplicationReport(_appId);
+ LOG.info("Killing application:"+ _appId + " \n Application report" + generateReport(applicationReport));
+ } catch (Exception e) {
+ e.printStackTrace();
+ }
+ }
+
+ /**
* will take the input file and AppSpecFactory class name as input
* @param args
* @throws Exception
@@ -364,8 +419,16 @@ public class AppLauncher {
ApplicationSpecFactory applicationSpecFactory =
(ApplicationSpecFactory) Class.forName(args[0]).newInstance();
File yamlConfigFile = new File(args[1]);
- AppLauncher launcher = new AppLauncher(applicationSpecFactory, yamlConfigFile);
+ final AppLauncher launcher = new AppLauncher(applicationSpecFactory, yamlConfigFile);
launcher.launch();
+ Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() {
+
+ @Override
+ public void run() {
+ launcher.cleanup();
+ }
+ }));
launcher.waitUntilDone();
}
+
}
http://git-wip-us.apache.org/repos/asf/helix/blob/2709b07c/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/GenericApplicationMaster.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/GenericApplicationMaster.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/GenericApplicationMaster.java
index 3adffd6..a006363 100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/GenericApplicationMaster.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/GenericApplicationMaster.java
@@ -23,6 +23,7 @@ import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.ByteBuffer;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
@@ -59,6 +60,7 @@ import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Joiner;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.SettableFuture;
@@ -226,7 +228,7 @@ public class GenericApplicationMaster {
}
public ListenableFuture<ContainerAskResponse> acquireContainer(ContainerRequest containerAsk) {
- amRMClient.addContainerRequest(containerAsk);
+ LOG.info("Requesting container ACQUIRE:" + containerAsk);
SettableFuture<ContainerAskResponse> future = SettableFuture.create();
containerRequestMap.put(containerAsk, future);
amRMClient.addContainerRequest(containerAsk);
@@ -234,7 +236,7 @@ public class GenericApplicationMaster {
}
public ListenableFuture<ContainerStopResponse> stopContainer(Container container) {
- nmClientAsync.stopContainerAsync(container.getId(), container.getNodeId());
+ LOG.info("Requesting container STOP:" + container);
SettableFuture<ContainerStopResponse> future = SettableFuture.create();
containerStopMap.put(container.getId(), future);
nmClientAsync.stopContainerAsync(container.getId(), container.getNodeId());
@@ -242,7 +244,7 @@ public class GenericApplicationMaster {
}
public ListenableFuture<ContainerReleaseResponse> releaseContainer(Container container) {
- amRMClient.releaseAssignedContainer(container.getId());
+ LOG.info("Requesting container RELEASE:" + container);
SettableFuture<ContainerReleaseResponse> future = SettableFuture.create();
containerReleaseMap.put(container.getId(), future);
amRMClient.releaseAssignedContainer(container.getId());
@@ -251,6 +253,7 @@ public class GenericApplicationMaster {
public ListenableFuture<ContainerLaunchResponse> launchContainer(Container container,
ContainerLaunchContext containerLaunchContext) {
+ LOG.info("Requesting container LAUNCH:" + container + " :" + Joiner.on(" ").join(containerLaunchContext.getCommands()));
SettableFuture<ContainerLaunchResponse> future = SettableFuture.create();
containerLaunchResponseMap.put(container.getId(), future);
nmClientAsync.startContainerAsync(container, containerLaunchContext);
http://git-wip-us.apache.org/repos/asf/helix/blob/2709b07c/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/RMCallbackHandler.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/RMCallbackHandler.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/RMCallbackHandler.java
index 50c38b5..dae28a8 100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/RMCallbackHandler.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/RMCallbackHandler.java
@@ -35,23 +35,36 @@ class RMCallbackHandler implements AMRMClientAsync.CallbackHandler {
public void onContainersCompleted(List<ContainerStatus> completedContainers) {
LOG.info("Got response from RM for container ask, completedCnt=" + completedContainers.size());
for (ContainerStatus containerStatus : completedContainers) {
- GenericApplicationMaster.LOG.info("Got container status for containerID=" + containerStatus.getContainerId()
- + ", state=" + containerStatus.getState() + ", exitStatus="
- + containerStatus.getExitStatus() + ", diagnostics=" + containerStatus.getDiagnostics());
+ GenericApplicationMaster.LOG.info("Got container status for containerID="
+ + containerStatus.getContainerId() + ", state=" + containerStatus.getState()
+ + ", exitStatus=" + containerStatus.getExitStatus() + ", diagnostics="
+ + containerStatus.getDiagnostics());
// non complete containers should not be here
assert (containerStatus.getState() == ContainerState.COMPLETE);
-
+ SettableFuture<ContainerStopResponse> stopResponseFuture =
+ _genericApplicationMaster.containerStopMap.get(containerStatus.getContainerId());
+ if (stopResponseFuture != null) {
+ ContainerStopResponse value = new ContainerStopResponse();
+ stopResponseFuture.set(value);
+ } else {
+ SettableFuture<ContainerReleaseResponse> releaseResponseFuture =
+ _genericApplicationMaster.containerReleaseMap.get(containerStatus.getContainerId());
+ if (releaseResponseFuture != null) {
+ ContainerReleaseResponse value = new ContainerReleaseResponse();
+ releaseResponseFuture.set(value);
+ }
+ }
// increment counters for completed/failed containers
int exitStatus = containerStatus.getExitStatus();
if (0 != exitStatus) {
// container failed
if (ContainerExitStatus.ABORTED != exitStatus) {
-
+
} else {
// container was killed by framework, possibly preempted
// we should re-try as the container was lost for some reason
-
+
// we do not need to release the container as it would be done
// by the RM
}
@@ -66,7 +79,8 @@ class RMCallbackHandler implements AMRMClientAsync.CallbackHandler {
@Override
public void onContainersAllocated(List<Container> allocatedContainers) {
- GenericApplicationMaster.LOG.info("Got response from RM for container ask, allocatedCnt=" + allocatedContainers.size());
+ GenericApplicationMaster.LOG.info("Got response from RM for container ask, allocatedCnt="
+ + allocatedContainers.size());
for (Container allocatedContainer : allocatedContainers) {
GenericApplicationMaster.LOG.info("Allocated new container." + ", containerId="
+ allocatedContainer.getId() + ", containerNode="
@@ -74,15 +88,18 @@ class RMCallbackHandler implements AMRMClientAsync.CallbackHandler {
+ allocatedContainer.getNodeId().getPort() + ", containerNodeURI="
+ allocatedContainer.getNodeHttpAddress() + ", containerResourceMemory"
+ allocatedContainer.getResource().getMemory());
- for(ContainerRequest containerRequest: _genericApplicationMaster.containerRequestMap.keySet()){
- if(containerRequest.getCapability().getMemory() == allocatedContainer.getResource().getMemory()){
- SettableFuture<ContainerAskResponse> future = _genericApplicationMaster.containerRequestMap.remove(containerRequest);
+ for (ContainerRequest containerRequest : _genericApplicationMaster.containerRequestMap
+ .keySet()) {
+ if (containerRequest.getCapability().getMemory() == allocatedContainer.getResource()
+ .getMemory()) {
+ SettableFuture<ContainerAskResponse> future =
+ _genericApplicationMaster.containerRequestMap.remove(containerRequest);
ContainerAskResponse response = new ContainerAskResponse();
response.setContainer(allocatedContainer);
future.set(response);
break;
}
- }
+ }
}
}
@@ -97,11 +114,11 @@ class RMCallbackHandler implements AMRMClientAsync.CallbackHandler {
@Override
public float getProgress() {
// set progress to deliver to RM on next heartbeat
- return (System.currentTimeMillis()-startTime) % Integer.MAX_VALUE;
+ return (System.currentTimeMillis() - startTime) % Integer.MAX_VALUE;
}
@Override
public void onError(Throwable e) {
_genericApplicationMaster.amRMClient.stop();
}
-}
\ No newline at end of file
+}
http://git-wip-us.apache.org/repos/asf/helix/blob/2709b07c/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/YarnProvisioner.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/YarnProvisioner.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/YarnProvisioner.java
index 8fd308e..2eedfd0 100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/YarnProvisioner.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/YarnProvisioner.java
@@ -324,8 +324,8 @@ public class YarnProvisioner implements Provisioner, TargetProvider, ContainerPr
if (excessHaltedContainers.containsKey(participantId)) {
// Halted containers can be restarted if necessary
Participant participant = excessHaltedContainers.get(participantId);
- containersToStart.add(participant);
- excessHaltedContainers.remove(participantId); // don't release this container
+ //containersToStart.add(participant);
+ //excessHaltedContainers.remove(participantId); // don't release this container
} else if (!existingContainersIdSet.contains(participantId)) {
// Unallocated containers must be allocated
ContainerSpec containerSpec = new ContainerSpec(participantId);
[15/50] [abbrv] git commit: Adding new Helloworld recipe to launch
containers on YARN
Posted by ka...@apache.org.
Adding new Helloworld recipe to launch containers on YARN
Project: http://git-wip-us.apache.org/repos/asf/helix/repo
Commit: http://git-wip-us.apache.org/repos/asf/helix/commit/b47e3299
Tree: http://git-wip-us.apache.org/repos/asf/helix/tree/b47e3299
Diff: http://git-wip-us.apache.org/repos/asf/helix/diff/b47e3299
Branch: refs/heads/master
Commit: b47e329905d853b8566231946bb9be04ad2b0685
Parents: 7118717
Author: Kishore Gopalakrishna <g....@gmail.com>
Authored: Sat Feb 22 00:05:41 2014 -0800
Committer: Kishore Gopalakrishna <g....@gmail.com>
Committed: Sat Feb 22 00:05:41 2014 -0800
----------------------------------------------------------------------
.../helix/provisioning/yarn/AppLauncher.java | 10 +++--
.../yarn/HelixYarnApplicationMasterMain.java | 28 ++++++++-----
.../helix/provisioning/yarn/HelixYarnUtil.java | 42 ++++++++++++++++++++
recipes/provisioning/yarn/helloworld/pom.xml | 2 +-
.../main/resources/hello_world_app_spec.yaml | 4 +-
5 files changed, 70 insertions(+), 16 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/helix/blob/b47e3299/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppLauncher.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppLauncher.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppLauncher.java
index 3bba018..1fe0a28 100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppLauncher.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppLauncher.java
@@ -42,7 +42,6 @@ import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.client.api.YarnClientApplication;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.hadoop.yarn.util.Records;
@@ -375,6 +374,9 @@ public class AppLauncher {
+ ", DSFinalStatus=" + dsStatus.toString() + ". Breaking monitoring loop");
return false;
}
+ if (YarnApplicationState.RUNNING == state) {
+
+ }
prevReport = reportMessage;
Thread.sleep(10000);
} catch (Exception e) {
@@ -404,7 +406,8 @@ public class AppLauncher {
LOG.info("Cleaning up");
try {
ApplicationReport applicationReport = yarnClient.getApplicationReport(_appId);
- LOG.info("Killing application:"+ _appId + " \n Application report" + generateReport(applicationReport));
+ LOG.info("Killing application:" + _appId + " \n Application report"
+ + generateReport(applicationReport));
} catch (Exception e) {
e.printStackTrace();
}
@@ -416,8 +419,7 @@ public class AppLauncher {
* @throws Exception
*/
public static void main(String[] args) throws Exception {
- ApplicationSpecFactory applicationSpecFactory =
- (ApplicationSpecFactory) Class.forName(args[0]).newInstance();
+ ApplicationSpecFactory applicationSpecFactory = HelixYarnUtil.createInstance(args[0]);
File yamlConfigFile = new File(args[1]);
final AppLauncher launcher = new AppLauncher(applicationSpecFactory, yamlConfigFile);
launcher.launch();
http://git-wip-us.apache.org/repos/asf/helix/blob/b47e3299/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/HelixYarnApplicationMasterMain.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/HelixYarnApplicationMasterMain.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/HelixYarnApplicationMasterMain.java
index 33183c7..5884a35 100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/HelixYarnApplicationMasterMain.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/HelixYarnApplicationMasterMain.java
@@ -2,6 +2,7 @@ package org.apache.helix.provisioning.yarn;
import java.io.File;
import java.io.FileInputStream;
+import java.io.IOException;
import java.util.Arrays;
import java.util.Map;
@@ -11,10 +12,12 @@ import org.I0Itec.zkclient.ZkServer;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.helix.HelixController;
import org.apache.helix.api.accessor.ClusterAccessor;
@@ -43,7 +46,8 @@ import org.apache.log4j.Logger;
public class HelixYarnApplicationMasterMain {
public static Logger LOG = Logger.getLogger(HelixYarnApplicationMasterMain.class);
- public static void main(String[] args) throws Exception {
+ @SuppressWarnings("unchecked")
+ public static void main(String[] args) throws Exception{
Map<String, String> env = System.getenv();
LOG.info("Starting app master with the following environment variables");
for (String key : env.keySet()) {
@@ -71,8 +75,12 @@ public class HelixYarnApplicationMasterMain {
}
};
- FileUtils.deleteDirectory(new File(dataDir));
- FileUtils.deleteDirectory(new File(logDir));
+ try {
+ FileUtils.deleteDirectory(new File(dataDir));
+ FileUtils.deleteDirectory(new File(logDir));
+ } catch (IOException e) {
+ LOG.error(e);
+ }
final ZkServer server = new ZkServer(dataDir, logDir, defaultNameSpace);
server.start();
@@ -84,13 +92,15 @@ public class HelixYarnApplicationMasterMain {
ApplicationAttemptId appAttemptID = containerId.getApplicationAttemptId();
String configFile = AppMasterConfig.AppEnvironment.APP_SPEC_FILE.toString();
- ApplicationSpecFactory factory =
- (ApplicationSpecFactory) Class.forName(appMasterConfig.getApplicationSpecFactory())
- .newInstance();
-
+ String className = appMasterConfig.getApplicationSpecFactory();
+
GenericApplicationMaster genericApplicationMaster = new GenericApplicationMaster(appAttemptID);
- genericApplicationMaster.start();
-
+ try {
+ genericApplicationMaster.start();
+ } catch (Exception e) {
+ LOG.error("Unable to start application master: ", e);
+ }
+ ApplicationSpecFactory factory = HelixYarnUtil.createInstance(className);
YarnProvisioner.applicationMaster = genericApplicationMaster;
YarnProvisioner.applicationMasterConfig = appMasterConfig;
ApplicationSpec applicationSpec = factory.fromYaml(new FileInputStream(configFile));
http://git-wip-us.apache.org/repos/asf/helix/blob/b47e3299/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/HelixYarnUtil.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/HelixYarnUtil.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/HelixYarnUtil.java
new file mode 100644
index 0000000..ad606ba
--- /dev/null
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/HelixYarnUtil.java
@@ -0,0 +1,42 @@
+package org.apache.helix.provisioning.yarn;
+
+import org.apache.log4j.Logger;
+
+public class HelixYarnUtil {
+ private static Logger LOG = Logger.getLogger(HelixYarnUtil.class);
+
+ @SuppressWarnings("unchecked")
+ public static <T extends ApplicationSpecFactory> T createInstance(String className) {
+ Class<ApplicationSpecFactory> factoryClazz = null;
+ {
+ try {
+ factoryClazz =
+ (Class<ApplicationSpecFactory>) Thread.currentThread().getContextClassLoader()
+ .loadClass(className);
+ } catch (ClassNotFoundException e) {
+ try {
+ factoryClazz =
+ (Class<ApplicationSpecFactory>) ClassLoader.getSystemClassLoader().loadClass(
+ className);
+ } catch (ClassNotFoundException e1) {
+ try {
+ factoryClazz = (Class<ApplicationSpecFactory>) Class.forName(className);
+ } catch (ClassNotFoundException e2) {
+
+ }
+ }
+ }
+ }
+ System.out.println(System.getProperty("java.class.path"));
+ if (factoryClazz == null) {
+ LOG.error("Unable to find class:" + className);
+ }
+ ApplicationSpecFactory factory = null;
+ try {
+ factory = factoryClazz.newInstance();
+ } catch (Exception e) {
+ LOG.error("Unable to create instance of class: " + className, e);
+ }
+ return (T) factory;
+ }
+}
http://git-wip-us.apache.org/repos/asf/helix/blob/b47e3299/recipes/provisioning/yarn/helloworld/pom.xml
----------------------------------------------------------------------
diff --git a/recipes/provisioning/yarn/helloworld/pom.xml b/recipes/provisioning/yarn/helloworld/pom.xml
index bc6aca2..6c30679 100644
--- a/recipes/provisioning/yarn/helloworld/pom.xml
+++ b/recipes/provisioning/yarn/helloworld/pom.xml
@@ -36,7 +36,7 @@ under the License.
org.apache.log4j,
*
</osgi.import>
- <osgi.export>org.apache.helix.recipes.provisioning.yarn.helloworld*;version="${project.version};-noimport:=true</osgi.export>
+ <osgi.export>org.apache.helix.provisioning.yarn.example*;version="${project.version};-noimport:=true</osgi.export>
</properties>
<dependencies>
http://git-wip-us.apache.org/repos/asf/helix/blob/b47e3299/recipes/provisioning/yarn/helloworld/src/main/resources/hello_world_app_spec.yaml
----------------------------------------------------------------------
diff --git a/recipes/provisioning/yarn/helloworld/src/main/resources/hello_world_app_spec.yaml b/recipes/provisioning/yarn/helloworld/src/main/resources/hello_world_app_spec.yaml
index 535bece..d8d1dd2 100644
--- a/recipes/provisioning/yarn/helloworld/src/main/resources/hello_world_app_spec.yaml
+++ b/recipes/provisioning/yarn/helloworld/src/main/resources/hello_world_app_spec.yaml
@@ -3,7 +3,7 @@ appConfig:
config: {
k1: v1
}
-appMasterPackageUri: 'file:///Users/kbiscuit/helix/incubator-helix/recipes/provisioning/yarn/helloworld/target/helloworld-0.7.1-incubating-SNAPSHOT-pkg.tar'
+appMasterPackageUri: 'file:///Users/kgopalak/Documents/projects/incubator-helix/recipes/provisioning/yarn/helloworld/target/helloworld-0.7.1-incubating-SNAPSHOT-pkg.tar'
appName: testApp
serviceConfigMap:
HelloWorld: {
@@ -14,7 +14,7 @@ serviceMainClassMap: {
HelloWorld: org.apache.helix.provisioning.yarn.example.HelloWorldService
}
servicePackageURIMap: {
- HelloWorld: 'file:///Users/kbiscuit/helix/incubator-helix/recipes/provisioning/yarn/helloworld/target/helloworld-0.7.1-incubating-SNAPSHOT-pkg.tar'
+ HelloWorld: 'file:///Users/kgopalak/Documents/projects/incubator-helix/recipes/provisioning/yarn/helloworld/target/helloworld-0.7.1-incubating-SNAPSHOT-pkg.tar'
}
services: [
HelloWorld]
[32/50] [abbrv] Port recent task framework changes
Posted by ka...@apache.org.
http://git-wip-us.apache.org/repos/asf/helix/blob/97ca4de4/helix-core/src/main/java/org/apache/helix/task/Workflow.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/Workflow.java b/helix-core/src/main/java/org/apache/helix/task/Workflow.java
index 902f616..5b27fb6 100644
--- a/helix-core/src/main/java/org/apache/helix/task/Workflow.java
+++ b/helix-core/src/main/java/org/apache/helix/task/Workflow.java
@@ -19,45 +19,53 @@ package org.apache.helix.task;
* under the License.
*/
-import com.google.common.base.Joiner;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.Reader;
import java.io.StringReader;
+import java.util.ArrayList;
+import java.util.Collection;
import java.util.HashMap;
+import java.util.List;
import java.util.Map;
import java.util.TreeMap;
+
+import org.apache.helix.task.beans.JobBean;
import org.apache.helix.task.beans.TaskBean;
import org.apache.helix.task.beans.WorkflowBean;
import org.yaml.snakeyaml.Yaml;
import org.yaml.snakeyaml.constructor.Constructor;
+import com.google.common.base.Joiner;
+import com.google.common.collect.Lists;
+
/**
- * Houses a task dag and config set to fully describe a task workflow
+ * Houses a job dag and config set to fully describe a job workflow
*/
public class Workflow {
/** Default workflow name, useful constant for single-node workflows */
- public static enum WorkflowEnum {
- UNSPECIFIED;
- }
+ public static final String UNSPECIFIED = "UNSPECIFIED";
/** Workflow name */
- private final String _name;
+ private String _name;
/** Holds workflow-level configurations */
- private final WorkflowConfig _workflowConfig;
+ private WorkflowConfig _workflowConfig;
+
+ /** Contains the per-job configurations for all jobs specified in the provided dag */
+ private Map<String, Map<String, String>> _jobConfigs;
- /** Contains the per-task configurations for all tasks specified in the provided dag */
- private final Map<String, Map<String, String>> _taskConfigs;
+ /** Containers the per-job configurations of all individually-specified tasks */
+ private Map<String, List<TaskConfig>> _taskConfigs;
/** Constructs and validates a workflow against a provided dag and config set */
private Workflow(String name, WorkflowConfig workflowConfig,
- Map<String, Map<String, String>> taskConfigs) {
+ Map<String, Map<String, String>> jobConfigs, Map<String, List<TaskConfig>> taskConfigs) {
_name = name;
_workflowConfig = workflowConfig;
+ _jobConfigs = jobConfigs;
_taskConfigs = taskConfigs;
-
validate();
}
@@ -65,13 +73,17 @@ public class Workflow {
return _name;
}
- public Map<String, Map<String, String>> getTaskConfigs() {
+ public Map<String, Map<String, String>> getJobConfigs() {
+ return _jobConfigs;
+ }
+
+ public Map<String, List<TaskConfig>> getTaskConfigs() {
return _taskConfigs;
}
public Map<String, String> getResourceConfigMap() throws Exception {
Map<String, String> cfgMap = new HashMap<String, String>();
- cfgMap.put(WorkflowConfig.DAG, _workflowConfig.getTaskDag().toJson());
+ cfgMap.put(WorkflowConfig.DAG, _workflowConfig.getJobDag().toJson());
cfgMap.put(WorkflowConfig.EXPIRY, String.valueOf(_workflowConfig.getExpiry()));
cfgMap.put(WorkflowConfig.TARGET_STATE, _workflowConfig.getTargetState().name());
@@ -97,19 +109,19 @@ public class Workflow {
*
* <pre>
* name: MyFlow
- * tasks:
- * - name : TaskA
+ * jobs:
+ * - name : JobA
* command : SomeTask
* ...
- * - name : TaskB
- * parents : [TaskA]
+ * - name : JobB
+ * parents : [JobA]
* command : SomeOtherTask
* ...
- * - name : TaskC
+ * - name : JobC
* command : AnotherTask
* ...
- * - name : TaskD
- * parents : [TaskB, TaskC]
+ * - name : JobD
+ * parents : [JobB, JobC]
* command : AnotherTask
* ...
* </pre>
@@ -126,37 +138,44 @@ public class Workflow {
WorkflowBean wf = (WorkflowBean) yaml.load(reader);
Builder builder = new Builder(wf.name);
- for (TaskBean task : wf.tasks) {
- if (task.name == null) {
- throw new IllegalArgumentException("A task must have a name.");
+ for (JobBean job : wf.jobs) {
+ if (job.name == null) {
+ throw new IllegalArgumentException("A job must have a name.");
}
- if (task.parents != null) {
- for (String parent : task.parents) {
- builder.addParentChildDependency(parent, task.name);
+ if (job.parents != null) {
+ for (String parent : job.parents) {
+ builder.addParentChildDependency(parent, job.name);
}
}
- builder.addConfig(task.name, TaskConfig.WORKFLOW_ID, wf.name);
- builder.addConfig(task.name, TaskConfig.COMMAND, task.command);
- if (task.commandConfig != null) {
- builder.addConfig(task.name, TaskConfig.COMMAND_CONFIG, task.commandConfig.toString());
+ builder.addConfig(job.name, JobConfig.WORKFLOW_ID, wf.name);
+ builder.addConfig(job.name, JobConfig.COMMAND, job.command);
+ if (job.jobConfigMap != null) {
+ builder.addConfig(job.name, JobConfig.JOB_CONFIG_MAP, job.jobConfigMap.toString());
+ }
+ builder.addConfig(job.name, JobConfig.TARGET_RESOURCE, job.targetResource);
+ if (job.targetPartitionStates != null) {
+ builder.addConfig(job.name, JobConfig.TARGET_PARTITION_STATES,
+ Joiner.on(",").join(job.targetPartitionStates));
}
- builder.addConfig(task.name, TaskConfig.TARGET_RESOURCE, task.targetResource);
- if (task.targetPartitionStates != null) {
- builder.addConfig(task.name, TaskConfig.TARGET_PARTITION_STATES,
- Joiner.on(",").join(task.targetPartitionStates));
+ if (job.targetPartitions != null) {
+ builder.addConfig(job.name, JobConfig.TARGET_PARTITIONS,
+ Joiner.on(",").join(job.targetPartitions));
}
- if (task.targetPartitions != null) {
- builder.addConfig(task.name, TaskConfig.TARGET_PARTITIONS,
- Joiner.on(",").join(task.targetPartitions));
+ builder.addConfig(job.name, JobConfig.MAX_ATTEMPTS_PER_TASK,
+ String.valueOf(job.maxAttemptsPerPartition));
+ builder.addConfig(job.name, JobConfig.NUM_CONCURRENT_TASKS_PER_INSTANCE,
+ String.valueOf(job.numConcurrentTasksPerInstance));
+ builder.addConfig(job.name, JobConfig.TIMEOUT_PER_TASK,
+ String.valueOf(job.timeoutPerPartition));
+ if (job.tasks != null) {
+ List<TaskConfig> taskConfigs = Lists.newArrayList();
+ for (TaskBean task : job.tasks) {
+ taskConfigs.add(TaskConfig.from(task));
+ }
+ builder.addTaskConfigs(job.name, taskConfigs);
}
- builder.addConfig(task.name, TaskConfig.MAX_ATTEMPTS_PER_PARTITION,
- String.valueOf(task.maxAttemptsPerPartition));
- builder.addConfig(task.name, TaskConfig.NUM_CONCURRENT_TASKS_PER_INSTANCE,
- String.valueOf(task.numConcurrentTasksPerInstance));
- builder.addConfig(task.name, TaskConfig.TIMEOUT_PER_PARTITION,
- String.valueOf(task.timeoutPerPartition));
}
return builder.build();
@@ -168,47 +187,78 @@ public class Workflow {
*/
public void validate() {
// validate dag and configs
- if (!_taskConfigs.keySet().containsAll(_workflowConfig.getTaskDag().getAllNodes())) {
+ if (!_jobConfigs.keySet().containsAll(_workflowConfig.getJobDag().getAllNodes())) {
throw new IllegalArgumentException("Nodes specified in DAG missing from config");
- } else if (!_workflowConfig.getTaskDag().getAllNodes().containsAll(_taskConfigs.keySet())) {
+ } else if (!_workflowConfig.getJobDag().getAllNodes().containsAll(_jobConfigs.keySet())) {
throw new IllegalArgumentException("Given DAG lacks nodes with supplied configs");
}
- _workflowConfig.getTaskDag().validate();
+ _workflowConfig.getJobDag().validate();
- for (String node : _taskConfigs.keySet()) {
+ for (String node : _jobConfigs.keySet()) {
buildConfig(node);
}
}
- /** Builds a TaskConfig from config map. Useful for validating configs */
- private TaskConfig buildConfig(String task) {
- return TaskConfig.Builder.fromMap(_taskConfigs.get(task)).build();
+ /** Builds a JobConfig from config map. Useful for validating configs */
+ private JobConfig buildConfig(String job) {
+ JobConfig.Builder b = JobConfig.Builder.fromMap(_jobConfigs.get(job));
+ if (_taskConfigs != null && _taskConfigs.containsKey(job)) {
+ b.addTaskConfigs(_taskConfigs.get(job));
+ }
+ return b.build();
}
/** Build a workflow incrementally from dependencies and single configs, validate at build time */
public static class Builder {
- private final String _name;
- private final TaskDag _dag;
- private final Map<String, Map<String, String>> _taskConfigs;
+ private String _name;
+ private JobDag _dag;
+ private Map<String, Map<String, String>> _jobConfigs;
+ private Map<String, List<TaskConfig>> _taskConfigs;
private long _expiry;
public Builder(String name) {
_name = name;
- _dag = new TaskDag();
- _taskConfigs = new TreeMap<String, Map<String, String>>();
+ _dag = new JobDag();
+ _jobConfigs = new TreeMap<String, Map<String, String>>();
+ _taskConfigs = new TreeMap<String, List<TaskConfig>>();
_expiry = -1;
}
public Builder addConfig(String node, String key, String val) {
node = namespacify(node);
_dag.addNode(node);
+ if (!_jobConfigs.containsKey(node)) {
+ _jobConfigs.put(node, new TreeMap<String, String>());
+ }
+ _jobConfigs.get(node).put(key, val);
+ return this;
+ }
- if (!_taskConfigs.containsKey(node)) {
- _taskConfigs.put(node, new TreeMap<String, String>());
+ public Builder addJobConfigMap(String node, Map<String, String> jobConfigMap) {
+ return addConfig(node, JobConfig.JOB_CONFIG_MAP, TaskUtil.serializeJobConfigMap(jobConfigMap));
+ }
+
+ public Builder addJobConfig(String node, JobConfig jobConfig) {
+ for (Map.Entry<String, String> e : jobConfig.getResourceConfigMap().entrySet()) {
+ String key = e.getKey();
+ String val = e.getValue();
+ addConfig(node, key, val);
}
- _taskConfigs.get(node).put(key, val);
+ addTaskConfigs(node, jobConfig.getTaskConfigMap().values());
+ return this;
+ }
+ public Builder addTaskConfigs(String node, Collection<TaskConfig> taskConfigs) {
+ node = namespacify(node);
+ _dag.addNode(node);
+ if (!_taskConfigs.containsKey(node)) {
+ _taskConfigs.put(node, new ArrayList<TaskConfig>());
+ }
+ if (!_jobConfigs.containsKey(node)) {
+ _jobConfigs.put(node, new TreeMap<String, String>());
+ }
+ _taskConfigs.get(node).addAll(taskConfigs);
return this;
}
@@ -226,13 +276,13 @@ public class Workflow {
}
public String namespacify(String task) {
- return TaskUtil.getNamespacedTaskName(_name, task);
+ return TaskUtil.getNamespacedJobName(_name, task);
}
public Workflow build() {
- for (String task : _taskConfigs.keySet()) {
+ for (String task : _jobConfigs.keySet()) {
// addConfig(task, TaskConfig.WORKFLOW_ID, _name);
- _taskConfigs.get(task).put(TaskConfig.WORKFLOW_ID, _name);
+ _jobConfigs.get(task).put(JobConfig.WORKFLOW_ID, _name);
}
WorkflowConfig.Builder builder = new WorkflowConfig.Builder();
@@ -242,7 +292,8 @@ public class Workflow {
builder.setExpiry(_expiry);
}
- return new Workflow(_name, builder.build(), _taskConfigs); // calls validate internally
+ return new Workflow(_name, builder.build(), _jobConfigs, _taskConfigs); // calls validate
+ // internally
}
}
}
http://git-wip-us.apache.org/repos/asf/helix/blob/97ca4de4/helix-core/src/main/java/org/apache/helix/task/WorkflowConfig.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/WorkflowConfig.java b/helix-core/src/main/java/org/apache/helix/task/WorkflowConfig.java
index bb88be7..6f10955 100644
--- a/helix-core/src/main/java/org/apache/helix/task/WorkflowConfig.java
+++ b/helix-core/src/main/java/org/apache/helix/task/WorkflowConfig.java
@@ -34,18 +34,18 @@ public class WorkflowConfig {
public static final long DEFAULT_EXPIRY = 24 * 60 * 60 * 1000;
/* Member variables */
- private final TaskDag _taskDag;
- private final TargetState _targetState;
- private final long _expiry;
+ private JobDag _jobDag;
+ private TargetState _targetState;
+ private long _expiry;
- private WorkflowConfig(TaskDag taskDag, TargetState targetState, long expiry) {
- _taskDag = taskDag;
+ private WorkflowConfig(JobDag jobDag, TargetState targetState, long expiry) {
+ _jobDag = jobDag;
_targetState = targetState;
_expiry = expiry;
}
- public TaskDag getTaskDag() {
- return _taskDag;
+ public JobDag getJobDag() {
+ return _jobDag;
}
public TargetState getTargetState() {
@@ -57,7 +57,7 @@ public class WorkflowConfig {
}
public static class Builder {
- private TaskDag _taskDag = TaskDag.EMPTY_DAG;
+ private JobDag _taskDag = JobDag.EMPTY_DAG;
private TargetState _targetState = TargetState.START;
private long _expiry = DEFAULT_EXPIRY;
@@ -71,7 +71,7 @@ public class WorkflowConfig {
return new WorkflowConfig(_taskDag, _targetState, _expiry);
}
- public Builder setTaskDag(TaskDag v) {
+ public Builder setTaskDag(JobDag v) {
_taskDag = v;
return this;
}
@@ -93,7 +93,7 @@ public class WorkflowConfig {
b.setExpiry(Long.parseLong(cfg.get(EXPIRY)));
}
if (cfg.containsKey(DAG)) {
- b.setTaskDag(TaskDag.fromJson(cfg.get(DAG)));
+ b.setTaskDag(JobDag.fromJson(cfg.get(DAG)));
}
if (cfg.containsKey(TARGET_STATE)) {
b.setTargetState(TargetState.valueOf(cfg.get(TARGET_STATE)));
http://git-wip-us.apache.org/repos/asf/helix/blob/97ca4de4/helix-core/src/main/java/org/apache/helix/task/WorkflowContext.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/WorkflowContext.java b/helix-core/src/main/java/org/apache/helix/task/WorkflowContext.java
index cd30860..4feda1b 100644
--- a/helix-core/src/main/java/org/apache/helix/task/WorkflowContext.java
+++ b/helix-core/src/main/java/org/apache/helix/task/WorkflowContext.java
@@ -19,35 +19,21 @@ package org.apache.helix.task;
* under the License.
*/
-import org.apache.helix.HelixProperty;
-import org.apache.helix.ZNRecord;
-
import java.util.Map;
import java.util.TreeMap;
+import org.apache.helix.HelixProperty;
+import org.apache.helix.ZNRecord;
+
/**
* Typed interface to the workflow context information stored by {@link TaskRebalancer} in the Helix
* property store
*/
public class WorkflowContext extends HelixProperty {
-
- enum WorkflowContextEnum {
- WORKFLOW_STATE("STATE"),
- START_TIME("START_TIME"),
- FINISH_TIME("FINISH_TIME"),
- TASK_STATES("TASK_STATES");
-
- final String _value;
-
- private WorkflowContextEnum(String value) {
- _value = value;
- }
-
- public String value() {
- return _value;
- }
- }
-
+ public static final String WORKFLOW_STATE = "STATE";
+ public static final String START_TIME = "START_TIME";
+ public static final String FINISH_TIME = "FINISH_TIME";
+ public static final String TASK_STATES = "TASK_STATES";
public static final int UNFINISHED = -1;
public WorkflowContext(ZNRecord record) {
@@ -55,18 +41,16 @@ public class WorkflowContext extends HelixProperty {
}
public void setWorkflowState(TaskState s) {
- if (_record.getSimpleField(WorkflowContextEnum.WORKFLOW_STATE.value()) == null) {
- _record.setSimpleField(WorkflowContextEnum.WORKFLOW_STATE.value(), s.name());
- } else if (!_record.getSimpleField(WorkflowContextEnum.WORKFLOW_STATE.value()).equals(
- TaskState.FAILED.name())
- && !_record.getSimpleField(WorkflowContextEnum.WORKFLOW_STATE.value()).equals(
- TaskState.COMPLETED.name())) {
- _record.setSimpleField(WorkflowContextEnum.WORKFLOW_STATE.value(), s.name());
+ if (_record.getSimpleField(WORKFLOW_STATE) == null) {
+ _record.setSimpleField(WORKFLOW_STATE, s.name());
+ } else if (!_record.getSimpleField(WORKFLOW_STATE).equals(TaskState.FAILED.name())
+ && !_record.getSimpleField(WORKFLOW_STATE).equals(TaskState.COMPLETED.name())) {
+ _record.setSimpleField(WORKFLOW_STATE, s.name());
}
}
public TaskState getWorkflowState() {
- String s = _record.getSimpleField(WorkflowContextEnum.WORKFLOW_STATE.value());
+ String s = _record.getSimpleField(WORKFLOW_STATE);
if (s == null) {
return null;
}
@@ -74,22 +58,22 @@ public class WorkflowContext extends HelixProperty {
return TaskState.valueOf(s);
}
- public void setTaskState(String taskResource, TaskState s) {
- Map<String, String> states = _record.getMapField(WorkflowContextEnum.TASK_STATES.value());
+ public void setJobState(String jobResource, TaskState s) {
+ Map<String, String> states = _record.getMapField(TASK_STATES);
if (states == null) {
states = new TreeMap<String, String>();
- _record.setMapField(WorkflowContextEnum.TASK_STATES.value(), states);
+ _record.setMapField(TASK_STATES, states);
}
- states.put(taskResource, s.name());
+ states.put(jobResource, s.name());
}
- public TaskState getTaskState(String taskResource) {
- Map<String, String> states = _record.getMapField(WorkflowContextEnum.TASK_STATES.value());
+ public TaskState getJobState(String jobResource) {
+ Map<String, String> states = _record.getMapField(TASK_STATES);
if (states == null) {
return null;
}
- String s = states.get(taskResource);
+ String s = states.get(jobResource);
if (s == null) {
return null;
}
@@ -98,11 +82,11 @@ public class WorkflowContext extends HelixProperty {
}
public void setStartTime(long t) {
- _record.setSimpleField(WorkflowContextEnum.START_TIME.value(), String.valueOf(t));
+ _record.setSimpleField(START_TIME, String.valueOf(t));
}
public long getStartTime() {
- String tStr = _record.getSimpleField(WorkflowContextEnum.START_TIME.value());
+ String tStr = _record.getSimpleField(START_TIME);
if (tStr == null) {
return -1;
}
@@ -111,11 +95,11 @@ public class WorkflowContext extends HelixProperty {
}
public void setFinishTime(long t) {
- _record.setSimpleField(WorkflowContextEnum.FINISH_TIME.value(), String.valueOf(t));
+ _record.setSimpleField(FINISH_TIME, String.valueOf(t));
}
public long getFinishTime() {
- String tStr = _record.getSimpleField(WorkflowContextEnum.FINISH_TIME.value());
+ String tStr = _record.getSimpleField(FINISH_TIME);
if (tStr == null) {
return UNFINISHED;
}
http://git-wip-us.apache.org/repos/asf/helix/blob/97ca4de4/helix-core/src/main/java/org/apache/helix/task/beans/JobBean.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/beans/JobBean.java b/helix-core/src/main/java/org/apache/helix/task/beans/JobBean.java
new file mode 100644
index 0000000..5e12f19
--- /dev/null
+++ b/helix-core/src/main/java/org/apache/helix/task/beans/JobBean.java
@@ -0,0 +1,42 @@
+package org.apache.helix.task.beans;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import java.util.List;
+import java.util.Map;
+
+import org.apache.helix.task.JobConfig;
+
+/**
+ * Bean class used for parsing job definitions from YAML.
+ */
+public class JobBean {
+ public String name;
+ public List<String> parents;
+ public String targetResource;
+ public List<String> targetPartitionStates;
+ public List<String> targetPartitions;
+ public String command;
+ public Map<String, String> jobConfigMap;
+ public List<TaskBean> tasks;
+ public long timeoutPerPartition = JobConfig.DEFAULT_TIMEOUT_PER_TASK;
+ public int numConcurrentTasksPerInstance = JobConfig.DEFAULT_NUM_CONCURRENT_TASKS_PER_INSTANCE;
+ public int maxAttemptsPerPartition = JobConfig.DEFAULT_MAX_ATTEMPTS_PER_TASK;
+}
http://git-wip-us.apache.org/repos/asf/helix/blob/97ca4de4/helix-core/src/main/java/org/apache/helix/task/beans/TaskBean.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/beans/TaskBean.java b/helix-core/src/main/java/org/apache/helix/task/beans/TaskBean.java
index 9481c6e..eedccb5 100644
--- a/helix-core/src/main/java/org/apache/helix/task/beans/TaskBean.java
+++ b/helix-core/src/main/java/org/apache/helix/task/beans/TaskBean.java
@@ -19,22 +19,14 @@ package org.apache.helix.task.beans;
* under the License.
*/
-import java.util.List;
import java.util.Map;
-import org.apache.helix.task.TaskConfig;
/**
- * Bean class used for parsing task definitions from YAML.
+ * Describes task-specific configuration, including an arbitrary map of
+ * key-value pairs to pass to the task
*/
+
public class TaskBean {
- public String name;
- public List<String> parents;
- public String targetResource;
- public List<String> targetPartitionStates;
- public List<Integer> targetPartitions;
public String command;
- public Map<String, Object> commandConfig;
- public long timeoutPerPartition = TaskConfig.DEFAULT_TIMEOUT_PER_PARTITION;
- public int numConcurrentTasksPerInstance = TaskConfig.DEFAULT_NUM_CONCURRENT_TASKS_PER_INSTANCE;
- public int maxAttemptsPerPartition = TaskConfig.DEFAULT_MAX_ATTEMPTS_PER_PARTITION;
+ public Map<String, String> taskConfigMap;
}
http://git-wip-us.apache.org/repos/asf/helix/blob/97ca4de4/helix-core/src/main/java/org/apache/helix/task/beans/WorkflowBean.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/beans/WorkflowBean.java b/helix-core/src/main/java/org/apache/helix/task/beans/WorkflowBean.java
index 4e64692..76da4c8 100644
--- a/helix-core/src/main/java/org/apache/helix/task/beans/WorkflowBean.java
+++ b/helix-core/src/main/java/org/apache/helix/task/beans/WorkflowBean.java
@@ -27,5 +27,5 @@ import java.util.List;
public class WorkflowBean {
public String name;
public String expiry;
- public List<TaskBean> tasks;
+ public List<JobBean> jobs;
}
http://git-wip-us.apache.org/repos/asf/helix/blob/97ca4de4/helix-core/src/test/java/org/apache/helix/integration/task/TestIndependentTaskRebalancer.java
----------------------------------------------------------------------
diff --git a/helix-core/src/test/java/org/apache/helix/integration/task/TestIndependentTaskRebalancer.java b/helix-core/src/test/java/org/apache/helix/integration/task/TestIndependentTaskRebalancer.java
new file mode 100644
index 0000000..1ee3991
--- /dev/null
+++ b/helix-core/src/test/java/org/apache/helix/integration/task/TestIndependentTaskRebalancer.java
@@ -0,0 +1,171 @@
+package org.apache.helix.integration.task;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.helix.HelixManager;
+import org.apache.helix.HelixManagerFactory;
+import org.apache.helix.InstanceType;
+import org.apache.helix.TestHelper;
+import org.apache.helix.api.id.StateModelDefId;
+import org.apache.helix.integration.ZkIntegrationTestBase;
+import org.apache.helix.integration.manager.ClusterControllerManager;
+import org.apache.helix.integration.manager.MockParticipantManager;
+import org.apache.helix.integration.task.TestTaskRebalancerStopResume.ReindexTask;
+import org.apache.helix.participant.StateMachineEngine;
+import org.apache.helix.task.JobConfig;
+import org.apache.helix.task.Task;
+import org.apache.helix.task.TaskCallbackContext;
+import org.apache.helix.task.TaskConfig;
+import org.apache.helix.task.TaskDriver;
+import org.apache.helix.task.TaskFactory;
+import org.apache.helix.task.TaskResult;
+import org.apache.helix.task.TaskState;
+import org.apache.helix.task.TaskStateModelFactory;
+import org.apache.helix.task.Workflow;
+import org.apache.helix.tools.ClusterSetup;
+import org.testng.Assert;
+import org.testng.annotations.BeforeClass;
+import org.testng.annotations.BeforeMethod;
+import org.testng.annotations.Test;
+import org.testng.collections.Sets;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+
+public class TestIndependentTaskRebalancer extends ZkIntegrationTestBase {
+ private static final int n = 5;
+ private static final int START_PORT = 12918;
+ private final String CLUSTER_NAME = CLUSTER_PREFIX + "_" + getShortClassName();
+ private final MockParticipantManager[] _participants = new MockParticipantManager[n];
+ private ClusterControllerManager _controller;
+ private Set<String> _invokedClasses = Sets.newHashSet();
+
+ private HelixManager _manager;
+ private TaskDriver _driver;
+
+ @BeforeClass
+ public void beforeClass() throws Exception {
+ String namespace = "/" + CLUSTER_NAME;
+ if (_gZkClient.exists(namespace)) {
+ _gZkClient.deleteRecursive(namespace);
+ }
+
+ // Setup cluster and instances
+ ClusterSetup setupTool = new ClusterSetup(ZK_ADDR);
+ setupTool.addCluster(CLUSTER_NAME, true);
+ for (int i = 0; i < n; i++) {
+ String storageNodeName = PARTICIPANT_PREFIX + "_" + (START_PORT + i);
+ setupTool.addInstanceToCluster(CLUSTER_NAME, storageNodeName);
+ }
+
+ // Set task callbacks
+ Map<String, TaskFactory> taskFactoryReg = new HashMap<String, TaskFactory>();
+ taskFactoryReg.put("TaskOne", new TaskFactory() {
+ @Override
+ public Task createNewTask(TaskCallbackContext context) {
+ return new TaskOne(context);
+ }
+ });
+ taskFactoryReg.put("TaskTwo", new TaskFactory() {
+ @Override
+ public Task createNewTask(TaskCallbackContext context) {
+ return new TaskTwo(context);
+ }
+ });
+
+ // start dummy participants
+ for (int i = 0; i < n; i++) {
+ String instanceName = PARTICIPANT_PREFIX + "_" + (START_PORT + i);
+ _participants[i] = new MockParticipantManager(ZK_ADDR, CLUSTER_NAME, instanceName);
+
+ // Register a Task state model factory.
+ StateMachineEngine stateMachine = _participants[i].getStateMachineEngine();
+ stateMachine.registerStateModelFactory(StateModelDefId.from("Task"),
+ new TaskStateModelFactory(_participants[i], taskFactoryReg));
+ _participants[i].syncStart();
+ }
+
+ // Start controller
+ String controllerName = CONTROLLER_PREFIX + "_0";
+ _controller = new ClusterControllerManager(ZK_ADDR, CLUSTER_NAME, controllerName);
+ _controller.syncStart();
+
+ // Start an admin connection
+ _manager =
+ HelixManagerFactory.getZKHelixManager(CLUSTER_NAME, "Admin", InstanceType.ADMINISTRATOR,
+ ZK_ADDR);
+ _manager.connect();
+ _driver = new TaskDriver(_manager);
+ }
+
+ @BeforeMethod
+ public void beforeMethod() {
+ _invokedClasses.clear();
+ }
+
+ @Test
+ public void testDifferentTasks() throws Exception {
+ // Create a job with two different tasks
+ String jobName = TestHelper.getTestMethodName();
+ Workflow.Builder workflowBuilder = new Workflow.Builder(jobName);
+ List<TaskConfig> taskConfigs = Lists.newArrayListWithCapacity(2);
+ TaskConfig taskConfig1 = new TaskConfig("TaskOne", null);
+ TaskConfig taskConfig2 = new TaskConfig("TaskTwo", null);
+ taskConfigs.add(taskConfig1);
+ taskConfigs.add(taskConfig2);
+ workflowBuilder.addTaskConfigs(jobName, taskConfigs);
+ workflowBuilder.addConfig(jobName, JobConfig.COMMAND, "DummyCommand");
+ Map<String, String> jobConfigMap = Maps.newHashMap();
+ jobConfigMap.put("Timeout", "1000");
+ workflowBuilder.addJobConfigMap(jobName, jobConfigMap);
+ _driver.start(workflowBuilder.build());
+
+ // Ensure the job completes
+ TestUtil.pollForWorkflowState(_manager, jobName, TaskState.IN_PROGRESS);
+ TestUtil.pollForWorkflowState(_manager, jobName, TaskState.COMPLETED);
+
+ // Ensure that each class was invoked
+ Assert.assertTrue(_invokedClasses.contains(TaskOne.class.getName()));
+ Assert.assertTrue(_invokedClasses.contains(TaskTwo.class.getName()));
+ }
+
+ private class TaskOne extends ReindexTask {
+ public TaskOne(TaskCallbackContext context) {
+ super(context);
+ }
+
+ @Override
+ public TaskResult run() {
+ _invokedClasses.add(getClass().getName());
+ return super.run();
+ }
+ }
+
+ private class TaskTwo extends TaskOne {
+ public TaskTwo(TaskCallbackContext context) {
+ super(context);
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/helix/blob/97ca4de4/helix-core/src/test/java/org/apache/helix/integration/task/TestTaskRebalancer.java
----------------------------------------------------------------------
diff --git a/helix-core/src/test/java/org/apache/helix/integration/task/TestTaskRebalancer.java b/helix-core/src/test/java/org/apache/helix/integration/task/TestTaskRebalancer.java
index 1c83291..0a59ee1 100644
--- a/helix-core/src/test/java/org/apache/helix/integration/task/TestTaskRebalancer.java
+++ b/helix-core/src/test/java/org/apache/helix/integration/task/TestTaskRebalancer.java
@@ -19,9 +19,9 @@ package org.apache.helix.integration.task;
* under the License.
*/
+import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
-import java.util.TreeMap;
import org.apache.helix.AccessOption;
import org.apache.helix.HelixDataAccessor;
@@ -34,10 +34,11 @@ import org.apache.helix.integration.ZkIntegrationTestBase;
import org.apache.helix.integration.manager.ClusterControllerManager;
import org.apache.helix.integration.manager.MockParticipantManager;
import org.apache.helix.participant.StateMachineEngine;
+import org.apache.helix.task.JobConfig;
+import org.apache.helix.task.JobContext;
import org.apache.helix.task.Task;
-import org.apache.helix.task.TaskConfig;
+import org.apache.helix.task.TaskCallbackContext;
import org.apache.helix.task.TaskConstants;
-import org.apache.helix.task.TaskContext;
import org.apache.helix.task.TaskDriver;
import org.apache.helix.task.TaskFactory;
import org.apache.helix.task.TaskPartitionState;
@@ -56,11 +57,13 @@ import org.testng.annotations.Test;
import com.google.common.base.Joiner;
import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
public class TestTaskRebalancer extends ZkIntegrationTestBase {
private static final int n = 5;
private static final int START_PORT = 12918;
private static final String MASTER_SLAVE_STATE_MODEL = "MasterSlave";
+ private static final String TIMEOUT_CONFIG = "Timeout";
private static final int NUM_PARTITIONS = 20;
private static final int NUM_REPLICAS = 3;
private final String CLUSTER_NAME = CLUSTER_PREFIX + "_" + getShortClassName();
@@ -92,8 +95,8 @@ public class TestTaskRebalancer extends ZkIntegrationTestBase {
Map<String, TaskFactory> taskFactoryReg = new HashMap<String, TaskFactory>();
taskFactoryReg.put("Reindex", new TaskFactory() {
@Override
- public Task createNewTask(String config) {
- return new ReindexTask(config);
+ public Task createNewTask(TaskCallbackContext context) {
+ return new ReindexTask(context);
}
});
@@ -152,29 +155,30 @@ public class TestTaskRebalancer extends ZkIntegrationTestBase {
@Test
public void testExpiry() throws Exception {
- String taskName = "Expiry";
+ String jobName = "Expiry";
long expiry = 1000;
+ Map<String, String> commandConfig = ImmutableMap.of(TIMEOUT_CONFIG, String.valueOf(100));
Workflow flow =
WorkflowGenerator
- .generateDefaultSingleTaskWorkflowBuilderWithExtraConfigs(taskName,
- TaskConfig.COMMAND_CONFIG, String.valueOf(100)).setExpiry(expiry).build();
+ .generateDefaultSingleJobWorkflowBuilderWithExtraConfigs(jobName, commandConfig)
+ .setExpiry(expiry).build();
_driver.start(flow);
- TestUtil.pollForWorkflowState(_manager, taskName, TaskState.IN_PROGRESS);
+ TestUtil.pollForWorkflowState(_manager, jobName, TaskState.IN_PROGRESS);
// Running workflow should have config and context viewable through accessor
HelixDataAccessor accessor = _manager.getHelixDataAccessor();
- PropertyKey workflowCfgKey = accessor.keyBuilder().resourceConfig(taskName);
+ PropertyKey workflowCfgKey = accessor.keyBuilder().resourceConfig(jobName);
String workflowPropStoreKey =
- Joiner.on("/").join(TaskConstants.REBALANCER_CONTEXT_ROOT, taskName);
+ Joiner.on("/").join(TaskConstants.REBALANCER_CONTEXT_ROOT, jobName);
// Ensure context and config exist
Assert.assertTrue(_manager.getHelixPropertyStore().exists(workflowPropStoreKey,
AccessOption.PERSISTENT));
Assert.assertNotSame(accessor.getProperty(workflowCfgKey), null);
- // Wait for task to finish and expire
- TestUtil.pollForWorkflowState(_manager, taskName, TaskState.COMPLETED);
+ // Wait for job to finish and expire
+ TestUtil.pollForWorkflowState(_manager, jobName, TaskState.COMPLETED);
Thread.sleep(expiry);
_driver.invokeRebalance();
Thread.sleep(expiry);
@@ -185,25 +189,26 @@ public class TestTaskRebalancer extends ZkIntegrationTestBase {
Assert.assertEquals(accessor.getProperty(workflowCfgKey), null);
}
- private void basic(long taskCompletionTime) throws Exception {
+ private void basic(long jobCompletionTime) throws Exception {
// We use a different resource name in each test method as a work around for a helix participant
// bug where it does
// not clear locally cached state when a resource partition is dropped. Once that is fixed we
// should change these
// tests to use the same resource name and implement a beforeMethod that deletes the task
// resource.
- final String taskResource = "basic" + taskCompletionTime;
+ final String jobResource = "basic" + jobCompletionTime;
+ Map<String, String> commandConfig =
+ ImmutableMap.of(TIMEOUT_CONFIG, String.valueOf(jobCompletionTime));
Workflow flow =
- WorkflowGenerator.generateDefaultSingleTaskWorkflowBuilderWithExtraConfigs(taskResource,
- TaskConfig.COMMAND_CONFIG, String.valueOf(taskCompletionTime)).build();
+ WorkflowGenerator.generateDefaultSingleJobWorkflowBuilderWithExtraConfigs(jobResource,
+ commandConfig).build();
_driver.start(flow);
- // Wait for task completion
- TestUtil.pollForWorkflowState(_manager, taskResource, TaskState.COMPLETED);
+ // Wait for job completion
+ TestUtil.pollForWorkflowState(_manager, jobResource, TaskState.COMPLETED);
// Ensure all partitions are completed individually
- TaskContext ctx =
- TaskUtil.getTaskContext(_manager, TaskUtil.getNamespacedTaskName(taskResource));
+ JobContext ctx = TaskUtil.getJobContext(_manager, TaskUtil.getNamespacedJobName(jobResource));
for (int i = 0; i < NUM_PARTITIONS; i++) {
Assert.assertEquals(ctx.getPartitionState(i), TaskPartitionState.COMPLETED);
Assert.assertEquals(ctx.getPartitionNumAttempts(i), 1);
@@ -212,29 +217,31 @@ public class TestTaskRebalancer extends ZkIntegrationTestBase {
@Test
public void partitionSet() throws Exception {
- final String taskResource = "partitionSet";
- ImmutableList<Integer> targetPartitions = ImmutableList.of(1, 2, 3, 5, 8, 13);
+ final String jobResource = "partitionSet";
+ ImmutableList<String> targetPartitions =
+ ImmutableList.of("TestDB_1", "TestDB_2", "TestDB_3", "TestDB_5", "TestDB_8", "TestDB_13");
// construct and submit our basic workflow
+ Map<String, String> commandConfig = ImmutableMap.of(TIMEOUT_CONFIG, String.valueOf(100));
Workflow flow =
- WorkflowGenerator.generateDefaultSingleTaskWorkflowBuilderWithExtraConfigs(taskResource,
- TaskConfig.COMMAND_CONFIG, String.valueOf(100), TaskConfig.MAX_ATTEMPTS_PER_PARTITION,
- String.valueOf(1), TaskConfig.TARGET_PARTITIONS, Joiner.on(",").join(targetPartitions))
- .build();
+ WorkflowGenerator.generateDefaultSingleJobWorkflowBuilderWithExtraConfigs(jobResource,
+ commandConfig, JobConfig.MAX_ATTEMPTS_PER_TASK, String.valueOf(1),
+ JobConfig.TARGET_PARTITIONS, Joiner.on(",").join(targetPartitions)).build();
_driver.start(flow);
- // wait for task completeness/timeout
- TestUtil.pollForWorkflowState(_manager, taskResource, TaskState.COMPLETED);
+ // wait for job completeness/timeout
+ TestUtil.pollForWorkflowState(_manager, jobResource, TaskState.COMPLETED);
// see if resulting context completed successfully for our partition set
- String namespacedName = TaskUtil.getNamespacedTaskName(taskResource);
+ String namespacedName = TaskUtil.getNamespacedJobName(jobResource);
- TaskContext ctx = TaskUtil.getTaskContext(_manager, namespacedName);
- WorkflowContext workflowContext = TaskUtil.getWorkflowContext(_manager, taskResource);
+ JobContext ctx = TaskUtil.getJobContext(_manager, namespacedName);
+ WorkflowContext workflowContext = TaskUtil.getWorkflowContext(_manager, jobResource);
Assert.assertNotNull(ctx);
Assert.assertNotNull(workflowContext);
- Assert.assertEquals(workflowContext.getTaskState(namespacedName), TaskState.COMPLETED);
- for (int i : targetPartitions) {
+ Assert.assertEquals(workflowContext.getJobState(namespacedName), TaskState.COMPLETED);
+ for (String pName : targetPartitions) {
+ int i = ctx.getPartitionsByTarget().get(pName).get(0);
Assert.assertEquals(ctx.getPartitionState(i), TaskPartitionState.COMPLETED);
Assert.assertEquals(ctx.getPartitionNumAttempts(i), 1);
}
@@ -244,33 +251,32 @@ public class TestTaskRebalancer extends ZkIntegrationTestBase {
public void testRepeatedWorkflow() throws Exception {
String workflowName = "SomeWorkflow";
Workflow flow =
- WorkflowGenerator.generateDefaultRepeatedTaskWorkflowBuilder(workflowName).build();
+ WorkflowGenerator.generateDefaultRepeatedJobWorkflowBuilder(workflowName).build();
new TaskDriver(_manager).start(flow);
- // Wait until the task completes
+ // Wait until the workflow completes
TestUtil.pollForWorkflowState(_manager, workflowName, TaskState.COMPLETED);
// Assert completion for all tasks within two minutes
- for (String task : flow.getTaskConfigs().keySet()) {
- TestUtil.pollForTaskState(_manager, workflowName, task, TaskState.COMPLETED);
+ for (String task : flow.getJobConfigs().keySet()) {
+ TestUtil.pollForJobState(_manager, workflowName, task, TaskState.COMPLETED);
}
}
@Test
public void timeouts() throws Exception {
- final String taskResource = "timeouts";
+ final String jobResource = "timeouts";
Workflow flow =
- WorkflowGenerator.generateDefaultSingleTaskWorkflowBuilderWithExtraConfigs(taskResource,
- TaskConfig.MAX_ATTEMPTS_PER_PARTITION, String.valueOf(2),
- TaskConfig.TIMEOUT_PER_PARTITION, String.valueOf(100)).build();
+ WorkflowGenerator.generateDefaultSingleJobWorkflowBuilderWithExtraConfigs(jobResource,
+ WorkflowGenerator.DEFAULT_COMMAND_CONFIG, JobConfig.MAX_ATTEMPTS_PER_TASK,
+ String.valueOf(2), JobConfig.TIMEOUT_PER_TASK, String.valueOf(100)).build();
_driver.start(flow);
- // Wait until the task reports failure.
- TestUtil.pollForWorkflowState(_manager, taskResource, TaskState.FAILED);
+ // Wait until the job reports failure.
+ TestUtil.pollForWorkflowState(_manager, jobResource, TaskState.FAILED);
// Check that all partitions timed out up to maxAttempts
- TaskContext ctx =
- TaskUtil.getTaskContext(_manager, TaskUtil.getNamespacedTaskName(taskResource));
+ JobContext ctx = TaskUtil.getJobContext(_manager, TaskUtil.getNamespacedJobName(jobResource));
int maxAttempts = 0;
for (int i = 0; i < NUM_PARTITIONS; i++) {
TaskPartitionState state = ctx.getPartitionState(i);
@@ -282,57 +288,17 @@ public class TestTaskRebalancer extends ZkIntegrationTestBase {
Assert.assertEquals(maxAttempts, 2);
}
- @Test
- public void testIndependentTask() throws Exception {
- final String taskResource = "independentTask";
- Map<String, String> config = new TreeMap<String, String>();
- config.put("TargetPartitions", "0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19");
- config.put("Command", "Reindex");
- config.put("CommandConfig", String.valueOf(200));
- config.put("TimeoutPerPartition", String.valueOf(10 * 1000));
- Workflow flow =
- WorkflowGenerator.generateSingleTaskWorkflowBuilder(taskResource, config).build();
- _driver.start(flow);
-
- // Wait for task completion
- TestUtil.pollForWorkflowState(_manager, taskResource, TaskState.COMPLETED);
-
- // Ensure all partitions are completed individually
- TaskContext ctx =
- TaskUtil.getTaskContext(_manager, TaskUtil.getNamespacedTaskName(taskResource));
- for (int i = 0; i < NUM_PARTITIONS; i++) {
- Assert.assertEquals(ctx.getPartitionState(i), TaskPartitionState.COMPLETED);
- Assert.assertEquals(ctx.getPartitionNumAttempts(i), 1);
- }
- }
-
- @Test
- public void testIndependentRepeatedWorkflow() throws Exception {
- final String workflowName = "independentTaskWorkflow";
- Map<String, String> config = new TreeMap<String, String>();
- config.put("TargetPartitions", "0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19");
- config.put("Command", "Reindex");
- config.put("CommandConfig", String.valueOf(200));
- config.put("TimeoutPerPartition", String.valueOf(10 * 1000));
- Workflow flow =
- WorkflowGenerator.generateRepeatedTaskWorkflowBuilder(workflowName, config).build();
- new TaskDriver(_manager).start(flow);
-
- // Wait until the task completes
- TestUtil.pollForWorkflowState(_manager, workflowName, TaskState.COMPLETED);
-
- // Assert completion for all tasks within two minutes
- for (String task : flow.getTaskConfigs().keySet()) {
- TestUtil.pollForTaskState(_manager, workflowName, task, TaskState.COMPLETED);
- }
- }
-
private static class ReindexTask implements Task {
private final long _delay;
private volatile boolean _canceled;
- public ReindexTask(String cfg) {
- _delay = Long.parseLong(cfg);
+ public ReindexTask(TaskCallbackContext context) {
+ JobConfig jobCfg = context.getJobConfig();
+ Map<String, String> cfg = jobCfg.getJobConfigMap();
+ if (cfg == null) {
+ cfg = Collections.emptyMap();
+ }
+ _delay = cfg.containsKey(TIMEOUT_CONFIG) ? Long.parseLong(cfg.get(TIMEOUT_CONFIG)) : 200L;
}
@Override
http://git-wip-us.apache.org/repos/asf/helix/blob/97ca4de4/helix-core/src/test/java/org/apache/helix/integration/task/TestTaskRebalancerStopResume.java
----------------------------------------------------------------------
diff --git a/helix-core/src/test/java/org/apache/helix/integration/task/TestTaskRebalancerStopResume.java b/helix-core/src/test/java/org/apache/helix/integration/task/TestTaskRebalancerStopResume.java
index bb490ea..e555468 100644
--- a/helix-core/src/test/java/org/apache/helix/integration/task/TestTaskRebalancerStopResume.java
+++ b/helix-core/src/test/java/org/apache/helix/integration/task/TestTaskRebalancerStopResume.java
@@ -19,6 +19,7 @@ package org.apache.helix.integration.task;
* under the License.
*/
+import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
@@ -30,8 +31,9 @@ import org.apache.helix.integration.ZkIntegrationTestBase;
import org.apache.helix.integration.manager.ClusterControllerManager;
import org.apache.helix.integration.manager.MockParticipantManager;
import org.apache.helix.participant.StateMachineEngine;
+import org.apache.helix.task.JobConfig;
import org.apache.helix.task.Task;
-import org.apache.helix.task.TaskConfig;
+import org.apache.helix.task.TaskCallbackContext;
import org.apache.helix.task.TaskDriver;
import org.apache.helix.task.TaskFactory;
import org.apache.helix.task.TaskResult;
@@ -46,13 +48,16 @@ import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
+import com.google.common.collect.ImmutableMap;
+
public class TestTaskRebalancerStopResume extends ZkIntegrationTestBase {
private static final Logger LOG = Logger.getLogger(TestTaskRebalancerStopResume.class);
private static final int n = 5;
private static final int START_PORT = 12918;
private static final String MASTER_SLAVE_STATE_MODEL = "MasterSlave";
+ private static final String TIMEOUT_CONFIG = "Timeout";
private static final String TGT_DB = "TestDB";
- private static final String TASK_RESOURCE = "SomeTask";
+ private static final String JOB_RESOURCE = "SomeJob";
private static final int NUM_PARTITIONS = 20;
private static final int NUM_REPLICAS = 3;
private final String CLUSTER_NAME = CLUSTER_PREFIX + "_" + getShortClassName();
@@ -83,8 +88,8 @@ public class TestTaskRebalancerStopResume extends ZkIntegrationTestBase {
Map<String, TaskFactory> taskFactoryReg = new HashMap<String, TaskFactory>();
taskFactoryReg.put("Reindex", new TaskFactory() {
@Override
- public Task createNewTask(String config) {
- return new ReindexTask(config);
+ public Task createNewTask(TaskCallbackContext context) {
+ return new ReindexTask(context);
}
});
@@ -137,27 +142,28 @@ public class TestTaskRebalancerStopResume extends ZkIntegrationTestBase {
@Test
public void stopAndResume() throws Exception {
+ Map<String, String> commandConfig = ImmutableMap.of(TIMEOUT_CONFIG, String.valueOf(100));
Workflow flow =
- WorkflowGenerator.generateDefaultSingleTaskWorkflowBuilderWithExtraConfigs(TASK_RESOURCE,
- TaskConfig.COMMAND_CONFIG, String.valueOf(100)).build();
+ WorkflowGenerator.generateDefaultSingleJobWorkflowBuilderWithExtraConfigs(JOB_RESOURCE,
+ commandConfig).build();
LOG.info("Starting flow " + flow.getName());
_driver.start(flow);
- TestUtil.pollForWorkflowState(_manager, TASK_RESOURCE, TaskState.IN_PROGRESS);
+ TestUtil.pollForWorkflowState(_manager, JOB_RESOURCE, TaskState.IN_PROGRESS);
- LOG.info("Pausing task");
- _driver.stop(TASK_RESOURCE);
- TestUtil.pollForWorkflowState(_manager, TASK_RESOURCE, TaskState.STOPPED);
+ LOG.info("Pausing job");
+ _driver.stop(JOB_RESOURCE);
+ TestUtil.pollForWorkflowState(_manager, JOB_RESOURCE, TaskState.STOPPED);
- LOG.info("Resuming task");
- _driver.resume(TASK_RESOURCE);
- TestUtil.pollForWorkflowState(_manager, TASK_RESOURCE, TaskState.COMPLETED);
+ LOG.info("Resuming job");
+ _driver.resume(JOB_RESOURCE);
+ TestUtil.pollForWorkflowState(_manager, JOB_RESOURCE, TaskState.COMPLETED);
}
@Test
public void stopAndResumeWorkflow() throws Exception {
String workflow = "SomeWorkflow";
- Workflow flow = WorkflowGenerator.generateDefaultRepeatedTaskWorkflowBuilder(workflow).build();
+ Workflow flow = WorkflowGenerator.generateDefaultRepeatedJobWorkflowBuilder(workflow).build();
LOG.info("Starting flow " + workflow);
_driver.start(flow);
@@ -176,8 +182,13 @@ public class TestTaskRebalancerStopResume extends ZkIntegrationTestBase {
private final long _delay;
private volatile boolean _canceled;
- public ReindexTask(String cfg) {
- _delay = Long.parseLong(cfg);
+ public ReindexTask(TaskCallbackContext context) {
+ JobConfig jobCfg = context.getJobConfig();
+ Map<String, String> cfg = jobCfg.getJobConfigMap();
+ if (cfg == null) {
+ cfg = Collections.emptyMap();
+ }
+ _delay = cfg.containsKey(TIMEOUT_CONFIG) ? Long.parseLong(cfg.get(TIMEOUT_CONFIG)) : 200L;
}
@Override
http://git-wip-us.apache.org/repos/asf/helix/blob/97ca4de4/helix-core/src/test/java/org/apache/helix/integration/task/TestUtil.java
----------------------------------------------------------------------
diff --git a/helix-core/src/test/java/org/apache/helix/integration/task/TestUtil.java b/helix-core/src/test/java/org/apache/helix/integration/task/TestUtil.java
index 2cc6cb8..520d7c0 100644
--- a/helix-core/src/test/java/org/apache/helix/integration/task/TestUtil.java
+++ b/helix-core/src/test/java/org/apache/helix/integration/task/TestUtil.java
@@ -20,18 +20,17 @@ package org.apache.helix.integration.task;
*/
import org.apache.helix.HelixManager;
-import org.apache.helix.task.*;
-import org.apache.log4j.Logger;
+import org.apache.helix.task.TaskState;
+import org.apache.helix.task.TaskUtil;
+import org.apache.helix.task.WorkflowContext;
import org.testng.Assert;
/**
* Static test utility methods.
*/
public class TestUtil {
- private static final Logger LOG = Logger.getLogger(TestUtil.class);
-
/**
- * Polls {@link org.apache.helix.task.TaskContext} for given task resource until a timeout is
+ * Polls {@link org.apache.helix.task.JobContext} for given task resource until a timeout is
* reached.
* If the task has not reached target state by then, an error is thrown
* @param workflowResource Resource to poll for completeness
@@ -52,15 +51,15 @@ public class TestUtil {
Assert.assertEquals(ctx.getWorkflowState(), state);
}
- public static void pollForTaskState(HelixManager manager, String workflowResource,
- String taskName, TaskState state) throws InterruptedException {
+ public static void pollForJobState(HelixManager manager, String workflowResource,
+ String jobName, TaskState state) throws InterruptedException {
// Wait for completion.
long st = System.currentTimeMillis();
WorkflowContext ctx;
do {
Thread.sleep(100);
ctx = TaskUtil.getWorkflowContext(manager, workflowResource);
- } while ((ctx == null || ctx.getTaskState(taskName) == null || ctx.getTaskState(taskName) != state)
+ } while ((ctx == null || ctx.getJobState(jobName) == null || ctx.getJobState(jobName) != state)
&& System.currentTimeMillis() < st + 2 * 60 * 1000 /* 2 mins */);
Assert.assertNotNull(ctx);
http://git-wip-us.apache.org/repos/asf/helix/blob/97ca4de4/helix-core/src/test/java/org/apache/helix/integration/task/WorkflowGenerator.java
----------------------------------------------------------------------
diff --git a/helix-core/src/test/java/org/apache/helix/integration/task/WorkflowGenerator.java b/helix-core/src/test/java/org/apache/helix/integration/task/WorkflowGenerator.java
index 653d88a..921a5f9 100644
--- a/helix-core/src/test/java/org/apache/helix/integration/task/WorkflowGenerator.java
+++ b/helix-core/src/test/java/org/apache/helix/integration/task/WorkflowGenerator.java
@@ -19,72 +19,95 @@ package org.apache.helix.integration.task;
* under the License.
*/
+import java.io.IOException;
import java.util.Collections;
import java.util.Map;
import java.util.TreeMap;
+import org.apache.helix.task.JobConfig;
import org.apache.helix.task.Workflow;
+import org.apache.log4j.Logger;
+import org.codehaus.jackson.map.ObjectMapper;
/**
* Convenience class for generating various test workflows
*/
public class WorkflowGenerator {
+ private static final Logger LOG = Logger.getLogger(WorkflowGenerator.class);
+
public static final String DEFAULT_TGT_DB = "TestDB";
- private static final String TASK_NAME_1 = "SomeTask1";
- private static final String TASK_NAME_2 = "SomeTask2";
+ public static final String JOB_NAME_1 = "SomeJob1";
+ public static final String JOB_NAME_2 = "SomeJob2";
- private static final Map<String, String> DEFAULT_TASK_CONFIG;
+ public static final Map<String, String> DEFAULT_JOB_CONFIG;
static {
Map<String, String> tmpMap = new TreeMap<String, String>();
tmpMap.put("TargetResource", DEFAULT_TGT_DB);
tmpMap.put("TargetPartitionStates", "MASTER");
tmpMap.put("Command", "Reindex");
- tmpMap.put("CommandConfig", String.valueOf(2000));
tmpMap.put("TimeoutPerPartition", String.valueOf(10 * 1000));
- DEFAULT_TASK_CONFIG = Collections.unmodifiableMap(tmpMap);
+ DEFAULT_JOB_CONFIG = Collections.unmodifiableMap(tmpMap);
+ }
+
+ public static final Map<String, String> DEFAULT_COMMAND_CONFIG;
+ static {
+ Map<String, String> tmpMap = new TreeMap<String, String>();
+ tmpMap.put("Timeout", String.valueOf(2000));
+ DEFAULT_COMMAND_CONFIG = Collections.unmodifiableMap(tmpMap);
}
- public static Workflow.Builder generateDefaultSingleTaskWorkflowBuilderWithExtraConfigs(
- String taskName, String... cfgs) {
+ public static Workflow.Builder generateDefaultSingleJobWorkflowBuilderWithExtraConfigs(
+ String jobName, Map<String, String> commandConfig, String... cfgs) {
if (cfgs.length % 2 != 0) {
throw new IllegalArgumentException(
"Additional configs should have even number of keys and values");
}
- Workflow.Builder bldr = generateDefaultSingleTaskWorkflowBuilder(taskName);
+ Workflow.Builder bldr = generateDefaultSingleJobWorkflowBuilder(jobName);
for (int i = 0; i < cfgs.length; i += 2) {
- bldr.addConfig(taskName, cfgs[i], cfgs[i + 1]);
+ bldr.addConfig(jobName, cfgs[i], cfgs[i + 1]);
}
return bldr;
}
- public static Workflow.Builder generateDefaultSingleTaskWorkflowBuilder(String taskName) {
- return generateSingleTaskWorkflowBuilder(taskName, DEFAULT_TASK_CONFIG);
+ public static Workflow.Builder generateDefaultSingleJobWorkflowBuilder(String jobName) {
+ return generateSingleJobWorkflowBuilder(jobName, DEFAULT_COMMAND_CONFIG, DEFAULT_JOB_CONFIG);
}
- public static Workflow.Builder generateSingleTaskWorkflowBuilder(String taskName,
- Map<String, String> config) {
- Workflow.Builder builder = new Workflow.Builder(taskName);
+ public static Workflow.Builder generateSingleJobWorkflowBuilder(String jobName,
+ Map<String, String> commandConfig, Map<String, String> config) {
+ Workflow.Builder builder = new Workflow.Builder(jobName);
for (String key : config.keySet()) {
- builder.addConfig(taskName, key, config.get(key));
+ builder.addConfig(jobName, key, config.get(key));
+ }
+ if (commandConfig != null) {
+ ObjectMapper mapper = new ObjectMapper();
+ try {
+ String serializedMap = mapper.writeValueAsString(commandConfig);
+ builder.addConfig(jobName, JobConfig.JOB_CONFIG_MAP, serializedMap);
+ } catch (IOException e) {
+ LOG.error("Error serializing " + commandConfig, e);
+ }
}
return builder;
}
- public static Workflow.Builder generateDefaultRepeatedTaskWorkflowBuilder(String workflowName) {
- return generateRepeatedTaskWorkflowBuilder(workflowName, DEFAULT_TASK_CONFIG);
- }
-
- public static Workflow.Builder generateRepeatedTaskWorkflowBuilder(String workflowName,
- Map<String, String> config) {
+ public static Workflow.Builder generateDefaultRepeatedJobWorkflowBuilder(String workflowName) {
Workflow.Builder builder = new Workflow.Builder(workflowName);
- builder.addParentChildDependency(TASK_NAME_1, TASK_NAME_2);
+ builder.addParentChildDependency(JOB_NAME_1, JOB_NAME_2);
- for (String key : config.keySet()) {
- builder.addConfig(TASK_NAME_1, key, config.get(key));
- builder.addConfig(TASK_NAME_2, key, config.get(key));
+ for (String key : DEFAULT_JOB_CONFIG.keySet()) {
+ builder.addConfig(JOB_NAME_1, key, DEFAULT_JOB_CONFIG.get(key));
+ builder.addConfig(JOB_NAME_2, key, DEFAULT_JOB_CONFIG.get(key));
+ }
+ ObjectMapper mapper = new ObjectMapper();
+ try {
+ String serializedMap = mapper.writeValueAsString(DEFAULT_COMMAND_CONFIG);
+ builder.addConfig(JOB_NAME_1, JobConfig.JOB_CONFIG_MAP, serializedMap);
+ builder.addConfig(JOB_NAME_2, JobConfig.JOB_CONFIG_MAP, serializedMap);
+ } catch (IOException e) {
+ LOG.error("Error serializing " + DEFAULT_COMMAND_CONFIG, e);
}
-
return builder;
}
}
http://git-wip-us.apache.org/repos/asf/helix/blob/97ca4de4/helix-provisioning/src/main/java/org/apache/helix/provisioning/tools/TaskManager.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/tools/TaskManager.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/tools/TaskManager.java
deleted file mode 100644
index 437880e..0000000
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/tools/TaskManager.java
+++ /dev/null
@@ -1,247 +0,0 @@
-package org.apache.helix.provisioning.tools;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import java.util.Map;
-
-import org.I0Itec.zkclient.DataUpdater;
-import org.apache.helix.AccessOption;
-import org.apache.helix.ClusterMessagingService;
-import org.apache.helix.HelixConnection;
-import org.apache.helix.HelixDataAccessor;
-import org.apache.helix.HelixManager;
-import org.apache.helix.HelixRole;
-import org.apache.helix.InstanceType;
-import org.apache.helix.PropertyKey;
-import org.apache.helix.ZNRecord;
-import org.apache.helix.api.State;
-import org.apache.helix.api.id.ClusterId;
-import org.apache.helix.api.id.Id;
-import org.apache.helix.api.id.ParticipantId;
-import org.apache.helix.api.id.PartitionId;
-import org.apache.helix.api.id.ResourceId;
-import org.apache.helix.api.id.SessionId;
-import org.apache.helix.manager.zk.HelixConnectionAdaptor;
-import org.apache.helix.model.ExternalView;
-import org.apache.helix.model.IdealState.IdealStateProperty;
-import org.apache.helix.model.LiveInstance;
-import org.apache.helix.model.ResourceConfiguration;
-import org.apache.helix.task.TaskConfig;
-import org.apache.helix.task.TaskDriver;
-import org.apache.helix.task.TaskPartitionState;
-import org.apache.helix.task.TaskUtil;
-import org.apache.helix.task.Workflow;
-import org.apache.log4j.Logger;
-
-import com.google.common.collect.Maps;
-
-public class TaskManager {
- private static final Logger LOG = Logger.getLogger(TaskManager.class);
-
- private final ClusterId _clusterId;
- private final HelixConnection _connection;
- private final HelixManager _manager;
- private final TaskDriver _driver;
-
- public TaskManager(final ClusterId clusterId, final HelixConnection connection) {
- HelixRole dummyRole = new HelixRole() {
- @Override
- public HelixConnection getConnection() {
- return connection;
- }
-
- @Override
- public ClusterId getClusterId() {
- return clusterId;
- }
-
- @Override
- public Id getId() {
- return clusterId;
- }
-
- @Override
- public InstanceType getType() {
- return InstanceType.ADMINISTRATOR;
- }
-
- @Override
- public ClusterMessagingService getMessagingService() {
- return null;
- }
- };
- _manager = new HelixConnectionAdaptor(dummyRole);
- _driver = new TaskDriver(_manager);
- _clusterId = clusterId;
- _connection = connection;
- }
-
- public boolean createTaskQueue(String queueName, boolean isParallel) {
- Workflow.Builder builder = new Workflow.Builder(queueName);
- builder.addConfig(queueName, TaskConfig.COMMAND, queueName);
- builder.addConfig(queueName, TaskConfig.TARGET_PARTITIONS, "");
- builder.addConfig(queueName, TaskConfig.COMMAND_CONFIG, "");
- builder.addConfig(queueName, TaskConfig.LONG_LIVED + "", String.valueOf(true));
- if (isParallel) {
- builder.addConfig(queueName, TaskConfig.NUM_CONCURRENT_TASKS_PER_INSTANCE,
- String.valueOf(Integer.MAX_VALUE));
- }
- Workflow workflow = builder.build();
- try {
- _driver.start(workflow);
- } catch (Exception e) {
- LOG.error("Failed to start queue " + queueName, e);
- return false;
- }
- return true;
- }
-
- public void addTaskToQueue(final String taskName, final String queueName) {
- // Update the resource config with the new partition count
- HelixDataAccessor accessor = _connection.createDataAccessor(_clusterId);
- PropertyKey.Builder keyBuilder = accessor.keyBuilder();
- final ResourceId resourceId = resourceId(queueName);
- final int[] numPartitions = {
- 0
- };
- DataUpdater<ZNRecord> dataUpdater = new DataUpdater<ZNRecord>() {
- @Override
- public ZNRecord update(ZNRecord currentData) {
- // Update the partition integers to add one to the end, and have that integer map to the
- // task name
- String current = currentData.getSimpleField(TaskConfig.TARGET_PARTITIONS);
- int currentId = 0;
- if (current == null || current.isEmpty()) {
- currentData.setSimpleField(TaskConfig.TARGET_PARTITIONS, String.valueOf(currentId));
- } else {
- String[] parts = current.split(",");
- currentId = parts.length;
- numPartitions[0] = currentId + 1;
- currentData.setSimpleField(TaskConfig.TARGET_PARTITIONS, current + "," + currentId);
- }
- Map<String, String> partitionMap = currentData.getMapField(TaskConfig.TASK_NAME_MAP);
- if (partitionMap == null) {
- partitionMap = Maps.newHashMap();
- currentData.setMapField(TaskConfig.TASK_NAME_MAP, partitionMap);
- }
- partitionMap.put(resourceId.toString() + '_' + currentId, taskName);
- return currentData;
- }
- };
- String configPath = keyBuilder.resourceConfig(resourceId.toString()).getPath();
- List<DataUpdater<ZNRecord>> dataUpdaters = new ArrayList<DataUpdater<ZNRecord>>();
- dataUpdaters.add(dataUpdater);
- accessor.updateChildren(Arrays.asList(configPath), dataUpdaters, AccessOption.PERSISTENT);
-
- // Update the ideal state with the proper partition count
- DataUpdater<ZNRecord> idealStateUpdater = new DataUpdater<ZNRecord>() {
- @Override
- public ZNRecord update(ZNRecord currentData) {
- currentData.setSimpleField(IdealStateProperty.NUM_PARTITIONS.toString(),
- String.valueOf(numPartitions[0]));
- return currentData;
- }
- };
- String idealStatePath = keyBuilder.idealStates(queueName + "_" + queueName).getPath();
- dataUpdaters.clear();
- dataUpdaters.add(idealStateUpdater);
- accessor.updateChildren(Arrays.asList(idealStatePath), dataUpdaters, AccessOption.PERSISTENT);
- }
-
- public void cancelTask(String queueName, String taskName) {
- // Get the mapped task name
- final ResourceId resourceId = resourceId(queueName);
- HelixDataAccessor accessor = _connection.createDataAccessor(_clusterId);
- PropertyKey.Builder keyBuilder = accessor.keyBuilder();
- ResourceConfiguration resourceConfig =
- accessor.getProperty(keyBuilder.resourceConfig(resourceId.stringify()));
- if (resourceConfig == null) {
- LOG.error("Queue " + queueName + " does not exist!");
- return;
- }
- Map<String, String> taskMap = resourceConfig.getRecord().getMapField(TaskConfig.TASK_NAME_MAP);
- if (taskMap == null) {
- LOG.error("Task " + taskName + " in queue " + queueName + " does not exist!");
- return;
- }
- String partitionName = null;
- for (Map.Entry<String, String> e : taskMap.entrySet()) {
- String possiblePartition = e.getKey();
- String possibleTask = e.getValue();
- if (taskName.equals(possibleTask)) {
- partitionName = possiblePartition;
- break;
- }
- }
- if (partitionName == null) {
- LOG.error("Task " + taskName + " in queue " + queueName + " does not exist!");
- return;
- }
-
- // Now search the external view for who is running the task
- ExternalView externalView =
- accessor.getProperty(keyBuilder.externalView(resourceId.toString()));
- if (externalView == null) {
- LOG.error("Queue " + queueName + " was never started!");
- return;
- }
- PartitionId partitionId = PartitionId.from(partitionName);
- Map<ParticipantId, State> stateMap = externalView.getStateMap(partitionId);
- if (stateMap == null || stateMap.isEmpty()) {
- LOG.warn("Task " + taskName + " in queue " + queueName + " is not currently running");
- return;
- }
- ParticipantId targetParticipant = null;
- for (ParticipantId participantId : stateMap.keySet()) {
- targetParticipant = participantId;
- }
- if (targetParticipant == null) {
- LOG.warn("Task " + taskName + " in queue " + queueName + " is not currently running");
- return;
- }
-
- // Send a request to stop to the appropriate live instance
- LiveInstance liveInstance =
- accessor.getProperty(keyBuilder.liveInstance(targetParticipant.toString()));
- if (liveInstance == null) {
- LOG.error("Task " + taskName + " in queue " + queueName
- + " is assigned to a non-running participant");
- return;
- }
- SessionId sessionId = liveInstance.getTypedSessionId();
- TaskUtil.setRequestedState(accessor, targetParticipant.toString(), sessionId.toString(),
- resourceId.toString(), partitionId.toString(), TaskPartitionState.STOPPED);
- LOG.info("Task" + taskName + " for queue " + queueName + " instructed to stop");
- }
-
- public void shutdownQueue(String queueName) {
- // Check if tasks are complete, then set task and workflows to complete
-
- // Otherwise, send a stop for everybody
- _driver.stop(resourceId(queueName).toString());
- }
-
- private ResourceId resourceId(String queueName) {
- return ResourceId.from(queueName + '_' + queueName);
- }
-}
http://git-wip-us.apache.org/repos/asf/helix/blob/97ca4de4/helix-provisioning/src/test/java/org/apache/helix/provisioning/tools/TestTaskManager.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/test/java/org/apache/helix/provisioning/tools/TestTaskManager.java b/helix-provisioning/src/test/java/org/apache/helix/provisioning/tools/TestTaskManager.java
deleted file mode 100644
index 7d46cff..0000000
--- a/helix-provisioning/src/test/java/org/apache/helix/provisioning/tools/TestTaskManager.java
+++ /dev/null
@@ -1,149 +0,0 @@
-package org.apache.helix.provisioning.tools;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-import java.util.HashMap;
-import java.util.Map;
-
-import org.apache.helix.HelixConnection;
-import org.apache.helix.TestHelper;
-import org.apache.helix.ZkUnitTestBase;
-import org.apache.helix.api.id.ClusterId;
-import org.apache.helix.api.id.StateModelDefId;
-import org.apache.helix.integration.TestHelixConnection;
-import org.apache.helix.integration.manager.ClusterControllerManager;
-import org.apache.helix.integration.manager.MockParticipantManager;
-import org.apache.helix.manager.zk.ZkHelixConnection;
-import org.apache.helix.model.IdealState.RebalanceMode;
-import org.apache.helix.task.Task;
-import org.apache.helix.task.TaskFactory;
-import org.apache.helix.task.TaskResult;
-import org.apache.helix.task.TaskStateModelFactory;
-import org.testng.annotations.Test;
-
-public class TestTaskManager extends ZkUnitTestBase {
- @Test
- public void testBasic() throws Exception {
- final int NUM_PARTICIPANTS = 3;
- final int NUM_PARTITIONS = 1;
- final int NUM_REPLICAS = 1;
-
- String className = TestHelper.getTestClassName();
- String methodName = TestHelper.getTestMethodName();
- String clusterName = className + "_" + methodName;
-
- // Set up cluster
- TestHelper.setupCluster(clusterName, ZK_ADDR, 12918, // participant port
- "localhost", // participant name prefix
- "TestService", // resource name prefix
- 1, // resources
- NUM_PARTITIONS, // partitions per resource
- NUM_PARTICIPANTS, // number of nodes
- NUM_REPLICAS, // replicas
- "StatelessService", RebalanceMode.FULL_AUTO, // just get everything up
- true); // do rebalance
-
- Map<String, TaskFactory> taskFactoryReg = new HashMap<String, TaskFactory>();
- taskFactoryReg.put("mytask1", new TaskFactory() {
- @Override
- public Task createNewTask(String config) {
- return new MyTask(1);
- }
- });
- taskFactoryReg.put("mytask2", new TaskFactory() {
- @Override
- public Task createNewTask(String config) {
- return new MyTask(2);
- }
- });
- MockParticipantManager[] participants = new MockParticipantManager[NUM_PARTICIPANTS];
- for (int i = 0; i < participants.length; i++) {
- String instanceName = "localhost_" + (12918 + i);
- participants[i] = new MockParticipantManager(ZK_ADDR, clusterName, instanceName);
- participants[i].getStateMachineEngine()
- .registerStateModelFactory(StateModelDefId.from("StatelessService"),
- new TestHelixConnection.MockStateModelFactory());
- participants[i].getStateMachineEngine().registerStateModelFactory(
- StateModelDefId.from("Task"), new TaskStateModelFactory(participants[i], taskFactoryReg));
- participants[i].syncStart();
- }
-
- ClusterControllerManager controller =
- new ClusterControllerManager(ZK_ADDR, clusterName, "controller_1");
- controller.syncStart();
-
- HelixConnection connection = new ZkHelixConnection(ZK_ADDR);
- connection.connect();
- ClusterId clusterId = ClusterId.from(clusterName);
- TaskManager taskManager = new TaskManager(clusterId, connection);
- taskManager.createTaskQueue("myqueue", true);
- taskManager.addTaskToQueue("mytask1", "myqueue");
- Thread.sleep(5000);
- taskManager.addTaskToQueue("mytask2", "myqueue");
- taskManager.cancelTask("myqueue", "mytask1");
-
- controller.syncStop();
- for (MockParticipantManager participant : participants) {
- participant.syncStop();
- }
- }
-
- public static class MyTask implements Task {
- private final int _id;
- private Thread _t;
- private TaskResult.Status _status = null;
-
- public MyTask(int id) {
- _id = id;
- }
-
- @Override
- public TaskResult run() {
- _t = new Thread() {
- @Override
- public void run() {
- try {
- Thread.sleep(60000);
- _status = TaskResult.Status.COMPLETED;
- System.err.println("task complete for " + _id);
- } catch (InterruptedException e) {
- _status = TaskResult.Status.CANCELED;
- System.err.println("task canceled for " + _id);
- interrupt();
- }
- }
- };
- _t.start();
- try {
- _t.join();
- } catch (InterruptedException e) {
- _status = TaskResult.Status.CANCELED;
- }
- return new TaskResult(_status, "");
- }
-
- @Override
- public void cancel() {
- if (_t != null && _t.isAlive()) {
- _t.interrupt();
- }
- }
- }
-}
[35/50] [abbrv] git commit: Complete job runner recipe
Posted by ka...@apache.org.
Complete job runner recipe
Project: http://git-wip-us.apache.org/repos/asf/helix/repo
Commit: http://git-wip-us.apache.org/repos/asf/helix/commit/785bb9fb
Tree: http://git-wip-us.apache.org/repos/asf/helix/tree/785bb9fb
Diff: http://git-wip-us.apache.org/repos/asf/helix/diff/785bb9fb
Branch: refs/heads/master
Commit: 785bb9fbbab2d82532a26ed253e6a72dffaa9849
Parents: 97ca4de
Author: Kanak Biscuitwala <ka...@apache.org>
Authored: Wed Apr 30 18:28:08 2014 -0700
Committer: Kanak Biscuitwala <ka...@apache.org>
Committed: Wed Apr 30 18:28:08 2014 -0700
----------------------------------------------------------------------
.../java/org/apache/helix/task/JobContext.java | 23 ++-
.../java/org/apache/helix/task/TaskDriver.java | 60 ++++---
.../org/apache/helix/task/TaskRebalancer.java | 5 +
.../java/org/apache/helix/task/Workflow.java | 2 +-
.../helix/provisioning/yarn/AppLauncher.java | 75 +++++++--
.../provisioning/yarn/AppMasterLauncher.java | 50 +++---
recipes/jobrunner-yarn/pom.xml | 159 +++++++++++++++++++
recipes/jobrunner-yarn/run.sh | 6 +
.../jobrunner-yarn/src/assemble/assembly.xml | 60 +++++++
.../src/main/config/log4j.properties | 31 ++++
.../yarn/example/JobRunnerMain.java | 127 +++++++++++++++
.../helix/provisioning/yarn/example/MyTask.java | 53 +++++++
.../yarn/example/MyTaskAppSpec.java | 148 +++++++++++++++++
.../yarn/example/MyTaskAppSpecFactory.java | 28 ++++
.../yarn/example/MyTaskService.java | 62 ++++++++
.../src/main/resources/dummy_job.yaml | 18 +++
.../src/main/resources/job_runner_app_spec.yaml | 27 ++++
recipes/jobrunner-yarn/src/test/conf/testng.xml | 27 ++++
recipes/pom.xml | 1 +
19 files changed, 909 insertions(+), 53 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/helix/blob/785bb9fb/helix-core/src/main/java/org/apache/helix/task/JobContext.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/JobContext.java b/helix-core/src/main/java/org/apache/helix/task/JobContext.java
index 7742c67..c10173d 100644
--- a/helix-core/src/main/java/org/apache/helix/task/JobContext.java
+++ b/helix-core/src/main/java/org/apache/helix/task/JobContext.java
@@ -43,7 +43,8 @@ public class JobContext extends HelixProperty {
NUM_ATTEMPTS,
FINISH_TIME,
TARGET,
- TASK_ID
+ TASK_ID,
+ ASSIGNED_PARTICIPANT
}
public JobContext(ZNRecord record) {
@@ -224,4 +225,24 @@ public class JobContext extends HelixProperty {
}
return partitionMap;
}
+
+ public void setAssignedParticipant(int p, String participantName) {
+ String pStr = String.valueOf(p);
+ Map<String, String> map = _record.getMapField(pStr);
+ if (map == null) {
+ map = new TreeMap<String, String>();
+ _record.setMapField(pStr, map);
+ }
+ map.put(ContextProperties.ASSIGNED_PARTICIPANT.toString(), participantName);
+ }
+
+ public String getAssignedParticipant(int p) {
+ String pStr = String.valueOf(p);
+ Map<String, String> map = _record.getMapField(pStr);
+ if (map == null) {
+ return null;
+ } else {
+ return map.get(ContextProperties.ASSIGNED_PARTICIPANT.toString());
+ }
+ }
}
http://git-wip-us.apache.org/repos/asf/helix/blob/785bb9fb/helix-core/src/main/java/org/apache/helix/task/TaskDriver.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/TaskDriver.java b/helix-core/src/main/java/org/apache/helix/task/TaskDriver.java
index ada2f99..193b78e 100644
--- a/helix-core/src/main/java/org/apache/helix/task/TaskDriver.java
+++ b/helix-core/src/main/java/org/apache/helix/task/TaskDriver.java
@@ -24,7 +24,6 @@ import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Map;
-import java.util.TreeMap;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
@@ -46,7 +45,7 @@ import org.apache.helix.model.IdealState;
import org.apache.helix.model.builder.CustomModeISBuilder;
import org.apache.log4j.Logger;
-import com.beust.jcommander.internal.Lists;
+import com.google.common.collect.Lists;
/**
* CLI for scheduling/canceling workflows
@@ -233,36 +232,59 @@ public class TaskDriver {
WorkflowConfig wCfg = TaskUtil.getWorkflowCfg(_manager, resource);
WorkflowContext wCtx = TaskUtil.getWorkflowContext(_manager, resource);
- LOG.info("Workflow " + resource + " consists of the following tasks: "
+ System.out.println("Workflow " + resource + " consists of the following tasks: "
+ wCfg.getJobDag().getAllNodes());
- LOG.info("Current state of workflow is " + wCtx.getWorkflowState().name());
- LOG.info("Job states are: ");
- LOG.info("-------");
+ System.out.println("Current state of workflow is " + wCtx.getWorkflowState().name());
+ System.out.println("Job states are: ");
+ System.out.println("-------");
for (String job : wCfg.getJobDag().getAllNodes()) {
- LOG.info("Task " + job + " is " + wCtx.getJobState(job));
+ System.out.println("Job " + job + " is " + wCtx.getJobState(job));
// fetch task information
+ JobConfig jCfg = TaskUtil.getJobCfg(_manager, job);
JobContext jCtx = TaskUtil.getJobContext(_manager, job);
// calculate taskPartitions
List<Integer> partitions = Lists.newArrayList(jCtx.getPartitionSet());
Collections.sort(partitions);
- // group partitions by status
- Map<TaskPartitionState, Integer> statusCount = new TreeMap<TaskPartitionState, Integer>();
- for (Integer i : partitions) {
- TaskPartitionState s = jCtx.getPartitionState(i);
- if (!statusCount.containsKey(s)) {
- statusCount.put(s, 0);
+ // report status
+ for (Integer partition : partitions) {
+ String taskId = jCtx.getTaskIdForPartition(partition);
+ taskId = (taskId != null) ? taskId : jCtx.getTargetForPartition(partition);
+ System.out.println("Task: " + taskId);
+ TaskConfig taskConfig = jCfg.getTaskConfig(taskId);
+ if (taskConfig != null) {
+ System.out.println("Configuration: " + taskConfig.getConfigMap());
}
- statusCount.put(s, statusCount.get(s) + 1);
- }
-
- for (TaskPartitionState s : statusCount.keySet()) {
- LOG.info(statusCount.get(s) + "/" + partitions.size() + " in state " + s.name());
+ TaskPartitionState state = jCtx.getPartitionState(partition);
+ if (state == null) {
+ state = TaskPartitionState.INIT;
+ }
+ System.out.println("State: " + state);
+ String assignedParticipant = jCtx.getAssignedParticipant(partition);
+ if (assignedParticipant != null) {
+ System.out.println("Assigned participant: " + assignedParticipant);
+ }
+ System.out.println("-------");
}
- LOG.info("-------");
+ // group partitions by status
+ /*
+ * Map<TaskPartitionState, Integer> statusCount = new TreeMap<TaskPartitionState, Integer>();
+ * for (Integer i : partitions) {
+ * TaskPartitionState s = jCtx.getPartitionState(i);
+ * if (!statusCount.containsKey(s)) {
+ * statusCount.put(s, 0);
+ * }
+ * statusCount.put(s, statusCount.get(s) + 1);
+ * }
+ * for (TaskPartitionState s : statusCount.keySet()) {
+ * LOG.info(statusCount.get(s) + "/" + partitions.size() + " in state " + s.name());
+ * }
+ */
+
+ System.out.println("-------");
}
}
http://git-wip-us.apache.org/repos/asf/helix/blob/785bb9fb/helix-core/src/main/java/org/apache/helix/task/TaskRebalancer.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/TaskRebalancer.java b/helix-core/src/main/java/org/apache/helix/task/TaskRebalancer.java
index 829f0c4..e9f60f9 100644
--- a/helix-core/src/main/java/org/apache/helix/task/TaskRebalancer.java
+++ b/helix-core/src/main/java/org/apache/helix/task/TaskRebalancer.java
@@ -227,6 +227,7 @@ public abstract class TaskRebalancer implements HelixRebalancer {
// TASK_ERROR, ERROR.
Set<Integer> donePartitions = new TreeSet<Integer>();
for (int pId : pSet) {
+ jobCtx.setPartitionState(pId, TaskPartitionState.INIT);
final String pName = pName(jobResource, pId);
// Check for pending state transitions on this (partition, instance).
@@ -289,6 +290,8 @@ public abstract class TaskRebalancer implements HelixRebalancer {
nextState = TaskPartitionState.STOPPED;
}
+ jobCtx.setPartitionState(pId, currState);
+
paMap.put(pId, new PartitionAssignment(instance.toString(), nextState.name()));
assignedPartitions.add(pId);
LOG.debug(String.format("Setting task partition %s state to %s on instance %s.", pName,
@@ -378,6 +381,8 @@ public abstract class TaskRebalancer implements HelixRebalancer {
paMap.put(pId,
new PartitionAssignment(instance.toString(), TaskPartitionState.RUNNING.name()));
excludeSet.add(pId);
+ jobCtx.setPartitionState(pId, TaskPartitionState.INIT);
+ jobCtx.setAssignedParticipant(pId, instance.toString());
LOG.debug(String.format("Setting task partition %s state to %s on instance %s.", pName,
TaskPartitionState.RUNNING, instance));
}
http://git-wip-us.apache.org/repos/asf/helix/blob/785bb9fb/helix-core/src/main/java/org/apache/helix/task/Workflow.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/Workflow.java b/helix-core/src/main/java/org/apache/helix/task/Workflow.java
index 5b27fb6..383180e 100644
--- a/helix-core/src/main/java/org/apache/helix/task/Workflow.java
+++ b/helix-core/src/main/java/org/apache/helix/task/Workflow.java
@@ -152,7 +152,7 @@ public class Workflow {
builder.addConfig(job.name, JobConfig.WORKFLOW_ID, wf.name);
builder.addConfig(job.name, JobConfig.COMMAND, job.command);
if (job.jobConfigMap != null) {
- builder.addConfig(job.name, JobConfig.JOB_CONFIG_MAP, job.jobConfigMap.toString());
+ builder.addJobConfigMap(job.name, job.jobConfigMap);
}
builder.addConfig(job.name, JobConfig.TARGET_RESOURCE, job.targetResource);
if (job.targetPartitionStates != null) {
http://git-wip-us.apache.org/repos/asf/helix/blob/785bb9fb/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppLauncher.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppLauncher.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppLauncher.java
index 4b77105..9a19842 100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppLauncher.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppLauncher.java
@@ -2,7 +2,6 @@ package org.apache.helix.provisioning.yarn;
import java.io.File;
import java.io.FileInputStream;
-import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.net.URI;
@@ -93,6 +92,10 @@ public class AppLauncher {
yarnClient.init(_conf);
}
+ public ApplicationSpec getApplicationSpec() {
+ return _applicationSpec;
+ }
+
public boolean launch() throws Exception {
LOG.info("Running Client");
yarnClient.start();
@@ -189,7 +192,7 @@ public class AppLauncher {
classPathEnv.append(':');
classPathEnv.append(System.getProperty("java.class.path"));
}
- LOG.info("\n\n Setting the classpath to launch AppMaster:\n\n" );
+ LOG.info("\n\n Setting the classpath to launch AppMaster:\n\n");
// Set the env variables to be setup in the env where the application master will be run
Map<String, String> env = new HashMap<String, String>(_appMasterConfig.getEnv());
env.put("CLASSPATH", classPathEnv.toString());
@@ -268,12 +271,11 @@ public class AppLauncher {
// Set the queue to which this application is to be submitted in the RM
appContext.setQueue(amQueue);
-
LOG.info("Submitting application to YARN Resource Manager");
ApplicationId applicationId = yarnClient.submitApplication(appContext);
- LOG.info("Submitted application with applicationId:" + applicationId );
+ LOG.info("Submitted application with applicationId:" + applicationId);
return true;
}
@@ -352,6 +354,52 @@ public class AppLauncher {
|| path.endsWith("zip");
}
+ public HelixConnection pollForConnection() {
+ String prevReport = "";
+ HelixConnection connection = null;
+
+ while (true) {
+ try {
+ // Get application report for the appId we are interested in
+ ApplicationReport report = yarnClient.getApplicationReport(_appId);
+
+ String reportMessage = generateReport(report);
+ if (!reportMessage.equals(prevReport)) {
+ LOG.info(reportMessage);
+ }
+ YarnApplicationState state = report.getYarnApplicationState();
+ if (YarnApplicationState.RUNNING == state) {
+ if (connection == null) {
+ String hostName = null;
+ int ind = report.getHost().indexOf('/');
+ if (ind > -1) {
+ hostName = report.getHost().substring(ind + 1);
+ } else {
+ hostName = report.getHost();
+ }
+ connection = new ZkHelixConnection(hostName + ":2181");
+
+ try {
+ connection.connect();
+ } catch (Exception e) {
+ LOG.warn("AppMaster started but not yet initialized");
+ connection = null;
+ }
+ }
+ if (connection.isConnected()) {
+ return connection;
+ }
+ }
+ prevReport = reportMessage;
+ Thread.sleep(10000);
+ } catch (Exception e) {
+ LOG.error("Exception while getting info ");
+ break;
+ }
+ }
+ return null;
+ }
+
/**
* @return true if successfully completed, it will print status every X seconds
*/
@@ -434,7 +482,7 @@ public class AppLauncher {
+ ", appTrackingUrl=" + report.getTrackingUrl() + ", appUser=" + report.getUser();
}
- protected void cleanup() {
+ public void cleanup() {
LOG.info("Cleaning up");
try {
ApplicationReport applicationReport = yarnClient.getApplicationReport(_appId);
@@ -446,23 +494,28 @@ public class AppLauncher {
}
/**
- * Launches the application on a YARN cluster. Once launched, it will display (periodically) the status of the containers in the application.
+ * Launches the application on a YARN cluster. Once launched, it will display (periodically) the
+ * status of the containers in the application.
* @param args app_spec_provider and app_config_spec
* @throws Exception
*/
public static void main(String[] args) throws Exception {
Options opts = new Options();
- opts.addOption(new Option("app_spec_provider",true, "Application Spec Factory Class that will parse the app_config_spec file"));
- opts.addOption(new Option("app_config_spec",true, "YAML config file that provides the app specifications"));
+ opts.addOption(new Option("app_spec_provider", true,
+ "Application Spec Factory Class that will parse the app_config_spec file"));
+ opts.addOption(new Option("app_config_spec", true,
+ "YAML config file that provides the app specifications"));
CommandLine cliParser = new GnuParser().parse(opts, args);
String appSpecFactoryClass = cliParser.getOptionValue("app_spec_provider");
String yamlConfigFileName = cliParser.getOptionValue("app_config_spec");
- ApplicationSpecFactory applicationSpecFactory = HelixYarnUtil.createInstance(appSpecFactoryClass);
+ ApplicationSpecFactory applicationSpecFactory =
+ HelixYarnUtil.createInstance(appSpecFactoryClass);
File yamlConfigFile = new File(yamlConfigFileName);
- if(!yamlConfigFile.exists()){
- throw new IllegalArgumentException("YAML app_config_spec file: '"+ yamlConfigFileName + "' does not exist");
+ if (!yamlConfigFile.exists()) {
+ throw new IllegalArgumentException("YAML app_config_spec file: '" + yamlConfigFileName
+ + "' does not exist");
}
final AppLauncher launcher = new AppLauncher(applicationSpecFactory, yamlConfigFile);
launcher.launch();
http://git-wip-us.apache.org/repos/asf/helix/blob/785bb9fb/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppMasterLauncher.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppMasterLauncher.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppMasterLauncher.java
index 72d6ea9..523fee0 100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppMasterLauncher.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppMasterLauncher.java
@@ -3,21 +3,16 @@ package org.apache.helix.provisioning.yarn;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
-import java.util.Arrays;
+import java.util.List;
import java.util.Map;
import org.I0Itec.zkclient.IDefaultNameSpace;
import org.I0Itec.zkclient.ZkClient;
import org.I0Itec.zkclient.ZkServer;
-import org.apache.commons.cli.CommandLine;
-import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.Options;
-import org.apache.commons.cli.ParseException;
import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.helix.HelixController;
import org.apache.helix.api.accessor.ClusterAccessor;
@@ -26,15 +21,18 @@ import org.apache.helix.api.config.ResourceConfig;
import org.apache.helix.api.id.ClusterId;
import org.apache.helix.api.id.ControllerId;
import org.apache.helix.api.id.ResourceId;
-import org.apache.helix.controller.provisioner.ProvisionerConfig;
import org.apache.helix.controller.rebalancer.config.FullAutoRebalancerConfig;
import org.apache.helix.controller.rebalancer.config.RebalancerConfig;
+import org.apache.helix.manager.zk.HelixConnectionAdaptor;
import org.apache.helix.manager.zk.ZkHelixConnection;
import org.apache.helix.model.StateModelDefinition;
import org.apache.helix.provisioning.ApplicationSpec;
import org.apache.helix.provisioning.ApplicationSpecFactory;
import org.apache.helix.provisioning.HelixYarnUtil;
import org.apache.helix.provisioning.ServiceConfig;
+import org.apache.helix.provisioning.TaskConfig;
+import org.apache.helix.task.TaskDriver;
+import org.apache.helix.task.Workflow;
import org.apache.helix.tools.StateModelConfigGenerator;
import org.apache.log4j.Logger;
@@ -50,8 +48,7 @@ import org.apache.log4j.Logger;
public class AppMasterLauncher {
public static Logger LOG = Logger.getLogger(AppMasterLauncher.class);
- @SuppressWarnings("unchecked")
- public static void main(String[] args) throws Exception{
+ public static void main(String[] args) throws Exception {
Map<String, String> env = System.getenv();
LOG.info("Starting app master with the following environment variables");
for (String key : env.keySet()) {
@@ -61,11 +58,6 @@ public class AppMasterLauncher {
Options opts;
opts = new Options();
opts.addOption("num_containers", true, "Number of containers");
- try {
- CommandLine cliParser = new GnuParser().parse(opts, args);
- } catch (Exception e) {
- LOG.error("Error parsing input arguments" + Arrays.toString(args), e);
- }
// START ZOOKEEPER
String dataDir = "dataDir";
@@ -94,7 +86,7 @@ public class AppMasterLauncher {
String configFile = AppMasterConfig.AppEnvironment.APP_SPEC_FILE.toString();
String className = appMasterConfig.getApplicationSpecFactory();
-
+
GenericApplicationMaster genericApplicationMaster = new GenericApplicationMaster(appAttemptID);
try {
genericApplicationMaster.start();
@@ -102,8 +94,8 @@ public class AppMasterLauncher {
LOG.error("Unable to start application master: ", e);
}
ApplicationSpecFactory factory = HelixYarnUtil.createInstance(className);
-
- //TODO: Avoid setting static variable.
+
+ // TODO: Avoid setting static variable.
YarnProvisioner.applicationMaster = genericApplicationMaster;
YarnProvisioner.applicationMasterConfig = appMasterConfig;
ApplicationSpec applicationSpec = factory.fromYaml(new FileInputStream(configFile));
@@ -121,17 +113,19 @@ public class AppMasterLauncher {
ClusterAccessor clusterAccessor = connection.createClusterAccessor(clusterId);
StateModelDefinition statelessService =
new StateModelDefinition(StateModelConfigGenerator.generateConfigForStatelessService());
- clusterAccessor.createCluster(new ClusterConfig.Builder(clusterId).addStateModelDefinition(
- statelessService).build());
+ StateModelDefinition taskStateModel =
+ new StateModelDefinition(StateModelConfigGenerator.generateConfigForTaskStateModel());
+ clusterAccessor.createCluster(new ClusterConfig.Builder(clusterId)
+ .addStateModelDefinition(statelessService).addStateModelDefinition(taskStateModel).build());
for (String service : applicationSpec.getServices()) {
String resourceName = service;
// add the resource with the local provisioner
ResourceId resourceId = ResourceId.from(resourceName);
-
+
ServiceConfig serviceConfig = applicationSpec.getServiceConfig(resourceName);
serviceConfig.setSimpleField("service_name", service);
int numContainers = serviceConfig.getIntField("num_containers", 1);
-
+
YarnProvisionerConfig provisionerConfig = new YarnProvisionerConfig(resourceId);
provisionerConfig.setNumContainers(numContainers);
@@ -153,6 +147,20 @@ public class AppMasterLauncher {
HelixController controller = connection.createController(clusterId, controllerId);
controller.start();
+ // Start any pre-specified jobs
+ List<TaskConfig> taskConfigs = applicationSpec.getTaskConfigs();
+ if (taskConfigs != null) {
+ for (TaskConfig taskConfig : taskConfigs) {
+ String yamlFile = taskConfig.getValue("yamlFile");
+ if (yamlFile != null) {
+ File file = new File(yamlFile);
+ Workflow workflow = Workflow.parse(file);
+ TaskDriver taskDriver = new TaskDriver(new HelixConnectionAdaptor(controller));
+ taskDriver.start(workflow);
+ }
+ }
+ }
+
Thread shutdownhook = new Thread(new Runnable() {
@Override
public void run() {
http://git-wip-us.apache.org/repos/asf/helix/blob/785bb9fb/recipes/jobrunner-yarn/pom.xml
----------------------------------------------------------------------
diff --git a/recipes/jobrunner-yarn/pom.xml b/recipes/jobrunner-yarn/pom.xml
new file mode 100644
index 0000000..f067a56
--- /dev/null
+++ b/recipes/jobrunner-yarn/pom.xml
@@ -0,0 +1,159 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied. See the License for the
+specific language governing permissions and limitations
+under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+
+ <parent>
+ <groupId>org.apache.helix.recipes</groupId>
+ <artifactId>recipes</artifactId>
+ <version>0.7.1-incubating-SNAPSHOT</version>
+ </parent>
+
+ <artifactId>jobrunner-yarn</artifactId>
+ <packaging>bundle</packaging>
+ <name>Apache Helix :: Recipes :: Provisioning :: YARN :: Job Runner</name>
+
+ <properties>
+ <osgi.import>
+ org.apache.helix*,
+ org.apache.log4j,
+ *
+ </osgi.import>
+ <osgi.export>org.apache.helix.provisioning.yarn.example*;version="${project.version}";-noimport:=true</osgi.export>
+ </properties>
+
+ <dependencies>
+ <dependency>
+ <groupId>org.testng</groupId>
+ <artifactId>testng</artifactId>
+ <version>6.0.1</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.helix</groupId>
+ <artifactId>helix-core</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.helix</groupId>
+ <artifactId>helix-provisioning</artifactId>
+ <version>0.7.1-incubating-SNAPSHOT</version>
+ </dependency>
+ <dependency>
+ <groupId>log4j</groupId>
+ <artifactId>log4j</artifactId>
+ <exclusions>
+ <exclusion>
+ <groupId>javax.mail</groupId>
+ <artifactId>mail</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>javax.jms</groupId>
+ <artifactId>jms</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>com.sun.jdmk</groupId>
+ <artifactId>jmxtools</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>com.sun.jmx</groupId>
+ <artifactId>jmxri</artifactId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+ </dependencies>
+ <build>
+ <pluginManagement>
+ <plugins>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>appassembler-maven-plugin</artifactId>
+ <configuration>
+ <!-- Set the target configuration directory to be used in the bin scripts -->
+ <!-- <configurationDirectory>conf</configurationDirectory> -->
+ <!-- Copy the contents from "/src/main/config" to the target configuration
+ directory in the assembled application -->
+ <!-- <copyConfigurationDirectory>true</copyConfigurationDirectory> -->
+ <!-- Include the target configuration directory in the beginning of
+ the classpath declaration in the bin scripts -->
+ <includeConfigurationDirectoryInClasspath>true</includeConfigurationDirectoryInClasspath>
+ <assembleDirectory>${project.build.directory}/${project.artifactId}-pkg</assembleDirectory>
+ <!-- Extra JVM arguments that will be included in the bin scripts -->
+ <extraJvmArguments>-Xms512m -Xmx512m</extraJvmArguments>
+ <!-- Generate bin scripts for windows and unix pr default -->
+ <platforms>
+ <platform>windows</platform>
+ <platform>unix</platform>
+ </platforms>
+ </configuration>
+ <executions>
+ <execution>
+ <phase>package</phase>
+ <goals>
+ <goal>assemble</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.rat</groupId>
+ <artifactId>apache-rat-plugin</artifactId>
+ <configuration>
+ <excludes combine.children="append">
+ </excludes>
+ </configuration>
+ </plugin>
+ </plugins>
+ </pluginManagement>
+ <plugins>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>appassembler-maven-plugin</artifactId>
+ <configuration>
+ <programs>
+ <program>
+ <mainClass>org.apache.helix.provisioning.yarn.AppLauncher</mainClass>
+ <name>app-launcher</name>
+ </program>
+ <program>
+ <mainClass>org.apache.helix.provisioning.yarn.example.JobRunnerMain</mainClass>
+ <name>job-runner</name>
+ </program>
+ </programs>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-assembly-plugin</artifactId>
+ <configuration>
+ <descriptors>
+ <descriptor>src/assemble/assembly.xml</descriptor>
+ </descriptors>
+ </configuration>
+ <executions>
+ <execution>
+ <phase>package</phase>
+ <goals>
+ <goal>single</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+</project>
http://git-wip-us.apache.org/repos/asf/helix/blob/785bb9fb/recipes/jobrunner-yarn/run.sh
----------------------------------------------------------------------
diff --git a/recipes/jobrunner-yarn/run.sh b/recipes/jobrunner-yarn/run.sh
new file mode 100755
index 0000000..07448bb
--- /dev/null
+++ b/recipes/jobrunner-yarn/run.sh
@@ -0,0 +1,6 @@
#cd ../../
#mvn clean install -DskipTests
#cd recipes/jobrunner-yarn
mvn clean package -DskipTests
# appassembler assembles this recipe into target/${artifactId}-pkg (artifactId: jobrunner-yarn);
# the previous paths were copy-pasted from the helloworld recipe and had a '/' in place of '-pkg'.
chmod +x target/jobrunner-yarn-pkg/bin/app-launcher.sh
# NOTE(review): spec factory class and app-spec YAML below are copied from the helloworld recipe
# (including a developer-local absolute path) -- point them at this recipe's own spec/resources.
target/jobrunner-yarn-pkg/bin/app-launcher.sh org.apache.helix.provisioning.yarn.example.HelloWordAppSpecFactory /Users/kgopalak/Documents/projects/incubator-helix/recipes/helloworld-provisioning-yarn/src/main/resources/hello_world_app_spec.yaml
http://git-wip-us.apache.org/repos/asf/helix/blob/785bb9fb/recipes/jobrunner-yarn/src/assemble/assembly.xml
----------------------------------------------------------------------
diff --git a/recipes/jobrunner-yarn/src/assemble/assembly.xml b/recipes/jobrunner-yarn/src/assemble/assembly.xml
new file mode 100644
index 0000000..c2d08a1
--- /dev/null
+++ b/recipes/jobrunner-yarn/src/assemble/assembly.xml
@@ -0,0 +1,60 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied. See the License for the
+specific language governing permissions and limitations
+under the License.
+-->
+<assembly>
+ <id>pkg</id>
+ <formats>
+ <format>tar</format>
+ </formats>
+ <fileSets>
+ <fileSet>
+ <directory>${project.build.directory}/${project.artifactId}-pkg/bin</directory>
+ <outputDirectory>bin</outputDirectory>
+ <lineEnding>unix</lineEnding>
+ <fileMode>0755</fileMode>
+ <directoryMode>0755</directoryMode>
+ </fileSet>
+ <fileSet>
+ <directory>${project.build.directory}/${project.artifactId}-pkg/repo/</directory>
+ <outputDirectory>repo</outputDirectory>
+ <fileMode>0755</fileMode>
+ <directoryMode>0755</directoryMode>
+ <excludes>
+ <exclude>**/*.xml</exclude>
+ </excludes>
+ </fileSet>
+ <fileSet>
+ <directory>${project.build.directory}/${project.artifactId}-pkg/conf</directory>
+ <outputDirectory>conf</outputDirectory>
+ <lineEnding>unix</lineEnding>
+ <fileMode>0755</fileMode>
+ <directoryMode>0755</directoryMode>
+ </fileSet>
+ <fileSet>
+ <directory>${project.basedir}</directory>
+ <outputDirectory>/</outputDirectory>
+ <includes>
+ <include>LICENSE</include>
+ <include>NOTICE</include>
+ <include>DISCLAIMER</include>
+ </includes>
+ <fileMode>0755</fileMode>
+ </fileSet>
+ </fileSets>
+</assembly>
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/helix/blob/785bb9fb/recipes/jobrunner-yarn/src/main/config/log4j.properties
----------------------------------------------------------------------
diff --git a/recipes/jobrunner-yarn/src/main/config/log4j.properties b/recipes/jobrunner-yarn/src/main/config/log4j.properties
new file mode 100644
index 0000000..91fac03
--- /dev/null
+++ b/recipes/jobrunner-yarn/src/main/config/log4j.properties
@@ -0,0 +1,31 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+##
+
+# Set root logger level to DEBUG and its only appender to A1.
+log4j.rootLogger=DEBUG,A1
+
+# A1 is set to be a ConsoleAppender.
+log4j.appender.A1=org.apache.log4j.ConsoleAppender
+
+# A1 uses PatternLayout.
+log4j.appender.A1.layout=org.apache.log4j.PatternLayout
+log4j.appender.A1.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n
+
+log4j.logger.org.I0Itec=ERROR
+log4j.logger.org.apache=ERROR
http://git-wip-us.apache.org/repos/asf/helix/blob/785bb9fb/recipes/jobrunner-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/JobRunnerMain.java
----------------------------------------------------------------------
diff --git a/recipes/jobrunner-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/JobRunnerMain.java b/recipes/jobrunner-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/JobRunnerMain.java
new file mode 100644
index 0000000..623854f
--- /dev/null
+++ b/recipes/jobrunner-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/JobRunnerMain.java
@@ -0,0 +1,127 @@
+package org.apache.helix.provisioning.yarn.example;
+
+import java.io.File;
+import java.util.Collection;
+import java.util.List;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.GnuParser;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.Options;
+import org.apache.helix.ClusterMessagingService;
+import org.apache.helix.HelixConnection;
+import org.apache.helix.HelixManager;
+import org.apache.helix.HelixRole;
+import org.apache.helix.InstanceType;
+import org.apache.helix.api.Participant;
+import org.apache.helix.api.accessor.ClusterAccessor;
+import org.apache.helix.api.config.ContainerConfig;
+import org.apache.helix.api.id.ClusterId;
+import org.apache.helix.api.id.Id;
+import org.apache.helix.manager.zk.HelixConnectionAdaptor;
+import org.apache.helix.provisioning.ApplicationSpec;
+import org.apache.helix.provisioning.ApplicationSpecFactory;
+import org.apache.helix.provisioning.HelixYarnUtil;
+import org.apache.helix.provisioning.TaskConfig;
+import org.apache.helix.provisioning.yarn.AppLauncher;
+import org.apache.helix.task.TaskDriver;
+import org.apache.helix.task.Workflow;
+
/**
 * CLI entry point that launches a YARN application described by an application spec, then loops
 * forever printing container state and task/workflow status for the submitted workflow.
 * Expects two options: -app_spec_provider (factory class) and -app_config_spec (YAML file).
 */
public class JobRunnerMain {
  public static void main(String[] args) throws Exception {
    Options opts = new Options();
    opts.addOption(new Option("app_spec_provider", true,
        "Application Spec Factory Class that will parse the app_config_spec file"));
    opts.addOption(new Option("app_config_spec", true,
        "YAML config file that provides the app specifications"));
    CommandLine cliParser = new GnuParser().parse(opts, args);
    String appSpecFactoryClass = cliParser.getOptionValue("app_spec_provider");
    String yamlConfigFileName = cliParser.getOptionValue("app_config_spec");

    ApplicationSpecFactory applicationSpecFactory =
        HelixYarnUtil.createInstance(appSpecFactoryClass);
    File yamlConfigFile = new File(yamlConfigFileName);
    if (!yamlConfigFile.exists()) {
      throw new IllegalArgumentException("YAML app_config_spec file: '" + yamlConfigFileName
          + "' does not exist");
    }
    final AppLauncher launcher = new AppLauncher(applicationSpecFactory, yamlConfigFile);
    launcher.launch();
    // Tear down the YARN application when this client process exits.
    Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() {

      @Override
      public void run() {
        launcher.cleanup();
      }
    }));

    final ApplicationSpec appSpec = launcher.getApplicationSpec();

    // Repeatedly print status
    // Blocks until the app master is RUNNING and reachable (may return null on failure).
    final HelixConnection connection = launcher.pollForConnection();
    final ClusterId clusterId = ClusterId.from(appSpec.getAppName());
    // TODO: this is a hack -- TaskDriver should accept a connection instead of a manager
    // Minimal adaptor: only the connection/cluster/type accessors are meaningful; id and
    // messaging service are intentionally null.
    HelixManager manager = new HelixConnectionAdaptor(new HelixRole() {
      @Override
      public HelixConnection getConnection() {
        return connection;
      }

      @Override
      public ClusterId getClusterId() {
        return clusterId;
      }

      @Override
      public Id getId() {
        return null;
      }

      @Override
      public InstanceType getType() {
        return InstanceType.ADMINISTRATOR;
      }

      @Override
      public ClusterMessagingService getMessagingService() {
        return null;
      }
    });

    // Get all submitted jobs
    // NOTE(review): if several task configs carry a "yamlFile", only the LAST parsed workflow
    // name is kept and monitored below -- confirm this single-workflow assumption is intended.
    String workflow = null;
    List<TaskConfig> taskConfigs = appSpec.getTaskConfigs();
    if (taskConfigs != null) {
      for (TaskConfig taskConfig : taskConfigs) {
        String yamlFile = taskConfig.getValue("yamlFile");
        if (yamlFile != null) {
          Workflow flow = Workflow.parse(new File(yamlFile));
          workflow = flow.getName();
        }
      }
    }

    // Repeatedly poll for status
    if (workflow != null) {
      ClusterAccessor accessor = connection.createClusterAccessor(clusterId);
      TaskDriver driver = new TaskDriver(manager);
      // Intentional infinite loop: status is printed every 5s until the process is killed,
      // at which point the shutdown hook cleans up the YARN application.
      while (true) {
        System.out.println("CONTAINER STATUS");
        System.out.println("----------------");
        Collection<Participant> participants = accessor.readParticipants().values();
        for (Participant participant : participants) {
          ContainerConfig containerConfig = participant.getContainerConfig();
          if (containerConfig != null) {
            System.out.println(participant.getId() + "[" + containerConfig.getId() + "]: "
                + containerConfig.getState());
          }
        }
        System.out.println("----------------");
        System.out.println("TASK STATUS");
        System.out.println("----------------");
        driver.list(workflow);
        Thread.sleep(5000);
      }
    }
  }
}
http://git-wip-us.apache.org/repos/asf/helix/blob/785bb9fb/recipes/jobrunner-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/MyTask.java
----------------------------------------------------------------------
diff --git a/recipes/jobrunner-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/MyTask.java b/recipes/jobrunner-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/MyTask.java
new file mode 100644
index 0000000..584550d
--- /dev/null
+++ b/recipes/jobrunner-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/MyTask.java
@@ -0,0 +1,53 @@
+package org.apache.helix.provisioning.yarn.example;
+
+import org.apache.helix.task.Task;
+import org.apache.helix.task.TaskCallbackContext;
+import org.apache.helix.task.TaskResult;
+import org.apache.log4j.Logger;
+
+/**
+ * Callbacks for task execution - THIS INTERFACE IS SUBJECT TO CHANGE
+ */
+public class MyTask implements Task {
+  private static final Logger LOG = Logger.getLogger(MyTask.class);
+  // Fixed amount of simulated work, in milliseconds.
+  private static final long DEFAULT_DELAY = 60000L;
+  private final long _delay;
+  // Written by cancel() (framework callback thread) and read by run(); volatile for visibility.
+  private volatile boolean _canceled;
+
+  /**
+   * @param context carries the job- and task-level configuration; both are only logged here
+   */
+  public MyTask(TaskCallbackContext context) {
+    LOG.info("Job config" + context.getJobConfig().getJobConfigMap());
+    if (context.getTaskConfig() != null) {
+      LOG.info("Task config: " + context.getTaskConfig().getConfigMap());
+    }
+    _delay = DEFAULT_DELAY;
+  }
+
+  /**
+   * Waits (in 50 ms slices) until the delay expires or the task is canceled.
+   * @return COMPLETED or CANCELED, with the remaining time (clamped to 0) as the info string
+   */
+  @Override
+  public TaskResult run() {
+    long expiry = System.currentTimeMillis() + _delay;
+    long timeLeft;
+    while (System.currentTimeMillis() < expiry) {
+      if (_canceled) {
+        timeLeft = expiry - System.currentTimeMillis();
+        return new TaskResult(TaskResult.Status.CANCELED, String.valueOf(timeLeft < 0 ? 0
+            : timeLeft));
+      }
+      sleep(50);
+    }
+    timeLeft = expiry - System.currentTimeMillis();
+    return new TaskResult(TaskResult.Status.COMPLETED, String.valueOf(timeLeft < 0 ? 0 : timeLeft));
+  }
+
+  @Override
+  public void cancel() {
+    _canceled = true;
+  }
+
+  /** Sleeps without propagating InterruptedException; the interrupt status is preserved. */
+  private static void sleep(long d) {
+    try {
+      Thread.sleep(d);
+    } catch (InterruptedException e) {
+      // Re-assert the interrupt flag instead of swallowing it with printStackTrace(),
+      // so the surrounding task framework can still observe the interruption.
+      Thread.currentThread().interrupt();
+      LOG.warn("Sleep interrupted", e);
+    }
+  }
+}
http://git-wip-us.apache.org/repos/asf/helix/blob/785bb9fb/recipes/jobrunner-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/MyTaskAppSpec.java
----------------------------------------------------------------------
diff --git a/recipes/jobrunner-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/MyTaskAppSpec.java b/recipes/jobrunner-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/MyTaskAppSpec.java
new file mode 100644
index 0000000..a20994c
--- /dev/null
+++ b/recipes/jobrunner-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/MyTaskAppSpec.java
@@ -0,0 +1,148 @@
+package org.apache.helix.provisioning.yarn.example;
+
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.helix.api.Scope;
+import org.apache.helix.api.id.ResourceId;
+import org.apache.helix.provisioning.AppConfig;
+import org.apache.helix.provisioning.ApplicationSpec;
+import org.apache.helix.provisioning.ServiceConfig;
+import org.apache.helix.provisioning.TaskConfig;
+
+import com.google.common.collect.Maps;
+
+/**
+ * Bean-style ApplicationSpec for the job-runner recipe. Instances are created reflectively by
+ * SnakeYAML (see MyTaskAppSpecFactory), so the getter/setter names and parameter types below
+ * form the YAML contract and must match the keys in job_runner_app_spec.yaml.
+ */
+public class MyTaskAppSpec implements ApplicationSpec {
+
+ // NOTE(review): these three fields are public while the rest are private — presumably an
+ // oversight, since matching setters exist; verify nothing reads them directly before tightening.
+ public String _appName;
+
+ public AppConfig _appConfig;
+
+ public List<String> _services;
+
+ // URI string of the application master package tarball
+ private String _appMasterPackageUri;
+
+ // service name -> URI string of the package to deploy for that service
+ private Map<String, String> _servicePackageURIMap;
+
+ // service name -> fully-qualified main class run for that service
+ private Map<String, String> _serviceMainClassMap;
+
+ // service name -> typed config; populated from plain maps in setServiceConfigMap
+ private Map<String, ServiceConfig> _serviceConfigMap;
+
+ private List<TaskConfig> _taskConfigs;
+
+ public AppConfig getAppConfig() {
+ return _appConfig;
+ }
+
+ public void setAppConfig(AppConfig appConfig) {
+ _appConfig = appConfig;
+ }
+
+ public String getAppMasterPackageUri() {
+ return _appMasterPackageUri;
+ }
+
+ public void setAppMasterPackageUri(String appMasterPackageUri) {
+ _appMasterPackageUri = appMasterPackageUri;
+ }
+
+ public Map<String, String> getServicePackageURIMap() {
+ return _servicePackageURIMap;
+ }
+
+ public void setServicePackageURIMap(Map<String, String> servicePackageURIMap) {
+ _servicePackageURIMap = servicePackageURIMap;
+ }
+
+ public Map<String, String> getServiceMainClassMap() {
+ return _serviceMainClassMap;
+ }
+
+ public void setServiceMainClassMap(Map<String, String> serviceMainClassMap) {
+ _serviceMainClassMap = serviceMainClassMap;
+ }
+
+ // Getter/setter are deliberately asymmetric: the getter flattens ServiceConfig back to
+ // simple string maps, while the setter accepts Object values (as parsed from YAML).
+ public Map<String, Map<String, String>> getServiceConfigMap() {
+ Map<String, Map<String, String>> map = Maps.newHashMap();
+ for (String service : _serviceConfigMap.keySet()) {
+ map.put(service, _serviceConfigMap.get(service).getSimpleFields());
+ }
+ return map;
+ }
+
+ public void setServiceConfigMap(Map<String, Map<String, Object>> map) {
+ _serviceConfigMap = Maps.newHashMap();
+
+ for (String service : map.keySet()) {
+ ServiceConfig serviceConfig = new ServiceConfig(Scope.resource(ResourceId.from(service)));
+ Map<String, Object> simpleFields = map.get(service);
+ for (String key : simpleFields.keySet()) {
+ // values may be non-String (e.g. YAML integers); stored as their string form
+ serviceConfig.setSimpleField(key, simpleFields.get(key).toString());
+ }
+ _serviceConfigMap.put(service, serviceConfig);
+ }
+ }
+
+ public void setAppName(String appName) {
+ _appName = appName;
+ }
+
+ public void setServices(List<String> services) {
+ _services = services;
+ }
+
+ public void setTaskConfigs(List<TaskConfig> taskConfigs) {
+ _taskConfigs = taskConfigs;
+ }
+
+ @Override
+ public String getAppName() {
+ return _appName;
+ }
+
+ @Override
+ public AppConfig getConfig() {
+ return _appConfig;
+ }
+
+ @Override
+ public List<String> getServices() {
+ return _services;
+ }
+
+ // Returns null when the configured URI string is malformed; callers must null-check.
+ @Override
+ public URI getAppMasterPackage() {
+ try {
+ return new URI(_appMasterPackageUri);
+ } catch (URISyntaxException e) {
+ return null;
+ }
+ }
+
+ // Returns null when the service's URI string is malformed; callers must null-check.
+ @Override
+ public URI getServicePackage(String serviceName) {
+ try {
+ return new URI(_servicePackageURIMap.get(serviceName));
+ } catch (URISyntaxException e) {
+ return null;
+ }
+ }
+
+ @Override
+ public String getServiceMainClass(String service) {
+ return _serviceMainClassMap.get(service);
+ }
+
+ @Override
+ public ServiceConfig getServiceConfig(String serviceName) {
+ return _serviceConfigMap.get(serviceName);
+ }
+
+ @Override
+ public List<TaskConfig> getTaskConfigs() {
+ return _taskConfigs;
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/helix/blob/785bb9fb/recipes/jobrunner-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/MyTaskAppSpecFactory.java
----------------------------------------------------------------------
diff --git a/recipes/jobrunner-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/MyTaskAppSpecFactory.java b/recipes/jobrunner-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/MyTaskAppSpecFactory.java
new file mode 100644
index 0000000..17601ba
--- /dev/null
+++ b/recipes/jobrunner-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/MyTaskAppSpecFactory.java
@@ -0,0 +1,28 @@
+package org.apache.helix.provisioning.yarn.example;
+
+import java.io.InputStream;
+
+import org.apache.helix.provisioning.ApplicationSpec;
+import org.apache.helix.provisioning.ApplicationSpecFactory;
+import org.yaml.snakeyaml.Yaml;
+
+public class MyTaskAppSpecFactory implements ApplicationSpecFactory {
+
+  /**
+   * Deserializes an {@link ApplicationSpec} from a YAML stream. The stream is expected to start
+   * with an explicit type tag (!!org.apache.helix.provisioning.yarn.example.MyTaskAppSpec, as in
+   * job_runner_app_spec.yaml) — that tag is what lets SnakeYAML pick the concrete class, and the
+   * cast fails at runtime if the tag is absent or names an incompatible type.
+   */
+  @Override
+  public ApplicationSpec fromYaml(InputStream inputstream) {
+    return (ApplicationSpec) new Yaml().load(inputstream);
+  }
+
+  /**
+   * Manual smoke test: round-trips the bundled job_runner_app_spec.yaml through SnakeYAML and
+   * prints the dump plus one service-config field.
+   */
+  public static void main(String[] args) {
+    Yaml yaml = new Yaml();
+    InputStream resourceAsStream =
+        ClassLoader.getSystemClassLoader().getResourceAsStream("job_runner_app_spec.yaml");
+    MyTaskAppSpec spec = yaml.loadAs(resourceAsStream, MyTaskAppSpec.class);
+    String dump = yaml.dump(spec);
+    System.out.println(dump);
+    System.out.println(spec.getServiceConfig("JobRunner").getStringField("num_containers", "1"));
+  }
+}
http://git-wip-us.apache.org/repos/asf/helix/blob/785bb9fb/recipes/jobrunner-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/MyTaskService.java
----------------------------------------------------------------------
diff --git a/recipes/jobrunner-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/MyTaskService.java b/recipes/jobrunner-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/MyTaskService.java
new file mode 100644
index 0000000..22c3ab0
--- /dev/null
+++ b/recipes/jobrunner-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/MyTaskService.java
@@ -0,0 +1,62 @@
+package org.apache.helix.provisioning.yarn.example;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.helix.HelixConnection;
+import org.apache.helix.HelixManager;
+import org.apache.helix.api.id.ClusterId;
+import org.apache.helix.api.id.ParticipantId;
+import org.apache.helix.api.id.StateModelDefId;
+import org.apache.helix.manager.zk.HelixConnectionAdaptor;
+import org.apache.helix.participant.AbstractParticipantService;
+import org.apache.helix.provisioning.ServiceConfig;
+import org.apache.helix.provisioning.participant.StatelessParticipantService;
+import org.apache.helix.task.Task;
+import org.apache.helix.task.TaskCallbackContext;
+import org.apache.helix.task.TaskFactory;
+import org.apache.helix.task.TaskStateModelFactory;
+import org.apache.log4j.Logger;
+
+/**
+ * A simple "service" for task callback registration.
+ */
+public class MyTaskService extends StatelessParticipantService {
+
+  // Log against this class; the original logged against AbstractParticipantService.class,
+  // which made messages from this service impossible to attribute in log output.
+  private static final Logger LOG = Logger.getLogger(MyTaskService.class);
+
+  static final String SERVICE_NAME = "JobRunner";
+
+  public MyTaskService(HelixConnection connection, ClusterId clusterId,
+      ParticipantId participantId) {
+    super(connection, clusterId, participantId, SERVICE_NAME);
+  }
+
+  /**
+   * Registers the "Task" state model so that "RunTask" commands are executed via MyTask.
+   * @param serviceConfig service-scoped configuration; only logged here
+   */
+  @Override
+  protected void init(ServiceConfig serviceConfig) {
+    LOG.info("Initialized service with config " + serviceConfig);
+
+    // Register for callbacks for tasks
+    HelixManager manager = new HelixConnectionAdaptor(getParticipant());
+    Map<String, TaskFactory> taskFactoryReg = new HashMap<String, TaskFactory>();
+    taskFactoryReg.put("RunTask", new TaskFactory() {
+      @Override
+      public Task createNewTask(TaskCallbackContext context) {
+        return new MyTask(context);
+      }
+    });
+    getParticipant().getStateMachineEngine().registerStateModelFactory(
+        StateModelDefId.from("Task"), new TaskStateModelFactory(manager, taskFactoryReg));
+  }
+
+  @Override
+  protected void goOnline() {
+    LOG.info("JobRunner service is told to go online");
+  }
+
+  // NOTE(review): "goOffine" (sic) overrides a misspelled hook in the superclass — the
+  // @Override annotation proves the name matches — so renaming it here would break the
+  // override; the spelling fix belongs in the base class.
+  @Override
+  protected void goOffine() {
+    LOG.info("JobRunner service is told to go offline");
+  }
+
+}
http://git-wip-us.apache.org/repos/asf/helix/blob/785bb9fb/recipes/jobrunner-yarn/src/main/resources/dummy_job.yaml
----------------------------------------------------------------------
diff --git a/recipes/jobrunner-yarn/src/main/resources/dummy_job.yaml b/recipes/jobrunner-yarn/src/main/resources/dummy_job.yaml
new file mode 100644
index 0000000..0187fd1
--- /dev/null
+++ b/recipes/jobrunner-yarn/src/main/resources/dummy_job.yaml
@@ -0,0 +1,18 @@
+name: myJob1234
+jobs:
+ - name: myJob1234
+ command: RunTask
+ jobConfigMap: {
+ k1: "v1",
+ k2: "v2"
+ }
+ tasks:
+ - taskConfigMap: {
+ k3: "v3"
+ }
+ - taskConfigMap: {
+ k4: "v4"
+ }
+ - taskConfigMap: {
+ k5: "v5"
+ }
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/helix/blob/785bb9fb/recipes/jobrunner-yarn/src/main/resources/job_runner_app_spec.yaml
----------------------------------------------------------------------
diff --git a/recipes/jobrunner-yarn/src/main/resources/job_runner_app_spec.yaml b/recipes/jobrunner-yarn/src/main/resources/job_runner_app_spec.yaml
new file mode 100755
index 0000000..ad62ffc
--- /dev/null
+++ b/recipes/jobrunner-yarn/src/main/resources/job_runner_app_spec.yaml
@@ -0,0 +1,27 @@
+!!org.apache.helix.provisioning.yarn.example.MyTaskAppSpec
+appConfig:
+ config: {
+ k1: v1
+ }
+appMasterPackageUri: 'file:///Users/kbiscuit/helix/incubator-helix/recipes/jobrunner-yarn/target/jobrunner-yarn-0.7.1-incubating-SNAPSHOT-pkg.tar'
+appName: testApp
+serviceConfigMap:
+ JobRunner: {
+ num_containers: 3,
+ memory: 1024
+ }
+serviceMainClassMap: {
+ JobRunner: org.apache.helix.provisioning.yarn.example.MyTaskService
+}
+servicePackageURIMap: {
+ JobRunner: 'file:///Users/kbiscuit/helix/incubator-helix/recipes/jobrunner-yarn/target/jobrunner-yarn-0.7.1-incubating-SNAPSHOT-pkg.tar'
+}
+services: [
+ JobRunner]
+taskConfigs:
+ - config: {
+ yamlFile: '/Users/kbiscuit/helix/incubator-helix/recipes/jobrunner-yarn/src/main/resources/dummy_job.yaml'
+ }
+
+
+
http://git-wip-us.apache.org/repos/asf/helix/blob/785bb9fb/recipes/jobrunner-yarn/src/test/conf/testng.xml
----------------------------------------------------------------------
diff --git a/recipes/jobrunner-yarn/src/test/conf/testng.xml b/recipes/jobrunner-yarn/src/test/conf/testng.xml
new file mode 100644
index 0000000..37bccf3
--- /dev/null
+++ b/recipes/jobrunner-yarn/src/test/conf/testng.xml
@@ -0,0 +1,27 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied. See the License for the
+specific language governing permissions and limitations
+under the License.
+-->
+<!DOCTYPE suite SYSTEM "http://testng.org/testng-1.0.dtd">
+<suite name="Suite" parallel="none">
+ <test name="Test" preserve-order="false">
+ <packages>
+ <package name="org.apache.helix.agent"/>
+ </packages>
+ </test>
+</suite>
http://git-wip-us.apache.org/repos/asf/helix/blob/785bb9fb/recipes/pom.xml
----------------------------------------------------------------------
diff --git a/recipes/pom.xml b/recipes/pom.xml
index 5d137c2..3fcaf42 100644
--- a/recipes/pom.xml
+++ b/recipes/pom.xml
@@ -37,6 +37,7 @@ under the License.
<module>task-execution</module>
<module>service-discovery</module>
<module>helloworld-provisioning-yarn</module>
+ <module>jobrunner-yarn</module>
</modules>
<build>
[41/50] [abbrv] git commit: Set tokens when starting new containers
Posted by ka...@apache.org.
Set tokens when starting new containers
Project: http://git-wip-us.apache.org/repos/asf/helix/repo
Commit: http://git-wip-us.apache.org/repos/asf/helix/commit/99f5ff7b
Tree: http://git-wip-us.apache.org/repos/asf/helix/tree/99f5ff7b
Diff: http://git-wip-us.apache.org/repos/asf/helix/diff/99f5ff7b
Branch: refs/heads/master
Commit: 99f5ff7bb6db9be7a92cb04bc5bba68b5130a871
Parents: d2209f7
Author: Kanak Biscuitwala <ka...@apache.org>
Authored: Thu Jul 3 10:47:31 2014 -0700
Committer: Kanak Biscuitwala <ka...@apache.org>
Committed: Thu Jul 3 10:47:31 2014 -0700
----------------------------------------------------------------------
.../helix/provisioning/yarn/GenericApplicationMaster.java | 2 ++
.../org/apache/helix/provisioning/yarn/YarnProvisioner.java | 5 +++++
2 files changed, 7 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/helix/blob/99f5ff7b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/GenericApplicationMaster.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/GenericApplicationMaster.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/GenericApplicationMaster.java
index 346af4b..defa3d8 100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/GenericApplicationMaster.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/GenericApplicationMaster.java
@@ -190,12 +190,14 @@ public class GenericApplicationMaster {
LOG.info("Starting ApplicationMaster");
Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
+ LOG.info("Credentials: " + credentials);
DataOutputBuffer dob = new DataOutputBuffer();
credentials.writeTokenStorageToStream(dob);
// Now remove the AM->RM token so that containers cannot access it.
Iterator<Token<?>> iter = credentials.getAllTokens().iterator();
while (iter.hasNext()) {
Token<?> token = iter.next();
+ LOG.info("Processing token: " + token);
if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
iter.remove();
}
http://git-wip-us.apache.org/repos/asf/helix/blob/99f5ff7b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/YarnProvisioner.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/YarnProvisioner.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/YarnProvisioner.java
index 833efa5..ce6b1bc 100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/YarnProvisioner.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/YarnProvisioner.java
@@ -200,6 +200,11 @@ public class YarnProvisioner implements Provisioner, TargetProvider, ContainerPr
participantContainer.setEnvironment(env);
+ if (applicationMaster.allTokens != null) {
+ LOG.info("Setting tokens: " + applicationMaster.allTokens);
+ participantContainer.setTokens(applicationMaster.allTokens);
+ }
+
// Set the necessary command to execute the application master
Vector<CharSequence> vargs = new Vector<CharSequence>(30);
[16/50] [abbrv] Moving packages around
Posted by ka...@apache.org.
http://git-wip-us.apache.org/repos/asf/helix/blob/8992aa5a/recipes/provisioning/yarn/helloworld/pom.xml
----------------------------------------------------------------------
diff --git a/recipes/provisioning/yarn/helloworld/pom.xml b/recipes/provisioning/yarn/helloworld/pom.xml
deleted file mode 100644
index 6c30679..0000000
--- a/recipes/provisioning/yarn/helloworld/pom.xml
+++ /dev/null
@@ -1,159 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" ?>
-<!--
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing,
-software distributed under the License is distributed on an
-"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-KIND, either express or implied. See the License for the
-specific language governing permissions and limitations
-under the License.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
- <modelVersion>4.0.0</modelVersion>
-
- <parent>
- <groupId>org.apache.helix.recipes.provisioning.yarn</groupId>
- <artifactId>yarn</artifactId>
- <version>0.7.1-incubating-SNAPSHOT</version>
- </parent>
-
- <artifactId>helloworld</artifactId>
- <packaging>bundle</packaging>
- <name>Apache Helix :: Recipes :: Provisioning :: YARN :: Hello World</name>
-
- <properties>
- <osgi.import>
- org.apache.helix*,
- org.apache.log4j,
- *
- </osgi.import>
- <osgi.export>org.apache.helix.provisioning.yarn.example*;version="${project.version};-noimport:=true</osgi.export>
- </properties>
-
- <dependencies>
- <dependency>
- <groupId>org.testng</groupId>
- <artifactId>testng</artifactId>
- <version>6.0.1</version>
- </dependency>
- <dependency>
- <groupId>org.apache.helix</groupId>
- <artifactId>helix-core</artifactId>
- </dependency>
- <dependency>
- <groupId>org.apache.helix</groupId>
- <artifactId>helix-provisioning</artifactId>
- <version>0.7.1-incubating-SNAPSHOT</version>
- </dependency>
- <dependency>
- <groupId>log4j</groupId>
- <artifactId>log4j</artifactId>
- <exclusions>
- <exclusion>
- <groupId>javax.mail</groupId>
- <artifactId>mail</artifactId>
- </exclusion>
- <exclusion>
- <groupId>javax.jms</groupId>
- <artifactId>jms</artifactId>
- </exclusion>
- <exclusion>
- <groupId>com.sun.jdmk</groupId>
- <artifactId>jmxtools</artifactId>
- </exclusion>
- <exclusion>
- <groupId>com.sun.jmx</groupId>
- <artifactId>jmxri</artifactId>
- </exclusion>
- </exclusions>
- </dependency>
- </dependencies>
- <build>
- <pluginManagement>
- <plugins>
- <plugin>
- <groupId>org.codehaus.mojo</groupId>
- <artifactId>appassembler-maven-plugin</artifactId>
- <configuration>
- <!-- Set the target configuration directory to be used in the bin scripts -->
- <!-- <configurationDirectory>conf</configurationDirectory> -->
- <!-- Copy the contents from "/src/main/config" to the target configuration
- directory in the assembled application -->
- <!-- <copyConfigurationDirectory>true</copyConfigurationDirectory> -->
- <!-- Include the target configuration directory in the beginning of
- the classpath declaration in the bin scripts -->
- <includeConfigurationDirectoryInClasspath>true</includeConfigurationDirectoryInClasspath>
- <assembleDirectory>${project.build.directory}/${project.artifactId}-pkg</assembleDirectory>
- <!-- Extra JVM arguments that will be included in the bin scripts -->
- <extraJvmArguments>-Xms512m -Xmx512m</extraJvmArguments>
- <!-- Generate bin scripts for windows and unix pr default -->
- <platforms>
- <platform>windows</platform>
- <platform>unix</platform>
- </platforms>
- </configuration>
- <executions>
- <execution>
- <phase>package</phase>
- <goals>
- <goal>assemble</goal>
- </goals>
- </execution>
- </executions>
- </plugin>
- <plugin>
- <groupId>org.apache.rat</groupId>
- <artifactId>apache-rat-plugin</artifactId>
- <configuration>
- <excludes combine.children="append">
- </excludes>
- </configuration>
- </plugin>
- </plugins>
- </pluginManagement>
- <plugins>
- <plugin>
- <groupId>org.codehaus.mojo</groupId>
- <artifactId>appassembler-maven-plugin</artifactId>
- <configuration>
- <programs>
- <program>
- <mainClass>org.apache.helix.provisioning.yarn.Client</mainClass>
- <name>yarn-job-launcher</name>
- </program>
- <program>
- <mainClass>org.apache.helix.provisioning.yarn.AppLauncher</mainClass>
- <name>app-launcher</name>
- </program>
- </programs>
- </configuration>
- </plugin>
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-assembly-plugin</artifactId>
- <configuration>
- <descriptors>
- <descriptor>src/assemble/assembly.xml</descriptor>
- </descriptors>
- </configuration>
- <executions>
- <execution>
- <phase>package</phase>
- <goals>
- <goal>single</goal>
- </goals>
- </execution>
- </executions>
- </plugin>
- </plugins>
- </build>
-</project>
http://git-wip-us.apache.org/repos/asf/helix/blob/8992aa5a/recipes/provisioning/yarn/helloworld/src/assemble/assembly.xml
----------------------------------------------------------------------
diff --git a/recipes/provisioning/yarn/helloworld/src/assemble/assembly.xml b/recipes/provisioning/yarn/helloworld/src/assemble/assembly.xml
deleted file mode 100644
index c2d08a1..0000000
--- a/recipes/provisioning/yarn/helloworld/src/assemble/assembly.xml
+++ /dev/null
@@ -1,60 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing,
-software distributed under the License is distributed on an
-"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-KIND, either express or implied. See the License for the
-specific language governing permissions and limitations
-under the License.
--->
-<assembly>
- <id>pkg</id>
- <formats>
- <format>tar</format>
- </formats>
- <fileSets>
- <fileSet>
- <directory>${project.build.directory}/${project.artifactId}-pkg/bin</directory>
- <outputDirectory>bin</outputDirectory>
- <lineEnding>unix</lineEnding>
- <fileMode>0755</fileMode>
- <directoryMode>0755</directoryMode>
- </fileSet>
- <fileSet>
- <directory>${project.build.directory}/${project.artifactId}-pkg/repo/</directory>
- <outputDirectory>repo</outputDirectory>
- <fileMode>0755</fileMode>
- <directoryMode>0755</directoryMode>
- <excludes>
- <exclude>**/*.xml</exclude>
- </excludes>
- </fileSet>
- <fileSet>
- <directory>${project.build.directory}/${project.artifactId}-pkg/conf</directory>
- <outputDirectory>conf</outputDirectory>
- <lineEnding>unix</lineEnding>
- <fileMode>0755</fileMode>
- <directoryMode>0755</directoryMode>
- </fileSet>
- <fileSet>
- <directory>${project.basedir}</directory>
- <outputDirectory>/</outputDirectory>
- <includes>
- <include>LICENSE</include>
- <include>NOTICE</include>
- <include>DISCLAIMER</include>
- </includes>
- <fileMode>0755</fileMode>
- </fileSet>
- </fileSets>
-</assembly>
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/helix/blob/8992aa5a/recipes/provisioning/yarn/helloworld/src/main/config/log4j.properties
----------------------------------------------------------------------
diff --git a/recipes/provisioning/yarn/helloworld/src/main/config/log4j.properties b/recipes/provisioning/yarn/helloworld/src/main/config/log4j.properties
deleted file mode 100644
index 91fac03..0000000
--- a/recipes/provisioning/yarn/helloworld/src/main/config/log4j.properties
+++ /dev/null
@@ -1,31 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-##
-
-# Set root logger level to DEBUG and its only appender to A1.
-log4j.rootLogger=DEBUG,A1
-
-# A1 is set to be a ConsoleAppender.
-log4j.appender.A1=org.apache.log4j.ConsoleAppender
-
-# A1 uses PatternLayout.
-log4j.appender.A1.layout=org.apache.log4j.PatternLayout
-log4j.appender.A1.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n
-
-log4j.logger.org.I0Itec=ERROR
-log4j.logger.org.apache=ERROR
http://git-wip-us.apache.org/repos/asf/helix/blob/8992aa5a/recipes/provisioning/yarn/helloworld/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWordAppSpecFactory.java
----------------------------------------------------------------------
diff --git a/recipes/provisioning/yarn/helloworld/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWordAppSpecFactory.java b/recipes/provisioning/yarn/helloworld/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWordAppSpecFactory.java
deleted file mode 100644
index 2e1ad41..0000000
--- a/recipes/provisioning/yarn/helloworld/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWordAppSpecFactory.java
+++ /dev/null
@@ -1,92 +0,0 @@
-package org.apache.helix.provisioning.yarn.example;
-
-import java.io.File;
-import java.io.InputStream;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.Map;
-
-import org.apache.helix.provisioning.yarn.AppConfig;
-import org.apache.helix.provisioning.yarn.ApplicationSpec;
-import org.apache.helix.provisioning.yarn.ApplicationSpecFactory;
-import org.apache.helix.provisioning.yarn.example.HelloWorldService;
-import org.apache.helix.provisioning.yarn.example.HelloworldAppSpec;
-import org.yaml.snakeyaml.DumperOptions;
-import org.yaml.snakeyaml.Yaml;
-
-public class HelloWordAppSpecFactory implements ApplicationSpecFactory {
-
- static HelloworldAppSpec data;
-
- static {
- HelloworldAppSpec data = new HelloworldAppSpec();
- AppConfig appConfig = new AppConfig();
- appConfig.setValue("k1", "v1");
- data.setAppConfig(appConfig);
- data.setAppName("testApp");
- data.setAppMasterPackageUri(
- "/Users/kgopalak/Documents/projects/incubator-helix/helix-provisioning/helix-provisioning-0.7.1-incubating-SNAPSHOT-pkg.tar");
- HashMap<String, Map<String, String>> serviceConfigMap = new HashMap<String, Map<String, String>>();
- serviceConfigMap.put("HelloWorld", new HashMap<String, String>());
- serviceConfigMap.get("HelloWorld").put("k1", "v1");
- data.setServiceConfigMap(serviceConfigMap);
- HashMap<String, String> serviceMainClassMap = new HashMap<String, String>();
- serviceMainClassMap.put("HelloWorld", HelloWorldService.class.getCanonicalName());
- data.setServiceMainClassMap(serviceMainClassMap);
- HashMap<String, String> servicePackageURIMap = new HashMap<String, String>();
- servicePackageURIMap
- .put(
- "HelloWorld",
- "/Users/kgopalak/Documents/projects/incubator-helix/helix-provisioning/helix-provisioning-0.7.1-incubating-SNAPSHOT-pkg.tar");
- data.setServicePackageURIMap(servicePackageURIMap);
- data.setServices(Arrays.asList(new String[] {
- "HelloWorld"
- })); }
-
- @Override
- public ApplicationSpec fromYaml(InputStream inputstream) {
- return (ApplicationSpec) new Yaml().load(inputstream);
- // return data;
- }
-
- public static void main(String[] args) {
- DumperOptions options = new DumperOptions();
- options.setPrettyFlow(true);
-
- Yaml yaml = new Yaml(options);
- HelloworldAppSpec data = new HelloworldAppSpec();
- AppConfig appConfig = new AppConfig();
- appConfig.setValue("k1", "v1");
- data.setAppConfig(appConfig);
- data.setAppName("testApp");
- data.setAppMasterPackageUri(
- "/Users/kgopalak/Documents/projects/incubator-helix/helix-provisioning/helix-provisioning-0.7.1-incubating-SNAPSHOT-pkg.tar");
- HashMap<String, Map<String, String>> serviceConfigMap = new HashMap<String, Map<String, String>>();
- serviceConfigMap.put("HelloWorld", new HashMap<String, String>());
- serviceConfigMap.get("HelloWorld").put("k1", "v1");
- data.setServiceConfigMap(serviceConfigMap);
- HashMap<String, String> serviceMainClassMap = new HashMap<String, String>();
- serviceMainClassMap.put("HelloWorld", HelloWorldService.class.getCanonicalName());
- data.setServiceMainClassMap(serviceMainClassMap);
- HashMap<String, String> servicePackageURIMap = new HashMap<String, String>();
- servicePackageURIMap
- .put(
- "HelloWorld",
- "/Users/kgopalak/Documents/projects/incubator-helix/helix-provisioning/helix-provisioning-0.7.1-incubating-SNAPSHOT-pkg.tar");
- data.setServicePackageURIMap(servicePackageURIMap);
- data.setServices(Arrays.asList(new String[] {
- "HelloWorld"
- }));
- String dump = yaml.dump(data);
- System.out.println(dump);
-
- InputStream resourceAsStream = ClassLoader.getSystemClassLoader().getResourceAsStream("hello_world_app_spec.yaml");
- HelloworldAppSpec load = yaml.loadAs(resourceAsStream,HelloworldAppSpec.class);
- String dumpnew = yaml.dump(load);
- System.out.println(dumpnew.equals(dump));
-
- System.out.println("==================================");
- System.out.println(dumpnew);
-
- }
-}
http://git-wip-us.apache.org/repos/asf/helix/blob/8992aa5a/recipes/provisioning/yarn/helloworld/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldService.java
----------------------------------------------------------------------
diff --git a/recipes/provisioning/yarn/helloworld/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldService.java b/recipes/provisioning/yarn/helloworld/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldService.java
deleted file mode 100644
index 8999817..0000000
--- a/recipes/provisioning/yarn/helloworld/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldService.java
+++ /dev/null
@@ -1,41 +0,0 @@
-package org.apache.helix.provisioning.yarn.example;
-
-import org.apache.helix.HelixConnection;
-import org.apache.helix.api.accessor.ResourceAccessor;
-import org.apache.helix.api.config.UserConfig;
-import org.apache.helix.api.id.ClusterId;
-import org.apache.helix.api.id.ParticipantId;
-import org.apache.helix.api.id.ResourceId;
-import org.apache.helix.api.id.StateModelDefId;
-import org.apache.helix.manager.zk.AbstractParticipantService;
-import org.apache.helix.provisioning.yarn.example.HelloWorldStateModelFactory;
-import org.apache.log4j.Logger;
-
-public class HelloWorldService extends AbstractParticipantService {
-
- private static Logger LOG = Logger.getLogger(AbstractParticipantService.class);
-
- static String SERVICE_NAME = "HelloWorld";
-
- public HelloWorldService(HelixConnection connection, ClusterId clusterId,
- ParticipantId participantId) {
- super(connection, clusterId, participantId);
- }
-
- /**
- * init method to setup appropriate call back handlers.
- */
- @Override
- public void init() {
- ClusterId clusterId = getClusterId();
- ResourceAccessor resourceAccessor = getConnection().createResourceAccessor(clusterId);
- UserConfig serviceConfig = resourceAccessor.readUserConfig(ResourceId.from(SERVICE_NAME));
- LOG.info("Starting service:" + SERVICE_NAME + " with configuration:" + serviceConfig);
-
- HelloWorldStateModelFactory stateModelFactory = new HelloWorldStateModelFactory();
- getParticipant().getStateMachineEngine().registerStateModelFactory(
- StateModelDefId.from("StatelessService"), stateModelFactory);
-
- }
-
-}
http://git-wip-us.apache.org/repos/asf/helix/blob/8992aa5a/recipes/provisioning/yarn/helloworld/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldStateModel.java
----------------------------------------------------------------------
diff --git a/recipes/provisioning/yarn/helloworld/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldStateModel.java b/recipes/provisioning/yarn/helloworld/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldStateModel.java
deleted file mode 100644
index 95f66e3..0000000
--- a/recipes/provisioning/yarn/helloworld/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldStateModel.java
+++ /dev/null
@@ -1,29 +0,0 @@
-package org.apache.helix.provisioning.yarn.example;
-
-import org.apache.helix.NotificationContext;
-import org.apache.helix.api.id.PartitionId;
-import org.apache.helix.model.Message;
-import org.apache.helix.participant.statemachine.StateModel;
-import org.apache.helix.participant.statemachine.StateModelInfo;
-import org.apache.helix.participant.statemachine.Transition;
-
-@StateModelInfo(initialState = "OFFLINE", states = { "OFFLINE", "ONLINE",
- "ERROR" })
-public class HelloWorldStateModel extends StateModel {
-
- public HelloWorldStateModel(PartitionId partitionId) {
- // TODO Auto-generated constructor stub
- }
-
- @Transition(to = "ONLINE", from = "OFFLINE")
- public void onBecomeOnlineFromOffline(Message message,
- NotificationContext context) throws Exception {
- System.out.println("Started HelloWorld service");
- }
-
- @Transition(to = "OFFLINE", from = "ONLINE")
- public void onBecomeOfflineFromOnline(Message message,
- NotificationContext context) throws InterruptedException {
- System.out.println("Stopped HelloWorld service");
- }
-}
http://git-wip-us.apache.org/repos/asf/helix/blob/8992aa5a/recipes/provisioning/yarn/helloworld/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldStateModelFactory.java
----------------------------------------------------------------------
diff --git a/recipes/provisioning/yarn/helloworld/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldStateModelFactory.java b/recipes/provisioning/yarn/helloworld/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldStateModelFactory.java
deleted file mode 100644
index 2766f6d..0000000
--- a/recipes/provisioning/yarn/helloworld/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldStateModelFactory.java
+++ /dev/null
@@ -1,13 +0,0 @@
-package org.apache.helix.provisioning.yarn.example;
-
-import org.apache.helix.api.id.PartitionId;
-import org.apache.helix.participant.statemachine.HelixStateModelFactory;
-import org.apache.helix.participant.statemachine.StateModel;
-import org.apache.helix.provisioning.yarn.example.HelloWorldStateModel;
-
-public class HelloWorldStateModelFactory extends HelixStateModelFactory<StateModel> {
- @Override
- public StateModel createNewStateModel(PartitionId partitionId) {
- return new HelloWorldStateModel(partitionId);
- }
-}
http://git-wip-us.apache.org/repos/asf/helix/blob/8992aa5a/recipes/provisioning/yarn/helloworld/src/main/java/org/apache/helix/provisioning/yarn/example/HelloworldAppSpec.java
----------------------------------------------------------------------
diff --git a/recipes/provisioning/yarn/helloworld/src/main/java/org/apache/helix/provisioning/yarn/example/HelloworldAppSpec.java b/recipes/provisioning/yarn/helloworld/src/main/java/org/apache/helix/provisioning/yarn/example/HelloworldAppSpec.java
deleted file mode 100644
index e22c7b2..0000000
--- a/recipes/provisioning/yarn/helloworld/src/main/java/org/apache/helix/provisioning/yarn/example/HelloworldAppSpec.java
+++ /dev/null
@@ -1,138 +0,0 @@
-package org.apache.helix.provisioning.yarn.example;
-
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.helix.api.Scope;
-import org.apache.helix.api.config.ParticipantConfig;
-import org.apache.helix.api.config.ResourceConfig;
-import org.apache.helix.api.config.ResourceConfig.Builder;
-import org.apache.helix.api.config.UserConfig;
-import org.apache.helix.api.id.ParticipantId;
-import org.apache.helix.api.id.ResourceId;
-import org.apache.helix.provisioning.yarn.AppConfig;
-import org.apache.helix.provisioning.yarn.ApplicationSpec;
-import org.apache.helix.provisioning.yarn.ServiceConfig;
-import org.apache.helix.provisioning.yarn.TaskConfig;
-
-public class HelloworldAppSpec implements ApplicationSpec {
-
- public String _appName;
-
- public AppConfig _appConfig;
-
- public List<String> _services;
-
- private String _appMasterPackageUri;
-
- private Map<String, String> _servicePackageURIMap;
-
- private Map<String, String> _serviceMainClassMap;
-
- private Map<String, Map<String, String>> _serviceConfigMap;
-
- private List<TaskConfig> _taskConfigs;
-
- public AppConfig getAppConfig() {
- return _appConfig;
- }
-
- public void setAppConfig(AppConfig appConfig) {
- _appConfig = appConfig;
- }
-
- public String getAppMasterPackageUri() {
- return _appMasterPackageUri;
- }
-
- public void setAppMasterPackageUri(String appMasterPackageUri) {
- _appMasterPackageUri = appMasterPackageUri;
- }
-
- public Map<String, String> getServicePackageURIMap() {
- return _servicePackageURIMap;
- }
-
- public void setServicePackageURIMap(Map<String, String> servicePackageURIMap) {
- _servicePackageURIMap = servicePackageURIMap;
- }
-
- public Map<String, String> getServiceMainClassMap() {
- return _serviceMainClassMap;
- }
-
- public void setServiceMainClassMap(Map<String, String> serviceMainClassMap) {
- _serviceMainClassMap = serviceMainClassMap;
- }
-
- public Map<String, Map<String, String>> getServiceConfigMap() {
- return _serviceConfigMap;
- }
-
- public void setServiceConfigMap(Map<String, Map<String, String>> serviceConfigMap) {
- _serviceConfigMap = serviceConfigMap;
- }
-
- public void setAppName(String appName) {
- _appName = appName;
- }
-
- public void setServices(List<String> services) {
- _services = services;
- }
-
- public void setTaskConfigs(List<TaskConfig> taskConfigs) {
- _taskConfigs = taskConfigs;
- }
-
- @Override
- public String getAppName() {
- return _appName;
- }
-
- @Override
- public AppConfig getConfig() {
- return _appConfig;
- }
-
- @Override
- public List<String> getServices() {
- return _services;
- }
-
- @Override
- public URI getAppMasterPackage() {
- try {
- return new URI(_appMasterPackageUri);
- } catch (URISyntaxException e) {
- return null;
- }
- }
-
- @Override
- public URI getServicePackage(String serviceName) {
- try {
- return new URI(_servicePackageURIMap.get(serviceName));
- } catch (URISyntaxException e) {
- return null;
- }
- }
-
- @Override
- public String getServiceMainClass(String service) {
- return _serviceMainClassMap.get(service);
- }
-
- @Override
- public ServiceConfig getServiceConfig(String serviceName) {
- return new ServiceConfig(Scope.resource(ResourceId.from(serviceName)));
- }
-
- @Override
- public List<TaskConfig> getTaskConfigs() {
- return _taskConfigs;
- }
-
-}
http://git-wip-us.apache.org/repos/asf/helix/blob/8992aa5a/recipes/provisioning/yarn/helloworld/src/main/resources/hello_world_app_spec.yaml
----------------------------------------------------------------------
diff --git a/recipes/provisioning/yarn/helloworld/src/main/resources/hello_world_app_spec.yaml b/recipes/provisioning/yarn/helloworld/src/main/resources/hello_world_app_spec.yaml
deleted file mode 100644
index d8d1dd2..0000000
--- a/recipes/provisioning/yarn/helloworld/src/main/resources/hello_world_app_spec.yaml
+++ /dev/null
@@ -1,24 +0,0 @@
-!!org.apache.helix.provisioning.yarn.example.HelloworldAppSpec
-appConfig:
- config: {
- k1: v1
- }
-appMasterPackageUri: 'file:///Users/kgopalak/Documents/projects/incubator-helix/recipes/provisioning/yarn/helloworld/target/helloworld-0.7.1-incubating-SNAPSHOT-pkg.tar'
-appName: testApp
-serviceConfigMap:
- HelloWorld: {
- num_containers: 3,
- memory: 1024
- }
-serviceMainClassMap: {
- HelloWorld: org.apache.helix.provisioning.yarn.example.HelloWorldService
-}
-servicePackageURIMap: {
- HelloWorld: 'file:///Users/kgopalak/Documents/projects/incubator-helix/recipes/provisioning/yarn/helloworld/target/helloworld-0.7.1-incubating-SNAPSHOT-pkg.tar'
-}
-services: [
- HelloWorld]
-taskConfigs: null
-
-
-
http://git-wip-us.apache.org/repos/asf/helix/blob/8992aa5a/recipes/provisioning/yarn/helloworld/src/test/conf/testng.xml
----------------------------------------------------------------------
diff --git a/recipes/provisioning/yarn/helloworld/src/test/conf/testng.xml b/recipes/provisioning/yarn/helloworld/src/test/conf/testng.xml
deleted file mode 100644
index 37bccf3..0000000
--- a/recipes/provisioning/yarn/helloworld/src/test/conf/testng.xml
+++ /dev/null
@@ -1,27 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing,
-software distributed under the License is distributed on an
-"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-KIND, either express or implied. See the License for the
-specific language governing permissions and limitations
-under the License.
--->
-<!DOCTYPE suite SYSTEM "http://testng.org/testng-1.0.dtd">
-<suite name="Suite" parallel="none">
- <test name="Test" preserve-order="false">
- <packages>
- <package name="org.apache.helix.agent"/>
- </packages>
- </test>
-</suite>
http://git-wip-us.apache.org/repos/asf/helix/blob/8992aa5a/recipes/provisioning/yarn/pom.xml
----------------------------------------------------------------------
diff --git a/recipes/provisioning/yarn/pom.xml b/recipes/provisioning/yarn/pom.xml
deleted file mode 100644
index d557b2b..0000000
--- a/recipes/provisioning/yarn/pom.xml
+++ /dev/null
@@ -1,50 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing,
-software distributed under the License is distributed on an
-"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-KIND, either express or implied. See the License for the
-specific language governing permissions and limitations
-under the License.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
- <modelVersion>4.0.0</modelVersion>
- <parent>
- <groupId>org.apache.helix.recipes.provisioning</groupId>
- <artifactId>provisioning</artifactId>
- <version>0.7.1-incubating-SNAPSHOT</version>
- </parent>
- <groupId>org.apache.helix.recipes.provisioning.yarn</groupId>
- <artifactId>yarn</artifactId>
- <packaging>pom</packaging>
- <name>Apache Helix :: Recipes :: Provisioning :: YARN</name>
-
- <modules>
- <module>helloworld</module>
- </modules>
-
- <build>
- <pluginManagement>
- <plugins>
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-deploy-plugin</artifactId>
- <configuration>
- <skip>true</skip>
- </configuration>
- </plugin>
- </plugins>
- </pluginManagement>
- </build>
-
-</project>
[31/50] [abbrv] git commit: Easily stop a single task partition
Posted by ka...@apache.org.
Easily stop a single task partition
Project: http://git-wip-us.apache.org/repos/asf/helix/repo
Commit: http://git-wip-us.apache.org/repos/asf/helix/commit/8f0b7e4c
Tree: http://git-wip-us.apache.org/repos/asf/helix/tree/8f0b7e4c
Diff: http://git-wip-us.apache.org/repos/asf/helix/diff/8f0b7e4c
Branch: refs/heads/master
Commit: 8f0b7e4c6556acca05b9da16cfaf1872bd5de65c
Parents: e446812
Author: Kanak Biscuitwala <ka...@apache.org>
Authored: Thu Mar 6 17:38:27 2014 -0800
Committer: Kanak Biscuitwala <ka...@apache.org>
Committed: Thu Mar 6 17:38:27 2014 -0800
----------------------------------------------------------------------
.../helix/task/AbstractTaskRebalancer.java | 2 +-
.../apache/helix/provisioning/TaskConfig.java | 11 +-
.../helix/provisioning/tools/TaskManager.java | 110 +++++++++++++++++--
.../provisioning/tools/TestTaskManager.java | 28 ++++-
4 files changed, 133 insertions(+), 18 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/helix/blob/8f0b7e4c/helix-core/src/main/java/org/apache/helix/task/AbstractTaskRebalancer.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/AbstractTaskRebalancer.java b/helix-core/src/main/java/org/apache/helix/task/AbstractTaskRebalancer.java
index 329d02f..f733fb5 100644
--- a/helix-core/src/main/java/org/apache/helix/task/AbstractTaskRebalancer.java
+++ b/helix-core/src/main/java/org/apache/helix/task/AbstractTaskRebalancer.java
@@ -554,7 +554,7 @@ public abstract class AbstractTaskRebalancer implements HelixRebalancer {
private static List<Integer> getNextPartitions(SortedSet<Integer> candidatePartitions,
Set<Integer> excluded, int n) {
- List<Integer> result = new ArrayList<Integer>(n);
+ List<Integer> result = new ArrayList<Integer>();
if (candidatePartitions == null || candidatePartitions.isEmpty()) {
return result;
}
http://git-wip-us.apache.org/repos/asf/helix/blob/8f0b7e4c/helix-provisioning/src/main/java/org/apache/helix/provisioning/TaskConfig.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/TaskConfig.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/TaskConfig.java
index 42203e9..283538d 100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/TaskConfig.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/TaskConfig.java
@@ -3,11 +3,10 @@ package org.apache.helix.provisioning;
import java.util.HashMap;
import java.util.Map;
-
public class TaskConfig {
- public Map<String, String> config = new HashMap<String, String>();
-
- public String getValue(String key) {
- return (config != null ? config.get(key) : null);
- }
+ public Map<String, String> config = new HashMap<String, String>();
+
+ public String getValue(String key) {
+ return (config != null ? config.get(key) : null);
+ }
}
http://git-wip-us.apache.org/repos/asf/helix/blob/8f0b7e4c/helix-provisioning/src/main/java/org/apache/helix/provisioning/tools/TaskManager.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/tools/TaskManager.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/tools/TaskManager.java
index 2d3f8bb..437880e 100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/tools/TaskManager.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/tools/TaskManager.java
@@ -29,16 +29,27 @@ import org.apache.helix.AccessOption;
import org.apache.helix.ClusterMessagingService;
import org.apache.helix.HelixConnection;
import org.apache.helix.HelixDataAccessor;
+import org.apache.helix.HelixManager;
import org.apache.helix.HelixRole;
import org.apache.helix.InstanceType;
import org.apache.helix.PropertyKey;
import org.apache.helix.ZNRecord;
+import org.apache.helix.api.State;
import org.apache.helix.api.id.ClusterId;
import org.apache.helix.api.id.Id;
+import org.apache.helix.api.id.ParticipantId;
+import org.apache.helix.api.id.PartitionId;
import org.apache.helix.api.id.ResourceId;
+import org.apache.helix.api.id.SessionId;
import org.apache.helix.manager.zk.HelixConnectionAdaptor;
+import org.apache.helix.model.ExternalView;
+import org.apache.helix.model.IdealState.IdealStateProperty;
+import org.apache.helix.model.LiveInstance;
+import org.apache.helix.model.ResourceConfiguration;
import org.apache.helix.task.TaskConfig;
import org.apache.helix.task.TaskDriver;
+import org.apache.helix.task.TaskPartitionState;
+import org.apache.helix.task.TaskUtil;
import org.apache.helix.task.Workflow;
import org.apache.log4j.Logger;
@@ -49,6 +60,7 @@ public class TaskManager {
private final ClusterId _clusterId;
private final HelixConnection _connection;
+ private final HelixManager _manager;
private final TaskDriver _driver;
public TaskManager(final ClusterId clusterId, final HelixConnection connection) {
@@ -78,7 +90,8 @@ public class TaskManager {
return null;
}
};
- _driver = new TaskDriver(new HelixConnectionAdaptor(dummyRole));
+ _manager = new HelixConnectionAdaptor(dummyRole);
+ _driver = new TaskDriver(_manager);
_clusterId = clusterId;
_connection = connection;
}
@@ -90,8 +103,8 @@ public class TaskManager {
builder.addConfig(queueName, TaskConfig.COMMAND_CONFIG, "");
builder.addConfig(queueName, TaskConfig.LONG_LIVED + "", String.valueOf(true));
if (isParallel) {
- builder
- .addConfig(queueName, TaskConfig.NUM_CONCURRENT_TASKS_PER_INSTANCE, String.valueOf(10));
+ builder.addConfig(queueName, TaskConfig.NUM_CONCURRENT_TASKS_PER_INSTANCE,
+ String.valueOf(Integer.MAX_VALUE));
}
Workflow workflow = builder.build();
try {
@@ -104,10 +117,13 @@ public class TaskManager {
}
public void addTaskToQueue(final String taskName, final String queueName) {
+ // Update the resource config with the new partition count
HelixDataAccessor accessor = _connection.createDataAccessor(_clusterId);
PropertyKey.Builder keyBuilder = accessor.keyBuilder();
- final ResourceId resourceId = ResourceId.from(queueName + "_" + queueName);
- String configPath = keyBuilder.resourceConfig(resourceId.toString()).getPath();
+ final ResourceId resourceId = resourceId(queueName);
+ final int[] numPartitions = {
+ 0
+ };
DataUpdater<ZNRecord> dataUpdater = new DataUpdater<ZNRecord>() {
@Override
public ZNRecord update(ZNRecord currentData) {
@@ -120,6 +136,7 @@ public class TaskManager {
} else {
String[] parts = current.split(",");
currentId = parts.length;
+ numPartitions[0] = currentId + 1;
currentData.setSimpleField(TaskConfig.TARGET_PARTITIONS, current + "," + currentId);
}
Map<String, String> partitionMap = currentData.getMapField(TaskConfig.TASK_NAME_MAP);
@@ -131,23 +148,100 @@ public class TaskManager {
return currentData;
}
};
+ String configPath = keyBuilder.resourceConfig(resourceId.toString()).getPath();
List<DataUpdater<ZNRecord>> dataUpdaters = new ArrayList<DataUpdater<ZNRecord>>();
dataUpdaters.add(dataUpdater);
accessor.updateChildren(Arrays.asList(configPath), dataUpdaters, AccessOption.PERSISTENT);
- // Update the ideal state to trigger a change event
- DataUpdater<ZNRecord> noOpUpdater = new DataUpdater<ZNRecord>() {
+ // Update the ideal state with the proper partition count
+ DataUpdater<ZNRecord> idealStateUpdater = new DataUpdater<ZNRecord>() {
@Override
public ZNRecord update(ZNRecord currentData) {
+ currentData.setSimpleField(IdealStateProperty.NUM_PARTITIONS.toString(),
+ String.valueOf(numPartitions[0]));
return currentData;
}
};
String idealStatePath = keyBuilder.idealStates(queueName + "_" + queueName).getPath();
dataUpdaters.clear();
- dataUpdaters.add(noOpUpdater);
+ dataUpdaters.add(idealStateUpdater);
accessor.updateChildren(Arrays.asList(idealStatePath), dataUpdaters, AccessOption.PERSISTENT);
}
+ public void cancelTask(String queueName, String taskName) {
+ // Get the mapped task name
+ final ResourceId resourceId = resourceId(queueName);
+ HelixDataAccessor accessor = _connection.createDataAccessor(_clusterId);
+ PropertyKey.Builder keyBuilder = accessor.keyBuilder();
+ ResourceConfiguration resourceConfig =
+ accessor.getProperty(keyBuilder.resourceConfig(resourceId.stringify()));
+ if (resourceConfig == null) {
+ LOG.error("Queue " + queueName + " does not exist!");
+ return;
+ }
+ Map<String, String> taskMap = resourceConfig.getRecord().getMapField(TaskConfig.TASK_NAME_MAP);
+ if (taskMap == null) {
+ LOG.error("Task " + taskName + " in queue " + queueName + " does not exist!");
+ return;
+ }
+ String partitionName = null;
+ for (Map.Entry<String, String> e : taskMap.entrySet()) {
+ String possiblePartition = e.getKey();
+ String possibleTask = e.getValue();
+ if (taskName.equals(possibleTask)) {
+ partitionName = possiblePartition;
+ break;
+ }
+ }
+ if (partitionName == null) {
+ LOG.error("Task " + taskName + " in queue " + queueName + " does not exist!");
+ return;
+ }
+
+ // Now search the external view for who is running the task
+ ExternalView externalView =
+ accessor.getProperty(keyBuilder.externalView(resourceId.toString()));
+ if (externalView == null) {
+ LOG.error("Queue " + queueName + " was never started!");
+ return;
+ }
+ PartitionId partitionId = PartitionId.from(partitionName);
+ Map<ParticipantId, State> stateMap = externalView.getStateMap(partitionId);
+ if (stateMap == null || stateMap.isEmpty()) {
+ LOG.warn("Task " + taskName + " in queue " + queueName + " is not currently running");
+ return;
+ }
+ ParticipantId targetParticipant = null;
+ for (ParticipantId participantId : stateMap.keySet()) {
+ targetParticipant = participantId;
+ }
+ if (targetParticipant == null) {
+ LOG.warn("Task " + taskName + " in queue " + queueName + " is not currently running");
+ return;
+ }
+
+ // Send a request to stop to the appropriate live instance
+ LiveInstance liveInstance =
+ accessor.getProperty(keyBuilder.liveInstance(targetParticipant.toString()));
+ if (liveInstance == null) {
+ LOG.error("Task " + taskName + " in queue " + queueName
+ + " is assigned to a non-running participant");
+ return;
+ }
+ SessionId sessionId = liveInstance.getTypedSessionId();
+ TaskUtil.setRequestedState(accessor, targetParticipant.toString(), sessionId.toString(),
+ resourceId.toString(), partitionId.toString(), TaskPartitionState.STOPPED);
+ LOG.info("Task" + taskName + " for queue " + queueName + " instructed to stop");
+ }
+
public void shutdownQueue(String queueName) {
+ // Check if tasks are complete, then set task and workflows to complete
+
+ // Otherwise, send a stop for everybody
+ _driver.stop(resourceId(queueName).toString());
+ }
+
+ private ResourceId resourceId(String queueName) {
+ return ResourceId.from(queueName + '_' + queueName);
}
}
http://git-wip-us.apache.org/repos/asf/helix/blob/8f0b7e4c/helix-provisioning/src/test/java/org/apache/helix/provisioning/tools/TestTaskManager.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/test/java/org/apache/helix/provisioning/tools/TestTaskManager.java b/helix-provisioning/src/test/java/org/apache/helix/provisioning/tools/TestTaskManager.java
index f90ef3a..7d46cff 100644
--- a/helix-provisioning/src/test/java/org/apache/helix/provisioning/tools/TestTaskManager.java
+++ b/helix-provisioning/src/test/java/org/apache/helix/provisioning/tools/TestTaskManager.java
@@ -95,7 +95,9 @@ public class TestTaskManager extends ZkUnitTestBase {
TaskManager taskManager = new TaskManager(clusterId, connection);
taskManager.createTaskQueue("myqueue", true);
taskManager.addTaskToQueue("mytask1", "myqueue");
+ Thread.sleep(5000);
taskManager.addTaskToQueue("mytask2", "myqueue");
+ taskManager.cancelTask("myqueue", "mytask1");
controller.syncStop();
for (MockParticipantManager participant : participants) {
@@ -105,6 +107,8 @@ public class TestTaskManager extends ZkUnitTestBase {
public static class MyTask implements Task {
private final int _id;
+ private Thread _t;
+ private TaskResult.Status _status = null;
public MyTask(int id) {
_id = id;
@@ -112,16 +116,34 @@ public class TestTaskManager extends ZkUnitTestBase {
@Override
public TaskResult run() {
+ _t = new Thread() {
+ @Override
+ public void run() {
+ try {
+ Thread.sleep(60000);
+ _status = TaskResult.Status.COMPLETED;
+ System.err.println("task complete for " + _id);
+ } catch (InterruptedException e) {
+ _status = TaskResult.Status.CANCELED;
+ System.err.println("task canceled for " + _id);
+ interrupt();
+ }
+ }
+ };
+ _t.start();
try {
- Thread.sleep(10000);
+ _t.join();
} catch (InterruptedException e) {
+ _status = TaskResult.Status.CANCELED;
}
- System.err.println("task complete for " + _id);
- return new TaskResult(TaskResult.Status.COMPLETED, "");
+ return new TaskResult(_status, "");
}
@Override
public void cancel() {
+ if (_t != null && _t.isAlive()) {
+ _t.interrupt();
+ }
}
}
}
[02/50] [abbrv] git commit: Adding simple steps to start HelloWorld
example
Posted by ka...@apache.org.
Adding simple steps to start HelloWorld example
Project: http://git-wip-us.apache.org/repos/asf/helix/repo
Commit: http://git-wip-us.apache.org/repos/asf/helix/commit/5a1391ea
Tree: http://git-wip-us.apache.org/repos/asf/helix/tree/5a1391ea
Diff: http://git-wip-us.apache.org/repos/asf/helix/diff/5a1391ea
Branch: refs/heads/master
Commit: 5a1391ea134200255e148b3e2828ce9be9ea0144
Parents: 48031f3
Author: Kishore Gopalakrishna <g....@gmail.com>
Authored: Tue Feb 18 18:47:58 2014 -0800
Committer: Kishore Gopalakrishna <g....@gmail.com>
Committed: Tue Feb 18 18:47:58 2014 -0800
----------------------------------------------------------------------
helix-provisioning/README.md | 16 ++++++++++++++++
1 file changed, 16 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/helix/blob/5a1391ea/helix-provisioning/README.md
----------------------------------------------------------------------
diff --git a/helix-provisioning/README.md b/helix-provisioning/README.md
new file mode 100644
index 0000000..77a4e81
--- /dev/null
+++ b/helix-provisioning/README.md
@@ -0,0 +1,16 @@
+Checkout helix provisioning branch
+cd helix
+mvn clean package -DskipTests
+cd helix-provisioning
+
+
+Download and install YARN, then start all services (datanode, resourcemanager, nodemanager, jobHistoryServer (optional))
+
+Will post the instructions to get a local YARN cluster.
+
+target/helix-provisioning-pkg/bin/app-launcher.sh org.apache.helix.provisioning.yarn.example.HelloWordAppSpecFactory /Users/kgopalak/Documents/projects/incubator-helix/helix-provisioning/src/main/resources/hello_world_app_spec.yaml
+
+
+
+
+
[29/50] [abbrv] git commit: Creating a more user-friendly service
class for stateless services
Posted by ka...@apache.org.
Creating a more user-friendly service class for stateless services
Project: http://git-wip-us.apache.org/repos/asf/helix/repo
Commit: http://git-wip-us.apache.org/repos/asf/helix/commit/4ea6bcef
Tree: http://git-wip-us.apache.org/repos/asf/helix/tree/4ea6bcef
Diff: http://git-wip-us.apache.org/repos/asf/helix/diff/4ea6bcef
Branch: refs/heads/master
Commit: 4ea6bcef711db208eaec78b6948ac2cae20291d1
Parents: 080a15f
Author: Kanak Biscuitwala <ka...@apache.org>
Authored: Tue Mar 4 11:37:43 2014 -0800
Committer: Kanak Biscuitwala <ka...@apache.org>
Committed: Tue Mar 4 11:37:43 2014 -0800
----------------------------------------------------------------------
.../manager/zk/AbstractParticipantService.java | 142 -------------------
.../participant/AbstractParticipantService.java | 142 +++++++++++++++++++
.../helix/provisioning/ParticipantLauncher.java | 2 +-
.../StatelessParticipantService.java | 86 +++++++++++
.../participant/StatelessServiceStateModel.java | 56 ++++++++
.../StatelessServiceStateModelFactory.java | 39 +++++
.../yarn/example/HelloWorldService.java | 34 ++---
.../yarn/example/HelloWorldStateModel.java | 33 -----
.../example/HelloWorldStateModelFactory.java | 13 --
9 files changed, 339 insertions(+), 208 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/helix/blob/4ea6bcef/helix-core/src/main/java/org/apache/helix/manager/zk/AbstractParticipantService.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/manager/zk/AbstractParticipantService.java b/helix-core/src/main/java/org/apache/helix/manager/zk/AbstractParticipantService.java
deleted file mode 100644
index 49a7159..0000000
--- a/helix-core/src/main/java/org/apache/helix/manager/zk/AbstractParticipantService.java
+++ /dev/null
@@ -1,142 +0,0 @@
-package org.apache.helix.manager.zk;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-import org.apache.helix.HelixConnection;
-import org.apache.helix.HelixParticipant;
-import org.apache.helix.PreConnectCallback;
-import org.apache.helix.api.id.ClusterId;
-import org.apache.helix.api.id.ParticipantId;
-
-import com.google.common.util.concurrent.AbstractService;
-
-/**
- * A modeling of a helix participant as a self-contained service.
- */
-public abstract class AbstractParticipantService extends AbstractService {
- private final ClusterId _clusterId;
- private final ParticipantId _participantId;
- private HelixParticipant _participant;
- private HelixConnection _connection;
- boolean initialized;
-
- /**
- * Initialize the service.
- * @param connection A live Helix connection
- * @param clusterId the cluster to join
- * @param participantId a unique identifier that this participant will join with
- */
- public AbstractParticipantService(HelixConnection connection, ClusterId clusterId,
- ParticipantId participantId) {
- _connection = connection;
- _clusterId = clusterId;
- _participantId = participantId;
- }
-
- @Override
- protected final void doStart() {
- _participant = _connection.createParticipant(_clusterId, _participantId);
-
- // add a preconnect callback
- _participant.addPreConnectCallback(new PreConnectCallback() {
- @Override
- public void onPreConnect() {
- if (initialized) {
- onReconnect();
- } else {
- init();
- initialized = true;
- }
- }
- });
-
- // start and notify
- if (!_connection.isConnected()) {
- _connection.connect();
- }
- _participant.start();
- notifyStarted();
- }
-
- @Override
- protected final void doStop() {
- _participant.stop();
- notifyStopped();
- }
-
- /**
- * Invoked when connection is re-established to zookeeper. Typical scenario this is invoked is
- * when there is a long GC pause that causes the node to disconnect from the cluster and
- * reconnects. NOTE: When the service disconnects all its states are reset to initial state.
- */
- protected void onReconnect() {
- // default implementation does nothing.
- }
-
- /**
- * Initialize the participant. For example, here is where you can
- * <ul>
- * <li>Read configuration of the cluster,resource, node</li>
- * <li>Read configuration of the cluster,resource, node register a state machine: <br/>
- * <br/>
- * <code>
- * HelixParticipant participant = getParticipant();
- * participant.getStateMachineEngine().registerStateModelFactory(stateModelDefId, factory);
- * </code><br/>
- * <br/>
- * </li>
- * </ul>
- * This code is called after connecting to zookeeper but before creating the liveinstance.
- */
- protected abstract void init();
-
- /**
- * Get an instantiated participant instance.
- * @return HelixParticipant
- */
- public HelixParticipant getParticipant() {
- return _participant;
- }
-
- /**
- * @return ClusterId
- * @see {@link ClusterId}
- */
- public ClusterId getClusterId() {
- return _clusterId;
- }
-
- /**
- * @see {@link ParticipantId}
- * @return
- */
- public ParticipantId getParticipantId() {
- return _participantId;
- }
-
- /**
- * @see {@link HelixConnection}
- * @return HelixConnection
- */
- public HelixConnection getConnection() {
- return _connection;
- }
-
-}
http://git-wip-us.apache.org/repos/asf/helix/blob/4ea6bcef/helix-core/src/main/java/org/apache/helix/participant/AbstractParticipantService.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/participant/AbstractParticipantService.java b/helix-core/src/main/java/org/apache/helix/participant/AbstractParticipantService.java
new file mode 100644
index 0000000..cd22762
--- /dev/null
+++ b/helix-core/src/main/java/org/apache/helix/participant/AbstractParticipantService.java
@@ -0,0 +1,142 @@
+package org.apache.helix.participant;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import org.apache.helix.HelixConnection;
+import org.apache.helix.HelixParticipant;
+import org.apache.helix.PreConnectCallback;
+import org.apache.helix.api.id.ClusterId;
+import org.apache.helix.api.id.ParticipantId;
+
+import com.google.common.util.concurrent.AbstractService;
+
+/**
+ * A modeling of a helix participant as a self-contained service.
+ */
+public abstract class AbstractParticipantService extends AbstractService {
+ private final ClusterId _clusterId;
+ private final ParticipantId _participantId;
+ private HelixParticipant _participant;
+ private HelixConnection _connection;
+ boolean initialized;
+
+ /**
+ * Initialize the service.
+ * @param connection A live Helix connection
+ * @param clusterId the cluster to join
+ * @param participantId a unique identifier that this participant will join with
+ */
+ public AbstractParticipantService(HelixConnection connection, ClusterId clusterId,
+ ParticipantId participantId) {
+ _connection = connection;
+ _clusterId = clusterId;
+ _participantId = participantId;
+ }
+
+ @Override
+ protected final void doStart() {
+ _participant = _connection.createParticipant(_clusterId, _participantId);
+
+ // add a preconnect callback
+ _participant.addPreConnectCallback(new PreConnectCallback() {
+ @Override
+ public void onPreConnect() {
+ if (initialized) {
+ onReconnect();
+ } else {
+ init();
+ initialized = true;
+ }
+ }
+ });
+
+ // start and notify
+ if (!_connection.isConnected()) {
+ _connection.connect();
+ }
+ _participant.start();
+ notifyStarted();
+ }
+
+ @Override
+ protected final void doStop() {
+ _participant.stop();
+ notifyStopped();
+ }
+
+ /**
+ * Invoked when connection is re-established to zookeeper. Typical scenario this is invoked is
+ * when there is a long GC pause that causes the node to disconnect from the cluster and
+ * reconnects. NOTE: When the service disconnects all its states are reset to initial state.
+ */
+ protected void onReconnect() {
+ // default implementation does nothing.
+ }
+
+ /**
+ * Initialize the participant. For example, here is where you can
+ * <ul>
+ * <li>Read configuration of the cluster,resource, node</li>
+ * <li>Read configuration of the cluster,resource, node register a state machine: <br/>
+ * <br/>
+ * <code>
+ * HelixParticipant participant = getParticipant();
+ * participant.getStateMachineEngine().registerStateModelFactory(stateModelDefId, factory);
+ * </code><br/>
+ * <br/>
+ * </li>
+ * </ul>
+ * This code is called after connecting to zookeeper but before creating the liveinstance.
+ */
+ protected abstract void init();
+
+ /**
+ * Get an instantiated participant instance.
+ * @return HelixParticipant
+ */
+ public HelixParticipant getParticipant() {
+ return _participant;
+ }
+
+ /**
+ * @return ClusterId
+ * @see {@link ClusterId}
+ */
+ public ClusterId getClusterId() {
+ return _clusterId;
+ }
+
+ /**
+ * @see {@link ParticipantId}
+ * @return
+ */
+ public ParticipantId getParticipantId() {
+ return _participantId;
+ }
+
+ /**
+ * @see {@link HelixConnection}
+ * @return HelixConnection
+ */
+ public HelixConnection getConnection() {
+ return _connection;
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/helix/blob/4ea6bcef/helix-provisioning/src/main/java/org/apache/helix/provisioning/ParticipantLauncher.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/ParticipantLauncher.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/ParticipantLauncher.java
index 55bb618..60231fb 100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/ParticipantLauncher.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/ParticipantLauncher.java
@@ -10,13 +10,13 @@ import org.apache.helix.HelixConnection;
import org.apache.helix.NotificationContext;
import org.apache.helix.api.id.ClusterId;
import org.apache.helix.api.id.ParticipantId;
-import org.apache.helix.manager.zk.AbstractParticipantService;
import org.apache.helix.manager.zk.ZkHelixConnection;
import org.apache.helix.messaging.handling.HelixTaskResult;
import org.apache.helix.messaging.handling.MessageHandler;
import org.apache.helix.messaging.handling.MessageHandlerFactory;
import org.apache.helix.model.Message;
import org.apache.helix.model.Message.MessageType;
+import org.apache.helix.participant.AbstractParticipantService;
import org.apache.log4j.Logger;
/**
http://git-wip-us.apache.org/repos/asf/helix/blob/4ea6bcef/helix-provisioning/src/main/java/org/apache/helix/provisioning/participant/StatelessParticipantService.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/participant/StatelessParticipantService.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/participant/StatelessParticipantService.java
new file mode 100644
index 0000000..d937c5c
--- /dev/null
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/participant/StatelessParticipantService.java
@@ -0,0 +1,86 @@
+package org.apache.helix.provisioning.participant;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import org.apache.helix.HelixConnection;
+import org.apache.helix.api.Scope;
+import org.apache.helix.api.accessor.ResourceAccessor;
+import org.apache.helix.api.config.UserConfig;
+import org.apache.helix.api.id.ClusterId;
+import org.apache.helix.api.id.ParticipantId;
+import org.apache.helix.api.id.ResourceId;
+import org.apache.helix.api.id.StateModelDefId;
+import org.apache.helix.participant.AbstractParticipantService;
+import org.apache.helix.provisioning.ServiceConfig;
+import org.apache.log4j.Logger;
+
+public abstract class StatelessParticipantService extends AbstractParticipantService {
+ private static final Logger LOG = Logger.getLogger(StatelessParticipantService.class);
+
+ private final String _serviceName;
+
+ public StatelessParticipantService(HelixConnection connection, ClusterId clusterId,
+ ParticipantId participantId, String serviceName) {
+ super(connection, clusterId, participantId);
+ _serviceName = serviceName;
+ }
+
+ @Override
+ protected void init() {
+ ClusterId clusterId = getClusterId();
+ ResourceAccessor resourceAccessor = getConnection().createResourceAccessor(clusterId);
+ ResourceId resourceId = ResourceId.from(_serviceName);
+ UserConfig userConfig = resourceAccessor.readUserConfig(resourceId);
+ ServiceConfig serviceConfig = new ServiceConfig(Scope.resource(resourceId));
+ serviceConfig.setSimpleFields(userConfig.getSimpleFields());
+ serviceConfig.setListFields(userConfig.getListFields());
+ serviceConfig.setMapFields(userConfig.getMapFields());
+ LOG.info("Starting service:" + _serviceName + " with configuration:" + serviceConfig);
+ StatelessServiceStateModelFactory stateModelFactory =
+ new StatelessServiceStateModelFactory(this);
+ getParticipant().getStateMachineEngine().registerStateModelFactory(
+ StateModelDefId.from("StatelessService"), stateModelFactory);
+ init(serviceConfig);
+ }
+
+ /**
+ * Get the name of this stateless service
+ * @return service name
+ */
+ public String getName() {
+ return _serviceName;
+ }
+
+ /**
+ * Initialize the service with a configuration
+ */
+ protected abstract void init(ServiceConfig serviceConfig);
+
+ /**
+ * Invoked when this service is instructed to go online
+ */
+ protected abstract void goOnline();
+
+ /**
+ * Invoked when this service is instructed to go offline
+ */
+ protected abstract void goOffine();
+
+}
http://git-wip-us.apache.org/repos/asf/helix/blob/4ea6bcef/helix-provisioning/src/main/java/org/apache/helix/provisioning/participant/StatelessServiceStateModel.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/participant/StatelessServiceStateModel.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/participant/StatelessServiceStateModel.java
new file mode 100644
index 0000000..f653de8
--- /dev/null
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/participant/StatelessServiceStateModel.java
@@ -0,0 +1,56 @@
+package org.apache.helix.provisioning.participant;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import org.apache.helix.NotificationContext;
+import org.apache.helix.api.id.PartitionId;
+import org.apache.helix.model.Message;
+import org.apache.helix.participant.statemachine.StateModel;
+import org.apache.helix.participant.statemachine.StateModelInfo;
+import org.apache.helix.participant.statemachine.Transition;
+import org.apache.log4j.Logger;
+
+@StateModelInfo(initialState = "OFFLINE", states = {
+ "OFFLINE", "ONLINE", "ERROR"
+})
+public class StatelessServiceStateModel extends StateModel {
+ private static final Logger LOG = Logger.getLogger(StatelessServiceStateModel.class);
+
+ private final StatelessParticipantService _service;
+
+ public StatelessServiceStateModel(PartitionId partitionId, StatelessParticipantService service) {
+ _service = service;
+ // ignore partition
+ }
+
+ @Transition(to = "ONLINE", from = "OFFLINE")
+ public void onBecomeOnlineFromOffline(Message message, NotificationContext context)
+ throws Exception {
+ LOG.info("Started " + _service.getName() + " service");
+ _service.goOnline();
+ }
+
+ @Transition(to = "OFFLINE", from = "ONLINE")
+ public void onBecomeOfflineFromOnline(Message message, NotificationContext context)
+ throws InterruptedException {
+ LOG.info("Stopped " + _service.getName() + " service");
+ _service.goOffine();
+ }
+}
http://git-wip-us.apache.org/repos/asf/helix/blob/4ea6bcef/helix-provisioning/src/main/java/org/apache/helix/provisioning/participant/StatelessServiceStateModelFactory.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/participant/StatelessServiceStateModelFactory.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/participant/StatelessServiceStateModelFactory.java
new file mode 100644
index 0000000..19c1488
--- /dev/null
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/participant/StatelessServiceStateModelFactory.java
@@ -0,0 +1,39 @@
+package org.apache.helix.provisioning.participant;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import org.apache.helix.api.id.PartitionId;
+import org.apache.helix.participant.statemachine.HelixStateModelFactory;
+
+public class StatelessServiceStateModelFactory extends
+ HelixStateModelFactory<StatelessServiceStateModel> {
+
+ private final StatelessParticipantService _service;
+
+ public StatelessServiceStateModelFactory(StatelessParticipantService service) {
+ _service = service;
+ }
+
+ @Override
+ public StatelessServiceStateModel createNewStateModel(PartitionId partitionId) {
+ return new StatelessServiceStateModel(partitionId, _service);
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/helix/blob/4ea6bcef/recipes/helloworld-provisioning-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldService.java
----------------------------------------------------------------------
diff --git a/recipes/helloworld-provisioning-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldService.java b/recipes/helloworld-provisioning-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldService.java
index 8999817..269ae0c 100644
--- a/recipes/helloworld-provisioning-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldService.java
+++ b/recipes/helloworld-provisioning-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldService.java
@@ -1,17 +1,14 @@
package org.apache.helix.provisioning.yarn.example;
import org.apache.helix.HelixConnection;
-import org.apache.helix.api.accessor.ResourceAccessor;
-import org.apache.helix.api.config.UserConfig;
import org.apache.helix.api.id.ClusterId;
import org.apache.helix.api.id.ParticipantId;
-import org.apache.helix.api.id.ResourceId;
-import org.apache.helix.api.id.StateModelDefId;
-import org.apache.helix.manager.zk.AbstractParticipantService;
-import org.apache.helix.provisioning.yarn.example.HelloWorldStateModelFactory;
+import org.apache.helix.participant.AbstractParticipantService;
+import org.apache.helix.provisioning.ServiceConfig;
+import org.apache.helix.provisioning.participant.StatelessParticipantService;
import org.apache.log4j.Logger;
-public class HelloWorldService extends AbstractParticipantService {
+public class HelloWorldService extends StatelessParticipantService {
private static Logger LOG = Logger.getLogger(AbstractParticipantService.class);
@@ -19,23 +16,22 @@ public class HelloWorldService extends AbstractParticipantService {
public HelloWorldService(HelixConnection connection, ClusterId clusterId,
ParticipantId participantId) {
- super(connection, clusterId, participantId);
+ super(connection, clusterId, participantId, SERVICE_NAME);
}
- /**
- * init method to setup appropriate call back handlers.
- */
@Override
- public void init() {
- ClusterId clusterId = getClusterId();
- ResourceAccessor resourceAccessor = getConnection().createResourceAccessor(clusterId);
- UserConfig serviceConfig = resourceAccessor.readUserConfig(ResourceId.from(SERVICE_NAME));
- LOG.info("Starting service:" + SERVICE_NAME + " with configuration:" + serviceConfig);
+ protected void init(ServiceConfig serviceConfig) {
+ LOG.info("Initialized service with config " + serviceConfig);
+ }
- HelloWorldStateModelFactory stateModelFactory = new HelloWorldStateModelFactory();
- getParticipant().getStateMachineEngine().registerStateModelFactory(
- StateModelDefId.from("StatelessService"), stateModelFactory);
+ @Override
+ protected void goOnline() {
+ LOG.info("HelloWorld service is told to go online");
+ }
+ @Override
+ protected void goOffine() {
+ LOG.info("HelloWorld service is told to go offline");
}
}
http://git-wip-us.apache.org/repos/asf/helix/blob/4ea6bcef/recipes/helloworld-provisioning-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldStateModel.java
----------------------------------------------------------------------
diff --git a/recipes/helloworld-provisioning-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldStateModel.java b/recipes/helloworld-provisioning-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldStateModel.java
deleted file mode 100644
index 078d847..0000000
--- a/recipes/helloworld-provisioning-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldStateModel.java
+++ /dev/null
@@ -1,33 +0,0 @@
-package org.apache.helix.provisioning.yarn.example;
-
-import org.apache.helix.NotificationContext;
-import org.apache.helix.api.id.PartitionId;
-import org.apache.helix.model.Message;
-import org.apache.helix.participant.statemachine.StateModel;
-import org.apache.helix.participant.statemachine.StateModelInfo;
-import org.apache.helix.participant.statemachine.Transition;
-import org.apache.log4j.Logger;
-
-@StateModelInfo(initialState = "OFFLINE", states = {
- "OFFLINE", "ONLINE", "ERROR"
-})
-public class HelloWorldStateModel extends StateModel {
-
- private static Logger LOG = Logger.getLogger(HelloWorldStateModel.class);
-
- public HelloWorldStateModel(PartitionId partitionId) {
- // ignore the partitionId
- }
-
- @Transition(to = "ONLINE", from = "OFFLINE")
- public void onBecomeOnlineFromOffline(Message message, NotificationContext context)
- throws Exception {
- LOG.info("Started HelloWorld service");
- }
-
- @Transition(to = "OFFLINE", from = "ONLINE")
- public void onBecomeOfflineFromOnline(Message message, NotificationContext context)
- throws InterruptedException {
- LOG.info("Stopped HelloWorld service");
- }
-}
http://git-wip-us.apache.org/repos/asf/helix/blob/4ea6bcef/recipes/helloworld-provisioning-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldStateModelFactory.java
----------------------------------------------------------------------
diff --git a/recipes/helloworld-provisioning-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldStateModelFactory.java b/recipes/helloworld-provisioning-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldStateModelFactory.java
deleted file mode 100644
index 2766f6d..0000000
--- a/recipes/helloworld-provisioning-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/HelloWorldStateModelFactory.java
+++ /dev/null
@@ -1,13 +0,0 @@
-package org.apache.helix.provisioning.yarn.example;
-
-import org.apache.helix.api.id.PartitionId;
-import org.apache.helix.participant.statemachine.HelixStateModelFactory;
-import org.apache.helix.participant.statemachine.StateModel;
-import org.apache.helix.provisioning.yarn.example.HelloWorldStateModel;
-
-public class HelloWorldStateModelFactory extends HelixStateModelFactory<StateModel> {
- @Override
- public StateModel createNewStateModel(PartitionId partitionId) {
- return new HelloWorldStateModel(partitionId);
- }
-}
[27/50] [abbrv] git commit: Use non-deprecated version of state model
factory for tasks
Posted by ka...@apache.org.
Use non-deprecated version of state model factory for tasks
Project: http://git-wip-us.apache.org/repos/asf/helix/repo
Commit: http://git-wip-us.apache.org/repos/asf/helix/commit/1bc9354d
Tree: http://git-wip-us.apache.org/repos/asf/helix/tree/1bc9354d
Diff: http://git-wip-us.apache.org/repos/asf/helix/diff/1bc9354d
Branch: refs/heads/master
Commit: 1bc9354d5da7be0e2b495914351c23ce47b5eb07
Parents: b97cfb4
Author: Kanak Biscuitwala <ka...@apache.org>
Authored: Mon Mar 3 14:20:04 2014 -0800
Committer: Kanak Biscuitwala <ka...@apache.org>
Committed: Mon Mar 3 14:20:04 2014 -0800
----------------------------------------------------------------------
.../apache/helix/task/TaskStateModelFactory.java | 8 +++++---
.../integration/task/TestTaskRebalancer.java | 5 +++--
.../task/TestTaskRebalancerStopResume.java | 18 +++++++++++-------
3 files changed, 19 insertions(+), 12 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/helix/blob/1bc9354d/helix-core/src/main/java/org/apache/helix/task/TaskStateModelFactory.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/TaskStateModelFactory.java b/helix-core/src/main/java/org/apache/helix/task/TaskStateModelFactory.java
index 369ac22..2537747 100644
--- a/helix-core/src/main/java/org/apache/helix/task/TaskStateModelFactory.java
+++ b/helix-core/src/main/java/org/apache/helix/task/TaskStateModelFactory.java
@@ -20,13 +20,15 @@ package org.apache.helix.task;
*/
import java.util.Map;
+
import org.apache.helix.HelixManager;
-import org.apache.helix.participant.statemachine.StateModelFactory;
+import org.apache.helix.api.id.PartitionId;
+import org.apache.helix.participant.statemachine.HelixStateModelFactory;
/**
* Factory class for {@link TaskStateModel}.
*/
-public class TaskStateModelFactory extends StateModelFactory<TaskStateModel> {
+public class TaskStateModelFactory extends HelixStateModelFactory<TaskStateModel> {
private final HelixManager _manager;
private final Map<String, TaskFactory> _taskFactoryRegistry;
@@ -36,7 +38,7 @@ public class TaskStateModelFactory extends StateModelFactory<TaskStateModel> {
}
@Override
- public TaskStateModel createNewStateModel(String partitionName) {
+ public TaskStateModel createNewStateModel(PartitionId partitionId) {
return new TaskStateModel(_manager, _taskFactoryRegistry);
}
}
http://git-wip-us.apache.org/repos/asf/helix/blob/1bc9354d/helix-core/src/test/java/org/apache/helix/integration/task/TestTaskRebalancer.java
----------------------------------------------------------------------
diff --git a/helix-core/src/test/java/org/apache/helix/integration/task/TestTaskRebalancer.java b/helix-core/src/test/java/org/apache/helix/integration/task/TestTaskRebalancer.java
index c221d96..1c83291 100644
--- a/helix-core/src/test/java/org/apache/helix/integration/task/TestTaskRebalancer.java
+++ b/helix-core/src/test/java/org/apache/helix/integration/task/TestTaskRebalancer.java
@@ -29,6 +29,7 @@ import org.apache.helix.HelixManager;
import org.apache.helix.HelixManagerFactory;
import org.apache.helix.InstanceType;
import org.apache.helix.PropertyKey;
+import org.apache.helix.api.id.StateModelDefId;
import org.apache.helix.integration.ZkIntegrationTestBase;
import org.apache.helix.integration.manager.ClusterControllerManager;
import org.apache.helix.integration.manager.MockParticipantManager;
@@ -103,8 +104,8 @@ public class TestTaskRebalancer extends ZkIntegrationTestBase {
// Register a Task state model factory.
StateMachineEngine stateMachine = _participants[i].getStateMachineEngine();
- stateMachine.registerStateModelFactory("Task", new TaskStateModelFactory(_participants[i],
- taskFactoryReg));
+ stateMachine.registerStateModelFactory(StateModelDefId.from("Task"),
+ new TaskStateModelFactory(_participants[i], taskFactoryReg));
_participants[i].syncStart();
}
http://git-wip-us.apache.org/repos/asf/helix/blob/1bc9354d/helix-core/src/test/java/org/apache/helix/integration/task/TestTaskRebalancerStopResume.java
----------------------------------------------------------------------
diff --git a/helix-core/src/test/java/org/apache/helix/integration/task/TestTaskRebalancerStopResume.java b/helix-core/src/test/java/org/apache/helix/integration/task/TestTaskRebalancerStopResume.java
index 01d64f3..bb490ea 100644
--- a/helix-core/src/test/java/org/apache/helix/integration/task/TestTaskRebalancerStopResume.java
+++ b/helix-core/src/test/java/org/apache/helix/integration/task/TestTaskRebalancerStopResume.java
@@ -20,20 +20,24 @@ package org.apache.helix.integration.task;
*/
import java.util.HashMap;
-import java.util.Iterator;
import java.util.Map;
import org.apache.helix.HelixManager;
import org.apache.helix.HelixManagerFactory;
import org.apache.helix.InstanceType;
-import org.apache.helix.TestHelper;
-import org.apache.helix.controller.HelixControllerMain;
+import org.apache.helix.api.id.StateModelDefId;
import org.apache.helix.integration.ZkIntegrationTestBase;
-import org.apache.helix.integration.ZkStandAloneCMTestBase;
import org.apache.helix.integration.manager.ClusterControllerManager;
import org.apache.helix.integration.manager.MockParticipantManager;
import org.apache.helix.participant.StateMachineEngine;
-import org.apache.helix.task.*;
+import org.apache.helix.task.Task;
+import org.apache.helix.task.TaskConfig;
+import org.apache.helix.task.TaskDriver;
+import org.apache.helix.task.TaskFactory;
+import org.apache.helix.task.TaskResult;
+import org.apache.helix.task.TaskState;
+import org.apache.helix.task.TaskStateModelFactory;
+import org.apache.helix.task.Workflow;
import org.apache.helix.tools.ClusterSetup;
import org.apache.helix.tools.ClusterStateVerifier;
import org.apache.log4j.Logger;
@@ -91,8 +95,8 @@ public class TestTaskRebalancerStopResume extends ZkIntegrationTestBase {
// Register a Task state model factory.
StateMachineEngine stateMachine = _participants[i].getStateMachineEngine();
- stateMachine.registerStateModelFactory("Task", new TaskStateModelFactory(_participants[i],
- taskFactoryReg));
+ stateMachine.registerStateModelFactory(StateModelDefId.from("Task"),
+ new TaskStateModelFactory(_participants[i], taskFactoryReg));
_participants[i].syncStart();
}
[04/50] [abbrv] git commit: Making minor changes to YarnProvisioner
to maintain a fixed number of containers
Posted by ka...@apache.org.
Making minor changes to YarnProvisioner to maintain a fixed number of containers
Project: http://git-wip-us.apache.org/repos/asf/helix/repo
Commit: http://git-wip-us.apache.org/repos/asf/helix/commit/cb6aa4fa
Tree: http://git-wip-us.apache.org/repos/asf/helix/tree/cb6aa4fa
Diff: http://git-wip-us.apache.org/repos/asf/helix/diff/cb6aa4fa
Branch: refs/heads/master
Commit: cb6aa4fa0e82436f1d6714c3cdcf1435c510024a
Parents: d1e7ca6
Author: Kishore Gopalakrishna <g....@gmail.com>
Authored: Wed Feb 19 17:16:55 2014 -0800
Committer: Kishore Gopalakrishna <g....@gmail.com>
Committed: Wed Feb 19 17:16:55 2014 -0800
----------------------------------------------------------------------
.../controller/provisioner/ContainerSpec.java | 18 ++++--
.../stages/ContainerProvisioningStage.java | 2 +-
.../provisioning/yarn/YarnProvisioner.java | 63 ++++++++++++++------
3 files changed, 58 insertions(+), 25 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/helix/blob/cb6aa4fa/helix-core/src/main/java/org/apache/helix/controller/provisioner/ContainerSpec.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/controller/provisioner/ContainerSpec.java b/helix-core/src/main/java/org/apache/helix/controller/provisioner/ContainerSpec.java
index b393a64..4d3a521 100644
--- a/helix-core/src/main/java/org/apache/helix/controller/provisioner/ContainerSpec.java
+++ b/helix-core/src/main/java/org/apache/helix/controller/provisioner/ContainerSpec.java
@@ -23,23 +23,31 @@ public class ContainerSpec {
/**
* Some unique id representing the container.
*/
- ContainerId containerId;
+ ContainerId _containerId;
- String memory;
+ int _memory;
public ContainerSpec(ContainerId containerId) {
- this.containerId = containerId;
+ this._containerId = containerId;
}
public ContainerId getContainerId() {
- return containerId;
+ return _containerId;
}
@Override
public String toString() {
- return containerId.toString();
+ return _containerId.toString();
+ }
+
+ public void setMemory(int memory){
+ _memory = memory;
}
+ public int getMemory(){
+ return _memory;
+ }
+
public static ContainerSpec from(String serialized) {
return new ContainerSpec(ContainerId.from(serialized));
}
http://git-wip-us.apache.org/repos/asf/helix/blob/cb6aa4fa/helix-core/src/main/java/org/apache/helix/controller/stages/ContainerProvisioningStage.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/controller/stages/ContainerProvisioningStage.java b/helix-core/src/main/java/org/apache/helix/controller/stages/ContainerProvisioningStage.java
index 2f97c5a..48166bf 100644
--- a/helix-core/src/main/java/org/apache/helix/controller/stages/ContainerProvisioningStage.java
+++ b/helix-core/src/main/java/org/apache/helix/controller/stages/ContainerProvisioningStage.java
@@ -123,7 +123,7 @@ public class ContainerProvisioningStage extends AbstractBaseStage {
// allocate new containers
for (final ContainerSpec spec : response.getContainersToAcquire()) {
// random participant id
- final ParticipantId participantId = ParticipantId.from(UUID.randomUUID().toString());
+ final ParticipantId participantId = ParticipantId.from(spec.getContainerId().stringify());
// create a new Participant, attach the container spec
InstanceConfig instanceConfig = new InstanceConfig(participantId);
instanceConfig.setContainerSpec(spec);
http://git-wip-us.apache.org/repos/asf/helix/blob/cb6aa4fa/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/YarnProvisioner.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/YarnProvisioner.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/YarnProvisioner.java
index 477023b..4fcc219 100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/YarnProvisioner.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/YarnProvisioner.java
@@ -8,8 +8,10 @@ import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
+import java.util.HashSet;
import java.util.List;
import java.util.Map;
+import java.util.Set;
import java.util.Vector;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
@@ -44,7 +46,9 @@ import org.apache.helix.HelixManager;
import org.apache.helix.api.Cluster;
import org.apache.helix.api.Participant;
import org.apache.helix.api.config.ContainerConfig;
+import org.apache.helix.api.config.ParticipantConfig;
import org.apache.helix.api.config.ResourceConfig;
+import org.apache.helix.api.id.ParticipantId;
import org.apache.helix.api.id.ResourceId;
import org.apache.helix.controller.provisioner.ContainerId;
import org.apache.helix.controller.provisioner.ContainerProvider;
@@ -54,6 +58,7 @@ import org.apache.helix.controller.provisioner.Provisioner;
import org.apache.helix.controller.provisioner.ProvisionerConfig;
import org.apache.helix.controller.provisioner.TargetProvider;
import org.apache.helix.controller.provisioner.TargetProviderResponse;
+import org.apache.helix.model.InstanceConfig;
import com.google.common.collect.Lists;
import com.google.common.base.Function;
@@ -73,8 +78,9 @@ public class YarnProvisioner implements Provisioner, TargetProvider, ContainerPr
Map<ContainerId, Container> allocatedContainersMap = new HashMap<ContainerId, Container>();
private HelixManager _helixManager;
private ResourceConfig _resourceConfig;
- public YarnProvisioner(){
-
+
+ public YarnProvisioner() {
+
}
@Override
@@ -109,7 +115,8 @@ public class YarnProvisioner implements Provisioner, TargetProvider, ContainerPr
}
@Override
- public ListenableFuture<Boolean> startContainer(final ContainerId containerId, Participant participant) {
+ public ListenableFuture<Boolean> startContainer(final ContainerId containerId,
+ Participant participant) {
Container container = allocatedContainersMap.get(containerId);
ContainerLaunchContext launchContext;
try {
@@ -128,11 +135,12 @@ public class YarnProvisioner implements Provisioner, TargetProvider, ContainerPr
}, service);
}
- private ContainerLaunchContext createLaunchContext(ContainerId containerId, Container container, Participant participant) throws Exception {
+ private ContainerLaunchContext createLaunchContext(ContainerId containerId, Container container,
+ Participant participant) throws Exception {
ContainerLaunchContext participantContainer = Records.newRecord(ContainerLaunchContext.class);
-// Map<String, String> envs = System.getenv();
+ // Map<String, String> envs = System.getenv();
String appName = applicationMasterConfig.getAppName();
int appId = applicationMasterConfig.getAppId();
String serviceName = _resourceConfig.getId().stringify();
@@ -166,7 +174,7 @@ public class YarnProvisioner implements Provisioner, TargetProvider, ContainerPr
// resource the client intended to use with the application
servicePackageResource.setTimestamp(destStatus.getModificationTime());
servicePackageResource.setSize(destStatus.getLen());
- LOG.info("Setting local resource:" + servicePackageResource + " for service" + serviceName );
+ LOG.info("Setting local resource:" + servicePackageResource + " for service" + serviceName);
localResources.put(serviceName, servicePackageResource);
// Set local resource info into app master container launch context
@@ -195,7 +203,7 @@ public class YarnProvisioner implements Provisioner, TargetProvider, ContainerPr
classPathEnv.append(c.trim());
}
classPathEnv.append(File.pathSeparatorChar).append("./log4j.properties");
- LOG.info("Setting classpath for service:\n"+ classPathEnv.toString());
+ LOG.info("Setting classpath for service:\n" + classPathEnv.toString());
env.put("CLASSPATH", classPathEnv.toString());
participantContainer.setEnvironment(env);
@@ -214,8 +222,8 @@ public class YarnProvisioner implements Provisioner, TargetProvider, ContainerPr
vargs.add("--zkAddress " + zkAddress);
vargs.add("--cluster " + appName);
vargs.add("--participantId " + participant.getId().stringify());
- vargs.add("--participantClass " + mainClass);;
-
+ vargs.add("--participantClass " + mainClass);
+ ;
vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/ContainerParticipant.stdout");
vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/ContainerParticipant.stderr");
@@ -226,7 +234,8 @@ public class YarnProvisioner implements Provisioner, TargetProvider, ContainerPr
command.append(str).append(" ");
}
- LOG.info("Completed setting up container launch command " + command.toString() + " with arguments \n" + vargs);
+ LOG.info("Completed setting up container launch command " + command.toString()
+ + " with arguments \n" + vargs);
List<String> commands = new ArrayList<String>();
commands.add(command.toString());
participantContainer.setCommands(commands);
@@ -260,13 +269,13 @@ public class YarnProvisioner implements Provisioner, TargetProvider, ContainerPr
List<Participant> containersToStart = Lists.newArrayList();
List<Participant> containersToRelease = Lists.newArrayList();
List<Participant> containersToStop = Lists.newArrayList();
- YarnProvisionerConfig provisionerConfig = (YarnProvisionerConfig) cluster.getConfig().getResourceMap().get(resourceId).getProvisionerConfig();
+ YarnProvisionerConfig provisionerConfig =
+ (YarnProvisionerConfig) cluster.getConfig().getResourceMap().get(resourceId)
+ .getProvisionerConfig();
int targetNumContainers = provisionerConfig.getNumContainers();
- for (int i = 0; i < targetNumContainers - participants.size(); i++) {
- containersToAcquire.add(new ContainerSpec(ContainerId.from("container"
- + (targetNumContainers - i))));
- }
- response.setContainersToAcquire(containersToAcquire);
+
+ Set<ContainerId> existingContainersIdSet = new HashSet<ContainerId>();
+
for (Participant participant : participants) {
ContainerConfig containerConfig = participant.getContainerConfig();
@@ -278,17 +287,20 @@ public class YarnProvisioner implements Provisioner, TargetProvider, ContainerPr
containersToStart.add(participant);
break;
case ACTIVE:
-
+ existingContainersIdSet.add(containerConfig.getId());
break;
case HALTED:
// halted containers can be released
- // containersToRelease.add(participant);
+ containersToRelease.add(participant);
break;
case ACQUIRING:
+ existingContainersIdSet.add(containerConfig.getId());
break;
case CONNECTING:
break;
case FAILED:
+ //remove the failed instance
+ _helixManager.getClusterManagmentTool().dropInstance(cluster.getId().toString(), new InstanceConfig(participant.getId()));
break;
case FINALIZED:
break;
@@ -306,6 +318,19 @@ public class YarnProvisioner implements Provisioner, TargetProvider, ContainerPr
}
}
}
+
+ for (int i = 0; i < targetNumContainers; i++) {
+ ContainerId containerId = ContainerId.from(resourceId + "_container_" + (i));
+ if(!existingContainersIdSet.contains(containerId)){
+ ContainerSpec containerSpec = new ContainerSpec(containerId);
+ ParticipantId participantId = ParticipantId.from(containerId.stringify());
+ ParticipantConfig participantConfig = applicationSpec.getParticipantConfig(resourceId.stringify(), participantId);
+ containerSpec.setMemory(participantConfig.getUserConfig().getIntField("memory", 1024));
+ containersToAcquire.add(containerSpec);
+ }
+ }
+
+ response.setContainersToAcquire(containersToAcquire);
response.setContainersToStart(containersToStart);
response.setContainersToRelease(containersToRelease);
response.setContainersToStop(containersToStop);
@@ -326,7 +351,7 @@ public class YarnProvisioner implements Provisioner, TargetProvider, ContainerPr
// Set up resource type requirements
// For now, only memory is supported so we set memory requirements
Resource capability = Records.newRecord(Resource.class);
- int memory = 1024;
+ int memory = spec.getMemory();
capability.setMemory(memory);
ContainerRequest request = new ContainerRequest(capability, null, null, pri);
[43/50] [abbrv] git commit: [HELIX-439] Support thresholding for job
success/failure
Posted by ka...@apache.org.
[HELIX-439] Support thresholding for job success/failure
Project: http://git-wip-us.apache.org/repos/asf/helix/repo
Commit: http://git-wip-us.apache.org/repos/asf/helix/commit/c5921f42
Tree: http://git-wip-us.apache.org/repos/asf/helix/tree/c5921f42
Diff: http://git-wip-us.apache.org/repos/asf/helix/diff/c5921f42
Branch: refs/heads/master
Commit: c5921f4299cd3d6d6be1aed44053904e1aada53e
Parents: feaea56
Author: Kanak Biscuitwala <ka...@apache.org>
Authored: Tue Jul 8 18:34:57 2014 -0700
Committer: Kanak Biscuitwala <ka...@apache.org>
Committed: Tue Jul 8 18:34:57 2014 -0700
----------------------------------------------------------------------
.../java/org/apache/helix/task/JobConfig.java | 27 ++++++-
.../java/org/apache/helix/task/TaskConfig.java | 36 +++++++--
.../org/apache/helix/task/TaskRebalancer.java | 52 +++++++++----
.../java/org/apache/helix/task/Workflow.java | 2 +
.../org/apache/helix/task/beans/JobBean.java | 1 +
.../org/apache/helix/task/beans/TaskBean.java | 1 +
.../task/TestIndependentTaskRebalancer.java | 81 +++++++++++++++++++-
7 files changed, 174 insertions(+), 26 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/helix/blob/c5921f42/helix-core/src/main/java/org/apache/helix/task/JobConfig.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/JobConfig.java b/helix-core/src/main/java/org/apache/helix/task/JobConfig.java
index 90e3cfc..b166da1 100644
--- a/helix-core/src/main/java/org/apache/helix/task/JobConfig.java
+++ b/helix-core/src/main/java/org/apache/helix/task/JobConfig.java
@@ -63,6 +63,8 @@ public class JobConfig {
public static final String MAX_ATTEMPTS_PER_TASK = "MaxAttemptsPerTask";
/** The number of concurrent tasks that are allowed to run on an instance. */
public static final String NUM_CONCURRENT_TASKS_PER_INSTANCE = "ConcurrentTasksPerInstance";
+ /** The number of tasks within the job that are allowed to fail. */
+ public static final String FAILURE_THRESHOLD = "FailureThreshold";
/** The individual task configurations, if any **/
public static final String TASK_CONFIGS = "TaskConfigs";
@@ -72,6 +74,7 @@ public class JobConfig {
public static final long DEFAULT_TIMEOUT_PER_TASK = 60 * 60 * 1000; // 1 hr.
public static final int DEFAULT_MAX_ATTEMPTS_PER_TASK = 10;
public static final int DEFAULT_NUM_CONCURRENT_TASKS_PER_INSTANCE = 1;
+ public static final int DEFAULT_FAILURE_THRESHOLD = 0;
private final String _workflow;
private final String _targetResource;
@@ -82,12 +85,13 @@ public class JobConfig {
private final long _timeoutPerTask;
private final int _numConcurrentTasksPerInstance;
private final int _maxAttemptsPerTask;
+ private final int _failureThreshold;
private final Map<String, TaskConfig> _taskConfigMap;
private JobConfig(String workflow, String targetResource, List<String> targetPartitions,
Set<String> targetPartitionStates, String command, Map<String, String> jobConfigMap,
long timeoutPerTask, int numConcurrentTasksPerInstance, int maxAttemptsPerTask,
- Map<String, TaskConfig> taskConfigMap) {
+ int failureThreshold, Map<String, TaskConfig> taskConfigMap) {
_workflow = workflow;
_targetResource = targetResource;
_targetPartitions = targetPartitions;
@@ -97,6 +101,7 @@ public class JobConfig {
_timeoutPerTask = timeoutPerTask;
_numConcurrentTasksPerInstance = numConcurrentTasksPerInstance;
_maxAttemptsPerTask = maxAttemptsPerTask;
+ _failureThreshold = failureThreshold;
if (taskConfigMap != null) {
_taskConfigMap = taskConfigMap;
} else {
@@ -140,6 +145,10 @@ public class JobConfig {
return _maxAttemptsPerTask;
}
+ public int getFailureThreshold() {
+ return _failureThreshold;
+ }
+
public Map<String, TaskConfig> getTaskConfigMap() {
return _taskConfigMap;
}
@@ -171,6 +180,7 @@ public class JobConfig {
}
cfgMap.put(JobConfig.TIMEOUT_PER_TASK, "" + _timeoutPerTask);
cfgMap.put(JobConfig.MAX_ATTEMPTS_PER_TASK, "" + _maxAttemptsPerTask);
+ cfgMap.put(JobConfig.FAILURE_THRESHOLD, "" + _failureThreshold);
return cfgMap;
}
@@ -188,13 +198,14 @@ public class JobConfig {
private long _timeoutPerTask = DEFAULT_TIMEOUT_PER_TASK;
private int _numConcurrentTasksPerInstance = DEFAULT_NUM_CONCURRENT_TASKS_PER_INSTANCE;
private int _maxAttemptsPerTask = DEFAULT_MAX_ATTEMPTS_PER_TASK;
+ private int _failureThreshold = DEFAULT_FAILURE_THRESHOLD;
public JobConfig build() {
validate();
return new JobConfig(_workflow, _targetResource, _targetPartitions, _targetPartitionStates,
_command, _commandConfig, _timeoutPerTask, _numConcurrentTasksPerInstance,
- _maxAttemptsPerTask, _taskConfigMap);
+ _maxAttemptsPerTask, _failureThreshold, _taskConfigMap);
}
/**
@@ -235,6 +246,9 @@ public class JobConfig {
if (cfg.containsKey(MAX_ATTEMPTS_PER_TASK)) {
b.setMaxAttemptsPerTask(Integer.parseInt(cfg.get(MAX_ATTEMPTS_PER_TASK)));
}
+ if (cfg.containsKey(FAILURE_THRESHOLD)) {
+ b.setFailureThreshold(Integer.parseInt(cfg.get(FAILURE_THRESHOLD)));
+ }
return b;
}
@@ -283,6 +297,11 @@ public class JobConfig {
return this;
}
+ public Builder setFailureThreshold(int v) {
+ _failureThreshold = v;
+ return this;
+ }
+
public Builder addTaskConfigs(List<TaskConfig> taskConfigs) {
if (taskConfigs != null) {
for (TaskConfig taskConfig : taskConfigs) {
@@ -321,6 +340,10 @@ public class JobConfig {
throw new IllegalArgumentException(String.format("%s has invalid value %s",
MAX_ATTEMPTS_PER_TASK, _maxAttemptsPerTask));
}
+ if (_failureThreshold < 0) {
+ throw new IllegalArgumentException(String.format("%s has invalid value %s",
+ FAILURE_THRESHOLD, _failureThreshold));
+ }
if (_workflow == null) {
throw new IllegalArgumentException(String.format("%s cannot be null", WORKFLOW_ID));
}
http://git-wip-us.apache.org/repos/asf/helix/blob/c5921f42/helix-core/src/main/java/org/apache/helix/task/TaskConfig.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/TaskConfig.java b/helix-core/src/main/java/org/apache/helix/task/TaskConfig.java
index 547ba48..4ddab1a 100644
--- a/helix-core/src/main/java/org/apache/helix/task/TaskConfig.java
+++ b/helix-core/src/main/java/org/apache/helix/task/TaskConfig.java
@@ -35,7 +35,8 @@ import com.google.common.collect.Maps;
public class TaskConfig {
private enum TaskConfigFields {
TASK_ID,
- TASK_COMMAND
+ TASK_COMMAND,
+ TASK_SUCCESS_OPTIONAL
}
private static final Logger LOG = Logger.getLogger(TaskConfig.class);
@@ -46,9 +47,12 @@ public class TaskConfig {
* Instantiate the task config
* @param command the command to invoke for the task
* @param configMap configuration to be passed as part of the invocation
+ * @param successOptional true if this task need not pass for the job to succeed, false
+ * otherwise
* @param id existing task ID
*/
- public TaskConfig(String command, Map<String, String> configMap, String id) {
+ public TaskConfig(String command, Map<String, String> configMap, boolean successOptional,
+ String id) {
if (configMap == null) {
configMap = Maps.newHashMap();
}
@@ -56,6 +60,8 @@ public class TaskConfig {
id = UUID.randomUUID().toString();
}
configMap.put(TaskConfigFields.TASK_COMMAND.toString(), command);
+ configMap.put(TaskConfigFields.TASK_SUCCESS_OPTIONAL.toString(),
+ Boolean.toString(successOptional));
configMap.put(TaskConfigFields.TASK_ID.toString(), id);
_configMap = configMap;
}
@@ -64,9 +70,11 @@ public class TaskConfig {
* Instantiate the task config
* @param command the command to invoke for the task
* @param configMap configuration to be passed as part of the invocation
+ * @param successOptional true if this task need not pass for the job to succeed, false
+ * otherwise
*/
- public TaskConfig(String command, Map<String, String> configMap) {
- this(command, configMap, null);
+ public TaskConfig(String command, Map<String, String> configMap, boolean successOptional) {
+ this(command, configMap, successOptional, null);
}
/**
@@ -86,6 +94,19 @@ public class TaskConfig {
}
/**
+ * Check if this task must succeed for a job to succeed
+ * @return true if success is optional, false otherwise
+ */
+ public boolean isSuccessOptional() {
+ String successOptionalStr = _configMap.get(TaskConfigFields.TASK_SUCCESS_OPTIONAL.toString());
+ if (successOptionalStr == null) {
+ return false;
+ } else {
+ return Boolean.parseBoolean(successOptionalStr);
+ }
+ }
+
+ /**
* Get the configuration map for this task's command
* @return map of configuration key to value
*/
@@ -110,7 +131,7 @@ public class TaskConfig {
* @return instantiated TaskConfig
*/
public static TaskConfig from(TaskBean bean) {
- return new TaskConfig(bean.command, bean.taskConfigMap);
+ return new TaskConfig(bean.command, bean.taskConfigMap, bean.successOptional);
}
/**
@@ -121,6 +142,9 @@ public class TaskConfig {
public static TaskConfig from(Map<String, String> rawConfigMap) {
String taskId = rawConfigMap.get(TaskConfigFields.TASK_ID.toString());
String command = rawConfigMap.get(TaskConfigFields.TASK_COMMAND.toString());
- return new TaskConfig(command, rawConfigMap, taskId);
+ String successOptionalStr = rawConfigMap.get(TaskConfigFields.TASK_SUCCESS_OPTIONAL.toString());
+ boolean successOptional =
+ (successOptionalStr != null) ? Boolean.valueOf(successOptionalStr) : null;
+ return new TaskConfig(command, rawConfigMap, successOptional, taskId);
}
}
http://git-wip-us.apache.org/repos/asf/helix/blob/c5921f42/helix-core/src/main/java/org/apache/helix/task/TaskRebalancer.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/TaskRebalancer.java b/helix-core/src/main/java/org/apache/helix/task/TaskRebalancer.java
index e9f60f9..376eca5 100644
--- a/helix-core/src/main/java/org/apache/helix/task/TaskRebalancer.java
+++ b/helix-core/src/main/java/org/apache/helix/task/TaskRebalancer.java
@@ -213,6 +213,9 @@ public abstract class TaskRebalancer implements HelixRebalancer {
// Used to keep track of tasks that have already been assigned to instances.
Set<Integer> assignedPartitions = new HashSet<Integer>();
+ // Used to keep track of tasks that have failed, but whose failure is acceptable
+ Set<Integer> skippedPartitions = new HashSet<Integer>();
+
// Keeps a mapping of (partition) -> (instance, state)
Map<Integer, PartitionAssignment> paMap = new TreeMap<Integer, PartitionAssignment>();
@@ -227,7 +230,6 @@ public abstract class TaskRebalancer implements HelixRebalancer {
// TASK_ERROR, ERROR.
Set<Integer> donePartitions = new TreeSet<Integer>();
for (int pId : pSet) {
- jobCtx.setPartitionState(pId, TaskPartitionState.INIT);
final String pName = pName(jobResource, pId);
// Check for pending state transitions on this (partition, instance).
@@ -236,8 +238,7 @@ public abstract class TaskRebalancer implements HelixRebalancer {
instance);
if (pendingState != null) {
// There is a pending state transition for this (partition, instance). Just copy forward
- // the state
- // assignment from the previous ideal state.
+ // the state assignment from the previous ideal state.
Map<ParticipantId, State> stateMap =
prevAssignment.getReplicaMap(PartitionId.from(pName));
if (stateMap != null) {
@@ -290,8 +291,6 @@ public abstract class TaskRebalancer implements HelixRebalancer {
nextState = TaskPartitionState.STOPPED;
}
- jobCtx.setPartitionState(pId, currState);
-
paMap.put(pId, new PartitionAssignment(instance.toString(), nextState.name()));
assignedPartitions.add(pId);
LOG.debug(String.format("Setting task partition %s state to %s on instance %s.", pName,
@@ -318,13 +317,34 @@ public abstract class TaskRebalancer implements HelixRebalancer {
pName, currState));
markPartitionError(jobCtx, pId, currState);
// The error policy is to fail the task as soon a single partition fails for a specified
- // maximum number of
- // attempts.
+ // maximum number of attempts.
if (jobCtx.getPartitionNumAttempts(pId) >= jobCfg.getMaxAttemptsPerTask()) {
- workflowCtx.setJobState(jobResource, TaskState.FAILED);
- workflowCtx.setWorkflowState(TaskState.FAILED);
- addAllPartitions(allPartitions, partitionsToDropFromIs);
- return emptyAssignment(jobResource);
+ // If the user does not require this task to succeed in order for the job to succeed,
+ // then we don't have to fail the job right now
+ boolean successOptional = false;
+ String taskId = jobCtx.getTaskIdForPartition(pId);
+ if (taskId != null) {
+ TaskConfig taskConfig = jobCfg.getTaskConfig(taskId);
+ if (taskConfig != null) {
+ successOptional = taskConfig.isSuccessOptional();
+ }
+ }
+
+ // Similarly, if we have some leeway for how many tasks we can fail, then we don't have
+ // to fail the job immediately
+ if (skippedPartitions.size() < jobCfg.getFailureThreshold()) {
+ successOptional = true;
+ }
+
+ if (!successOptional) {
+ workflowCtx.setJobState(jobResource, TaskState.FAILED);
+ workflowCtx.setWorkflowState(TaskState.FAILED);
+ addAllPartitions(allPartitions, partitionsToDropFromIs);
+ return emptyAssignment(jobResource);
+ } else {
+ skippedPartitions.add(pId);
+ partitionsToDropFromIs.add(pId);
+ }
}
}
break;
@@ -346,7 +366,7 @@ public abstract class TaskRebalancer implements HelixRebalancer {
pSet.removeAll(donePartitions);
}
- if (isJobComplete(jobCtx, allPartitions)) {
+ if (isJobComplete(jobCtx, allPartitions, skippedPartitions)) {
workflowCtx.setJobState(jobResource, TaskState.COMPLETED);
if (isWorkflowComplete(workflowCtx, workflowConfig)) {
workflowCtx.setWorkflowState(TaskState.COMPLETED);
@@ -381,7 +401,6 @@ public abstract class TaskRebalancer implements HelixRebalancer {
paMap.put(pId,
new PartitionAssignment(instance.toString(), TaskPartitionState.RUNNING.name()));
excludeSet.add(pId);
- jobCtx.setPartitionState(pId, TaskPartitionState.INIT);
jobCtx.setAssignedParticipant(pId, instance.toString());
LOG.debug(String.format("Setting task partition %s state to %s on instance %s.", pName,
TaskPartitionState.RUNNING, instance));
@@ -397,7 +416,6 @@ public abstract class TaskRebalancer implements HelixRebalancer {
ra.addReplicaMap(PartitionId.from(pName(jobResource, e.getKey())),
ImmutableMap.of(ParticipantId.from(pa._instance), State.from(pa._state)));
}
-
return ra;
}
@@ -405,14 +423,16 @@ public abstract class TaskRebalancer implements HelixRebalancer {
* Checks if the job has completed.
* @param ctx The rebalancer context.
* @param allPartitions The set of partitions to check.
+ * @param skippedPartitions partitions that failed, but whose failure is acceptable
* @return true if all task partitions have been marked with status
* {@link TaskPartitionState#COMPLETED} in the rebalancer
* context, false otherwise.
*/
- private static boolean isJobComplete(JobContext ctx, Set<Integer> allPartitions) {
+ private static boolean isJobComplete(JobContext ctx, Set<Integer> allPartitions,
+ Set<Integer> skippedPartitions) {
for (Integer pId : allPartitions) {
TaskPartitionState state = ctx.getPartitionState(pId);
- if (state != TaskPartitionState.COMPLETED) {
+ if (!skippedPartitions.contains(pId) && state != TaskPartitionState.COMPLETED) {
return false;
}
}
http://git-wip-us.apache.org/repos/asf/helix/blob/c5921f42/helix-core/src/main/java/org/apache/helix/task/Workflow.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/Workflow.java b/helix-core/src/main/java/org/apache/helix/task/Workflow.java
index 1a41e06..8afafe4 100644
--- a/helix-core/src/main/java/org/apache/helix/task/Workflow.java
+++ b/helix-core/src/main/java/org/apache/helix/task/Workflow.java
@@ -185,6 +185,8 @@ public class Workflow {
String.valueOf(job.numConcurrentTasksPerInstance));
builder.addConfig(job.name, JobConfig.TIMEOUT_PER_TASK,
String.valueOf(job.timeoutPerPartition));
+ builder
+ .addConfig(job.name, JobConfig.FAILURE_THRESHOLD, String.valueOf(job.failureThreshold));
if (job.tasks != null) {
List<TaskConfig> taskConfigs = Lists.newArrayList();
for (TaskBean task : job.tasks) {
http://git-wip-us.apache.org/repos/asf/helix/blob/c5921f42/helix-core/src/main/java/org/apache/helix/task/beans/JobBean.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/beans/JobBean.java b/helix-core/src/main/java/org/apache/helix/task/beans/JobBean.java
index 5e12f19..af5882c 100644
--- a/helix-core/src/main/java/org/apache/helix/task/beans/JobBean.java
+++ b/helix-core/src/main/java/org/apache/helix/task/beans/JobBean.java
@@ -39,4 +39,5 @@ public class JobBean {
public long timeoutPerPartition = JobConfig.DEFAULT_TIMEOUT_PER_TASK;
public int numConcurrentTasksPerInstance = JobConfig.DEFAULT_NUM_CONCURRENT_TASKS_PER_INSTANCE;
public int maxAttemptsPerPartition = JobConfig.DEFAULT_MAX_ATTEMPTS_PER_TASK;
+ public int failureThreshold = JobConfig.DEFAULT_FAILURE_THRESHOLD;
}
http://git-wip-us.apache.org/repos/asf/helix/blob/c5921f42/helix-core/src/main/java/org/apache/helix/task/beans/TaskBean.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/beans/TaskBean.java b/helix-core/src/main/java/org/apache/helix/task/beans/TaskBean.java
index eedccb5..97ecfc0 100644
--- a/helix-core/src/main/java/org/apache/helix/task/beans/TaskBean.java
+++ b/helix-core/src/main/java/org/apache/helix/task/beans/TaskBean.java
@@ -29,4 +29,5 @@ import java.util.Map;
public class TaskBean {
public String command;
public Map<String, String> taskConfigMap;
+ public boolean successOptional = false;
}
http://git-wip-us.apache.org/repos/asf/helix/blob/c5921f42/helix-core/src/test/java/org/apache/helix/integration/task/TestIndependentTaskRebalancer.java
----------------------------------------------------------------------
diff --git a/helix-core/src/test/java/org/apache/helix/integration/task/TestIndependentTaskRebalancer.java b/helix-core/src/test/java/org/apache/helix/integration/task/TestIndependentTaskRebalancer.java
index 1ee3991..5dad94c 100644
--- a/helix-core/src/test/java/org/apache/helix/integration/task/TestIndependentTaskRebalancer.java
+++ b/helix-core/src/test/java/org/apache/helix/integration/task/TestIndependentTaskRebalancer.java
@@ -41,6 +41,7 @@ import org.apache.helix.task.TaskConfig;
import org.apache.helix.task.TaskDriver;
import org.apache.helix.task.TaskFactory;
import org.apache.helix.task.TaskResult;
+import org.apache.helix.task.TaskResult.Status;
import org.apache.helix.task.TaskState;
import org.apache.helix.task.TaskStateModelFactory;
import org.apache.helix.task.Workflow;
@@ -51,6 +52,7 @@ import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
import org.testng.collections.Sets;
+import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
@@ -131,8 +133,63 @@ public class TestIndependentTaskRebalancer extends ZkIntegrationTestBase {
String jobName = TestHelper.getTestMethodName();
Workflow.Builder workflowBuilder = new Workflow.Builder(jobName);
List<TaskConfig> taskConfigs = Lists.newArrayListWithCapacity(2);
- TaskConfig taskConfig1 = new TaskConfig("TaskOne", null);
- TaskConfig taskConfig2 = new TaskConfig("TaskTwo", null);
+ TaskConfig taskConfig1 = new TaskConfig("TaskOne", null, true);
+ TaskConfig taskConfig2 = new TaskConfig("TaskTwo", null, true);
+ taskConfigs.add(taskConfig1);
+ taskConfigs.add(taskConfig2);
+ workflowBuilder.addTaskConfigs(jobName, taskConfigs);
+ workflowBuilder.addConfig(jobName, JobConfig.COMMAND, "DummyCommand");
+ Map<String, String> jobConfigMap = Maps.newHashMap();
+ jobConfigMap.put("Timeout", "1000");
+ workflowBuilder.addJobConfigMap(jobName, jobConfigMap);
+ _driver.start(workflowBuilder.build());
+
+ // Ensure the job completes
+ TestUtil.pollForWorkflowState(_manager, jobName, TaskState.IN_PROGRESS);
+ TestUtil.pollForWorkflowState(_manager, jobName, TaskState.COMPLETED);
+
+ // Ensure that each class was invoked
+ Assert.assertTrue(_invokedClasses.contains(TaskOne.class.getName()));
+ Assert.assertTrue(_invokedClasses.contains(TaskTwo.class.getName()));
+ }
+
+ @Test
+ public void testThresholdFailure() throws Exception {
+ // Create a job with two different tasks
+ String jobName = TestHelper.getTestMethodName();
+ Workflow.Builder workflowBuilder = new Workflow.Builder(jobName);
+ List<TaskConfig> taskConfigs = Lists.newArrayListWithCapacity(2);
+ Map<String, String> taskConfigMap = Maps.newHashMap(ImmutableMap.of("fail", "" + true));
+ TaskConfig taskConfig1 = new TaskConfig("TaskOne", taskConfigMap, false);
+ TaskConfig taskConfig2 = new TaskConfig("TaskTwo", null, false);
+ taskConfigs.add(taskConfig1);
+ taskConfigs.add(taskConfig2);
+ workflowBuilder.addTaskConfigs(jobName, taskConfigs);
+ workflowBuilder.addConfig(jobName, JobConfig.COMMAND, "DummyCommand");
+ workflowBuilder.addConfig(jobName, JobConfig.FAILURE_THRESHOLD, "" + 1);
+ Map<String, String> jobConfigMap = Maps.newHashMap();
+ jobConfigMap.put("Timeout", "1000");
+ workflowBuilder.addJobConfigMap(jobName, jobConfigMap);
+ _driver.start(workflowBuilder.build());
+
+ // Ensure the job completes
+ TestUtil.pollForWorkflowState(_manager, jobName, TaskState.IN_PROGRESS);
+ TestUtil.pollForWorkflowState(_manager, jobName, TaskState.COMPLETED);
+
+ // Ensure that each class was invoked
+ Assert.assertTrue(_invokedClasses.contains(TaskOne.class.getName()));
+ Assert.assertTrue(_invokedClasses.contains(TaskTwo.class.getName()));
+ }
+
+ @Test
+ public void testOptionalTaskFailure() throws Exception {
+ // Create a job with two different tasks
+ String jobName = TestHelper.getTestMethodName();
+ Workflow.Builder workflowBuilder = new Workflow.Builder(jobName);
+ List<TaskConfig> taskConfigs = Lists.newArrayListWithCapacity(2);
+ Map<String, String> taskConfigMap = Maps.newHashMap(ImmutableMap.of("fail", "" + true));
+ TaskConfig taskConfig1 = new TaskConfig("TaskOne", taskConfigMap, true);
+ TaskConfig taskConfig2 = new TaskConfig("TaskTwo", null, false);
taskConfigs.add(taskConfig1);
taskConfigs.add(taskConfig2);
workflowBuilder.addTaskConfigs(jobName, taskConfigs);
@@ -152,13 +209,33 @@ public class TestIndependentTaskRebalancer extends ZkIntegrationTestBase {
}
private class TaskOne extends ReindexTask {
+ private final boolean _shouldFail;
+
public TaskOne(TaskCallbackContext context) {
super(context);
+
+ // Check whether or not this task should succeed
+ TaskConfig taskConfig = context.getTaskConfig();
+ boolean shouldFail = false;
+ if (taskConfig != null) {
+ Map<String, String> configMap = taskConfig.getConfigMap();
+ if (configMap != null && configMap.containsKey("fail")
+ && Boolean.parseBoolean(configMap.get("fail"))) {
+ shouldFail = true;
+ }
+ }
+ _shouldFail = shouldFail;
}
@Override
public TaskResult run() {
_invokedClasses.add(getClass().getName());
+
+ // Fail the task if it should fail
+ if (_shouldFail) {
+ return new TaskResult(Status.ERROR, null);
+ }
+
return super.run();
}
}
[18/50] [abbrv] git commit: Moving packages around
Posted by ka...@apache.org.
Moving packages around
Project: http://git-wip-us.apache.org/repos/asf/helix/repo
Commit: http://git-wip-us.apache.org/repos/asf/helix/commit/8992aa5a
Tree: http://git-wip-us.apache.org/repos/asf/helix/tree/8992aa5a
Diff: http://git-wip-us.apache.org/repos/asf/helix/diff/8992aa5a
Branch: refs/heads/master
Commit: 8992aa5a7e7bd39b63aa79ea96a9fe316f833014
Parents: b47e329
Author: Kishore Gopalakrishna <g....@gmail.com>
Authored: Sat Feb 22 11:50:07 2014 -0800
Committer: Kishore Gopalakrishna <g....@gmail.com>
Committed: Sat Feb 22 11:50:07 2014 -0800
----------------------------------------------------------------------
.../controller/provisioner/ContainerState.java | 3 +-
helix-provisioning/pom.xml | 2 +-
.../apache/helix/provisioning/AppConfig.java | 17 +
.../helix/provisioning/ApplicationSpec.java | 29 +
.../provisioning/ApplicationSpecFactory.java | 9 +
.../provisioning/ContainerAskResponse.java | 17 +
.../provisioning/ContainerLaunchResponse.java | 5 +
.../provisioning/ContainerReleaseResponse.java | 5 +
.../provisioning/ContainerStopResponse.java | 5 +
.../helix/provisioning/HelixYarnUtil.java | 42 +
.../helix/provisioning/ParticipantLauncher.java | 136 +++
.../helix/provisioning/ServiceConfig.java | 17 +
.../apache/helix/provisioning/TaskConfig.java | 13 +
.../helix/provisioning/yarn/AppConfig.java | 17 -
.../helix/provisioning/yarn/AppLauncher.java | 22 +-
.../provisioning/yarn/AppMasterLauncher.java | 166 ++++
.../yarn/AppStatusReportGenerator.java | 79 ++
.../provisioning/yarn/ApplicationMaster.java | 889 -------------------
.../provisioning/yarn/ApplicationSpec.java | 28 -
.../yarn/ApplicationSpecFactory.java | 9 -
.../apache/helix/provisioning/yarn/Client.java | 627 -------------
.../provisioning/yarn/ContainerAskResponse.java | 17 -
.../yarn/ContainerLaunchResponse.java | 5 -
.../yarn/ContainerReleaseResponse.java | 5 -
.../yarn/ContainerStopResponse.java | 5 -
.../helix/provisioning/yarn/DSConstants.java | 47 -
.../provisioning/yarn/FixedTargetProvider.java | 20 +
.../yarn/GenericApplicationMaster.java | 4 +
.../yarn/HelixYarnApplicationMasterMain.java | 159 ----
.../helix/provisioning/yarn/HelixYarnUtil.java | 42 -
.../provisioning/yarn/NMCallbackHandler.java | 2 +
.../provisioning/yarn/ParticipantLauncher.java | 135 ---
.../provisioning/yarn/RMCallbackHandler.java | 3 +
.../helix/provisioning/yarn/ServiceConfig.java | 17 -
.../helix/provisioning/yarn/TaskConfig.java | 13 -
.../provisioning/yarn/YarnProvisioner.java | 6 +
recipes/helloworld-provisioning-yarn/pom.xml | 159 ++++
recipes/helloworld-provisioning-yarn/run.sh | 6 +
.../src/assemble/assembly.xml | 60 ++
.../src/main/config/log4j.properties | 31 +
.../yarn/example/HelloWordAppSpecFactory.java | 92 ++
.../yarn/example/HelloWorldService.java | 41 +
.../yarn/example/HelloWorldStateModel.java | 33 +
.../example/HelloWorldStateModelFactory.java | 13 +
.../yarn/example/HelloworldAppSpec.java | 138 +++
.../main/resources/hello_world_app_spec.yaml | 24 +
.../src/test/conf/testng.xml | 27 +
recipes/pom.xml | 2 +-
recipes/provisioning/pom.xml | 50 --
recipes/provisioning/yarn/helloworld/pom.xml | 159 ----
.../yarn/helloworld/src/assemble/assembly.xml | 60 --
.../helloworld/src/main/config/log4j.properties | 31 -
.../yarn/example/HelloWordAppSpecFactory.java | 92 --
.../yarn/example/HelloWorldService.java | 41 -
.../yarn/example/HelloWorldStateModel.java | 29 -
.../example/HelloWorldStateModelFactory.java | 13 -
.../yarn/example/HelloworldAppSpec.java | 138 ---
.../main/resources/hello_world_app_spec.yaml | 24 -
.../yarn/helloworld/src/test/conf/testng.xml | 27 -
recipes/provisioning/yarn/pom.xml | 50 --
60 files changed, 1222 insertions(+), 2735 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/helix/blob/8992aa5a/helix-core/src/main/java/org/apache/helix/controller/provisioner/ContainerState.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/controller/provisioner/ContainerState.java b/helix-core/src/main/java/org/apache/helix/controller/provisioner/ContainerState.java
index 449f636..2f91275 100644
--- a/helix-core/src/main/java/org/apache/helix/controller/provisioner/ContainerState.java
+++ b/helix-core/src/main/java/org/apache/helix/controller/provisioner/ContainerState.java
@@ -29,5 +29,6 @@ public enum ContainerState {
HALTED,
FINALIZING,
FINALIZED,
- FAILED
+ FAILED,
+ UNDEFINED
}
http://git-wip-us.apache.org/repos/asf/helix/blob/8992aa5a/helix-provisioning/pom.xml
----------------------------------------------------------------------
diff --git a/helix-provisioning/pom.xml b/helix-provisioning/pom.xml
index ea5a0fe..3ba7d39 100644
--- a/helix-provisioning/pom.xml
+++ b/helix-provisioning/pom.xml
@@ -36,7 +36,7 @@ under the License.
org.apache.log4j,
*
</osgi.import>
- <osgi.export>org.apache.helix.provisioning.yarn*;version="${project.version};-noimport:=true</osgi.export>
+ <osgi.export>org.apache.helix.provisioning*;version="${project.version};-noimport:=true</osgi.export>
</properties>
<dependencies>
http://git-wip-us.apache.org/repos/asf/helix/blob/8992aa5a/helix-provisioning/src/main/java/org/apache/helix/provisioning/AppConfig.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/AppConfig.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/AppConfig.java
new file mode 100644
index 0000000..a51db1c
--- /dev/null
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/AppConfig.java
@@ -0,0 +1,17 @@
+package org.apache.helix.provisioning;
+
+import java.util.HashMap;
+import java.util.Map;
+
+
+public class AppConfig {
+ public Map<String, String> config = new HashMap<String, String>();
+
+ public String getValue(String key) {
+ return (config != null ? config.get(key) : null);
+ }
+
+ public void setValue(String key, String value){
+ config.put(key, value);
+ }
+}
http://git-wip-us.apache.org/repos/asf/helix/blob/8992aa5a/helix-provisioning/src/main/java/org/apache/helix/provisioning/ApplicationSpec.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/ApplicationSpec.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/ApplicationSpec.java
new file mode 100644
index 0000000..f7454d2
--- /dev/null
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/ApplicationSpec.java
@@ -0,0 +1,29 @@
+package org.apache.helix.provisioning;
+
+import java.net.URI;
+import java.util.List;
+
+
+
+public interface ApplicationSpec {
+ /**
+ * Returns the name of the application
+ * @return
+ */
+ String getAppName();
+
+ AppConfig getConfig();
+
+ List<String> getServices();
+
+ URI getAppMasterPackage();
+
+ URI getServicePackage(String serviceName);
+
+ String getServiceMainClass(String service);
+
+ ServiceConfig getServiceConfig(String serviceName);
+
+ List<TaskConfig> getTaskConfigs();
+
+}
http://git-wip-us.apache.org/repos/asf/helix/blob/8992aa5a/helix-provisioning/src/main/java/org/apache/helix/provisioning/ApplicationSpecFactory.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/ApplicationSpecFactory.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/ApplicationSpecFactory.java
new file mode 100644
index 0000000..0c524f2
--- /dev/null
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/ApplicationSpecFactory.java
@@ -0,0 +1,9 @@
+package org.apache.helix.provisioning;
+
+import java.io.InputStream;
+
+public interface ApplicationSpecFactory {
+
+ ApplicationSpec fromYaml(InputStream yamlFile);
+
+}
http://git-wip-us.apache.org/repos/asf/helix/blob/8992aa5a/helix-provisioning/src/main/java/org/apache/helix/provisioning/ContainerAskResponse.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/ContainerAskResponse.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/ContainerAskResponse.java
new file mode 100644
index 0000000..18f66d2
--- /dev/null
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/ContainerAskResponse.java
@@ -0,0 +1,17 @@
+package org.apache.helix.provisioning;
+
+import org.apache.hadoop.yarn.api.records.Container;
+
+public class ContainerAskResponse {
+
+ Container container;
+
+ public Container getContainer() {
+ return container;
+ }
+
+ public void setContainer(Container container) {
+ this.container = container;
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/helix/blob/8992aa5a/helix-provisioning/src/main/java/org/apache/helix/provisioning/ContainerLaunchResponse.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/ContainerLaunchResponse.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/ContainerLaunchResponse.java
new file mode 100644
index 0000000..ea6ef12
--- /dev/null
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/ContainerLaunchResponse.java
@@ -0,0 +1,5 @@
+package org.apache.helix.provisioning;
+
+public class ContainerLaunchResponse {
+
+}
http://git-wip-us.apache.org/repos/asf/helix/blob/8992aa5a/helix-provisioning/src/main/java/org/apache/helix/provisioning/ContainerReleaseResponse.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/ContainerReleaseResponse.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/ContainerReleaseResponse.java
new file mode 100644
index 0000000..e4a5dc4
--- /dev/null
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/ContainerReleaseResponse.java
@@ -0,0 +1,5 @@
+package org.apache.helix.provisioning;
+
+public class ContainerReleaseResponse {
+
+}
http://git-wip-us.apache.org/repos/asf/helix/blob/8992aa5a/helix-provisioning/src/main/java/org/apache/helix/provisioning/ContainerStopResponse.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/ContainerStopResponse.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/ContainerStopResponse.java
new file mode 100644
index 0000000..d8c8a46
--- /dev/null
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/ContainerStopResponse.java
@@ -0,0 +1,5 @@
+package org.apache.helix.provisioning;
+
+public class ContainerStopResponse {
+
+}
http://git-wip-us.apache.org/repos/asf/helix/blob/8992aa5a/helix-provisioning/src/main/java/org/apache/helix/provisioning/HelixYarnUtil.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/HelixYarnUtil.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/HelixYarnUtil.java
new file mode 100644
index 0000000..80ac16b
--- /dev/null
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/HelixYarnUtil.java
@@ -0,0 +1,42 @@
+package org.apache.helix.provisioning;
+
+import org.apache.log4j.Logger;
+
+public class HelixYarnUtil {
+ private static Logger LOG = Logger.getLogger(HelixYarnUtil.class);
+
+ @SuppressWarnings("unchecked")
+ public static <T extends ApplicationSpecFactory> T createInstance(String className) {
+ Class<ApplicationSpecFactory> factoryClazz = null;
+ {
+ try {
+ factoryClazz =
+ (Class<ApplicationSpecFactory>) Thread.currentThread().getContextClassLoader()
+ .loadClass(className);
+ } catch (ClassNotFoundException e) {
+ try {
+ factoryClazz =
+ (Class<ApplicationSpecFactory>) ClassLoader.getSystemClassLoader().loadClass(
+ className);
+ } catch (ClassNotFoundException e1) {
+ try {
+ factoryClazz = (Class<ApplicationSpecFactory>) Class.forName(className);
+ } catch (ClassNotFoundException e2) {
+
+ }
+ }
+ }
+ }
+ System.out.println(System.getProperty("java.class.path"));
+ if (factoryClazz == null) {
+ LOG.error("Unable to find class:" + className);
+ }
+ ApplicationSpecFactory factory = null;
+ try {
+ factory = factoryClazz.newInstance();
+ } catch (Exception e) {
+ LOG.error("Unable to create instance of class: " + className, e);
+ }
+ return (T) factory;
+ }
+}
http://git-wip-us.apache.org/repos/asf/helix/blob/8992aa5a/helix-provisioning/src/main/java/org/apache/helix/provisioning/ParticipantLauncher.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/ParticipantLauncher.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/ParticipantLauncher.java
new file mode 100644
index 0000000..55bb618
--- /dev/null
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/ParticipantLauncher.java
@@ -0,0 +1,136 @@
+package org.apache.helix.provisioning;
+
+import java.util.Arrays;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.GnuParser;
+import org.apache.commons.cli.Options;
+import org.apache.helix.HelixConnection;
+import org.apache.helix.NotificationContext;
+import org.apache.helix.api.id.ClusterId;
+import org.apache.helix.api.id.ParticipantId;
+import org.apache.helix.manager.zk.AbstractParticipantService;
+import org.apache.helix.manager.zk.ZkHelixConnection;
+import org.apache.helix.messaging.handling.HelixTaskResult;
+import org.apache.helix.messaging.handling.MessageHandler;
+import org.apache.helix.messaging.handling.MessageHandlerFactory;
+import org.apache.helix.model.Message;
+import org.apache.helix.model.Message.MessageType;
+import org.apache.log4j.Logger;
+
+/**
+ * Main class that invokes the Participant Api
+ */
+public class ParticipantLauncher {
+ private static Logger LOG = Logger.getLogger(ParticipantLauncher.class);
+
+ public static void main(String[] args) {
+
+ System.out.println("Starting Helix Participant: " + Arrays.toString(args));
+ Options opts;
+ opts = new Options();
+ opts.addOption("cluster", true, "Cluster name, default app name");
+ opts.addOption("participantId", true, "Participant Id");
+ opts.addOption("zkAddress", true, "Zookeeper address");
+ opts.addOption("participantClass", true, "Participant service class");
+ try {
+ CommandLine cliParser = new GnuParser().parse(opts, args);
+ String zkAddress = cliParser.getOptionValue("zkAddress");
+ final HelixConnection connection = new ZkHelixConnection(zkAddress);
+ connection.connect();
+ ClusterId clusterId = ClusterId.from(cliParser.getOptionValue("cluster"));
+ ParticipantId participantId = ParticipantId.from(cliParser.getOptionValue("participantId"));
+ String participantClass = cliParser.getOptionValue("participantClass");
+ @SuppressWarnings("unchecked")
+ Class<? extends AbstractParticipantService> clazz =
+ (Class<? extends AbstractParticipantService>) Class.forName(participantClass);
+ final AbstractParticipantService containerParticipant =
+ clazz.getConstructor(HelixConnection.class, ClusterId.class, ParticipantId.class)
+ .newInstance(connection, clusterId, participantId);
+ containerParticipant.startAsync();
+ containerParticipant.awaitRunning(60, TimeUnit.SECONDS);
+ containerParticipant
+ .getParticipant()
+ .getMessagingService()
+ .registerMessageHandlerFactory(MessageType.SHUTDOWN.toString(),
+ new ShutdownMessageHandlerFactory(containerParticipant, connection));
+ Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() {
+ @Override
+ public void run() {
+ LOG.info("Received a shutdown signal. Stopping participant");
+ containerParticipant.stopAsync();
+ containerParticipant.awaitTerminated();
+ connection.disconnect();
+ }
+ }) {
+
+ });
+ Thread.currentThread().join();
+ } catch (Exception e) {
+ e.printStackTrace();
+ System.out.println("Failed to start Helix participant" + e);
+ // System.exit(1);
+ }
+ try {
+ Thread.currentThread().join();
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+
+ }
+
+ public static class ShutdownMessageHandlerFactory implements MessageHandlerFactory {
+ private final AbstractParticipantService _service;
+ private final HelixConnection _connection;
+
+ public ShutdownMessageHandlerFactory(AbstractParticipantService service,
+ HelixConnection connection) {
+ _service = service;
+ _connection = connection;
+ }
+
+ @Override
+ public MessageHandler createHandler(Message message, NotificationContext context) {
+ return new ShutdownMessageHandler(_service, _connection, message, context);
+ }
+
+ @Override
+ public String getMessageType() {
+ return MessageType.SHUTDOWN.toString();
+ }
+
+ @Override
+ public void reset() {
+ }
+
+ }
+
+ public static class ShutdownMessageHandler extends MessageHandler {
+ private final AbstractParticipantService _service;
+ private final HelixConnection _connection;
+
+ public ShutdownMessageHandler(AbstractParticipantService service, HelixConnection connection,
+ Message message, NotificationContext context) {
+ super(message, context);
+ _service = service;
+ _connection = connection;
+ }
+
+ @Override
+ public HelixTaskResult handleMessage() throws InterruptedException {
+ LOG.info("Received a shutdown message. Trying to shut down.");
+ _service.stopAsync();
+ _service.awaitTerminated();
+ _connection.disconnect();
+ LOG.info("Shutdown complete. Process exiting gracefully");
+ System.exit(0);
+ return null;
+ }
+
+ @Override
+ public void onError(Exception e, ErrorCode code, ErrorType type) {
+ }
+
+ }
+}
http://git-wip-us.apache.org/repos/asf/helix/blob/8992aa5a/helix-provisioning/src/main/java/org/apache/helix/provisioning/ServiceConfig.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/ServiceConfig.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/ServiceConfig.java
new file mode 100644
index 0000000..262344b
--- /dev/null
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/ServiceConfig.java
@@ -0,0 +1,17 @@
+package org.apache.helix.provisioning;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.helix.api.Scope;
+import org.apache.helix.api.config.UserConfig;
+import org.apache.helix.api.id.ResourceId;
+
+public class ServiceConfig extends UserConfig{
+ public Map<String, String> config = new HashMap<String, String>();
+
+ public ServiceConfig(Scope<ResourceId> scope) {
+ super(scope);
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/helix/blob/8992aa5a/helix-provisioning/src/main/java/org/apache/helix/provisioning/TaskConfig.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/TaskConfig.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/TaskConfig.java
new file mode 100644
index 0000000..42203e9
--- /dev/null
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/TaskConfig.java
@@ -0,0 +1,13 @@
+package org.apache.helix.provisioning;
+
+import java.util.HashMap;
+import java.util.Map;
+
+
+public class TaskConfig {
+ public Map<String, String> config = new HashMap<String, String>();
+
+ public String getValue(String key) {
+ return (config != null ? config.get(key) : null);
+ }
+}
http://git-wip-us.apache.org/repos/asf/helix/blob/8992aa5a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppConfig.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppConfig.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppConfig.java
deleted file mode 100644
index 7ea3e42..0000000
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppConfig.java
+++ /dev/null
@@ -1,17 +0,0 @@
-package org.apache.helix.provisioning.yarn;
-
-import java.util.HashMap;
-import java.util.Map;
-
-
-public class AppConfig {
- public Map<String, String> config = new HashMap<String, String>();
-
- public String getValue(String key) {
- return (config != null ? config.get(key) : null);
- }
-
- public void setValue(String key, String value){
- config.put(key, value);
- }
-}
http://git-wip-us.apache.org/repos/asf/helix/blob/8992aa5a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppLauncher.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppLauncher.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppLauncher.java
index 1fe0a28..d2e901f 100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppLauncher.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppLauncher.java
@@ -44,6 +44,12 @@ import org.apache.hadoop.yarn.client.api.YarnClientApplication;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.hadoop.yarn.util.Records;
+import org.apache.helix.HelixConnection;
+import org.apache.helix.api.id.ClusterId;
+import org.apache.helix.manager.zk.ZkHelixConnection;
+import org.apache.helix.provisioning.ApplicationSpec;
+import org.apache.helix.provisioning.ApplicationSpecFactory;
+import org.apache.helix.provisioning.HelixYarnUtil;
/**
* Main class to launch the job.
@@ -52,7 +58,7 @@ import org.apache.hadoop.yarn.util.Records;
*/
public class AppLauncher {
- private static final Log LOG = LogFactory.getLog(Client.class);
+ private static final Log LOG = LogFactory.getLog(AppLauncher.class);
private ApplicationSpec _applicationSpec;
private YarnClient yarnClient;
@@ -197,7 +203,7 @@ public class AppLauncher {
// Set Xmx based on am memory size
vargs.add("-Xmx" + amMemory + "m");
// Set class name
- vargs.add(HelixYarnApplicationMasterMain.class.getCanonicalName());
+ vargs.add(AppMasterLauncher.class.getCanonicalName());
// Set params for Application Master
// vargs.add("--num_containers " + String.valueOf(numContainers));
@@ -375,7 +381,17 @@ public class AppLauncher {
return false;
}
if (YarnApplicationState.RUNNING == state) {
-
+ HelixConnection connection = new ZkHelixConnection(report.getHost() + ":2181");
+ try{
+ connection.connect();
+ }catch(Exception e){
+ LOG.warn("AppMaster started but not yet initialized");
+ }
+ if(connection.isConnected()){
+ AppStatusReportGenerator generator = new AppStatusReportGenerator();
+ String generateReport = generator.generateReport(connection, ClusterId.from(_applicationSpec.getAppName()));
+ LOG.info(generateReport);
+ }
}
prevReport = reportMessage;
Thread.sleep(10000);
http://git-wip-us.apache.org/repos/asf/helix/blob/8992aa5a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppMasterLauncher.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppMasterLauncher.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppMasterLauncher.java
new file mode 100644
index 0000000..72d6ea9
--- /dev/null
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppMasterLauncher.java
@@ -0,0 +1,166 @@
+package org.apache.helix.provisioning.yarn;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Map;
+
+import org.I0Itec.zkclient.IDefaultNameSpace;
+import org.I0Itec.zkclient.ZkClient;
+import org.I0Itec.zkclient.ZkServer;
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.GnuParser;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.util.ConverterUtils;
+import org.apache.helix.HelixController;
+import org.apache.helix.api.accessor.ClusterAccessor;
+import org.apache.helix.api.config.ClusterConfig;
+import org.apache.helix.api.config.ResourceConfig;
+import org.apache.helix.api.id.ClusterId;
+import org.apache.helix.api.id.ControllerId;
+import org.apache.helix.api.id.ResourceId;
+import org.apache.helix.controller.provisioner.ProvisionerConfig;
+import org.apache.helix.controller.rebalancer.config.FullAutoRebalancerConfig;
+import org.apache.helix.controller.rebalancer.config.RebalancerConfig;
+import org.apache.helix.manager.zk.ZkHelixConnection;
+import org.apache.helix.model.StateModelDefinition;
+import org.apache.helix.provisioning.ApplicationSpec;
+import org.apache.helix.provisioning.ApplicationSpecFactory;
+import org.apache.helix.provisioning.HelixYarnUtil;
+import org.apache.helix.provisioning.ServiceConfig;
+import org.apache.helix.tools.StateModelConfigGenerator;
+import org.apache.log4j.Logger;
+
+/**
+ * This will <br/>
+ * <ul>
+ * <li>start zookeeper automatically</li>
+ * <li>create the cluster</li>
+ * <li>set up resource(s)</li>
+ * <li>start helix controller</li>
+ * </ul>
+ */
+public class AppMasterLauncher {
+ public static Logger LOG = Logger.getLogger(AppMasterLauncher.class);
+
+ @SuppressWarnings("unchecked")
+ public static void main(String[] args) throws Exception{
+ Map<String, String> env = System.getenv();
+ LOG.info("Starting app master with the following environment variables");
+ for (String key : env.keySet()) {
+ LOG.info(key + "\t\t=" + env.get(key));
+ }
+
+ Options opts;
+ opts = new Options();
+ opts.addOption("num_containers", true, "Number of containers");
+ try {
+ CommandLine cliParser = new GnuParser().parse(opts, args);
+ } catch (Exception e) {
+ LOG.error("Error parsing input arguments" + Arrays.toString(args), e);
+ }
+
+ // START ZOOKEEPER
+ String dataDir = "dataDir";
+ String logDir = "logDir";
+ IDefaultNameSpace defaultNameSpace = new IDefaultNameSpace() {
+ @Override
+ public void createDefaultNameSpace(ZkClient zkClient) {
+
+ }
+ };
+ try {
+ FileUtils.deleteDirectory(new File(dataDir));
+ FileUtils.deleteDirectory(new File(logDir));
+ } catch (IOException e) {
+ LOG.error(e);
+ }
+
+ final ZkServer server = new ZkServer(dataDir, logDir, defaultNameSpace);
+ server.start();
+
+ // start Generic AppMaster that interacts with Yarn RM
+ AppMasterConfig appMasterConfig = new AppMasterConfig();
+ String containerIdStr = appMasterConfig.getContainerId();
+ ContainerId containerId = ConverterUtils.toContainerId(containerIdStr);
+ ApplicationAttemptId appAttemptID = containerId.getApplicationAttemptId();
+
+ String configFile = AppMasterConfig.AppEnvironment.APP_SPEC_FILE.toString();
+ String className = appMasterConfig.getApplicationSpecFactory();
+
+ GenericApplicationMaster genericApplicationMaster = new GenericApplicationMaster(appAttemptID);
+ try {
+ genericApplicationMaster.start();
+ } catch (Exception e) {
+ LOG.error("Unable to start application master: ", e);
+ }
+ ApplicationSpecFactory factory = HelixYarnUtil.createInstance(className);
+
+ //TODO: Avoid setting static variable.
+ YarnProvisioner.applicationMaster = genericApplicationMaster;
+ YarnProvisioner.applicationMasterConfig = appMasterConfig;
+ ApplicationSpec applicationSpec = factory.fromYaml(new FileInputStream(configFile));
+ YarnProvisioner.applicationSpec = applicationSpec;
+ String zkAddress = appMasterConfig.getZKAddress();
+ String clusterName = appMasterConfig.getAppName();
+
+ // CREATE CLUSTER and setup the resources
+ // connect
+ ZkHelixConnection connection = new ZkHelixConnection(zkAddress);
+ connection.connect();
+
+ // create the cluster
+ ClusterId clusterId = ClusterId.from(clusterName);
+ ClusterAccessor clusterAccessor = connection.createClusterAccessor(clusterId);
+ StateModelDefinition statelessService =
+ new StateModelDefinition(StateModelConfigGenerator.generateConfigForStatelessService());
+ clusterAccessor.createCluster(new ClusterConfig.Builder(clusterId).addStateModelDefinition(
+ statelessService).build());
+ for (String service : applicationSpec.getServices()) {
+ String resourceName = service;
+ // add the resource with the local provisioner
+ ResourceId resourceId = ResourceId.from(resourceName);
+
+ ServiceConfig serviceConfig = applicationSpec.getServiceConfig(resourceName);
+ serviceConfig.setSimpleField("service_name", service);
+ int numContainers = serviceConfig.getIntField("num_containers", 1);
+
+ YarnProvisionerConfig provisionerConfig = new YarnProvisionerConfig(resourceId);
+ provisionerConfig.setNumContainers(numContainers);
+
+ FullAutoRebalancerConfig.Builder rebalancerConfigBuilder =
+ new FullAutoRebalancerConfig.Builder(resourceId);
+ RebalancerConfig rebalancerConfig =
+ rebalancerConfigBuilder.stateModelDefId(statelessService.getStateModelDefId())//
+ .build();
+ ResourceConfig.Builder resourceConfigBuilder =
+ new ResourceConfig.Builder(ResourceId.from(resourceName));
+ ResourceConfig resourceConfig = resourceConfigBuilder.provisionerConfig(provisionerConfig) //
+ .rebalancerConfig(rebalancerConfig) //
+ .userConfig(serviceConfig) //
+ .build();
+ clusterAccessor.addResourceToCluster(resourceConfig);
+ }
+ // start controller
+ ControllerId controllerId = ControllerId.from("controller1");
+ HelixController controller = connection.createController(clusterId, controllerId);
+ controller.start();
+
+ Thread shutdownhook = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ server.shutdown();
+ }
+ });
+ Runtime.getRuntime().addShutdownHook(shutdownhook);
+ Thread.sleep(10000);
+
+ }
+}
http://git-wip-us.apache.org/repos/asf/helix/blob/8992aa5a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppStatusReportGenerator.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppStatusReportGenerator.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppStatusReportGenerator.java
new file mode 100644
index 0000000..0443f8a
--- /dev/null
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppStatusReportGenerator.java
@@ -0,0 +1,79 @@
+package org.apache.helix.provisioning.yarn;
+
+import java.util.Map;
+
+import jline.ConsoleReader;
+
+import org.apache.helix.HelixConnection;
+import org.apache.helix.api.Participant;
+import org.apache.helix.api.Resource;
+import org.apache.helix.api.State;
+import org.apache.helix.api.accessor.ClusterAccessor;
+import org.apache.helix.api.config.ContainerConfig;
+import org.apache.helix.api.id.ClusterId;
+import org.apache.helix.api.id.ParticipantId;
+import org.apache.helix.api.id.PartitionId;
+import org.apache.helix.api.id.ResourceId;
+import org.apache.helix.controller.provisioner.ContainerId;
+import org.apache.helix.controller.provisioner.ContainerState;
+import org.apache.helix.manager.zk.ZkHelixConnection;
+import org.apache.helix.model.ExternalView;
+
+public class AppStatusReportGenerator {
+ static String TAB = "\t";
+ static String NEWLINE = "\n";
+
+ String generateReport(HelixConnection connection, ClusterId clusterId) {
+ if (!connection.isConnected()) {
+ return "Unable to connect to cluster";
+ }
+ StringBuilder builder = new StringBuilder();
+ ClusterAccessor clusterAccessor = connection.createClusterAccessor(clusterId);
+ Map<ParticipantId, Participant> participants = clusterAccessor.readParticipants();
+ builder.append("AppName").append(TAB).append(clusterId).append(NEWLINE);
+ Map<ResourceId, Resource> resources = clusterAccessor.readResources();
+ for (ResourceId resourceId : resources.keySet()) {
+ builder.append("SERVICE").append(TAB).append(resourceId).append(NEWLINE);
+ Resource resource = resources.get(resourceId);
+ Map<ParticipantId, State> serviceStateMap =
+ resource.getExternalView().getStateMap(PartitionId.from(resourceId.stringify() + "_0"));
+
+ builder.append(TAB).append("CONTAINER_NAME").append(TAB).append(TAB)
+ .append("CONTAINER_STATE").append(TAB).append("SERVICE_STATE").append(TAB).append("CONTAINER_ID").append(NEWLINE);
+ for (Participant participant : participants.values()) {
+ // need a better check
+ if (!participant.getId().stringify().startsWith(resource.getId().stringify())) {
+ continue;
+ }
+ ContainerConfig containerConfig = participant.getContainerConfig();
+ ContainerState containerState =ContainerState.UNDEFINED;
+ ContainerId containerId = ContainerId.from("N/A");
+
+ if (containerConfig != null) {
+ containerId = containerConfig.getId();
+ containerState = containerConfig.getState();
+ }
+ State participantState = serviceStateMap.get(participant.getId());
+ if (participantState == null) {
+ participantState = State.from("UNKNOWN");
+ }
+ builder.append(TAB).append(participant.getId()).append(TAB)
+ .append(containerState).append(TAB).append(participantState).append(TAB).append(TAB).append(containerId);
+ builder.append(NEWLINE);
+ }
+
+ }
+ return builder.toString();
+
+ }
+
+ public static void main(String[] args) {
+ AppStatusReportGenerator generator = new AppStatusReportGenerator();
+
+ ZkHelixConnection connection = new ZkHelixConnection("localhost:2181");
+ connection.connect();
+ String generateReport = generator.generateReport(connection, ClusterId.from("testApp"));
+ System.out.println(generateReport);
+ connection.disconnect();
+ }
+}
http://git-wip-us.apache.org/repos/asf/helix/blob/8992aa5a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/ApplicationMaster.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/ApplicationMaster.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/ApplicationMaster.java
deleted file mode 100644
index d63b300..0000000
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/ApplicationMaster.java
+++ /dev/null
@@ -1,889 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.helix.provisioning.yarn;
-
-import java.io.BufferedReader;
-import java.io.IOException;
-import java.io.InputStreamReader;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Vector;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.cli.CommandLine;
-import org.apache.commons.cli.GnuParser;
-import org.apache.commons.cli.HelpFormatter;
-import org.apache.commons.cli.Options;
-import org.apache.commons.cli.ParseException;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.DataOutputBuffer;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.Credentials;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.yarn.api.ApplicationConstants;
-import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
-import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
-import org.apache.hadoop.yarn.api.ContainerManagementProtocol;
-import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
-import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
-import org.apache.hadoop.yarn.api.records.Container;
-import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
-import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
-import org.apache.hadoop.yarn.api.records.ContainerState;
-import org.apache.hadoop.yarn.api.records.ContainerStatus;
-import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
-import org.apache.hadoop.yarn.api.records.LocalResource;
-import org.apache.hadoop.yarn.api.records.LocalResourceType;
-import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
-import org.apache.hadoop.yarn.api.records.NodeReport;
-import org.apache.hadoop.yarn.api.records.Priority;
-import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.api.records.ResourceRequest;
-import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;
-import org.apache.hadoop.yarn.client.api.async.AMRMClientAsync;
-import org.apache.hadoop.yarn.client.api.async.NMClientAsync;
-import org.apache.hadoop.yarn.client.api.async.impl.NMClientAsyncImpl;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.exceptions.YarnException;
-import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
-import org.apache.hadoop.yarn.util.ConverterUtils;
-import org.apache.hadoop.yarn.util.Records;
-
-/**
- * An ApplicationMaster for executing shell commands on a set of launched
- * containers using the YARN framework.
- *
- * <p>
- * This class is meant to act as an example on how to write yarn-based
- * application masters.
- * </p>
- *
- * <p>
- * The ApplicationMaster is started on a container by the
- * <code>ResourceManager</code>'s launcher. The first thing that the
- * <code>ApplicationMaster</code> needs to do is to connect and register itself
- * with the <code>ResourceManager</code>. The registration sets up information
- * within the <code>ResourceManager</code> regarding what host:port the
- * ApplicationMaster is listening on to provide any form of functionality to a
- * client as well as a tracking url that a client can use to keep track of
- * status/job history if needed. However, in the distributedshell, trackingurl
- * and appMasterHost:appMasterRpcPort are not supported.
- * </p>
- *
- * <p>
- * The <code>ApplicationMaster</code> needs to send a heartbeat to the
- * <code>ResourceManager</code> at regular intervals to inform the
- * <code>ResourceManager</code> that it is up and alive. The
- * {@link ApplicationMasterProtocol#allocate} to the <code>ResourceManager</code> from the
- * <code>ApplicationMaster</code> acts as a heartbeat.
- *
- * <p>
- * For the actual handling of the job, the <code>ApplicationMaster</code> has to
- * request the <code>ResourceManager</code> via {@link AllocateRequest} for the
- * required no. of containers using {@link ResourceRequest} with the necessary
- * resource specifications such as node location, computational
- * (memory/disk/cpu) resource requirements. The <code>ResourceManager</code>
- * responds with an {@link AllocateResponse} that informs the
- * <code>ApplicationMaster</code> of the set of newly allocated containers,
- * completed containers as well as current state of available resources.
- * </p>
- *
- * <p>
- * For each allocated container, the <code>ApplicationMaster</code> can then set
- * up the necessary launch context via {@link ContainerLaunchContext} to specify
- * the allocated container id, local resources required by the executable, the
- * environment to be setup for the executable, commands to execute, etc. and
- * submit a {@link StartContainerRequest} to the {@link ContainerManagementProtocol} to
- * launch and execute the defined commands on the given allocated container.
- * </p>
- *
- * <p>
- * The <code>ApplicationMaster</code> can monitor the launched container by
- * either querying the <code>ResourceManager</code> using
- * {@link ApplicationMasterProtocol#allocate} to get updates on completed containers or via
- * the {@link ContainerManagementProtocol} by querying for the status of the allocated
- * container's {@link ContainerId}.
- *
- * <p>
- * After the job has been completed, the <code>ApplicationMaster</code> has to
- * send a {@link FinishApplicationMasterRequest} to the
- * <code>ResourceManager</code> to inform it that the
- * <code>ApplicationMaster</code> has been completed.
- */
-@InterfaceAudience.Public
-@InterfaceStability.Unstable
-public class ApplicationMaster {
-
- private static final Log LOG = LogFactory.getLog(ApplicationMaster.class);
-
- // Configuration
- private Configuration conf;
-
- // Handle to communicate with the Resource Manager
- @SuppressWarnings("rawtypes")
- private AMRMClientAsync amRMClient;
-
- // Handle to communicate with the Node Manager
- private NMClientAsync nmClientAsync;
- // Listen to process the response from the Node Manager
- private NMCallbackHandler containerListener;
-
- // Application Attempt Id ( combination of attemptId and fail count )
- private ApplicationAttemptId appAttemptID;
-
- // TODO
- // For status update for clients - yet to be implemented
- // Hostname of the container
- private String appMasterHostname = "";
- // Port on which the app master listens for status updates from clients
- private int appMasterRpcPort = -1;
- // Tracking url to which app master publishes info for clients to monitor
- private String appMasterTrackingUrl = "";
-
- // App Master configuration
- // No. of containers to run shell command on
- private int numTotalContainers = 1;
- // Memory to request for the container on which the shell command will run
- private int containerMemory = 10;
- // Priority of the request
- private int requestPriority;
-
- // Counter for completed containers ( complete denotes successful or failed )
- private AtomicInteger numCompletedContainers = new AtomicInteger();
- // Allocated container count so that we know how many containers has the RM
- // allocated to us
- private AtomicInteger numAllocatedContainers = new AtomicInteger();
- // Count of failed containers
- private AtomicInteger numFailedContainers = new AtomicInteger();
- // Count of containers already requested from the RM
- // Needed as once requested, we should not request for containers again.
- // Only request for more if the original requirement changes.
- private AtomicInteger numRequestedContainers = new AtomicInteger();
-
- // Shell command to be executed
- private String shellCommand = "";
- // Args to be passed to the shell command
- private String shellArgs = "";
- // Env variables to be setup for the shell command
- private Map<String, String> shellEnv = new HashMap<String, String>();
-
- // Location of shell script ( obtained from info set in env )
- // Shell script path in fs
- private String shellScriptPath = "";
- // Timestamp needed for creating a local resource
- private long shellScriptPathTimestamp = 0;
- // File length needed for local resource
- private long shellScriptPathLen = 0;
-
- // Hardcoded path to shell script in launch container's local env
- private final String ExecShellStringPath = "ExecShellScript.sh";
-
- private volatile boolean done;
- private volatile boolean success;
-
- private ByteBuffer allTokens;
-
- // Launch threads
- private List<Thread> launchThreads = new ArrayList<Thread>();
-
  /**
   * Entry point for the Application Master process.
   * Exit codes: 0 = completed successfully (or help-only invocation),
   * 1 = init/run threw, 2 = application ran but reported failure.
   *
   * @param args Command line args
   */
  public static void main(String[] args) {
    boolean result = false;
    try {
      ApplicationMaster appMaster = new ApplicationMaster();
      LOG.info("Initializing ApplicationMaster");
      LOG.info("classpath:" + System.getProperty("java.class.path"));
      boolean doRun = appMaster.init(args);
      if (!doRun) {
        // init() returns false when only usage/help was requested.
        System.exit(0);
      }
      result = appMaster.run();
    } catch (Throwable t) {
      LOG.fatal("Error running ApplicationMaster", t);
      System.exit(1);
    }
    if (result) {
      LOG.info("Application Master completed successfully. exiting");
      System.exit(0);
    } else {
      LOG.info("Application Master failed. exiting");
      System.exit(2);
    }
  }
-
- /**
- * Dump out contents of $CWD and the environment to stdout for debugging
- */
- private void dumpOutDebugInfo() {
-
- LOG.info("Dump debug output");
- Map<String, String> envs = System.getenv();
- for (Map.Entry<String, String> env : envs.entrySet()) {
- LOG.info("System env: key=" + env.getKey() + ", val=" + env.getValue());
- System.out.println("System env: key=" + env.getKey() + ", val="
- + env.getValue());
- }
-
- String cmd = "ls -al";
- Runtime run = Runtime.getRuntime();
- Process pr = null;
- try {
- pr = run.exec(cmd);
- pr.waitFor();
-
- BufferedReader buf = new BufferedReader(new InputStreamReader(
- pr.getInputStream()));
- String line = "";
- while ((line = buf.readLine()) != null) {
- LOG.info("System CWD content: " + line);
- System.out.println("System CWD content: " + line);
- }
- buf.close();
- } catch (IOException e) {
- e.printStackTrace();
- } catch (InterruptedException e) {
- e.printStackTrace();
- }
- }
-
  /** Creates an ApplicationMaster backed by a fresh {@link YarnConfiguration}. */
  public ApplicationMaster() {
    // Set up the configuration
    conf = new YarnConfiguration();
  }
-
- /**
- * Parse command line options
- *
- * @param args Command line args
- * @return Whether init successful and run should be invoked
- * @throws ParseException
- * @throws IOException
- */
- public boolean init(String[] args) throws ParseException, IOException {
-
- Options opts = new Options();
- opts.addOption("app_attempt_id", true,
- "App Attempt ID. Not to be used unless for testing purposes");
- opts.addOption("shell_command", true,
- "Shell command to be executed by the Application Master");
- opts.addOption("shell_script", true,
- "Location of the shell script to be executed");
- opts.addOption("shell_args", true, "Command line args for the shell script");
- opts.addOption("shell_env", true,
- "Environment for shell script. Specified as env_key=env_val pairs");
- opts.addOption("container_memory", true,
- "Amount of memory in MB to be requested to run the shell command");
- opts.addOption("num_containers", true,
- "No. of containers on which the shell command needs to be executed");
- opts.addOption("priority", true, "Application Priority. Default 0");
- opts.addOption("debug", false, "Dump out debug information");
-
- opts.addOption("help", false, "Print usage");
- CommandLine cliParser = new GnuParser().parse(opts, args);
-
- if (args.length == 0) {
- printUsage(opts);
- throw new IllegalArgumentException(
- "No args specified for application master to initialize");
- }
-
- if (cliParser.hasOption("help")) {
- printUsage(opts);
- return false;
- }
-
- if (cliParser.hasOption("debug")) {
- dumpOutDebugInfo();
- }
-
- Map<String, String> envs = System.getenv();
-
- if (!envs.containsKey(Environment.CONTAINER_ID.name())) {
- if (cliParser.hasOption("app_attempt_id")) {
- String appIdStr = cliParser.getOptionValue("app_attempt_id", "");
- appAttemptID = ConverterUtils.toApplicationAttemptId(appIdStr);
- } else {
- throw new IllegalArgumentException(
- "Application Attempt Id not set in the environment");
- }
- } else {
- ContainerId containerId = ConverterUtils.toContainerId(envs
- .get(Environment.CONTAINER_ID.name()));
- appAttemptID = containerId.getApplicationAttemptId();
- }
-
- if (!envs.containsKey(ApplicationConstants.APP_SUBMIT_TIME_ENV)) {
- throw new RuntimeException(ApplicationConstants.APP_SUBMIT_TIME_ENV
- + " not set in the environment");
- }
- if (!envs.containsKey(Environment.NM_HOST.name())) {
- throw new RuntimeException(Environment.NM_HOST.name()
- + " not set in the environment");
- }
- if (!envs.containsKey(Environment.NM_HTTP_PORT.name())) {
- throw new RuntimeException(Environment.NM_HTTP_PORT
- + " not set in the environment");
- }
- if (!envs.containsKey(Environment.NM_PORT.name())) {
- throw new RuntimeException(Environment.NM_PORT.name()
- + " not set in the environment");
- }
-
- LOG.info("Application master for app" + ", appId="
- + appAttemptID.getApplicationId().getId() + ", clustertimestamp="
- + appAttemptID.getApplicationId().getClusterTimestamp()
- + ", attemptId=" + appAttemptID.getAttemptId());
-
- if (!cliParser.hasOption("shell_command")) {
- throw new IllegalArgumentException(
- "No shell command specified to be executed by application master");
- }
- shellCommand = cliParser.getOptionValue("shell_command");
-
- if (cliParser.hasOption("shell_args")) {
- shellArgs = cliParser.getOptionValue("shell_args");
- }
- if (cliParser.hasOption("shell_env")) {
- String shellEnvs[] = cliParser.getOptionValues("shell_env");
- for (String env : shellEnvs) {
- env = env.trim();
- int index = env.indexOf('=');
- if (index == -1) {
- shellEnv.put(env, "");
- continue;
- }
- String key = env.substring(0, index);
- String val = "";
- if (index < (env.length() - 1)) {
- val = env.substring(index + 1);
- }
- shellEnv.put(key, val);
- }
- }
-
- if (envs.containsKey(DSConstants.DISTRIBUTEDSHELLSCRIPTLOCATION)) {
- shellScriptPath = envs.get(DSConstants.DISTRIBUTEDSHELLSCRIPTLOCATION);
-
- if (envs.containsKey(DSConstants.DISTRIBUTEDSHELLSCRIPTTIMESTAMP)) {
- shellScriptPathTimestamp = Long.valueOf(envs
- .get(DSConstants.DISTRIBUTEDSHELLSCRIPTTIMESTAMP));
- }
- if (envs.containsKey(DSConstants.DISTRIBUTEDSHELLSCRIPTLEN)) {
- shellScriptPathLen = Long.valueOf(envs
- .get(DSConstants.DISTRIBUTEDSHELLSCRIPTLEN));
- }
-
- if (!shellScriptPath.isEmpty()
- && (shellScriptPathTimestamp <= 0 || shellScriptPathLen <= 0)) {
- LOG.error("Illegal values in env for shell script path" + ", path="
- + shellScriptPath + ", len=" + shellScriptPathLen + ", timestamp="
- + shellScriptPathTimestamp);
- throw new IllegalArgumentException(
- "Illegal values in env for shell script path");
- }
- }
-
- containerMemory = Integer.parseInt(cliParser.getOptionValue(
- "container_memory", "10"));
- numTotalContainers = Integer.parseInt(cliParser.getOptionValue(
- "num_containers", "1"));
- if (numTotalContainers == 0) {
- throw new IllegalArgumentException(
- "Cannot run distributed shell with no containers");
- }
- requestPriority = Integer.parseInt(cliParser
- .getOptionValue("priority", "0"));
-
- return true;
- }
-
- /**
- * Helper function to print usage
- *
- * @param opts Parsed command line options
- */
- private void printUsage(Options opts) {
- new HelpFormatter().printHelp("ApplicationMaster", opts);
- }
-
  /**
   * Main run function for the application master: strips the AM->RM token from
   * the credentials, starts the async RM/NM clients, registers with the RM,
   * submits the initial container asks, then polls until all containers finish.
   *
   * @return whether all containers completed successfully (the success flag set
   *         by finish())
   * @throws YarnException
   * @throws IOException
   */
  @SuppressWarnings({ "unchecked" })
  public boolean run() throws YarnException, IOException {
    LOG.info("Starting ApplicationMaster");

    Credentials credentials =
        UserGroupInformation.getCurrentUser().getCredentials();
    DataOutputBuffer dob = new DataOutputBuffer();
    credentials.writeTokenStorageToStream(dob);
    // Now remove the AM->RM token so that containers cannot access it.
    Iterator<Token<?>> iter = credentials.getAllTokens().iterator();
    while (iter.hasNext()) {
      Token<?> token = iter.next();
      if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
        iter.remove();
      }
    }
    allTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());

    // Heartbeat interval of 1000ms; RMCallbackHandler drives the counters.
    AMRMClientAsync.CallbackHandler allocListener = new RMCallbackHandler();
    amRMClient = AMRMClientAsync.createAMRMClientAsync(1000, allocListener);
    amRMClient.init(conf);
    amRMClient.start();

    containerListener = createNMCallbackHandler();
    nmClientAsync = new NMClientAsyncImpl(containerListener);
    nmClientAsync.init(conf);
    nmClientAsync.start();

    // Setup local RPC Server to accept status requests directly from clients
    // TODO need to setup a protocol for client to be able to communicate to
    // the RPC server
    // TODO use the rpc port info to register with the RM for the client to
    // send requests to this app master

    // Register self with ResourceManager
    // This will start heartbeating to the RM
    appMasterHostname = NetUtils.getHostname();
    RegisterApplicationMasterResponse response = amRMClient
        .registerApplicationMaster(appMasterHostname, appMasterRpcPort,
            appMasterTrackingUrl);
    // Dump out information about cluster capability as seen by the
    // resource manager
    int maxMem = response.getMaximumResourceCapability().getMemory();
    LOG.info("Max mem capabililty of resources in this cluster " + maxMem);

    // A resource ask cannot exceed the max.
    if (containerMemory > maxMem) {
      LOG.info("Container memory specified above max threshold of cluster."
          + " Using max value." + ", specified=" + containerMemory + ", max="
          + maxMem);
      containerMemory = maxMem;
    }

    // Setup ask for containers from RM
    // Send request for containers to RM
    // Until we get our fully allocated quota, we keep on polling RM for
    // containers
    // Keep looping until all the containers are launched and shell script
    // executed on them ( regardless of success/failure).
    for (int i = 0; i < numTotalContainers; ++i) {
      ContainerRequest containerAsk = setupContainerAskForRM();
      amRMClient.addContainerRequest(containerAsk);
    }
    numRequestedContainers.set(numTotalContainers);

    // Busy-wait (200ms poll) until the callbacks flip 'done' or every
    // container has completed.
    // NOTE(review): InterruptedException is deliberately ignored here so an
    // interrupt does not abort the wait -- confirm this is intended.
    while (!done
        && (numCompletedContainers.get() != numTotalContainers)) {
      try {
        Thread.sleep(200);
      } catch (InterruptedException ex) {}
    }
    finish();

    return success;
  }
-
  /** Factory for the NM callback handler; exposed so tests can override it. */
  @VisibleForTesting
  NMCallbackHandler createNMCallbackHandler() {
    return new NMCallbackHandler(this);
  }
-
- private void finish() {
- // Join all launched threads
- // needed for when we time out
- // and we need to release containers
- for (Thread launchThread : launchThreads) {
- try {
- launchThread.join(10000);
- } catch (InterruptedException e) {
- LOG.info("Exception thrown in thread join: " + e.getMessage());
- e.printStackTrace();
- }
- }
-
- // When the application completes, it should stop all running containers
- LOG.info("Application completed. Stopping running containers");
- nmClientAsync.stop();
-
- // When the application completes, it should send a finish application
- // signal to the RM
- LOG.info("Application completed. Signalling finish to RM");
-
- FinalApplicationStatus appStatus;
- String appMessage = null;
- success = true;
- if (numFailedContainers.get() == 0 &&
- numCompletedContainers.get() == numTotalContainers) {
- appStatus = FinalApplicationStatus.SUCCEEDED;
- } else {
- appStatus = FinalApplicationStatus.FAILED;
- appMessage = "Diagnostics." + ", total=" + numTotalContainers
- + ", completed=" + numCompletedContainers.get() + ", allocated="
- + numAllocatedContainers.get() + ", failed="
- + numFailedContainers.get();
- success = false;
- }
- try {
- amRMClient.unregisterApplicationMaster(appStatus, appMessage, null);
- } catch (YarnException ex) {
- LOG.error("Failed to unregister application", ex);
- } catch (IOException e) {
- LOG.error("Failed to unregister application", e);
- }
-
- amRMClient.stop();
- }
-
  /**
   * Handles asynchronous ResourceManager callbacks: updates the
   * completed/allocated/failed/requested counters, re-requests containers lost
   * to the framework, launches work on newly allocated containers, and flips
   * the 'done' flag that run() polls.
   */
  private class RMCallbackHandler implements AMRMClientAsync.CallbackHandler {
    @SuppressWarnings("unchecked")
    @Override
    public void onContainersCompleted(List<ContainerStatus> completedContainers) {
      LOG.info("Got response from RM for container ask, completedCnt="
          + completedContainers.size());
      for (ContainerStatus containerStatus : completedContainers) {
        LOG.info("Got container status for containerID="
            + containerStatus.getContainerId() + ", state="
            + containerStatus.getState() + ", exitStatus="
            + containerStatus.getExitStatus() + ", diagnostics="
            + containerStatus.getDiagnostics());

        // non complete containers should not be here
        assert (containerStatus.getState() == ContainerState.COMPLETE);

        // increment counters for completed/failed containers
        int exitStatus = containerStatus.getExitStatus();
        if (0 != exitStatus) {
          // container failed
          if (ContainerExitStatus.ABORTED != exitStatus) {
            // shell script failed
            // counts as completed
            numCompletedContainers.incrementAndGet();
            numFailedContainers.incrementAndGet();
          } else {
            // container was killed by framework, possibly preempted
            // we should re-try as the container was lost for some reason
            // (decrementing makes the askCount below positive again)
            numAllocatedContainers.decrementAndGet();
            numRequestedContainers.decrementAndGet();
            // we do not need to release the container as it would be done
            // by the RM
          }
        } else {
          // nothing to do
          // container completed successfully
          numCompletedContainers.incrementAndGet();
          LOG.info("Container completed successfully." + ", containerId="
              + containerStatus.getContainerId());
        }
      }

      // ask for more containers if any failed
      int askCount = numTotalContainers - numRequestedContainers.get();
      numRequestedContainers.addAndGet(askCount);

      if (askCount > 0) {
        for (int i = 0; i < askCount; ++i) {
          ContainerRequest containerAsk = setupContainerAskForRM();
          amRMClient.addContainerRequest(containerAsk);
        }
      }

      // Signal the run() poll loop once everything has completed.
      if (numCompletedContainers.get() == numTotalContainers) {
        done = true;
      }
    }

    @Override
    public void onContainersAllocated(List<Container> allocatedContainers) {
      LOG.info("Got response from RM for container ask, allocatedCnt="
          + allocatedContainers.size());
      numAllocatedContainers.addAndGet(allocatedContainers.size());
      for (Container allocatedContainer : allocatedContainers) {
        LOG.info("Launching shell command on a new container."
            + ", containerId=" + allocatedContainer.getId()
            + ", containerNode=" + allocatedContainer.getNodeId().getHost()
            + ":" + allocatedContainer.getNodeId().getPort()
            + ", containerNodeURI=" + allocatedContainer.getNodeHttpAddress()
            + ", containerResourceMemory"
            + allocatedContainer.getResource().getMemory());
        // + ", containerToken"
        // +allocatedContainer.getContainerToken().getIdentifier().toString());

        LaunchContainerRunnable runnableLaunchContainer =
            new LaunchContainerRunnable(allocatedContainer, containerListener);
        Thread launchThread = new Thread(runnableLaunchContainer);

        // launch and start the container on a separate thread to keep
        // the main thread unblocked
        // as all containers may not be allocated at one go.
        launchThreads.add(launchThread);
        launchThread.start();
      }
    }

    @Override
    public void onShutdownRequest() {
      done = true;
    }

    @Override
    public void onNodesUpdated(List<NodeReport> updatedNodes) {}

    @Override
    public float getProgress() {
      // set progress to deliver to RM on next heartbeat
      float progress = (float) numCompletedContainers.get()
          / numTotalContainers;
      return progress;
    }

    @Override
    public void onError(Throwable e) {
      done = true;
      amRMClient.stop();
    }
  }
-
  /**
   * Handles asynchronous NodeManager callbacks. Tracks started containers in a
   * concurrent map and forwards start/stop failures into the AM's
   * completed/failed counters.
   */
  @VisibleForTesting
  static class NMCallbackHandler
    implements NMClientAsync.CallbackHandler {

    // Containers we have asked the NM to start, keyed by container id.
    private ConcurrentMap<ContainerId, Container> containers =
        new ConcurrentHashMap<ContainerId, Container>();
    private final ApplicationMaster applicationMaster;

    public NMCallbackHandler(ApplicationMaster applicationMaster) {
      this.applicationMaster = applicationMaster;
    }

    /** Registers a container before its start request is issued. */
    public void addContainer(ContainerId containerId, Container container) {
      containers.putIfAbsent(containerId, container);
    }

    @Override
    public void onContainerStopped(ContainerId containerId) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Succeeded to stop Container " + containerId);
      }
      containers.remove(containerId);
    }

    @Override
    public void onContainerStatusReceived(ContainerId containerId,
        ContainerStatus containerStatus) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Container Status: id=" + containerId + ", status=" +
            containerStatus);
      }
    }

    @Override
    public void onContainerStarted(ContainerId containerId,
        Map<String, ByteBuffer> allServiceResponse) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Succeeded to start Container " + containerId);
      }
      Container container = containers.get(containerId);
      if (container != null) {
        // Kick off an async status query for the freshly started container.
        applicationMaster.nmClientAsync.getContainerStatusAsync(containerId, container.getNodeId());
      }
    }

    @Override
    public void onStartContainerError(ContainerId containerId, Throwable t) {
      LOG.error("Failed to start Container " + containerId);
      containers.remove(containerId);
      // A container that never started counts as both completed and failed.
      applicationMaster.numCompletedContainers.incrementAndGet();
      applicationMaster.numFailedContainers.incrementAndGet();
    }

    @Override
    public void onGetContainerStatusError(
        ContainerId containerId, Throwable t) {
      LOG.error("Failed to query the status of Container " + containerId);
    }

    @Override
    public void onStopContainerError(ContainerId containerId, Throwable t) {
      LOG.error("Failed to stop Container " + containerId);
      containers.remove(containerId);
    }
  }
-
- /**
- * Thread to connect to the {@link ContainerManagementProtocol} and launch the container
- * that will execute the shell command.
- */
- private class LaunchContainerRunnable implements Runnable {
-
- // Allocated container
- Container container;
-
- NMCallbackHandler containerListener;
-
- /**
- * @param lcontainer Allocated container
- * @param containerListener Callback handler of the container
- */
- public LaunchContainerRunnable(
- Container lcontainer, NMCallbackHandler containerListener) {
- this.container = lcontainer;
- this.containerListener = containerListener;
- }
-
- @Override
- /**
- * Connects to CM, sets up container launch context
- * for shell command and eventually dispatches the container
- * start request to the CM.
- */
- public void run() {
- LOG.info("Setting up container launch container for containerid="
- + container.getId());
- ContainerLaunchContext ctx = Records
- .newRecord(ContainerLaunchContext.class);
-
- // Set the environment
- ctx.setEnvironment(shellEnv);
-
- // Set the local resources
- Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
-
- // The container for the eventual shell commands needs its own local
- // resources too.
- // In this scenario, if a shell script is specified, we need to have it
- // copied and made available to the container.
- if (!shellScriptPath.isEmpty()) {
- LocalResource shellRsrc = Records.newRecord(LocalResource.class);
- shellRsrc.setType(LocalResourceType.FILE);
- shellRsrc.setVisibility(LocalResourceVisibility.APPLICATION);
- try {
- shellRsrc.setResource(ConverterUtils.getYarnUrlFromURI(new URI(
- shellScriptPath)));
- } catch (URISyntaxException e) {
- LOG.error("Error when trying to use shell script path specified"
- + " in env, path=" + shellScriptPath);
- e.printStackTrace();
-
- // A failure scenario on bad input such as invalid shell script path
- // We know we cannot continue launching the container
- // so we should release it.
- // TODO
- numCompletedContainers.incrementAndGet();
- numFailedContainers.incrementAndGet();
- return;
- }
- shellRsrc.setTimestamp(shellScriptPathTimestamp);
- shellRsrc.setSize(shellScriptPathLen);
- localResources.put(ExecShellStringPath, shellRsrc);
- }
- ctx.setLocalResources(localResources);
-
- // Set the necessary command to execute on the allocated container
- Vector<CharSequence> vargs = new Vector<CharSequence>(5);
-
- // Set executable command
- vargs.add(shellCommand);
- // Set shell script path
- if (!shellScriptPath.isEmpty()) {
- vargs.add(ExecShellStringPath);
- }
-
- // Set args for the shell command if any
- vargs.add(shellArgs);
- // Add log redirect params
- vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout");
- vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr");
-
- // Get final command
- StringBuilder command = new StringBuilder();
- for (CharSequence str : vargs) {
- command.append(str).append(" ");
- }
-
- List<String> commands = new ArrayList<String>();
- commands.add(command.toString());
- ctx.setCommands(commands);
-
- // Set up tokens for the container too. Today, for normal shell commands,
- // the container in distribute-shell doesn't need any tokens. We are
- // populating them mainly for NodeManagers to be able to download any
- // files in the distributed file-system. The tokens are otherwise also
- // useful in cases, for e.g., when one is running a "hadoop dfs" command
- // inside the distributed shell.
- ctx.setTokens(allTokens.duplicate());
-
- containerListener.addContainer(container.getId(), container);
- nmClientAsync.startContainerAsync(container, ctx);
- }
- }
-
- /**
- * Setup the request that will be sent to the RM for the container ask.
- *
- * @return the setup ResourceRequest to be sent to RM
- */
- private ContainerRequest setupContainerAskForRM() {
- // setup requirements for hosts
- // using * as any host will do for the distributed shell app
- // set the priority for the request
- Priority pri = Records.newRecord(Priority.class);
- // TODO - what is the range for priority? how to decide?
- pri.setPriority(requestPriority);
-
- // Set up resource type requirements
- // For now, only memory is supported so we set memory requirements
- Resource capability = Records.newRecord(Resource.class);
- capability.setMemory(containerMemory);
-
- ContainerRequest request = new ContainerRequest(capability, null, null,
- pri);
- LOG.info("Requested container ask: " + request.toString());
- return request;
- }
-
- public static ApplicationMaster getInstance() {
- return null;
- }
-}
http://git-wip-us.apache.org/repos/asf/helix/blob/8992aa5a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/ApplicationSpec.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/ApplicationSpec.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/ApplicationSpec.java
deleted file mode 100644
index 285d036..0000000
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/ApplicationSpec.java
+++ /dev/null
@@ -1,28 +0,0 @@
-package org.apache.helix.provisioning.yarn;
-
-import java.net.URI;
-import java.util.List;
-
-
-public interface ApplicationSpec {
- /**
- * Returns the name of the application
- * @return
- */
- String getAppName();
-
- AppConfig getConfig();
-
- List<String> getServices();
-
- URI getAppMasterPackage();
-
- URI getServicePackage(String serviceName);
-
- String getServiceMainClass(String service);
-
- ServiceConfig getServiceConfig(String serviceName);
-
- List<TaskConfig> getTaskConfigs();
-
-}
http://git-wip-us.apache.org/repos/asf/helix/blob/8992aa5a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/ApplicationSpecFactory.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/ApplicationSpecFactory.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/ApplicationSpecFactory.java
deleted file mode 100644
index 352dc0c..0000000
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/ApplicationSpecFactory.java
+++ /dev/null
@@ -1,9 +0,0 @@
-package org.apache.helix.provisioning.yarn;
-
-import java.io.InputStream;
-
-public interface ApplicationSpecFactory {
-
- ApplicationSpec fromYaml(InputStream yamlFile);
-
-}
[20/50] [abbrv] git commit: Fix issue with updating provisioner config
Posted by ka...@apache.org.
Fix issue with updating provisioner config
Project: http://git-wip-us.apache.org/repos/asf/helix/repo
Commit: http://git-wip-us.apache.org/repos/asf/helix/commit/224c7eaa
Tree: http://git-wip-us.apache.org/repos/asf/helix/tree/224c7eaa
Diff: http://git-wip-us.apache.org/repos/asf/helix/diff/224c7eaa
Branch: refs/heads/master
Commit: 224c7eaaa0863ae89da7fe7ed12ce36303faca07
Parents: c072aca
Author: Kanak Biscuitwala <ka...@apache.org>
Authored: Mon Feb 24 10:13:22 2014 -0800
Committer: Kanak Biscuitwala <ka...@apache.org>
Committed: Mon Feb 24 10:13:22 2014 -0800
----------------------------------------------------------------------
.../java/org/apache/helix/api/accessor/ResourceAccessor.java | 5 +++++
.../main/java/org/apache/helix/api/config/ResourceConfig.java | 2 ++
2 files changed, 7 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/helix/blob/224c7eaa/helix-core/src/main/java/org/apache/helix/api/accessor/ResourceAccessor.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/api/accessor/ResourceAccessor.java b/helix-core/src/main/java/org/apache/helix/api/accessor/ResourceAccessor.java
index 8359da5..0052871 100644
--- a/helix-core/src/main/java/org/apache/helix/api/accessor/ResourceAccessor.java
+++ b/helix-core/src/main/java/org/apache/helix/api/accessor/ResourceAccessor.java
@@ -49,6 +49,7 @@ import org.apache.helix.model.ExternalView;
import org.apache.helix.model.IdealState;
import org.apache.helix.model.IdealState.RebalanceMode;
import org.apache.helix.model.InstanceConfig;
+import org.apache.helix.model.ProvisionerConfigHolder;
import org.apache.helix.model.ResourceAssignment;
import org.apache.helix.model.ResourceConfiguration;
import org.apache.helix.model.StateModelDefinition;
@@ -260,6 +261,10 @@ public class ResourceAccessor {
config.addNamespacedConfig(new RebalancerConfigHolder(resourceConfig.getRebalancerConfig())
.toNamespacedConfig());
}
+ if (resourceConfig.getProvisionerConfig() != null) {
+ config.addNamespacedConfig(new ProvisionerConfigHolder(resourceConfig.getProvisionerConfig())
+ .toNamespacedConfig());
+ }
config.setBucketSize(resourceConfig.getBucketSize());
config.setBatchMessageMode(resourceConfig.getBatchMessageMode());
setConfiguration(resourceId, config, resourceConfig.getRebalancerConfig());
http://git-wip-us.apache.org/repos/asf/helix/blob/224c7eaa/helix-core/src/main/java/org/apache/helix/api/config/ResourceConfig.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/api/config/ResourceConfig.java b/helix-core/src/main/java/org/apache/helix/api/config/ResourceConfig.java
index 5443236..6185383 100644
--- a/helix-core/src/main/java/org/apache/helix/api/config/ResourceConfig.java
+++ b/helix-core/src/main/java/org/apache/helix/api/config/ResourceConfig.java
@@ -230,6 +230,7 @@ public class ResourceConfig {
_updateFields.add(Fields.PROVISIONER_CONFIG);
return this;
}
+
/**
* Set the user configuration
* @param userConfig user-specified properties
@@ -273,6 +274,7 @@ public class ResourceConfig {
Builder builder =
new Builder(orig.getId()).type(orig.getType())
.rebalancerConfig(orig.getRebalancerConfig())
+ .provisionerConfig(orig.getProvisionerConfig())
.schedulerTaskConfig(orig.getSchedulerTaskConfig()).userConfig(orig.getUserConfig())
.bucketSize(orig.getBucketSize()).batchMessageMode(orig.getBatchMessageMode());
for (Fields field : _updateFields) {
[37/50] [abbrv] git commit: Add Java port of or-tools knapsack solver
Posted by ka...@apache.org.
Add Java port of or-tools knapsack solver
Project: http://git-wip-us.apache.org/repos/asf/helix/repo
Commit: http://git-wip-us.apache.org/repos/asf/helix/commit/9a2b729e
Tree: http://git-wip-us.apache.org/repos/asf/helix/tree/9a2b729e
Diff: http://git-wip-us.apache.org/repos/asf/helix/diff/9a2b729e
Branch: refs/heads/master
Commit: 9a2b729e63cdcfe888ca63c495f23dbe27be9a9e
Parents: c73e95e
Author: Kanak Biscuitwala <ka...@apache.org>
Authored: Fri May 23 13:43:50 2014 -0700
Committer: Kanak Biscuitwala <ka...@apache.org>
Committed: Fri May 23 13:43:50 2014 -0700
----------------------------------------------------------------------
.../knapsack/AbstractBaseKnapsackSolver.java | 32 +++
.../knapsack/AbstractKnapsackPropagator.java | 104 +++++++
.../strategy/knapsack/BaseKnapsackSolver.java | 49 ++++
.../strategy/knapsack/KnapsackAssignment.java | 21 ++
.../KnapsackCapacityPropagatorImpl.java | 218 +++++++++++++++
.../knapsack/KnapsackGenericSolverImpl.java | 269 +++++++++++++++++++
.../strategy/knapsack/KnapsackItem.java | 33 +++
.../strategy/knapsack/KnapsackPropagator.java | 61 +++++
.../strategy/knapsack/KnapsackSearchNode.java | 62 +++++
.../knapsack/KnapsackSearchNodeImpl.java | 77 ++++++
.../strategy/knapsack/KnapsackSearchPath.java | 39 +++
.../knapsack/KnapsackSearchPathImpl.java | 65 +++++
.../strategy/knapsack/KnapsackSolver.java | 60 +++++
.../strategy/knapsack/KnapsackSolverImpl.java | 191 +++++++++++++
.../strategy/knapsack/KnapsackState.java | 42 +++
.../strategy/knapsack/KnapsackStateImpl.java | 61 +++++
.../strategy/knapsack/KnapsackTester.java | 58 ++++
17 files changed, 1442 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/helix/blob/9a2b729e/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/AbstractBaseKnapsackSolver.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/AbstractBaseKnapsackSolver.java b/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/AbstractBaseKnapsackSolver.java
new file mode 100644
index 0000000..4d27bd7
--- /dev/null
+++ b/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/AbstractBaseKnapsackSolver.java
@@ -0,0 +1,32 @@
+package org.apache.helix.controller.strategy.knapsack;
+
+/**
+ * Common implementation of a knapsack solver<br/>
+ * <br/>
+ * Based on the C++ knapsack solver in Google's or-tools package.
+ */
+public abstract class AbstractBaseKnapsackSolver implements BaseKnapsackSolver {
+ private final String _solverName;
+
+ /**
+ * Initialize the solver
+ * @param solverName the name of the solvers
+ */
+ public AbstractBaseKnapsackSolver(final String solverName) {
+ _solverName = solverName;
+ }
+
+ @Override
+ public long[] getLowerAndUpperBoundWhenItem(int itemId, boolean isItemIn, long lowerBound,
+ long upperBound) {
+ return new long[] {
+ 0L, Long.MAX_VALUE
+ };
+ }
+
+ @Override
+ public String getName() {
+ return _solverName;
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/helix/blob/9a2b729e/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/AbstractKnapsackPropagator.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/AbstractKnapsackPropagator.java b/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/AbstractKnapsackPropagator.java
new file mode 100644
index 0000000..0663990
--- /dev/null
+++ b/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/AbstractKnapsackPropagator.java
@@ -0,0 +1,104 @@
+package org.apache.helix.controller.strategy.knapsack;
+
+import java.util.ArrayList;
+
+/**
+ * Common implementation of a knapsack constraint satisfier<br/>
+ * <br/>
+ * Based on the C++ knapsack solver in Google's or-tools package.
+ */
+public abstract class AbstractKnapsackPropagator implements KnapsackPropagator {
+ private ArrayList<KnapsackItem> _items;
+ private long _currentProfit;
+ private long _profitLowerBound;
+ private long _profitUpperBound;
+ private KnapsackState _state;
+
+ /**
+ * Initialize the propagator
+ * @param state the current knapsack state
+ */
+ public AbstractKnapsackPropagator(final KnapsackState state) {
+ _items = new ArrayList<KnapsackItem>();
+ _currentProfit = 0L;
+ _profitLowerBound = 0L;
+ _profitUpperBound = Long.MAX_VALUE;
+ _state = state;
+ }
+
+ @Override
+ public void init(ArrayList<Long> profits, ArrayList<Long> weights) {
+ final int numberOfItems = profits.size();
+ _items.clear();
+ for (int i = 0; i < numberOfItems; i++) {
+ _items.add(new KnapsackItem(i, weights.get(i), profits.get(i)));
+ }
+ _currentProfit = 0;
+ _profitLowerBound = Long.MIN_VALUE;
+ _profitUpperBound = Long.MAX_VALUE;
+ initPropagator();
+ }
+
+ @Override
+ public boolean update(boolean revert, KnapsackAssignment assignment) {
+ if (assignment.isIn) {
+ if (revert) {
+ _currentProfit -= _items.get(assignment.itemId).profit;
+ } else {
+ _currentProfit += _items.get(assignment.itemId).profit;
+ }
+ }
+ return updatePropagator(revert, assignment);
+ }
+
+ @Override
+ public long currentProfit() {
+ return _currentProfit;
+ }
+
+ @Override
+ public long profitLowerBound() {
+ return _profitLowerBound;
+ }
+
+ @Override
+ public long profitUpperBound() {
+ return _profitUpperBound;
+ }
+
+ @Override
+ public void copyCurrentStateToSolution(boolean hasOnePropagator, ArrayList<Boolean> solution) {
+ if (solution == null) {
+ throw new RuntimeException("solution cannot be null!");
+ }
+ for (KnapsackItem item : _items) {
+ final int itemId = item.id;
+ solution.set(itemId, _state.isBound(itemId) && _state.isIn(itemId));
+ }
+ if (hasOnePropagator) {
+ copyCurrentStateToSolutionPropagator(solution);
+ }
+ }
+
+ protected abstract void initPropagator();
+
+ protected abstract boolean updatePropagator(boolean revert, final KnapsackAssignment assignment);
+
+ protected abstract void copyCurrentStateToSolutionPropagator(ArrayList<Boolean> solution);
+
+ protected KnapsackState state() {
+ return _state;
+ }
+
+ protected ArrayList<KnapsackItem> items() {
+ return _items;
+ }
+
+ protected void setProfitLowerBound(long profit) {
+ _profitLowerBound = profit;
+ }
+
+ protected void setProfitUpperBound(long profit) {
+ _profitUpperBound = profit;
+ }
+}
http://git-wip-us.apache.org/repos/asf/helix/blob/9a2b729e/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/BaseKnapsackSolver.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/BaseKnapsackSolver.java b/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/BaseKnapsackSolver.java
new file mode 100644
index 0000000..1d71a22
--- /dev/null
+++ b/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/BaseKnapsackSolver.java
@@ -0,0 +1,49 @@
+package org.apache.helix.controller.strategy.knapsack;
+
+import java.util.ArrayList;
+
/**
 * The interface of any multidimensional knapsack solver<br/>
 * <br/>
 * Based on the C++ knapsack solver in Google's or-tools package.
 */
public interface BaseKnapsackSolver {
  /**
   * Initialize the solver
   * @param profits profit of adding each item to the knapsack, indexed by item id
   * @param weights cost of adding each item in each dimension, indexed as
   *          weights[dimension][item]
   * @param capacities maximum weight per dimension (one entry per dimension)
   */
  void init(final ArrayList<Long> profits, final ArrayList<ArrayList<Long>> weights,
      final ArrayList<Long> capacities);

  /**
   * Compute an upper and lower bound on the knapsack given the assignment state of the knapsack
   * @param itemId the item id
   * @param isItemIn true if the item is in the knapsack, false otherwise
   * @param lowerBound the current lower bound
   * @param upperBound the current upper bound
   * @return the new lower and upper bounds as a two-element array: [lower, upper]
   */
  long[] getLowerAndUpperBoundWhenItem(int itemId, boolean isItemIn, long lowerBound,
      long upperBound);

  /**
   * Solve the knapsack problem
   * @return the (approximate) optimal profit
   */
  long solve();

  /**
   * Check if an item is in the final solution; valid after solve() has run
   * @param itemId the item id
   * @return true if the item is present, false otherwise
   */
  boolean bestSolution(int itemId);

  /**
   * Get the solver name
   * @return solver name
   */
  String getName();
}
http://git-wip-us.apache.org/repos/asf/helix/blob/9a2b729e/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackAssignment.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackAssignment.java b/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackAssignment.java
new file mode 100644
index 0000000..bfd29d7
--- /dev/null
+++ b/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackAssignment.java
@@ -0,0 +1,21 @@
+package org.apache.helix.controller.strategy.knapsack;
+
/**
 * Records whether a given knapsack item has been placed inside or left out of
 * the knapsack<br/>
 * <br/>
 * Based on the C++ knapsack solver in Google's or-tools package.
 */
public class KnapsackAssignment {
  /** Id of the item this assignment refers to. */
  public int itemId;
  /** True when the item is inside the knapsack. */
  public boolean isIn;

  /**
   * Create the assignment
   * @param itemId the item id
   * @param isIn true if the item is in the knapsack, false otherwise
   */
  public KnapsackAssignment(int itemId, boolean isIn) {
    this.itemId = itemId;
    this.isIn = isIn;
  }
}
http://git-wip-us.apache.org/repos/asf/helix/blob/9a2b729e/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackCapacityPropagatorImpl.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackCapacityPropagatorImpl.java b/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackCapacityPropagatorImpl.java
new file mode 100644
index 0000000..357cc2a
--- /dev/null
+++ b/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackCapacityPropagatorImpl.java
@@ -0,0 +1,218 @@
+package org.apache.helix.controller.strategy.knapsack;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+
/**
 * A knapsack propagator that constrains assignments based on knapsack capacity for a given
 * dimension<br/>
 * <br/>
 * Based on the C++ knapsack solver in Google's or-tools package.
 */
public class KnapsackCapacityPropagatorImpl extends AbstractKnapsackPropagator {
  // All 64 bits set; used by mostSignificantBitsPosition64 to test bit groups.
  private static final long ALL_BITS_64 = 0xFFFFFFFFFFFFFFFFL;
  // Sentinel item id meaning "no item".
  private static final int NO_SELECTION = -1;

  private long _capacity; // knapsack capacity in this dimension
  private long _consumedCapacity; // weight committed by items bound into the knapsack
  private int _breakItemId; // first unbound item in efficiency order, or NO_SELECTION
  private ArrayList<KnapsackItem> _sortedItems; // items in decreasing efficiency order
  private long _profitMax; // max single-item profit + 1; efficiency stand-in for zero-weight items

  /**
   * Initialize the propagator
   * @param state the current knapsack state
   * @param capacity the knapsack capacity for this dimension
   */
  public KnapsackCapacityPropagatorImpl(KnapsackState state, long capacity) {
    super(state);
    _capacity = capacity;
    _consumedCapacity = 0L;
    _breakItemId = NO_SELECTION;
    _sortedItems = new ArrayList<KnapsackItem>();
    _profitMax = 0L;
  }

  /**
   * Recompute the profit bounds for the current state: the lower bound is the profit of the
   * current selection plus a greedy fill of the remaining capacity with the most efficient
   * unbound items; the upper bound additionally allows a fractional share around the first item
   * that no longer fits (linear relaxation).
   */
  @Override
  public void computeProfitBounds() {
    setProfitLowerBound(currentProfit());
    _breakItemId = NO_SELECTION;

    long remainingCapacity = _capacity - _consumedCapacity;
    int breakSortedItemId = NO_SELECTION;
    final int numberOfSortedItems = _sortedItems.size();
    // Greedily pack unbound items in decreasing efficiency order; the first
    // one that does not fit becomes the "break" item.
    for (int sortedId = 0; sortedId < numberOfSortedItems; sortedId++) {
      final KnapsackItem item = _sortedItems.get(sortedId);
      if (!state().isBound(item.id)) {
        _breakItemId = item.id;

        if (remainingCapacity >= item.weight) {
          remainingCapacity -= item.weight;
          setProfitLowerBound(profitLowerBound() + item.profit);
        } else {
          breakSortedItemId = sortedId;
          break;
        }
      }
    }
    // With no break item the greedy fill is exact; otherwise relax around it.
    setProfitUpperBound(profitLowerBound());
    if (breakSortedItemId != NO_SELECTION) {
      final long additionalProfit = getAdditionalProfit(remainingCapacity, breakSortedItemId);
      setProfitUpperBound(profitUpperBound() + additionalProfit);
    }
  }

  /** @return the current break item id, used by the solver as the branching candidate. */
  @Override
  public int getNextItemId() {
    return _breakItemId;
  }

  @Override
  protected void initPropagator() {
    _consumedCapacity = 0L;
    _breakItemId = NO_SELECTION;
    _sortedItems = new ArrayList<KnapsackItem>(items());
    _profitMax = 0L;
    for (KnapsackItem item : _sortedItems) {
      _profitMax = Math.max(_profitMax, item.profit);
    }
    // Strictly greater than any profit: used as the efficiency of zero-weight
    // items so they sort first (see KnapsackItem.getEfficiency).
    _profitMax++;
    Collections.sort(_sortedItems, new KnapsackItemDecreasingEfficiencyComparator(_profitMax));
  }

  @Override
  protected boolean updatePropagator(boolean revert, KnapsackAssignment assignment) {
    // Track consumed capacity; inserting an item past capacity fails the update.
    if (assignment.isIn) {
      if (revert) {
        _consumedCapacity -= items().get(assignment.itemId).weight;
      } else {
        _consumedCapacity += items().get(assignment.itemId).weight;
        if (_consumedCapacity > _capacity) {
          return false;
        }
      }
    }
    return true;
  }

  @Override
  protected void copyCurrentStateToSolutionPropagator(ArrayList<Boolean> solution) {
    if (solution == null) {
      throw new RuntimeException("solution cannot be null!");
    }
    // Complete the partial solution greedily: add unbound items in efficiency
    // order while they still fit, stopping at the first one that does not.
    long remainingCapacity = _capacity - _consumedCapacity;
    for (KnapsackItem item : _sortedItems) {
      if (!state().isBound(item.id)) {
        if (remainingCapacity >= item.weight) {
          remainingCapacity -= item.weight;
          solution.set(item.id, true);
        } else {
          return;
        }
      }
    }
  }

  /**
   * Extra profit allowed by the linear relaxation around the break item: the best of (a) filling
   * the leftover capacity with a fraction of the item after the break item, or (b) taking the
   * break item fully while giving back a fraction of the (more efficient) item before it.
   */
  private long getAdditionalProfit(long remainingCapacity, int breakItemId) {
    // Option (a): fractional share of the next item in sorted order.
    final int afterBreakItemId = breakItemId + 1;
    long additionalProfitWhenNoBreakItem = 0L;
    if (afterBreakItemId < _sortedItems.size()) {
      final long nextWeight = _sortedItems.get(afterBreakItemId).weight;
      final long nextProfit = _sortedItems.get(afterBreakItemId).profit;
      additionalProfitWhenNoBreakItem =
          upperBoundOfRatio(remainingCapacity, nextProfit, nextWeight);
    }

    // Option (b): take the break item, refund a fraction of the previous item.
    final int beforeBreakItemId = breakItemId - 1;
    long additionalProfitWhenBreakItem = 0L;
    if (beforeBreakItemId >= 0) {
      final long previousWeight = _sortedItems.get(beforeBreakItemId).weight;
      if (previousWeight != 0) {
        final long previousProfit = _sortedItems.get(beforeBreakItemId).profit;
        final long overusedCapacity = _sortedItems.get(breakItemId).weight - remainingCapacity;
        final long ratio = upperBoundOfRatio(overusedCapacity, previousProfit, previousWeight);

        additionalProfitWhenBreakItem = _sortedItems.get(breakItemId).profit - ratio;
      }
    }

    final long additionalProfit =
        Math.max(additionalProfitWhenNoBreakItem, additionalProfitWhenBreakItem);
    return additionalProfit;
  }

  /**
   * Position of the most significant set bit of n, found by binary search over
   * bit-group widths 32, 16, 8, 4, 2, 1. Returns 0 for n in {0, 1}.
   */
  private int mostSignificantBitsPosition64(long n) {
    int b = 0;
    if (0 != (n & (ALL_BITS_64 << (1 << 5)))) {
      b |= (1 << 5);
      n >>= (1 << 5);
    }
    if (0 != (n & (ALL_BITS_64 << (1 << 4)))) {
      b |= (1 << 4);
      n >>= (1 << 4);
    }
    if (0 != (n & (ALL_BITS_64 << (1 << 3)))) {
      b |= (1 << 3);
      n >>= (1 << 3);
    }
    if (0 != (n & (ALL_BITS_64 << (1 << 2)))) {
      b |= (1 << 2);
      n >>= (1 << 2);
    }
    if (0 != (n & (ALL_BITS_64 << (1 << 1)))) {
      b |= (1 << 1);
      n >>= (1 << 1);
    }
    if (0 != (n & (ALL_BITS_64 << (1 << 0)))) {
      b |= (1 << 0);
    }
    return b;
  }

  /**
   * Conservative overflow test: if the operands' MSB positions sum past 61, the
   * product may not fit in a signed 64-bit long.
   */
  private boolean willProductOverflow(long value1, long value2) {
    final int mostSignificantBitsPosition1 = mostSignificantBitsPosition64(value1);
    final int mostSignificantBitsPosition2 = mostSignificantBitsPosition64(value2);
    final int overflow = 61;
    return mostSignificantBitsPosition1 + mostSignificantBitsPosition2 > overflow;
  }

  /**
   * Computes numerator1 * numerator2 / denominator: exactly in long arithmetic
   * when the product cannot overflow, otherwise in double with round-to-nearest.
   */
  private long upperBoundOfRatio(long numerator1, long numerator2, long denominator) {
    if (!willProductOverflow(numerator1, numerator2)) {
      final long numerator = numerator1 * numerator2;
      final long result = numerator / denominator;
      return result;
    } else {
      final double ratio = (((double) numerator1) * ((double) numerator2)) / ((double) denominator);
      final long result = ((long) Math.floor(ratio + 0.5));
      return result;
    }
  }

  /**
   * A special comparator that orders knapsack items by decreasing efficiency (profit to weight
   * ratio); ties compare equal. profitMax is the efficiency assigned to zero-weight items.
   */
  private static class KnapsackItemDecreasingEfficiencyComparator implements
      Comparator<KnapsackItem> {
    private final long _profitMax;

    public KnapsackItemDecreasingEfficiencyComparator(long profitMax) {
      _profitMax = profitMax;
    }

    @Override
    public int compare(KnapsackItem item1, KnapsackItem item2) {
      double eff1 = item1.getEfficiency(_profitMax);
      double eff2 = item2.getEfficiency(_profitMax);
      if (eff1 < eff2) {
        return 1;
      } else if (eff1 > eff2) {
        return -1;
      } else {
        return 0;
      }
    }

  }
}
http://git-wip-us.apache.org/repos/asf/helix/blob/9a2b729e/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackGenericSolverImpl.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackGenericSolverImpl.java b/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackGenericSolverImpl.java
new file mode 100644
index 0000000..1bf1d3f
--- /dev/null
+++ b/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackGenericSolverImpl.java
@@ -0,0 +1,269 @@
+package org.apache.helix.controller.strategy.knapsack;
+
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.PriorityQueue;
+
/**
 * A generic knapsack solver that supports multiple dimensions<br/>
 * <br/>
 * Uses best-first branch-and-bound: one capacity propagator per dimension prunes
 * assignments, and search nodes are expanded in decreasing upper-bound order.<br/>
 * <br/>
 * Based on the C++ knapsack solver in Google's or-tools package.
 */
public class KnapsackGenericSolverImpl extends AbstractBaseKnapsackSolver {
  private static final int MASTER_PROPAGATOR_ID = 0;
  private static final int NO_SELECTION = -1;

  private ArrayList<KnapsackPropagator> _propagators; // one per dimension
  private int _masterPropagatorId; // propagator whose bounds drive branching
  private ArrayList<KnapsackSearchNode> _searchNodes; // all nodes created during the search
  private KnapsackState _state; // shared bound/selected state of every item
  private long _bestSolutionProfit; // best feasible profit found so far
  private ArrayList<Boolean> _bestSolution; // per-item membership of the best solution

  /**
   * Create the solver
   * @param solverName name of the solver
   */
  public KnapsackGenericSolverImpl(String solverName) {
    super(solverName);
    _propagators = new ArrayList<KnapsackPropagator>();
    _masterPropagatorId = MASTER_PROPAGATOR_ID;
    _searchNodes = new ArrayList<KnapsackSearchNode>();
    _state = new KnapsackStateImpl();
    _bestSolutionProfit = 0L;
    _bestSolution = new ArrayList<Boolean>();
  }

  @Override
  public void init(ArrayList<Long> profits, ArrayList<ArrayList<Long>> weights,
      ArrayList<Long> capacities) {
    clear();
    final int numberOfItems = profits.size();
    // weights is indexed as weights[dimension][item].
    final int numberOfDimensions = weights.size();
    _state.init(numberOfItems);

    _bestSolution.clear();
    for (int i = 0; i < numberOfItems; i++) {
      _bestSolution.add(false);
    }

    // One capacity propagator per dimension, all sharing the same state.
    for (int i = 0; i < numberOfDimensions; i++) {
      KnapsackPropagator propagator = new KnapsackCapacityPropagatorImpl(_state, capacities.get(i));
      propagator.init(profits, weights.get(i));
      _propagators.add(propagator);
    }
    _masterPropagatorId = MASTER_PROPAGATOR_ID;
  }

  /** @return the number of items in the current problem */
  public int getNumberOfItems() {
    return _state.getNumberOfItems();
  }

  @Override
  public long[] getLowerAndUpperBoundWhenItem(int itemId, boolean isItemIn, long lowerBound,
      long upperBound) {
    long[] result = {
        lowerBound, upperBound
    };
    // Tentatively apply the assignment, read the resulting bounds, then revert.
    KnapsackAssignment assignment = new KnapsackAssignment(itemId, isItemIn);
    final boolean fail = !incrementalUpdate(false, assignment);
    if (fail) {
      // Infeasible assignment: collapse both bounds to zero.
      result[0] = 0L;
      result[1] = 0L;
    } else {
      result[0] =
          (hasOnePropagator()) ? _propagators.get(_masterPropagatorId).profitLowerBound() : 0L;
      result[1] = getAggregatedProfitUpperBound();
    }

    final boolean failRevert = !incrementalUpdate(true, assignment);
    if (failRevert) {
      result[0] = 0L;
      result[1] = 0L;
    }
    return result;
  }

  /** Select which propagator drives branching and the lower bound. */
  public void setMasterPropagatorId(int masterPropagatorId) {
    _masterPropagatorId = masterPropagatorId;
  }

  /**
   * Best-first branch-and-bound: expand nodes in decreasing upper-bound order,
   * stopping once no queued node can beat the best feasible profit found.
   */
  @Override
  public long solve() {
    _bestSolutionProfit = 0L;
    PriorityQueue<KnapsackSearchNode> searchQueue =
        new PriorityQueue<KnapsackSearchNode>(11,
            new KnapsackSearchNodeInDecreasingUpperBoundComparator());
    // Root node carries a dummy assignment (NO_SELECTION).
    KnapsackAssignment assignment = new KnapsackAssignment(NO_SELECTION, true);
    KnapsackSearchNode rootNode = new KnapsackSearchNodeImpl(null, assignment);
    rootNode.setCurrentProfit(getCurrentProfit());
    rootNode.setProfitUpperBound(getAggregatedProfitUpperBound());
    rootNode.setNextItemId(getNextItemId());
    _searchNodes.add(rootNode);

    // Branch on the root's next item: exclude (false) and include (true).
    if (makeNewNode(rootNode, false)) {
      searchQueue.add(_searchNodes.get(_searchNodes.size() - 1));
    }
    if (makeNewNode(rootNode, true)) {
      searchQueue.add(_searchNodes.get(_searchNodes.size() - 1));
    }

    KnapsackSearchNode currentNode = rootNode;
    // Stop when the best remaining upper bound cannot improve on the incumbent.
    while (!searchQueue.isEmpty() && searchQueue.peek().profitUpperBound() > _bestSolutionProfit) {
      KnapsackSearchNode node = searchQueue.poll();

      // TODO: check if equality is enough
      if (node != currentNode) {
        // Move the propagators' state from the previously expanded node to this one.
        KnapsackSearchPath path = new KnapsackSearchPathImpl(currentNode, node);
        path.init();
        final boolean noFail = updatePropagators(path);
        currentNode = node;
        if (!noFail) {
          throw new RuntimeException("solver failed to update propagators");
        }
      }

      if (makeNewNode(node, false)) {
        searchQueue.add(_searchNodes.get(_searchNodes.size() - 1));
      }
      if (makeNewNode(node, true)) {
        searchQueue.add(_searchNodes.get(_searchNodes.size() - 1));
      }
    }
    return _bestSolutionProfit;
  }

  @Override
  public boolean bestSolution(int itemId) {
    return _bestSolution.get(itemId);
  }

  private void clear() {
    _propagators.clear();
    _searchNodes.clear();
  }

  /**
   * Replay assignments along a search path: undo from the path's start node up
   * to the junction node (via), then apply from the junction down to the end node.
   * @return true when every update succeeded
   */
  private boolean updatePropagators(final KnapsackSearchPath path) {
    boolean noFail = true;
    KnapsackSearchNode node = path.from();
    KnapsackSearchNode via = path.via();
    while (node != via) {
      noFail = incrementalUpdate(true, node.assignment()) && noFail;
      node = node.parent();
    }
    node = path.to();
    while (node != via) {
      noFail = incrementalUpdate(false, node.assignment()) && noFail;
      node = node.parent();
    }
    return noFail;
  }

  /**
   * Apply (or revert) one assignment to the state and every propagator.
   * Note: no early return — every propagator sees the update even after a
   * failure, so a later revert stays symmetric.
   */
  private boolean incrementalUpdate(boolean revert, final KnapsackAssignment assignment) {
    boolean noFail = _state.updateState(revert, assignment);
    for (KnapsackPropagator propagator : _propagators) {
      noFail = propagator.update(revert, assignment) && noFail;
    }
    return noFail;
  }

  /** Record the master propagator's current solution if it beats the incumbent. */
  private void updateBestSolution() {
    final long profitLowerBound =
        (hasOnePropagator()) ? _propagators.get(_masterPropagatorId).profitLowerBound()
            : _propagators.get(_masterPropagatorId).currentProfit();

    if (_bestSolutionProfit < profitLowerBound) {
      _bestSolutionProfit = profitLowerBound;
      _propagators.get(_masterPropagatorId).copyCurrentStateToSolution(hasOnePropagator(),
          _bestSolution);
    }
  }

  /**
   * Evaluate branching on node's next item with the given membership: apply the
   * assignment on a throwaway node, record its bounds, revert, and keep a fresh
   * node only if it is feasible and not dominated by the incumbent.
   * @return true when a new node was stored (always the last element of _searchNodes)
   */
  private boolean makeNewNode(final KnapsackSearchNode node, boolean isIn) {
    if (node.nextItemId() == NO_SELECTION) {
      return false;
    }
    KnapsackAssignment assignment = new KnapsackAssignment(node.nextItemId(), isIn);
    KnapsackSearchNode newNode = new KnapsackSearchNodeImpl(node, assignment);

    // Temporarily walk to the candidate node to measure its bounds.
    KnapsackSearchPath path = new KnapsackSearchPathImpl(node, newNode);
    path.init();
    final boolean noFail = updatePropagators(path);
    if (noFail) {
      newNode.setCurrentProfit(getCurrentProfit());
      newNode.setProfitUpperBound(getAggregatedProfitUpperBound());
      newNode.setNextItemId(getNextItemId());
      updateBestSolution();
    }

    // Walk back so the propagators return to node's state.
    KnapsackSearchPath revertPath = new KnapsackSearchPathImpl(newNode, node);
    revertPath.init();
    updatePropagators(revertPath);

    // Prune infeasible or dominated candidates.
    if (!noFail || newNode.profitUpperBound() < _bestSolutionProfit) {
      return false;
    }

    // Store a clean copy carrying the measured bounds.
    KnapsackSearchNode relevantNode = new KnapsackSearchNodeImpl(node, assignment);
    relevantNode.setCurrentProfit(newNode.currentProfit());
    relevantNode.setProfitUpperBound(newNode.profitUpperBound());
    relevantNode.setNextItemId(newNode.nextItemId());
    _searchNodes.add(relevantNode);

    return true;
  }

  /** @return the tightest (minimum) upper bound over all propagators */
  private long getAggregatedProfitUpperBound() {
    long upperBound = Long.MAX_VALUE;
    for (KnapsackPropagator propagator : _propagators) {
      propagator.computeProfitBounds();
      final long propagatorUpperBound = propagator.profitUpperBound();
      upperBound = Math.min(upperBound, propagatorUpperBound);
    }
    return upperBound;
  }

  private boolean hasOnePropagator() {
    return _propagators.size() == 1;
  }

  private long getCurrentProfit() {
    return _propagators.get(_masterPropagatorId).currentProfit();
  }

  private int getNextItemId() {
    return _propagators.get(_masterPropagatorId).getNextItemId();
  }

  /**
   * A special comparator that orders knapsack search nodes in decreasing potential profit order;
   * ties on upper bound are broken by decreasing current profit
   */
  // TODO: check order
  private static class KnapsackSearchNodeInDecreasingUpperBoundComparator implements
      Comparator<KnapsackSearchNode> {
    @Override
    public int compare(KnapsackSearchNode node1, KnapsackSearchNode node2) {
      final long profitUpperBound1 = node1.profitUpperBound();
      final long profitUpperBound2 = node2.profitUpperBound();
      if (profitUpperBound1 == profitUpperBound2) {
        final long currentProfit1 = node1.currentProfit();
        final long currentProfit2 = node2.currentProfit();
        if (currentProfit1 > currentProfit2) {
          return -1;
        } else if (currentProfit1 < currentProfit2) {
          return 1;
        } else {
          return 0;
        }
      }
      if (profitUpperBound1 > profitUpperBound2) {
        return -1;
      } else if (profitUpperBound1 < profitUpperBound2) {
        return 1;
      } else {
        return 0;
      }
    }

  }
}
http://git-wip-us.apache.org/repos/asf/helix/blob/9a2b729e/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackItem.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackItem.java b/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackItem.java
new file mode 100644
index 0000000..3996816
--- /dev/null
+++ b/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackItem.java
@@ -0,0 +1,33 @@
+package org.apache.helix.controller.strategy.knapsack;
+
/**
 * Basic structure of an item in a knapsack: an id plus the item's weight in one
 * dimension and the profit gained by packing it.<br/>
 * <br/>
 * Based on the C++ knapsack solver in Google's or-tools package.
 */
public class KnapsackItem {
  public final int id;
  public final long weight;
  public final long profit;

  /**
   * Initialize an immutable item.
   * @param id the item id
   * @param weight the cost to place the item in the knapsack for one dimension
   * @param profit the benefit of placing the item in the knapsack
   */
  public KnapsackItem(int id, long weight, long profit) {
    this.id = id;
    this.weight = weight;
    this.profit = profit;
  }

  /**
   * Get the profit-to-weight ratio used to rank items.
   * @param profitMax the maximum possible profit for this item; used as the
   *        efficiency of a weightless item so it sorts ahead of everything else
   * @return the item's addition efficiency
   */
  public double getEfficiency(long profitMax) {
    if (weight > 0) {
      return ((double) profit) / ((double) weight);
    }
    return (double) profitMax;
  }
}
http://git-wip-us.apache.org/repos/asf/helix/blob/9a2b729e/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackPropagator.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackPropagator.java b/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackPropagator.java
new file mode 100644
index 0000000..702bf1e
--- /dev/null
+++ b/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackPropagator.java
@@ -0,0 +1,61 @@
+package org.apache.helix.controller.strategy.knapsack;
+
+import java.util.ArrayList;
+
/**
 * Constraint enforcer for a single dimension of a knapsack solution search<br/>
 * <br/>
 * Based on the C++ knapsack solver in Google's or-tools package.
 */
public interface KnapsackPropagator {
  /**
   * Initialize the propagator with the problem data for its dimension.
   * @param profits profits for selecting each item
   * @param weights weights of each item for this dimension
   */
  void init(final ArrayList<Long> profits, final ArrayList<Long> weights);

  /**
   * Update the search with a new (or reverted) decision.
   * @param revert true to revert the assignment, false to apply it
   * @param assignment the assignment to use for the update
   * @return true if successful, false if the assignment is infeasible
   */
  boolean update(boolean revert, final KnapsackAssignment assignment);

  /**
   * Compute the upper and lower bounds of potential profits for the current state.
   */
  void computeProfitBounds();

  /**
   * Get the next item to use in the search.
   * @return item id
   */
  int getNextItemId();

  /**
   * Get the current profit of the search.
   * @return current profit
   */
  long currentProfit();

  /**
   * Get the lowest possible profit of the search (valid after
   * {@link #computeProfitBounds()}).
   * @return profit lower bound
   */
  long profitLowerBound();

  /**
   * Get the highest possible profit of the search (valid after
   * {@link #computeProfitBounds()}).
   * @return profit upper bound
   */
  long profitUpperBound();

  /**
   * Copy the current computed state to the final solution.
   * @param hasOnePropagator true if there is only one propagator, i.e. 1 dimension
   * @param solution the solution vector, one entry per item
   */
  void copyCurrentStateToSolution(boolean hasOnePropagator, ArrayList<Boolean> solution);
}
http://git-wip-us.apache.org/repos/asf/helix/blob/9a2b729e/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackSearchNode.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackSearchNode.java b/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackSearchNode.java
new file mode 100644
index 0000000..1ac8061
--- /dev/null
+++ b/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackSearchNode.java
@@ -0,0 +1,62 @@
+package org.apache.helix.controller.strategy.knapsack;
+
/**
 * Description of a knapsack element during the search process; nodes form a tree
 * linked through {@link #parent()}.<br/>
 * <br/>
 * Based on the C++ knapsack solver in Google's or-tools package.
 */
public interface KnapsackSearchNode {
  /**
   * Depth of the node in this search (0 for a root node).
   * @return node depth
   */
  int depth();

  /**
   * The parent node in this search.
   * @return the node's immediate parent, or null for a root node
   */
  KnapsackSearchNode parent();

  /**
   * The current node assignment.
   * @return KnapsackAssignment instance
   */
  KnapsackAssignment assignment();

  /**
   * The current profit with this node and search.
   * @return current profit
   */
  long currentProfit();

  /**
   * Set the current profit with this node and search.
   * @param profit current profit
   */
  void setCurrentProfit(long profit);

  /**
   * The maximum possible profit with this node and search.
   * @return profit upper bound
   */
  long profitUpperBound();

  /**
   * Set the maximum possible profit with this node and search.
   * @param profit profit upper bound
   */
  void setProfitUpperBound(long profit);

  /**
   * The next item given this node and search.
   * @return next item id
   */
  int nextItemId();

  /**
   * Set the next item given this node and search.
   * @param id next item id
   */
  void setNextItemId(int id);
}
http://git-wip-us.apache.org/repos/asf/helix/blob/9a2b729e/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackSearchNodeImpl.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackSearchNodeImpl.java b/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackSearchNodeImpl.java
new file mode 100644
index 0000000..ea9cb98
--- /dev/null
+++ b/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackSearchNodeImpl.java
@@ -0,0 +1,77 @@
+package org.apache.helix.controller.strategy.knapsack;
+
+/**
+ * Implementation of {@link KnapsackSearchNode}<br/>
+ * <br/>
+ * Based on the C++ knapsack solver in Google's or-tools package.
+ */
+public class KnapsackSearchNodeImpl implements KnapsackSearchNode {
+ private static final int NO_SELECTION = -1;
+
+ private int _depth;
+ private KnapsackSearchNode _parent;
+ private KnapsackAssignment _assignment;
+ private long _currentProfit;
+ private long _profitUpperBound;
+ private int _nextItemId;
+
+ /**
+ * Initialize a search node
+ * @param parent the node's parent
+ * @param assignment the node's assignment
+ */
+ public KnapsackSearchNodeImpl(final KnapsackSearchNode parent, final KnapsackAssignment assignment) {
+ _depth = (parent == null) ? 0 : parent.depth() + 1;
+ _parent = parent;
+ _assignment = assignment;
+ _currentProfit = 0L;
+ _profitUpperBound = Long.MAX_VALUE;
+ _nextItemId = NO_SELECTION;
+ }
+
+ @Override
+ public int depth() {
+ return _depth;
+ }
+
+ @Override
+ public KnapsackSearchNode parent() {
+ return _parent;
+ }
+
+ @Override
+ public KnapsackAssignment assignment() {
+ return _assignment;
+ }
+
+ @Override
+ public long currentProfit() {
+ return _currentProfit;
+ }
+
+ @Override
+ public void setCurrentProfit(long profit) {
+ _currentProfit = profit;
+ }
+
+ @Override
+ public long profitUpperBound() {
+ return _profitUpperBound;
+ }
+
+ @Override
+ public void setProfitUpperBound(long profit) {
+ _profitUpperBound = profit;
+ }
+
+ @Override
+ public int nextItemId() {
+ return _nextItemId;
+ }
+
+ @Override
+ public void setNextItemId(int id) {
+ _nextItemId = id;
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/helix/blob/9a2b729e/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackSearchPath.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackSearchPath.java b/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackSearchPath.java
new file mode 100644
index 0000000..d977143
--- /dev/null
+++ b/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackSearchPath.java
@@ -0,0 +1,39 @@
+package org.apache.helix.controller.strategy.knapsack;
+
/**
 * Construction of the path between search nodes in a knapsack; used to replay
 * decisions when the search jumps between branches.<br/>
 * <br/>
 * Based on the C++ knapsack solver in Google's or-tools package.
 */
public interface KnapsackSearchPath {
  /**
   * Initialize the path by locating the common ancestor of the endpoints.
   */
  void init();

  /**
   * Get the source node.
   * @return starting KnapsackSearchNode
   */
  KnapsackSearchNode from();

  /**
   * Get the intermediate node (the common ancestor; valid after {@link #init()}).
   * @return KnapsackSearchNode between source and destination
   */
  KnapsackSearchNode via();

  /**
   * Get the destination node.
   * @return terminating KnapsackSearchNode
   */
  KnapsackSearchNode to();

  /**
   * Get an ancestor of a given search node at (or above) a given depth.
   * @param node the search node
   * @param depth the depth of the ancestor
   * @return the ancestor node
   */
  KnapsackSearchNode moveUpToDepth(final KnapsackSearchNode node, int depth);
}
http://git-wip-us.apache.org/repos/asf/helix/blob/9a2b729e/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackSearchPathImpl.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackSearchPathImpl.java b/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackSearchPathImpl.java
new file mode 100644
index 0000000..06a9ec7
--- /dev/null
+++ b/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackSearchPathImpl.java
@@ -0,0 +1,65 @@
+package org.apache.helix.controller.strategy.knapsack;
+
+/**
+ * Implementation of {@link KnapsackSearchPath}<br/>
+ * <br/>
+ * Based on the C++ knapsack solver in Google's or-tools package.
+ */
+public class KnapsackSearchPathImpl implements KnapsackSearchPath {
+ private KnapsackSearchNode _from;
+ private KnapsackSearchNode _via;
+ private KnapsackSearchNode _to;
+
+ /**
+ * Create a search path between nodes in a knapsack
+ * @param from the source node
+ * @param to the destination node
+ */
+ public KnapsackSearchPathImpl(final KnapsackSearchNode from, final KnapsackSearchNode to) {
+ _from = from;
+ _via = null;
+ _to = to;
+ }
+
+ @Override
+ public void init() {
+ KnapsackSearchNode nodeFrom = moveUpToDepth(_from, _to.depth());
+ KnapsackSearchNode nodeTo = moveUpToDepth(_to, _from.depth());
+ if (nodeFrom.depth() != nodeTo.depth()) {
+ throw new RuntimeException("to and from depths do not match!");
+ }
+
+ // Find common parent
+ // TODO: check if basic equality is enough
+ while (nodeFrom != nodeTo) {
+ nodeFrom = nodeFrom.parent();
+ nodeTo = nodeTo.parent();
+ }
+ _via = nodeFrom;
+ }
+
+ @Override
+ public KnapsackSearchNode from() {
+ return _from;
+ }
+
+ @Override
+ public KnapsackSearchNode via() {
+ return _via;
+ }
+
+ @Override
+ public KnapsackSearchNode to() {
+ return _to;
+ }
+
+ @Override
+ public KnapsackSearchNode moveUpToDepth(KnapsackSearchNode node, int depth) {
+ KnapsackSearchNode currentNode = node;
+ while (currentNode.depth() > depth) {
+ currentNode = currentNode.parent();
+ }
+ return currentNode;
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/helix/blob/9a2b729e/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackSolver.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackSolver.java b/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackSolver.java
new file mode 100644
index 0000000..832a470
--- /dev/null
+++ b/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackSolver.java
@@ -0,0 +1,60 @@
+package org.apache.helix.controller.strategy.knapsack;
+
+import java.util.ArrayList;
+
/**
 * Interface for a factory of multidimensional 0-1 knapsack solvers that support
 * problem reductions<br/>
 * <br/>
 * Based on the C++ knapsack solver in Google's or-tools package.
 */
public interface KnapsackSolver {
  /**
   * Collection of supported algorithms
   */
  enum SolverType {
    /**
     * A solver that uses the branch-and-bound technique, supports multiple dimensions
     */
    KNAPSACK_MULTIDIMENSION_BRANCH_AND_BOUND_SOLVER
  }

  /**
   * Initialize the solver with the problem data.
   * @param profits profit for each element if selected
   * @param weights cost of each element in each dimension (outer list: dimensions)
   * @param capacities maximum total weight in each dimension
   */
  void init(final ArrayList<Long> profits, final ArrayList<ArrayList<Long>> weights,
      final ArrayList<Long> capacities);

  /**
   * Solve the knapsack problem.
   * @return the approximated optimal profit
   */
  long solve();

  /**
   * Check if an element was selected in the optimal solution (valid after
   * {@link #solve()}).
   * @param itemId the index of the element to check
   * @return true if the item is present, false otherwise
   */
  boolean bestSolutionContains(int itemId);

  /**
   * Get the name of this solver.
   * @return solver name
   */
  String getName();

  /**
   * Check if a reduction should be used to prune paths early on.
   * @return true if reduction enabled, false otherwise
   */
  boolean useReduction();

  /**
   * Set whether a reduction should be used to prune paths early on.
   * @param useReduction true to enable, false to disable
   */
  void setUseReduction(boolean useReduction);
}
http://git-wip-us.apache.org/repos/asf/helix/blob/9a2b729e/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackSolverImpl.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackSolverImpl.java b/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackSolverImpl.java
new file mode 100644
index 0000000..eeab0b1
--- /dev/null
+++ b/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackSolverImpl.java
@@ -0,0 +1,191 @@
+package org.apache.helix.controller.strategy.knapsack;
+
+import java.util.ArrayList;
+
/**
 * Implementation of {@link KnapsackSolver}<br/>
 * <br/>
 * Wraps a {@link BaseKnapsackSolver} and optionally applies a bound-based
 * reduction that fixes the in/out decision for some items before the search.<br/>
 * <br/>
 * Based on the C++ knapsack solver in Google's or-tools package.
 */
public class KnapsackSolverImpl implements KnapsackSolver {
  // Underlying search algorithm that solves the (possibly reduced) problem.
  private final BaseKnapsackSolver _solver;
  // Per original item: true if the reduction fixed the item's decision up front.
  private final ArrayList<Boolean> _knownValue;
  // For items fixed by the reduction: whether the item is in the best solution.
  private final ArrayList<Boolean> _bestSolution;
  // Maps an original item id to its index in the reduced problem.
  private final ArrayList<Integer> _mappingReducedItemId;
  // True when the reduction fixed every item, so no search is needed.
  private boolean _isProblemSolved;
  // Profit contributed by items the reduction forced into the knapsack.
  private long _additionalProfit;
  private boolean _useReduction;

  /**
   * Initialize a generic knapsack solver (branch-and-bound), with reduction enabled.
   * @param solverName the name of the solver
   */
  public KnapsackSolverImpl(String solverName) {
    _solver = new KnapsackGenericSolverImpl(solverName);
    _knownValue = new ArrayList<Boolean>();
    _bestSolution = new ArrayList<Boolean>();
    _mappingReducedItemId = new ArrayList<Integer>();
    _isProblemSolved = false;
    _additionalProfit = 0L;
    _useReduction = true;
  }

  /**
   * Initialize a specified knapsack solver, with reduction enabled.
   * @param solverType the type of solver
   * @param solverName the name of the solver
   * @throws RuntimeException if the solver type is not supported
   */
  public KnapsackSolverImpl(SolverType solverType, String solverName) {
    _knownValue = new ArrayList<Boolean>();
    _bestSolution = new ArrayList<Boolean>();
    _mappingReducedItemId = new ArrayList<Integer>();
    _isProblemSolved = false;
    _additionalProfit = 0L;
    _useReduction = true;
    BaseKnapsackSolver solver = null;
    switch (solverType) {
    case KNAPSACK_MULTIDIMENSION_BRANCH_AND_BOUND_SOLVER:
      solver = new KnapsackGenericSolverImpl(solverName);
      break;
    default:
      throw new RuntimeException("Solver " + solverType + " not supported");
    }
    _solver = solver;
  }

  @Override
  public void init(ArrayList<Long> profits, ArrayList<ArrayList<Long>> weights,
      ArrayList<Long> capacities) {
    _additionalProfit = 0L;
    _isProblemSolved = false;
    _solver.init(profits, weights, capacities);
    if (_useReduction) {
      final int numItems = profits.size();
      final int numReducedItems = reduceProblem(numItems);

      // Items forced "in" by the reduction contribute a fixed profit.
      if (numReducedItems > 0) {
        computeAdditionalProfit(profits);
      }

      // Re-initialize the underlying solver on the smaller problem only when the
      // reduction fixed some, but not all, items.
      if (numReducedItems > 0 && numReducedItems < numItems) {
        initReducedProblem(profits, weights, capacities);
      }
    }
  }

  @Override
  public long solve() {
    // If the reduction already fixed every item, the fixed profit is the answer.
    return _additionalProfit + ((_isProblemSolved) ? 0 : _solver.solve());
  }

  @Override
  public boolean bestSolutionContains(int itemId) {
    // For items fixed by the reduction, answer from the precomputed solution;
    // otherwise translate to the reduced-problem index and ask the solver.
    final int mappedItemId = (_useReduction) ? _mappingReducedItemId.get(itemId) : itemId;
    return (_useReduction && _knownValue.get(itemId)) ? _bestSolution.get(itemId) : _solver
        .bestSolution(mappedItemId);
  }

  @Override
  public String getName() {
    return _solver.getName();
  }

  @Override
  public boolean useReduction() {
    return _useReduction;
  }

  @Override
  public void setUseReduction(boolean useReduction) {
    _useReduction = useReduction;
  }

  /**
   * Try to fix each item's decision using profit bounds: if forcing an item one
   * way cannot reach the best known lower bound, the opposite decision is fixed.
   * @param numItems number of items in the original problem
   * @return number of items whose decision was fixed
   */
  private int reduceProblem(int numItems) {
    _knownValue.clear();
    _bestSolution.clear();
    _mappingReducedItemId.clear();
    // Naming follows or-tools: j0UpperBounds holds bounds with the item forced
    // IN, j1UpperBounds with the item forced OUT (see the calls below).
    ArrayList<Long> j0UpperBounds = new ArrayList<Long>();
    ArrayList<Long> j1UpperBounds = new ArrayList<Long>();
    for (int i = 0; i < numItems; i++) {
      _knownValue.add(false);
      _bestSolution.add(false);
      _mappingReducedItemId.add(i);
      j0UpperBounds.add(Long.MAX_VALUE);
      j1UpperBounds.add(Long.MAX_VALUE);
    }
    _additionalProfit = 0L;
    long bestLowerBound = 0L;
    for (int itemId = 0; itemId < numItems; itemId++) {
      // NOTE(review): these initial values appear to be placeholders; the C++
      // original passes them as output parameters. The returned array is
      // consumed as bounds[0] = lower, bounds[1] = upper — confirm against
      // BaseKnapsackSolver.getLowerAndUpperBoundWhenItem.
      long upperBound = 0L;
      long lowerBound = Long.MAX_VALUE;
      // Bounds when the item is forced OUT (isIn == false).
      long[] bounds = _solver.getLowerAndUpperBoundWhenItem(itemId, false, upperBound, lowerBound);
      lowerBound = bounds[0];
      upperBound = bounds[1];
      j1UpperBounds.set(itemId, upperBound);
      bestLowerBound = Math.max(bestLowerBound, lowerBound);
      // Bounds when the item is forced IN (isIn == true).
      bounds = _solver.getLowerAndUpperBoundWhenItem(itemId, true, upperBound, lowerBound);
      lowerBound = bounds[0];
      upperBound = bounds[1];
      j0UpperBounds.set(itemId, upperBound);
      bestLowerBound = Math.max(bestLowerBound, lowerBound);
    }

    int numReducedItems = 0;
    for (int itemId = 0; itemId < numItems; itemId++) {
      if (bestLowerBound > j0UpperBounds.get(itemId)) {
        // Even with the item in, we cannot reach the best lower bound: fix it OUT.
        _knownValue.set(itemId, true);
        _bestSolution.set(itemId, false);
        numReducedItems++;
      } else if (bestLowerBound > j1UpperBounds.get(itemId)) {
        // Even with the item out, we cannot reach the best lower bound: fix it IN.
        _knownValue.set(itemId, true);
        _bestSolution.set(itemId, true);
        numReducedItems++;
      }
    }
    _isProblemSolved = numReducedItems == numItems;
    return numReducedItems;
  }

  /**
   * Sum the profits of items the reduction forced into the knapsack.
   * @param profits profit of each original item
   */
  private void computeAdditionalProfit(final ArrayList<Long> profits) {
    final int numItems = profits.size();
    _additionalProfit = 0L;
    for (int itemId = 0; itemId < numItems; itemId++) {
      if (_knownValue.get(itemId) && _bestSolution.get(itemId)) {
        _additionalProfit += profits.get(itemId);
      }
    }
  }

  /**
   * Rebuild the problem without fixed items and re-initialize the underlying
   * solver on it; capacities are lowered by the weights of items fixed IN.
   * @param profits profit of each original item
   * @param weights weight of each original item per dimension
   * @param capacities original capacity per dimension
   */
  private void initReducedProblem(final ArrayList<Long> profits,
      final ArrayList<ArrayList<Long>> weights, final ArrayList<Long> capacities) {
    final int numItems = profits.size();
    final int numDimensions = capacities.size();

    // Keep only undecided items; record each one's new (reduced) index.
    ArrayList<Long> reducedProfits = new ArrayList<Long>();
    for (int itemId = 0; itemId < numItems; itemId++) {
      if (!_knownValue.get(itemId)) {
        _mappingReducedItemId.set(itemId, reducedProfits.size());
        reducedProfits.add(profits.get(itemId));
      }
    }

    ArrayList<ArrayList<Long>> reducedWeights = new ArrayList<ArrayList<Long>>();
    ArrayList<Long> reducedCapacities = new ArrayList<Long>(capacities);
    for (int dim = 0; dim < numDimensions; dim++) {
      final ArrayList<Long> oneDimensionWeights = weights.get(dim);
      ArrayList<Long> oneDimensionReducedWeights = new ArrayList<Long>();
      for (int itemId = 0; itemId < numItems; itemId++) {
        if (_knownValue.get(itemId)) {
          if (_bestSolution.get(itemId)) {
            // Item fixed IN: it consumes capacity in every dimension.
            reducedCapacities
                .set(dim, reducedCapacities.get(dim) - oneDimensionWeights.get(itemId));
          }
        } else {
          oneDimensionReducedWeights.add(oneDimensionWeights.get(itemId));
        }
      }
      reducedWeights.add(oneDimensionReducedWeights);
    }
    _solver.init(reducedProfits, reducedWeights, reducedCapacities);
  }
}
http://git-wip-us.apache.org/repos/asf/helix/blob/9a2b729e/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackState.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackState.java b/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackState.java
new file mode 100644
index 0000000..66713eb
--- /dev/null
+++ b/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackState.java
@@ -0,0 +1,42 @@
+package org.apache.helix.controller.strategy.knapsack;
+
/**
 * The current state of the knapsack: for each item, whether a decision has been
 * made (bound) and whether the item is packed (in)<br/>
 * <br/>
 * Based on the C++ knapsack solver in Google's or-tools package.
 */
public interface KnapsackState {
  /**
   * Initialize the knapsack with the number of items; all items start unbound.
   * @param numberOfItems the number of items
   */
  void init(int numberOfItems);

  /**
   * Update this state with an assignment.
   * @param revert true to revert to the previous state, false otherwise
   * @param assignment the assignment that was made
   * @return true on success, false if the assignment contradicts a bound item
   */
  boolean updateState(boolean revert, final KnapsackAssignment assignment);

  /**
   * Get the current number of items in the knapsack.
   * @return number of items
   */
  int getNumberOfItems();

  /**
   * Check if an item is currently bound to a decision.
   * @param id the item id
   * @return true if bound, false otherwise
   */
  boolean isBound(int id);

  /**
   * Check if an item is currently in the knapsack.
   * @param id the item id
   * @return true if inside, false otherwise
   */
  boolean isIn(int id);
}
http://git-wip-us.apache.org/repos/asf/helix/blob/9a2b729e/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackStateImpl.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackStateImpl.java b/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackStateImpl.java
new file mode 100644
index 0000000..8e86872
--- /dev/null
+++ b/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackStateImpl.java
@@ -0,0 +1,61 @@
+package org.apache.helix.controller.strategy.knapsack;
+
+import java.util.ArrayList;
+
+/**
+ * Implementation of {@link KnapsackState}<br/>
+ * <br/>
+ * Based on the C++ knapsack solver in Google's or-tools package.
+ */
+public class KnapsackStateImpl implements KnapsackState {
+ private ArrayList<Boolean> _isBound;
+ private ArrayList<Boolean> _isIn;
+
+ /**
+ * Initialize the knapsack state
+ */
+ public KnapsackStateImpl() {
+ _isBound = new ArrayList<Boolean>();
+ _isIn = new ArrayList<Boolean>();
+ }
+
+ @Override
+ public void init(int numberOfItems) {
+ _isBound.clear();
+ _isIn.clear();
+ for (int i = 0; i < numberOfItems; i++) {
+ _isBound.add(false);
+ _isIn.add(false);
+ }
+ }
+
+ @Override
+ public boolean updateState(boolean revert, KnapsackAssignment assignment) {
+ if (revert) {
+ _isBound.set(assignment.itemId, false);
+ } else {
+ if (_isBound.get(assignment.itemId) && _isIn.get(assignment.itemId) != assignment.isIn) {
+ return false;
+ }
+ _isBound.set(assignment.itemId, true);
+ _isIn.set(assignment.itemId, assignment.isIn);
+ }
+ return true;
+ }
+
+ @Override
+ public int getNumberOfItems() {
+ return _isBound.size();
+ }
+
+ @Override
+ public boolean isBound(int id) {
+ return _isBound.get(id);
+ }
+
+ @Override
+ public boolean isIn(int id) {
+ return _isIn.get(id);
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/helix/blob/9a2b729e/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackTester.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackTester.java b/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackTester.java
new file mode 100644
index 0000000..0b3f8ef
--- /dev/null
+++ b/helix-core/src/main/java/org/apache/helix/controller/strategy/knapsack/KnapsackTester.java
@@ -0,0 +1,58 @@
+package org.apache.helix.controller.strategy.knapsack;
+
+import java.util.ArrayList;
+
+public class KnapsackTester {
+ public static void main(String[] args) {
+ // Construct an example
+ long[] PROFITS = {
+ 96, 76, 56, 11, 86, 10, 66, 86, 83, 12, 9, 81
+ };
+ long[][] WEIGHTS = {
+ {
+ 19, 1, 10, 1, 1, 14, 152, 11, 1, 1, 1, 1
+ }, {
+ 0, 4, 53, 0, 0, 80, 0, 4, 5, 0, 0, 0
+ }, {
+ 4, 660, 3, 0, 30, 0, 3, 0, 4, 90, 0, 0
+ }, {
+ 7, 0, 18, 6, 770, 330, 7, 0, 0, 6, 0, 0
+ }, {
+ 0, 20, 0, 4, 52, 3, 0, 0, 0, 5, 4, 0
+ }, {
+ 0, 0, 40, 70, 4, 63, 0, 0, 60, 0, 4, 0
+ }, {
+ 0, 32, 0, 0, 0, 5, 0, 3, 0, 660, 0, 9
+ }
+ };
+ long[] CAPACITIES = {
+ 18209, 7692, 1333, 924, 26638, 61188, 13360
+ };
+ ArrayList<Long> profits = new ArrayList<Long>();
+ for (long profit : PROFITS) {
+ profits.add(profit);
+ }
+ ArrayList<ArrayList<Long>> weights = new ArrayList<ArrayList<Long>>();
+ for (long[] innerWeights : WEIGHTS) {
+ ArrayList<Long> singleWeights = new ArrayList<Long>();
+ for (long weight : innerWeights) {
+ singleWeights.add(weight);
+ }
+ weights.add(singleWeights);
+ }
+ ArrayList<Long> capacities = new ArrayList<Long>();
+ for (long capacity : CAPACITIES) {
+ capacities.add(capacity);
+ }
+
+ // Solve
+ KnapsackSolver solver = new KnapsackSolverImpl("mySolver");
+ solver.init(profits, weights, capacities);
+ long result = solver.solve();
+ System.err.println(result);
+ for (int i = 0; i < profits.size(); i++) {
+ System.err.println(solver.bestSolutionContains(i));
+ }
+ }
+
+}
[39/50] [abbrv] git commit: Bump hadoop client version to 2.3.0 (for
java 7)
Posted by ka...@apache.org.
Bump hadoop client version to 2.3.0 (for java 7)
Project: http://git-wip-us.apache.org/repos/asf/helix/repo
Commit: http://git-wip-us.apache.org/repos/asf/helix/commit/a4864664
Tree: http://git-wip-us.apache.org/repos/asf/helix/tree/a4864664
Diff: http://git-wip-us.apache.org/repos/asf/helix/diff/a4864664
Branch: refs/heads/master
Commit: a486466476fe464663a272827e9a96f4ae9c3aec
Parents: 0a1694b
Author: Kanak Biscuitwala <ka...@apache.org>
Authored: Mon Jun 30 10:23:33 2014 -0700
Committer: Kanak Biscuitwala <ka...@apache.org>
Committed: Mon Jun 30 10:23:33 2014 -0700
----------------------------------------------------------------------
helix-provisioning/pom.xml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/helix/blob/a4864664/helix-provisioning/pom.xml
----------------------------------------------------------------------
diff --git a/helix-provisioning/pom.xml b/helix-provisioning/pom.xml
index 410c2be..0c02919 100644
--- a/helix-provisioning/pom.xml
+++ b/helix-provisioning/pom.xml
@@ -29,7 +29,7 @@ under the License.
<name>Apache Helix :: HelixProvisioning</name>
<properties>
- <hadoop.version>2.2.0</hadoop.version>
+ <hadoop.version>2.3.0</hadoop.version>
<osgi.import>
org.apache.helix*,
org.apache.commons.cli;version="[1.2,2)",
[48/50] [abbrv] Merge remote-tracking branch
'origin/helix-provisioning'
Posted by ka...@apache.org.
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/helix-provisioning/src/main/java/org/apache/helix/provisioning/tools/ContainerAdmin.java
----------------------------------------------------------------------
diff --cc helix-provisioning/src/main/java/org/apache/helix/provisioning/tools/ContainerAdmin.java
index 0000000,8154996..f0e3d37
mode 000000,100644..100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/tools/ContainerAdmin.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/tools/ContainerAdmin.java
@@@ -1,0 -1,98 +1,116 @@@
+ package org.apache.helix.provisioning.tools;
+
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements. See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership. The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License. You may obtain a copy of the License at
++ *
++ * http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing,
++ * software distributed under the License is distributed on an
++ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++ * KIND, either express or implied. See the License for the
++ * specific language governing permissions and limitations
++ * under the License.
++ */
++
+ import java.util.HashMap;
+ import java.util.Map;
+ import java.util.UUID;
+
+ import org.apache.commons.cli.CommandLine;
+ import org.apache.commons.cli.GnuParser;
+ import org.apache.commons.cli.Option;
+ import org.apache.commons.cli.OptionBuilder;
+ import org.apache.commons.cli.OptionGroup;
+ import org.apache.commons.cli.Options;
-import org.apache.commons.cli.ParseException;
+ import org.apache.helix.HelixConnection;
+ import org.apache.helix.api.Participant;
+ import org.apache.helix.api.accessor.ParticipantAccessor;
+ import org.apache.helix.api.id.ClusterId;
+ import org.apache.helix.api.id.MessageId;
+ import org.apache.helix.api.id.ParticipantId;
+ import org.apache.helix.manager.zk.ZkHelixConnection;
+ import org.apache.helix.model.Message;
+ import org.apache.helix.model.Message.MessageType;
+ import org.apache.log4j.Logger;
+
+ /**
- *
++ *
+ *
+ */
+ public class ContainerAdmin {
+
+ private static Logger LOG = Logger.getLogger(ContainerAdmin.class);
+ private static String stopContainer = "stopContainer";
+ private HelixConnection _connection;
+
+ public ContainerAdmin(String zkAddress) {
+ _connection = new ZkHelixConnection(zkAddress);
+ _connection.connect();
+ }
+
+ public void stopContainer(String appName, String participantName) throws Exception {
+ ClusterId clusterId = ClusterId.from(appName);
+ ParticipantAccessor participantAccessor = _connection.createParticipantAccessor(clusterId);
+ ParticipantId participantId = ParticipantId.from(participantName);
+ Participant participant = participantAccessor.readParticipant(participantId);
+ if (participant != null && participant.isAlive()) {
+ Message message = new Message(MessageType.SHUTDOWN, UUID.randomUUID().toString());
+ message.setTgtName(participant.getId().toString());
+ message.setTgtSessionId(participant.getRunningInstance().getSessionId());
+ message.setMsgId(message.getId());
+ Map<MessageId, Message> msgMap = new HashMap<MessageId, Message>();
+ msgMap.put(MessageId.from(message.getId()), message);
+ participantAccessor.insertMessagesToParticipant(participantId, msgMap);
+ do {
+ participant = participantAccessor.readParticipant(participantId);
+ Thread.sleep(1000);
+ LOG.info("Waiting for container:" + participantName + " to shutdown");
- } while (participant!=null && participant.isAlive());
++ } while (participant != null && participant.isAlive());
+ }
-
++
+ }
+
+ @SuppressWarnings("static-access")
+ public static void main(String[] args) throws Exception {
+ Option zkServerOption =
+ OptionBuilder.withLongOpt("zookeeperAddress").withDescription("Provide zookeeper address")
+ .create();
+ zkServerOption.setArgs(1);
+ zkServerOption.setRequired(true);
+ zkServerOption.setArgName("zookeeperAddress(Required)");
+
+ OptionGroup group = new OptionGroup();
+ group.setRequired(true);
+
+ // update container count per service
+ Option stopContainerOption =
+ OptionBuilder.withLongOpt(stopContainer).withDescription("appName participantName")
+ .create();
+ stopContainerOption.setArgs(2);
+ stopContainerOption.setRequired(false);
+ stopContainerOption.setArgName("appName participantName");
+
+ group.addOption(stopContainerOption);
+
+ Options options = new Options();
+ options.addOption(zkServerOption);
+ options.addOptionGroup(group);
+ CommandLine cliParser = new GnuParser().parse(options, args);
+
+ String zkAddress = cliParser.getOptionValue("zookeeperAddress");
+ ContainerAdmin admin = new ContainerAdmin(zkAddress);
+
+ if (cliParser.hasOption(stopContainer)) {
+ String appName = cliParser.getOptionValues(stopContainer)[0];
+ String participantName = cliParser.getOptionValues(stopContainer)[1];
+ admin.stopContainer(appName, participantName);
+ }
+ }
+ }
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/helix-provisioning/src/main/java/org/apache/helix/provisioning/tools/UpdateProvisionerConfig.java
----------------------------------------------------------------------
diff --cc helix-provisioning/src/main/java/org/apache/helix/provisioning/tools/UpdateProvisionerConfig.java
index 0000000,f3cce42..f6713d1
mode 000000,100644..100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/tools/UpdateProvisionerConfig.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/tools/UpdateProvisionerConfig.java
@@@ -1,0 -1,87 +1,106 @@@
+ package org.apache.helix.provisioning.tools;
+
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements. See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership. The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License. You may obtain a copy of the License at
++ *
++ * http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing,
++ * software distributed under the License is distributed on an
++ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++ * KIND, either express or implied. See the License for the
++ * specific language governing permissions and limitations
++ * under the License.
++ */
++
+ import org.apache.commons.cli.CommandLine;
+ import org.apache.commons.cli.GnuParser;
+ import org.apache.commons.cli.Option;
+ import org.apache.commons.cli.OptionBuilder;
+ import org.apache.commons.cli.OptionGroup;
+ import org.apache.commons.cli.Options;
+ import org.apache.commons.cli.ParseException;
+ import org.apache.helix.HelixConnection;
+ import org.apache.helix.api.Resource;
+ import org.apache.helix.api.accessor.ResourceAccessor;
+ import org.apache.helix.api.config.ResourceConfig;
+ import org.apache.helix.api.id.ClusterId;
+ import org.apache.helix.api.id.ResourceId;
+ import org.apache.helix.manager.zk.ZkHelixConnection;
+ import org.apache.helix.provisioning.yarn.YarnProvisionerConfig;
+ import org.apache.log4j.Logger;
+ /**
+ * Update the provisioner config
+ */
+ public class UpdateProvisionerConfig {
+ private static Logger LOG = Logger.getLogger(UpdateProvisionerConfig.class);
+ private static String updateContainerCount = "updateContainerCount";
+ private HelixConnection _connection;
+
+ public UpdateProvisionerConfig(String zkAddress) {
+ _connection = new ZkHelixConnection(zkAddress);
+ _connection.connect();
+ }
+
+ public void setNumContainers(String appName, String serviceName, int numContainers) {
+ ResourceId resourceId = ResourceId.from(serviceName);
+
+ ResourceAccessor resourceAccessor = _connection.createResourceAccessor(ClusterId.from(appName));
+ Resource resource = resourceAccessor.readResource(resourceId);
+ LOG.info("Current provisioner config:"+ resource.getProvisionerConfig());
+
+ ResourceConfig.Delta delta = new ResourceConfig.Delta(resourceId);
+ YarnProvisionerConfig config = new YarnProvisionerConfig(resourceId);
+ config.setNumContainers(numContainers);
+ delta.setProvisionerConfig(config);
+ ResourceConfig updatedResourceConfig = resourceAccessor.updateResource(resourceId, delta);
+ LOG.info("Update provisioner config:"+ updatedResourceConfig.getProvisionerConfig());
+
+ }
+
+ @SuppressWarnings("static-access")
+ public static void main(String[] args) throws ParseException {
+ Option zkServerOption =
+ OptionBuilder.withLongOpt("zookeeperAddress").withDescription("Provide zookeeper address")
+ .create();
+ zkServerOption.setArgs(1);
+ zkServerOption.setRequired(true);
+ zkServerOption.setArgName("zookeeperAddress(Required)");
+
+ OptionGroup group = new OptionGroup();
+ group.setRequired(true);
+
+ // update container count per service
+ Option updateContainerCountOption =
+ OptionBuilder.withLongOpt(updateContainerCount)
+ .withDescription("appName serviceName numContainers").create();
+ updateContainerCountOption.setArgs(3);
+ updateContainerCountOption.setRequired(false);
+ updateContainerCountOption.setArgName("appName serviceName numContainers");
+
+ group.addOption(updateContainerCountOption);
+
+ Options options = new Options();
+ options.addOption(zkServerOption);
+ options.addOptionGroup(group);
+ CommandLine cliParser = new GnuParser().parse(options, args);
+
+ String zkAddress = cliParser.getOptionValue("zookeeperAddress");
+ UpdateProvisionerConfig updater = new UpdateProvisionerConfig(zkAddress);
+
+ if (cliParser.hasOption(updateContainerCount)) {
+ String appName = cliParser.getOptionValues(updateContainerCount)[0];
+ String serviceName = cliParser.getOptionValues(updateContainerCount)[1];
+ int numContainers = Integer.parseInt(
+ cliParser.getOptionValues(updateContainerCount)[2]);
+ updater.setNumContainers(appName, serviceName, numContainers);
+ }
+
+ }
+ }
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppLauncher.java
----------------------------------------------------------------------
diff --cc helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppLauncher.java
index 0000000,2db4afb..2be7062
mode 000000,100644..100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppLauncher.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppLauncher.java
@@@ -1,0 -1,561 +1,580 @@@
+ package org.apache.helix.provisioning.yarn;
+
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements. See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership. The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License. You may obtain a copy of the License at
++ *
++ * http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing,
++ * software distributed under the License is distributed on an
++ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++ * KIND, either express or implied. See the License for the
++ * specific language governing permissions and limitations
++ * under the License.
++ */
++
+ import java.io.File;
+ import java.io.FileInputStream;
+ import java.io.IOException;
+ import java.io.InputStream;
+ import java.net.URI;
+ import java.nio.ByteBuffer;
+ import java.util.ArrayList;
+ import java.util.HashMap;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.Vector;
+
+ import org.apache.commons.cli.CommandLine;
+ import org.apache.commons.cli.GnuParser;
+ import org.apache.commons.cli.Option;
+ import org.apache.commons.cli.Options;
+ import org.apache.commons.compress.archivers.ArchiveStreamFactory;
+ import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
+ import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;
+ import org.apache.commons.logging.Log;
+ import org.apache.commons.logging.LogFactory;
+ import org.apache.hadoop.fs.FileStatus;
+ import org.apache.hadoop.fs.FileSystem;
+ import org.apache.hadoop.fs.Path;
+ import org.apache.hadoop.io.DataOutputBuffer;
+ import org.apache.hadoop.security.Credentials;
+ import org.apache.hadoop.security.UserGroupInformation;
+ import org.apache.hadoop.security.token.Token;
+ import org.apache.hadoop.yarn.api.ApplicationConstants;
+ import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
+ import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse;
+ import org.apache.hadoop.yarn.api.records.ApplicationId;
+ import org.apache.hadoop.yarn.api.records.ApplicationReport;
+ import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+ import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+ import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
+ import org.apache.hadoop.yarn.api.records.LocalResource;
+ import org.apache.hadoop.yarn.api.records.LocalResourceType;
+ import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
+ import org.apache.hadoop.yarn.api.records.Priority;
+ import org.apache.hadoop.yarn.api.records.Resource;
+ import org.apache.hadoop.yarn.api.records.YarnApplicationState;
+ import org.apache.hadoop.yarn.client.api.YarnClient;
+ import org.apache.hadoop.yarn.client.api.YarnClientApplication;
+ import org.apache.hadoop.yarn.conf.YarnConfiguration;
+ import org.apache.hadoop.yarn.util.ConverterUtils;
+ import org.apache.hadoop.yarn.util.Records;
+ import org.apache.helix.HelixConnection;
+ import org.apache.helix.api.id.ClusterId;
+ import org.apache.helix.manager.zk.ZkHelixConnection;
+ import org.apache.helix.provisioning.ApplicationSpec;
+ import org.apache.helix.provisioning.ApplicationSpecFactory;
+ import org.apache.helix.provisioning.HelixYarnUtil;
+ import org.apache.helix.provisioning.TaskConfig;
+
+ /**
+ * Main class to launch the job.
+ * Gets the yaml file as the input.
+ * Converts yaml file into ApplicationSpec.
+ */
+ public class AppLauncher {
+
+ private static final Log LOG = LogFactory.getLog(AppLauncher.class);
+
+ private ApplicationSpec _applicationSpec;
+ private YarnClient yarnClient;
+ private ApplicationSpecFactory _applicationSpecFactory;
+ private File _yamlConfigFile;
+
+ private YarnConfiguration _conf;
+
+ private File appMasterArchive;
+
+ private ApplicationId _appId;
+
+ private AppMasterConfig _appMasterConfig;
+
+ public AppLauncher(ApplicationSpecFactory applicationSpecFactory, File yamlConfigFile)
+ throws Exception {
+ _applicationSpecFactory = applicationSpecFactory;
+ _yamlConfigFile = yamlConfigFile;
+ init();
+ }
+
+ private void init() throws Exception {
+ _applicationSpec = _applicationSpecFactory.fromYaml(new FileInputStream(_yamlConfigFile));
+ _appMasterConfig = new AppMasterConfig();
+ appMasterArchive = new File(_applicationSpec.getAppMasterPackage());
+ yarnClient = YarnClient.createYarnClient();
+ _conf = new YarnConfiguration();
+ yarnClient.init(_conf);
+ }
+
+ public ApplicationSpec getApplicationSpec() {
+ return _applicationSpec;
+ }
+
+ public boolean launch() throws Exception {
+ LOG.info("Running Client");
+ yarnClient.start();
+
+ // Get a new application id
+ YarnClientApplication app = yarnClient.createApplication();
+ GetNewApplicationResponse appResponse = app.getNewApplicationResponse();
+ // TODO get min/max resource capabilities from RM and change memory ask if needed
+ // If we do not have min/max, we may not be able to correctly request
+ // the required resources from the RM for the app master
+ // Memory ask has to be a multiple of min and less than max.
+ // Dump out information about cluster capability as seen by the resource manager
+ int maxMem = appResponse.getMaximumResourceCapability().getMemory();
+ LOG.info("Max mem capabililty of resources in this cluster " + maxMem);
+
+ // set the application name
+ ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
+ _appId = appContext.getApplicationId();
+ _appMasterConfig.setAppId(_appId.getId());
+ String appName = _applicationSpec.getAppName();
+ _appMasterConfig.setAppName(appName);
+ _appMasterConfig.setApplicationSpecFactory(_applicationSpecFactory.getClass()
+ .getCanonicalName());
+ appContext.setApplicationName(appName);
+
+ // Set up the container launch context for the application master
+ ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);
+
+ LOG.info("Copy Application archive file from local filesystem and add to local environment");
+ // Copy the application master jar to the filesystem
+ // Create a local resource to point to the destination jar path
+ FileSystem fs = FileSystem.get(_conf);
+
+ // get packages for each component packages
+ Map<String, URI> packages = new HashMap<String, URI>();
+ packages
+ .put(AppMasterConfig.AppEnvironment.APP_MASTER_PKG.toString(), appMasterArchive.toURI());
+ packages.put(AppMasterConfig.AppEnvironment.APP_SPEC_FILE.toString(), _yamlConfigFile.toURI());
+ for (String serviceName : _applicationSpec.getServices()) {
+ packages.put(serviceName, _applicationSpec.getServicePackage(serviceName));
+ }
+ Map<String, Path> hdfsDest = new HashMap<String, Path>();
+ Map<String, String> classpathMap = new HashMap<String, String>();
+ for (String name : packages.keySet()) {
+ URI uri = packages.get(name);
+ Path dst = copyToHDFS(fs, name, uri);
+ hdfsDest.put(name, dst);
+ String classpath = generateClasspathAfterExtraction(name, new File(uri));
+ classpathMap.put(name, classpath);
+ _appMasterConfig.setClasspath(name, classpath);
+ String serviceMainClass = _applicationSpec.getServiceMainClass(name);
+ if (serviceMainClass != null) {
+ _appMasterConfig.setMainClass(name, serviceMainClass);
+ }
+ }
+
+ // Get YAML files describing all workflows to immediately start
+ Map<String, URI> workflowFiles = new HashMap<String, URI>();
+ List<TaskConfig> taskConfigs = _applicationSpec.getTaskConfigs();
+ if (taskConfigs != null) {
+ for (TaskConfig taskConfig : taskConfigs) {
+ URI configUri = taskConfig.getYamlURI();
+ if (taskConfig.name != null && configUri != null) {
+ workflowFiles.put(taskConfig.name, taskConfig.getYamlURI());
+ }
+ }
+ }
+
+ // set local resources for the application master
+ // local files or archives as needed
+ // In this scenario, the jar file for the application master is part of the local resources
+ Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
+ LocalResource appMasterPkg =
+ setupLocalResource(fs,
+ hdfsDest.get(AppMasterConfig.AppEnvironment.APP_MASTER_PKG.toString()));
+ LocalResource appSpecFile =
+ setupLocalResource(fs,
+ hdfsDest.get(AppMasterConfig.AppEnvironment.APP_SPEC_FILE.toString()));
+ localResources.put(AppMasterConfig.AppEnvironment.APP_MASTER_PKG.toString(), appMasterPkg);
+ localResources.put(AppMasterConfig.AppEnvironment.APP_SPEC_FILE.toString(), appSpecFile);
+ for (String name : workflowFiles.keySet()) {
+ URI uri = workflowFiles.get(name);
+ Path dst = copyToHDFS(fs, name, uri);
+ LocalResource taskLocalResource = setupLocalResource(fs, dst);
+ localResources.put(AppMasterConfig.AppEnvironment.TASK_CONFIG_FILE.toString() + "_" + name,
+ taskLocalResource);
+ }
+
+ // Set local resource info into app master container launch context
+ amContainer.setLocalResources(localResources);
+
+ // Set the necessary security tokens as needed
+ // amContainer.setContainerTokens(containerToken);
+
+ // Add AppMaster.jar location to classpath
+ // At some point we should not be required to add
+ // the hadoop specific classpaths to the env.
+ // It should be provided out of the box.
+ // For now setting all required classpaths including
+ // the classpath to "." for the application jar
+ StringBuilder classPathEnv =
+ new StringBuilder(Environment.CLASSPATH.$()).append(File.pathSeparatorChar).append("./*")
+ .append(File.pathSeparatorChar);
+ classPathEnv.append(classpathMap.get(AppMasterConfig.AppEnvironment.APP_MASTER_PKG.toString()));
+ for (String c : _conf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
+ YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH)) {
+ classPathEnv.append(File.pathSeparatorChar);
+ classPathEnv.append(c.trim());
+ }
+ classPathEnv.append(File.pathSeparatorChar).append("./log4j.properties");
+
+ // add the runtime classpath needed for tests to work
+ if (_conf.getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)) {
+ classPathEnv.append(':');
+ classPathEnv.append(System.getProperty("java.class.path"));
+ }
+ LOG.info("\n\n Setting the classpath to launch AppMaster:\n\n");
+ // Set the env variables to be setup in the env where the application master will be run
+ Map<String, String> env = new HashMap<String, String>(_appMasterConfig.getEnv());
+ env.put("CLASSPATH", classPathEnv.toString());
+
+ amContainer.setEnvironment(env);
+
+ // Set the necessary command to execute the application master
+ Vector<CharSequence> vargs = new Vector<CharSequence>(30);
+
+ // Set java executable command
+ LOG.info("Setting up app master launch command");
+ vargs.add(Environment.JAVA_HOME.$() + "/bin/java");
+ int amMemory = 1024;
+ // Set Xmx based on am memory size
+ vargs.add("-Xmx" + amMemory + "m");
+ // Set class name
+ vargs.add(AppMasterLauncher.class.getCanonicalName());
+ // Set params for Application Master
+ // vargs.add("--num_containers " + String.valueOf(numContainers));
+
+ vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stdout");
+ vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stderr");
+
+ // Get final commmand
+ StringBuilder command = new StringBuilder();
+ for (CharSequence str : vargs) {
+ command.append(str).append(" ");
+ }
+
+ LOG.info("Completed setting up app master command " + command.toString());
+ List<String> commands = new ArrayList<String>();
+ commands.add(command.toString());
+ amContainer.setCommands(commands);
+
+ // Set up resource type requirements
+ // For now, only memory is supported so we set memory requirements
+ Resource capability = Records.newRecord(Resource.class);
+ capability.setMemory(amMemory);
+ appContext.setResource(capability);
+
+ // Service data is a binary blob that can be passed to the application
+ // Not needed in this scenario
+ // amContainer.setServiceData(serviceData);
+
+ // Setup security tokens
+ if (UserGroupInformation.isSecurityEnabled()) {
+ Credentials credentials = new Credentials();
+ String tokenRenewer = _conf.get(YarnConfiguration.RM_PRINCIPAL);
+ if (tokenRenewer == null || tokenRenewer.length() == 0) {
+ throw new IOException("Can't get Master Kerberos principal for the RM to use as renewer");
+ }
+
+ // For now, only getting tokens for the default file-system.
+ final Token<?> tokens[] = fs.addDelegationTokens(tokenRenewer, credentials);
+ if (tokens != null) {
+ for (Token<?> token : tokens) {
+ LOG.info("Got dt for " + fs.getUri() + "; " + token);
+ }
+ }
+ DataOutputBuffer dob = new DataOutputBuffer();
+ credentials.writeTokenStorageToStream(dob);
+ ByteBuffer fsTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
+ amContainer.setTokens(fsTokens);
+ }
+
+ appContext.setAMContainerSpec(amContainer);
+
+ // Set the priority for the application master
+ Priority pri = Records.newRecord(Priority.class);
+ int amPriority = 0;
+ // TODO - what is the range for priority? how to decide?
+ pri.setPriority(amPriority);
+ appContext.setPriority(pri);
+
+ String amQueue = "default";
+ // Set the queue to which this application is to be submitted in the RM
+ appContext.setQueue(amQueue);
+
+ LOG.info("Submitting application to YARN Resource Manager");
+
+ ApplicationId applicationId = yarnClient.submitApplication(appContext);
+
+ LOG.info("Submitted application with applicationId:" + applicationId);
+
+ return true;
+ }
+
+ /**
+ * Generates the classpath after the archive file gets extracted under 'serviceName' folder
+ * @param serviceName
+ * @param archiveFile
+ * @return
+ */
+ private String generateClasspathAfterExtraction(String serviceName, File archiveFile) {
+ if (!isArchive(archiveFile.getAbsolutePath())) {
+ return "./";
+ }
+ StringBuilder classpath = new StringBuilder();
+ // put the jar files under the archive in the classpath
+ try {
+ final InputStream is = new FileInputStream(archiveFile);
+ final TarArchiveInputStream debInputStream =
+ (TarArchiveInputStream) new ArchiveStreamFactory().createArchiveInputStream("tar", is);
+ TarArchiveEntry entry = null;
+ while ((entry = (TarArchiveEntry) debInputStream.getNextEntry()) != null) {
+ if (entry.isFile()) {
+ classpath.append(File.pathSeparatorChar);
+ classpath.append("./" + serviceName + "/" + entry.getName());
+ }
+ }
+ debInputStream.close();
+
+ } catch (Exception e) {
+ LOG.error("Unable to read archive file:" + archiveFile, e);
+ }
+ return classpath.toString();
+ }
+
+ private Path copyToHDFS(FileSystem fs, String name, URI uri) throws Exception {
+ // will throw exception if the file name is without extension
+ String extension = uri.getPath().substring(uri.getPath().lastIndexOf(".") + 1);
+ String pathSuffix =
+ _applicationSpec.getAppName() + "/" + _appId.getId() + "/" + name + "." + extension;
+ Path dst = new Path(fs.getHomeDirectory(), pathSuffix);
+ Path src = new Path(uri);
+ fs.copyFromLocalFile(false, true, src, dst);
+ return dst;
+ }
+
+ private LocalResource setupLocalResource(FileSystem fs, Path dst) throws Exception {
+ URI uri = dst.toUri();
+ String extension = uri.getPath().substring(uri.getPath().lastIndexOf(".") + 1);
+ FileStatus destStatus = fs.getFileStatus(dst);
+ LocalResource amJarRsrc = Records.newRecord(LocalResource.class);
+ // Set the type of resource - file or archive
+ // archives are untarred at destination
+ // we don't need the jar file to be untarred for now
+ if (isArchive(extension)) {
+ amJarRsrc.setType(LocalResourceType.ARCHIVE);
+ } else {
+ amJarRsrc.setType(LocalResourceType.FILE);
+ }
+ // Set visibility of the resource
+ // Setting to most private option
+ amJarRsrc.setVisibility(LocalResourceVisibility.APPLICATION);
+ // Set the resource to be copied over
+ amJarRsrc.setResource(ConverterUtils.getYarnUrlFromPath(dst));
+ // Set timestamp and length of file so that the framework
+ // can do basic sanity checks for the local resource
+ // after it has been copied over to ensure it is the same
+ // resource the client intended to use with the application
+ amJarRsrc.setTimestamp(destStatus.getModificationTime());
+ amJarRsrc.setSize(destStatus.getLen());
+ return amJarRsrc;
+ }
+
+ private boolean isArchive(String path) {
+ return path.endsWith("tar") || path.endsWith("gz") || path.endsWith("tar.gz")
+ || path.endsWith("zip");
+ }
+
+ public HelixConnection pollForConnection() {
+ String prevReport = "";
+ HelixConnection connection = null;
+
+ while (true) {
+ try {
+ // Get application report for the appId we are interested in
+ ApplicationReport report = yarnClient.getApplicationReport(_appId);
+
+ String reportMessage = generateReport(report);
+ if (!reportMessage.equals(prevReport)) {
+ LOG.info(reportMessage);
+ }
+ YarnApplicationState state = report.getYarnApplicationState();
+ if (YarnApplicationState.RUNNING == state) {
+ if (connection == null) {
+ String hostName = null;
+ int ind = report.getHost().indexOf('/');
+ if (ind > -1) {
+ hostName = report.getHost().substring(ind + 1);
+ } else {
+ hostName = report.getHost();
+ }
+ connection = new ZkHelixConnection(hostName + ":2181");
+
+ try {
+ connection.connect();
+ } catch (Exception e) {
+ LOG.warn("AppMaster started but not yet initialized");
+ connection = null;
+ }
+ }
+ if (connection.isConnected()) {
+ return connection;
+ }
+ }
+ prevReport = reportMessage;
+ Thread.sleep(10000);
+ } catch (Exception e) {
+ LOG.error("Exception while getting info ", e);
+ break;
+ }
+ }
+ return null;
+ }
+
+ public ApplicationReport getApplicationReport() {
+ try {
+ return yarnClient.getApplicationReport(_appId);
+ } catch (Exception e) {
+ return null;
+ }
+ }
+
+ /**
+ * @return true if successfully completed, it will print status every X seconds
+ */
+ public boolean waitUntilDone() {
+ String prevReport = "";
+ HelixConnection connection = null;
+
+ while (true) {
+ try {
+ // Get application report for the appId we are interested in
+ ApplicationReport report = yarnClient.getApplicationReport(_appId);
+
+ String reportMessage = generateReport(report);
+ if (!reportMessage.equals(prevReport)) {
+ LOG.info(reportMessage);
+ }
+ YarnApplicationState state = report.getYarnApplicationState();
+ FinalApplicationStatus dsStatus = report.getFinalApplicationStatus();
+ if (YarnApplicationState.FINISHED == state) {
+ if (FinalApplicationStatus.SUCCEEDED == dsStatus) {
+ LOG.info("Application has completed successfully. Breaking monitoring loop");
+ return true;
+ } else {
+ LOG.info("Application did finished unsuccessfully." + " YarnState=" + state.toString()
+ + ", DSFinalStatus=" + dsStatus.toString() + ". Breaking monitoring loop");
+ return false;
+ }
+ } else if (YarnApplicationState.KILLED == state || YarnApplicationState.FAILED == state) {
+ LOG.info("Application did not finish." + " YarnState=" + state.toString()
+ + ", DSFinalStatus=" + dsStatus.toString() + ". Breaking monitoring loop");
+ return false;
+ }
+ if (YarnApplicationState.RUNNING == state) {
+ if (connection == null) {
+ String hostName = null;
+ int ind = report.getHost().indexOf('/');
+ if (ind > -1) {
+ hostName = report.getHost().substring(ind + 1);
+ } else {
+ hostName = report.getHost();
+ }
+ connection = new ZkHelixConnection(hostName + ":2181");
+
+ try {
+ connection.connect();
+ } catch (Exception e) {
+ LOG.warn("AppMaster started but not yet initialized");
+ connection = null;
+ }
+ }
+ if (connection.isConnected()) {
+ AppStatusReportGenerator generator = new AppStatusReportGenerator();
+ ClusterId clusterId = ClusterId.from(_applicationSpec.getAppName());
+ String generateReport = generator.generateReport(connection, clusterId);
+ LOG.info(generateReport);
+ }
+ }
+ prevReport = reportMessage;
+ Thread.sleep(10000);
+ } catch (Exception e) {
+ LOG.error("Exception while getting info ");
+ break;
+ }
+ }
+ return true;
+ }
+
+ /**
+ * TODO: kill the app only in dev mode. In prod, its ok for the app to continue running if the
+ * launcher dies after launching
+ */
+
+ private String generateReport(ApplicationReport report) {
+ return "Got application report from ASM for" + ", appId=" + _appId.getId()
+ + ", clientToAMToken=" + report.getClientToAMToken() + ", appDiagnostics="
+ + report.getDiagnostics() + ", appMasterHost=" + report.getHost() + ", appQueue="
+ + report.getQueue() + ", appMasterRpcPort=" + report.getRpcPort() + ", appStartTime="
+ + report.getStartTime() + ", yarnAppState=" + report.getYarnApplicationState().toString()
+ + ", distributedFinalState=" + report.getFinalApplicationStatus().toString()
+ + ", appTrackingUrl=" + report.getTrackingUrl() + ", appUser=" + report.getUser();
+ }
+
+ public void cleanup() {
+ LOG.info("Cleaning up");
+ try {
+ ApplicationReport applicationReport = yarnClient.getApplicationReport(_appId);
+ LOG.info("Killing application:" + _appId + " \n Application report"
+ + generateReport(applicationReport));
+ } catch (Exception e) {
+ e.printStackTrace();
+ }
+ }
+
+ /**
+ * Launches the application on a YARN cluster. Once launched, it will display (periodically) the
+ * status of the containers in the application.
+ * @param args app_spec_provider and app_config_spec
+ * @throws Exception
+ */
+ public static void main(String[] args) throws Exception {
+
+ Options opts = new Options();
+ opts.addOption(new Option("app_spec_provider", true,
+ "Application Spec Factory Class that will parse the app_config_spec file"));
+ opts.addOption(new Option("app_config_spec", true,
+ "YAML config file that provides the app specifications"));
+ CommandLine cliParser = new GnuParser().parse(opts, args);
+ String appSpecFactoryClass = cliParser.getOptionValue("app_spec_provider");
+ String yamlConfigFileName = cliParser.getOptionValue("app_config_spec");
+
+ ApplicationSpecFactory applicationSpecFactory =
+ HelixYarnUtil.createInstance(appSpecFactoryClass);
+ File yamlConfigFile = new File(yamlConfigFileName);
+ if (!yamlConfigFile.exists()) {
+ throw new IllegalArgumentException("YAML app_config_spec file: '" + yamlConfigFileName
+ + "' does not exist");
+ }
+ final AppLauncher launcher = new AppLauncher(applicationSpecFactory, yamlConfigFile);
+ launcher.launch();
+ Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() {
+
+ @Override
+ public void run() {
+ launcher.cleanup();
+ }
+ }));
+ launcher.waitUntilDone();
+ }
+
+ }
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppMasterConfig.java
----------------------------------------------------------------------
diff --cc helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppMasterConfig.java
index 0000000,38a0dd1..d0952c1
mode 000000,100644..100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppMasterConfig.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppMasterConfig.java
@@@ -1,0 -1,111 +1,130 @@@
+ package org.apache.helix.provisioning.yarn;
+
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements. See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership. The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License. You may obtain a copy of the License at
++ *
++ * http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing,
++ * software distributed under the License is distributed on an
++ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++ * KIND, either express or implied. See the License for the
++ * specific language governing permissions and limitations
++ * under the License.
++ */
++
+ import java.util.HashMap;
+ import java.util.Map;
+
+ import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
+ import org.apache.log4j.Logger;
+
+ /**
+ * Convenient method to pass information to containers
+ * The methods simply sets up environment variables
+ */
+ public class AppMasterConfig {
+ private static Logger LOG = Logger.getLogger(AppMasterConfig.class);
+ private Map<String, String> _envs;
+
+ public enum AppEnvironment {
+ APP_MASTER_PKG("APP_MASTER_PKG"),
+ APP_SPEC_FILE("APP_SPEC_FILE"),
+ APP_NAME("APP_NAME"),
+ APP_ID("APP_ID"),
+ APP_SPEC_FACTORY("APP_SPEC_FACTORY"),
+ TASK_CONFIG_FILE("TASK_CONFIG_FILE");
+ String _name;
+
+ private AppEnvironment(String name) {
+ _name = name;
+ }
+
+ public String toString() {
+ return _name;
+ }
+ }
+
+ public AppMasterConfig() {
+ _envs = new HashMap<String, String>();
+ }
+
+ private String get(String key) {
+ String value = (_envs.containsKey(key)) ? _envs.get(key) : System.getenv().get(key);
+ LOG.info("Returning value:" + value + " for key:'" + key + "'");
+
+ return value;
+ }
+
+ public void setAppId(int id) {
+ _envs.put(AppEnvironment.APP_ID.toString(), "" + id);
+ }
+
+ public String getAppName() {
+ return get(AppEnvironment.APP_NAME.toString());
+ }
+
+ public int getAppId() {
+ return Integer.parseInt(get(AppEnvironment.APP_ID.toString()));
+ }
+
+ public String getClassPath(String serviceName) {
+ return get(serviceName + "_classpath");
+ }
+
+ public String getMainClass(String serviceName) {
+ return get(serviceName + "_mainClass");
+ }
+
+ public String getZKAddress() {
+ return get(Environment.NM_HOST.name()) + ":2181";
+ }
+
+ public String getContainerId() {
+ return get(Environment.CONTAINER_ID.name());
+ }
+
+ public Map<String, String> getEnv() {
+ return _envs;
+ }
+
+ public void setAppName(String appName) {
+ _envs.put(AppEnvironment.APP_NAME.toString(), appName);
+
+ }
+
+ public void setClasspath(String serviceName, String classpath) {
+ _envs.put(serviceName + "_classpath", classpath);
+ }
+
+ public void setTaskConfigFile(String configName, String path) {
+ _envs.put(AppEnvironment.TASK_CONFIG_FILE.toString() + "_" + configName, path);
+ }
+
+ public String getTaskConfigFile(String configName) {
+ return get(AppEnvironment.TASK_CONFIG_FILE.toString() + "_" + configName);
+ }
+
+ public String getApplicationSpecConfigFile() {
+ return get(AppEnvironment.APP_SPEC_FILE.toString());
+ }
+
+ public String getApplicationSpecFactory() {
+ return get(AppEnvironment.APP_SPEC_FACTORY.toString());
+ }
+
+ public void setApplicationSpecFactory(String className) {
+ _envs.put(AppEnvironment.APP_SPEC_FACTORY.toString(), className);
+
+ }
+
+ public void setMainClass(String serviceName, String serviceMainClass) {
+ _envs.put(serviceName + "_mainClass", serviceMainClass);
+ }
+ }
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppMasterLauncher.java
----------------------------------------------------------------------
diff --cc helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppMasterLauncher.java
index 0000000,e7a0f61..31ef05c
mode 000000,100644..100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppMasterLauncher.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppMasterLauncher.java
@@@ -1,0 -1,194 +1,213 @@@
+ package org.apache.helix.provisioning.yarn;
+
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements. See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership. The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License. You may obtain a copy of the License at
++ *
++ * http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing,
++ * software distributed under the License is distributed on an
++ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++ * KIND, either express or implied. See the License for the
++ * specific language governing permissions and limitations
++ * under the License.
++ */
++
+ import java.io.File;
+ import java.io.FileInputStream;
+ import java.io.IOException;
+ import java.io.InputStream;
+ import java.net.URI;
+ import java.util.List;
+ import java.util.Map;
+
+ import org.I0Itec.zkclient.IDefaultNameSpace;
+ import org.I0Itec.zkclient.ZkClient;
+ import org.I0Itec.zkclient.ZkServer;
+ import org.apache.commons.cli.Options;
+ import org.apache.commons.io.FileUtils;
+ import org.apache.hadoop.fs.FileSystem;
+ import org.apache.hadoop.fs.Path;
+ import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+ import org.apache.hadoop.yarn.api.records.ApplicationId;
+ import org.apache.hadoop.yarn.api.records.ContainerId;
+ import org.apache.hadoop.yarn.conf.YarnConfiguration;
+ import org.apache.hadoop.yarn.util.ConverterUtils;
+ import org.apache.helix.HelixController;
+ import org.apache.helix.api.accessor.ClusterAccessor;
+ import org.apache.helix.api.config.ClusterConfig;
+ import org.apache.helix.api.config.ResourceConfig;
+ import org.apache.helix.api.id.ClusterId;
+ import org.apache.helix.api.id.ControllerId;
+ import org.apache.helix.api.id.ResourceId;
+ import org.apache.helix.controller.rebalancer.config.FullAutoRebalancerConfig;
+ import org.apache.helix.controller.rebalancer.config.RebalancerConfig;
+ import org.apache.helix.manager.zk.HelixConnectionAdaptor;
+ import org.apache.helix.manager.zk.ZkHelixConnection;
+ import org.apache.helix.model.StateModelDefinition;
+ import org.apache.helix.provisioning.ApplicationSpec;
+ import org.apache.helix.provisioning.ApplicationSpecFactory;
+ import org.apache.helix.provisioning.HelixYarnUtil;
+ import org.apache.helix.provisioning.ServiceConfig;
+ import org.apache.helix.provisioning.TaskConfig;
+ import org.apache.helix.task.TaskDriver;
+ import org.apache.helix.task.Workflow;
+ import org.apache.helix.tools.StateModelConfigGenerator;
+ import org.apache.log4j.Logger;
+
+ /**
+ * This will <br/>
+ * <ul>
+ * <li>start zookeeper automatically</li>
+ * <li>create the cluster</li>
+ * <li>set up resource(s)</li>
+ * <li>start helix controller</li>
+ * </ul>
+ */
+ public class AppMasterLauncher {
+ public static Logger LOG = Logger.getLogger(AppMasterLauncher.class);
+
+ // Entry point of the Helix YARN application master: starts an embedded ZooKeeper,
+ // the generic YARN app master, creates the Helix cluster and its resources from the
+ // application spec, starts a controller, and submits any pre-specified task workflows.
+ public static void main(String[] args) throws Exception {
+ Map<String, String> env = System.getenv();
+ LOG.info("Starting app master with the following environment variables");
+ for (String key : env.keySet()) {
+ LOG.info(key + "\t\t=" + env.get(key));
+ }
+
+ // NOTE(review): opts is built but never parsed against args — dead setup.
+ Options opts;
+ opts = new Options();
+ opts.addOption("num_containers", true, "Number of containers");
+
+ // START ZOOKEEPER
+ String dataDir = "dataDir";
+ String logDir = "logDir";
+ IDefaultNameSpace defaultNameSpace = new IDefaultNameSpace() {
+ @Override
+ public void createDefaultNameSpace(ZkClient zkClient) {
+
+ }
+ };
+ // wipe any stale ZK state from a previous attempt in this working dir; failure is non-fatal
+ try {
+ FileUtils.deleteDirectory(new File(dataDir));
+ FileUtils.deleteDirectory(new File(logDir));
+ } catch (IOException e) {
+ LOG.error(e);
+ }
+
+ final ZkServer server = new ZkServer(dataDir, logDir, defaultNameSpace);
+ server.start();
+
+ // start Generic AppMaster that interacts with Yarn RM
+ AppMasterConfig appMasterConfig = new AppMasterConfig();
+ String containerIdStr = appMasterConfig.getContainerId();
+ ContainerId containerId = ConverterUtils.toContainerId(containerIdStr);
+ ApplicationAttemptId appAttemptID = containerId.getApplicationAttemptId();
+
+ // NOTE(review): the spec is opened via the literal name "APP_SPEC_FILE" relative to the
+ // container working directory — presumably localized there under that name by the client; confirm.
+ String configFile = AppMasterConfig.AppEnvironment.APP_SPEC_FILE.toString();
+ String className = appMasterConfig.getApplicationSpecFactory();
+
+ GenericApplicationMaster genericApplicationMaster = new GenericApplicationMaster(appAttemptID);
+ // NOTE(review): a failed start is only logged; execution continues with a possibly unusable app master.
+ try {
+ genericApplicationMaster.start();
+ } catch (Exception e) {
+ LOG.error("Unable to start application master: ", e);
+ }
+ ApplicationSpecFactory factory = HelixYarnUtil.createInstance(className);
+
+ // TODO: Avoid setting static variable.
+ YarnProvisioner.applicationMaster = genericApplicationMaster;
+ YarnProvisioner.applicationMasterConfig = appMasterConfig;
+ ApplicationSpec applicationSpec = factory.fromYaml(new FileInputStream(configFile));
+ YarnProvisioner.applicationSpec = applicationSpec;
+ String zkAddress = appMasterConfig.getZKAddress();
+ String clusterName = appMasterConfig.getAppName();
+
+ // CREATE CLUSTER and setup the resources
+ // connect
+ ZkHelixConnection connection = new ZkHelixConnection(zkAddress);
+ connection.connect();
+
+ // create the cluster
+ ClusterId clusterId = ClusterId.from(clusterName);
+ ClusterAccessor clusterAccessor = connection.createClusterAccessor(clusterId);
+ StateModelDefinition statelessService =
+ new StateModelDefinition(StateModelConfigGenerator.generateConfigForStatelessService());
+ StateModelDefinition taskStateModel =
+ new StateModelDefinition(StateModelConfigGenerator.generateConfigForTaskStateModel());
+ clusterAccessor.createCluster(new ClusterConfig.Builder(clusterId)
+ .addStateModelDefinition(statelessService).addStateModelDefinition(taskStateModel).build());
+ // one Helix resource per declared service, provisioned on YARN containers
+ for (String service : applicationSpec.getServices()) {
+ String resourceName = service;
+ // add the resource with the local provisioner
+ ResourceId resourceId = ResourceId.from(resourceName);
+
+ ServiceConfig serviceConfig = applicationSpec.getServiceConfig(resourceName);
+ serviceConfig.setSimpleField("service_name", service);
+ int numContainers = serviceConfig.getIntField("num_containers", 1);
+
+ YarnProvisionerConfig provisionerConfig = new YarnProvisionerConfig(resourceId);
+ provisionerConfig.setNumContainers(numContainers);
+
+ FullAutoRebalancerConfig.Builder rebalancerConfigBuilder =
+ new FullAutoRebalancerConfig.Builder(resourceId);
+ RebalancerConfig rebalancerConfig =
+ rebalancerConfigBuilder.stateModelDefId(statelessService.getStateModelDefId())//
+ .build();
+ ResourceConfig.Builder resourceConfigBuilder =
+ new ResourceConfig.Builder(ResourceId.from(resourceName));
+ ResourceConfig resourceConfig = resourceConfigBuilder.provisionerConfig(provisionerConfig) //
+ .rebalancerConfig(rebalancerConfig) //
+ .userConfig(serviceConfig) //
+ .build();
+ clusterAccessor.addResourceToCluster(resourceConfig);
+ }
+ // start controller
+ ControllerId controllerId = ControllerId.from("controller1");
+ HelixController controller = connection.createController(clusterId, controllerId);
+ controller.start();
+
+ // Start any pre-specified jobs
+ List<TaskConfig> taskConfigs = applicationSpec.getTaskConfigs();
+ if (taskConfigs != null) {
+ YarnConfiguration conf = new YarnConfiguration();
+ FileSystem fs;
+ fs = FileSystem.get(conf);
+ for (TaskConfig taskConfig : taskConfigs) {
+ URI yamlUri = taskConfig.getYamlURI();
+ if (yamlUri != null && taskConfig.name != null) {
+ InputStream is =
+ readFromHDFS(fs, taskConfig.name, yamlUri, applicationSpec,
+ appAttemptID.getApplicationId());
+ Workflow workflow = Workflow.parse(is);
+ TaskDriver taskDriver = new TaskDriver(new HelixConnectionAdaptor(controller));
+ taskDriver.start(workflow);
+ }
+ }
+ }
+
+ // stop the embedded ZK when the JVM is shut down
+ Thread shutdownhook = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ server.shutdown();
+ }
+ });
+ Runtime.getRuntime().addShutdownHook(shutdownhook);
+ // NOTE(review): main returns after 10s; assumes non-daemon threads (controller/ZK/app master)
+ // keep the JVM alive afterwards — confirm.
+ Thread.sleep(10000);
+
+ }
+
+ // Opens the task YAML previously staged on HDFS at <home>/<appName>/<appId>/<name>.<ext>.
+ private static InputStream readFromHDFS(FileSystem fs, String name, URI uri,
+ ApplicationSpec appSpec, ApplicationId appId) throws Exception {
+ // will throw exception if the file name is without extension
+ String extension = uri.getPath().substring(uri.getPath().lastIndexOf(".") + 1);
+ String pathSuffix = appSpec.getAppName() + "/" + appId.getId() + "/" + name + "." + extension;
+ Path dst = new Path(fs.getHomeDirectory(), pathSuffix);
+ return fs.open(dst).getWrappedStream();
+ }
+ }
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppStatusReportGenerator.java
----------------------------------------------------------------------
diff --cc helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppStatusReportGenerator.java
index 0000000,40c8186..c436443
mode 000000,100644..100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppStatusReportGenerator.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppStatusReportGenerator.java
@@@ -1,0 -1,84 +1,103 @@@
+ package org.apache.helix.provisioning.yarn;
+
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements. See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership. The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License. You may obtain a copy of the License at
++ *
++ * http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing,
++ * software distributed under the License is distributed on an
++ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++ * KIND, either express or implied. See the License for the
++ * specific language governing permissions and limitations
++ * under the License.
++ */
++
+ import java.util.Map;
+
+ import org.apache.helix.HelixConnection;
+ import org.apache.helix.api.Participant;
+ import org.apache.helix.api.Resource;
+ import org.apache.helix.api.State;
+ import org.apache.helix.api.accessor.ClusterAccessor;
+ import org.apache.helix.api.config.ContainerConfig;
+ import org.apache.helix.api.id.ClusterId;
+ import org.apache.helix.api.id.ParticipantId;
+ import org.apache.helix.api.id.PartitionId;
+ import org.apache.helix.api.id.ResourceId;
+ import org.apache.helix.controller.provisioner.ContainerId;
+ import org.apache.helix.controller.provisioner.ContainerState;
+ import org.apache.helix.manager.zk.ZkHelixConnection;
+
+ // Renders a tab-separated status report of an app's services, containers, and their states.
+ public class AppStatusReportGenerator {
+ static String TAB = "\t";
+ static String NEWLINE = "\n";
+
+ // Builds the report text for the given cluster, or a fixed message if not connected.
+ String generateReport(HelixConnection connection, ClusterId clusterId) {
+ if (!connection.isConnected()) {
+ return "Unable to connect to cluster";
+ }
+ StringBuilder builder = new StringBuilder();
+ ClusterAccessor clusterAccessor = connection.createClusterAccessor(clusterId);
+ Map<ParticipantId, Participant> participants = clusterAccessor.readParticipants();
+ builder.append("AppName").append(TAB).append(clusterId).append(NEWLINE);
+ Map<ResourceId, Resource> resources = clusterAccessor.readResources();
+ for (ResourceId resourceId : resources.keySet()) {
+ builder.append("SERVICE").append(TAB).append(resourceId).append(NEWLINE);
+ Resource resource = resources.get(resourceId);
+ // service state is read from the external view of partition "<resource>_0"
+ Map<ParticipantId, State> serviceStateMap =
+ resource.getExternalView().getStateMap(PartitionId.from(resourceId.stringify() + "_0"));
+
+ builder.append(TAB).append("CONTAINER_NAME").append(TAB).append(TAB)
+ .append("CONTAINER_STATE").append(TAB).append("SERVICE_STATE").append(TAB)
+ .append("CONTAINER_ID").append(NEWLINE);
+ for (Participant participant : participants.values()) {
+ // need a better check
+ // NOTE(review): participants are matched to a service by id-prefix only; a service whose
+ // name is a prefix of another would be over-matched.
+ if (!participant.getId().stringify().startsWith(resource.getId().stringify())) {
+ continue;
+ }
+ ContainerConfig containerConfig = participant.getContainerConfig();
+ // defaults for participants with no container config attached
+ ContainerState containerState = ContainerState.UNDEFINED;
+ ContainerId containerId = ContainerId.from("N/A");
+
+ if (containerConfig != null) {
+ containerId = containerConfig.getId();
+ containerState = containerConfig.getState();
+ }
+ State participantState = null;
+ if (serviceStateMap != null) {
+ participantState = serviceStateMap.get(participant.getId());
+ }
+ if (participantState == null) {
+ participantState = State.from("UNKNOWN");
+ }
+ builder.append(TAB).append(participant.getId()).append(TAB).append(containerState)
+ .append(TAB).append(participantState).append(TAB).append(TAB).append(containerId);
+ builder.append(NEWLINE);
+ }
+
+ }
+ return builder.toString();
+
+ }
+
+ // Ad-hoc driver: polls and prints the report for "testApp1" every 10s, forever.
+ public static void main(String[] args) throws InterruptedException {
+ AppStatusReportGenerator generator = new AppStatusReportGenerator();
+
+ ZkHelixConnection connection = new ZkHelixConnection("localhost:2181");
+ connection.connect();
+ while (true) {
+ String generateReport = generator.generateReport(connection, ClusterId.from("testApp1"));
+ System.out.println(generateReport);
+ Thread.sleep(10000);
+ // NOTE(review): addCluster is invoked on every iteration — presumably a leftover from
+ // manual testing; verify before reuse.
+ connection.createClusterManagementTool().addCluster("testApp1");
+ }
+ // connection.disconnect();
+ }
+ }
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/FixedTargetProvider.java
----------------------------------------------------------------------
diff --cc helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/FixedTargetProvider.java
index 0000000,83ad461..17c0fe1
mode 000000,100644..100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/FixedTargetProvider.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/FixedTargetProvider.java
@@@ -1,0 -1,20 +1,39 @@@
+ package org.apache.helix.provisioning.yarn;
+
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements. See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership. The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License. You may obtain a copy of the License at
++ *
++ * http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing,
++ * software distributed under the License is distributed on an
++ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++ * KIND, either express or implied. See the License for the
++ * specific language governing permissions and limitations
++ * under the License.
++ */
++
+ import java.util.Collection;
+
+ import org.apache.helix.api.Cluster;
+ import org.apache.helix.api.Participant;
+ import org.apache.helix.api.id.ResourceId;
+ import org.apache.helix.controller.provisioner.TargetProvider;
+ import org.apache.helix.controller.provisioner.TargetProviderResponse;
+
+ // Placeholder TargetProvider: the evaluation logic is not implemented yet.
+ public class FixedTargetProvider implements TargetProvider {
+
+ @Override
+ // NOTE(review): stub — always returns null; callers must tolerate a null response until implemented.
+ public TargetProviderResponse evaluateExistingContainers(Cluster cluster, ResourceId resourceId,
+ Collection<Participant> participants) {
+ // TODO Auto-generated method stub
+ return null;
+ }
+
+ }
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/LaunchContainerRunnable.java
----------------------------------------------------------------------
diff --cc helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/LaunchContainerRunnable.java
index 0000000,c54f87f..f66dd55
mode 000000,100644..100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/LaunchContainerRunnable.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/LaunchContainerRunnable.java
@@@ -1,0 -1,79 +1,98 @@@
+ package org.apache.helix.provisioning.yarn;
+
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements. See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership. The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License. You may obtain a copy of the License at
++ *
++ * http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing,
++ * software distributed under the License is distributed on an
++ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++ * KIND, either express or implied. See the License for the
++ * specific language governing permissions and limitations
++ * under the License.
++ */
++
+ import java.util.ArrayList;
+ import java.util.HashMap;
+ import java.util.List;
+ import java.util.Map;
-import java.util.Vector;
+
+ import org.apache.hadoop.yarn.api.ContainerManagementProtocol;
+ import org.apache.hadoop.yarn.api.records.Container;
+ import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+ import org.apache.hadoop.yarn.api.records.LocalResource;
+ import org.apache.hadoop.yarn.util.Records;
+
+ /**
+ * Thread to connect to the {@link ContainerManagementProtocol} and launch the container
+ * that will execute the shell command.
+ */
+ class LaunchContainerRunnable implements Runnable {
+
+ /**
- *
++ *
+ */
+ private final GenericApplicationMaster _genericApplicationMaster;
+
+ // Allocated container
+ Container container;
+
+ NMCallbackHandler containerListener;
+
+ /**
+ * @param lcontainer Allocated container
+ * @param containerListener Callback handler of the container
+ * @param genericApplicationMaster TODO
+ */
- public LaunchContainerRunnable(GenericApplicationMaster genericApplicationMaster, Container lcontainer, NMCallbackHandler containerListener) {
++ public LaunchContainerRunnable(GenericApplicationMaster genericApplicationMaster,
++ Container lcontainer, NMCallbackHandler containerListener) {
+ _genericApplicationMaster = genericApplicationMaster;
+ this.container = lcontainer;
+ this.containerListener = containerListener;
+ }
+
+ @Override
+ /**
- * Connects to CM, sets up container launch context
- * for shell command and eventually dispatches the container
- * start request to the CM.
++ * Connects to CM, sets up container launch context
++ * for shell command and eventually dispatches the container
++ * start request to the CM.
+ */
+ public void run() {
- GenericApplicationMaster.LOG.info("Setting up container launch container for containerid=" + container.getId());
++ GenericApplicationMaster.LOG.info("Setting up container launch container for containerid="
++ + container.getId());
+ ContainerLaunchContext ctx = Records.newRecord(ContainerLaunchContext.class);
+
+ // Set the environment
- //ctx.setEnvironment(shellEnv);
++ // ctx.setEnvironment(shellEnv);
+
+ // Set the local resources
+ Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
+
+ ctx.setLocalResources(localResources);
+
+ // Set the necessary command to execute on the allocated container
- Vector<CharSequence> vargs = new Vector<CharSequence>(5);
++ // Vector<CharSequence> vargs = new Vector<CharSequence>(5);
+
-
+ List<String> commands = new ArrayList<String>();
- // commands.add(command.toString());
++ // commands.add(command.toString());
+ ctx.setCommands(commands);
+
+ // NOTE(review): both localResources and commands are empty here — the container is started
+ // with no command; confirm this is intentional or populated elsewhere before relying on it.
+
+ // Set up tokens for the container too. Today, for normal shell commands,
+ // the container in distribute-shell doesn't need any tokens. We are
+ // populating them mainly for NodeManagers to be able to download any
+ // files in the distributed file-system. The tokens are otherwise also
+ // useful in cases, for e.g., when one is running a "hadoop dfs" command
+ // inside the distributed shell.
+ ctx.setTokens(_genericApplicationMaster.allTokens.duplicate());
+
+ containerListener.addContainer(container.getId(), container);
+ _genericApplicationMaster.nmClientAsync.startContainerAsync(container, ctx);
+ }
-}
++}
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/NMCallbackHandler.java
----------------------------------------------------------------------
diff --cc helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/NMCallbackHandler.java
index 0000000,f7c3a9f..7d7883e
mode 000000,100644..100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/NMCallbackHandler.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/NMCallbackHandler.java
@@@ -1,0 -1,84 +1,103 @@@
+ package org.apache.helix.provisioning.yarn;
+
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements. See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership. The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License. You may obtain a copy of the License at
++ *
++ * http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing,
++ * software distributed under the License is distributed on an
++ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++ * KIND, either express or implied. See the License for the
++ * specific language governing permissions and limitations
++ * under the License.
++ */
++
+ import java.nio.ByteBuffer;
+ import java.util.Map;
+ import java.util.concurrent.ConcurrentHashMap;
+ import java.util.concurrent.ConcurrentMap;
+
+ import org.apache.hadoop.yarn.api.records.Container;
+ import org.apache.hadoop.yarn.api.records.ContainerId;
+ import org.apache.hadoop.yarn.api.records.ContainerStatus;
+ import org.apache.hadoop.yarn.client.api.async.NMClientAsync;
+ import org.apache.helix.provisioning.ContainerLaunchResponse;
+ import org.apache.helix.provisioning.ContainerStopResponse;
+ import org.apache.log4j.Logger;
+
+ import com.google.common.annotations.VisibleForTesting;
+ import com.google.common.util.concurrent.SettableFuture;
+
+ @VisibleForTesting
+ // NodeManager async-client callbacks: tracks started containers and completes the
+ // app master's pending launch/stop futures as container events arrive.
+ class NMCallbackHandler implements NMClientAsync.CallbackHandler {
+
+ private Logger LOG = Logger.getLogger(NMCallbackHandler.class);
+ private ConcurrentMap<ContainerId, Container> containers =
+ new ConcurrentHashMap<ContainerId, Container>();
+ private final GenericApplicationMaster applicationMaster;
+
+ public NMCallbackHandler(GenericApplicationMaster applicationMaster) {
+ this.applicationMaster = applicationMaster;
+ }
+
+ // Registers a container before it is started so later callbacks can resolve it.
+ public void addContainer(ContainerId containerId, Container container) {
+ containers.putIfAbsent(containerId, container);
+ }
+
+ @Override
+ public void onContainerStopped(ContainerId containerId) {
+ LOG.info("Succeeded to stop Container " + containerId);
+ Container container = containers.get(containerId);
+ if (container != null) {
+ applicationMaster.nmClientAsync.getContainerStatusAsync(containerId, container.getNodeId());
+ }
+ // NOTE(review): remove() may return null if no stop future was registered for this
+ // container, which would NPE on set() below — confirm stops are always initiated via the
+ // app master's stop path.
+ SettableFuture<ContainerStopResponse> settableFuture =
+ applicationMaster.containerStopMap.remove(containerId);
+ ContainerStopResponse value = new ContainerStopResponse();
+ settableFuture.set(value);
+ containers.remove(containerId);
+ }
+
+ @Override
+ public void onContainerStatusReceived(ContainerId containerId, ContainerStatus containerStatus) {
+ LOG.info("Container Status: id=" + containerId + ", status=" + containerStatus);
+ }
+
+ @Override
+ public void onContainerStarted(ContainerId containerId, Map<String, ByteBuffer> allServiceResponse) {
+ LOG.debug("Succeeded to start Container " + containerId);
+
+ Container container = containers.get(containerId);
+ if (container != null) {
+ applicationMaster.nmClientAsync.getContainerStatusAsync(containerId, container.getNodeId());
+ }
+ // NOTE(review): same unchecked-null pattern as onContainerStopped — verify a launch future
+ // always exists for a started container.
+ SettableFuture<ContainerLaunchResponse> settableFuture =
+ applicationMaster.containerLaunchResponseMap.remove(containerId);
+ ContainerLaunchResponse value = new ContainerLaunchResponse();
+ settableFuture.set(value);
+ }
+
+ @Override
+ public void onStartContainerError(ContainerId containerId, Throwable t) {
+ LOG.error("Failed to start Container " + containerId);
+ containers.remove(containerId);
+ }
+
+ @Override
+ public void onGetContainerStatusError(ContainerId containerId, Throwable t) {
+ LOG.error("Failed to query the status of Container " + containerId);
+ }
+
+ @Override
+ public void onStopContainerError(ContainerId containerId, Throwable t) {
+ LOG.error("Failed to stop Container " + containerId);
+ containers.remove(containerId);
+ }
+ }
http://git-wip-us.apache.org/repos/asf/helix/blob/713586c4/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/RMCallbackHandler.java
----------------------------------------------------------------------
diff --cc helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/RMCallbackHandler.java
index 0000000,ced1431..0fc748c
mode 000000,100644..100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/RMCallbackHandler.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/RMCallbackHandler.java
@@@ -1,0 -1,132 +1,150 @@@
+ package org.apache.helix.provisioning.yarn;
+
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements. See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership. The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License. You may obtain a copy of the License at
++ *
++ * http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing,
++ * software distributed under the License is distributed on an
++ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
++ * KIND, either express or implied. See the License for the
++ * specific language governing permissions and limitations
++ * under the License.
++ */
++
+ import java.util.List;
+
+ import org.apache.commons.logging.Log;
+ import org.apache.commons.logging.LogFactory;
+ import org.apache.hadoop.yarn.api.records.Container;
+ import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
+ import org.apache.hadoop.yarn.api.records.ContainerState;
+ import org.apache.hadoop.yarn.api.records.ContainerStatus;
+ import org.apache.hadoop.yarn.api.records.NodeReport;
+ import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;
+ import org.apache.hadoop.yarn.client.api.async.AMRMClientAsync;
+ import org.apache.helix.provisioning.ContainerAskResponse;
+ import org.apache.helix.provisioning.ContainerReleaseResponse;
+ import org.apache.helix.provisioning.ContainerStopResponse;
+
+ import com.google.common.util.concurrent.SettableFuture;
+
+ // ResourceManager async-client callbacks: matches allocated containers to pending asks and
+ // resolves the app master's stop/release futures when containers complete.
+ class RMCallbackHandler implements AMRMClientAsync.CallbackHandler {
+ private static final Log LOG = LogFactory.getLog(RMCallbackHandler.class);
+ long startTime;
+ /**
- *
++ *
+ */
+ private final GenericApplicationMaster _genericApplicationMaster;
+
+ /**
+ * @param genericApplicationMaster
+ */
+ RMCallbackHandler(GenericApplicationMaster genericApplicationMaster) {
+ _genericApplicationMaster = genericApplicationMaster;
+ startTime = System.currentTimeMillis();
+ }
+
- @SuppressWarnings("unchecked")
+ @Override
+ public void onContainersCompleted(List<ContainerStatus> completedContainers) {
+ LOG.info("Got response from RM for container ask, completedCnt=" + completedContainers.size());
+ for (ContainerStatus containerStatus : completedContainers) {
+ GenericApplicationMaster.LOG.info("Got container status for containerID="
+ + containerStatus.getContainerId() + ", state=" + containerStatus.getState()
+ + ", exitStatus=" + containerStatus.getExitStatus() + ", diagnostics="
+ + containerStatus.getDiagnostics());
+
+ // non complete containers should not be here
+ assert (containerStatus.getState() == ContainerState.COMPLETE);
+ // a completed container resolves a pending stop future first, else a release future
+ synchronized (_genericApplicationMaster.allocatedContainerSet) {
+ _genericApplicationMaster.allocatedContainerSet.remove(containerStatus.getContainerId());
+ SettableFuture<ContainerStopResponse> stopResponseFuture =
+ _genericApplicationMaster.containerStopMap.remove(containerStatus.getContainerId());
+ if (stopResponseFuture != null) {
+ ContainerStopResponse value = new ContainerStopResponse();
+ stopResponseFuture.set(value);
+ } else {
+ SettableFuture<ContainerReleaseResponse> releaseResponseFuture =
+ _genericApplicationMaster.containerReleaseMap
+ .remove(containerStatus.getContainerId());
+ if (releaseResponseFuture != null) {
+ ContainerReleaseResponse value = new ContainerReleaseResponse();
+ releaseResponseFuture.set(value);
+ }
+ }
+ }
+ // increment counters for completed/failed containers
+ int exitStatus = containerStatus.getExitStatus();
+ if (0 != exitStatus) {
+ // container failed
+ // NOTE(review): both failure branches below are empty — failures are observed but not
+ // retried or counted here.
+ if (ContainerExitStatus.ABORTED != exitStatus) {
+
+ } else {
+ // container was killed by framework, possibly preempted
+ // we should re-try as the container was lost for some reason
+
+ // we do not need to release the container as it would be done
+ // by the RM
+ }
+ } else {
+ // nothing to do
+ // container completed successfully
+ GenericApplicationMaster.LOG.info("Container completed successfully." + ", containerId="
+ + containerStatus.getContainerId());
+ }
+ }
+ }
+
+ @Override
+ public void onContainersAllocated(List<Container> allocatedContainers) {
+ GenericApplicationMaster.LOG.info("Got response from RM for container ask, allocatedCnt="
+ + allocatedContainers.size());
+ for (Container allocatedContainer : allocatedContainers) {
+ GenericApplicationMaster.LOG.info("Allocated new container." + ", containerId="
+ + allocatedContainer.getId() + ", containerNode="
+ + allocatedContainer.getNodeId().getHost() + ":"
+ + allocatedContainer.getNodeId().getPort() + ", containerNodeURI="
+ + allocatedContainer.getNodeHttpAddress() + ", containerResourceMemory"
+ + allocatedContainer.getResource().getMemory());
+ // match the allocation to a pending request by memory size only
+ // NOTE(review): first memory match wins; requests differing only in cores/locality would
+ // be conflated — confirm memory uniquely identifies a request here.
+ for (ContainerRequest containerRequest : _genericApplicationMaster.containerRequestMap
+ .keySet()) {
+ if (containerRequest.getCapability().getMemory() == allocatedContainer.getResource()
+ .getMemory()) {
+ SettableFuture<ContainerAskResponse> future =
+ _genericApplicationMaster.containerRequestMap.remove(containerRequest);
+ ContainerAskResponse response = new ContainerAskResponse();
+ response.setContainer(allocatedContainer);
+ _genericApplicationMaster.allocatedContainerSet.add(allocatedContainer.getId());
+ future.set(response);
+ break;
+ }
+ }
+ }
+ }
+
+ @Override
+ public void onShutdownRequest() {
+ }
+
+ @Override
+ public void onNodesUpdated(List<NodeReport> updatedNodes) {
+ }
+
+ @Override
+ public float getProgress() {
+ // set progress to deliver to RM on next heartbeat
+ // NOTE(review): returns elapsed millis modulo Integer.MAX_VALUE, not a 0..1 fraction —
+ // presumably a placeholder; verify against the AMRMClientAsync progress contract.
+ return (System.currentTimeMillis() - startTime) % Integer.MAX_VALUE;
+ }
+
+ @Override
+ public void onError(Throwable e) {
+ _genericApplicationMaster.amRMClient.stop();
+ }
+ }
[12/50] [abbrv] git commit: Fix deallocation issue for killed
processes
Posted by ka...@apache.org.
Fix deallocation issue for killed processes
Project: http://git-wip-us.apache.org/repos/asf/helix/repo
Commit: http://git-wip-us.apache.org/repos/asf/helix/commit/b3dacb7a
Tree: http://git-wip-us.apache.org/repos/asf/helix/tree/b3dacb7a
Diff: http://git-wip-us.apache.org/repos/asf/helix/diff/b3dacb7a
Branch: refs/heads/master
Commit: b3dacb7abadb526c8cb1661a6b51ad47d39d9537
Parents: 64e1531
Author: Kanak Biscuitwala <ka...@apache.org>
Authored: Fri Feb 21 14:57:15 2014 -0800
Committer: Kanak Biscuitwala <ka...@apache.org>
Committed: Fri Feb 21 14:57:15 2014 -0800
----------------------------------------------------------------------
.../yarn/GenericApplicationMaster.java | 19 +++++++++-----
.../provisioning/yarn/RMCallbackHandler.java | 27 ++++++++++++--------
2 files changed, 29 insertions(+), 17 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/helix/blob/b3dacb7a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/GenericApplicationMaster.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/GenericApplicationMaster.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/GenericApplicationMaster.java
index a006363..79eb402 100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/GenericApplicationMaster.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/GenericApplicationMaster.java
@@ -23,13 +23,11 @@ import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.ByteBuffer;
import java.util.ArrayList;
-import java.util.Arrays;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
-import java.util.concurrent.Future;
-import java.util.concurrent.atomic.AtomicInteger;
+import java.util.Set;
import org.apache.commons.cli.ParseException;
import org.apache.commons.logging.Log;
@@ -61,6 +59,7 @@ import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Joiner;
+import com.google.common.collect.Sets;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.SettableFuture;
@@ -104,6 +103,7 @@ public class GenericApplicationMaster {
new LinkedHashMap<ContainerId, SettableFuture<ContainerStopResponse>>();
Map<ContainerId, SettableFuture<ContainerLaunchResponse>> containerLaunchResponseMap =
new LinkedHashMap<ContainerId, SettableFuture<ContainerLaunchResponse>>();
+ Set<ContainerId> allocatedContainerSet = Sets.newHashSet();
ByteBuffer allTokens;
@@ -246,14 +246,21 @@ public class GenericApplicationMaster {
public ListenableFuture<ContainerReleaseResponse> releaseContainer(Container container) {
LOG.info("Requesting container RELEASE:" + container);
SettableFuture<ContainerReleaseResponse> future = SettableFuture.create();
- containerReleaseMap.put(container.getId(), future);
- amRMClient.releaseAssignedContainer(container.getId());
+ synchronized (allocatedContainerSet) {
+ if (!allocatedContainerSet.contains(container.getId())) {
+ future.set(new ContainerReleaseResponse());
+ } else {
+ containerReleaseMap.put(container.getId(), future);
+ amRMClient.releaseAssignedContainer(container.getId());
+ }
+ }
return future;
}
public ListenableFuture<ContainerLaunchResponse> launchContainer(Container container,
ContainerLaunchContext containerLaunchContext) {
- LOG.info("Requesting container LAUNCH:" + container + " :" + Joiner.on(" ").join(containerLaunchContext.getCommands()));
+ LOG.info("Requesting container LAUNCH:" + container + " :"
+ + Joiner.on(" ").join(containerLaunchContext.getCommands()));
SettableFuture<ContainerLaunchResponse> future = SettableFuture.create();
containerLaunchResponseMap.put(container.getId(), future);
nmClientAsync.startContainerAsync(container, containerLaunchContext);
http://git-wip-us.apache.org/repos/asf/helix/blob/b3dacb7a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/RMCallbackHandler.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/RMCallbackHandler.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/RMCallbackHandler.java
index fe2c854..8612d3a 100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/RMCallbackHandler.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/RMCallbackHandler.java
@@ -42,17 +42,21 @@ class RMCallbackHandler implements AMRMClientAsync.CallbackHandler {
// non complete containers should not be here
assert (containerStatus.getState() == ContainerState.COMPLETE);
- SettableFuture<ContainerStopResponse> stopResponseFuture =
- _genericApplicationMaster.containerStopMap.remove(containerStatus.getContainerId());
- if (stopResponseFuture != null) {
- ContainerStopResponse value = new ContainerStopResponse();
- stopResponseFuture.set(value);
- } else {
- SettableFuture<ContainerReleaseResponse> releaseResponseFuture =
- _genericApplicationMaster.containerReleaseMap.remove(containerStatus.getContainerId());
- if (releaseResponseFuture != null) {
- ContainerReleaseResponse value = new ContainerReleaseResponse();
- releaseResponseFuture.set(value);
+ synchronized (_genericApplicationMaster.allocatedContainerSet) {
+ _genericApplicationMaster.allocatedContainerSet.remove(containerStatus.getContainerId());
+ SettableFuture<ContainerStopResponse> stopResponseFuture =
+ _genericApplicationMaster.containerStopMap.remove(containerStatus.getContainerId());
+ if (stopResponseFuture != null) {
+ ContainerStopResponse value = new ContainerStopResponse();
+ stopResponseFuture.set(value);
+ } else {
+ SettableFuture<ContainerReleaseResponse> releaseResponseFuture =
+ _genericApplicationMaster.containerReleaseMap
+ .remove(containerStatus.getContainerId());
+ if (releaseResponseFuture != null) {
+ ContainerReleaseResponse value = new ContainerReleaseResponse();
+ releaseResponseFuture.set(value);
+ }
}
}
// increment counters for completed/failed containers
@@ -96,6 +100,7 @@ class RMCallbackHandler implements AMRMClientAsync.CallbackHandler {
_genericApplicationMaster.containerRequestMap.remove(containerRequest);
ContainerAskResponse response = new ContainerAskResponse();
response.setContainer(allocatedContainer);
+ _genericApplicationMaster.allocatedContainerSet.add(allocatedContainer.getId());
future.set(response);
break;
}
[22/50] [abbrv] git commit: Restrict shutdown message to a specific
session
Posted by ka...@apache.org.
Restrict shutdown message to a specific session
Project: http://git-wip-us.apache.org/repos/asf/helix/repo
Commit: http://git-wip-us.apache.org/repos/asf/helix/commit/2ab31ddb
Tree: http://git-wip-us.apache.org/repos/asf/helix/tree/2ab31ddb
Diff: http://git-wip-us.apache.org/repos/asf/helix/diff/2ab31ddb
Branch: refs/heads/master
Commit: 2ab31ddb503b726fe22936ce8831877d9fae3292
Parents: 224c7ea
Author: Kanak Biscuitwala <ka...@apache.org>
Authored: Mon Feb 24 15:30:29 2014 -0800
Committer: Kanak Biscuitwala <ka...@apache.org>
Committed: Mon Feb 24 15:30:29 2014 -0800
----------------------------------------------------------------------
.../apache/helix/controller/stages/ContainerProvisioningStage.java | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/helix/blob/2ab31ddb/helix-core/src/main/java/org/apache/helix/controller/stages/ContainerProvisioningStage.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/controller/stages/ContainerProvisioningStage.java b/helix-core/src/main/java/org/apache/helix/controller/stages/ContainerProvisioningStage.java
index bc3e0c6..ae433e0 100644
--- a/helix-core/src/main/java/org/apache/helix/controller/stages/ContainerProvisioningStage.java
+++ b/helix-core/src/main/java/org/apache/helix/controller/stages/ContainerProvisioningStage.java
@@ -276,7 +276,7 @@ public class ContainerProvisioningStage extends AbstractBaseStage {
if (participant.isAlive()) {
Message message = new Message(MessageType.SHUTDOWN, UUID.randomUUID().toString());
message.setTgtName(participant.getId().toString());
- message.setTgtSessionId("*");
+ message.setTgtSessionId(participant.getRunningInstance().getSessionId());
message.setMsgId(message.getId());
accessor.createProperty(
keyBuilder.message(participant.getId().toString(), message.getId()), message);
[44/50] [abbrv] git commit: [HELIX-438] Improve task framework retry
logic
Posted by ka...@apache.org.
[HELIX-438] Improve task framework retry logic
Project: http://git-wip-us.apache.org/repos/asf/helix/repo
Commit: http://git-wip-us.apache.org/repos/asf/helix/commit/0272e370
Tree: http://git-wip-us.apache.org/repos/asf/helix/tree/0272e370
Diff: http://git-wip-us.apache.org/repos/asf/helix/diff/0272e370
Branch: refs/heads/master
Commit: 0272e3701492fc738eadb878ddbc45b54d0ca62f
Parents: c5921f4
Author: Kanak Biscuitwala <ka...@apache.org>
Authored: Fri May 23 14:22:48 2014 -0700
Committer: Kanak Biscuitwala <ka...@apache.org>
Committed: Tue Jul 8 18:47:56 2014 -0700
----------------------------------------------------------------------
.../helix/task/FixedTargetTaskRebalancer.java | 13 +--
.../helix/task/GenericTaskRebalancer.java | 91 ++++++++++++++++++--
.../java/org/apache/helix/task/JobConfig.java | 28 +++++-
.../org/apache/helix/task/TaskRebalancer.java | 8 +-
.../java/org/apache/helix/task/TaskRunner.java | 14 ++-
.../java/org/apache/helix/task/Workflow.java | 47 +++++-----
.../org/apache/helix/task/beans/JobBean.java | 3 +-
.../task/TestIndependentTaskRebalancer.java | 89 ++++++++++++++-----
8 files changed, 231 insertions(+), 62 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/helix/blob/0272e370/helix-core/src/main/java/org/apache/helix/task/FixedTargetTaskRebalancer.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/FixedTargetTaskRebalancer.java b/helix-core/src/main/java/org/apache/helix/task/FixedTargetTaskRebalancer.java
index d1329ee..3d6f2eb 100644
--- a/helix-core/src/main/java/org/apache/helix/task/FixedTargetTaskRebalancer.java
+++ b/helix-core/src/main/java/org/apache/helix/task/FixedTargetTaskRebalancer.java
@@ -19,6 +19,7 @@ package org.apache.helix.task;
* under the License.
*/
+import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
@@ -56,7 +57,7 @@ public class FixedTargetTaskRebalancer extends TaskRebalancer {
@Override
public Map<ParticipantId, SortedSet<Integer>> getTaskAssignment(
ResourceCurrentState currStateOutput, ResourceAssignment prevAssignment,
- Iterable<ParticipantId> instanceList, JobConfig jobCfg, JobContext jobContext,
+ Collection<ParticipantId> instances, JobConfig jobCfg, JobContext jobContext,
WorkflowConfig workflowCfg, WorkflowContext workflowCtx, Set<Integer> partitionSet,
Cluster cache) {
IdealState tgtIs = getTgtIdealState(jobCfg, cache);
@@ -64,7 +65,7 @@ public class FixedTargetTaskRebalancer extends TaskRebalancer {
return Collections.emptyMap();
}
Set<String> tgtStates = jobCfg.getTargetPartitionStates();
- return getTgtPartitionAssignment(currStateOutput, instanceList, tgtIs, tgtStates, partitionSet,
+ return getTgtPartitionAssignment(currStateOutput, instances, tgtIs, tgtStates, partitionSet,
jobContext);
}
@@ -120,7 +121,7 @@ public class FixedTargetTaskRebalancer extends TaskRebalancer {
/**
* Get partition assignments for the target resource, but only for the partitions of interest.
* @param currStateOutput The current state of the instances in the cluster.
- * @param instanceList The set of instances.
+ * @param instances The instances.
* @param tgtIs The ideal state of the target resource.
* @param tgtStates Only partitions in this set of states will be considered. If null, partitions
* do not need to
@@ -129,11 +130,11 @@ public class FixedTargetTaskRebalancer extends TaskRebalancer {
* @return A map of instance vs set of partition ids assigned to that instance.
*/
private static Map<ParticipantId, SortedSet<Integer>> getTgtPartitionAssignment(
- ResourceCurrentState currStateOutput, Iterable<ParticipantId> instanceList, IdealState tgtIs,
+ ResourceCurrentState currStateOutput, Collection<ParticipantId> instances, IdealState tgtIs,
Set<String> tgtStates, Set<Integer> includeSet, JobContext jobCtx) {
Map<ParticipantId, SortedSet<Integer>> result =
new HashMap<ParticipantId, SortedSet<Integer>>();
- for (ParticipantId instance : instanceList) {
+ for (ParticipantId instance : instances) {
result.put(instance, new TreeSet<Integer>());
}
@@ -145,7 +146,7 @@ public class FixedTargetTaskRebalancer extends TaskRebalancer {
}
int pId = partitions.get(0);
if (includeSet.contains(pId)) {
- for (ParticipantId instance : instanceList) {
+ for (ParticipantId instance : instances) {
State s =
currStateOutput.getCurrentState(ResourceId.from(tgtIs.getResourceName()),
PartitionId.from(pName), instance);
http://git-wip-us.apache.org/repos/asf/helix/blob/0272e370/helix-core/src/main/java/org/apache/helix/task/GenericTaskRebalancer.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/GenericTaskRebalancer.java b/helix-core/src/main/java/org/apache/helix/task/GenericTaskRebalancer.java
index 8b5a258..740b1b9 100644
--- a/helix-core/src/main/java/org/apache/helix/task/GenericTaskRebalancer.java
+++ b/helix-core/src/main/java/org/apache/helix/task/GenericTaskRebalancer.java
@@ -20,6 +20,7 @@ package org.apache.helix.task;
*/
import java.util.ArrayList;
+import java.util.Collection;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.List;
@@ -41,6 +42,8 @@ import org.apache.helix.model.IdealState;
import org.apache.helix.model.ResourceAssignment;
import com.google.common.base.Function;
+import com.google.common.collect.BiMap;
+import com.google.common.collect.HashBiMap;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
@@ -50,6 +53,9 @@ import com.google.common.collect.Sets;
* assignment to target partitions and states of another resource
*/
public class GenericTaskRebalancer extends TaskRebalancer {
+ /** Reassignment policy for this algorithm */
+ private RetryPolicy _retryPolicy = new DefaultRetryReassigner();
+
@Override
public Set<Integer> getAllTaskPartitions(JobConfig jobCfg, JobContext jobCtx,
WorkflowConfig workflowCfg, WorkflowContext workflowCtx, Cluster cache) {
@@ -68,7 +74,7 @@ public class GenericTaskRebalancer extends TaskRebalancer {
@Override
public Map<ParticipantId, SortedSet<Integer>> getTaskAssignment(
ResourceCurrentState currStateOutput, ResourceAssignment prevAssignment,
- Iterable<ParticipantId> instanceList, JobConfig jobCfg, final JobContext jobContext,
+ Collection<ParticipantId> instances, JobConfig jobCfg, final JobContext jobContext,
WorkflowConfig workflowCfg, WorkflowContext workflowCtx, Set<Integer> partitionSet,
Cluster cache) {
// Gather input to the full auto rebalancing algorithm
@@ -121,7 +127,7 @@ public class GenericTaskRebalancer extends TaskRebalancer {
new AutoRebalanceStrategy(resourceId, partitions, states, Integer.MAX_VALUE,
new AutoRebalanceStrategy.DefaultPlacementScheme());
List<ParticipantId> allNodes =
- Lists.newArrayList(getEligibleInstances(jobCfg, currStateOutput, instanceList, cache));
+ Lists.newArrayList(getEligibleInstances(jobCfg, currStateOutput, instances, cache));
Collections.sort(allNodes);
ZNRecord record = strategy.typedComputePartitionAssignment(allNodes, currentMapping, allNodes);
Map<String, List<String>> preferenceLists = record.getListFields();
@@ -140,6 +146,9 @@ public class GenericTaskRebalancer extends TaskRebalancer {
taskAssignment.get(participantId).add(Integer.valueOf(partitionName));
}
}
+
+ // Finally, adjust the assignment if tasks have been failing
+ taskAssignment = _retryPolicy.reassign(jobCfg, jobContext, allNodes, taskAssignment);
return taskAssignment;
}
@@ -147,14 +156,14 @@ public class GenericTaskRebalancer extends TaskRebalancer {
* Filter a list of instances based on targeted resource policies
* @param jobCfg the job configuration
* @param currStateOutput the current state of all instances in the cluster
- * @param instanceList valid instances
+ * @param instances valid instances
* @param cache current snapshot of the cluster
* @return a set of instances that can be assigned to
*/
private Set<ParticipantId> getEligibleInstances(JobConfig jobCfg,
- ResourceCurrentState currStateOutput, Iterable<ParticipantId> instanceList, Cluster cache) {
+ ResourceCurrentState currStateOutput, Iterable<ParticipantId> instances, Cluster cache) {
// No target resource means any instance is available
- Set<ParticipantId> allInstances = Sets.newHashSet(instanceList);
+ Set<ParticipantId> allInstances = Sets.newHashSet(instances);
String targetResource = jobCfg.getTargetResource();
if (targetResource == null) {
return allInstances;
@@ -193,4 +202,76 @@ public class GenericTaskRebalancer extends TaskRebalancer {
allInstances.retainAll(eligibleInstances);
return allInstances;
}
+
+ public interface RetryPolicy {
+ /**
+ * Adjust the assignment to allow for reassignment if a task keeps failing where it's currently
+ * assigned
+ * @param jobCfg the job configuration
+ * @param jobCtx the job context
+ * @param instances instances that can serve tasks
+ * @param origAssignment the unmodified assignment
+ * @return the adjusted assignment
+ */
+ Map<ParticipantId, SortedSet<Integer>> reassign(JobConfig jobCfg, JobContext jobCtx,
+ Collection<ParticipantId> instances, Map<ParticipantId, SortedSet<Integer>> origAssignment);
+ }
+
+ private static class DefaultRetryReassigner implements RetryPolicy {
+ @Override
+ public Map<ParticipantId, SortedSet<Integer>> reassign(JobConfig jobCfg, JobContext jobCtx,
+ Collection<ParticipantId> instances, Map<ParticipantId, SortedSet<Integer>> origAssignment) {
+ // Compute an increasing integer ID for each instance
+ BiMap<ParticipantId, Integer> instanceMap = HashBiMap.create(instances.size());
+ int instanceIndex = 0;
+ for (ParticipantId instance : instances) {
+ instanceMap.put(instance, instanceIndex++);
+ }
+
+ // Move partitions
+ Map<ParticipantId, SortedSet<Integer>> newAssignment = Maps.newHashMap();
+ for (Map.Entry<ParticipantId, SortedSet<Integer>> e : origAssignment.entrySet()) {
+ ParticipantId instance = e.getKey();
+ SortedSet<Integer> partitions = e.getValue();
+ Integer instanceId = instanceMap.get(instance);
+ if (instanceId != null) {
+ for (int p : partitions) {
+ // Determine for each partition if there have been failures with the current assignment
+ // strategy, and if so, force a shift in assignment for that partition only
+ int shiftValue = getNumInstancesToShift(jobCfg, jobCtx, instances, p);
+ int newInstanceId = (instanceId + shiftValue) % instances.size();
+ ParticipantId newInstance = instanceMap.inverse().get(newInstanceId);
+ if (newInstance == null) {
+ newInstance = instance;
+ }
+ if (!newAssignment.containsKey(newInstance)) {
+ newAssignment.put(newInstance, new TreeSet<Integer>());
+ }
+ newAssignment.get(newInstance).add(p);
+ }
+ } else {
+ // In case something goes wrong, just keep the previous assignment
+ newAssignment.put(instance, partitions);
+ }
+ }
+ return newAssignment;
+ }
+
+ /**
+ * In case tasks fail, we may not want to schedule them in the same place. This method allows us
+ * to compute a shifting value so that we can systematically choose other instances to try
+ * @param jobCfg the job configuration
+ * @param jobCtx the job context
+ * @param instances instances that can be chosen
+ * @param p the partition to look up
+ * @return the shifting value
+ */
+ private int getNumInstancesToShift(JobConfig jobCfg, JobContext jobCtx,
+ Collection<ParticipantId> instances, int p) {
+ int numAttempts = jobCtx.getPartitionNumAttempts(p);
+ int maxNumAttempts = jobCfg.getMaxAttemptsPerTask();
+ int numInstances = Math.min(instances.size(), jobCfg.getMaxForcedReassignmentsPerTask() + 1);
+ return numAttempts / (maxNumAttempts / numInstances);
+ }
+ }
}
http://git-wip-us.apache.org/repos/asf/helix/blob/0272e370/helix-core/src/main/java/org/apache/helix/task/JobConfig.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/JobConfig.java b/helix-core/src/main/java/org/apache/helix/task/JobConfig.java
index b166da1..3f9ab41 100644
--- a/helix-core/src/main/java/org/apache/helix/task/JobConfig.java
+++ b/helix-core/src/main/java/org/apache/helix/task/JobConfig.java
@@ -61,6 +61,8 @@ public class JobConfig {
public static final String TIMEOUT_PER_TASK = "TimeoutPerPartition";
/** The maximum number of times the task rebalancer may attempt to execute a task. */
public static final String MAX_ATTEMPTS_PER_TASK = "MaxAttemptsPerTask";
+ /** The maximum number of times Helix will intentionally move a failing task */
+ public static final String MAX_FORCED_REASSIGNMENTS_PER_TASK = "MaxForcedReassignmentsPerTask";
/** The number of concurrent tasks that are allowed to run on an instance. */
public static final String NUM_CONCURRENT_TASKS_PER_INSTANCE = "ConcurrentTasksPerInstance";
/** The number of tasks within the job that are allowed to fail. */
@@ -75,6 +77,7 @@ public class JobConfig {
public static final int DEFAULT_MAX_ATTEMPTS_PER_TASK = 10;
public static final int DEFAULT_NUM_CONCURRENT_TASKS_PER_INSTANCE = 1;
public static final int DEFAULT_FAILURE_THRESHOLD = 0;
+ public static final int DEFAULT_MAX_FORCED_REASSIGNMENTS_PER_TASK = 0;
private final String _workflow;
private final String _targetResource;
@@ -85,13 +88,14 @@ public class JobConfig {
private final long _timeoutPerTask;
private final int _numConcurrentTasksPerInstance;
private final int _maxAttemptsPerTask;
+ private final int _maxForcedReassignmentsPerTask;
private final int _failureThreshold;
private final Map<String, TaskConfig> _taskConfigMap;
private JobConfig(String workflow, String targetResource, List<String> targetPartitions,
Set<String> targetPartitionStates, String command, Map<String, String> jobConfigMap,
long timeoutPerTask, int numConcurrentTasksPerInstance, int maxAttemptsPerTask,
- int failureThreshold, Map<String, TaskConfig> taskConfigMap) {
+ int maxForcedReassignmentsPerTask, int failureThreshold, Map<String, TaskConfig> taskConfigMap) {
_workflow = workflow;
_targetResource = targetResource;
_targetPartitions = targetPartitions;
@@ -101,6 +105,7 @@ public class JobConfig {
_timeoutPerTask = timeoutPerTask;
_numConcurrentTasksPerInstance = numConcurrentTasksPerInstance;
_maxAttemptsPerTask = maxAttemptsPerTask;
+ _maxForcedReassignmentsPerTask = maxForcedReassignmentsPerTask;
_failureThreshold = failureThreshold;
if (taskConfigMap != null) {
_taskConfigMap = taskConfigMap;
@@ -145,6 +150,10 @@ public class JobConfig {
return _maxAttemptsPerTask;
}
+ public int getMaxForcedReassignmentsPerTask() {
+ return _maxForcedReassignmentsPerTask;
+ }
+
public int getFailureThreshold() {
return _failureThreshold;
}
@@ -180,6 +189,7 @@ public class JobConfig {
}
cfgMap.put(JobConfig.TIMEOUT_PER_TASK, "" + _timeoutPerTask);
cfgMap.put(JobConfig.MAX_ATTEMPTS_PER_TASK, "" + _maxAttemptsPerTask);
+ cfgMap.put(JobConfig.MAX_FORCED_REASSIGNMENTS_PER_TASK, "" + _maxForcedReassignmentsPerTask);
cfgMap.put(JobConfig.FAILURE_THRESHOLD, "" + _failureThreshold);
return cfgMap;
}
@@ -198,6 +208,7 @@ public class JobConfig {
private long _timeoutPerTask = DEFAULT_TIMEOUT_PER_TASK;
private int _numConcurrentTasksPerInstance = DEFAULT_NUM_CONCURRENT_TASKS_PER_INSTANCE;
private int _maxAttemptsPerTask = DEFAULT_MAX_ATTEMPTS_PER_TASK;
+ private int _maxForcedReassignmentsPerTask = DEFAULT_MAX_FORCED_REASSIGNMENTS_PER_TASK;
private int _failureThreshold = DEFAULT_FAILURE_THRESHOLD;
public JobConfig build() {
@@ -205,7 +216,7 @@ public class JobConfig {
return new JobConfig(_workflow, _targetResource, _targetPartitions, _targetPartitionStates,
_command, _commandConfig, _timeoutPerTask, _numConcurrentTasksPerInstance,
- _maxAttemptsPerTask, _failureThreshold, _taskConfigMap);
+ _maxAttemptsPerTask, _maxForcedReassignmentsPerTask, _failureThreshold, _taskConfigMap);
}
/**
@@ -246,6 +257,10 @@ public class JobConfig {
if (cfg.containsKey(MAX_ATTEMPTS_PER_TASK)) {
b.setMaxAttemptsPerTask(Integer.parseInt(cfg.get(MAX_ATTEMPTS_PER_TASK)));
}
+ if (cfg.containsKey(MAX_FORCED_REASSIGNMENTS_PER_TASK)) {
+ b.setMaxForcedReassignmentsPerTask(Integer.parseInt(cfg
+ .get(MAX_FORCED_REASSIGNMENTS_PER_TASK)));
+ }
if (cfg.containsKey(FAILURE_THRESHOLD)) {
b.setFailureThreshold(Integer.parseInt(cfg.get(FAILURE_THRESHOLD)));
}
@@ -297,6 +312,11 @@ public class JobConfig {
return this;
}
+ public Builder setMaxForcedReassignmentsPerTask(int v) {
+ _maxForcedReassignmentsPerTask = v;
+ return this;
+ }
+
public Builder setFailureThreshold(int v) {
_failureThreshold = v;
return this;
@@ -340,6 +360,10 @@ public class JobConfig {
throw new IllegalArgumentException(String.format("%s has invalid value %s",
MAX_ATTEMPTS_PER_TASK, _maxAttemptsPerTask));
}
+ if (_maxForcedReassignmentsPerTask < 0) {
+ throw new IllegalArgumentException(String.format("%s has invalid value %s",
+ MAX_FORCED_REASSIGNMENTS_PER_TASK, _maxForcedReassignmentsPerTask));
+ }
if (_failureThreshold < 0) {
throw new IllegalArgumentException(String.format("%s has invalid value %s",
FAILURE_THRESHOLD, _failureThreshold));
http://git-wip-us.apache.org/repos/asf/helix/blob/0272e370/helix-core/src/main/java/org/apache/helix/task/TaskRebalancer.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/TaskRebalancer.java b/helix-core/src/main/java/org/apache/helix/task/TaskRebalancer.java
index 376eca5..043e7dd 100644
--- a/helix-core/src/main/java/org/apache/helix/task/TaskRebalancer.java
+++ b/helix-core/src/main/java/org/apache/helix/task/TaskRebalancer.java
@@ -20,6 +20,7 @@ package org.apache.helix.task;
*/
import java.util.ArrayList;
+import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
@@ -75,7 +76,7 @@ public abstract class TaskRebalancer implements HelixRebalancer {
* Compute an assignment of tasks to instances
* @param currStateOutput the current state of the instances
* @param prevAssignment the previous task partition assignment
- * @param instanceList the instances
+ * @param instances the instances
* @param jobCfg the task configuration
* @param taskCtx the task context
* @param workflowCfg the workflow configuration
@@ -86,7 +87,7 @@ public abstract class TaskRebalancer implements HelixRebalancer {
*/
public abstract Map<ParticipantId, SortedSet<Integer>> getTaskAssignment(
ResourceCurrentState currStateOutput, ResourceAssignment prevAssignment,
- Iterable<ParticipantId> instanceList, JobConfig jobCfg, JobContext jobContext,
+ Collection<ParticipantId> instanceList, JobConfig jobCfg, JobContext jobContext,
WorkflowConfig workflowCfg, WorkflowContext workflowCtx, Set<Integer> partitionSet,
Cluster cache);
@@ -192,7 +193,7 @@ public abstract class TaskRebalancer implements HelixRebalancer {
private ResourceAssignment computeResourceMapping(String jobResource,
WorkflowConfig workflowConfig, JobConfig jobCfg, ResourceAssignment prevAssignment,
- Iterable<ParticipantId> liveInstances, ResourceCurrentState currStateOutput,
+ Collection<ParticipantId> liveInstances, ResourceCurrentState currStateOutput,
WorkflowContext workflowCtx, JobContext jobCtx, Set<Integer> partitionsToDropFromIs,
Cluster cache) {
TargetState jobTgtState = workflowConfig.getTargetState();
@@ -381,6 +382,7 @@ public abstract class TaskRebalancer implements HelixRebalancer {
// This includes all completed, failed, already assigned partitions.
Set<Integer> excludeSet = Sets.newTreeSet(assignedPartitions);
addCompletedPartitions(excludeSet, jobCtx, allPartitions);
+ excludeSet.addAll(skippedPartitions);
// Get instance->[partition, ...] mappings for the target resource.
Map<ParticipantId, SortedSet<Integer>> tgtPartitionAssignments =
getTaskAssignment(currStateOutput, prevAssignment, liveInstances, jobCfg, jobCtx,
http://git-wip-us.apache.org/repos/asf/helix/blob/0272e370/helix-core/src/main/java/org/apache/helix/task/TaskRunner.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/TaskRunner.java b/helix-core/src/main/java/org/apache/helix/task/TaskRunner.java
index cd909ed..66abba6 100644
--- a/helix-core/src/main/java/org/apache/helix/task/TaskRunner.java
+++ b/helix-core/src/main/java/org/apache/helix/task/TaskRunner.java
@@ -21,6 +21,7 @@ package org.apache.helix.task;
import org.apache.helix.HelixManager;
import org.apache.helix.participant.statemachine.StateModel;
+import org.apache.helix.task.TaskResult.Status;
import org.apache.log4j.Logger;
/**
@@ -64,7 +65,12 @@ public class TaskRunner implements Runnable {
public void run() {
try {
signalStarted();
- _result = _task.run();
+ try {
+ _result = _task.run();
+ } catch (Throwable t) {
+ LOG.error("Problem running the task", t);
+ _result = new TaskResult(Status.ERROR, null);
+ }
switch (_result.getStatus()) {
case COMPLETED:
@@ -96,8 +102,10 @@ public class TaskRunner implements Runnable {
* Signals the task to cancel itself.
*/
public void timeout() {
- _timeout = true;
- cancel();
+ if (!_done) {
+ _timeout = true;
+ cancel();
+ }
}
/**
http://git-wip-us.apache.org/repos/asf/helix/blob/0272e370/helix-core/src/main/java/org/apache/helix/task/Workflow.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/Workflow.java b/helix-core/src/main/java/org/apache/helix/task/Workflow.java
index 8afafe4..70fb82c 100644
--- a/helix-core/src/main/java/org/apache/helix/task/Workflow.java
+++ b/helix-core/src/main/java/org/apache/helix/task/Workflow.java
@@ -180,7 +180,9 @@ public class Workflow {
Joiner.on(",").join(job.targetPartitions));
}
builder.addConfig(job.name, JobConfig.MAX_ATTEMPTS_PER_TASK,
- String.valueOf(job.maxAttemptsPerPartition));
+ String.valueOf(job.maxAttemptsPerTask));
+ builder.addConfig(job.name, JobConfig.MAX_FORCED_REASSIGNMENTS_PER_TASK,
+ String.valueOf(job.maxForcedReassignmentsPerTask));
builder.addConfig(job.name, JobConfig.NUM_CONCURRENT_TASKS_PER_INSTANCE,
String.valueOf(job.numConcurrentTasksPerInstance));
builder.addConfig(job.name, JobConfig.TIMEOUT_PER_TASK,
@@ -243,40 +245,41 @@ public class Workflow {
_expiry = -1;
}
- public Builder addConfig(String node, String key, String val) {
- node = namespacify(node);
- _dag.addNode(node);
- if (!_jobConfigs.containsKey(node)) {
- _jobConfigs.put(node, new TreeMap<String, String>());
+ public Builder addConfig(String job, String key, String val) {
+ job = namespacify(job);
+ _dag.addNode(job);
+ if (!_jobConfigs.containsKey(job)) {
+ _jobConfigs.put(job, new TreeMap<String, String>());
}
- _jobConfigs.get(node).put(key, val);
+ _jobConfigs.get(job).put(key, val);
return this;
}
- public Builder addJobConfigMap(String node, Map<String, String> jobConfigMap) {
- return addConfig(node, JobConfig.JOB_CONFIG_MAP, TaskUtil.serializeJobConfigMap(jobConfigMap));
+ public Builder addJobConfigMap(String job, Map<String, String> jobConfigMap) {
+ return addConfig(job, JobConfig.JOB_CONFIG_MAP, TaskUtil.serializeJobConfigMap(jobConfigMap));
}
- public Builder addJobConfig(String node, JobConfig jobConfig) {
+ public Builder addJobConfig(String job, JobConfig jobConfig) {
for (Map.Entry<String, String> e : jobConfig.getResourceConfigMap().entrySet()) {
String key = e.getKey();
String val = e.getValue();
- addConfig(node, key, val);
+ addConfig(job, key, val);
}
- addTaskConfigs(node, jobConfig.getTaskConfigMap().values());
+ jobConfig.getJobConfigMap().put(JobConfig.WORKFLOW_ID, _name);
+ addTaskConfigs(job, jobConfig.getTaskConfigMap().values());
return this;
}
- public Builder addTaskConfigs(String node, Collection<TaskConfig> taskConfigs) {
- node = namespacify(node);
- _dag.addNode(node);
- if (!_taskConfigs.containsKey(node)) {
- _taskConfigs.put(node, new ArrayList<TaskConfig>());
+ public Builder addTaskConfigs(String job, Collection<TaskConfig> taskConfigs) {
+ job = namespacify(job);
+ _dag.addNode(job);
+ if (!_taskConfigs.containsKey(job)) {
+ _taskConfigs.put(job, new ArrayList<TaskConfig>());
}
- if (!_jobConfigs.containsKey(node)) {
- _jobConfigs.put(node, new TreeMap<String, String>());
+ if (!_jobConfigs.containsKey(job)) {
+ _jobConfigs.put(job, new TreeMap<String, String>());
}
- _taskConfigs.get(node).addAll(taskConfigs);
+ _taskConfigs.get(job).addAll(taskConfigs);
return this;
}
@@ -293,8 +296,8 @@ public class Workflow {
return this;
}
- public String namespacify(String task) {
- return TaskUtil.getNamespacedJobName(_name, task);
+ public String namespacify(String job) {
+ return TaskUtil.getNamespacedJobName(_name, job);
}
public Workflow build() {
http://git-wip-us.apache.org/repos/asf/helix/blob/0272e370/helix-core/src/main/java/org/apache/helix/task/beans/JobBean.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/beans/JobBean.java b/helix-core/src/main/java/org/apache/helix/task/beans/JobBean.java
index af5882c..bc5350a 100644
--- a/helix-core/src/main/java/org/apache/helix/task/beans/JobBean.java
+++ b/helix-core/src/main/java/org/apache/helix/task/beans/JobBean.java
@@ -38,6 +38,7 @@ public class JobBean {
public List<TaskBean> tasks;
public long timeoutPerPartition = JobConfig.DEFAULT_TIMEOUT_PER_TASK;
public int numConcurrentTasksPerInstance = JobConfig.DEFAULT_NUM_CONCURRENT_TASKS_PER_INSTANCE;
- public int maxAttemptsPerPartition = JobConfig.DEFAULT_MAX_ATTEMPTS_PER_TASK;
+ public int maxAttemptsPerTask = JobConfig.DEFAULT_MAX_ATTEMPTS_PER_TASK;
+ public int maxForcedReassignmentsPerTask = JobConfig.DEFAULT_MAX_FORCED_REASSIGNMENTS_PER_TASK;
public int failureThreshold = JobConfig.DEFAULT_FAILURE_THRESHOLD;
}
http://git-wip-us.apache.org/repos/asf/helix/blob/0272e370/helix-core/src/test/java/org/apache/helix/integration/task/TestIndependentTaskRebalancer.java
----------------------------------------------------------------------
diff --git a/helix-core/src/test/java/org/apache/helix/integration/task/TestIndependentTaskRebalancer.java b/helix-core/src/test/java/org/apache/helix/integration/task/TestIndependentTaskRebalancer.java
index 5dad94c..006c3fe 100644
--- a/helix-core/src/test/java/org/apache/helix/integration/task/TestIndependentTaskRebalancer.java
+++ b/helix-core/src/test/java/org/apache/helix/integration/task/TestIndependentTaskRebalancer.java
@@ -63,6 +63,7 @@ public class TestIndependentTaskRebalancer extends ZkIntegrationTestBase {
private final MockParticipantManager[] _participants = new MockParticipantManager[n];
private ClusterControllerManager _controller;
private Set<String> _invokedClasses = Sets.newHashSet();
+ private Map<String, Integer> _runCounts = Maps.newHashMap();
private HelixManager _manager;
private TaskDriver _driver;
@@ -82,24 +83,25 @@ public class TestIndependentTaskRebalancer extends ZkIntegrationTestBase {
setupTool.addInstanceToCluster(CLUSTER_NAME, storageNodeName);
}
- // Set task callbacks
- Map<String, TaskFactory> taskFactoryReg = new HashMap<String, TaskFactory>();
- taskFactoryReg.put("TaskOne", new TaskFactory() {
- @Override
- public Task createNewTask(TaskCallbackContext context) {
- return new TaskOne(context);
- }
- });
- taskFactoryReg.put("TaskTwo", new TaskFactory() {
- @Override
- public Task createNewTask(TaskCallbackContext context) {
- return new TaskTwo(context);
- }
- });
-
// start dummy participants
for (int i = 0; i < n; i++) {
- String instanceName = PARTICIPANT_PREFIX + "_" + (START_PORT + i);
+ final String instanceName = PARTICIPANT_PREFIX + "_" + (START_PORT + i);
+
+ // Set task callbacks
+ Map<String, TaskFactory> taskFactoryReg = new HashMap<String, TaskFactory>();
+ taskFactoryReg.put("TaskOne", new TaskFactory() {
+ @Override
+ public Task createNewTask(TaskCallbackContext context) {
+ return new TaskOne(context, instanceName);
+ }
+ });
+ taskFactoryReg.put("TaskTwo", new TaskFactory() {
+ @Override
+ public Task createNewTask(TaskCallbackContext context) {
+ return new TaskTwo(context, instanceName);
+ }
+ });
+
_participants[i] = new MockParticipantManager(ZK_ADDR, CLUSTER_NAME, instanceName);
// Register a Task state model factory.
@@ -125,6 +127,7 @@ public class TestIndependentTaskRebalancer extends ZkIntegrationTestBase {
@BeforeMethod
public void beforeMethod() {
_invokedClasses.clear();
+ _runCounts.clear();
}
@Test
@@ -208,10 +211,46 @@ public class TestIndependentTaskRebalancer extends ZkIntegrationTestBase {
Assert.assertTrue(_invokedClasses.contains(TaskTwo.class.getName()));
}
+ @Test
+ public void testReassignment() throws Exception {
+ final int NUM_INSTANCES = 2;
+ String jobName = TestHelper.getTestMethodName();
+ Workflow.Builder workflowBuilder = new Workflow.Builder(jobName);
+ List<TaskConfig> taskConfigs = Lists.newArrayListWithCapacity(2);
+ Map<String, String> taskConfigMap =
+ Maps.newHashMap(ImmutableMap.of("fail", "" + true, "failInstance", PARTICIPANT_PREFIX + '_'
+ + START_PORT));
+ TaskConfig taskConfig1 = new TaskConfig("TaskOne", taskConfigMap, false);
+ taskConfigs.add(taskConfig1);
+ workflowBuilder.addTaskConfigs(jobName, taskConfigs);
+ workflowBuilder.addConfig(jobName, JobConfig.COMMAND, "DummyCommand");
+ workflowBuilder.addConfig(jobName, JobConfig.MAX_FORCED_REASSIGNMENTS_PER_TASK, ""
+ + (NUM_INSTANCES - 1)); // this ensures that every instance gets one chance
+ Map<String, String> jobConfigMap = Maps.newHashMap();
+ jobConfigMap.put("Timeout", "1000");
+ workflowBuilder.addJobConfigMap(jobName, jobConfigMap);
+ _driver.start(workflowBuilder.build());
+
+ // Ensure the job completes
+ TestUtil.pollForWorkflowState(_manager, jobName, TaskState.IN_PROGRESS);
+ TestUtil.pollForWorkflowState(_manager, jobName, TaskState.COMPLETED);
+
+ // Ensure that the class was invoked
+ Assert.assertTrue(_invokedClasses.contains(TaskOne.class.getName()));
+
+ // Ensure that this was tried on two different instances, the first of which exhausted the
+ // attempts number, and the other passes on the first try
+ Assert.assertEquals(_runCounts.size(), NUM_INSTANCES);
+ Assert.assertTrue(_runCounts.values().contains(
+ JobConfig.DEFAULT_MAX_ATTEMPTS_PER_TASK / NUM_INSTANCES));
+ Assert.assertTrue(_runCounts.values().contains(1));
+ }
+
private class TaskOne extends ReindexTask {
private final boolean _shouldFail;
+ private final String _instanceName;
- public TaskOne(TaskCallbackContext context) {
+ public TaskOne(TaskCallbackContext context, String instanceName) {
super(context);
// Check whether or not this task should succeed
@@ -221,15 +260,25 @@ public class TestIndependentTaskRebalancer extends ZkIntegrationTestBase {
Map<String, String> configMap = taskConfig.getConfigMap();
if (configMap != null && configMap.containsKey("fail")
&& Boolean.parseBoolean(configMap.get("fail"))) {
- shouldFail = true;
+ // if a specific instance is specified, only fail for that one
+ shouldFail =
+ !configMap.containsKey("failInstance")
+ || configMap.get("failInstance").equals(instanceName);
}
}
_shouldFail = shouldFail;
+
+ // Initialize the count for this instance if not already done
+ if (!_runCounts.containsKey(instanceName)) {
+ _runCounts.put(instanceName, 0);
+ }
+ _instanceName = instanceName;
}
@Override
public TaskResult run() {
_invokedClasses.add(getClass().getName());
+ _runCounts.put(_instanceName, _runCounts.get(_instanceName) + 1);
// Fail the task if it should fail
if (_shouldFail) {
@@ -241,8 +290,8 @@ public class TestIndependentTaskRebalancer extends ZkIntegrationTestBase {
}
private class TaskTwo extends TaskOne {
- public TaskTwo(TaskCallbackContext context) {
- super(context);
+ public TaskTwo(TaskCallbackContext context, String instanceName) {
+ super(context, instanceName);
}
}
}
[36/50] [abbrv] git commit: Add container information to job runner
recipe
Posted by ka...@apache.org.
Add container information to job runner recipe
Project: http://git-wip-us.apache.org/repos/asf/helix/repo
Commit: http://git-wip-us.apache.org/repos/asf/helix/commit/c73e95ea
Tree: http://git-wip-us.apache.org/repos/asf/helix/tree/c73e95ea
Diff: http://git-wip-us.apache.org/repos/asf/helix/diff/c73e95ea
Branch: refs/heads/master
Commit: c73e95eaa07cf71996453f206dc691099f7868d6
Parents: 785bb9f
Author: Kanak Biscuitwala <ka...@apache.org>
Authored: Thu May 1 12:50:42 2014 -0700
Committer: Kanak Biscuitwala <ka...@apache.org>
Committed: Thu May 1 12:50:42 2014 -0700
----------------------------------------------------------------------
.../src/main/java/org/apache/helix/task/TaskDriver.java | 5 +++++
.../src/main/java/org/apache/helix/task/WorkflowConfig.java | 4 ++++
.../org/apache/helix/provisioning/ParticipantLauncher.java | 1 +
.../java/org/apache/helix/provisioning/yarn/AppLauncher.java | 8 ++++++++
.../helix/provisioning/yarn/AppStatusReportGenerator.java | 2 --
.../helix/provisioning/yarn/example/JobRunnerMain.java | 5 +++++
6 files changed, 23 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/helix/blob/c73e95ea/helix-core/src/main/java/org/apache/helix/task/TaskDriver.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/TaskDriver.java b/helix-core/src/main/java/org/apache/helix/task/TaskDriver.java
index 193b78e..c8f0d08 100644
--- a/helix-core/src/main/java/org/apache/helix/task/TaskDriver.java
+++ b/helix-core/src/main/java/org/apache/helix/task/TaskDriver.java
@@ -234,11 +234,16 @@ public class TaskDriver {
System.out.println("Workflow " + resource + " consists of the following tasks: "
+ wCfg.getJobDag().getAllNodes());
+ TaskState workflowState = wCtx.getWorkflowState();
+ if (workflowState == null) {
+ workflowState = TaskState.IN_PROGRESS;
+ }
System.out.println("Current state of workflow is " + wCtx.getWorkflowState().name());
System.out.println("Job states are: ");
System.out.println("-------");
for (String job : wCfg.getJobDag().getAllNodes()) {
System.out.println("Job " + job + " is " + wCtx.getJobState(job));
+ System.out.println("-------");
// fetch task information
JobConfig jCfg = TaskUtil.getJobCfg(_manager, job);
http://git-wip-us.apache.org/repos/asf/helix/blob/c73e95ea/helix-core/src/main/java/org/apache/helix/task/WorkflowConfig.java
----------------------------------------------------------------------
diff --git a/helix-core/src/main/java/org/apache/helix/task/WorkflowConfig.java b/helix-core/src/main/java/org/apache/helix/task/WorkflowConfig.java
index 6f10955..ff4a2a9 100644
--- a/helix-core/src/main/java/org/apache/helix/task/WorkflowConfig.java
+++ b/helix-core/src/main/java/org/apache/helix/task/WorkflowConfig.java
@@ -89,6 +89,10 @@ public class WorkflowConfig {
public static Builder fromMap(Map<String, String> cfg) {
Builder b = new Builder();
+ if (cfg == null) {
+ return b;
+ }
+
if (cfg.containsKey(EXPIRY)) {
b.setExpiry(Long.parseLong(cfg.get(EXPIRY)));
}
http://git-wip-us.apache.org/repos/asf/helix/blob/c73e95ea/helix-provisioning/src/main/java/org/apache/helix/provisioning/ParticipantLauncher.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/ParticipantLauncher.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/ParticipantLauncher.java
index 60231fb..59a9eb5 100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/ParticipantLauncher.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/ParticipantLauncher.java
@@ -130,6 +130,7 @@ public class ParticipantLauncher {
@Override
public void onError(Exception e, ErrorCode code, ErrorType type) {
+ LOG.error("Shutdown message error", e);
}
}
http://git-wip-us.apache.org/repos/asf/helix/blob/c73e95ea/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppLauncher.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppLauncher.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppLauncher.java
index 9a19842..76b7877 100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppLauncher.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppLauncher.java
@@ -400,6 +400,14 @@ public class AppLauncher {
return null;
}
+ public ApplicationReport getApplicationReport() {
+ try {
+ return yarnClient.getApplicationReport(_appId);
+ } catch (Exception e) {
+ return null;
+ }
+ }
+
/**
* @return true if successfully completed, it will print status every X seconds
*/
http://git-wip-us.apache.org/repos/asf/helix/blob/c73e95ea/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppStatusReportGenerator.java
----------------------------------------------------------------------
diff --git a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppStatusReportGenerator.java b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppStatusReportGenerator.java
index b083ac9..40c8186 100644
--- a/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppStatusReportGenerator.java
+++ b/helix-provisioning/src/main/java/org/apache/helix/provisioning/yarn/AppStatusReportGenerator.java
@@ -2,8 +2,6 @@ package org.apache.helix.provisioning.yarn;
import java.util.Map;
-import jline.ConsoleReader;
-
import org.apache.helix.HelixConnection;
import org.apache.helix.api.Participant;
import org.apache.helix.api.Resource;
http://git-wip-us.apache.org/repos/asf/helix/blob/c73e95ea/recipes/jobrunner-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/JobRunnerMain.java
----------------------------------------------------------------------
diff --git a/recipes/jobrunner-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/JobRunnerMain.java b/recipes/jobrunner-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/JobRunnerMain.java
index 623854f..78266cf 100644
--- a/recipes/jobrunner-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/JobRunnerMain.java
+++ b/recipes/jobrunner-yarn/src/main/java/org/apache/helix/provisioning/yarn/example/JobRunnerMain.java
@@ -14,6 +14,7 @@ import org.apache.helix.HelixManager;
import org.apache.helix.HelixRole;
import org.apache.helix.InstanceType;
import org.apache.helix.api.Participant;
+import org.apache.helix.api.RunningInstance;
import org.apache.helix.api.accessor.ClusterAccessor;
import org.apache.helix.api.config.ContainerConfig;
import org.apache.helix.api.id.ClusterId;
@@ -115,6 +116,10 @@ public class JobRunnerMain {
System.out.println(participant.getId() + "[" + containerConfig.getId() + "]: "
+ containerConfig.getState());
}
+ if (participant.isAlive()) {
+ RunningInstance runningInstance = participant.getRunningInstance();
+ System.out.println("\tProcess: " + runningInstance.getPid());
+ }
}
System.out.println("----------------");
System.out.println("TASK STATUS");