Posted to commits@twill.apache.org by ch...@apache.org on 2013/12/12 22:59:43 UTC

[01/28] Making maven site works.

Updated Branches:
  refs/heads/site [created] 35dfccc4d


http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/zookeeper/src/test/resources/logback-test.xml
----------------------------------------------------------------------
diff --git a/zookeeper/src/test/resources/logback-test.xml b/zookeeper/src/test/resources/logback-test.xml
deleted file mode 100644
index 157df6e..0000000
--- a/zookeeper/src/test/resources/logback-test.xml
+++ /dev/null
@@ -1,17 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-
-<!-- Default logback configuration for twill library -->
-<configuration>
-    <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
-        <encoder>
-            <pattern>%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n</pattern>
-        </encoder>
-    </appender>
-
-    <logger name="org.apache.hadoop" level="WARN" />
-    <logger name="org.apache.zookeeper" level="WARN" />
-
-    <root level="INFO">
-        <appender-ref ref="STDOUT"/>
-    </root>
-</configuration>


[12/28] Making maven site works.

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/main/java/org/apache/twill/internal/appmaster/RunningContainers.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/main/java/org/apache/twill/internal/appmaster/RunningContainers.java b/twill-yarn/src/main/java/org/apache/twill/internal/appmaster/RunningContainers.java
new file mode 100644
index 0000000..beef0d4
--- /dev/null
+++ b/twill-yarn/src/main/java/org/apache/twill/internal/appmaster/RunningContainers.java
@@ -0,0 +1,427 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.appmaster;
+
+import org.apache.twill.api.ResourceReport;
+import org.apache.twill.api.RunId;
+import org.apache.twill.api.ServiceController;
+import org.apache.twill.api.TwillRunResources;
+import org.apache.twill.internal.ContainerInfo;
+import org.apache.twill.internal.DefaultResourceReport;
+import org.apache.twill.internal.DefaultTwillRunResources;
+import org.apache.twill.internal.RunIds;
+import org.apache.twill.internal.TwillContainerController;
+import org.apache.twill.internal.TwillContainerLauncher;
+import org.apache.twill.internal.container.TwillContainerMain;
+import org.apache.twill.internal.state.Message;
+import org.apache.twill.internal.yarn.YarnContainerStatus;
+import com.google.common.base.Function;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.HashBasedTable;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Multiset;
+import com.google.common.collect.Table;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.BitSet;
+import java.util.Collection;
+import java.util.Deque;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.locks.Condition;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
+
+/**
+ * A helper class for ApplicationMasterService to keep track of running containers and to interact
+ * with them.
+ */
+final class RunningContainers {
+  private static final Logger LOG = LoggerFactory.getLogger(RunningContainers.class);
+
+  /**
+   * Function to return cardinality of a given BitSet.
+   */
+  private static final Function<BitSet, Integer> BITSET_CARDINALITY = new Function<BitSet, Integer>() {
+    @Override
+    public Integer apply(BitSet input) {
+      return input.cardinality();
+    }
+  };
+
+  // Table of <runnableName, containerId, controller>
+  private final Table<String, String, TwillContainerController> containers;
+
+  // Map from runnableName to a BitSet, with the bit at position <instanceId> set when that instance is running.
+  private final Map<String, BitSet> runnableInstances;
+  private final DefaultResourceReport resourceReport;
+  private final Deque<String> startSequence;
+  private final Lock containerLock;
+  private final Condition containerChange;
+
+  RunningContainers(String appId, TwillRunResources appMasterResources) {
+    containers = HashBasedTable.create();
+    runnableInstances = Maps.newHashMap();
+    startSequence = Lists.newLinkedList();
+    containerLock = new ReentrantLock();
+    containerChange = containerLock.newCondition();
+    resourceReport = new DefaultResourceReport(appId, appMasterResources);
+  }
+
+  /**
+   * Returns {@code true} if there is no live container.
+   */
+  boolean isEmpty() {
+    containerLock.lock();
+    try {
+      return runnableInstances.isEmpty();
+    } finally {
+      containerLock.unlock();
+    }
+  }
+
+  void start(String runnableName, ContainerInfo containerInfo, TwillContainerLauncher launcher) {
+    containerLock.lock();
+    try {
+      int instanceId = getStartInstanceId(runnableName);
+      RunId runId = getRunId(runnableName, instanceId);
+      TwillContainerController controller = launcher.start(runId, instanceId,
+                                                           TwillContainerMain.class, "$HADOOP_CONF_DIR");
+      containers.put(runnableName, containerInfo.getId(), controller);
+
+      TwillRunResources resources = new DefaultTwillRunResources(instanceId,
+                                                                 containerInfo.getId(),
+                                                                 containerInfo.getVirtualCores(),
+                                                                 containerInfo.getMemoryMB(),
+                                                                 containerInfo.getHost().getHostName());
+      resourceReport.addRunResources(runnableName, resources);
+
+      if (startSequence.isEmpty() || !runnableName.equals(startSequence.peekLast())) {
+        startSequence.addLast(runnableName);
+      }
+      containerChange.signalAll();
+
+    } finally {
+      containerLock.unlock();
+    }
+  }
+
+  ResourceReport getResourceReport() {
+    return resourceReport;
+  }
+
+  /**
+   * Stops and removes the last running container of the given runnable.
+   */
+  void removeLast(String runnableName) {
+    containerLock.lock();
+    try {
+      int maxInstanceId = getMaxInstanceId(runnableName);
+      if (maxInstanceId < 0) {
+        LOG.warn("No running container found for {}", runnableName);
+        return;
+      }
+
+      String lastContainerId = null;
+      TwillContainerController lastController = null;
+
+      // Find the controller with the maxInstanceId
+      for (Map.Entry<String, TwillContainerController> entry : containers.row(runnableName).entrySet()) {
+        if (getInstanceId(entry.getValue().getRunId()) == maxInstanceId) {
+          lastContainerId = entry.getKey();
+          lastController = entry.getValue();
+          break;
+        }
+      }
+
+      Preconditions.checkState(lastContainerId != null,
+                               "No container found for {} with instanceId = {}", runnableName, maxInstanceId);
+
+      LOG.info("Stopping service: {} {}", runnableName, lastController.getRunId());
+      lastController.stopAndWait();
+      containers.remove(runnableName, lastContainerId);
+      removeInstanceId(runnableName, maxInstanceId);
+      resourceReport.removeRunnableResources(runnableName, lastContainerId);
+      containerChange.signalAll();
+    } finally {
+      containerLock.unlock();
+    }
+  }
+
+  /**
+   * Blocks until the number of running instances of the given runnable matches the given count.
+   */
+  void waitForCount(String runnableName, int count) throws InterruptedException {
+    containerLock.lock();
+    try {
+      while (getRunningInstances(runnableName) != count) {
+        containerChange.await();
+      }
+    } finally {
+      containerLock.unlock();
+    }
+  }
+
+  /**
+   * Returns the number of running instances of the given runnable.
+   */
+  int count(String runnableName) {
+    containerLock.lock();
+    try {
+      return getRunningInstances(runnableName);
+    } finally {
+      containerLock.unlock();
+    }
+  }
+
+  /**
+   * Returns a Map containing the number of running instances of each runnable.
+   */
+  Map<String, Integer> countAll() {
+    containerLock.lock();
+    try {
+      return ImmutableMap.copyOf(Maps.transformValues(runnableInstances, BITSET_CARDINALITY));
+    } finally {
+      containerLock.unlock();
+    }
+  }
+
+  void sendToAll(Message message, Runnable completion) {
+    containerLock.lock();
+    try {
+      if (containers.isEmpty()) {
+        completion.run();
+      }
+
+      // Sends the command to all running containers
+      AtomicInteger count = new AtomicInteger(containers.size());
+      for (Map.Entry<String, Map<String, TwillContainerController>> entry : containers.rowMap().entrySet()) {
+        for (TwillContainerController controller : entry.getValue().values()) {
+          sendMessage(entry.getKey(), message, controller, count, completion);
+        }
+      }
+    } finally {
+      containerLock.unlock();
+    }
+  }
+
+  void sendToRunnable(String runnableName, Message message, Runnable completion) {
+    containerLock.lock();
+    try {
+      Collection<TwillContainerController> controllers = containers.row(runnableName).values();
+      if (controllers.isEmpty()) {
+        completion.run();
+      }
+
+      AtomicInteger count = new AtomicInteger(controllers.size());
+      for (TwillContainerController controller : controllers) {
+        sendMessage(runnableName, message, controller, count, completion);
+      }
+    } finally {
+      containerLock.unlock();
+    }
+  }
+
+  /**
+   * Stops all running services. Only called when the AppMaster stops.
+   */
+  void stopAll() {
+    containerLock.lock();
+    try {
+      // Stop them one by one in reverse order of the start sequence
+      Iterator<String> itor = startSequence.descendingIterator();
+      List<ListenableFuture<ServiceController.State>> futures = Lists.newLinkedList();
+      while (itor.hasNext()) {
+        String runnableName = itor.next();
+        LOG.info("Stopping all instances of " + runnableName);
+
+        futures.clear();
+        // Stops all running containers of the current runnable in parallel.
+        for (TwillContainerController controller : containers.row(runnableName).values()) {
+          futures.add(controller.stop());
+        }
+        // Wait for containers to stop. Assumes the future returned by Futures.successfulAsList won't throw exception.
+        Futures.getUnchecked(Futures.successfulAsList(futures));
+
+        LOG.info("Terminated all instances of " + runnableName);
+      }
+      containers.clear();
+      runnableInstances.clear();
+    } finally {
+      containerLock.unlock();
+    }
+  }
+
+  Set<String> getContainerIds() {
+    containerLock.lock();
+    try {
+      return ImmutableSet.copyOf(containers.columnKeySet());
+    } finally {
+      containerLock.unlock();
+    }
+  }
+
+  /**
+   * Handles the completion of a container.
+   * @param status The completion status.
+   * @param restartRunnables Set of runnable names that require restart.
+   */
+  void handleCompleted(YarnContainerStatus status, Multiset<String> restartRunnables) {
+    containerLock.lock();
+    String containerId = status.getContainerId();
+    int exitStatus = status.getExitStatus();
+    ContainerState state = status.getState();
+
+    try {
+      Map<String, TwillContainerController> lookup = containers.column(containerId);
+      if (lookup.isEmpty()) {
+        // It's OK because if a container is stopped through removeLast, this would be empty.
+        return;
+      }
+
+      if (lookup.size() != 1) {
+        LOG.warn("More than one controller found for container {}", containerId);
+      }
+
+      if (exitStatus != 0) {
+        LOG.warn("Container {} exited abnormally with state {}, exit code {}. Re-request the container.",
+                 containerId, state, exitStatus);
+        restartRunnables.add(lookup.keySet().iterator().next());
+      } else {
+        LOG.info("Container {} exited normally with state {}", containerId, state);
+      }
+
+      for (Map.Entry<String, TwillContainerController> completedEntry : lookup.entrySet()) {
+        String runnableName = completedEntry.getKey();
+        TwillContainerController controller = completedEntry.getValue();
+        controller.completed(exitStatus);
+
+        removeInstanceId(runnableName, getInstanceId(controller.getRunId()));
+        resourceReport.removeRunnableResources(runnableName, containerId);
+      }
+
+      lookup.clear();
+      containerChange.signalAll();
+    } finally {
+      containerLock.unlock();
+    }
+  }
+
+  /**
+   * Sends a command through the given {@link org.apache.twill.internal.TwillContainerController} of a runnable. Decrements the count
+   * when the sending of the command completes. Triggers completion when the count reaches zero.
+   */
+  private void sendMessage(final String runnableName, final Message message,
+                           final TwillContainerController controller, final AtomicInteger count,
+                           final Runnable completion) {
+    Futures.addCallback(controller.sendMessage(message), new FutureCallback<Message>() {
+      @Override
+      public void onSuccess(Message result) {
+        if (count.decrementAndGet() == 0) {
+          completion.run();
+        }
+      }
+
+      @Override
+      public void onFailure(Throwable t) {
+        try {
+          LOG.error("Failed to send message. Runnable: {}, RunId: {}, Message: {}.",
+                    runnableName, controller.getRunId(), message, t);
+        } finally {
+          if (count.decrementAndGet() == 0) {
+            completion.run();
+          }
+        }
+      }
+    });
+  }
+
+  /**
+   * Returns the instanceId to use for starting the next instance of the given runnable.
+   */
+  private int getStartInstanceId(String runnableName) {
+    BitSet instances = runnableInstances.get(runnableName);
+    if (instances == null) {
+      instances = new BitSet();
+      runnableInstances.put(runnableName, instances);
+    }
+    int instanceId = instances.nextClearBit(0);
+    instances.set(instanceId);
+    return instanceId;
+  }
+
+  private void removeInstanceId(String runnableName, int instanceId) {
+    BitSet instances = runnableInstances.get(runnableName);
+    if (instances == null) {
+      return;
+    }
+    instances.clear(instanceId);
+    if (instances.isEmpty()) {
+      runnableInstances.remove(runnableName);
+    }
+  }
+
+  /**
+   * Returns the largest instanceId for the given runnable. Returns -1 if no container is running.
+   */
+  private int getMaxInstanceId(String runnableName) {
+    BitSet instances = runnableInstances.get(runnableName);
+    if (instances == null || instances.isEmpty()) {
+      return -1;
+    }
+    return instances.length() - 1;
+  }
+
+  /**
+   * Returns the number of running instances for the given runnable.
+   */
+  private int getRunningInstances(String runnableName) {
+    BitSet instances = runnableInstances.get(runnableName);
+    return instances == null ? 0 : instances.cardinality();
+  }
+
+  private RunId getRunId(String runnableName, int instanceId) {
+    RunId baseId;
+
+    Collection<TwillContainerController> controllers = containers.row(runnableName).values();
+    if (controllers.isEmpty()) {
+      baseId = RunIds.generate();
+    } else {
+      String id = controllers.iterator().next().getRunId().getId();
+      baseId = RunIds.fromString(id.substring(0, id.lastIndexOf('-')));
+    }
+
+    return RunIds.fromString(baseId.getId() + '-' + instanceId);
+  }
+
+  private int getInstanceId(RunId runId) {
+    String id = runId.getId();
+    return Integer.parseInt(id.substring(id.lastIndexOf('-') + 1));
+  }
+}
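
The instance bookkeeping above relies entirely on java.util.BitSet: nextClearBit(0) hands out the lowest free instanceId, clear(id) releases it, and length() - 1 gives the largest live id. The following standalone sketch mirrors getStartInstanceId, removeInstanceId, getMaxInstanceId and getRunningInstances in isolation (the InstanceIdAllocator name is illustrative, not part of this patch):

    import java.util.BitSet;

    final class InstanceIdAllocator {
      private final BitSet instances = new BitSet();

      // Mirrors getStartInstanceId: lowest unused id, marked as in use.
      int acquire() {
        int id = instances.nextClearBit(0);
        instances.set(id);
        return id;
      }

      // Mirrors removeInstanceId: releases the id so it can be handed out again.
      void release(int id) {
        instances.clear(id);
      }

      // Mirrors getMaxInstanceId: largest live id, or -1 when nothing is running.
      int max() {
        return instances.isEmpty() ? -1 : instances.length() - 1;
      }

      // Mirrors getRunningInstances: number of live instances.
      int count() {
        return instances.cardinality();
      }
    }

With ids {0, 1, 2} live, release(1) followed by acquire() returns 1 again, which is why instanceIds are reused after removeLast or a container failure.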

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/main/java/org/apache/twill/internal/appmaster/TrackerService.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/main/java/org/apache/twill/internal/appmaster/TrackerService.java b/twill-yarn/src/main/java/org/apache/twill/internal/appmaster/TrackerService.java
new file mode 100644
index 0000000..ca299e0
--- /dev/null
+++ b/twill-yarn/src/main/java/org/apache/twill/internal/appmaster/TrackerService.java
@@ -0,0 +1,222 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.appmaster;
+
+import org.apache.twill.api.ResourceReport;
+import org.apache.twill.internal.json.ResourceReportAdapter;
+import com.google.common.util.concurrent.AbstractIdleService;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.jboss.netty.bootstrap.ServerBootstrap;
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.jboss.netty.buffer.ChannelBufferOutputStream;
+import org.jboss.netty.buffer.ChannelBuffers;
+import org.jboss.netty.channel.Channel;
+import org.jboss.netty.channel.ChannelFactory;
+import org.jboss.netty.channel.ChannelFuture;
+import org.jboss.netty.channel.ChannelFutureListener;
+import org.jboss.netty.channel.ChannelHandlerContext;
+import org.jboss.netty.channel.ChannelPipeline;
+import org.jboss.netty.channel.ChannelPipelineFactory;
+import org.jboss.netty.channel.Channels;
+import org.jboss.netty.channel.ExceptionEvent;
+import org.jboss.netty.channel.MessageEvent;
+import org.jboss.netty.channel.SimpleChannelUpstreamHandler;
+import org.jboss.netty.channel.group.ChannelGroup;
+import org.jboss.netty.channel.group.DefaultChannelGroup;
+import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory;
+import org.jboss.netty.handler.codec.http.DefaultHttpResponse;
+import org.jboss.netty.handler.codec.http.HttpChunkAggregator;
+import org.jboss.netty.handler.codec.http.HttpContentCompressor;
+import org.jboss.netty.handler.codec.http.HttpHeaders;
+import org.jboss.netty.handler.codec.http.HttpMethod;
+import org.jboss.netty.handler.codec.http.HttpRequest;
+import org.jboss.netty.handler.codec.http.HttpRequestDecoder;
+import org.jboss.netty.handler.codec.http.HttpResponse;
+import org.jboss.netty.handler.codec.http.HttpResponseEncoder;
+import org.jboss.netty.handler.codec.http.HttpResponseStatus;
+import org.jboss.netty.handler.codec.http.HttpVersion;
+import org.jboss.netty.util.CharsetUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.io.OutputStreamWriter;
+import java.io.Writer;
+import java.net.InetSocketAddress;
+import java.net.URI;
+import java.net.URL;
+import java.util.concurrent.Executor;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Web service that the Application Master registers with the Resource Manager so that
+ * clients can track application progress. Currently used purely for getting a
+ * breakdown of resource usage as a {@link org.apache.twill.api.ResourceReport}.
+ */
+public final class TrackerService extends AbstractIdleService {
+
+  // TODO: This is temporary. When more REST APIs are supported, this would get moved.
+  public static final String PATH = "/resources";
+
+  private static final Logger LOG  = LoggerFactory.getLogger(TrackerService.class);
+  private static final int NUM_BOSS_THREADS = 1;
+  private static final int CLOSE_CHANNEL_TIMEOUT = 5;
+  private static final int MAX_INPUT_SIZE = 100 * 1024 * 1024;
+
+  private final String host;
+  private ServerBootstrap bootstrap;
+  private InetSocketAddress bindAddress;
+  private URL url;
+  private final ChannelGroup channelGroup;
+  private final ResourceReport resourceReport;
+
+  /**
+   * Initialize the service.
+   *
+   * @param resourceReport live report that the service will return to clients.
+   * @param appMasterHost the application master host.
+   */
+  public TrackerService(ResourceReport resourceReport, String appMasterHost) {
+    this.channelGroup = new DefaultChannelGroup("appMasterTracker");
+    this.resourceReport = resourceReport;
+    this.host = appMasterHost;
+  }
+
+  /**
+   * Returns the address this tracker service is bound to.
+   */
+  public InetSocketAddress getBindAddress() {
+    return bindAddress;
+  }
+
+  /**
+   * @return the tracker URL.
+   */
+  public URL getUrl() {
+    return url;
+  }
+
+  @Override
+  protected void startUp() throws Exception {
+    Executor bossThreads = Executors.newFixedThreadPool(NUM_BOSS_THREADS,
+                                                        new ThreadFactoryBuilder()
+                                                          .setDaemon(true)
+                                                          .setNameFormat("boss-thread")
+                                                          .build());
+
+    Executor workerThreads = Executors.newCachedThreadPool(new ThreadFactoryBuilder()
+                                                             .setDaemon(true)
+                                                             .setNameFormat("worker-thread#%d")
+                                                             .build());
+
+    ChannelFactory factory = new NioServerSocketChannelFactory(bossThreads, workerThreads);
+
+    bootstrap = new ServerBootstrap(factory);
+
+    bootstrap.setPipelineFactory(new ChannelPipelineFactory() {
+      public ChannelPipeline getPipeline() {
+        ChannelPipeline pipeline = Channels.pipeline();
+
+        pipeline.addLast("decoder", new HttpRequestDecoder());
+        pipeline.addLast("aggregator", new HttpChunkAggregator(MAX_INPUT_SIZE));
+        pipeline.addLast("encoder", new HttpResponseEncoder());
+        pipeline.addLast("compressor", new HttpContentCompressor());
+        pipeline.addLast("handler", new ReportHandler(resourceReport));
+
+        return pipeline;
+      }
+    });
+
+    Channel channel = bootstrap.bind(new InetSocketAddress(host, 0));
+    bindAddress = (InetSocketAddress) channel.getLocalAddress();
+    url = URI.create(String.format("http://%s:%d", host, bindAddress.getPort()))
+             .resolve(TrackerService.PATH).toURL();
+    channelGroup.add(channel);
+  }
+
+  @Override
+  protected void shutDown() throws Exception {
+    try {
+      if (!channelGroup.close().await(CLOSE_CHANNEL_TIMEOUT, TimeUnit.SECONDS)) {
+        LOG.warn("Timeout when closing all channels.");
+      }
+    } finally {
+      bootstrap.releaseExternalResources();
+    }
+  }
+
+  /**
+   * Handler that returns the resources used by this application master, available through
+   * the host and port set when this application master registered itself with the resource manager.
+   */
+  public class ReportHandler extends SimpleChannelUpstreamHandler {
+    private final ResourceReport report;
+    private final ResourceReportAdapter reportAdapter;
+
+    public ReportHandler(ResourceReport report) {
+      this.report = report;
+      this.reportAdapter = ResourceReportAdapter.create();
+    }
+
+    @Override
+    public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) throws Exception {
+      HttpRequest request = (HttpRequest) e.getMessage();
+      if (!isValid(request)) {
+        write404(e);
+        return;
+      }
+
+      writeResponse(e);
+    }
+
+    // only accepts GET on /resources for now
+    private boolean isValid(HttpRequest request) {
+      return (request.getMethod() == HttpMethod.GET) && PATH.equals(request.getUri());
+    }
+
+    private void write404(MessageEvent e) {
+      HttpResponse response = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.NOT_FOUND);
+      ChannelFuture future = e.getChannel().write(response);
+      future.addListener(ChannelFutureListener.CLOSE);
+    }
+
+    private void writeResponse(MessageEvent e) {
+      HttpResponse response = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK);
+      response.setHeader(HttpHeaders.Names.CONTENT_TYPE, "application/json; charset=UTF-8");
+
+      ChannelBuffer content = ChannelBuffers.dynamicBuffer();
+      Writer writer = new OutputStreamWriter(new ChannelBufferOutputStream(content), CharsetUtil.UTF_8);
+      reportAdapter.toJson(report, writer);
+      try {
+        writer.close();
+      } catch (IOException e1) {
+        LOG.error("error writing resource report", e1);
+      }
+      response.setContent(content);
+      ChannelFuture future = e.getChannel().write(response);
+      future.addListener(ChannelFutureListener.CLOSE);
+    }
+
+    @Override
+    public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e) {
+      e.getChannel().close();
+    }
+  }
+}
+
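
A client reads the live report by issuing an HTTP GET against the URL exposed by getUrl() above; the response is the ResourceReport serialized as JSON by ResourceReportAdapter. A minimal JDK-only sketch (the trackerUrl value is a placeholder; in practice it comes from the YARN application report or TrackerService.getUrl()):

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;

    public final class ResourceReportFetcher {
      public static void main(String[] args) throws Exception {
        // Placeholder URL; TrackerService.startUp() builds the real one as
        // http://<appMasterHost>:<boundPort>/resources.
        URL trackerUrl = new URL("http://appmaster.example.com:12345/resources");
        HttpURLConnection conn = (HttpURLConnection) trackerUrl.openConnection();
        try {
          BufferedReader reader = new BufferedReader(
            new InputStreamReader(conn.getInputStream(), "UTF-8"));
          StringBuilder json = new StringBuilder();
          String line;
          while ((line = reader.readLine()) != null) {
            json.append(line);
          }
          reader.close();
          System.out.println(json);  // JSON form of the ResourceReport
        } finally {
          conn.disconnect();
        }
      }
    }

Any request other than GET /resources is answered with 404 by ReportHandler.isValid() and write404() above.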

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/main/java/org/apache/twill/internal/appmaster/package-info.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/main/java/org/apache/twill/internal/appmaster/package-info.java b/twill-yarn/src/main/java/org/apache/twill/internal/appmaster/package-info.java
new file mode 100644
index 0000000..bf8e677
--- /dev/null
+++ b/twill-yarn/src/main/java/org/apache/twill/internal/appmaster/package-info.java
@@ -0,0 +1,21 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * This package contains implementation of Twill application master.
+ */
+package org.apache.twill.internal.appmaster;

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/main/java/org/apache/twill/internal/container/TwillContainerMain.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/main/java/org/apache/twill/internal/container/TwillContainerMain.java b/twill-yarn/src/main/java/org/apache/twill/internal/container/TwillContainerMain.java
new file mode 100644
index 0000000..bbd6c10
--- /dev/null
+++ b/twill-yarn/src/main/java/org/apache/twill/internal/container/TwillContainerMain.java
@@ -0,0 +1,182 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.container;
+
+import org.apache.twill.api.LocalFile;
+import org.apache.twill.api.RunId;
+import org.apache.twill.api.RuntimeSpecification;
+import org.apache.twill.api.TwillRunnableSpecification;
+import org.apache.twill.api.TwillSpecification;
+import org.apache.twill.discovery.DiscoveryService;
+import org.apache.twill.discovery.ZKDiscoveryService;
+import org.apache.twill.internal.Arguments;
+import org.apache.twill.internal.BasicTwillContext;
+import org.apache.twill.internal.Constants;
+import org.apache.twill.internal.ContainerInfo;
+import org.apache.twill.internal.EnvContainerInfo;
+import org.apache.twill.internal.EnvKeys;
+import org.apache.twill.internal.RunIds;
+import org.apache.twill.internal.ServiceMain;
+import org.apache.twill.internal.json.ArgumentsCodec;
+import org.apache.twill.internal.json.TwillSpecificationAdapter;
+import org.apache.twill.zookeeper.RetryStrategies;
+import org.apache.twill.zookeeper.ZKClient;
+import org.apache.twill.zookeeper.ZKClientService;
+import org.apache.twill.zookeeper.ZKClientServices;
+import org.apache.twill.zookeeper.ZKClients;
+import com.google.common.base.Charsets;
+import com.google.common.base.Preconditions;
+import com.google.common.io.Files;
+import com.google.common.util.concurrent.Service;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.DataInputStream;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.Reader;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Main class for launching a {@link TwillContainerService} inside a YARN container.
+ */
+public final class TwillContainerMain extends ServiceMain {
+
+  private static final Logger LOG = LoggerFactory.getLogger(TwillContainerMain.class);
+
+  /**
+   * Main method for launching a {@link TwillContainerService} which runs
+   * a {@link org.apache.twill.api.TwillRunnable}.
+   */
+  public static void main(final String[] args) throws Exception {
+    // Try to load the secure store from the localized file, which the AM requested the RM to localize for this container.
+    loadSecureStore();
+
+    String zkConnectStr = System.getenv(EnvKeys.TWILL_ZK_CONNECT);
+    File twillSpecFile = new File(Constants.Files.TWILL_SPEC);
+    RunId appRunId = RunIds.fromString(System.getenv(EnvKeys.TWILL_APP_RUN_ID));
+    RunId runId = RunIds.fromString(System.getenv(EnvKeys.TWILL_RUN_ID));
+    String runnableName = System.getenv(EnvKeys.TWILL_RUNNABLE_NAME);
+    int instanceId = Integer.parseInt(System.getenv(EnvKeys.TWILL_INSTANCE_ID));
+    int instanceCount = Integer.parseInt(System.getenv(EnvKeys.TWILL_INSTANCE_COUNT));
+
+    ZKClientService zkClientService = ZKClientServices.delegate(
+      ZKClients.reWatchOnExpire(
+        ZKClients.retryOnFailure(ZKClientService.Builder.of(zkConnectStr).build(),
+                                 RetryStrategies.fixDelay(1, TimeUnit.SECONDS))));
+
+    DiscoveryService discoveryService = new ZKDiscoveryService(zkClientService);
+
+    TwillSpecification twillSpec = loadTwillSpec(twillSpecFile);
+    renameLocalFiles(twillSpec.getRunnables().get(runnableName));
+    
+    TwillRunnableSpecification runnableSpec = twillSpec.getRunnables().get(runnableName).getRunnableSpecification();
+    ContainerInfo containerInfo = new EnvContainerInfo();
+    Arguments arguments = decodeArgs();
+    BasicTwillContext context = new BasicTwillContext(
+      runId, appRunId, containerInfo.getHost(),
+      arguments.getRunnableArguments().get(runnableName).toArray(new String[0]),
+      arguments.getArguments().toArray(new String[0]),
+      runnableSpec, instanceId, discoveryService, instanceCount,
+      containerInfo.getMemoryMB(), containerInfo.getVirtualCores()
+    );
+
+    Configuration conf = new YarnConfiguration(new HdfsConfiguration(new Configuration()));
+    Service service = new TwillContainerService(context, containerInfo,
+                                                getContainerZKClient(zkClientService, appRunId, runnableName),
+                                                runId, runnableSpec, getClassLoader(),
+                                                createAppLocation(conf));
+    new TwillContainerMain().doMain(zkClientService, service);
+  }
+
+  private static void loadSecureStore() throws IOException {
+    if (!UserGroupInformation.isSecurityEnabled()) {
+      return;
+    }
+
+    File file = new File(Constants.Files.CREDENTIALS);
+    if (file.exists()) {
+      Credentials credentials = new Credentials();
+      DataInputStream input = new DataInputStream(new FileInputStream(file));
+      try {
+        credentials.readTokenStorageStream(input);
+      } finally {
+        input.close();
+      }
+
+      UserGroupInformation.getCurrentUser().addCredentials(credentials);
+      LOG.info("Secure store updated from {}", file);
+    }
+  }
+
+  private static void renameLocalFiles(RuntimeSpecification runtimeSpec) {
+    for (LocalFile file : runtimeSpec.getLocalFiles()) {
+      if (file.isArchive()) {
+        String path = file.getURI().toString();
+        String name = file.getName() + (path.endsWith(".tar.gz") ? ".tar.gz" : path.substring(path.lastIndexOf('.')));
+        Preconditions.checkState(new File(name).renameTo(new File(file.getName())),
+                                 "Fail to rename file from %s to %s.",
+                                 name, file.getName());
+      }
+    }
+  }
+
+  private static ZKClient getContainerZKClient(ZKClient zkClient, RunId appRunId, String runnableName) {
+    return ZKClients.namespace(zkClient, String.format("/%s/runnables/%s", appRunId, runnableName));
+  }
+
+  /**
+   * Returns the ClassLoader for the runnable.
+   */
+  private static ClassLoader getClassLoader() {
+    ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
+    if (classLoader == null) {
+      return ClassLoader.getSystemClassLoader();
+    }
+    return classLoader;
+  }
+
+  private static TwillSpecification loadTwillSpec(File specFile) throws IOException {
+    Reader reader = Files.newReader(specFile, Charsets.UTF_8);
+    try {
+      return TwillSpecificationAdapter.create().fromJson(reader);
+    } finally {
+      reader.close();
+    }
+  }
+
+  private static Arguments decodeArgs() throws IOException {
+    return ArgumentsCodec.decode(Files.newReaderSupplier(new File(Constants.Files.ARGUMENTS), Charsets.UTF_8));
+  }
+
+  @Override
+  protected String getHostname() {
+    return System.getenv(EnvKeys.YARN_CONTAINER_HOST);
+  }
+
+  @Override
+  protected String getKafkaZKConnect() {
+    return System.getenv(EnvKeys.TWILL_LOG_KAFKA_ZK);
+  }
+}
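
The ZooKeeper client built in main() layers retry and automatic re-watch behaviour over a plain ZKClientService. The same wrapping can be used on its own; a minimal sketch, assuming a locally reachable ZooKeeper (the connect string is a placeholder, and the start/stop calls are the usual Guava Service lifecycle of that era):

    import java.util.concurrent.TimeUnit;

    import org.apache.twill.zookeeper.RetryStrategies;
    import org.apache.twill.zookeeper.ZKClientService;
    import org.apache.twill.zookeeper.ZKClientServices;
    import org.apache.twill.zookeeper.ZKClients;

    public final class ZKClientSketch {
      public static void main(String[] args) {
        // Same wrapping order as TwillContainerMain.main():
        // delegate(reWatchOnExpire(retryOnFailure(...))).
        ZKClientService zkClient = ZKClientServices.delegate(
          ZKClients.reWatchOnExpire(
            ZKClients.retryOnFailure(
              ZKClientService.Builder.of("localhost:2181").build(),  // placeholder connect string
              RetryStrategies.fixDelay(1, TimeUnit.SECONDS))));

        zkClient.startAndWait();
        try {
          // ... interact with ZooKeeper through zkClient ...
        } finally {
          zkClient.stopAndWait();
        }
      }
    }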

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/main/java/org/apache/twill/internal/container/TwillContainerService.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/main/java/org/apache/twill/internal/container/TwillContainerService.java b/twill-yarn/src/main/java/org/apache/twill/internal/container/TwillContainerService.java
new file mode 100644
index 0000000..f5bc1f2
--- /dev/null
+++ b/twill-yarn/src/main/java/org/apache/twill/internal/container/TwillContainerService.java
@@ -0,0 +1,168 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.container;
+
+import org.apache.twill.api.Command;
+import org.apache.twill.api.RunId;
+import org.apache.twill.api.TwillRunnable;
+import org.apache.twill.api.TwillRunnableSpecification;
+import org.apache.twill.common.Threads;
+import org.apache.twill.filesystem.Location;
+import org.apache.twill.internal.AbstractTwillService;
+import org.apache.twill.internal.BasicTwillContext;
+import org.apache.twill.internal.ContainerInfo;
+import org.apache.twill.internal.ContainerLiveNodeData;
+import org.apache.twill.internal.ZKServiceDecorator;
+import org.apache.twill.internal.logging.Loggings;
+import org.apache.twill.internal.state.Message;
+import org.apache.twill.internal.state.MessageCallback;
+import org.apache.twill.internal.utils.Instances;
+import org.apache.twill.zookeeper.ZKClient;
+import com.google.common.base.Preconditions;
+import com.google.common.base.Supplier;
+import com.google.common.util.concurrent.AbstractExecutionThreadService;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.Service;
+import com.google.common.util.concurrent.SettableFuture;
+import com.google.gson.Gson;
+import com.google.gson.JsonElement;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+
+/**
+ * This class acts as a YARN container and runs a {@link org.apache.twill.api.TwillRunnable}.
+ */
+public final class TwillContainerService extends AbstractTwillService {
+
+  private static final Logger LOG = LoggerFactory.getLogger(TwillContainerService.class);
+
+  private final TwillRunnableSpecification specification;
+  private final ClassLoader classLoader;
+  private final ContainerLiveNodeData containerLiveNode;
+  private final BasicTwillContext context;
+  private final ZKServiceDecorator serviceDelegate;
+  private ExecutorService commandExecutor;
+  private TwillRunnable runnable;
+
+  public TwillContainerService(BasicTwillContext context, ContainerInfo containerInfo, ZKClient zkClient,
+                               RunId runId, TwillRunnableSpecification specification, ClassLoader classLoader,
+                               Location applicationLocation) {
+    super(applicationLocation);
+
+    this.specification = specification;
+    this.classLoader = classLoader;
+    this.serviceDelegate = new ZKServiceDecorator(zkClient, runId, createLiveNodeSupplier(), new ServiceDelegate());
+    this.context = context;
+    this.containerLiveNode = new ContainerLiveNodeData(containerInfo.getId(),
+                                                       containerInfo.getHost().getCanonicalHostName());
+  }
+
+  private ListenableFuture<String> processMessage(final String messageId, final Message message) {
+    LOG.debug("Message received: {} {}.", messageId, message);
+
+    if (handleSecureStoreUpdate(message)) {
+      return Futures.immediateFuture(messageId);
+    }
+
+    final SettableFuture<String> result = SettableFuture.create();
+    Command command = message.getCommand();
+    if (message.getType() == Message.Type.SYSTEM
+          && "instances".equals(command.getCommand()) && command.getOptions().containsKey("count")) {
+      context.setInstanceCount(Integer.parseInt(command.getOptions().get("count")));
+    }
+
+    commandExecutor.execute(new Runnable() {
+
+      @Override
+      public void run() {
+        try {
+          runnable.handleCommand(message.getCommand());
+          result.set(messageId);
+        } catch (Exception e) {
+          result.setException(e);
+        }
+      }
+    });
+    return result;
+  }
+
+  private Supplier<? extends JsonElement> createLiveNodeSupplier() {
+    return new Supplier<JsonElement>() {
+      @Override
+      public JsonElement get() {
+        return new Gson().toJsonTree(containerLiveNode);
+      }
+    };
+  }
+
+  @Override
+  protected Service getServiceDelegate() {
+    return serviceDelegate;
+  }
+
+  private final class ServiceDelegate extends AbstractExecutionThreadService implements MessageCallback {
+
+    @Override
+    protected void startUp() throws Exception {
+      commandExecutor = Executors.newSingleThreadExecutor(
+        Threads.createDaemonThreadFactory("runnable-command-executor"));
+
+      Class<?> runnableClass = classLoader.loadClass(specification.getClassName());
+      Preconditions.checkArgument(TwillRunnable.class.isAssignableFrom(runnableClass),
+                                  "Class %s is not instance of TwillRunnable.", specification.getClassName());
+
+      runnable = Instances.newInstance((Class<TwillRunnable>) runnableClass);
+      runnable.initialize(context);
+    }
+
+    @Override
+    protected void triggerShutdown() {
+      try {
+        runnable.stop();
+      } catch (Throwable t) {
+        LOG.error("Exception when stopping runnable.", t);
+      }
+    }
+
+    @Override
+    protected void shutDown() throws Exception {
+      commandExecutor.shutdownNow();
+      runnable.destroy();
+      Loggings.forceFlush();
+    }
+
+    @Override
+    protected void run() throws Exception {
+      runnable.run();
+    }
+
+    @Override
+    public ListenableFuture<String> onReceived(String messageId, Message message) {
+      if (state() == State.RUNNING) {
+        // Only process message if the service is still alive
+        return processMessage(messageId, message);
+      }
+      return Futures.immediateFuture(messageId);
+    }
+  }
+}
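
When a SYSTEM "instances" message arrives, processMessage() above first updates the instance count on the BasicTwillContext and then hands the Command to the user's runnable on the single-thread command executor. A minimal sketch of a runnable reacting to that command, assuming the org.apache.twill.api.AbstractTwillRunnable base class (EchoRunnable is an illustrative name, not part of this patch):

    import org.apache.twill.api.AbstractTwillRunnable;
    import org.apache.twill.api.Command;

    public final class EchoRunnable extends AbstractTwillRunnable {

      private volatile boolean stopped = false;

      @Override
      public void run() {
        while (!stopped) {
          // ... do the actual work ...
          try {
            Thread.sleep(1000);
          } catch (InterruptedException e) {
            break;
          }
        }
      }

      @Override
      public void handleCommand(Command command) throws Exception {
        // Invoked by TwillContainerService.processMessage() on the command executor thread.
        if ("instances".equals(command.getCommand())) {
          System.out.println("Instance count changed to " + command.getOptions().get("count"));
        }
      }

      @Override
      public void stop() {
        stopped = true;
      }
    }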

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/main/java/org/apache/twill/internal/yarn/AbstractYarnProcessLauncher.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/main/java/org/apache/twill/internal/yarn/AbstractYarnProcessLauncher.java b/twill-yarn/src/main/java/org/apache/twill/internal/yarn/AbstractYarnProcessLauncher.java
new file mode 100644
index 0000000..b810854
--- /dev/null
+++ b/twill-yarn/src/main/java/org/apache/twill/internal/yarn/AbstractYarnProcessLauncher.java
@@ -0,0 +1,220 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.yarn;
+
+import org.apache.twill.api.LocalFile;
+import org.apache.twill.internal.ProcessController;
+import org.apache.twill.internal.ProcessLauncher;
+import org.apache.twill.internal.utils.Paths;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.yarn.api.ApplicationConstants;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Abstract class to help create different types of process launchers that launch processes on YARN.
+ *
+ * @param <T> Type of the object that contains information about the container that the process is going to launch.
+ */
+public abstract class AbstractYarnProcessLauncher<T> implements ProcessLauncher<T> {
+
+  private static final Logger LOG = LoggerFactory.getLogger(AbstractYarnProcessLauncher.class);
+
+  private final T containerInfo;
+
+  protected AbstractYarnProcessLauncher(T containerInfo) {
+    this.containerInfo = containerInfo;
+  }
+
+  @Override
+  public T getContainerInfo() {
+    return containerInfo;
+  }
+
+  @Override
+  public <C> PrepareLaunchContext prepareLaunch(Map<String, String> environments,
+                                                Iterable<LocalFile> resources, C credentials) {
+    if (credentials != null) {
+      Preconditions.checkArgument(credentials instanceof Credentials, "Credentials should be of type %s",
+                                  Credentials.class.getName());
+    }
+    return new PrepareLaunchContextImpl(environments, resources, (Credentials) credentials);
+  }
+
+  /**
+   * Tells whether to append a suffix to the localized resource name for archive file types. Default is true.
+   */
+  protected boolean useArchiveSuffix() {
+    return true;
+  }
+
+  /**
+   * For child classes to override to perform the actual process launching.
+   */
+  protected abstract <R> ProcessController<R> doLaunch(YarnLaunchContext launchContext);
+
+  /**
+   * Implementation for the {@link PrepareLaunchContext}.
+   */
+  private final class PrepareLaunchContextImpl implements PrepareLaunchContext {
+
+    private final Credentials credentials;
+    private final YarnLaunchContext launchContext;
+    private final Map<String, YarnLocalResource> localResources;
+    private final Map<String, String> environment;
+    private final List<String> commands;
+
+    private PrepareLaunchContextImpl(Map<String, String> env, Iterable<LocalFile> localFiles, Credentials credentials) {
+      this.credentials = credentials;
+      this.launchContext = YarnUtils.createLaunchContext();
+      this.localResources = Maps.newHashMap();
+      this.environment = Maps.newHashMap(env);
+      this.commands = Lists.newLinkedList();
+
+      for (LocalFile localFile : localFiles) {
+        addLocalFile(localFile);
+      }
+    }
+
+    private void addLocalFile(LocalFile localFile) {
+      String name = localFile.getName();
+      // Always append the file extension to the resource name so that archive expansion by YARN works.
+      // Renaming is done by the container launcher.
+      if (localFile.isArchive() && useArchiveSuffix()) {
+        String path = localFile.getURI().toString();
+        String suffix = Paths.getExtension(path);
+        if (!suffix.isEmpty()) {
+          name += '.' + suffix;
+        }
+      }
+      localResources.put(name, YarnUtils.createLocalResource(localFile));
+    }
+
+    @Override
+    public ResourcesAdder withResources() {
+      return new MoreResourcesImpl();
+    }
+
+    @Override
+    public AfterResources noResources() {
+      return new MoreResourcesImpl();
+    }
+
+    private final class MoreResourcesImpl implements MoreResources {
+
+      @Override
+      public MoreResources add(LocalFile localFile) {
+        addLocalFile(localFile);
+        return this;
+      }
+
+      @Override
+      public EnvironmentAdder withEnvironment() {
+        return finish();
+      }
+
+      @Override
+      public AfterEnvironment noEnvironment() {
+        return finish();
+      }
+
+      private MoreEnvironmentImpl finish() {
+        launchContext.setLocalResources(localResources);
+        return new MoreEnvironmentImpl();
+      }
+    }
+
+    private final class MoreEnvironmentImpl implements MoreEnvironment {
+
+      @Override
+      public CommandAdder withCommands() {
+        launchContext.setEnvironment(environment);
+        return new MoreCommandImpl();
+      }
+
+      @Override
+      public <V> MoreEnvironment add(String key, V value) {
+        environment.put(key, value.toString());
+        return this;
+      }
+    }
+
+    private final class MoreCommandImpl implements MoreCommand, StdOutSetter, StdErrSetter {
+
+      private final StringBuilder commandBuilder = new StringBuilder();
+
+      @Override
+      public StdOutSetter add(String cmd, String... args) {
+        commandBuilder.append(cmd);
+        for (String arg : args) {
+          commandBuilder.append(' ').append(arg);
+        }
+        return this;
+      }
+
+      @Override
+      public <R> ProcessController<R> launch() {
+        if (credentials != null && !credentials.getAllTokens().isEmpty()) {
+          for (Token<?> token : credentials.getAllTokens()) {
+            LOG.info("Launch with delegation token {}", token);
+          }
+          launchContext.setCredentials(credentials);
+        }
+        launchContext.setCommands(commands);
+        return doLaunch(launchContext);
+      }
+
+      @Override
+      public MoreCommand redirectError(String stderr) {
+        redirect(2, stderr);
+        return noError();
+      }
+
+      @Override
+      public MoreCommand noError() {
+        commands.add(commandBuilder.toString());
+        commandBuilder.setLength(0);
+        return this;
+      }
+
+      @Override
+      public StdErrSetter redirectOutput(String stdout) {
+        redirect(1, stdout);
+        return this;
+      }
+
+      @Override
+      public StdErrSetter noOutput() {
+        return this;
+      }
+
+      private void redirect(int type, String out) {
+        commandBuilder.append(' ')
+                      .append(type).append('>')
+                      .append(ApplicationConstants.LOG_DIR_EXPANSION_VAR).append('/').append(out);
+      }
+    }
+  }
+}
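
Put together, PrepareLaunchContextImpl turns a launch into one fluent chain: local resources, then environment, then commands with their log redirection. A sketch of what a caller of prepareLaunch() looks like, inferred from the implementation above; the launcher, env, files, credentials and extraLocalFile variables are placeholders, and the exact interface composition lives in ProcessLauncher.PrepareLaunchContext, which is not part of this patch:

    // Sketch only: launcher is a ProcessLauncher<?>, env a Map<String, String>,
    // files an Iterable<LocalFile>, credentials a Hadoop Credentials object.
    ProcessController<?> controller = launcher
        .prepareLaunch(env, files, credentials)
        .withResources()
          .add(extraLocalFile)                          // optional additional local file
        .withEnvironment()
          .add("HADOOP_CONF_DIR", "$HADOOP_CONF_DIR")   // environment of the launched process
        .withCommands()
          .add("java", "-cp", "launcher.jar", "MainClass")
          .redirectOutput("stdout")                     // appended as "1><LOG_DIR>/stdout"
          .redirectError("stderr")                      // appended as "2><LOG_DIR>/stderr"
        .launch();

Each add()/redirect call maps onto MoreCommandImpl above: add() accumulates into commandBuilder, redirectOutput()/redirectError() append the shell redirections, and launch() hands the assembled YarnLaunchContext to doLaunch().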

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/main/java/org/apache/twill/internal/yarn/VersionDetectYarnAMClientFactory.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/main/java/org/apache/twill/internal/yarn/VersionDetectYarnAMClientFactory.java b/twill-yarn/src/main/java/org/apache/twill/internal/yarn/VersionDetectYarnAMClientFactory.java
new file mode 100644
index 0000000..6f47b6c
--- /dev/null
+++ b/twill-yarn/src/main/java/org/apache/twill/internal/yarn/VersionDetectYarnAMClientFactory.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.yarn;
+
+import com.google.common.base.Throwables;
+import org.apache.hadoop.conf.Configuration;
+
+/**
+ * Factory that creates a version-specific {@link YarnAMClient} based on the Hadoop version detected at runtime.
+ */
+public final class VersionDetectYarnAMClientFactory implements YarnAMClientFactory {
+
+  private final Configuration conf;
+
+  public VersionDetectYarnAMClientFactory(Configuration conf) {
+    this.conf = conf;
+  }
+
+  @Override
+  @SuppressWarnings("unchecked")
+  public YarnAMClient create() {
+    try {
+      Class<YarnAMClient> clz;
+      if (YarnUtils.isHadoop20()) {
+        // Uses hadoop-2.0 class
+        String clzName = getClass().getPackage().getName() + ".Hadoop20YarnAMClient";
+        clz = (Class<YarnAMClient>) Class.forName(clzName);
+      } else {
+        // Uses hadoop-2.1 class
+        String clzName = getClass().getPackage().getName() + ".Hadoop21YarnAMClient";
+        clz = (Class<YarnAMClient>) Class.forName(clzName);
+      }
+
+      return clz.getConstructor(Configuration.class).newInstance(conf);
+
+    } catch (Exception e) {
+      throw Throwables.propagate(e);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/main/java/org/apache/twill/internal/yarn/VersionDetectYarnAppClientFactory.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/main/java/org/apache/twill/internal/yarn/VersionDetectYarnAppClientFactory.java b/twill-yarn/src/main/java/org/apache/twill/internal/yarn/VersionDetectYarnAppClientFactory.java
new file mode 100644
index 0000000..f9db959
--- /dev/null
+++ b/twill-yarn/src/main/java/org/apache/twill/internal/yarn/VersionDetectYarnAppClientFactory.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.yarn;
+
+import com.google.common.base.Throwables;
+import org.apache.hadoop.conf.Configuration;
+
+/**
+ * Factory that creates a version-specific {@link YarnAppClient} based on the Hadoop version detected at runtime.
+ */
+public final class VersionDetectYarnAppClientFactory implements YarnAppClientFactory {
+
+  @Override
+  @SuppressWarnings("unchecked")
+  public YarnAppClient create(Configuration configuration) {
+    try {
+      Class<YarnAppClient> clz;
+
+      if (YarnUtils.isHadoop20()) {
+        // Uses hadoop-2.0 class.
+        String clzName = getClass().getPackage().getName() + ".Hadoop20YarnAppClient";
+        clz = (Class<YarnAppClient>) Class.forName(clzName);
+      } else {
+        // Uses hadoop-2.1 class
+        String clzName = getClass().getPackage().getName() + ".Hadoop21YarnAppClient";
+        clz = (Class<YarnAppClient>) Class.forName(clzName);
+      }
+
+      return clz.getConstructor(Configuration.class).newInstance(configuration);
+
+    } catch (Exception e) {
+      throw Throwables.propagate(e);
+    }
+  }
+}
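
Both factories share the same pattern: choose an implementation class name from the detected Hadoop version, load it with Class.forName, and invoke its Configuration constructor reflectively. The pattern in isolation, with purely illustrative names and a String standing in for Configuration:

    public final class VersionedFactorySketch {

      interface Client { }

      public static final class V20Client implements Client {
        public V20Client(String conf) { }
      }

      public static final class V21Client implements Client {
        public V21Client(String conf) { }
      }

      static Client create(boolean isHadoop20, String conf) throws Exception {
        // Pick the implementation class name at runtime, exactly as the factories above
        // choose between Hadoop20YarnAppClient and Hadoop21YarnAppClient.
        String clzName = VersionedFactorySketch.class.getName()
            + (isHadoop20 ? "$V20Client" : "$V21Client");
        Class<? extends Client> clz = Class.forName(clzName).asSubclass(Client.class);
        // Reflective construction, like getConstructor(Configuration.class).newInstance(conf).
        return clz.getConstructor(String.class).newInstance(conf);
      }

      public static void main(String[] args) throws Exception {
        System.out.println(create(true, "dummy-conf").getClass().getSimpleName());  // V20Client
      }
    }

Resolving the implementation class only at runtime is what lets the same twill-yarn jar work against both the hadoop-2.0 and hadoop-2.1 YARN APIs without a hard compile-time dependency on either.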

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/main/java/org/apache/twill/internal/yarn/YarnAMClient.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/main/java/org/apache/twill/internal/yarn/YarnAMClient.java b/twill-yarn/src/main/java/org/apache/twill/internal/yarn/YarnAMClient.java
new file mode 100644
index 0000000..83ba6a8
--- /dev/null
+++ b/twill-yarn/src/main/java/org/apache/twill/internal/yarn/YarnAMClient.java
@@ -0,0 +1,117 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.yarn;
+
+import org.apache.twill.internal.ProcessLauncher;
+import com.google.common.collect.Sets;
+import com.google.common.util.concurrent.Service;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.util.Records;
+
+import java.net.InetSocketAddress;
+import java.net.URL;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.Set;
+
+/**
+ * This interface provides an abstraction for the AM to interact with YARN, hiding YARN version specific
+ * code and making multi-version compatibility easier.
+ */
+public interface YarnAMClient extends Service {
+
+  /**
+   * Builder for creating a container request.
+   */
+  abstract class ContainerRequestBuilder {
+
+    protected final Resource capability;
+    protected final int count;
+    protected final Set<String> hosts = Sets.newHashSet();
+    protected final Set<String> racks = Sets.newHashSet();
+    protected final Priority priority = Records.newRecord(Priority.class);
+
+    protected ContainerRequestBuilder(Resource capability, int count) {
+      this.capability = capability;
+      this.count = count;
+    }
+
+    public ContainerRequestBuilder addHosts(String firstHost, String...moreHosts) {
+      return add(hosts, firstHost, moreHosts);
+    }
+
+    public ContainerRequestBuilder addRacks(String firstRack, String...moreRacks) {
+      return add(racks, firstRack, moreRacks);
+    }
+
+    public ContainerRequestBuilder setPriority(int prio) {
+      priority.setPriority(prio);
+      return this;
+    }
+
+    /**
+     * Adds a container request. Returns a unique ID for the request.
+     */
+    public abstract String apply();
+
+    private <T> ContainerRequestBuilder add(Collection<T> collection, T first, T... more) {
+      collection.add(first);
+      Collections.addAll(collection, more);
+      return this;
+    }
+  }
+
+  ContainerId getContainerId();
+
+  String getHost();
+
+  /**
+   * Sets the tracker address and tracker url. This method should be called before calling {@link #start()}.
+   */
+  void setTracker(InetSocketAddress trackerAddr, URL trackerUrl);
+
+  /**
+   * Callback for allocate call.
+   */
+  // TODO: Move AM heartbeat logic into this interface so AM only needs to handle callback.
+  interface AllocateHandler {
+    void acquired(List<ProcessLauncher<YarnContainerInfo>> launchers);
+
+    void completed(List<YarnContainerStatus> completed);
+  }
+
+  void allocate(float progress, AllocateHandler handler) throws Exception;
+
+  ContainerRequestBuilder addContainerRequest(Resource capability);
+
+  ContainerRequestBuilder addContainerRequest(Resource capability, int count);
+
+  /**
+   * Notifies that a container request has been fulfilled.
+   *
+   * Note: This method is needed to work around what appears to be a bug in the AMRMClient implementation in
+   * YARN: if a container is requested after a previous container with the same capability was acquired,
+   * multiple containers will get allocated instead of one.
+   *
+   * @param id The ID returned by {@link YarnAMClient.ContainerRequestBuilder#apply()}.
+   */
+  void completeContainerRequest(String id);
+}
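
A rough, hedged sketch of how an application master might use this interface to request a container and react to allocations. The memory size, priority, and handler bodies are placeholders, not the actual Twill AM logic:

```java
import java.util.List;

import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.util.Records;
import org.apache.twill.internal.ProcessLauncher;
import org.apache.twill.internal.yarn.YarnAMClient;
import org.apache.twill.internal.yarn.YarnContainerInfo;
import org.apache.twill.internal.yarn.YarnContainerStatus;

public class AmAllocateSketch {

  // Hypothetical helper; a real AM drives allocate() from its heartbeat loop.
  void requestOneContainer(final YarnAMClient amClient) throws Exception {
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(512);

    // apply() returns an ID that is later handed back via completeContainerRequest(...).
    final String requestId = amClient.addContainerRequest(capability, 1)
      .setPriority(0)
      .apply();

    amClient.allocate(0.0f, new YarnAMClient.AllocateHandler() {
      @Override
      public void acquired(List<ProcessLauncher<YarnContainerInfo>> launchers) {
        // Launch runnables in the acquired containers, then mark the request as fulfilled.
        amClient.completeContainerRequest(requestId);
      }

      @Override
      public void completed(List<YarnContainerStatus> completed) {
        // React to containers that have finished.
      }
    });
  }
}
```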

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/main/java/org/apache/twill/internal/yarn/YarnAMClientFactory.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/main/java/org/apache/twill/internal/yarn/YarnAMClientFactory.java b/twill-yarn/src/main/java/org/apache/twill/internal/yarn/YarnAMClientFactory.java
new file mode 100644
index 0000000..b2a1194
--- /dev/null
+++ b/twill-yarn/src/main/java/org/apache/twill/internal/yarn/YarnAMClientFactory.java
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.yarn;
+
+/**
+ * Factory for creating {@link YarnAMClient} instances.
+ */
+public interface YarnAMClientFactory {
+
+  YarnAMClient create();
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/main/java/org/apache/twill/internal/yarn/YarnAppClient.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/main/java/org/apache/twill/internal/yarn/YarnAppClient.java b/twill-yarn/src/main/java/org/apache/twill/internal/yarn/YarnAppClient.java
new file mode 100644
index 0000000..71a9e68
--- /dev/null
+++ b/twill-yarn/src/main/java/org/apache/twill/internal/yarn/YarnAppClient.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.yarn;
+
+import org.apache.twill.api.TwillSpecification;
+import org.apache.twill.internal.ProcessController;
+import org.apache.twill.internal.ProcessLauncher;
+import com.google.common.util.concurrent.Service;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+
+/**
+ * Interface for launching Yarn application from client.
+ */
+public interface YarnAppClient extends Service {
+
+  /**
+   * Creates a {@link ProcessLauncher} for launching the application represented by the given spec.
+   */
+  ProcessLauncher<ApplicationId> createLauncher(TwillSpecification twillSpec) throws Exception;
+
+  /**
+   * Creates a {@link ProcessLauncher} for launching application with the given user and spec.
+   *
+   * @deprecated This method will get removed.
+   */
+  @Deprecated
+  ProcessLauncher<ApplicationId> createLauncher(String user, TwillSpecification twillSpec) throws Exception;
+
+  ProcessController<YarnApplicationReport> createProcessController(ApplicationId appId);
+}
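
A small illustrative wrapper showing the two entry points of YarnAppClient; the class and method names below are hypothetical, and the TwillSpecification and ApplicationId are supplied by the caller:

```java
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.twill.api.TwillSpecification;
import org.apache.twill.internal.ProcessController;
import org.apache.twill.internal.ProcessLauncher;
import org.apache.twill.internal.yarn.YarnAppClient;
import org.apache.twill.internal.yarn.YarnApplicationReport;

public class YarnAppClientCallsSketch {

  // Hypothetical wrapper; the spec comes from the caller and the launcher is returned untouched.
  ProcessLauncher<ApplicationId> launcherFor(YarnAppClient client, TwillSpecification spec) throws Exception {
    return client.createLauncher(spec);
  }

  // Hypothetical wrapper; the appId identifies an application that was launched earlier.
  ProcessController<YarnApplicationReport> controllerFor(YarnAppClient client, ApplicationId appId) {
    return client.createProcessController(appId);
  }
}
```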

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/main/java/org/apache/twill/internal/yarn/YarnAppClientFactory.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/main/java/org/apache/twill/internal/yarn/YarnAppClientFactory.java b/twill-yarn/src/main/java/org/apache/twill/internal/yarn/YarnAppClientFactory.java
new file mode 100644
index 0000000..70cecad
--- /dev/null
+++ b/twill-yarn/src/main/java/org/apache/twill/internal/yarn/YarnAppClientFactory.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.yarn;
+
+import org.apache.hadoop.conf.Configuration;
+
+/**
+ * Factory for creating {@link YarnAppClient} instances.
+ */
+public interface YarnAppClientFactory {
+
+  YarnAppClient create(Configuration configuration);
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/main/java/org/apache/twill/internal/yarn/YarnApplicationReport.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/main/java/org/apache/twill/internal/yarn/YarnApplicationReport.java b/twill-yarn/src/main/java/org/apache/twill/internal/yarn/YarnApplicationReport.java
new file mode 100644
index 0000000..4dbb1d1
--- /dev/null
+++ b/twill-yarn/src/main/java/org/apache/twill/internal/yarn/YarnApplicationReport.java
@@ -0,0 +1,126 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.yarn;
+
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
+import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
+import org.apache.hadoop.yarn.api.records.YarnApplicationState;
+
+/**
+ * This interface is for adapting differences in ApplicationReport across different Hadoop versions.
+ */
+public interface YarnApplicationReport {
+
+  /**
+   * Get the <code>ApplicationId</code> of the application.
+   * @return <code>ApplicationId</code> of the application
+   */
+  ApplicationId getApplicationId();
+
+  /**
+   * Get the <code>ApplicationAttemptId</code> of the current
+   * attempt of the application
+   * @return <code>ApplicationAttemptId</code> of the attempt
+   */
+  ApplicationAttemptId getCurrentApplicationAttemptId();
+
+  /**
+   * Get the <em>queue</em> to which the application was submitted.
+   * @return <em>queue</em> to which the application was submitted
+   */
+  String getQueue();
+
+  /**
+   * Get the user-defined <em>name</em> of the application.
+   * @return <em>name</em> of the application
+   */
+  String getName();
+
+  /**
+   * Get the <em>host</em> on which the <code>ApplicationMaster</code>
+   * is running.
+   * @return <em>host</em> on which the <code>ApplicationMaster</code>
+   *         is running
+   */
+  String getHost();
+
+  /**
+   * Get the <em>RPC port</em> of the <code>ApplicationMaster</code>.
+   * @return <em>RPC port</em> of the <code>ApplicationMaster</code>
+   */
+  int getRpcPort();
+
+
+  /**
+   * Get the <code>YarnApplicationState</code> of the application.
+   * @return <code>YarnApplicationState</code> of the application
+   */
+  YarnApplicationState getYarnApplicationState();
+
+
+  /**
+   * Get the <em>diagnostic information</em> of the application in case of
+   * errors.
+   * @return <em>diagnostic information</em> of the application in case
+   *         of errors
+   */
+  String getDiagnostics();
+
+
+  /**
+   * Get the <em>tracking url</em> for the application.
+   * @return <em>tracking url</em> for the application
+   */
+  String getTrackingUrl();
+
+
+  /**
+   * Get the original not-proxied <em>tracking url</em> for the application.
+   * This is intended to only be used by the proxy itself.
+   * @return the original not-proxied <em>tracking url</em> for the application
+   */
+  String getOriginalTrackingUrl();
+
+  /**
+   * Get the <em>start time</em> of the application.
+   * @return <em>start time</em> of the application
+   */
+  long getStartTime();
+
+
+  /**
+   * Get the <em>finish time</em> of the application.
+   * @return <em>finish time</em> of the application
+   */
+  long getFinishTime();
+
+
+  /**
+   * Get the <em>final finish status</em> of the application.
+   * @return <em>final finish status</em> of the application
+   */
+  FinalApplicationStatus getFinalApplicationStatus();
+
+  /**
+   * Retrieve the structure containing the job resources for this application
+   * @return the job resources structure for this application
+   */
+  ApplicationResourceUsageReport getApplicationResourceUsageReport();
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/main/java/org/apache/twill/internal/yarn/YarnContainerInfo.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/main/java/org/apache/twill/internal/yarn/YarnContainerInfo.java b/twill-yarn/src/main/java/org/apache/twill/internal/yarn/YarnContainerInfo.java
new file mode 100644
index 0000000..e806da7
--- /dev/null
+++ b/twill-yarn/src/main/java/org/apache/twill/internal/yarn/YarnContainerInfo.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.yarn;
+
+import org.apache.twill.internal.ContainerInfo;
+
+/**
+ * A version-neutral view of a YARN container that also exposes the underlying container object.
+ */
+public interface YarnContainerInfo extends ContainerInfo {
+
+  <T> T getContainer();
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/main/java/org/apache/twill/internal/yarn/YarnContainerStatus.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/main/java/org/apache/twill/internal/yarn/YarnContainerStatus.java b/twill-yarn/src/main/java/org/apache/twill/internal/yarn/YarnContainerStatus.java
new file mode 100644
index 0000000..57e712c
--- /dev/null
+++ b/twill-yarn/src/main/java/org/apache/twill/internal/yarn/YarnContainerStatus.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.yarn;
+
+import org.apache.hadoop.yarn.api.records.ContainerState;
+
+/**
+ * This interface is for adapting differences in ContainerStatus between Hadoop 2.0 and 2.1.
+ */
+public interface YarnContainerStatus {
+
+  String getContainerId();
+
+  ContainerState getState();
+
+  int getExitStatus();
+
+  String getDiagnostics();
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/main/java/org/apache/twill/internal/yarn/YarnLaunchContext.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/main/java/org/apache/twill/internal/yarn/YarnLaunchContext.java b/twill-yarn/src/main/java/org/apache/twill/internal/yarn/YarnLaunchContext.java
new file mode 100644
index 0000000..984a1be
--- /dev/null
+++ b/twill-yarn/src/main/java/org/apache/twill/internal/yarn/YarnLaunchContext.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.yarn;
+
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
+
+import java.nio.ByteBuffer;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * This interface is for adapting ContainerLaunchContext across different Hadoop versions.
+ */
+public interface YarnLaunchContext {
+
+  <T> T getLaunchContext();
+
+  void setCredentials(Credentials credentials);
+
+  void setLocalResources(Map<String, YarnLocalResource> localResources);
+
+  void setServiceData(Map<String, ByteBuffer> serviceData);
+
+  Map<String, String> getEnvironment();
+
+  void setEnvironment(Map<String, String> environment);
+
+  List<String> getCommands();
+
+  void setCommands(List<String> commands);
+
+  void setApplicationACLs(Map<ApplicationAccessType, String> acls);
+}
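
A brief, hedged sketch of populating a launch context using only the setters declared above; the environment variable, command line, and class names are placeholders, and the concrete YarnLaunchContext instance would come from the version-specific adapter:

```java
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import org.apache.twill.internal.yarn.YarnLaunchContext;

public class LaunchContextSketch {

  // Hypothetical helper; the environment variable and command line below are placeholders.
  void populate(YarnLaunchContext context) {
    context.setEnvironment(ImmutableMap.of("EXAMPLE_ENV", "value"));
    context.setCommands(ImmutableList.of("java", "-cp", "app.jar", "com.example.Main"));
  }
}
```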

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/main/java/org/apache/twill/internal/yarn/YarnLocalResource.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/main/java/org/apache/twill/internal/yarn/YarnLocalResource.java b/twill-yarn/src/main/java/org/apache/twill/internal/yarn/YarnLocalResource.java
new file mode 100644
index 0000000..9bfc224
--- /dev/null
+++ b/twill-yarn/src/main/java/org/apache/twill/internal/yarn/YarnLocalResource.java
@@ -0,0 +1,115 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.yarn;
+
+import org.apache.hadoop.yarn.api.records.LocalResourceType;
+import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
+import org.apache.hadoop.yarn.api.records.URL;
+
+/**
+ * An adapter interface for the LocalResource class/interface across different Hadoop versions.
+ */
+public interface YarnLocalResource {
+
+  /**
+   * Returns the actual LocalResource object in Yarn.
+   */
+  <T> T getLocalResource();
+
+  /**
+   * Get the <em>location</em> of the resource to be localized.
+   * @return <em>location</em> of the resource to be localized
+   */
+  URL getResource();
+
+  /**
+   * Set <em>location</em> of the resource to be localized.
+   * @param resource <em>location</em> of the resource to be localized
+   */
+  void setResource(URL resource);
+
+  /**
+   * Get the <em>size</em> of the resource to be localized.
+   * @return <em>size</em> of the resource to be localized
+   */
+  long getSize();
+
+  /**
+   * Set the <em>size</em> of the resource to be localized.
+   * @param size <em>size</em> of the resource to be localized
+   */
+  void setSize(long size);
+
+  /**
+   * Get the original <em>timestamp</em> of the resource to be localized, used
+   * for verification.
+   * @return <em>timestamp</em> of the resource to be localized
+   */
+  long getTimestamp();
+
+  /**
+   * Set the <em>timestamp</em> of the resource to be localized, used
+   * for verification.
+   * @param timestamp <em>timestamp</em> of the resource to be localized
+   */
+  void setTimestamp(long timestamp);
+
+  /**
+   * Get the <code>LocalResourceType</code> of the resource to be localized.
+   * @return <code>LocalResourceType</code> of the resource to be localized
+   */
+  LocalResourceType getType();
+
+  /**
+   * Set the <code>LocalResourceType</code> of the resource to be localized.
+   * @param type <code>LocalResourceType</code> of the resource to be localized
+   */
+  void setType(LocalResourceType type);
+
+  /**
+   * Get the <code>LocalResourceVisibility</code> of the resource to be
+   * localized.
+   * @return <code>LocalResourceVisibility</code> of the resource to be
+   *         localized
+   */
+  LocalResourceVisibility getVisibility();
+
+  /**
+   * Set the <code>LocalResourceVisibility</code> of the resource to be
+   * localized.
+   * @param visibility <code>LocalResourceVisibility</code> of the resource to be
+   *                   localized
+   */
+  void setVisibility(LocalResourceVisibility visibility);
+
+  /**
+   * Get the <em>pattern</em> that should be used to extract entries from the
+   * archive (only used when type is <code>PATTERN</code>).
+   * @return <em>pattern</em> that should be used to extract entries from the
+   * archive.
+   */
+  String getPattern();
+
+  /**
+   * Set the <em>pattern</em> that should be used to extract entries from the
+   * archive (only used when type is <code>PATTERN</code>).
+   * @param pattern <em>pattern</em> that should be used to extract entries
+   * from the archive.
+   */
+  void setPattern(String pattern);
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/main/java/org/apache/twill/internal/yarn/YarnNMClient.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/main/java/org/apache/twill/internal/yarn/YarnNMClient.java b/twill-yarn/src/main/java/org/apache/twill/internal/yarn/YarnNMClient.java
new file mode 100644
index 0000000..d863c91
--- /dev/null
+++ b/twill-yarn/src/main/java/org/apache/twill/internal/yarn/YarnNMClient.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.yarn;
+
+import org.apache.twill.common.Cancellable;
+
+/**
+ * Abstraction for dealing with API differences across different Hadoop YARN versions.
+ */
+public interface YarnNMClient {
+
+  /**
+   * Starts a process based on the given launch context.
+   *
+   * @param containerInfo The container that the new process will launch in.
+   * @param launchContext Contains information about the process that is going to start.
+   * @return A {@link Cancellable} that, when {@link Cancellable#cancel()} is invoked,
+   *         will try to shut down the process.
+   *
+   */
+  Cancellable start(YarnContainerInfo containerInfo, YarnLaunchContext launchContext);
+}
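
A hedged sketch of starting a container process and later shutting it down through the returned Cancellable; the container info and launch context are assumed to be prepared elsewhere in the AM:

```java
import org.apache.twill.common.Cancellable;
import org.apache.twill.internal.yarn.YarnContainerInfo;
import org.apache.twill.internal.yarn.YarnLaunchContext;
import org.apache.twill.internal.yarn.YarnNMClient;

public class NmClientSketch {

  // Hypothetical fragment; all three arguments are prepared by the surrounding AM code.
  void runThenStop(YarnNMClient nmClient, YarnContainerInfo containerInfo, YarnLaunchContext launchContext) {
    Cancellable cancellable = nmClient.start(containerInfo, launchContext);
    // ... later, when the runnable should be shut down ...
    cancellable.cancel();
  }
}
```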


[21/28] Making maven site works.

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 0c8bc43..19d30da 100644
--- a/pom.xml
+++ b/pom.xml
@@ -25,18 +25,46 @@
     <artifactId>twill-parent</artifactId>
     <version>0.1.0-SNAPSHOT</version>
     <packaging>pom</packaging>
-    <name>Twill library parent project</name>
+    <name>Apache Twill</name>
+    <url>http://twill.incubator.apache.org</url>
 
     <modules>
-        <module>common</module>
-        <module>discovery-api</module>
-        <module>api</module>
-        <module>zookeeper</module>
-        <module>discovery-core</module>
-        <module>core</module>
-        <module>yarn</module>
+        <module>twill-common</module>
+        <module>twill-discovery-api</module>
+        <module>twill-api</module>
+        <module>twill-zookeeper</module>
+        <module>twill-discovery-core</module>
+        <module>twill-core</module>
+        <module>twill-yarn</module>
     </modules>
 
+
+    <issueManagement>
+        <url>https://issues.apache.org/jira/browse/TWILL</url>
+    </issueManagement>
+    <mailingLists>
+        <mailingList>
+            <name>Development</name>
+            <post>dev@twill.incubator.apache.org</post>
+            <subscribe>dev-subscribe@twill.incubator.apache.org</subscribe>
+            <unsubscribe>dev-unsubscribe@twill.incubator.apache.org</unsubscribe>
+            <archive>http://mail-archives.apache.org/mod_mbox/twill-dev/</archive>
+            <otherArchives>
+                <otherArchive>http://twill-dev.markmail.org</otherArchive>
+            </otherArchives>
+        </mailingList>
+        <mailingList>
+            <name>Commits</name>
+            <post>commits@twill.incubator.apache.org</post>
+            <subscribe>commits-subscribe@twill.incubator.apache.org</subscribe>
+            <unsubscribe>commits-unsubscribe@twill.incubator.apache.org</unsubscribe>
+            <archive>http://mail-archives.apache.org/mod_mbox/twill-commits/</archive>
+            <otherArchives>
+                <otherArchive>http://twill-comits.markmail.org</otherArchive>
+            </otherArchives>
+        </mailingList>
+    </mailingLists>
+
     <repositories>
         <repository>
             <id>cloudera-releases</id>
@@ -137,7 +165,14 @@
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-site-plugin</artifactId>
-                <version>3.2</version>
+                <version>3.3</version>
+                <dependencies>
+                    <dependency>
+                        <groupId>org.apache.maven.doxia</groupId>
+                        <artifactId>doxia-module-markdown</artifactId>
+                        <version>1.3</version>
+                    </dependency>
+                </dependencies>
             </plugin>
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
@@ -548,6 +583,55 @@
         <plugins>
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-project-info-reports-plugin</artifactId>
+                <version>2.7</version>
+                <reportSets>
+                    <reportSet>
+                        <reports/>
+                    </reportSet>
+                    <reportSet>
+                        <id>aggregate</id>
+                        <inherited>false</inherited>
+                        <reports>
+                            <report>index</report>
+                            <report>mailing-list</report>
+                            <report>scm</report>
+                            <report>issue-tracking</report>
+                        </reports>
+                    </reportSet>
+                </reportSets>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-javadoc-plugin</artifactId>
+                <version>2.9</version>
+                <configuration>
+                    <failOnError>false</failOnError>
+                    <excludePackageNames>*.internal.*</excludePackageNames>
+                    <links>
+                        <link>http://download.oracle.com/javase/6/docs/api/</link>
+                    </links>
+                    <bottom>
+                        <![CDATA[Copyright &#169; 2013 <a href="http://www.apache.org">The Apache Software Foundation</a>. All rights reserved.]]>
+                    </bottom>
+                </configuration>
+                <reportSets>
+                    <reportSet>
+                        <reports>
+                            <report>javadoc</report>
+                        </reports>
+                    </reportSet>
+                    <reportSet>
+                        <id>aggregate</id>
+                        <inherited>false</inherited>
+                        <reports>
+                            <report>aggregate</report>
+                        </reports>
+                    </reportSet>
+                </reportSets>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-surefire-report-plugin</artifactId>
                 <version>2.14.1</version>
             </plugin>

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/src/site/markdown/GettingStarted.md
----------------------------------------------------------------------
diff --git a/src/site/markdown/GettingStarted.md b/src/site/markdown/GettingStarted.md
new file mode 100644
index 0000000..d06099d
--- /dev/null
+++ b/src/site/markdown/GettingStarted.md
@@ -0,0 +1,54 @@
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+
+<head>
+  <title>Getting started</title>
+</head>
+
+### Clone and build the Twill library
+
+```sh
+$ git clone https://git-wip-us.apache.org/repos/asf/incubator-twill.git twill
+$ cd twill
+$ mvn clean install
+```
+
+### Quick example
+
+Let's begin by building a basic `EchoServer` in Twill. Traditionally, when you build a server as simple as this,
+you add logic within a `Runnable` implementation to run it in a `Thread` using an appropriate `ExecutorService`:
+
+```java
+public class EchoServer implements Runnable {
+  private static Logger LOG = LoggerFactory.getLogger(EchoServer.class);
+  private final ServerSocket serverSocket;
+
+  public EchoServer() {
+    ...
+  }
+
+  @Override
+  public void run() {
+    while ( isRunning() ) {
+      Socket socket = serverSocket.accept();
+      ...
+    }
+  }
+}
+```
\ No newline at end of file
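
The page stops at the plain-threaded version. As a hedged sketch of the natural next step, the same logic can be wrapped in a Twill runnable by extending the AbstractTwillRunnable class added later in this commit; the run()/stop() signatures are assumed from the TwillRunnable interface, which is not shown in this excerpt:

```java
import org.apache.twill.api.AbstractTwillRunnable;

public class EchoServerRunnable extends AbstractTwillRunnable {

  private volatile boolean running = true;

  @Override
  public void run() {
    // Same accept-and-echo loop as the threaded EchoServer above, now executed
    // inside a YARN container managed by Twill.
    while (running) {
      // accept a connection and echo the request back ...
    }
  }

  @Override
  public void stop() {
    running = false;
  }
}
```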

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/src/site/markdown/index.md
----------------------------------------------------------------------
diff --git a/src/site/markdown/index.md b/src/site/markdown/index.md
new file mode 100644
index 0000000..06eb967
--- /dev/null
+++ b/src/site/markdown/index.md
@@ -0,0 +1,27 @@
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<head>
+  <title>Home</title>
+</head>
+
+### What is Twill?
+
+Twill is an abstraction over Apache Hadoop® YARN that reduces the complexity of developing distributed applications,
+allowing developers to focus more on their business logic. Twill allows you to use YARN’s distributed capabilities
+with a programming model that is similar to running threads.
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/src/site/site.xml
----------------------------------------------------------------------
diff --git a/src/site/site.xml b/src/site/site.xml
new file mode 100644
index 0000000..b4421da
--- /dev/null
+++ b/src/site/site.xml
@@ -0,0 +1,80 @@
+<?xml version="1.0" encoding="ISO-8859-1"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<project name="Apache Twill">
+    <bannerRight>
+        <src>http://incubator.apache.org/images/egg-logo.png</src>
+        <href>http://incubator.apache.org/</href>
+    </bannerRight>
+
+    <publishDate position="none"/>
+    <version position="none"/>
+
+    <skin>
+        <groupId>org.apache.maven.skins</groupId>
+        <artifactId>maven-fluido-skin</artifactId>
+        <version>1.3.0</version>
+    </skin>
+
+    <body>
+
+        <breadcrumbs position="left">
+            <item name="Apache Twill" href="http://twill.incubator.apache.org/"/>
+        </breadcrumbs>
+
+        <menu name="Apache Twill">
+            <item name="Introduction" href="./index.html"/>
+            <item name="Getting Started" href="./GettingStarted.html"/>
+            <item name="API documentations" href="./apidocs/index.html"/>
+        </menu>
+
+        <menu name="Get Involved">
+            <item name="Mailing Lists" href="mail-lists.html"/>
+            <item name="Sources" href="source-repository.html"/>
+            <item name="Issues" href="issue-tracking.html"/>
+        </menu>
+
+        <menu name="ASF">
+            <item name="How Apache Works" href="http://www.apache.org/foundation/how-it-works.html"/>
+            <item name="Foundation" href="http://www.apache.org/foundation/"/>
+            <item name="Sponsoring Apache" href="http://www.apache.org/foundation/sponsorship.html"/>
+            <item name="Thanks" href="http://www.apache.org/foundation/thanks.html"/>
+        </menu>
+
+        <footer>
+            <div class="row span16">
+                Apache Twill, Apache, the Apache feather logo,
+                and the Apache Twill project logos are trademarks of The Apache Software Foundation.
+                All other marks mentioned may be trademarks or registered trademarks of their respective owners.
+                <a href="${project.url}/privacy-policy.html">Privacy Policy</a>
+            </div>
+        </footer>
+    </body>
+
+    <custom>
+        <fluidoSkin>
+            <topBarEnabled>true</topBarEnabled>
+            <navBarStyle>navbar-inverse</navBarStyle>
+            <sideBarEnabled>true</sideBarEnabled>
+            <leftColumnClass>span2</leftColumnClass>
+            <bodyColumnClass>span10</bodyColumnClass>
+            <sourceLineNumbersEnabled>true</sourceLineNumbersEnabled>
+        </fluidoSkin>
+    </custom>
+
+</project>

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-api/pom.xml
----------------------------------------------------------------------
diff --git a/twill-api/pom.xml b/twill-api/pom.xml
new file mode 100644
index 0000000..49b92c8
--- /dev/null
+++ b/twill-api/pom.xml
@@ -0,0 +1,54 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+
+    <parent>
+        <groupId>org.apache.twill</groupId>
+        <artifactId>twill-parent</artifactId>
+        <version>0.1.0-SNAPSHOT</version>
+    </parent>
+
+    <artifactId>twill-api</artifactId>
+    <packaging>jar</packaging>
+    <name>Twill API</name>
+
+    <dependencies>
+        <dependency>
+            <groupId>${project.groupId}</groupId>
+            <artifactId>twill-common</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>${project.groupId}</groupId>
+            <artifactId>twill-discovery-api</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>com.google.guava</groupId>
+            <artifactId>guava</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>com.google.code.findbugs</groupId>
+            <artifactId>jsr305</artifactId>
+        </dependency>
+    </dependencies>
+</project>

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-api/src/main/java/org/apache/twill/api/AbstractTwillRunnable.java
----------------------------------------------------------------------
diff --git a/twill-api/src/main/java/org/apache/twill/api/AbstractTwillRunnable.java b/twill-api/src/main/java/org/apache/twill/api/AbstractTwillRunnable.java
new file mode 100644
index 0000000..67cec0a
--- /dev/null
+++ b/twill-api/src/main/java/org/apache/twill/api/AbstractTwillRunnable.java
@@ -0,0 +1,75 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.api;
+
+import com.google.common.collect.ImmutableMap;
+
+import java.util.Map;
+
+/**
+ * This abstract class provides a default implementation of {@link TwillRunnable}.
+ */
+public abstract class AbstractTwillRunnable implements TwillRunnable {
+
+  private Map<String, String> args;
+  private TwillContext context;
+
+  protected AbstractTwillRunnable() {
+    this.args = ImmutableMap.of();
+  }
+
+  protected AbstractTwillRunnable(Map<String, String> args) {
+    this.args = ImmutableMap.copyOf(args);
+  }
+
+  @Override
+  public TwillRunnableSpecification configure() {
+    return TwillRunnableSpecification.Builder.with()
+      .setName(getClass().getSimpleName())
+      .withConfigs(args)
+      .build();
+  }
+
+  @Override
+  public void initialize(TwillContext context) {
+    this.context = context;
+    this.args = context.getSpecification().getConfigs();
+  }
+
+  @Override
+  public void handleCommand(org.apache.twill.api.Command command) throws Exception {
+    // No-op by default. Left for child classes to override.
+  }
+
+  @Override
+  public void destroy() {
+    // No-op by default. Left for child classes to override.
+  }
+
+  protected Map<String, String> getArguments() {
+    return args;
+  }
+
+  protected String getArgument(String key) {
+    return args.get(key);
+  }
+
+  protected TwillContext getContext() {
+    return context;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-api/src/main/java/org/apache/twill/api/Command.java
----------------------------------------------------------------------
diff --git a/twill-api/src/main/java/org/apache/twill/api/Command.java b/twill-api/src/main/java/org/apache/twill/api/Command.java
new file mode 100644
index 0000000..b23b3a8
--- /dev/null
+++ b/twill-api/src/main/java/org/apache/twill/api/Command.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.api;
+
+import com.google.common.base.Objects;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableMap;
+
+import java.util.Map;
+
+/**
+ * Represents command objects.
+ */
+public interface Command {
+
+  String getCommand();
+
+  Map<String, String> getOptions();
+
+  /**
+   * Builder for creating {@link Command} objects.
+   */
+  static final class Builder {
+
+    private final String command;
+    private final ImmutableMap.Builder<String, String> options = ImmutableMap.builder();
+
+    public static Builder of(String command) {
+      Preconditions.checkArgument(command != null, "Command cannot be null.");
+      return new Builder(command);
+    }
+
+    public Builder addOption(String key, String value) {
+      options.put(key, value);
+      return this;
+    }
+
+    public Builder addOptions(Map<String, String> map) {
+      options.putAll(map);
+      return this;
+    }
+
+    public Command build() {
+      return new SimpleCommand(command, options.build());
+    }
+
+    private Builder(String command) {
+      this.command = command;
+    }
+
+    /**
+     * Simple implementation of {@link org.apache.twill.api.Command}.
+     */
+    private static final class SimpleCommand implements Command {
+      private final String command;
+      private final Map<String, String> options;
+
+      SimpleCommand(String command, Map<String, String> options) {
+        this.command = command;
+        this.options = options;
+      }
+
+      @Override
+      public String getCommand() {
+        return command;
+      }
+
+      @Override
+      public Map<String, String> getOptions() {
+        return options;
+      }
+
+      @Override
+      public int hashCode() {
+        return Objects.hashCode(command, options);
+      }
+
+      @Override
+      public String toString() {
+        return Objects.toStringHelper(Command.class)
+          .add("command", command)
+          .add("options", options)
+          .toString();
+      }
+
+      @Override
+      public boolean equals(Object obj) {
+        if (obj == this) {
+          return true;
+        }
+        if (!(obj instanceof Command)) {
+          return false;
+        }
+        Command other = (Command) obj;
+        return command.equals(other.getCommand()) && options.equals(other.getOptions());
+      }
+    }
+  }
+}
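
A short, hedged example of the builder defined above; the command name and option values are arbitrary placeholders:

```java
import org.apache.twill.api.Command;

public class CommandSketch {

  // Hypothetical command name and option; any values work the same way with the builder.
  static final Command RESTART = Command.Builder.of("restart")
    .addOption("runnable", "EchoServer")
    .build();
}
```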

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-api/src/main/java/org/apache/twill/api/EventHandler.java
----------------------------------------------------------------------
diff --git a/twill-api/src/main/java/org/apache/twill/api/EventHandler.java b/twill-api/src/main/java/org/apache/twill/api/EventHandler.java
new file mode 100644
index 0000000..ede5b65
--- /dev/null
+++ b/twill-api/src/main/java/org/apache/twill/api/EventHandler.java
@@ -0,0 +1,146 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.api;
+
+import com.google.common.collect.ImmutableMap;
+
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * A callback handler for acting on application events related to the {@link TwillRunnable} lifecycle.
+ */
+public abstract class EventHandler {
+
+  protected EventHandlerContext context;
+
+  /**
+   * Represents the action to take upon a runnable launch timeout.
+   */
+  public static final class TimeoutAction {
+
+    // Next timeout in milliseconds.
+    private final long timeout;
+
+    /**
+     * Creates a {@link TimeoutAction} to indicate aborting the application.
+     */
+    public static TimeoutAction abort() {
+      return new TimeoutAction(-1);
+    }
+
+    /**
+     * Creates a {@link TimeoutAction} to indicate checking again after the given time has passed.
+     * @param elapse Time to elapse before checking for the timeout again.
+     * @param unit Unit of the elapse time.
+     */
+    public static TimeoutAction recheck(long elapse, TimeUnit unit) {
+      return new TimeoutAction(TimeUnit.MILLISECONDS.convert(elapse, unit));
+    }
+
+    private TimeoutAction(long timeout) {
+      this.timeout = timeout;
+    }
+
+    /**
+     * Returns the timeout in milliseconds, or {@code -1} to abort the application.
+     */
+    public long getTimeout() {
+      return timeout;
+    }
+  }
+
+  /**
+   * This class holds information about a launch timeout event.
+   */
+  public static final class TimeoutEvent {
+    private final String runnableName;
+    private final int expectedInstances;
+    private final int actualInstances;
+    private final long requestTime;
+
+    public TimeoutEvent(String runnableName, int expectedInstances, int actualInstances, long requestTime) {
+      this.runnableName = runnableName;
+      this.expectedInstances = expectedInstances;
+      this.actualInstances = actualInstances;
+      this.requestTime = requestTime;
+    }
+
+    public String getRunnableName() {
+      return runnableName;
+    }
+
+    public int getExpectedInstances() {
+      return expectedInstances;
+    }
+
+    public int getActualInstances() {
+      return actualInstances;
+    }
+
+    public long getRequestTime() {
+      return requestTime;
+    }
+  }
+
+  /**
+   * Returns an {@link EventHandlerSpecification} for configuring this handler class.
+   */
+  public EventHandlerSpecification configure() {
+    return new EventHandlerSpecification() {
+      @Override
+      public String getClassName() {
+        return EventHandler.this.getClass().getName();
+      }
+
+      @Override
+      public Map<String, String> getConfigs() {
+        return EventHandler.this.getConfigs();
+      }
+    };
+  }
+
+  /**
+   * Invoked by the application to initialize this EventHandler instance.
+   * @param context The {@link EventHandlerContext} for this handler.
+   */
+  public void initialize(EventHandlerContext context) {
+    this.context = context;
+  }
+
+  /**
+   * Invoked by the application when shutting down.
+   */
+  public void destroy() {
+    // No-op
+  }
+
+  /**
+   * Invoked when the number of expected instances doesn't match the number of actual instances.
+   * @param timeoutEvents An Iterable of {@link TimeoutEvent} that contains information about runnable launch timeouts.
+   * @return A {@link TimeoutAction} governing what to do next.
+   */
+  public abstract TimeoutAction launchTimeout(Iterable<TimeoutEvent> timeoutEvents);
+
+  /**
+   * Returns the set of configurations available for access at runtime.
+   */
+  protected Map<String, String> getConfigs() {
+    return ImmutableMap.of();
+  }
+}
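
A hedged example of a concrete handler built only from the API shown above; the abort-versus-recheck policy and the 30-second interval are illustrative choices, not Twill defaults:

```java
import java.util.concurrent.TimeUnit;
import org.apache.twill.api.EventHandler;

public class AbortOnTimeoutHandler extends EventHandler {

  @Override
  public TimeoutAction launchTimeout(Iterable<TimeoutEvent> timeoutEvents) {
    for (TimeoutEvent event : timeoutEvents) {
      if (event.getActualInstances() < event.getExpectedInstances()) {
        // Give up if any runnable still has missing instances.
        return TimeoutAction.abort();
      }
    }
    // Otherwise check again in 30 seconds.
    return TimeoutAction.recheck(30, TimeUnit.SECONDS);
  }
}
```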

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-api/src/main/java/org/apache/twill/api/EventHandlerContext.java
----------------------------------------------------------------------
diff --git a/twill-api/src/main/java/org/apache/twill/api/EventHandlerContext.java b/twill-api/src/main/java/org/apache/twill/api/EventHandlerContext.java
new file mode 100644
index 0000000..8e58af6
--- /dev/null
+++ b/twill-api/src/main/java/org/apache/twill/api/EventHandlerContext.java
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.api;
+
+/**
+ * Represents runtime context for {@link EventHandler}.
+ */
+public interface EventHandlerContext {
+
+  EventHandlerSpecification getSpecification();
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-api/src/main/java/org/apache/twill/api/EventHandlerSpecification.java
----------------------------------------------------------------------
diff --git a/twill-api/src/main/java/org/apache/twill/api/EventHandlerSpecification.java b/twill-api/src/main/java/org/apache/twill/api/EventHandlerSpecification.java
new file mode 100644
index 0000000..190f222
--- /dev/null
+++ b/twill-api/src/main/java/org/apache/twill/api/EventHandlerSpecification.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.api;
+
+import java.util.Map;
+
+/**
+ * Specification for {@link EventHandler}.
+ */
+public interface EventHandlerSpecification {
+
+  String getClassName();
+
+  Map<String, String> getConfigs();
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-api/src/main/java/org/apache/twill/api/LocalFile.java
----------------------------------------------------------------------
diff --git a/twill-api/src/main/java/org/apache/twill/api/LocalFile.java b/twill-api/src/main/java/org/apache/twill/api/LocalFile.java
new file mode 100644
index 0000000..df35a3b
--- /dev/null
+++ b/twill-api/src/main/java/org/apache/twill/api/LocalFile.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.api;
+
+import javax.annotation.Nullable;
+import java.net.URI;
+
+/**
+ * This interface represents a local file that will be available to the container running a {@link TwillRunnable}.
+ */
+public interface LocalFile {
+
+  String getName();
+
+  URI getURI();
+
+  /**
+   * Returns the last modified time of the file or {@code -1} if unknown.
+   */
+  long getLastModified();
+
+  /**
+   * Returns the size of the file or {@code -1} if unknown.
+   */
+  long getSize();
+
+  boolean isArchive();
+
+  @Nullable
+  String getPattern();
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-api/src/main/java/org/apache/twill/api/ResourceReport.java
----------------------------------------------------------------------
diff --git a/twill-api/src/main/java/org/apache/twill/api/ResourceReport.java b/twill-api/src/main/java/org/apache/twill/api/ResourceReport.java
new file mode 100644
index 0000000..0d63378
--- /dev/null
+++ b/twill-api/src/main/java/org/apache/twill/api/ResourceReport.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.api;
+
+import java.util.Collection;
+import java.util.Map;
+
+/**
+ * This interface provides a snapshot of the resources an application is using,
+ * broken down by each runnable.
+ */
+public interface ResourceReport {
+  /**
+   * Get all the run resources being used by all instances of the specified runnable.
+   *
+   * @param runnableName the runnable name.
+   * @return resources being used by all instances of the runnable.
+   */
+  public Collection<TwillRunResources> getRunnableResources(String runnableName);
+
+  /**
+   * Get all the run resources being used across all runnables.
+   *
+   * @return all run resources used by all instances of all runnables.
+   */
+  public Map<String, Collection<TwillRunResources>> getResources();
+
+  /**
+   * Get the resources the application master is using.
+   *
+   * @return resources being used by the application master.
+   */
+  public TwillRunResources getAppMasterResources();
+
+  /**
+   * Get the id of the application master.
+   *
+   * @return id of the application master.
+   */
+  public String getApplicationId();
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-api/src/main/java/org/apache/twill/api/ResourceSpecification.java
----------------------------------------------------------------------
diff --git a/twill-api/src/main/java/org/apache/twill/api/ResourceSpecification.java b/twill-api/src/main/java/org/apache/twill/api/ResourceSpecification.java
new file mode 100644
index 0000000..b40682f
--- /dev/null
+++ b/twill-api/src/main/java/org/apache/twill/api/ResourceSpecification.java
@@ -0,0 +1,152 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.api;
+
+import org.apache.twill.internal.DefaultResourceSpecification;
+
+/**
+ * This interface specifies resource requirements: the number of virtual cores, the amount of memory,
+ * the uplink and downlink bandwidth, and the number of instances.
+ */
+public interface ResourceSpecification {
+
+  final ResourceSpecification BASIC = Builder.with().setVirtualCores(1).setMemory(512, SizeUnit.MEGA).build();
+
+  /**
+   * Unit for specifying memory size.
+   */
+  enum SizeUnit {
+    MEGA(1),
+    GIGA(1024);
+
+    private final int multiplier;
+
+    private SizeUnit(int multiplier) {
+      this.multiplier = multiplier;
+    }
+  }
+
+  /**
+   * Returns the number of virtual CPU cores. Deprecated; use {@link #getVirtualCores()} instead.
+   * @return Number of virtual CPU cores.
+   */
+  @Deprecated
+  int getCores();
+
+  /**
+   * Returns the number of virtual CPU cores.
+   * @return Number of virtual CPU cores.
+   */
+  int getVirtualCores();
+
+  /**
+   * Returns the memory size in MB.
+   * @return Memory size
+   */
+  int getMemorySize();
+
+  /**
+   * Returns the uplink bandwidth in Mbps.
+   * @return Uplink bandwidth or -1 representing unlimited bandwidth.
+   */
+  int getUplink();
+
+  /**
+   * Returns the downlink bandwidth in Mbps.
+   * @return Downlink bandwidth or -1 representing unlimited bandwidth.
+   */
+  int getDownlink();
+
+  /**
+   * Returns number of execution instances.
+   * @return Number of execution instances.
+   */
+  int getInstances();
+
+  /**
+   * Builder for creating {@link ResourceSpecification}.
+   */
+  static final class Builder {
+
+    private int cores;
+    private int memory;
+    private int uplink = -1;
+    private int downlink = -1;
+    private int instances = 1;
+
+    public static CoreSetter with() {
+      return new Builder().new CoreSetter();
+    }
+
+    public final class CoreSetter {
+      @Deprecated
+      public MemorySetter setCores(int cores) {
+        Builder.this.cores = cores;
+        return new MemorySetter();
+      }
+
+      public MemorySetter setVirtualCores(int cores) {
+        Builder.this.cores = cores;
+        return new MemorySetter();
+      }
+    }
+
+    public final class MemorySetter {
+      public AfterMemory setMemory(int size, SizeUnit unit) {
+        Builder.this.memory = size * unit.multiplier;
+        return new AfterMemory();
+      }
+    }
+
+    public final class AfterMemory extends Build {
+      public AfterInstances setInstances(int instances) {
+        Builder.this.instances = instances;
+        return new AfterInstances();
+      }
+    }
+
+    public final class AfterInstances extends Build {
+      public AfterUplink setUplink(int uplink, SizeUnit unit) {
+        Builder.this.uplink = uplink * unit.multiplier;
+        return new AfterUplink();
+      }
+    }
+
+    public final class AfterUplink extends Build {
+      public AfterDownlink setDownlink(int downlink, SizeUnit unit) {
+        Builder.this.downlink = downlink * unit.multiplier;
+        return new AfterDownlink();
+      }
+    }
+
+    public final class AfterDownlink extends Build {
+
+      @Override
+      public ResourceSpecification build() {
+        return super.build();
+      }
+    }
+
+    public abstract class Build {
+      public ResourceSpecification build() {
+        return new DefaultResourceSpecification(cores, memory, instances, uplink, downlink);
+      }
+    }
+
+    private Builder() {}
+  }
+}
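
A minimal, hedged sketch of how the fluent builder above is meant to be chained; the values are
illustrative, and the uplink/downlink steps are optional (they default to -1, i.e. unlimited):

  ResourceSpecification spec = ResourceSpecification.Builder
    .with()
    .setVirtualCores(2)
    .setMemory(1, ResourceSpecification.SizeUnit.GIGA)
    .setInstances(3)
    .build();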

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-api/src/main/java/org/apache/twill/api/RunId.java
----------------------------------------------------------------------
diff --git a/twill-api/src/main/java/org/apache/twill/api/RunId.java b/twill-api/src/main/java/org/apache/twill/api/RunId.java
new file mode 100644
index 0000000..7f3c4fe
--- /dev/null
+++ b/twill-api/src/main/java/org/apache/twill/api/RunId.java
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.api;
+
+/**
+ * Represents the unique ID of a particular execution.
+ */
+public interface RunId {
+
+  String getId();
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-api/src/main/java/org/apache/twill/api/RuntimeSpecification.java
----------------------------------------------------------------------
diff --git a/twill-api/src/main/java/org/apache/twill/api/RuntimeSpecification.java b/twill-api/src/main/java/org/apache/twill/api/RuntimeSpecification.java
new file mode 100644
index 0000000..99e11a4
--- /dev/null
+++ b/twill-api/src/main/java/org/apache/twill/api/RuntimeSpecification.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.api;
+
+import java.util.Collection;
+
+/**
+ * Specifications for runtime requirements.
+ */
+public interface RuntimeSpecification {
+
+  String getName();
+
+  TwillRunnableSpecification getRunnableSpecification();
+
+  ResourceSpecification getResourceSpecification();
+
+  Collection<LocalFile> getLocalFiles();
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-api/src/main/java/org/apache/twill/api/SecureStore.java
----------------------------------------------------------------------
diff --git a/twill-api/src/main/java/org/apache/twill/api/SecureStore.java b/twill-api/src/main/java/org/apache/twill/api/SecureStore.java
new file mode 100644
index 0000000..707a152
--- /dev/null
+++ b/twill-api/src/main/java/org/apache/twill/api/SecureStore.java
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.api;
+
+/**
+ * Represents storage of secure tokens.
+ */
+public interface SecureStore {
+
+  <T> T getStore();
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-api/src/main/java/org/apache/twill/api/SecureStoreUpdater.java
----------------------------------------------------------------------
diff --git a/twill-api/src/main/java/org/apache/twill/api/SecureStoreUpdater.java b/twill-api/src/main/java/org/apache/twill/api/SecureStoreUpdater.java
new file mode 100644
index 0000000..5912247
--- /dev/null
+++ b/twill-api/src/main/java/org/apache/twill/api/SecureStoreUpdater.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.api;
+
+/**
+ * Represents a class capable of creating updates to the {@link SecureStore} of live applications.
+ */
+public interface SecureStoreUpdater {
+
+  /**
+   * Invoked when an update to SecureStore is needed.
+   *
+   * @param application The name of the application.
+   * @param runId The runId of the live application.
+   * @return A new {@link SecureStore}.
+   */
+  SecureStore update(String application, RunId runId);
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-api/src/main/java/org/apache/twill/api/ServiceAnnouncer.java
----------------------------------------------------------------------
diff --git a/twill-api/src/main/java/org/apache/twill/api/ServiceAnnouncer.java b/twill-api/src/main/java/org/apache/twill/api/ServiceAnnouncer.java
new file mode 100644
index 0000000..d8e4358
--- /dev/null
+++ b/twill-api/src/main/java/org/apache/twill/api/ServiceAnnouncer.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.api;
+
+import org.apache.twill.common.Cancellable;
+
+/**
+ * This interface provides a way to announce the availability of a service.
+ */
+public interface ServiceAnnouncer {
+
+  /**
+   * Registers an endpoint that can be discovered by an external party.
+   * @param serviceName Name of the endpoint
+   * @param port Port of the endpoint.
+   */
+  Cancellable announce(String serviceName, int port);
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-api/src/main/java/org/apache/twill/api/ServiceController.java
----------------------------------------------------------------------
diff --git a/twill-api/src/main/java/org/apache/twill/api/ServiceController.java b/twill-api/src/main/java/org/apache/twill/api/ServiceController.java
new file mode 100644
index 0000000..0ea64f9
--- /dev/null
+++ b/twill-api/src/main/java/org/apache/twill/api/ServiceController.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.api;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.Service;
+
+import java.util.concurrent.Executor;
+
+/**
+ * This interface is for controlling a remote running service.
+ */
+public interface ServiceController extends Service {
+
+  /**
+   * Returns the {@link RunId} of the running application.
+   */
+  RunId getRunId();
+
+  /**
+   * Sends a user command to the running application.
+   * @param command The command to send.
+   * @return A {@link ListenableFuture} that will be completed when the command is successfully processed
+   *         by the target application.
+   */
+  ListenableFuture<Command> sendCommand(Command command);
+
+  /**
+   * Sends a user command to the given runnable of the running application.
+   * @param runnableName Name of the {@link TwillRunnable}.
+   * @param command The command to send.
+   * @return A {@link ListenableFuture} that will be completed when the command is successfully processed
+   *         by the target runnable.
+   */
+  ListenableFuture<Command> sendCommand(String runnableName, Command command);
+
+  /**
+   * Requests to forcefully kill a running service.
+   */
+  void kill();
+
+  /**
+   * Registers a {@link Listener} to be {@linkplain Executor#execute executed} on the given
+   * executor.  The listener will have the corresponding transition method called whenever the
+   * service changes state. When added, the current state of the service will be reflected through a
+   * callback to the listener. Each method on the listener is guaranteed to be called no more than once.
+   *
+   * @param listener the listener to run when the service changes state
+   * @param executor the executor in which the listener's callback methods will be run. For fast,
+   *     lightweight listeners that would be safe to execute in any thread, consider
+   *     {@link com.google.common.util.concurrent.MoreExecutors#sameThreadExecutor}.
+   */
+  @Override
+  void addListener(Listener listener, Executor executor);
+}
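
As a hedged illustration of addListener(), the following sketch registers an anonymous listener that
logs state transitions; it assumes the Guava Service.Listener callbacks (starting, running, stopping,
terminated, failed) of the Guava version Twill builds against, and a same-thread executor for simplicity:

  controller.addListener(new Service.Listener() {
    @Override public void starting() { }
    @Override public void running() { System.out.println("Service is running"); }
    @Override public void stopping(Service.State from) { }
    @Override public void terminated(Service.State from) { System.out.println("Terminated from " + from); }
    @Override public void failed(Service.State from, Throwable failure) { failure.printStackTrace(); }
  }, MoreExecutors.sameThreadExecutor());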

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-api/src/main/java/org/apache/twill/api/TwillApplication.java
----------------------------------------------------------------------
diff --git a/twill-api/src/main/java/org/apache/twill/api/TwillApplication.java b/twill-api/src/main/java/org/apache/twill/api/TwillApplication.java
new file mode 100644
index 0000000..b49e7a7
--- /dev/null
+++ b/twill-api/src/main/java/org/apache/twill/api/TwillApplication.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.api;
+
+/**
+ * Represents an application that can be launched by Twill.
+ */
+public interface TwillApplication {
+
+  /**
+   * Invoked when launching the application on the client side.
+   * @return A {@link TwillSpecification} specifying properties about this application.
+   */
+  TwillSpecification configure();
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-api/src/main/java/org/apache/twill/api/TwillContext.java
----------------------------------------------------------------------
diff --git a/twill-api/src/main/java/org/apache/twill/api/TwillContext.java b/twill-api/src/main/java/org/apache/twill/api/TwillContext.java
new file mode 100644
index 0000000..b4ddb6e
--- /dev/null
+++ b/twill-api/src/main/java/org/apache/twill/api/TwillContext.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.api;
+
+import java.net.InetAddress;
+
+/**
+ * Represents the runtime context of a {@link TwillRunnable}.
+ */
+public interface TwillContext extends ServiceAnnouncer {
+
+  /**
+   * Returns the {@link RunId} of this running instance of {@link TwillRunnable}.
+   */
+  RunId getRunId();
+
+  /**
+   * Returns the {@link RunId} of this running application.
+   */
+  RunId getApplicationRunId();
+
+  /**
+   * Returns the number of running instances assigned for this {@link TwillRunnable}.
+   */
+  int getInstanceCount();
+
+  /**
+   * Returns the hostname that the runnable is running on.
+   */
+  InetAddress getHost();
+
+  /**
+   * Returns the runtime arguments that are passed to the {@link TwillRunnable}.
+   */
+  String[] getArguments();
+
+  /**
+   * Returns the runtime arguments that are passed to the {@link TwillApplication}.
+   */
+  String[] getApplicationArguments();
+
+  /**
+   * Returns the {@link TwillRunnableSpecification} that was created by {@link TwillRunnable#configure()}.
+   */
+  TwillRunnableSpecification getSpecification();
+
+  /**
+   * Returns an integer id from 0 to (instanceCount - 1).
+   */
+  int getInstanceId();
+
+  /**
+   * Returns the number of virtual cores the runnable is allowed to use.
+   */
+  int getVirtualCores();
+
+  /**
+   * Returns the amount of memory in MB the runnable is allowed to use.
+   */
+  int getMaxMemoryMB();
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-api/src/main/java/org/apache/twill/api/TwillController.java
----------------------------------------------------------------------
diff --git a/twill-api/src/main/java/org/apache/twill/api/TwillController.java b/twill-api/src/main/java/org/apache/twill/api/TwillController.java
new file mode 100644
index 0000000..f31d3f9
--- /dev/null
+++ b/twill-api/src/main/java/org/apache/twill/api/TwillController.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.api;
+
+import org.apache.twill.api.logging.LogHandler;
+import org.apache.twill.discovery.Discoverable;
+import com.google.common.util.concurrent.ListenableFuture;
+
+/**
+ * For controlling a running application.
+ */
+public interface TwillController extends ServiceController {
+
+  /**
+   * Adds a {@link LogHandler} for receiving application logs.
+   * @param handler The handler to add.
+   */
+  void addLogHandler(LogHandler handler);
+
+  /**
+   * Discovers the set of {@link Discoverable} endpoints that provide the service of the given name.
+   * @param serviceName Name of the service to discover.
+   * @return An {@link Iterable} that gives the latest set of {@link Discoverable} every time
+   *         {@link Iterable#iterator()} is invoked.
+   */
+  Iterable<Discoverable> discoverService(String serviceName);
+
+
+  /**
+   * Changes the number of running instances of a given runnable.
+   *
+   * @param runnable The name of the runnable.
+   * @param newCount Number of instances for the given runnable.
+   * @return A {@link ListenableFuture} that will be completed when the number of running instances has been
+   *         successfully changed. The future will carry the new count as the result. If there is any error
+   *         while changing instances, it'll be reflected in the future.
+   */
+  ListenableFuture<Integer> changeInstances(String runnable, int newCount);
+
+  /**
+   * Get a snapshot of the resources used by the application, broken down by each runnable.
+   *
+   * @return A {@link ResourceReport} containing information about resources used by the application.
+   */
+  ResourceReport getResourceReport();
+}
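
A short, hedged sketch of typical TwillController usage, assuming a controller obtained from a
TwillRunner; the runnable name "worker" and service name "web" are placeholders, and Guava's Futures
is used to wait for the instance change:

  // Scale the "worker" runnable to 5 instances and block until the change is applied.
  ListenableFuture<Integer> future = controller.changeInstances("worker", 5);
  int newCount = Futures.getUnchecked(future);

  // Discover endpoints announced under the service name "web".
  for (Discoverable discoverable : controller.discoverService("web")) {
    System.out.println("web endpoint: " + discoverable.getSocketAddress());
  }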

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-api/src/main/java/org/apache/twill/api/TwillPreparer.java
----------------------------------------------------------------------
diff --git a/twill-api/src/main/java/org/apache/twill/api/TwillPreparer.java b/twill-api/src/main/java/org/apache/twill/api/TwillPreparer.java
new file mode 100644
index 0000000..b2a3ce2
--- /dev/null
+++ b/twill-api/src/main/java/org/apache/twill/api/TwillPreparer.java
@@ -0,0 +1,146 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.api;
+
+import org.apache.twill.api.logging.LogHandler;
+
+import java.net.URI;
+
+/**
+ * This interface exposes methods to set up the Twill runtime environment and start a Twill application.
+ */
+public interface TwillPreparer {
+
+  /**
+   * Adds a {@link LogHandler} for receiving application logs.
+   * @param handler The {@link LogHandler}.
+   * @return This {@link TwillPreparer}.
+   */
+  TwillPreparer addLogHandler(LogHandler handler);
+
+  /**
+   * Sets the user name that runs the application. The default value is obtained from the
+   * {@code "user.name"} property via {@link System#getProperty(String)}.
+   * @param user User name
+   * @return This {@link TwillPreparer}.
+   *
+   * @deprecated This method will be removed in future version.
+   */
+  @Deprecated
+  TwillPreparer setUser(String user);
+
+  /**
+   * Sets the list of arguments that will be passed to the application. The arguments can be retrieved
+   * from {@link TwillContext#getApplicationArguments()}.
+   *
+   * @param args Array of arguments.
+   * @return This {@link TwillPreparer}.
+   */
+  TwillPreparer withApplicationArguments(String... args);
+
+  /**
+   * Sets the list of arguments that will be passed to the application. The arguments can be retrieved
+   * from {@link TwillContext#getApplicationArguments()}.
+   *
+   * @param args Iterable of arguments.
+   * @return This {@link TwillPreparer}.
+   */
+  TwillPreparer withApplicationArguments(Iterable<String> args);
+
+  /**
+   * Sets the list of arguments that will be passed to the {@link TwillRunnable} identified by the given name.
+   * The arguments can be retrieved from {@link TwillContext#getArguments()}.
+   *
+   * @param runnableName Name of the {@link TwillRunnable}.
+   * @param args Array of arguments.
+   * @return This {@link TwillPreparer}.
+   */
+  TwillPreparer withArguments(String runnableName, String...args);
+
+  /**
+   * Sets the list of arguments that will be passed to the {@link TwillRunnable} identified by the given name.
+   * The arguments can be retrieved from {@link TwillContext#getArguments()}.
+   *
+   * @param runnableName Name of the {@link TwillRunnable}.
+   * @param args Iterable of arguments.
+   * @return This {@link TwillPreparer}.
+   */
+  TwillPreparer withArguments(String runnableName, Iterable<String> args);
+
+  /**
+   * Adds extra classes that the application depends on but that are not traceable from the application itself.
+   * @see #withDependencies(Iterable)
+   * @return This {@link TwillPreparer}.
+   */
+  TwillPreparer withDependencies(Class<?>...classes);
+
+  /**
+   * Adds extra classes that the application depends on but that are not traceable from the application itself,
+   * e.g. a class name used in {@link Class#forName(String)}.
+   * @param classes set of classes to add to dependency list for generating the deployment jar.
+   * @return This {@link TwillPreparer}.
+   */
+  TwillPreparer withDependencies(Iterable<Class<?>> classes);
+
+  /**
+   * Adds resources that will be available through the ClassLoader of the {@link TwillRunnable runnables}.
+   * @see #withResources(Iterable)
+   * @return This {@link TwillPreparer}.
+   */
+  TwillPreparer withResources(URI...resources);
+
+  /**
+   * Adds resources that will be available through the ClassLoader of the {@link TwillRunnable runnables}.
+   * Useful for adding extra resource files or libraries that are not traceable from the application itself.
+   * If the URI is a jar file, classes inside will be loadable by the ClassLoader. If the URI is a directory,
+   * everything underneath will be available.
+   *
+   * @param resources Set of URI to the resources.
+   * @return This {@link TwillPreparer}.
+   */
+  TwillPreparer withResources(Iterable<URI> resources);
+
+  /**
+   * Adds the set of paths to the classpath on the target machine for all runnables.
+   * @see #withClassPaths(Iterable)
+   * @return This {@link TwillPreparer}
+   */
+  TwillPreparer withClassPaths(String... classPaths);
+
+  /**
+   * Adds the set of paths to the classpath on the target machine for all runnables.
+   * Note that the paths are added as-is, without verification.
+   * @param classPaths Set of classpaths
+   * @return This {@link TwillPreparer}
+   */
+  TwillPreparer withClassPaths(Iterable<String> classPaths);
+
+  /**
+   * Adds security credentials for the runtime environment to give the application access to resources.
+   *
+   * @param secureStore Contains security token available for the runtime environment.
+   * @return This {@link TwillPreparer}.
+   */
+  TwillPreparer addSecureStore(SecureStore secureStore);
+
+  /**
+   * Starts the application.
+   * @return A {@link TwillController} for controlling the running application.
+   */
+  TwillController start();
+}
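
To show how these preparer methods compose, here is a hedged sketch that prepares and starts an
application; runner is assumed to be an existing TwillRunner (defined further below), MyApplication
is a placeholder TwillApplication, and all argument values are illustrative:

  TwillController controller = runner.prepare(new MyApplication())
    .withApplicationArguments("--mode", "production")
    .withArguments("worker", "--threads", "8")
    .withClassPaths("/etc/hadoop/conf")
    .start();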

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-api/src/main/java/org/apache/twill/api/TwillRunResources.java
----------------------------------------------------------------------
diff --git a/twill-api/src/main/java/org/apache/twill/api/TwillRunResources.java b/twill-api/src/main/java/org/apache/twill/api/TwillRunResources.java
new file mode 100644
index 0000000..4c3d2e7
--- /dev/null
+++ b/twill-api/src/main/java/org/apache/twill/api/TwillRunResources.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.api;
+
+/**
+ * Information about the container the {@link TwillRunnable}
+ * is running in.
+ */
+public interface TwillRunResources {
+
+  /**
+   * @return instance id of the runnable.
+   */
+  int getInstanceId();
+
+  /**
+   * @return number of cores the runnable is allowed to use. YARN must be at least v2.1.0 and
+   *   must be configured to use cgroups for this value to be accurate.
+   */
+  int getVirtualCores();
+
+  /**
+   * @return amount of memory in MB the runnable is allowed to use.
+   */
+  int getMemoryMB();
+
+  /**
+   * @return the host the runnable is running on.
+   */
+  String getHost();
+
+  /**
+   * @return id of the container the runnable is running in.
+   */
+  String getContainerId();
+}
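
Combined with the ResourceReport interface earlier in this change, a hedged sketch of how these
per-run values are typically consumed; the report is assumed to come from TwillController.getResourceReport():

  ResourceReport report = controller.getResourceReport();
  for (Map.Entry<String, Collection<TwillRunResources>> entry : report.getResources().entrySet()) {
    for (TwillRunResources resources : entry.getValue()) {
      System.out.println(entry.getKey() + "#" + resources.getInstanceId()
                         + " on " + resources.getHost()
                         + ": " + resources.getMemoryMB() + " MB, "
                         + resources.getVirtualCores() + " cores, container " + resources.getContainerId());
    }
  }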

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-api/src/main/java/org/apache/twill/api/TwillRunnable.java
----------------------------------------------------------------------
diff --git a/twill-api/src/main/java/org/apache/twill/api/TwillRunnable.java b/twill-api/src/main/java/org/apache/twill/api/TwillRunnable.java
new file mode 100644
index 0000000..4350bfb
--- /dev/null
+++ b/twill-api/src/main/java/org/apache/twill/api/TwillRunnable.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.api;
+
+/**
+ * The {@link TwillRunnable} interface should be implemented by any
+ * class whose instances are intended to be executed in a Twill cluster.
+ */
+public interface TwillRunnable extends Runnable {
+
+  /**
+   * Called at submission time. Executed on the client side.
+   * @return A {@link TwillRunnableSpecification} built by {@link TwillRunnableSpecification.Builder}.
+   */
+  TwillRunnableSpecification configure();
+
+  /**
+   * Called when the container process starts. Executed in container machine.
+   * @param context Contains information about the runtime context.
+   */
+  void initialize(TwillContext context);
+
+  /**
+   * Called when a command is received. A normal return denotes that the command has been processed
+   * successfully; otherwise an {@link Exception} should be thrown.
+   * @param command Contains details of the command.
+   * @throws Exception
+   */
+  void handleCommand(Command command) throws Exception;
+
+  /**
+   * Requests to stop the running service.
+   */
+  void stop();
+
+  /**
+   * Called when {@link TwillRunnable#run()} has completed. Useful for resource cleanup.
+   * This method is only called if the call to {@link #initialize(TwillContext)} was successful.
+   */
+  void destroy();
+}
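
A minimal, hedged sketch of a runnable implementing the lifecycle above. The class name, service name
and port are illustrative; it announces itself through the TwillContext (a ServiceAnnouncer) in
initialize() and idles until stop() is called:

  import org.apache.twill.api.Command;
  import org.apache.twill.api.TwillContext;
  import org.apache.twill.api.TwillRunnable;
  import org.apache.twill.api.TwillRunnableSpecification;

  public class EchoServerRunnable implements TwillRunnable {

    private volatile boolean stopped;

    @Override
    public TwillRunnableSpecification configure() {
      return TwillRunnableSpecification.Builder.with()
        .setName("echo")
        .noConfigs()
        .build();
    }

    @Override
    public void initialize(TwillContext context) {
      // Announce an illustrative service port so clients can discover this instance.
      context.announce("echo", 8080);
    }

    @Override
    public void run() {
      while (!stopped) {
        // Real work would go here; sleeping keeps the sketch simple.
        try {
          Thread.sleep(1000);
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
          break;
        }
      }
    }

    @Override
    public void handleCommand(Command command) throws Exception {
      // React to user commands sent via ServiceController.sendCommand().
    }

    @Override
    public void stop() {
      stopped = true;
    }

    @Override
    public void destroy() {
      // Release any resources acquired in initialize().
    }
  }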

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-api/src/main/java/org/apache/twill/api/TwillRunnableSpecification.java
----------------------------------------------------------------------
diff --git a/twill-api/src/main/java/org/apache/twill/api/TwillRunnableSpecification.java b/twill-api/src/main/java/org/apache/twill/api/TwillRunnableSpecification.java
new file mode 100644
index 0000000..bbcc5d7
--- /dev/null
+++ b/twill-api/src/main/java/org/apache/twill/api/TwillRunnableSpecification.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.api;
+
+import org.apache.twill.internal.DefaultTwillRunnableSpecification;
+import com.google.common.collect.ImmutableMap;
+
+import java.util.Map;
+
+/**
+ * Represents a specification of a {@link TwillRunnable}.
+ */
+public interface TwillRunnableSpecification {
+
+  String getClassName();
+
+  String getName();
+
+  Map<String, String> getConfigs();
+
+  /**
+   * Builder for constructing {@link TwillRunnableSpecification}.
+   */
+  static final class Builder {
+
+    private String name;
+    private Map<String, String> args;
+
+    public static NameSetter with() {
+      return new Builder().new NameSetter();
+    }
+
+    public final class NameSetter {
+      public AfterName setName(String name) {
+        Builder.this.name = name;
+        return new AfterName();
+      }
+    }
+
+    public final class AfterName {
+      public AfterConfigs withConfigs(Map<String, String> args) {
+        Builder.this.args = args;
+        return new AfterConfigs();
+      }
+
+      public AfterConfigs noConfigs() {
+        Builder.this.args = ImmutableMap.of();
+        return new AfterConfigs();
+      }
+    }
+
+    public final class AfterConfigs {
+      public TwillRunnableSpecification build() {
+        return new DefaultTwillRunnableSpecification(null, name, args);
+      }
+    }
+
+    private Builder() {
+    }
+  }
+}
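
A short, hedged sketch of the builder above; the name and configuration entries are placeholders and
would typically be read back through TwillContext.getSpecification().getConfigs():

  TwillRunnableSpecification spec = TwillRunnableSpecification.Builder.with()
    .setName("worker")
    .withConfigs(ImmutableMap.of("threads", "8", "queue", "jobs"))
    .build();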

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-api/src/main/java/org/apache/twill/api/TwillRunner.java
----------------------------------------------------------------------
diff --git a/twill-api/src/main/java/org/apache/twill/api/TwillRunner.java b/twill-api/src/main/java/org/apache/twill/api/TwillRunner.java
new file mode 100644
index 0000000..0393a85
--- /dev/null
+++ b/twill-api/src/main/java/org/apache/twill/api/TwillRunner.java
@@ -0,0 +1,107 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.api;
+
+import org.apache.twill.common.Cancellable;
+
+import java.util.concurrent.TimeUnit;
+
+/**
+ * This interface prepares execution of {@link TwillRunnable} and {@link TwillApplication}.
+ */
+public interface TwillRunner {
+
+  /**
+   * Interface representing information about a live application.
+   */
+  interface LiveInfo {
+
+    /**
+     * Returns the name of the application.
+     * @return Application name as a {@link String}.
+     */
+    String getApplicationName();
+
+    /**
+     * Returns {@link TwillController}s for all live instances of the application.
+     * @return An {@link Iterable} of {@link TwillController}.
+     */
+    Iterable<TwillController> getControllers();
+  }
+
+  /**
+   * Prepares to run the given {@link TwillRunnable} with {@link ResourceSpecification#BASIC} resource specification.
+   * @param runnable The runnable to run through Twill when {@link TwillPreparer#start()} is called.
+   * @return A {@link TwillPreparer} for setting up runtime options.
+   */
+  TwillPreparer prepare(TwillRunnable runnable);
+
+  /**
+   * Prepares to run the given {@link TwillRunnable} with the given resource specification.
+   * @param runnable The runnable to run through Twill when {@link TwillPreparer#start()} is called.
+   * @param resourceSpecification The resource specification for running the runnable.
+   * @return A {@link TwillPreparer} for setting up runtime options.
+   */
+  TwillPreparer prepare(TwillRunnable runnable, ResourceSpecification resourceSpecification);
+
+  /**
+   * Prepares to run the given {@link TwillApplication} as specified by the application.
+   * @param application The application to run through Twill when {@link TwillPreparer#start()} is called.
+   * @return A {@link TwillPreparer} for setting up runtime options.
+   */
+  TwillPreparer prepare(TwillApplication application);
+
+  /**
+   * Gets a {@link TwillController} for the given application and runId.
+   * @param applicationName Name of the application.
+   * @param runId The runId of the running application.
+   * @return A {@link TwillController} to interact with the application or null if no such runId is found.
+   */
+  TwillController lookup(String applicationName, RunId runId);
+
+  /**
+   * Gets an {@link Iterable} of {@link TwillController} for all running instances of the given application.
+   * @param applicationName Name of the application.
+   * @return A live {@link Iterable} that gives the latest {@link TwillController} set for all running
+   *         instances of the application when {@link Iterable#iterator()} is invoked.
+   */
+  Iterable<TwillController> lookup(String applicationName);
+
+  /**
+   * Gets an {@link Iterable} of {@link LiveInfo}.
+   * @return A live {@link Iterable} that gives the latest information on the set of applications that
+   *         have running instances when {@link Iterable#iterator()} is invoked.
+   */
+  Iterable<LiveInfo> lookupLive();
+
+  /**
+   * Schedules a periodic update of SecureStore. The first call to the given {@link SecureStoreUpdater} will be made
+   * after {@code initialDelay}, and subsequently with the given {@code delay} between completion of one update
+   * and the start of the next. If an exception is thrown by
+   * {@link SecureStoreUpdater#update(String, RunId)}, the exception will only be logged
+   * and won't suppress the next update call.
+   *
+   * @param updater A {@link SecureStoreUpdater} for creating new SecureStore.
+   * @param initialDelay Delay before the first call to update method.
+   * @param delay Delay between completion of one update call to the next one.
+   * @param unit time unit for the initialDelay and delay.
+   * @return A {@link Cancellable} for cancelling the scheduled update.
+   */
+  Cancellable scheduleSecureStoreUpdate(final SecureStoreUpdater updater,
+                                        long initialDelay, long delay, TimeUnit unit);
+}
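
A hedged sketch of the lookup and secure-store scheduling calls above. The application name and delays
are illustrative, runId is assumed to be a known RunId, and createStore(...) is a hypothetical helper
standing in for whatever runtime-specific code actually builds the SecureStore:

  TwillController controller = runner.lookup("EchoServer", runId);

  // Refresh security tokens every hour, starting 30 seconds from now.
  Cancellable updates = runner.scheduleSecureStoreUpdate(new SecureStoreUpdater() {
    @Override
    public SecureStore update(String application, RunId runId) {
      return createStore(application, runId);  // hypothetical helper, not part of the API
    }
  }, 30, 3600, TimeUnit.SECONDS);

  // Later, stop the periodic updates.
  updates.cancel();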

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-api/src/main/java/org/apache/twill/api/TwillRunnerService.java
----------------------------------------------------------------------
diff --git a/twill-api/src/main/java/org/apache/twill/api/TwillRunnerService.java b/twill-api/src/main/java/org/apache/twill/api/TwillRunnerService.java
new file mode 100644
index 0000000..76ec136
--- /dev/null
+++ b/twill-api/src/main/java/org/apache/twill/api/TwillRunnerService.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.api;
+
+import com.google.common.util.concurrent.Service;
+
+/**
+ * A {@link TwillRunner} that extends {@link Service} to provide lifecycle management functions.
+ * The {@link #start()} method needs to be called before calling any other method of this interface.
+ * When done with this service, call {@link #stop()} to release any resources that it holds.
+ */
+public interface TwillRunnerService extends TwillRunner, Service {
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-api/src/main/java/org/apache/twill/api/TwillSpecification.java
----------------------------------------------------------------------
diff --git a/twill-api/src/main/java/org/apache/twill/api/TwillSpecification.java b/twill-api/src/main/java/org/apache/twill/api/TwillSpecification.java
new file mode 100644
index 0000000..00d171d
--- /dev/null
+++ b/twill-api/src/main/java/org/apache/twill/api/TwillSpecification.java
@@ -0,0 +1,327 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.api;
+
+import org.apache.twill.internal.DefaultLocalFile;
+import org.apache.twill.internal.DefaultRuntimeSpecification;
+import org.apache.twill.internal.DefaultTwillRunnableSpecification;
+import org.apache.twill.internal.DefaultTwillSpecification;
+import com.google.common.base.Function;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+
+import javax.annotation.Nullable;
+import java.io.File;
+import java.net.URI;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * Represents specification of a {@link TwillApplication}.
+ */
+public interface TwillSpecification {
+
+  /**
+   * Defines execution order.
+   */
+  interface Order {
+
+    enum Type {
+      STARTED,
+      COMPLETED
+    }
+
+    /**
+     * @return Set of {@link TwillRunnable} names that belong to this order.
+     */
+    Set<String> getNames();
+
+    Type getType();
+  }
+
+  /**
+   * @return Name of the application.
+   */
+  String getName();
+
+  /**
+   * @return A map from {@link TwillRunnable} name to {@link RuntimeSpecification}.
+   */
+  Map<String, RuntimeSpecification> getRunnables();
+
+  /**
+   * @return The list of {@link Order} entries defining the order in which runnables should be executed.
+   */
+  List<Order> getOrders();
+
+  /**
+   * @return The {@link EventHandlerSpecification} for the {@link EventHandler} to be used for this application,
+   *         or {@code null} if no event handler has been provided.
+   */
+  @Nullable
+  EventHandlerSpecification getEventHandler();
+
+  /**
+   * Builder for constructing instance of {@link TwillSpecification}.
+   */
+  static final class Builder {
+
+    private String name;
+    private Map<String, RuntimeSpecification> runnables = Maps.newHashMap();
+    private List<Order> orders = Lists.newArrayList();
+    private EventHandlerSpecification eventHandler;
+
+    public static NameSetter with() {
+      return new Builder().new NameSetter();
+    }
+
+    public final class NameSetter {
+      public AfterName setName(String name) {
+        Builder.this.name = name;
+        return new AfterName();
+      }
+    }
+
+    public final class AfterName {
+      public MoreRunnable withRunnable() {
+        return new RunnableSetter();
+      }
+    }
+
+    public interface MoreRunnable {
+      RuntimeSpecificationAdder add(TwillRunnable runnable);
+
+      RuntimeSpecificationAdder add(TwillRunnable runnable, ResourceSpecification resourceSpec);
+
+      /**
+       * Adds a {@link TwillRunnable} with {@link ResourceSpecification#BASIC} resource specification.
+       * @param name Name of runnable
+       * @param runnable {@link TwillRunnable} to be run
+       * @return instance of {@link RuntimeSpecificationAdder}
+       */
+      RuntimeSpecificationAdder add(String name, TwillRunnable runnable);
+
+      RuntimeSpecificationAdder add(String name, TwillRunnable runnable, ResourceSpecification resourceSpec);
+    }
+
+    public interface AfterRunnable {
+      FirstOrder withOrder();
+
+      AfterOrder anyOrder();
+    }
+
+    public final class RunnableSetter implements MoreRunnable, AfterRunnable {
+
+      @Override
+      public RuntimeSpecificationAdder add(TwillRunnable runnable) {
+        return add(runnable.configure().getName(), runnable);
+      }
+
+      @Override
+      public RuntimeSpecificationAdder add(TwillRunnable runnable, ResourceSpecification resourceSpec) {
+        return add(runnable.configure().getName(), runnable, resourceSpec);
+      }
+
+      @Override
+      public RuntimeSpecificationAdder add(String name, TwillRunnable runnable) {
+        return add(name, runnable, ResourceSpecification.BASIC);
+      }
+
+      @Override
+      public RuntimeSpecificationAdder add(String name, TwillRunnable runnable,
+                                           final ResourceSpecification resourceSpec) {
+        final TwillRunnableSpecification spec = new DefaultTwillRunnableSpecification(
+                                            runnable.getClass().getName(), name, runnable.configure().getConfigs());
+        return new RuntimeSpecificationAdder(new Function<Collection<LocalFile>, RunnableSetter>() {
+          @Override
+          public RunnableSetter apply(Collection<LocalFile> files) {
+            runnables.put(spec.getName(), new DefaultRuntimeSpecification(spec.getName(), spec, resourceSpec, files));
+            return RunnableSetter.this;
+          }
+        });
+      }
+
+      @Override
+      public FirstOrder withOrder() {
+        return new OrderSetter();
+      }
+
+      @Override
+      public AfterOrder anyOrder() {
+        return new OrderSetter();
+      }
+    }
+
+    /**
+     * For setting runtime specific settings.
+     */
+    public final class RuntimeSpecificationAdder {
+
+      private final Function<Collection<LocalFile>, RunnableSetter> completer;
+
+      RuntimeSpecificationAdder(Function<Collection<LocalFile>, RunnableSetter> completer) {
+        this.completer = completer;
+      }
+
+      public LocalFileAdder withLocalFiles() {
+        return new MoreFile(completer);
+      }
+
+      public RunnableSetter noLocalFiles() {
+        return completer.apply(ImmutableList.<LocalFile>of());
+      }
+    }
+
+    public interface LocalFileAdder {
+      MoreFile add(String name, File file);
+
+      MoreFile add(String name, URI uri);
+
+      MoreFile add(String name, File file, boolean archive);
+
+      MoreFile add(String name, URI uri, boolean archive);
+
+      MoreFile add(String name, File file, String pattern);
+
+      MoreFile add(String name, URI uri, String pattern);
+    }
+
+    public final class MoreFile implements LocalFileAdder {
+
+      private final Function<Collection<LocalFile>, RunnableSetter> completer;
+      private final List<LocalFile> files = Lists.newArrayList();
+
+      public MoreFile(Function<Collection<LocalFile>, RunnableSetter> completer) {
+        this.completer = completer;
+      }
+
+      @Override
+      public MoreFile add(String name, File file) {
+        return add(name, file, false);
+      }
+
+      @Override
+      public MoreFile add(String name, URI uri) {
+        return add(name, uri, false);
+      }
+
+      @Override
+      public MoreFile add(String name, File file, boolean archive) {
+        return add(name, file.toURI(), archive);
+      }
+
+      @Override
+      public MoreFile add(String name, URI uri, boolean archive) {
+        files.add(new DefaultLocalFile(name, uri, -1, -1, archive, null));
+        return this;
+      }
+
+      @Override
+      public MoreFile add(String name, File file, String pattern) {
+        return add(name, file.toURI(), pattern);
+      }
+
+      @Override
+      public MoreFile add(String name, URI uri, String pattern) {
+        files.add(new DefaultLocalFile(name, uri, -1, -1, true, pattern));
+        return this;
+      }
+
+      public RunnableSetter apply() {
+        return completer.apply(files);
+      }
+    }
+
+    public interface FirstOrder {
+      NextOrder begin(String name, String...names);
+    }
+
+    public interface NextOrder extends AfterOrder {
+      NextOrder nextWhenStarted(String name, String...names);
+
+      NextOrder nextWhenCompleted(String name, String...names);
+    }
+
+    public interface AfterOrder {
+      AfterOrder withEventHandler(EventHandler handler);
+
+      TwillSpecification build();
+    }
+
+    public final class OrderSetter implements FirstOrder, NextOrder {
+      @Override
+      public NextOrder begin(String name, String... names) {
+        addOrder(Order.Type.STARTED, name, names);
+        return this;
+      }
+
+      @Override
+      public NextOrder nextWhenStarted(String name, String... names) {
+        addOrder(Order.Type.STARTED, name, names);
+        return this;
+      }
+
+      @Override
+      public NextOrder nextWhenCompleted(String name, String... names) {
+        addOrder(Order.Type.COMPLETED, name, names);
+        return this;
+      }
+
+      @Override
+      public AfterOrder withEventHandler(EventHandler handler) {
+        eventHandler = handler.configure();
+        return this;
+      }
+
+      @Override
+      public TwillSpecification build() {
+        // Set to track which runnables haven't been assigned an order.
+        Set<String> runnableNames = Sets.newHashSet(runnables.keySet());
+        for (Order order : orders) {
+          runnableNames.removeAll(order.getNames());
+        }
+
+        // For all unordered runnables, add them to the end of the orders list.
+        orders.add(new DefaultTwillSpecification.DefaultOrder(runnableNames, Order.Type.STARTED));
+
+        return new DefaultTwillSpecification(name, runnables, orders, eventHandler);
+      }
+
+      private void addOrder(final Order.Type type, String name, String...names) {
+        Preconditions.checkArgument(name != null, "Name cannot be null.");
+        Preconditions.checkArgument(runnables.containsKey(name), "Runnable does not exist.");
+
+        Set<String> runnableNames = Sets.newHashSet(name);
+        for (String runnableName : names) {
+          Preconditions.checkArgument(runnableName != null, "Name cannot be null.");
+          Preconditions.checkArgument(runnables.containsKey(runnableName), "Runnable does not exist.");
+          runnableNames.add(runnableName);
+        }
+
+        orders.add(new DefaultTwillSpecification.DefaultOrder(runnableNames, type));
+      }
+    }
+
+    private Builder() {}
+  }
+}
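For readers skimming this hunk, the fluent builder defined above chains roughly as follows. This is a minimal sketch: the with()/setName()/withRunnable() entry points and the EchoServerRunnable class are assumptions, since they sit outside the lines shown here; the local-file, ordering and build() stages are the ones added above.

    TwillSpecification spec = TwillSpecification.Builder.with()
      .setName("EchoServer")                    // application name (assumed entry point)
      .withRunnable()
        .add("echo", new EchoServerRunnable())  // hypothetical TwillRunnable
        .noLocalFiles()                         // RuntimeSpecificationAdder -> RunnableSetter
      .anyOrder()                               // no start-order constraint between runnables
      .build();

withOrder().begin(...).nextWhenStarted(...) would replace anyOrder() when runnables must start in sequence; as build() above shows, any runnable without an explicit order is appended as a final STARTED group.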


[22/28] Making maven site works.

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/test/java/org/apache/twill/internal/ControllerTest.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/twill/internal/ControllerTest.java b/core/src/test/java/org/apache/twill/internal/ControllerTest.java
deleted file mode 100644
index 382dc95..0000000
--- a/core/src/test/java/org/apache/twill/internal/ControllerTest.java
+++ /dev/null
@@ -1,211 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal;
-
-import org.apache.twill.api.Command;
-import org.apache.twill.api.ResourceReport;
-import org.apache.twill.api.RunId;
-import org.apache.twill.api.ServiceController;
-import org.apache.twill.api.TwillController;
-import org.apache.twill.api.logging.LogHandler;
-import org.apache.twill.common.ServiceListenerAdapter;
-import org.apache.twill.common.Threads;
-import org.apache.twill.internal.state.StateNode;
-import org.apache.twill.internal.zookeeper.InMemoryZKServer;
-import org.apache.twill.zookeeper.NodeData;
-import org.apache.twill.zookeeper.ZKClient;
-import org.apache.twill.zookeeper.ZKClientService;
-import com.google.common.base.Suppliers;
-import com.google.common.collect.ImmutableList;
-import com.google.common.util.concurrent.AbstractIdleService;
-import com.google.common.util.concurrent.Service;
-import com.google.gson.JsonObject;
-import org.junit.Assert;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-
-/**
- *
- */
-public class ControllerTest {
-
-  private static final Logger LOG = LoggerFactory.getLogger(ControllerTest.class);
-
-  @Test
-  public void testController() throws ExecutionException, InterruptedException, TimeoutException {
-    InMemoryZKServer zkServer = InMemoryZKServer.builder().build();
-    zkServer.startAndWait();
-
-    LOG.info("ZKServer: " + zkServer.getConnectionStr());
-
-    try {
-      RunId runId = RunIds.generate();
-      ZKClientService zkClientService = ZKClientService.Builder.of(zkServer.getConnectionStr()).build();
-      zkClientService.startAndWait();
-
-      Service service = createService(zkClientService, runId);
-      service.startAndWait();
-
-      TwillController controller = getController(zkClientService, runId);
-      controller.sendCommand(Command.Builder.of("test").build()).get(2, TimeUnit.SECONDS);
-      controller.stop().get(2, TimeUnit.SECONDS);
-
-      Assert.assertEquals(ServiceController.State.TERMINATED, controller.state());
-
-      final CountDownLatch terminateLatch = new CountDownLatch(1);
-      service.addListener(new ServiceListenerAdapter() {
-        @Override
-        public void terminated(Service.State from) {
-          terminateLatch.countDown();
-        }
-      }, Threads.SAME_THREAD_EXECUTOR);
-
-      Assert.assertTrue(service.state() == Service.State.TERMINATED || terminateLatch.await(2, TimeUnit.SECONDS));
-
-      zkClientService.stopAndWait();
-
-    } finally {
-      zkServer.stopAndWait();
-    }
-  }
-
-  // Test controller created before service starts.
-  @Test
-  public void testControllerBefore() throws InterruptedException {
-    InMemoryZKServer zkServer = InMemoryZKServer.builder().build();
-    zkServer.startAndWait();
-
-    LOG.info("ZKServer: " + zkServer.getConnectionStr());
-    try {
-      RunId runId = RunIds.generate();
-      ZKClientService zkClientService = ZKClientService.Builder.of(zkServer.getConnectionStr()).build();
-      zkClientService.startAndWait();
-
-      final CountDownLatch runLatch = new CountDownLatch(1);
-      final CountDownLatch stopLatch = new CountDownLatch(1);
-      TwillController controller = getController(zkClientService, runId);
-      controller.addListener(new ServiceListenerAdapter() {
-        @Override
-        public void running() {
-          runLatch.countDown();
-        }
-
-        @Override
-        public void terminated(Service.State from) {
-          stopLatch.countDown();
-        }
-      }, Threads.SAME_THREAD_EXECUTOR);
-
-      Service service = createService(zkClientService, runId);
-      service.start();
-
-      Assert.assertTrue(runLatch.await(2, TimeUnit.SECONDS));
-      Assert.assertFalse(stopLatch.await(2, TimeUnit.SECONDS));
-
-      service.stop();
-
-      Assert.assertTrue(stopLatch.await(2, TimeUnit.SECONDS));
-
-    } finally {
-      zkServer.stopAndWait();
-    }
-  }
-
-  // Test that the controller listener receives the first state change without a state transition from the service
-  @Test
-  public void testControllerListener() throws InterruptedException {
-    InMemoryZKServer zkServer = InMemoryZKServer.builder().build();
-    zkServer.startAndWait();
-
-    LOG.info("ZKServer: " + zkServer.getConnectionStr());
-    try {
-      RunId runId = RunIds.generate();
-      ZKClientService zkClientService = ZKClientService.Builder.of(zkServer.getConnectionStr()).build();
-      zkClientService.startAndWait();
-
-      Service service = createService(zkClientService, runId);
-      service.startAndWait();
-
-      final CountDownLatch runLatch = new CountDownLatch(1);
-      TwillController controller = getController(zkClientService, runId);
-      controller.addListener(new ServiceListenerAdapter() {
-        @Override
-        public void running() {
-          runLatch.countDown();
-        }
-      }, Threads.SAME_THREAD_EXECUTOR);
-
-      Assert.assertTrue(runLatch.await(2, TimeUnit.SECONDS));
-
-      service.stopAndWait();
-
-      zkClientService.stopAndWait();
-    } finally {
-      zkServer.stopAndWait();
-    }
-  }
-
-  private Service createService(ZKClient zkClient, RunId runId) {
-    return new ZKServiceDecorator(
-      zkClient, runId, Suppliers.ofInstance(new JsonObject()), new AbstractIdleService() {
-
-      @Override
-      protected void startUp() throws Exception {
-        LOG.info("Start");
-      }
-
-      @Override
-      protected void shutDown() throws Exception {
-        LOG.info("Stop");
-      }
-    });
-  }
-
-  private TwillController getController(ZKClient zkClient, RunId runId) {
-    TwillController controller = new AbstractTwillController(runId, zkClient, ImmutableList.<LogHandler>of()) {
-
-      @Override
-      public void kill() {
-        // No-op
-      }
-
-      @Override
-      protected void instanceNodeUpdated(NodeData nodeData) {
-        // No-op
-      }
-
-      @Override
-      protected void stateNodeUpdated(StateNode stateNode) {
-        // No-op
-      }
-
-      @Override
-      public ResourceReport getResourceReport() {
-        return null;
-      }
-    };
-    controller.startAndWait();
-    return controller;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/test/java/org/apache/twill/internal/state/MessageCodecTest.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/twill/internal/state/MessageCodecTest.java b/core/src/test/java/org/apache/twill/internal/state/MessageCodecTest.java
deleted file mode 100644
index d267cf8..0000000
--- a/core/src/test/java/org/apache/twill/internal/state/MessageCodecTest.java
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.state;
-
-import org.apache.twill.api.Command;
-import com.google.common.collect.ImmutableMap;
-import org.junit.Assert;
-import org.junit.Test;
-
-import java.util.Map;
-
-/**
- *
- */
-public class MessageCodecTest {
-
-  @Test
-  public void testCodec() {
-    Message message = MessageCodec.decode(MessageCodec.encode(new Message() {
-
-      @Override
-      public Type getType() {
-        return Type.SYSTEM;
-      }
-
-      @Override
-      public Scope getScope() {
-        return Scope.APPLICATION;
-      }
-
-      @Override
-      public String getRunnableName() {
-        return null;
-      }
-
-      @Override
-      public Command getCommand() {
-        return new Command() {
-          @Override
-          public String getCommand() {
-            return "stop";
-          }
-
-          @Override
-          public Map<String, String> getOptions() {
-            return ImmutableMap.of("timeout", "1", "timeoutUnit", "SECONDS");
-          }
-        };
-      }
-    }));
-
-    Assert.assertEquals(Message.Type.SYSTEM, message.getType());
-    Assert.assertEquals(Message.Scope.APPLICATION, message.getScope());
-    Assert.assertNull(message.getRunnableName());
-    Assert.assertEquals("stop", message.getCommand().getCommand());
-    Assert.assertEquals(ImmutableMap.of("timeout", "1", "timeoutUnit", "SECONDS"), message.getCommand().getOptions());
-  }
-
-  @Test
-  public void testFailureDecode() {
-    Assert.assertNull(MessageCodec.decode("".getBytes()));
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/test/java/org/apache/twill/internal/state/ZKServiceDecoratorTest.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/twill/internal/state/ZKServiceDecoratorTest.java b/core/src/test/java/org/apache/twill/internal/state/ZKServiceDecoratorTest.java
deleted file mode 100644
index 47d8562..0000000
--- a/core/src/test/java/org/apache/twill/internal/state/ZKServiceDecoratorTest.java
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.state;
-
-import org.apache.twill.api.RunId;
-import org.apache.twill.internal.RunIds;
-import org.apache.twill.internal.ZKServiceDecorator;
-import org.apache.twill.internal.zookeeper.InMemoryZKServer;
-import org.apache.twill.zookeeper.NodeData;
-import org.apache.twill.zookeeper.ZKClientService;
-import org.apache.twill.zookeeper.ZKClients;
-import com.google.common.base.Charsets;
-import com.google.common.base.Joiner;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Suppliers;
-import com.google.common.util.concurrent.AbstractIdleService;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.Service;
-import com.google.gson.Gson;
-import com.google.gson.JsonElement;
-import com.google.gson.JsonObject;
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.WatchedEvent;
-import org.apache.zookeeper.Watcher;
-import org.apache.zookeeper.data.Stat;
-import org.junit.Assert;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Semaphore;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import java.util.concurrent.atomic.AtomicReference;
-
-/**
- *
- */
-public class ZKServiceDecoratorTest {
-
-  private static final Logger LOG = LoggerFactory.getLogger(ZKServiceDecoratorTest.class);
-
-  @Test
-  public void testStateTransition() throws InterruptedException, ExecutionException, TimeoutException {
-    InMemoryZKServer zkServer = InMemoryZKServer.builder().build();
-    zkServer.startAndWait();
-
-    try {
-      final String namespace = Joiner.on('/').join("/twill", RunIds.generate(), "runnables", "Runner1");
-
-      final ZKClientService zkClient = ZKClientService.Builder.of(zkServer.getConnectionStr()).build();
-      zkClient.startAndWait();
-      zkClient.create(namespace, null, CreateMode.PERSISTENT).get();
-
-      try {
-        JsonObject content = new JsonObject();
-        content.addProperty("containerId", "container-123");
-        content.addProperty("host", "localhost");
-
-        RunId runId = RunIds.generate();
-        final Semaphore semaphore = new Semaphore(0);
-        ZKServiceDecorator service = new ZKServiceDecorator(ZKClients.namespace(zkClient, namespace),
-                                                            runId, Suppliers.ofInstance(content),
-                                                            new AbstractIdleService() {
-          @Override
-          protected void startUp() throws Exception {
-            Preconditions.checkArgument(semaphore.tryAcquire(5, TimeUnit.SECONDS), "Fail to start");
-          }
-
-          @Override
-          protected void shutDown() throws Exception {
-            Preconditions.checkArgument(semaphore.tryAcquire(5, TimeUnit.SECONDS), "Fail to stop");
-          }
-        });
-
-        final String runnablePath = namespace + "/" + runId.getId();
-        final AtomicReference<String> stateMatch = new AtomicReference<String>("STARTING");
-        watchDataChange(zkClient, runnablePath + "/state", semaphore, stateMatch);
-        Assert.assertEquals(Service.State.RUNNING, service.start().get(5, TimeUnit.SECONDS));
-
-        stateMatch.set("STOPPING");
-        Assert.assertEquals(Service.State.TERMINATED, service.stop().get(5, TimeUnit.SECONDS));
-
-      } finally {
-        zkClient.stopAndWait();
-      }
-    } finally {
-      zkServer.stopAndWait();
-    }
-  }
-
-  private void watchDataChange(final ZKClientService zkClient, final String path,
-                               final Semaphore semaphore, final AtomicReference<String> stateMatch) {
-    Futures.addCallback(zkClient.getData(path, new Watcher() {
-      @Override
-      public void process(WatchedEvent event) {
-        if (event.getType() == Event.EventType.NodeDataChanged) {
-          watchDataChange(zkClient, path, semaphore, stateMatch);
-        }
-      }
-    }), new FutureCallback<NodeData>() {
-      @Override
-      public void onSuccess(NodeData result) {
-        String content = new String(result.getData(), Charsets.UTF_8);
-        JsonObject json = new Gson().fromJson(content, JsonElement.class).getAsJsonObject();
-        if (stateMatch.get().equals(json.get("state").getAsString())) {
-          semaphore.release();
-        }
-      }
-
-      @Override
-      public void onFailure(Throwable t) {
-        exists();
-      }
-
-      private void exists() {
-        Futures.addCallback(zkClient.exists(path, new Watcher() {
-          @Override
-          public void process(WatchedEvent event) {
-            if (event.getType() == Event.EventType.NodeCreated) {
-              watchDataChange(zkClient, path, semaphore, stateMatch);
-            }
-          }
-        }), new FutureCallback<Stat>() {
-          @Override
-          public void onSuccess(Stat result) {
-            if (result != null) {
-              watchDataChange(zkClient, path, semaphore, stateMatch);
-            }
-          }
-
-          @Override
-          public void onFailure(Throwable t) {
-            LOG.error(t.getMessage(), t);
-          }
-        });
-      }
-    });
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/test/java/org/apache/twill/internal/utils/ApplicationBundlerTest.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/twill/internal/utils/ApplicationBundlerTest.java b/core/src/test/java/org/apache/twill/internal/utils/ApplicationBundlerTest.java
deleted file mode 100644
index 508cadb..0000000
--- a/core/src/test/java/org/apache/twill/internal/utils/ApplicationBundlerTest.java
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.utils;
-
-import org.apache.twill.filesystem.LocalLocationFactory;
-import org.apache.twill.filesystem.Location;
-import org.apache.twill.internal.ApplicationBundler;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Lists;
-import com.google.common.io.ByteStreams;
-import com.google.common.io.Files;
-import org.junit.Assert;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.IOException;
-import java.net.MalformedURLException;
-import java.net.URL;
-import java.net.URLClassLoader;
-import java.util.List;
-import java.util.jar.JarEntry;
-import java.util.jar.JarInputStream;
-
-/**
- *
- */
-public class ApplicationBundlerTest {
-
-  @Rule
-  public TemporaryFolder tmpDir = new TemporaryFolder();
-
-  @Test
-  public void testFindDependencies() throws IOException, ClassNotFoundException {
-    Location location = new LocalLocationFactory(tmpDir.newFolder()).create("test.jar");
-
-    // Create a jar file by tracing dependencies
-    ApplicationBundler bundler = new ApplicationBundler(ImmutableList.<String>of());
-    bundler.createBundle(location, ApplicationBundler.class);
-
-    File targetDir = tmpDir.newFolder();
-    unjar(new File(location.toURI()), targetDir);
-
-    // Load the class back, it should be loaded by the custom classloader
-    ClassLoader classLoader = createClassLoader(targetDir);
-    Class<?> clz = classLoader.loadClass(ApplicationBundler.class.getName());
-    Assert.assertSame(classLoader, clz.getClassLoader());
-
-    // For system classes, they shouldn't be packaged, hence loaded by different classloader.
-    clz = classLoader.loadClass(Object.class.getName());
-    Assert.assertNotSame(classLoader, clz.getClassLoader());
-  }
-
-  private void unjar(File jarFile, File targetDir) throws IOException {
-    JarInputStream jarInput = new JarInputStream(new FileInputStream(jarFile));
-    try {
-      JarEntry jarEntry = jarInput.getNextJarEntry();
-      while (jarEntry != null) {
-        File target = new File(targetDir, jarEntry.getName());
-        if (jarEntry.isDirectory()) {
-          target.mkdirs();
-        } else {
-          target.getParentFile().mkdirs();
-          ByteStreams.copy(jarInput, Files.newOutputStreamSupplier(target));
-        }
-
-        jarEntry = jarInput.getNextJarEntry();
-      }
-    } finally {
-      jarInput.close();
-    }
-  }
-
-  private ClassLoader createClassLoader(File dir) throws MalformedURLException {
-    List<URL> urls = Lists.newArrayList();
-    urls.add(new File(dir, "classes").toURI().toURL());
-    File[] libFiles = new File(dir, "lib").listFiles();
-    if (libFiles != null) {
-      for (File file : libFiles) {
-        urls.add(file.toURI().toURL());
-      }
-    }
-    return new URLClassLoader(urls.toArray(new URL[0])) {
-      @Override
-      protected synchronized Class<?> loadClass(String name, boolean resolve) throws ClassNotFoundException {
-        // Load class from the given URLs first before delegating to parent.
-        try {
-          return super.findClass(name);
-        } catch (ClassNotFoundException e) {
-          ClassLoader parent = getParent();
-          return parent == null ? ClassLoader.getSystemClassLoader().loadClass(name) : parent.loadClass(name);
-        }
-      }
-    };
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/test/java/org/apache/twill/kafka/client/KafkaTest.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/twill/kafka/client/KafkaTest.java b/core/src/test/java/org/apache/twill/kafka/client/KafkaTest.java
deleted file mode 100644
index 40fc3ed..0000000
--- a/core/src/test/java/org/apache/twill/kafka/client/KafkaTest.java
+++ /dev/null
@@ -1,220 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.kafka.client;
-
-import org.apache.twill.common.Services;
-import org.apache.twill.internal.kafka.EmbeddedKafkaServer;
-import org.apache.twill.internal.kafka.client.Compression;
-import org.apache.twill.internal.kafka.client.SimpleKafkaClient;
-import org.apache.twill.internal.utils.Networks;
-import org.apache.twill.internal.zookeeper.InMemoryZKServer;
-import org.apache.twill.zookeeper.ZKClientService;
-import com.google.common.base.Charsets;
-import com.google.common.base.Preconditions;
-import com.google.common.io.ByteStreams;
-import com.google.common.io.Files;
-import com.google.common.util.concurrent.Futures;
-import org.apache.commons.compress.archivers.ArchiveEntry;
-import org.apache.commons.compress.archivers.ArchiveException;
-import org.apache.commons.compress.archivers.ArchiveInputStream;
-import org.apache.commons.compress.archivers.ArchiveStreamFactory;
-import org.apache.commons.compress.compressors.CompressorException;
-import org.apache.commons.compress.compressors.CompressorStreamFactory;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.ClassRule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.Iterator;
-import java.util.Properties;
-import java.util.concurrent.TimeUnit;
-
-/**
- *
- */
-public class KafkaTest {
-
-  private static final Logger LOG = LoggerFactory.getLogger(KafkaTest.class);
-
-  @ClassRule
-  public static final TemporaryFolder TMP_FOLDER = new TemporaryFolder();
-
-  private static InMemoryZKServer zkServer;
-  private static EmbeddedKafkaServer kafkaServer;
-  private static ZKClientService zkClientService;
-  private static KafkaClient kafkaClient;
-
-  @BeforeClass
-  public static void init() throws Exception {
-    zkServer = InMemoryZKServer.builder().setDataDir(TMP_FOLDER.newFolder()).build();
-    zkServer.startAndWait();
-
-    // Extract the kafka.tgz and start the kafka server
-    kafkaServer = new EmbeddedKafkaServer(extractKafka(), generateKafkaConfig(zkServer.getConnectionStr()));
-    kafkaServer.startAndWait();
-
-    zkClientService = ZKClientService.Builder.of(zkServer.getConnectionStr()).build();
-
-    kafkaClient = new SimpleKafkaClient(zkClientService);
-    Services.chainStart(zkClientService, kafkaClient).get();
-  }
-
-  @AfterClass
-  public static void finish() throws Exception {
-    Services.chainStop(kafkaClient, zkClientService).get();
-    kafkaServer.stopAndWait();
-    zkServer.stopAndWait();
-  }
-
-  @Test
-  public void testKafkaClient() throws Exception {
-    String topic = "testClient";
-
-    Thread t1 = createPublishThread(kafkaClient, topic, Compression.GZIP, "GZIP Testing message", 10);
-    Thread t2 = createPublishThread(kafkaClient, topic, Compression.NONE, "Testing message", 10);
-
-    t1.start();
-    t2.start();
-
-    Thread t3 = createPublishThread(kafkaClient, topic, Compression.SNAPPY, "Snappy Testing message", 10);
-    t2.join();
-    t3.start();
-
-    Iterator<FetchedMessage> consumer = kafkaClient.consume(topic, 0, 0, 1048576);
-    int count = 0;
-    long startTime = System.nanoTime();
-    while (count < 30 && consumer.hasNext() && secondsPassed(startTime, TimeUnit.NANOSECONDS) < 5) {
-      LOG.info(Charsets.UTF_8.decode(consumer.next().getBuffer()).toString());
-      count++;
-    }
-
-    Assert.assertEquals(30, count);
-  }
-
-  @Test (timeout = 10000)
-  public void testOffset() throws Exception {
-    String topic = "testOffset";
-
-    // Initial earliest offset should be 0.
-    long[] offsets = kafkaClient.getOffset(topic, 0, -2, 10).get();
-    Assert.assertArrayEquals(new long[]{0L}, offsets);
-
-    // Publish some messages
-    Thread publishThread = createPublishThread(kafkaClient, topic, Compression.NONE, "Testing", 2000);
-    publishThread.start();
-    publishThread.join();
-
-    // Fetch earliest offset, should still be 0.
-    offsets = kafkaClient.getOffset(topic, 0, -2, 10).get();
-    Assert.assertArrayEquals(new long[]{0L}, offsets);
-
-    // Fetch latest offset
-    offsets = kafkaClient.getOffset(topic, 0, -1, 10).get();
-    Iterator<FetchedMessage> consumer = kafkaClient.consume(topic, 0, offsets[0], 1048576);
-
-    // Publish one more message, the consumer should see the new message being published.
-    publishThread = createPublishThread(kafkaClient, topic, Compression.NONE, "Testing", 1, 3000);
-    publishThread.start();
-    publishThread.join();
-
-    // Should see the last message being published.
-    Assert.assertTrue(consumer.hasNext());
-    Assert.assertEquals("3000 Testing", Charsets.UTF_8.decode(consumer.next().getBuffer()).toString());
-  }
-
-  private Thread createPublishThread(final KafkaClient kafkaClient, final String topic,
-                                     final Compression compression, final String message, final int count) {
-    return createPublishThread(kafkaClient, topic, compression, message, count, 0);
-  }
-
-  private Thread createPublishThread(final KafkaClient kafkaClient, final String topic, final Compression compression,
-                                     final String message, final int count, final int base) {
-    return new Thread() {
-      public void run() {
-        PreparePublish preparePublish = kafkaClient.preparePublish(topic, compression);
-        for (int i = 0; i < count; i++) {
-          preparePublish.add(((base + i) + " " + message).getBytes(Charsets.UTF_8), 0);
-        }
-        Futures.getUnchecked(preparePublish.publish());
-      }
-    };
-  }
-
-  private long secondsPassed(long startTime, TimeUnit startUnit) {
-    return TimeUnit.SECONDS.convert(System.nanoTime() - TimeUnit.NANOSECONDS.convert(startTime, startUnit),
-                                    TimeUnit.NANOSECONDS);
-  }
-
-  private static File extractKafka() throws IOException, ArchiveException, CompressorException {
-    File kafkaExtract = TMP_FOLDER.newFolder();
-    InputStream kafkaResource = KafkaTest.class.getClassLoader().getResourceAsStream("kafka-0.7.2.tgz");
-    ArchiveInputStream archiveInput = new ArchiveStreamFactory()
-      .createArchiveInputStream(ArchiveStreamFactory.TAR,
-                                new CompressorStreamFactory()
-                                  .createCompressorInputStream(CompressorStreamFactory.GZIP, kafkaResource));
-
-    try {
-      ArchiveEntry entry = archiveInput.getNextEntry();
-      while (entry != null) {
-        File file = new File(kafkaExtract, entry.getName());
-        if (entry.isDirectory()) {
-          file.mkdirs();
-        } else {
-          ByteStreams.copy(archiveInput, Files.newOutputStreamSupplier(file));
-        }
-        entry = archiveInput.getNextEntry();
-      }
-    } finally {
-      archiveInput.close();
-    }
-    return kafkaExtract;
-  }
-
-  private static Properties generateKafkaConfig(String zkConnectStr) throws IOException {
-    int port = Networks.getRandomPort();
-    Preconditions.checkState(port > 0, "Failed to get random port.");
-
-    Properties prop = new Properties();
-    prop.setProperty("log.dir", TMP_FOLDER.newFolder().getAbsolutePath());
-    prop.setProperty("zk.connect", zkConnectStr);
-    prop.setProperty("num.threads", "8");
-    prop.setProperty("port", Integer.toString(port));
-    prop.setProperty("log.flush.interval", "1000");
-    prop.setProperty("max.socket.request.bytes", "104857600");
-    prop.setProperty("log.cleanup.interval.mins", "1");
-    prop.setProperty("log.default.flush.scheduler.interval.ms", "1000");
-    prop.setProperty("zk.connectiontimeout.ms", "1000000");
-    prop.setProperty("socket.receive.buffer", "1048576");
-    prop.setProperty("enable.zookeeper", "true");
-    prop.setProperty("log.retention.hours", "24");
-    prop.setProperty("brokerid", "0");
-    prop.setProperty("socket.send.buffer", "1048576");
-    prop.setProperty("num.partitions", "1");
-    // Use a really small file size to force some flush to happen
-    prop.setProperty("log.file.size", "1024");
-    prop.setProperty("log.default.flush.interval.ms", "1000");
-    return prop;
-  }
-}
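Condensed from the deleted test above, the publish/consume cycle of the Kafka client looks roughly like this; the topic name and payloads are illustrative, and the client is assumed to be started already (the test chains it with the ZK client via Services.chainStart):

    // Publish two messages to partition 0 (the second argument of add mirrors the test's usage).
    PreparePublish publish = kafkaClient.preparePublish("events", Compression.NONE);
    publish.add("hello".getBytes(Charsets.UTF_8), 0);
    publish.add("world".getBytes(Charsets.UTF_8), 0);
    Futures.getUnchecked(publish.publish());

    // -2 asks for the earliest offset, -1 for the latest; consume partition 0 from the earliest offset.
    long[] earliest = kafkaClient.getOffset("events", 0, -2, 10).get();
    Iterator<FetchedMessage> messages = kafkaClient.consume("events", 0, earliest[0], 1048576);
    while (messages.hasNext()) {
      System.out.println(Charsets.UTF_8.decode(messages.next().getBuffer()));
    }

The consuming iterator is live, which is why the test bounds its loop with a message count and a timeout rather than relying on hasNext() returning false.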

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/test/resources/logback-test.xml
----------------------------------------------------------------------
diff --git a/core/src/test/resources/logback-test.xml b/core/src/test/resources/logback-test.xml
deleted file mode 100644
index 3c36660..0000000
--- a/core/src/test/resources/logback-test.xml
+++ /dev/null
@@ -1,18 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-
-<!-- Default logback configuration for twill library -->
-<configuration>
-    <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
-        <encoder>
-            <pattern>%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n</pattern>
-        </encoder>
-    </appender>
-
-    <logger name="org.apache.hadoop" level="WARN" />
-    <logger name="org.apache.zookeeper" level="WARN" />
-
-    <root level="INFO">
-        <appender-ref ref="STDOUT"/>
-    </root>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/discovery-api/pom.xml
----------------------------------------------------------------------
diff --git a/discovery-api/pom.xml b/discovery-api/pom.xml
deleted file mode 100644
index e41b214..0000000
--- a/discovery-api/pom.xml
+++ /dev/null
@@ -1,39 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-    <parent>
-        <artifactId>twill-parent</artifactId>
-        <groupId>org.apache.twill</groupId>
-        <version>0.1.0-SNAPSHOT</version>
-    </parent>
-    <modelVersion>4.0.0</modelVersion>
-
-    <artifactId>twill-discovery-api</artifactId>
-    <name>Twill discovery service API</name>
-
-    <dependencies>
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>twill-common</artifactId>
-            <version>${project.version}</version>
-        </dependency>
-    </dependencies>
-</project>

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/discovery-api/src/main/java/org/apache/twill/discovery/Discoverable.java
----------------------------------------------------------------------
diff --git a/discovery-api/src/main/java/org/apache/twill/discovery/Discoverable.java b/discovery-api/src/main/java/org/apache/twill/discovery/Discoverable.java
deleted file mode 100644
index a5529fe..0000000
--- a/discovery-api/src/main/java/org/apache/twill/discovery/Discoverable.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.twill.discovery;
-
-import java.net.InetSocketAddress;
-
-/**
- * Discoverable defines the attributes of service to be discovered.
- */
-public interface Discoverable {
-
-  /**
-   * @return Name of the service
-   */
-  String getName();
-
-  /**
-   * @return An {@link InetSocketAddress} representing the host+port of the service.
-   */
-  InetSocketAddress getSocketAddress();
-}
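As a quick illustration of the two-method contract above, a service announcing itself on a host and port might implement Discoverable along these lines (the service name and port are illustrative):

    Discoverable echoService = new Discoverable() {
      @Override
      public String getName() {
        return "echo";                                    // logical service name used for lookups
      }

      @Override
      public InetSocketAddress getSocketAddress() {
        return new InetSocketAddress("localhost", 8080);  // where the service actually listens
      }
    };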

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/discovery-api/src/main/java/org/apache/twill/discovery/DiscoveryService.java
----------------------------------------------------------------------
diff --git a/discovery-api/src/main/java/org/apache/twill/discovery/DiscoveryService.java b/discovery-api/src/main/java/org/apache/twill/discovery/DiscoveryService.java
deleted file mode 100644
index a26fff8..0000000
--- a/discovery-api/src/main/java/org/apache/twill/discovery/DiscoveryService.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.discovery;
-
-
-import org.apache.twill.common.Cancellable;
-
-/**
- * DiscoveryService defines interface for registering {@link Discoverable}.
- */
-public interface DiscoveryService {
-
-  /**
-   * Registers a {@link Discoverable} service.
-   * @param discoverable Information of the service provider that could be discovered.
-   * @return A {@link Cancellable} for un-registration.
-   */
-  Cancellable register(Discoverable discoverable);
-}
-

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/discovery-api/src/main/java/org/apache/twill/discovery/DiscoveryServiceClient.java
----------------------------------------------------------------------
diff --git a/discovery-api/src/main/java/org/apache/twill/discovery/DiscoveryServiceClient.java b/discovery-api/src/main/java/org/apache/twill/discovery/DiscoveryServiceClient.java
deleted file mode 100644
index 89cf269..0000000
--- a/discovery-api/src/main/java/org/apache/twill/discovery/DiscoveryServiceClient.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.discovery;
-
-/**
- * Interface for {@link DiscoveryServiceClient} to discover services registered with {@link DiscoveryService}.
- */
-public interface DiscoveryServiceClient {
-
-  /**
-   * Retrieves a list of {@link Discoverable} for a service with the given name.
-   *
-   * @param name Name of the service
-   * @return A live {@link Iterable} that on each call to {@link Iterable#iterator()} returns
-   *         an {@link java.util.Iterator Iterator} that reflects the latest set of
-   *         available {@link Discoverable} services.
-   */
-  Iterable<Discoverable> discover(String name);
-}
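Putting the two interfaces together: register() hands back a Cancellable for later un-registration, and discover() returns a live Iterable that reflects the current membership each time it is iterated. A minimal round trip, using the InMemoryDiscoveryService removed further down in this patch and the echoService sketch above:

    InMemoryDiscoveryService discovery = new InMemoryDiscoveryService(); // implements both interfaces
    Cancellable registration = discovery.register(echoService);

    for (Discoverable d : discovery.discover("echo")) {
      System.out.println(d.getName() + " @ " + d.getSocketAddress());
    }

    registration.cancel(); // later iterations of discover("echo") no longer include this instance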

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/discovery-core/pom.xml
----------------------------------------------------------------------
diff --git a/discovery-core/pom.xml b/discovery-core/pom.xml
deleted file mode 100644
index 2612138..0000000
--- a/discovery-core/pom.xml
+++ /dev/null
@@ -1,52 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-    <parent>
-        <artifactId>twill-parent</artifactId>
-        <groupId>org.apache.twill</groupId>
-        <version>0.1.0-SNAPSHOT</version>
-    </parent>
-    <modelVersion>4.0.0</modelVersion>
-
-    <artifactId>twill-discovery-core</artifactId>
-    <name>Twill discovery service implementations</name>
-
-    <dependencies>
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>twill-discovery-api</artifactId>
-            <version>${project.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>twill-zookeeper</artifactId>
-            <version>${project.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>com.google.code.gson</groupId>
-            <artifactId>gson</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>junit</groupId>
-            <artifactId>junit</artifactId>
-        </dependency>
-    </dependencies>
-</project>

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/discovery-core/src/main/java/org/apache/twill/discovery/DiscoverableWrapper.java
----------------------------------------------------------------------
diff --git a/discovery-core/src/main/java/org/apache/twill/discovery/DiscoverableWrapper.java b/discovery-core/src/main/java/org/apache/twill/discovery/DiscoverableWrapper.java
deleted file mode 100644
index 5fa97d1..0000000
--- a/discovery-core/src/main/java/org/apache/twill/discovery/DiscoverableWrapper.java
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.discovery;
-
-import java.net.InetSocketAddress;
-
-/**
- * Wrapper for a discoverable.
- */
-final class DiscoverableWrapper implements Discoverable {
-  private final String name;
-  private final InetSocketAddress address;
-
-  DiscoverableWrapper(Discoverable discoverable) {
-    this.name = discoverable.getName();
-    this.address = discoverable.getSocketAddress();
-  }
-
-  @Override
-  public String getName() {
-    return name;
-  }
-
-  @Override
-  public InetSocketAddress getSocketAddress() {
-    return address;
-  }
-
-  @Override
-  public String toString() {
-    return "{name=" + name + ", address=" + address;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-
-    Discoverable other = (Discoverable) o;
-
-    return name.equals(other.getName()) && address.equals(other.getSocketAddress());
-  }
-
-  @Override
-  public int hashCode() {
-    int result = name.hashCode();
-    result = 31 * result + address.hashCode();
-    return result;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/discovery-core/src/main/java/org/apache/twill/discovery/InMemoryDiscoveryService.java
----------------------------------------------------------------------
diff --git a/discovery-core/src/main/java/org/apache/twill/discovery/InMemoryDiscoveryService.java b/discovery-core/src/main/java/org/apache/twill/discovery/InMemoryDiscoveryService.java
deleted file mode 100644
index 7a9e984..0000000
--- a/discovery-core/src/main/java/org/apache/twill/discovery/InMemoryDiscoveryService.java
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.discovery;
-
-import org.apache.twill.common.Cancellable;
-import com.google.common.collect.HashMultimap;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Multimap;
-
-import java.util.Iterator;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReentrantLock;
-
-/**
- * A simple in memory implementation of {@link DiscoveryService} and {@link DiscoveryServiceClient}.
- */
-public class InMemoryDiscoveryService implements DiscoveryService, DiscoveryServiceClient {
-
-  private final Multimap<String, Discoverable> services = HashMultimap.create();
-  private final Lock lock = new ReentrantLock();
-
-  @Override
-  public Cancellable register(final Discoverable discoverable) {
-    lock.lock();
-    try {
-      final Discoverable wrapper = new DiscoverableWrapper(discoverable);
-      services.put(wrapper.getName(), wrapper);
-      return new Cancellable() {
-        @Override
-        public void cancel() {
-          lock.lock();
-          try {
-            services.remove(wrapper.getName(), wrapper);
-          } finally {
-            lock.unlock();
-          }
-        }
-      };
-    } finally {
-      lock.unlock();
-    }
-  }
-
-  @Override
-  public Iterable<Discoverable> discover(final String name) {
-    return new Iterable<Discoverable>() {
-      @Override
-      public Iterator<Discoverable> iterator() {
-        lock.lock();
-        try {
-          return ImmutableList.copyOf(services.get(name)).iterator();
-        } finally {
-          lock.unlock();
-        }
-      }
-    };
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/discovery-core/src/main/java/org/apache/twill/discovery/ZKDiscoveryService.java
----------------------------------------------------------------------
diff --git a/discovery-core/src/main/java/org/apache/twill/discovery/ZKDiscoveryService.java b/discovery-core/src/main/java/org/apache/twill/discovery/ZKDiscoveryService.java
deleted file mode 100644
index e2f9bc0..0000000
--- a/discovery-core/src/main/java/org/apache/twill/discovery/ZKDiscoveryService.java
+++ /dev/null
@@ -1,511 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.discovery;
-
-import org.apache.twill.common.Cancellable;
-import org.apache.twill.common.Threads;
-import org.apache.twill.zookeeper.NodeChildren;
-import org.apache.twill.zookeeper.NodeData;
-import org.apache.twill.zookeeper.OperationFuture;
-import org.apache.twill.zookeeper.ZKClient;
-import org.apache.twill.zookeeper.ZKClients;
-import org.apache.twill.zookeeper.ZKOperations;
-import com.google.common.base.Charsets;
-import com.google.common.cache.CacheBuilder;
-import com.google.common.cache.CacheLoader;
-import com.google.common.cache.LoadingCache;
-import com.google.common.collect.HashMultimap;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Multimap;
-import com.google.common.hash.Hashing;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.SettableFuture;
-import com.google.gson.GsonBuilder;
-import com.google.gson.JsonDeserializationContext;
-import com.google.gson.JsonDeserializer;
-import com.google.gson.JsonElement;
-import com.google.gson.JsonObject;
-import com.google.gson.JsonParseException;
-import com.google.gson.JsonSerializationContext;
-import com.google.gson.JsonSerializer;
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.WatchedEvent;
-import org.apache.zookeeper.Watcher;
-import org.apache.zookeeper.data.Stat;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.lang.reflect.Type;
-import java.net.InetSocketAddress;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicReference;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReentrantLock;
-
-/**
- * Zookeeper implementation of {@link DiscoveryService} and {@link DiscoveryServiceClient}.
- * <p>
- *   Discoverable services are registered within Zookeeper under the namespace 'discoverable' by default.
- *   If you would like to change the namespace under which the services are registered then you can pass
- *   in the namespace during construction of {@link ZKDiscoveryService}.
- * </p>
- *
- * <p>
- *   Following is a simple example of how {@link ZKDiscoveryService} can be used for registering services
- *   and also for discovering the registered services.
- *   <blockquote>
- *    <pre>
- *      {@code
- *
- *      DiscoveryService service = new ZKDiscoveryService(zkClient);
- *      service.register(new Discoverable() {
- *        @Override
- *        public String getName() {
- *          return "service-name";
- *        }
- *
- *        @Override
- *        public InetSocketAddress getSocketAddress() {
- *          return new InetSocketAddress(hostname, port);
- *        }
- *      });
- *      ...
- *      ...
- *      Iterable<Discoverable> services = service.discover("service-name");
- *      ...
- *      }
- *    </pre>
- *   </blockquote>
- * </p>
- */
-public class ZKDiscoveryService implements DiscoveryService, DiscoveryServiceClient {
-  private static final Logger LOG = LoggerFactory.getLogger(ZKDiscoveryService.class);
-  private static final String NAMESPACE = "/discoverable";
-
-  private static final long RETRY_MILLIS = 1000;
-
-  // In-memory map for recreating ephemeral nodes after the session expires.
-  // It maps from a discoverable to the corresponding Cancellable.
-  private final Multimap<Discoverable, DiscoveryCancellable> discoverables;
-  private final Lock lock;
-
-  private final LoadingCache<String, Iterable<Discoverable>> services;
-  private final ZKClient zkClient;
-  private final ScheduledExecutorService retryExecutor;
-
-  /**
-   * Constructs ZKDiscoveryService using the provided zookeeper client for storing service registry.
-   * @param zkClient The {@link ZKClient} for interacting with zookeeper.
-   */
-  public ZKDiscoveryService(ZKClient zkClient) {
-    this(zkClient, NAMESPACE);
-  }
-
-  /**
-   * Constructs ZKDiscoveryService using the provided zookeeper client for storing the service registry under a namespace.
-   * @param zkClient of zookeeper quorum
-   * @param namespace under which the service registered would be stored in zookeeper.
-   *                  If namespace is {@code null}, no namespace will be used.
-   */
-  public ZKDiscoveryService(ZKClient zkClient, String namespace) {
-    this.discoverables = HashMultimap.create();
-    this.lock = new ReentrantLock();
-    this.retryExecutor = Executors.newSingleThreadScheduledExecutor(
-      Threads.createDaemonThreadFactory("zk-discovery-retry"));
-    this.zkClient = namespace == null ? zkClient : ZKClients.namespace(zkClient, namespace);
-    this.services = CacheBuilder.newBuilder().build(createServiceLoader());
-    this.zkClient.addConnectionWatcher(createConnectionWatcher());
-  }
-
-  /**
-   * Registers a {@link Discoverable} in zookeeper.
-   * <p>
-   *   Registering a {@link Discoverable} will create a node <base>/<service-name>
-   *   in zookeeper as an ephemeral node. If the node already exists (for example because the ephemeral node from a
-   *   previous session has not yet timed out), a runtime exception is thrown to make sure that a service with an
-   *   intent to register is not started without registering. When a runtime exception is thrown, the expectation is
-   *   that the process being started will fail and would be started again by the monitoring service.
-   * </p>
-   * @param discoverable Information of the service provider that could be discovered.
-   * @return An instance of {@link Cancellable}
-   */
-  @Override
-  public Cancellable register(final Discoverable discoverable) {
-    final Discoverable wrapper = new DiscoverableWrapper(discoverable);
-    final SettableFuture<String> future = SettableFuture.create();
-    final DiscoveryCancellable cancellable = new DiscoveryCancellable(wrapper);
-
-    // Create the zk ephemeral node.
-    Futures.addCallback(doRegister(wrapper), new FutureCallback<String>() {
-      @Override
-      public void onSuccess(String result) {
-        // Set the sequence node path to cancellable for future cancellation.
-        cancellable.setPath(result);
-        lock.lock();
-        try {
-          discoverables.put(wrapper, cancellable);
-        } finally {
-          lock.unlock();
-        }
-        LOG.debug("Service registered: {} {}", wrapper, result);
-        future.set(result);
-      }
-
-      @Override
-      public void onFailure(Throwable t) {
-        if (t instanceof KeeperException.NodeExistsException) {
-          handleRegisterFailure(discoverable, future, this, t);
-        } else {
-          LOG.warn("Failed to register: {}", wrapper, t);
-          future.setException(t);
-        }
-      }
-    }, Threads.SAME_THREAD_EXECUTOR);
-
-    Futures.getUnchecked(future);
-    return cancellable;
-  }
-
-  @Override
-  public Iterable<Discoverable> discover(String service) {
-    return services.getUnchecked(service);
-  }
-
-  /**
-   * Handle registration failure.
-   *
-   * @param discoverable The discoverable to register.
-   * @param completion A settable future to set when registration is completed / failed.
-   * @param creationCallback A future callback for path creation.
-   * @param failureCause The original cause of failure.
-   */
-  private void handleRegisterFailure(final Discoverable discoverable,
-                                     final SettableFuture<String> completion,
-                                     final FutureCallback<String> creationCallback,
-                                     final Throwable failureCause) {
-
-    final String path = getNodePath(discoverable);
-    Futures.addCallback(zkClient.exists(path), new FutureCallback<Stat>() {
-      @Override
-      public void onSuccess(Stat result) {
-        if (result == null) {
-          // If the node is gone, simply retry.
-          LOG.info("Node {} is gone. Retry registration for {}.", path, discoverable);
-          retryRegister(discoverable, creationCallback);
-          return;
-        }
-
-        long ephemeralOwner = result.getEphemeralOwner();
-        if (ephemeralOwner == 0) {
-          // It is not an ephemeral node; something is wrong.
-          LOG.error("Node {} already exists and is not an ephemeral node. Discoverable registration failed: {}.",
-                    path, discoverable);
-          completion.setException(failureCause);
-          return;
-        }
-        Long sessionId = zkClient.getSessionId();
-        if (sessionId == null || ephemeralOwner != sessionId) {
-          // This zkClient is not valid or doesn't own the ephemeral node, simply keep retrying.
-          LOG.info("Owner of {} is different. Retry registration for {}.", path, discoverable);
-          retryRegister(discoverable, creationCallback);
-        } else {
-          // This client owned the node, treat the registration as completed.
-          // This could happen if same client tries to register twice (due to mistake or failure race condition).
-          completion.set(path);
-        }
-      }
-
-      @Override
-      public void onFailure(Throwable t) {
-        // If exists call failed, simply retry creation.
-        LOG.warn("Error when getting stats on {}. Retry registration for {}.", path, discoverable);
-        retryRegister(discoverable, creationCallback);
-      }
-    }, Threads.SAME_THREAD_EXECUTOR);
-  }
-
-  private OperationFuture<String> doRegister(Discoverable discoverable) {
-    byte[] discoverableBytes = encode(discoverable);
-    return zkClient.create(getNodePath(discoverable), discoverableBytes, CreateMode.EPHEMERAL, true);
-  }
-
-  private void retryRegister(final Discoverable discoverable, final FutureCallback<String> creationCallback) {
-    retryExecutor.schedule(new Runnable() {
-
-      @Override
-      public void run() {
-        Futures.addCallback(doRegister(discoverable), creationCallback, Threads.SAME_THREAD_EXECUTOR);
-      }
-    }, RETRY_MILLIS, TimeUnit.MILLISECONDS);
-  }
-
-
-  /**
-   * Generate unique node path for a given {@link Discoverable}.
-   * @param discoverable An instance of {@link Discoverable}.
-   * @return A node name based on the discoverable.
-   */
-  private String getNodePath(Discoverable discoverable) {
-    InetSocketAddress socketAddress = discoverable.getSocketAddress();
-    String node = Hashing.md5()
-                         .newHasher()
-                         .putBytes(socketAddress.getAddress().getAddress())
-                         .putInt(socketAddress.getPort())
-                         .hash().toString();
-
-    return String.format("/%s/%s", discoverable.getName(), node);
-  }
-
-  private Watcher createConnectionWatcher() {
-    return new Watcher() {
-      // Watcher is invoked from single event thread, hence safe to use normal mutable variable.
-      private boolean expired;
-
-      @Override
-      public void process(WatchedEvent event) {
-        if (event.getState() == Event.KeeperState.Expired) {
-          LOG.warn("ZK Session expired: {}", zkClient.getConnectString());
-          expired = true;
-        } else if (event.getState() == Event.KeeperState.SyncConnected && expired) {
-          LOG.info("Reconnected after expiration: {}", zkClient.getConnectString());
-          expired = false;
-
-          // Re-register all services
-          lock.lock();
-          try {
-            for (final Map.Entry<Discoverable, DiscoveryCancellable> entry : discoverables.entries()) {
-              LOG.info("Re-registering service: {}", entry.getKey());
-
-              // Must be non-blocking in here.
-              Futures.addCallback(doRegister(entry.getKey()), new FutureCallback<String>() {
-                @Override
-                public void onSuccess(String result) {
-                  // Updates the cancellable to the newly created sequential node.
-                  entry.getValue().setPath(result);
-                  LOG.debug("Service re-registered: {} {}", entry.getKey(), result);
-                }
-
-                @Override
-                public void onFailure(Throwable t) {
-                  // When node creation fails, there is no retry; the cancellable simply becomes a no-op.
-                  entry.getValue().setPath(null);
-                  LOG.error("Failed to re-register service: {}", entry.getKey(), t);
-                }
-              }, Threads.SAME_THREAD_EXECUTOR);
-            }
-          } finally {
-            lock.unlock();
-          }
-        }
-      }
-    };
-  }
-
-  /**
-   * Creates a CacheLoader that builds a live Iterable which watches instance changes for a given service.
-   */
-  private CacheLoader<String, Iterable<Discoverable>> createServiceLoader() {
-    return new CacheLoader<String, Iterable<Discoverable>>() {
-      @Override
-      public Iterable<Discoverable> load(String service) throws Exception {
-        // The atomic reference is to keep the resulting Iterable live. It always contains an
-        // immutable snapshot of the latest detected set of Discoverable.
-        final AtomicReference<Iterable<Discoverable>> iterable =
-              new AtomicReference<Iterable<Discoverable>>(ImmutableList.<Discoverable>of());
-        final String serviceBase = "/" + service;
-
-        // Watch for children changes in /service
-        ZKOperations.watchChildren(zkClient, serviceBase, new ZKOperations.ChildrenCallback() {
-          @Override
-          public void updated(NodeChildren nodeChildren) {
-            // Fetch data of all children nodes in parallel.
-            List<String> children = nodeChildren.getChildren();
-            List<OperationFuture<NodeData>> dataFutures = Lists.newArrayListWithCapacity(children.size());
-            for (String child : children) {
-              dataFutures.add(zkClient.getData(serviceBase + "/" + child));
-            }
-
-            // Update the service map when all fetching are done.
-            final ListenableFuture<List<NodeData>> fetchFuture = Futures.successfulAsList(dataFutures);
-            fetchFuture.addListener(new Runnable() {
-              @Override
-              public void run() {
-                ImmutableList.Builder<Discoverable> builder = ImmutableList.builder();
-                for (NodeData nodeData : Futures.getUnchecked(fetchFuture)) {
-                  // For successful fetch, decode the content.
-                  if (nodeData != null) {
-                    Discoverable discoverable = decode(nodeData.getData());
-                    if (discoverable != null) {
-                      builder.add(discoverable);
-                    }
-                  }
-                }
-                iterable.set(builder.build());
-              }
-            }, Threads.SAME_THREAD_EXECUTOR);
-          }
-        });
-
-        return new Iterable<Discoverable>() {
-          @Override
-          public Iterator<Discoverable> iterator() {
-            return iterable.get().iterator();
-          }
-        };
-      }
-    };
-  }
-
-  /**
-   * Static helper function for decoding an array of bytes into a {@link DiscoverableWrapper} object.
-   * @param bytes representing serialized {@link DiscoverableWrapper}
-   * @return null if bytes are null; else an instance of {@link DiscoverableWrapper}
-   */
-  private static Discoverable decode(byte[] bytes) {
-    if (bytes == null) {
-      return null;
-    }
-    String content = new String(bytes, Charsets.UTF_8);
-    return new GsonBuilder().registerTypeAdapter(Discoverable.class, new DiscoverableCodec())
-      .create()
-      .fromJson(content, Discoverable.class);
-  }
-
-  /**
-   * Static helper function for encoding an instance of {@link DiscoverableWrapper} into an array of bytes.
-   * @param discoverable An instance of {@link Discoverable}
-   * @return array of bytes representing an instance of <code>discoverable</code>
-   */
-  private static byte[] encode(Discoverable discoverable) {
-    return new GsonBuilder().registerTypeAdapter(DiscoverableWrapper.class, new DiscoverableCodec())
-      .create()
-      .toJson(discoverable, DiscoverableWrapper.class)
-      .getBytes(Charsets.UTF_8);
-  }
-
-  /**
-   * Inner class for cancelling (un-register) discovery service.
-   */
-  private final class DiscoveryCancellable implements Cancellable {
-
-    private final Discoverable discoverable;
-    private final AtomicBoolean cancelled;
-    private volatile String path;
-
-    DiscoveryCancellable(Discoverable discoverable) {
-      this.discoverable = discoverable;
-      this.cancelled = new AtomicBoolean();
-    }
-
-    /**
-     * Set the zk node path representing the ephemeral sequence node of this registered discoverable.
-     * Called from the ZK event thread when creation of the node has completed, either from normal registration or
-     * re-registration due to session expiration.
-     *
-     * @param path The path to ephemeral sequence node.
-     */
-    void setPath(String path) {
-      this.path = path;
-      if (cancelled.get() && path != null) {
-        // Simply delete the path if it's already cancelled.
-        // This covers the case where a session expiration triggered re-registration after this has been cancelled.
-        // The result is not checked, since nothing much could be done on error.
-        zkClient.delete(path);
-      }
-    }
-
-    @Override
-    public void cancel() {
-      if (!cancelled.compareAndSet(false, true)) {
-        return;
-      }
-
-      // Take a snapshot of the volatile path.
-      String path = this.path;
-
-      // If it is null, meaning cancel() is called before the ephemeral node is created, hence
-      // setPath() will be called in future (through zk callback when creation is completed)
-      // so that deletion will be done in setPath().
-      if (path == null) {
-        return;
-      }
-
-      // Remove this Cancellable from the map so that it won't be re-registered upon session expiration.
-      lock.lock();
-      try {
-        discoverables.remove(discoverable, this);
-      } finally {
-        lock.unlock();
-      }
-
-      // Delete the path. It's ok if the path does not exist
-      // (e.g. when the session expired and the node has not yet been re-created).
-      Futures.getUnchecked(ZKOperations.ignoreError(zkClient.delete(path),
-                                                    KeeperException.NoNodeException.class, path));
-      LOG.debug("Service unregistered: {} {}", discoverable, path);
-    }
-  }
-
-  /**
-   * SerDe for converting a {@link DiscoverableWrapper} into a JSON object
-   * or from a JSON object into {@link DiscoverableWrapper}.
-   */
-  private static final class DiscoverableCodec implements JsonSerializer<Discoverable>, JsonDeserializer<Discoverable> {
-
-    @Override
-    public Discoverable deserialize(JsonElement json, Type typeOfT,
-                                    JsonDeserializationContext context) throws JsonParseException {
-      JsonObject jsonObj = json.getAsJsonObject();
-      final String service = jsonObj.get("service").getAsString();
-      String hostname = jsonObj.get("hostname").getAsString();
-      int port = jsonObj.get("port").getAsInt();
-      final InetSocketAddress address = new InetSocketAddress(hostname, port);
-      return new Discoverable() {
-        @Override
-        public String getName() {
-          return service;
-        }
-
-        @Override
-        public InetSocketAddress getSocketAddress() {
-          return address;
-        }
-      };
-    }
-
-    @Override
-    public JsonElement serialize(Discoverable src, Type typeOfSrc, JsonSerializationContext context) {
-      JsonObject jsonObj = new JsonObject();
-      jsonObj.addProperty("service", src.getName());
-      jsonObj.addProperty("hostname", src.getSocketAddress().getHostName());
-      jsonObj.addProperty("port", src.getSocketAddress().getPort());
-      return jsonObj;
-    }
-  }
-}
-
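
For reference, a minimal usage sketch of the ZKDiscoveryService removed above, assuming an already started ZKClient named zkClient; the service name, host and port below are placeholders:

    ZKDiscoveryService discovery = new ZKDiscoveryService(zkClient);

    // Register an endpoint; keep the Cancellable for unregistering later.
    Cancellable cancellable = discovery.register(new Discoverable() {
      @Override
      public String getName() {
        return "web-app";
      }

      @Override
      public InetSocketAddress getSocketAddress() {
        return new InetSocketAddress("localhost", 8080);
      }
    });

    // discover() returns a live Iterable whose contents track endpoint changes.
    Iterable<Discoverable> endpoints = discovery.discover("web-app");

    // Unregister when done; the ephemeral node is deleted.
    cancellable.cancel();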

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/discovery-core/src/main/java/org/apache/twill/discovery/package-info.java
----------------------------------------------------------------------
diff --git a/discovery-core/src/main/java/org/apache/twill/discovery/package-info.java b/discovery-core/src/main/java/org/apache/twill/discovery/package-info.java
deleted file mode 100644
index a1d6e0c..0000000
--- a/discovery-core/src/main/java/org/apache/twill/discovery/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * Classes in this package provide service discovery implementations.
- */
-package org.apache.twill.discovery;

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/discovery-core/src/test/java/org/apache/twill/discovery/InMemoryDiscoveryServiceTest.java
----------------------------------------------------------------------
diff --git a/discovery-core/src/test/java/org/apache/twill/discovery/InMemoryDiscoveryServiceTest.java b/discovery-core/src/test/java/org/apache/twill/discovery/InMemoryDiscoveryServiceTest.java
deleted file mode 100644
index d8cc375..0000000
--- a/discovery-core/src/test/java/org/apache/twill/discovery/InMemoryDiscoveryServiceTest.java
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.twill.discovery;
-
-import org.apache.twill.common.Cancellable;
-import com.google.common.collect.Iterables;
-import org.junit.Assert;
-import org.junit.Test;
-
-import java.net.InetSocketAddress;
-import java.util.concurrent.TimeUnit;
-
-/**
- * Test memory based service discovery service.
- */
-public class InMemoryDiscoveryServiceTest {
-  private Cancellable register(DiscoveryService service, final String name, final String host, final int port) {
-    return service.register(new Discoverable() {
-      @Override
-      public String getName() {
-        return name;
-      }
-
-      @Override
-      public InetSocketAddress getSocketAddress() {
-        return new InetSocketAddress(host, port);
-      }
-    });
-  }
-
-  @Test
-  public void simpleDiscoverable() throws Exception {
-    DiscoveryService discoveryService = new InMemoryDiscoveryService();
-    DiscoveryServiceClient discoveryServiceClient = (DiscoveryServiceClient) discoveryService;
-
-    // Register one service running on one host:port
-    Cancellable cancellable = register(discoveryService, "foo", "localhost", 8090);
-    Iterable<Discoverable> discoverables = discoveryServiceClient.discover("foo");
-
-    // Discover that registered host:port.
-    Assert.assertTrue(Iterables.size(discoverables) == 1);
-
-    // Remove the service
-    cancellable.cancel();
-
-    // There should be no service.
-    discoverables = discoveryServiceClient.discover("foo");
-    TimeUnit.MILLISECONDS.sleep(100);
-    Assert.assertTrue(Iterables.size(discoverables) == 0);
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/discovery-core/src/test/java/org/apache/twill/discovery/ZKDiscoveryServiceTest.java
----------------------------------------------------------------------
diff --git a/discovery-core/src/test/java/org/apache/twill/discovery/ZKDiscoveryServiceTest.java b/discovery-core/src/test/java/org/apache/twill/discovery/ZKDiscoveryServiceTest.java
deleted file mode 100644
index feee8db..0000000
--- a/discovery-core/src/test/java/org/apache/twill/discovery/ZKDiscoveryServiceTest.java
+++ /dev/null
@@ -1,253 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.discovery;
-
-import org.apache.twill.common.Cancellable;
-import org.apache.twill.common.Services;
-import org.apache.twill.internal.zookeeper.InMemoryZKServer;
-import org.apache.twill.internal.zookeeper.KillZKSession;
-import org.apache.twill.zookeeper.RetryStrategies;
-import org.apache.twill.zookeeper.ZKClientService;
-import org.apache.twill.zookeeper.ZKClientServices;
-import org.apache.twill.zookeeper.ZKClients;
-import com.google.common.collect.Iterables;
-import com.google.common.collect.Lists;
-import com.google.common.util.concurrent.Futures;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.net.InetSocketAddress;
-import java.util.List;
-import java.util.concurrent.TimeUnit;
-
-/**
- * Test Zookeeper based discovery service.
- */
-public class ZKDiscoveryServiceTest {
-  private static final Logger LOG = LoggerFactory.getLogger(ZKDiscoveryServiceTest.class);
-
-  private static InMemoryZKServer zkServer;
-  private static ZKClientService zkClient;
-
-  @BeforeClass
-  public static void beforeClass() {
-    zkServer = InMemoryZKServer.builder().setTickTime(100000).build();
-    zkServer.startAndWait();
-
-    zkClient = ZKClientServices.delegate(
-      ZKClients.retryOnFailure(
-        ZKClients.reWatchOnExpire(
-          ZKClientService.Builder.of(zkServer.getConnectionStr()).build()),
-        RetryStrategies.fixDelay(1, TimeUnit.SECONDS)));
-    zkClient.startAndWait();
-  }
-
-  @AfterClass
-  public static void afterClass() {
-    Futures.getUnchecked(Services.chainStop(zkClient, zkServer));
-  }
-
-  private Cancellable register(DiscoveryService service, final String name, final String host, final int port) {
-    return service.register(new Discoverable() {
-      @Override
-      public String getName() {
-        return name;
-      }
-
-      @Override
-      public InetSocketAddress getSocketAddress() {
-        return new InetSocketAddress(host, port);
-      }
-    });
-  }
-
-
-  private boolean waitTillExpected(int expected, Iterable<Discoverable> discoverables) throws Exception {
-    for (int i = 0; i < 10; ++i) {
-      TimeUnit.MILLISECONDS.sleep(10);
-      if (Iterables.size(discoverables) == expected) {
-        return true;
-      }
-    }
-    return (Iterables.size(discoverables) == expected);
-  }
-
-  @Test (timeout = 5000)
-  public void testDoubleRegister() throws Exception {
-    ZKDiscoveryService discoveryService = new ZKDiscoveryService(zkClient);
-    DiscoveryServiceClient discoveryServiceClient = discoveryService;
-
-    // Register on the same host port, it shouldn't fail.
-    Cancellable cancellable = register(discoveryService, "test_double_reg", "localhost", 54321);
-    Cancellable cancellable2 = register(discoveryService, "test_double_reg", "localhost", 54321);
-
-    Iterable<Discoverable> discoverables = discoveryServiceClient.discover("test_double_reg");
-
-    Assert.assertTrue(waitTillExpected(1, discoverables));
-
-    cancellable.cancel();
-    cancellable2.cancel();
-
-    // Register again with two different clients, but killing session of the first one.
-    final ZKClientService zkClient2 = ZKClientServices.delegate(
-      ZKClients.retryOnFailure(
-        ZKClients.reWatchOnExpire(
-          ZKClientService.Builder.of(zkServer.getConnectionStr()).build()),
-        RetryStrategies.fixDelay(1, TimeUnit.SECONDS)));
-    zkClient2.startAndWait();
-
-    try {
-      ZKDiscoveryService discoveryService2 = new ZKDiscoveryService(zkClient2);
-      cancellable2 = register(discoveryService2, "test_multi_client", "localhost", 54321);
-
-      // Schedule a thread to shutdown zkClient2.
-      new Thread() {
-        @Override
-        public void run() {
-          try {
-            TimeUnit.SECONDS.sleep(2);
-            zkClient2.stopAndWait();
-          } catch (InterruptedException e) {
-            LOG.error(e.getMessage(), e);
-          }
-        }
-      }.start();
-
-      // This call would block until zkClient2 is shutdown.
-      cancellable = register(discoveryService, "test_multi_client", "localhost", 54321);
-      cancellable.cancel();
-
-    } finally {
-      zkClient2.stopAndWait();
-    }
-  }
-
-  @Test
-  public void testSessionExpires() throws Exception {
-    ZKDiscoveryService discoveryService = new ZKDiscoveryService(zkClient);
-    DiscoveryServiceClient discoveryServiceClient = discoveryService;
-
-    Cancellable cancellable = register(discoveryService, "test_expires", "localhost", 54321);
-
-    Iterable<Discoverable> discoverables = discoveryServiceClient.discover("test_expires");
-
-    // Discover that registered host:port.
-    Assert.assertTrue(waitTillExpected(1, discoverables));
-
-    KillZKSession.kill(zkClient.getZooKeeperSupplier().get(), zkServer.getConnectionStr(), 5000);
-
-    // Register one more endpoint to make sure state has been reflected after reconnection
-    Cancellable cancellable2 = register(discoveryService, "test_expires", "localhost", 54322);
-
-    // Reconnection would trigger re-registration.
-    Assert.assertTrue(waitTillExpected(2, discoverables));
-
-    cancellable.cancel();
-    cancellable2.cancel();
-
-    // Verify that both are now gone.
-    Assert.assertTrue(waitTillExpected(0, discoverables));
-  }
-
-  @Test
-  public void simpleDiscoverable() throws Exception {
-    DiscoveryService discoveryService = new ZKDiscoveryService(zkClient);
-    DiscoveryServiceClient discoveryServiceClient = new ZKDiscoveryService(zkClient);
-
-    // Register one service running on one host:port
-    Cancellable cancellable = register(discoveryService, "foo", "localhost", 8090);
-    Iterable<Discoverable> discoverables = discoveryServiceClient.discover("foo");
-
-    // Discover that registered host:port.
-    Assert.assertTrue(waitTillExpected(1, discoverables));
-
-    // Remove the service
-    cancellable.cancel();
-
-    // There should be no service.
-
-    discoverables = discoveryServiceClient.discover("foo");
-
-    Assert.assertTrue(waitTillExpected(0, discoverables));
-  }
-
-  @Test
-  public void manySameDiscoverable() throws Exception {
-    List<Cancellable> cancellables = Lists.newArrayList();
-    DiscoveryService discoveryService = new ZKDiscoveryService(zkClient);
-    DiscoveryServiceClient discoveryServiceClient = new ZKDiscoveryService(zkClient);
-
-    cancellables.add(register(discoveryService, "manyDiscoverable", "localhost", 1));
-    cancellables.add(register(discoveryService, "manyDiscoverable", "localhost", 2));
-    cancellables.add(register(discoveryService, "manyDiscoverable", "localhost", 3));
-    cancellables.add(register(discoveryService, "manyDiscoverable", "localhost", 4));
-    cancellables.add(register(discoveryService, "manyDiscoverable", "localhost", 5));
-
-    Iterable<Discoverable> discoverables = discoveryServiceClient.discover("manyDiscoverable");
-    Assert.assertTrue(waitTillExpected(5, discoverables));
-
-    for (int i = 0; i < 5; i++) {
-      cancellables.get(i).cancel();
-      Assert.assertTrue(waitTillExpected(4 - i, discoverables));
-    }
-  }
-
-  @Test
-  public void multiServiceDiscoverable() throws Exception {
-    List<Cancellable> cancellables = Lists.newArrayList();
-    DiscoveryService discoveryService = new ZKDiscoveryService(zkClient);
-    DiscoveryServiceClient discoveryServiceClient = new ZKDiscoveryService(zkClient);
-
-    cancellables.add(register(discoveryService, "service1", "localhost", 1));
-    cancellables.add(register(discoveryService, "service1", "localhost", 2));
-    cancellables.add(register(discoveryService, "service1", "localhost", 3));
-    cancellables.add(register(discoveryService, "service1", "localhost", 4));
-    cancellables.add(register(discoveryService, "service1", "localhost", 5));
-
-    cancellables.add(register(discoveryService, "service2", "localhost", 1));
-    cancellables.add(register(discoveryService, "service2", "localhost", 2));
-    cancellables.add(register(discoveryService, "service2", "localhost", 3));
-
-    cancellables.add(register(discoveryService, "service3", "localhost", 1));
-    cancellables.add(register(discoveryService, "service3", "localhost", 2));
-
-    Iterable<Discoverable> discoverables = discoveryServiceClient.discover("service1");
-    Assert.assertTrue(waitTillExpected(5, discoverables));
-
-    discoverables = discoveryServiceClient.discover("service2");
-    Assert.assertTrue(waitTillExpected(3, discoverables));
-
-    discoverables = discoveryServiceClient.discover("service3");
-    Assert.assertTrue(waitTillExpected(2, discoverables));
-
-    cancellables.add(register(discoveryService, "service3", "localhost", 3));
-    Assert.assertTrue(waitTillExpected(3, discoverables)); // Shows live iterator.
-
-    for (Cancellable cancellable : cancellables) {
-      cancellable.cancel();
-    }
-
-    Assert.assertTrue(waitTillExpected(0, discoveryServiceClient.discover("service1")));
-    Assert.assertTrue(waitTillExpected(0, discoveryServiceClient.discover("service2")));
-    Assert.assertTrue(waitTillExpected(0, discoveryServiceClient.discover("service3")));
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/discovery-core/src/test/resources/logback-test.xml
----------------------------------------------------------------------
diff --git a/discovery-core/src/test/resources/logback-test.xml b/discovery-core/src/test/resources/logback-test.xml
deleted file mode 100644
index 2615cb4..0000000
--- a/discovery-core/src/test/resources/logback-test.xml
+++ /dev/null
@@ -1,17 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-
-<!-- Default logback configuration for twill library -->
-<configuration>
-    <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
-        <encoder>
-            <pattern>%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n</pattern>
-        </encoder>
-    </appender>
-
-    <logger name="org.apache.twill" level="DEBUG" />
-
-    <root level="WARN">
-        <appender-ref ref="STDOUT"/>
-    </root>
-
-</configuration>


[20/28] Making maven site works.

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-api/src/main/java/org/apache/twill/api/logging/LogEntry.java
----------------------------------------------------------------------
diff --git a/twill-api/src/main/java/org/apache/twill/api/logging/LogEntry.java b/twill-api/src/main/java/org/apache/twill/api/logging/LogEntry.java
new file mode 100644
index 0000000..4995328
--- /dev/null
+++ b/twill-api/src/main/java/org/apache/twill/api/logging/LogEntry.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.api.logging;
+
+/**
+ * Represents a log entry emitted by application.
+ */
+public interface LogEntry {
+
+  /**
+   * Log level.
+   */
+  enum Level {
+    FATAL,
+    ERROR,
+    WARN,
+    INFO,
+    DEBUG,
+    TRACE
+  }
+
+  String getLoggerName();
+
+  String getHost();
+
+  long getTimestamp();
+
+  Level getLogLevel();
+
+  String getSourceClassName();
+
+  String getSourceMethodName();
+
+  String getFileName();
+
+  int getLineNumber();
+
+  String getThreadName();
+
+  String getMessage();
+
+  StackTraceElement[] getStackTraces();
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-api/src/main/java/org/apache/twill/api/logging/LogHandler.java
----------------------------------------------------------------------
diff --git a/twill-api/src/main/java/org/apache/twill/api/logging/LogHandler.java b/twill-api/src/main/java/org/apache/twill/api/logging/LogHandler.java
new file mode 100644
index 0000000..afded19
--- /dev/null
+++ b/twill-api/src/main/java/org/apache/twill/api/logging/LogHandler.java
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.api.logging;
+
+/**
+ * A handler for receiving {@link LogEntry} emitted by the application.
+ */
+public interface LogHandler {
+
+  void onLog(LogEntry logEntry);
+}
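
As a sketch of how this callback is meant to be implemented, a handler that keeps only WARN-or-more-severe entries could look like the following; the threshold and printing to System.err are illustrative choices, not part of this change:

    public final class WarnAndAboveLogHandler implements LogHandler {
      @Override
      public void onLog(LogEntry logEntry) {
        // LogEntry.Level is declared from FATAL down to TRACE, so a smaller ordinal means more severe.
        if (logEntry.getLogLevel().ordinal() <= LogEntry.Level.WARN.ordinal()) {
          System.err.println(logEntry.getHost() + " " + logEntry.getLogLevel() + " "
                               + logEntry.getLoggerName() + " - " + logEntry.getMessage());
        }
      }
    }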

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-api/src/main/java/org/apache/twill/api/logging/PrinterLogHandler.java
----------------------------------------------------------------------
diff --git a/twill-api/src/main/java/org/apache/twill/api/logging/PrinterLogHandler.java b/twill-api/src/main/java/org/apache/twill/api/logging/PrinterLogHandler.java
new file mode 100644
index 0000000..71a2bca
--- /dev/null
+++ b/twill-api/src/main/java/org/apache/twill/api/logging/PrinterLogHandler.java
@@ -0,0 +1,101 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.api.logging;
+
+import com.google.common.base.Splitter;
+
+import java.io.PrintWriter;
+import java.text.DateFormat;
+import java.text.SimpleDateFormat;
+import java.util.Date;
+import java.util.Formatter;
+import java.util.TimeZone;
+
+/**
+ * A {@link LogHandler} that prints the {@link LogEntry} through a {@link PrintWriter}.
+ */
+public final class PrinterLogHandler implements LogHandler {
+
+  private static final ThreadLocal<DateFormat> DATE_FORMAT = new ThreadLocal<DateFormat>() {
+    @Override
+    protected DateFormat initialValue() {
+      DateFormat format = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss,SSS'Z'");
+      format.setTimeZone(TimeZone.getTimeZone("UTC"));
+      return format;
+    }
+  };
+
+  private final PrintWriter writer;
+  private final Formatter formatter;
+
+  /**
+   * Creates a {@link PrinterLogHandler} which has {@link LogEntry} written to the given {@link PrintWriter}.
+   * @param writer The writer that log entries will be written to.
+   */
+  public PrinterLogHandler(PrintWriter writer) {
+    this.writer = writer;
+    this.formatter = new Formatter(writer);
+  }
+
+  @Override
+  public void onLog(LogEntry logEntry) {
+    String utc = timestampToUTC(logEntry.getTimestamp());
+
+    formatter.format("%s %-5s %s [%s] [%s] %s:%s(%s:%d) - %s\n",
+                     utc,
+                     logEntry.getLogLevel().name(),
+                     getShortenLoggerName(logEntry.getLoggerName()),
+                     logEntry.getHost(),
+                     logEntry.getThreadName(),
+                     getSimpleClassName(logEntry.getSourceClassName()),
+                     logEntry.getSourceMethodName(),
+                     logEntry.getFileName(),
+                     logEntry.getLineNumber(),
+                     logEntry.getMessage());
+    formatter.flush();
+
+    StackTraceElement[] stackTraces = logEntry.getStackTraces();
+    if (stackTraces != null) {
+      for (StackTraceElement stackTrace : stackTraces) {
+        writer.append("\tat ").append(stackTrace.toString());
+        writer.println();
+      }
+      writer.flush();
+    }
+  }
+
+  private String timestampToUTC(long timestamp) {
+    return DATE_FORMAT.get().format(new Date(timestamp));
+  }
+
+  private String getShortenLoggerName(String loggerName) {
+    StringBuilder builder = new StringBuilder();
+    String previous = null;
+    for (String part : Splitter.on('.').split(loggerName)) {
+      if (previous != null) {
+        builder.append(previous.charAt(0)).append('.');
+      }
+      previous = part;
+    }
+    return builder.append(previous).toString();
+  }
+
+  private String getSimpleClassName(String className) {
+    return className.substring(className.lastIndexOf('.') + 1);
+  }
+}
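
A short usage sketch for this handler; printing to System.out is an illustrative choice, and the addLogHandler wiring is only an assumption about the preparer API exposed elsewhere in this change:

    // Prints application log entries to standard out, auto-flushing on each line.
    LogHandler handler = new PrinterLogHandler(new PrintWriter(System.out, true));

    // Assumed wiring through the preparer API (not part of this hunk):
    // runner.prepare(new MyRunnable()).addLogHandler(handler).start();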

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-api/src/main/java/org/apache/twill/api/logging/package-info.java
----------------------------------------------------------------------
diff --git a/twill-api/src/main/java/org/apache/twill/api/logging/package-info.java b/twill-api/src/main/java/org/apache/twill/api/logging/package-info.java
new file mode 100644
index 0000000..e325c18
--- /dev/null
+++ b/twill-api/src/main/java/org/apache/twill/api/logging/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * This package contains classes for handling logging events.
+ */
+package org.apache.twill.api.logging;

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-api/src/main/java/org/apache/twill/api/package-info.java
----------------------------------------------------------------------
diff --git a/twill-api/src/main/java/org/apache/twill/api/package-info.java b/twill-api/src/main/java/org/apache/twill/api/package-info.java
new file mode 100644
index 0000000..5d9df6b
--- /dev/null
+++ b/twill-api/src/main/java/org/apache/twill/api/package-info.java
@@ -0,0 +1,21 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * Classes in this package provide the core functionality of the Twill library.
+ */
+package org.apache.twill.api;

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-api/src/main/java/org/apache/twill/internal/DefaultEventHandlerSpecification.java
----------------------------------------------------------------------
diff --git a/twill-api/src/main/java/org/apache/twill/internal/DefaultEventHandlerSpecification.java b/twill-api/src/main/java/org/apache/twill/internal/DefaultEventHandlerSpecification.java
new file mode 100644
index 0000000..df21400
--- /dev/null
+++ b/twill-api/src/main/java/org/apache/twill/internal/DefaultEventHandlerSpecification.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal;
+
+import org.apache.twill.api.EventHandler;
+import org.apache.twill.api.EventHandlerSpecification;
+import com.google.common.collect.ImmutableMap;
+
+import java.util.Map;
+
+/**
+ * Straightforward implementation of {@link EventHandlerSpecification}.
+ */
+public class DefaultEventHandlerSpecification implements EventHandlerSpecification {
+
+  private final String className;
+  private final Map<String, String> configs;
+
+  public DefaultEventHandlerSpecification(String className, Map<String, String> configs) {
+    this.className = className;
+    this.configs = configs;
+  }
+
+  public DefaultEventHandlerSpecification(EventHandler eventHandler) {
+    EventHandlerSpecification spec = eventHandler.configure();
+    this.className = eventHandler.getClass().getName();
+    this.configs = ImmutableMap.copyOf(spec.getConfigs());
+  }
+
+  @Override
+  public String getClassName() {
+    return className;
+  }
+
+  @Override
+  public Map<String, String> getConfigs() {
+    return configs;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-api/src/main/java/org/apache/twill/internal/DefaultLocalFile.java
----------------------------------------------------------------------
diff --git a/twill-api/src/main/java/org/apache/twill/internal/DefaultLocalFile.java b/twill-api/src/main/java/org/apache/twill/internal/DefaultLocalFile.java
new file mode 100644
index 0000000..e43c0c0
--- /dev/null
+++ b/twill-api/src/main/java/org/apache/twill/internal/DefaultLocalFile.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal;
+
+import org.apache.twill.api.LocalFile;
+
+import javax.annotation.Nullable;
+import java.net.URI;
+
+/**
+ * A straightforward implementation of {@link LocalFile}.
+ */
+public final class DefaultLocalFile implements LocalFile {
+
+  private final String name;
+  private final URI uri;
+  private final long lastModified;
+  private final long size;
+  private final boolean archive;
+  private final String pattern;
+
+  public DefaultLocalFile(String name, URI uri, long lastModified,
+                          long size, boolean archive, @Nullable String pattern) {
+    this.name = name;
+    this.uri = uri;
+    this.lastModified = lastModified;
+    this.size = size;
+    this.archive = archive;
+    this.pattern = pattern;
+  }
+
+  @Override
+  public String getName() {
+    return name;
+  }
+
+  @Override
+  public URI getURI() {
+    return uri;
+  }
+
+  @Override
+  public long getLastModified() {
+    return lastModified;
+  }
+
+  @Override
+  public long getSize() {
+    return size;
+  }
+
+  @Override
+  public boolean isArchive() {
+    return archive;
+  }
+
+  @Override
+  public String getPattern() {
+    return pattern;
+  }
+}
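
A quick sketch of describing a local jar with this class; the path below is a placeholder:

    File jar = new File("/tmp/myapp.jar");  // placeholder path
    LocalFile localFile = new DefaultLocalFile("myapp.jar", jar.toURI(),
                                               jar.lastModified(), jar.length(),
                                               false,   // not an archive
                                               null);   // no extraction pattern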

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-api/src/main/java/org/apache/twill/internal/DefaultResourceReport.java
----------------------------------------------------------------------
diff --git a/twill-api/src/main/java/org/apache/twill/internal/DefaultResourceReport.java b/twill-api/src/main/java/org/apache/twill/internal/DefaultResourceReport.java
new file mode 100644
index 0000000..c4c8a29
--- /dev/null
+++ b/twill-api/src/main/java/org/apache/twill/internal/DefaultResourceReport.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal;
+
+import org.apache.twill.api.ResourceReport;
+import org.apache.twill.api.TwillRunResources;
+import com.google.common.collect.HashMultimap;
+import com.google.common.collect.Multimaps;
+import com.google.common.collect.SetMultimap;
+
+import java.util.Collection;
+import java.util.Map;
+
+/**
+ * Implementation of {@link org.apache.twill.api.ResourceReport} with some
+ * additional methods for maintaining the report.
+ */
+public final class DefaultResourceReport implements ResourceReport {
+  private final SetMultimap<String, TwillRunResources> usedResources;
+  private final TwillRunResources appMasterResources;
+  private final String applicationId;
+
+  public DefaultResourceReport(String applicationId, TwillRunResources masterResources) {
+    this.applicationId = applicationId;
+    this.appMasterResources = masterResources;
+    this.usedResources = HashMultimap.create();
+  }
+
+  public DefaultResourceReport(String applicationId, TwillRunResources masterResources,
+                               Map<String, Collection<TwillRunResources>> resources) {
+    this.applicationId = applicationId;
+    this.appMasterResources = masterResources;
+    this.usedResources = HashMultimap.create();
+    for (Map.Entry<String, Collection<TwillRunResources>> entry : resources.entrySet()) {
+      this.usedResources.putAll(entry.getKey(), entry.getValue());
+    }
+  }
+
+  /**
+   * Add resources used by an instance of the runnable.
+   *
+   * @param runnableName name of runnable.
+   * @param resources resources to add.
+   */
+  public void addRunResources(String runnableName, TwillRunResources resources) {
+    usedResources.put(runnableName, resources);
+  }
+
+  /**
+   * Remove the resource corresponding to the given runnable and container.
+   *
+   * @param runnableName name of runnable.
+   * @param containerId container id of the runnable.
+   */
+  public void removeRunnableResources(String runnableName, String containerId) {
+    TwillRunResources toRemove = null;
+    // could be faster if usedResources was a Table, but that makes returning the
+    // report a little more complex, and this does not need to be terribly fast.
+    for (TwillRunResources resources : usedResources.get(runnableName)) {
+      if (resources.getContainerId().equals(containerId)) {
+        toRemove = resources;
+        break;
+      }
+    }
+    usedResources.remove(runnableName, toRemove);
+  }
+
+  /**
+   * Get all the run resources being used by all instances of the specified runnable.
+   *
+   * @param runnableName the runnable name.
+   * @return resources being used by all instances of the runnable.
+   */
+  @Override
+  public Collection<TwillRunResources> getRunnableResources(String runnableName) {
+    return usedResources.get(runnableName);
+  }
+
+  /**
+   * Get all the run resources being used across all runnables.
+   *
+   * @return all run resources used by all instances of all runnables.
+   */
+  @Override
+  public Map<String, Collection<TwillRunResources>> getResources() {
+    return Multimaps.unmodifiableSetMultimap(usedResources).asMap();
+  }
+
+  /**
+   * Get the resources application master is using.
+   *
+   * @return resources being used by the application master.
+   */
+  @Override
+  public TwillRunResources getAppMasterResources() {
+    return appMasterResources;
+  }
+
+  /**
+   * Get the id of the application master.
+   *
+   * @return id of the application master.
+   */
+  @Override
+  public String getApplicationId() {
+    return applicationId;
+  }
+}
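
To illustrate how the report is maintained, a hedged sketch using the DefaultTwillRunResources implementation added later in this change; the application id, container ids and resource numbers are placeholders:

    TwillRunResources master = new DefaultTwillRunResources(0, "container_0001", 1, 512, "host-a");
    DefaultResourceReport report = new DefaultResourceReport("application_1234_0001", master);

    // Record resources used by one instance of a runnable, then drop them again by container id.
    report.addRunResources("worker", new DefaultTwillRunResources(0, "container_0002", 2, 1024, "host-b"));
    report.removeRunnableResources("worker", "container_0002");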

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-api/src/main/java/org/apache/twill/internal/DefaultResourceSpecification.java
----------------------------------------------------------------------
diff --git a/twill-api/src/main/java/org/apache/twill/internal/DefaultResourceSpecification.java b/twill-api/src/main/java/org/apache/twill/internal/DefaultResourceSpecification.java
new file mode 100644
index 0000000..1327ce5
--- /dev/null
+++ b/twill-api/src/main/java/org/apache/twill/internal/DefaultResourceSpecification.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal;
+
+import org.apache.twill.api.ResourceSpecification;
+
+/**
+ * Straightforward implementation of {@link org.apache.twill.api.ResourceSpecification}.
+ */
+public final class DefaultResourceSpecification implements ResourceSpecification {
+  private final int virtualCores;
+  private final int memorySize;
+  private final int instances;
+  private final int uplink;
+  private final int downlink;
+
+  public DefaultResourceSpecification(int virtualCores, int memorySize, int instances, int uplink, int downlink) {
+    this.virtualCores = virtualCores;
+    this.memorySize = memorySize;
+    this.instances = instances;
+    this.uplink = uplink;
+    this.downlink = downlink;
+  }
+
+  @Deprecated
+  @Override
+  public int getCores() {
+    return virtualCores;
+  }
+
+  @Override
+  public int getVirtualCores() {
+    return virtualCores;
+  }
+
+  @Override
+  public int getMemorySize() {
+    return memorySize;
+  }
+
+  @Override
+  public int getInstances() {
+    return instances;
+  }
+
+  @Override
+  public int getUplink() {
+    return uplink;
+  }
+
+  @Override
+  public int getDownlink() {
+    return downlink;
+  }
+}
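
For reference, a spec for two instances at one virtual core and 512 MB each can be constructed directly; using -1 for uplink and downlink as an "unspecified" placeholder is an assumption here, not something this class enforces:

    ResourceSpecification spec = new DefaultResourceSpecification(1, 512, 2, -1, -1);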

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-api/src/main/java/org/apache/twill/internal/DefaultRuntimeSpecification.java
----------------------------------------------------------------------
diff --git a/twill-api/src/main/java/org/apache/twill/internal/DefaultRuntimeSpecification.java b/twill-api/src/main/java/org/apache/twill/internal/DefaultRuntimeSpecification.java
new file mode 100644
index 0000000..c4f496e
--- /dev/null
+++ b/twill-api/src/main/java/org/apache/twill/internal/DefaultRuntimeSpecification.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal;
+
+import org.apache.twill.api.LocalFile;
+import org.apache.twill.api.ResourceSpecification;
+import org.apache.twill.api.RuntimeSpecification;
+import org.apache.twill.api.TwillRunnableSpecification;
+import com.google.common.collect.ImmutableList;
+
+import java.util.Collection;
+
+/**
+ * Straightforward implementation of {@link RuntimeSpecification}.
+ */
+public final class DefaultRuntimeSpecification implements RuntimeSpecification {
+
+  private final String name;
+  private final TwillRunnableSpecification runnableSpec;
+  private final ResourceSpecification resourceSpec;
+  private final Collection<LocalFile> localFiles;
+
+  public DefaultRuntimeSpecification(String name,
+                                     TwillRunnableSpecification runnableSpec,
+                                     ResourceSpecification resourceSpec,
+                                     Collection<LocalFile> localFiles) {
+    this.name = name;
+    this.runnableSpec = runnableSpec;
+    this.resourceSpec = resourceSpec;
+    this.localFiles = ImmutableList.copyOf(localFiles);
+  }
+
+  @Override
+  public String getName() {
+    return name;
+  }
+
+  @Override
+  public TwillRunnableSpecification getRunnableSpecification() {
+    return runnableSpec;
+  }
+
+  @Override
+  public ResourceSpecification getResourceSpecification() {
+    return resourceSpec;
+  }
+
+  @Override
+  public Collection<LocalFile> getLocalFiles() {
+    return localFiles;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-api/src/main/java/org/apache/twill/internal/DefaultTwillRunResources.java
----------------------------------------------------------------------
diff --git a/twill-api/src/main/java/org/apache/twill/internal/DefaultTwillRunResources.java b/twill-api/src/main/java/org/apache/twill/internal/DefaultTwillRunResources.java
new file mode 100644
index 0000000..bd8f8f5
--- /dev/null
+++ b/twill-api/src/main/java/org/apache/twill/internal/DefaultTwillRunResources.java
@@ -0,0 +1,106 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal;
+
+import org.apache.twill.api.TwillRunResources;
+
+/**
+ *  Straightforward implementation of {@link org.apache.twill.api.TwillRunResources}.
+ */
+public class DefaultTwillRunResources implements TwillRunResources {
+  private final String containerId;
+  private final int instanceId;
+  private final int virtualCores;
+  private final int memoryMB;
+  private final String host;
+
+  public DefaultTwillRunResources(int instanceId, String containerId,
+                                  int cores, int memoryMB, String host) {
+    this.instanceId = instanceId;
+    this.containerId = containerId;
+    this.virtualCores = cores;
+    this.memoryMB = memoryMB;
+    this.host = host;
+  }
+
+  /**
+   * @return instance id of the runnable.
+   */
+  @Override
+  public int getInstanceId() {
+    return instanceId;
+  }
+
+  /**
+   * @return id of the container the runnable is running in.
+   */
+  @Override
+  public String getContainerId() {
+    return containerId;
+  }
+
+  /**
+   * @return number of virtual cores the runnable is allowed to use. YARN must be at least v2.1.0 and
+   *   it must be configured to use cgroups for this limit to actually be enforced.
+   */
+  @Override
+  public int getVirtualCores() {
+    return virtualCores;
+  }
+
+  /**
+   * @return amount of memory in MB the runnable is allowed to use.
+   */
+  @Override
+  public int getMemoryMB() {
+    return memoryMB;
+  }
+
+  /**
+   * @return the host the runnable is running on.
+   */
+  @Override
+  public String getHost() {
+    return host;
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (!(o instanceof TwillRunResources)) {
+      return false;
+    }
+    TwillRunResources other = (TwillRunResources) o;
+    return (instanceId == other.getInstanceId()) &&
+      containerId.equals(other.getContainerId()) &&
+      host.equals(other.getHost()) &&
+      (virtualCores == other.getVirtualCores()) &&
+      (memoryMB == other.getMemoryMB());
+  }
+
+  @Override
+  public int hashCode() {
+    int hash = 17;
+    hash = 31 * hash + containerId.hashCode();
+    hash = 31 * hash + host.hashCode();
+    hash = 31 * hash + instanceId;
+    hash = 31 * hash + virtualCores;
+    hash = 31 * hash + memoryMB;
+    return hash;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-api/src/main/java/org/apache/twill/internal/DefaultTwillRunnableSpecification.java
----------------------------------------------------------------------
diff --git a/twill-api/src/main/java/org/apache/twill/internal/DefaultTwillRunnableSpecification.java b/twill-api/src/main/java/org/apache/twill/internal/DefaultTwillRunnableSpecification.java
new file mode 100644
index 0000000..14ea7f5
--- /dev/null
+++ b/twill-api/src/main/java/org/apache/twill/internal/DefaultTwillRunnableSpecification.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal;
+
+import org.apache.twill.api.TwillRunnableSpecification;
+import com.google.common.collect.ImmutableMap;
+
+import java.util.Map;
+
+/**
+ * Straightforward implementation of {@link org.apache.twill.api.TwillRunnableSpecification}.
+ */
+public final class DefaultTwillRunnableSpecification implements TwillRunnableSpecification {
+
+  private final String className;
+  private final String name;
+  private final Map<String, String> arguments;
+
+  public DefaultTwillRunnableSpecification(String className, String name, Map<String, String> arguments) {
+    this.className = className;
+    this.name = name;
+    this.arguments = ImmutableMap.copyOf(arguments);
+  }
+
+  public DefaultTwillRunnableSpecification(String className, TwillRunnableSpecification other) {
+    this.className = className;
+    this.name = other.getName();
+    this.arguments = ImmutableMap.copyOf(other.getConfigs());
+  }
+
+  @Override
+  public String getClassName() {
+    return className;
+  }
+
+  @Override
+  public String getName() {
+    return name;
+  }
+
+  @Override
+  public Map<String, String> getConfigs() {
+    return arguments;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-api/src/main/java/org/apache/twill/internal/DefaultTwillSpecification.java
----------------------------------------------------------------------
diff --git a/twill-api/src/main/java/org/apache/twill/internal/DefaultTwillSpecification.java b/twill-api/src/main/java/org/apache/twill/internal/DefaultTwillSpecification.java
new file mode 100644
index 0000000..6bb2b15
--- /dev/null
+++ b/twill-api/src/main/java/org/apache/twill/internal/DefaultTwillSpecification.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal;
+
+import org.apache.twill.api.EventHandlerSpecification;
+import org.apache.twill.api.RuntimeSpecification;
+import org.apache.twill.api.TwillSpecification;
+import com.google.common.base.Objects;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
+
+import javax.annotation.Nullable;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * Straightforward implementation of {@link org.apache.twill.api.TwillSpecification}.
+ */
+public final class DefaultTwillSpecification implements TwillSpecification {
+
+  private final String name;
+  private final Map<String, RuntimeSpecification> runnables;
+  private final List<Order> orders;
+  private final EventHandlerSpecification eventHandler;
+
+  public DefaultTwillSpecification(String name, Map<String, RuntimeSpecification> runnables,
+                                   List<Order> orders, EventHandlerSpecification eventHandler) {
+    this.name = name;
+    this.runnables = ImmutableMap.copyOf(runnables);
+    this.orders = ImmutableList.copyOf(orders);
+    this.eventHandler = eventHandler;
+  }
+
+  @Override
+  public String getName() {
+    return name;
+  }
+
+  @Override
+  public Map<String, RuntimeSpecification> getRunnables() {
+    return runnables;
+  }
+
+  @Override
+  public List<Order> getOrders() {
+    return orders;
+  }
+
+  @Nullable
+  @Override
+  public EventHandlerSpecification getEventHandler() {
+    return eventHandler;
+  }
+
+  /**
+   * Straightforward implementation of {@link Order}.
+   */
+  public static final class DefaultOrder implements Order {
+
+    private final Set<String> names;
+    private final Type type;
+
+    public DefaultOrder(Iterable<String> names, Type type) {
+      this.names = ImmutableSet.copyOf(names);
+      this.type = type;
+    }
+
+    @Override
+    public Set<String> getNames() {
+      return names;
+    }
+
+    @Override
+    public Type getType() {
+      return type;
+    }
+
+    @Override
+    public String toString() {
+      return Objects.toStringHelper(this)
+        .add("names", names)
+        .add("type", type)
+        .toString();
+    }
+  }
+}
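
[Editor's note] Putting the specification classes above together, a minimal sketch of building an application specification by hand follows. The runnable class name, application name and configuration values are hypothetical, and the order type used (STARTED) is assumed to be one of the types defined by TwillSpecification.Order:

// Illustrative sketch only -- names and values are hypothetical.
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import org.apache.twill.api.LocalFile;
import org.apache.twill.api.RuntimeSpecification;
import org.apache.twill.api.TwillRunnableSpecification;
import org.apache.twill.api.TwillSpecification;
import org.apache.twill.internal.DefaultResourceSpecification;
import org.apache.twill.internal.DefaultRuntimeSpecification;
import org.apache.twill.internal.DefaultTwillRunnableSpecification;
import org.apache.twill.internal.DefaultTwillSpecification;

public class TwillSpecExample {
  public static void main(String[] args) {
    TwillRunnableSpecification runnable = new DefaultTwillRunnableSpecification(
      "com.example.EchoRunnable", "echo", ImmutableMap.of("port", "8080"));

    RuntimeSpecification runtime = new DefaultRuntimeSpecification(
      "echo", runnable, new DefaultResourceSpecification(1, 512, 1, 100, 100),
      ImmutableList.<LocalFile>of());

    TwillSpecification spec = new DefaultTwillSpecification(
      "echo-app", ImmutableMap.of("echo", runtime),
      ImmutableList.<TwillSpecification.Order>of(
        new DefaultTwillSpecification.DefaultOrder(ImmutableList.of("echo"),
                                                   TwillSpecification.Order.Type.STARTED)),
      null);  // no EventHandlerSpecification

    System.out.println(spec.getName() + " -> " + spec.getRunnables().keySet());
  }
}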

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-api/src/main/java/org/apache/twill/internal/RunIds.java
----------------------------------------------------------------------
diff --git a/twill-api/src/main/java/org/apache/twill/internal/RunIds.java b/twill-api/src/main/java/org/apache/twill/internal/RunIds.java
new file mode 100644
index 0000000..7249d81
--- /dev/null
+++ b/twill-api/src/main/java/org/apache/twill/internal/RunIds.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal;
+
+import org.apache.twill.api.RunId;
+import com.google.common.base.Preconditions;
+
+import java.util.UUID;
+
+/**
+ * Factory class for creating instances of {@link org.apache.twill.api.RunId}.
+ */
+public final class RunIds {
+
+  public static RunId generate() {
+    return new RunIdImpl(UUID.randomUUID().toString());
+  }
+
+  public static RunId fromString(String str) {
+    return new RunIdImpl(str);
+  }
+
+  private RunIds() {
+  }
+
+  private static final class RunIdImpl implements RunId {
+
+    final String id;
+
+    private RunIdImpl(String id) {
+      Preconditions.checkArgument(id != null, "RunId cannot be null.");
+      this.id = id;
+    }
+
+    @Override
+    public String getId() {
+      return id;
+    }
+
+    @Override
+    public String toString() {
+      return getId();
+    }
+
+    @Override
+    public boolean equals(Object other) {
+      if (this == other) {
+        return true;
+      }
+      if (!(other instanceof RunId)) {
+        return false;
+      }
+      return id.equals(((RunId)other).getId());
+    }
+
+    @Override
+    public int hashCode() {
+      return id.hashCode();
+    }
+  }
+}
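
[Editor's note] A short sketch of the factory in use (illustrative only):

// Illustrative sketch only.
import org.apache.twill.api.RunId;
import org.apache.twill.internal.RunIds;

public class RunIdExample {
  public static void main(String[] args) {
    RunId runId = RunIds.generate();                 // backed by a random UUID string
    RunId parsed = RunIds.fromString(runId.getId()); // round-trip through its string form
    System.out.println(runId + " equals " + parsed + ": " + runId.equals(parsed));
  }
}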

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-api/src/main/java/org/apache/twill/internal/package-info.java
----------------------------------------------------------------------
diff --git a/twill-api/src/main/java/org/apache/twill/internal/package-info.java b/twill-api/src/main/java/org/apache/twill/internal/package-info.java
new file mode 100644
index 0000000..8af8362
--- /dev/null
+++ b/twill-api/src/main/java/org/apache/twill/internal/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Internal classes for Twill API.
+ */
+package org.apache.twill.internal;

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-common/pom.xml
----------------------------------------------------------------------
diff --git a/twill-common/pom.xml b/twill-common/pom.xml
new file mode 100644
index 0000000..a4372f6
--- /dev/null
+++ b/twill-common/pom.xml
@@ -0,0 +1,51 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <parent>
+        <artifactId>twill-parent</artifactId>
+        <groupId>org.apache.twill</groupId>
+        <version>0.1.0-SNAPSHOT</version>
+    </parent>
+    <modelVersion>4.0.0</modelVersion>
+
+    <artifactId>twill-common</artifactId>
+    <name>Twill common library</name>
+
+    <dependencies>
+        <dependency>
+            <groupId>com.google.guava</groupId>
+            <artifactId>guava</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>com.google.code.findbugs</groupId>
+            <artifactId>jsr305</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.slf4j</groupId>
+            <artifactId>slf4j-api</artifactId>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>junit</groupId>
+            <artifactId>junit</artifactId>
+        </dependency>
+    </dependencies>
+</project>

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-common/src/main/java/org/apache/twill/common/Cancellable.java
----------------------------------------------------------------------
diff --git a/twill-common/src/main/java/org/apache/twill/common/Cancellable.java b/twill-common/src/main/java/org/apache/twill/common/Cancellable.java
new file mode 100644
index 0000000..08f22d3
--- /dev/null
+++ b/twill-common/src/main/java/org/apache/twill/common/Cancellable.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.twill.common;
+
+/**
+ * Something, usually a task, that can be cancelled. Cancellation is performed by the cancel method.
+ */
+public interface Cancellable {
+  /**
+   * Attempts to cancel execution of this task.
+   */
+  void cancel();
+}
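
[Editor's note] A minimal sketch of the interface in use, wrapping a scheduled task so callers only see the cancel() contract; the scheduler below is just a stand-in for any API that hands back a Cancellable and is not part of this patch:

// Illustrative sketch only.
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import org.apache.twill.common.Cancellable;

public class CancellableExample {
  public static void main(String[] args) throws InterruptedException {
    ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
    final ScheduledFuture<?> future = scheduler.scheduleAtFixedRate(new Runnable() {
      @Override
      public void run() {
        System.out.println("tick");
      }
    }, 0, 100, TimeUnit.MILLISECONDS);

    // Expose only the ability to cancel the periodic task.
    Cancellable cancellable = new Cancellable() {
      @Override
      public void cancel() {
        future.cancel(false);
      }
    };

    TimeUnit.MILLISECONDS.sleep(350);
    cancellable.cancel();
    scheduler.shutdown();
  }
}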

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-common/src/main/java/org/apache/twill/common/ServiceListenerAdapter.java
----------------------------------------------------------------------
diff --git a/twill-common/src/main/java/org/apache/twill/common/ServiceListenerAdapter.java b/twill-common/src/main/java/org/apache/twill/common/ServiceListenerAdapter.java
new file mode 100644
index 0000000..527ba7d
--- /dev/null
+++ b/twill-common/src/main/java/org/apache/twill/common/ServiceListenerAdapter.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.common;
+
+import com.google.common.util.concurrent.Service;
+
+/**
+ * An adapter for implementing {@link Service.Listener} with all methods defaulting to no-op.
+ */
+public abstract class ServiceListenerAdapter implements Service.Listener {
+  @Override
+  public void starting() {
+    // No-op
+  }
+
+  @Override
+  public void running() {
+    // No-op
+  }
+
+  @Override
+  public void stopping(Service.State from) {
+    // No-op
+  }
+
+  @Override
+  public void terminated(Service.State from) {
+    // No-op
+  }
+
+  @Override
+  public void failed(Service.State from, Throwable failure) {
+    // No-op
+  }
+}
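
[Editor's note] The adapter is handy when only a couple of the listener callbacks matter; a minimal sketch follows (the throwaway AbstractIdleService below is not part of this patch):

// Illustrative sketch only.
import com.google.common.util.concurrent.AbstractIdleService;
import com.google.common.util.concurrent.Service;
import org.apache.twill.common.ServiceListenerAdapter;
import org.apache.twill.common.Threads;

public class ListenerExample {
  public static void main(String[] args) {
    Service service = new AbstractIdleService() {
      @Override
      protected void startUp() { /* no-op */ }

      @Override
      protected void shutDown() { /* no-op */ }
    };

    // Only override the callbacks of interest; all others default to no-op.
    service.addListener(new ServiceListenerAdapter() {
      @Override
      public void terminated(Service.State from) {
        System.out.println("Terminated, previous state was " + from);
      }

      @Override
      public void failed(Service.State from, Throwable failure) {
        failure.printStackTrace();
      }
    }, Threads.SAME_THREAD_EXECUTOR);

    service.startAndWait();
    service.stopAndWait();
  }
}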

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-common/src/main/java/org/apache/twill/common/Services.java
----------------------------------------------------------------------
diff --git a/twill-common/src/main/java/org/apache/twill/common/Services.java b/twill-common/src/main/java/org/apache/twill/common/Services.java
new file mode 100644
index 0000000..7e294f0
--- /dev/null
+++ b/twill-common/src/main/java/org/apache/twill/common/Services.java
@@ -0,0 +1,140 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.common;
+
+import com.google.common.collect.Lists;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.Service;
+import com.google.common.util.concurrent.SettableFuture;
+
+import java.util.List;
+import java.util.concurrent.atomic.AtomicInteger;
+
+/**
+ * Utility methods for dealing with {@link Service}.
+ */
+public final class Services {
+
+  /**
+   * Starts a list of {@link Service}s one by one. Starting of the next Service is triggered from the callback
+   * listener thread of the previous Service.
+   *
+   * @param firstService First service to start.
+   * @param moreServices The remaining services to start.
+   * @return A {@link ListenableFuture} that will be completed when all services are started, with the
+   *         result carrying the completed {@link ListenableFuture} of each corresponding service in the
+   *         same order as they are passed to this method.
+   */
+  public static ListenableFuture<List<ListenableFuture<Service.State>>> chainStart(Service firstService,
+                                                                                   Service...moreServices) {
+    return doChain(true, firstService, moreServices);
+  }
+
+  /**
+   * Stops a list of {@link Service}s one by one. It behaves the same as
+   * {@link #chainStart(com.google.common.util.concurrent.Service, com.google.common.util.concurrent.Service...)}
+   * except {@link com.google.common.util.concurrent.Service#stop()} is called instead of start.
+   *
+   * @param firstService First service to stop.
+   * @param moreServices The remaining services to stop.
+   * @return A {@link ListenableFuture} that will be completed when all services are stopped.
+   * @see #chainStart(com.google.common.util.concurrent.Service, com.google.common.util.concurrent.Service...)
+   */
+  public static ListenableFuture<List<ListenableFuture<Service.State>>> chainStop(Service firstService,
+                                                                                  Service...moreServices) {
+    return doChain(false, firstService, moreServices);
+  }
+
+  /**
+   * Returns a {@link ListenableFuture} that will be completed when the given service is stopped. If the service
+   * stopped due to an error, the failure cause will be reflected in the future.
+   *
+   * @param service The {@link Service} to block on.
+   * @return A {@link ListenableFuture} that will be completed when the service is stopped.
+   */
+  public static ListenableFuture<Service.State> getCompletionFuture(Service service) {
+    final SettableFuture<Service.State> resultFuture = SettableFuture.create();
+
+    service.addListener(new ServiceListenerAdapter() {
+      @Override
+      public void terminated(Service.State from) {
+        resultFuture.set(Service.State.TERMINATED);
+      }
+
+      @Override
+      public void failed(Service.State from, Throwable failure) {
+        resultFuture.setException(failure);
+      }
+    }, Threads.SAME_THREAD_EXECUTOR);
+
+    Service.State state = service.state();
+    if (state == Service.State.TERMINATED) {
+      return Futures.immediateFuture(state);
+    } else if (state == Service.State.FAILED) {
+      return Futures.immediateFailedFuture(new IllegalStateException("Service failed with unknown exception."));
+    }
+
+    return resultFuture;
+  }
+
+  /**
+   * Performs the actual logic of chained Service start/stop.
+   */
+  private static ListenableFuture<List<ListenableFuture<Service.State>>> doChain(boolean doStart,
+                                                                                 Service firstService,
+                                                                                 Service...moreServices) {
+    SettableFuture<List<ListenableFuture<Service.State>>> resultFuture = SettableFuture.create();
+    List<ListenableFuture<Service.State>> result = Lists.newArrayListWithCapacity(moreServices.length + 1);
+
+    ListenableFuture<Service.State> future = doStart ? firstService.start() : firstService.stop();
+    future.addListener(createChainListener(future, moreServices, new AtomicInteger(0), result, resultFuture, doStart),
+                       Threads.SAME_THREAD_EXECUTOR);
+    return resultFuture;
+  }
+
+  /**
+   * Returns a {@link Runnable} that can be used as a {@link ListenableFuture} listener to trigger
+   * the next service action or to complete the result future. Used by
+   * {@link #doChain(boolean, com.google.common.util.concurrent.Service, com.google.common.util.concurrent.Service...)}
+   */
+  private static Runnable createChainListener(final ListenableFuture<Service.State> future, final Service[] services,
+                                              final AtomicInteger idx,
+                                              final List<ListenableFuture<Service.State>> result,
+                                              final SettableFuture<List<ListenableFuture<Service.State>>> resultFuture,
+                                              final boolean doStart) {
+    return new Runnable() {
+
+      @Override
+      public void run() {
+        result.add(future);
+        int nextIdx = idx.getAndIncrement();
+        if (nextIdx == services.length) {
+          resultFuture.set(result);
+          return;
+        }
+        ListenableFuture<Service.State> actionFuture = doStart ? services[nextIdx].start() : services[nextIdx].stop();
+        actionFuture.addListener(createChainListener(actionFuture, services, idx, result, resultFuture, doStart),
+                                 Threads.SAME_THREAD_EXECUTOR);
+      }
+    };
+  }
+
+  private Services() {
+  }
+}
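
[Editor's note] A minimal sketch of these helpers in use: start two services in dependency order, wait on the completion future of the last one, and stop them in reverse order. DummyService here is a stand-in and not part of this patch:

// Illustrative sketch only.
import com.google.common.util.concurrent.AbstractIdleService;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.Service;
import org.apache.twill.common.Services;

public class ChainExample {
  public static void main(String[] args) throws Exception {
    Service zkClient = new DummyService("zk-client");
    Service kafkaClient = new DummyService("kafka-client");

    // Register interest in the termination of the last service before anything starts.
    ListenableFuture<Service.State> completion = Services.getCompletionFuture(kafkaClient);

    // Start in dependency order, then stop in reverse order.
    Futures.allAsList(Services.chainStart(zkClient, kafkaClient).get()).get();
    Futures.allAsList(Services.chainStop(kafkaClient, zkClient).get()).get();

    completion.get();  // completes once kafkaClient has terminated
  }

  private static final class DummyService extends AbstractIdleService {
    private final String name;

    private DummyService(String name) {
      this.name = name;
    }

    @Override
    protected void startUp() {
      System.out.println("Started " + name);
    }

    @Override
    protected void shutDown() {
      System.out.println("Stopped " + name);
    }
  }
}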

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-common/src/main/java/org/apache/twill/common/Threads.java
----------------------------------------------------------------------
diff --git a/twill-common/src/main/java/org/apache/twill/common/Threads.java b/twill-common/src/main/java/org/apache/twill/common/Threads.java
new file mode 100644
index 0000000..e33a677
--- /dev/null
+++ b/twill-common/src/main/java/org/apache/twill/common/Threads.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.common;
+
+import com.google.common.util.concurrent.MoreExecutors;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+
+import java.util.concurrent.Executor;
+import java.util.concurrent.ThreadFactory;
+
+/**
+ * Common thread-related utilities.
+ */
+public final class Threads {
+
+  /**
+   * An executor that executes tasks from the submitting thread.
+   */
+  public static final Executor SAME_THREAD_EXECUTOR = MoreExecutors.sameThreadExecutor();
+
+  /**
+   * Handy method to create a {@link ThreadFactory} that creates daemon threads with the given name format.
+   *
+   * @param nameFormat Name format for the thread names
+   * @return A {@link ThreadFactory}.
+   * @see ThreadFactoryBuilder
+   */
+  public static ThreadFactory createDaemonThreadFactory(String nameFormat) {
+    return new ThreadFactoryBuilder()
+      .setDaemon(true)
+      .setNameFormat(nameFormat)
+      .build();
+  }
+
+  private Threads() {
+  }
+}
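
[Editor's note] A short sketch of the thread factory helper feeding a standard executor; the pool size and name format below are arbitrary:

// Illustrative sketch only.
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import org.apache.twill.common.Threads;

public class ThreadsExample {
  public static void main(String[] args) throws InterruptedException {
    // Daemon threads named worker-0, worker-1, ... so they never block JVM shutdown.
    ExecutorService executor =
      Executors.newFixedThreadPool(2, Threads.createDaemonThreadFactory("worker-%d"));

    executor.execute(new Runnable() {
      @Override
      public void run() {
        System.out.println("Running on " + Thread.currentThread().getName());
      }
    });

    executor.shutdown();
    executor.awaitTermination(5, TimeUnit.SECONDS);
  }
}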

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-common/src/main/java/org/apache/twill/filesystem/ForwardingLocationFactory.java
----------------------------------------------------------------------
diff --git a/twill-common/src/main/java/org/apache/twill/filesystem/ForwardingLocationFactory.java b/twill-common/src/main/java/org/apache/twill/filesystem/ForwardingLocationFactory.java
new file mode 100644
index 0000000..d25ea20
--- /dev/null
+++ b/twill-common/src/main/java/org/apache/twill/filesystem/ForwardingLocationFactory.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.filesystem;
+
+/**
+ * A base {@link LocationFactory} implementation that wraps another {@link LocationFactory} as its delegate.
+ */
+public abstract class ForwardingLocationFactory implements LocationFactory {
+
+  private final LocationFactory delegate;
+
+  protected ForwardingLocationFactory(LocationFactory delegate) {
+    this.delegate = delegate;
+  }
+
+  public LocationFactory getDelegate() {
+    return delegate;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-common/src/main/java/org/apache/twill/filesystem/LocalLocation.java
----------------------------------------------------------------------
diff --git a/twill-common/src/main/java/org/apache/twill/filesystem/LocalLocation.java b/twill-common/src/main/java/org/apache/twill/filesystem/LocalLocation.java
new file mode 100644
index 0000000..d107eac
--- /dev/null
+++ b/twill-common/src/main/java/org/apache/twill/filesystem/LocalLocation.java
@@ -0,0 +1,205 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.filesystem;
+
+import com.google.common.collect.Lists;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.net.URI;
+import java.util.Collections;
+import java.util.Deque;
+import java.util.UUID;
+
+/**
+ * A concrete implementation of {@link Location} for the Local filesystem.
+ */
+final class LocalLocation implements Location {
+  private final File file;
+
+  /**
+   * Constructs a LocalLocation.
+   *
+   * @param file the file this location represents.
+   */
+  LocalLocation(File file) {
+    this.file = file;
+  }
+
+  /**
+   * Checks if this location exists on the local file system.
+   *
+   * @return true if found; false otherwise.
+   * @throws java.io.IOException
+   */
+  @Override
+  public boolean exists() throws IOException {
+    return file.exists();
+  }
+
+  /**
+   * @return An {@link java.io.InputStream} for this location on local filesystem.
+   * @throws IOException
+   */
+  @Override
+  public InputStream getInputStream() throws IOException {
+    File parent = file.getParentFile();
+    if (!parent.exists()) {
+      parent.mkdirs();
+    }
+    return new FileInputStream(file);
+  }
+
+  /**
+   * @return An {@link java.io.OutputStream} for this location on local filesystem.
+   * @throws IOException
+   */
+  @Override
+  public OutputStream getOutputStream() throws IOException {
+    File parent = file.getParentFile();
+    if (!parent.exists()) {
+      parent.mkdirs();
+    }
+    return new FileOutputStream(file);
+  }
+
+  /**
+   * Local location doesn't support permissions. This is the same as calling {@link #getOutputStream()}.
+   */
+  @Override
+  public OutputStream getOutputStream(String permission) throws IOException {
+    return getOutputStream();
+  }
+
+  /**
+   * @return The name of the file or directory denoted by this abstract pathname.
+   */
+  @Override
+  public String getName() {
+    return file.getName();
+  }
+
+  @Override
+  public boolean createNew() throws IOException {
+    return file.createNewFile();
+  }
+
+  /**
+   * Appends the child to the current {@link Location} on local filesystem.
+   * <p>
+   * Returns a new instance of Location.
+   * </p>
+   *
+   * @param child to be appended to this location.
+   * @return A new instance of {@link Location}
+   * @throws IOException
+   */
+  @Override
+  public Location append(String child) throws IOException {
+    return new LocalLocation(new File(file, child));
+  }
+
+  @Override
+  public Location getTempFile(String suffix) throws IOException {
+    return new LocalLocation(
+      new File(file.getAbsolutePath() + "." + UUID.randomUUID() + (suffix == null ? TEMP_FILE_SUFFIX : suffix)));
+  }
+
+  /**
+   * @return A {@link URI} for this location on local filesystem.
+   */
+  @Override
+  public URI toURI() {
+    return file.toURI();
+  }
+
+  /**
+   * Deletes the file or directory denoted by this abstract pathname. If this
+   * pathname denotes a directory, then the directory must be empty in order
+   * to be deleted.
+   *
+   * @return true if and only if the file or directory is successfully deleted; false otherwise.
+   */
+  @Override
+  public boolean delete() throws IOException {
+    return file.delete();
+  }
+
+  @Override
+  public boolean delete(boolean recursive) throws IOException {
+    if (!recursive) {
+      return delete();
+    }
+
+    Deque<File> stack = Lists.newLinkedList();
+    stack.add(file);
+    while (!stack.isEmpty()) {
+      File f = stack.peekLast();
+      File[] files = f.listFiles();
+
+      if (files != null && files.length != 0) {
+        Collections.addAll(stack, files);
+      } else {
+        if (!f.delete()) {
+          return false;
+        }
+        stack.pollLast();
+      }
+    }
+    return true;
+  }
+
+  @Override
+  public Location renameTo(Location destination) throws IOException {
+    // destination will always be of the same type as this location
+    boolean success = file.renameTo(((LocalLocation) destination).file);
+    if (success) {
+      return new LocalLocation(((LocalLocation) destination).file);
+    } else {
+      return null;
+    }
+  }
+
+  /**
+   * Creates the directory named by this abstract pathname, including any necessary
+   * but nonexistent parent directories.
+   *
+   * @return true if and only if the directory was created; false otherwise
+   */
+  @Override
+  public boolean mkdirs() throws IOException {
+    return file.mkdirs();
+  }
+
+  /**
+   * @return Length of file.
+   */
+  @Override
+  public long length() throws IOException {
+    return file.length();
+  }
+
+  @Override
+  public long lastModified() {
+    return file.lastModified();
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-common/src/main/java/org/apache/twill/filesystem/LocalLocationFactory.java
----------------------------------------------------------------------
diff --git a/twill-common/src/main/java/org/apache/twill/filesystem/LocalLocationFactory.java b/twill-common/src/main/java/org/apache/twill/filesystem/LocalLocationFactory.java
new file mode 100644
index 0000000..f44cd87
--- /dev/null
+++ b/twill-common/src/main/java/org/apache/twill/filesystem/LocalLocationFactory.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.filesystem;
+
+import java.io.File;
+import java.net.URI;
+
+/**
+ * A {@link LocationFactory} for creating local file {@link Location}.
+ */
+public final class LocalLocationFactory implements LocationFactory {
+
+  private final File basePath;
+
+  /**
+   * Constructs a LocalLocationFactory whose created Locations will be relative to the system root.
+   */
+  public LocalLocationFactory() {
+    this(new File("/"));
+  }
+
+  public LocalLocationFactory(File basePath) {
+    this.basePath = basePath;
+  }
+
+  @Override
+  public Location create(String path) {
+    return new LocalLocation(new File(basePath, path));
+  }
+
+  @Override
+  public Location create(URI uri) {
+    if (uri.isAbsolute()) {
+      return new LocalLocation(new File(uri));
+    }
+    return new LocalLocation(new File(basePath, uri.getPath()));
+  }
+
+  @Override
+  public Location getHomeLocation() {
+    return new LocalLocation(new File(System.getProperty("user.home")));
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-common/src/main/java/org/apache/twill/filesystem/Location.java
----------------------------------------------------------------------
diff --git a/twill-common/src/main/java/org/apache/twill/filesystem/Location.java b/twill-common/src/main/java/org/apache/twill/filesystem/Location.java
new file mode 100644
index 0000000..dee9546
--- /dev/null
+++ b/twill-common/src/main/java/org/apache/twill/filesystem/Location.java
@@ -0,0 +1,154 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.filesystem;
+
+import javax.annotation.Nullable;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.net.URI;
+
+/**
+ * This interface defines the location and operations of a resource on the filesystem.
+ * <p>
+ * {@link Location} is agnostic to the type of file system the resource is on.
+ * </p>
+ */
+public interface Location {
+  /**
+   * Suffix added to every temp file name generated with {@link #getTempFile(String)}.
+   */
+  static final String TEMP_FILE_SUFFIX = ".tmp";
+
+  /**
+   * Checks if this location exists.
+   *
+   * @return true if found; false otherwise.
+   * @throws IOException
+   */
+  boolean exists() throws IOException;
+
+  /**
+   * @return The name of the file or directory denoted by this abstract pathname.
+   */
+  String getName();
+
+  /**
+   * Atomically creates a new, empty file named by this abstract pathname if and only if a file with this name
+   * does not yet exist.
+   * @return {@code true} if the file is successfully created, {@code false} otherwise.
+   * @throws IOException
+   */
+  boolean createNew() throws IOException;
+
+  /**
+   * @return An {@link java.io.InputStream} for this location.
+   * @throws IOException
+   */
+  InputStream getInputStream() throws IOException;
+
+  /**
+   * @return An {@link java.io.OutputStream} for this location.
+   * @throws IOException
+   */
+  OutputStream getOutputStream() throws IOException;
+
+  /**
+   * Creates an {@link OutputStream} for this location with the given permission. The actual permission supported
+   * depends on implementation.
+   *
+   * @param permission A POSIX permission string.
+   * @return An {@link OutputStream} for writing to this location.
+   * @throws IOException If failed to create the {@link OutputStream}.
+   */
+  OutputStream getOutputStream(String permission) throws IOException;
+
+  /**
+   * Appends the child to the current {@link Location}.
+   * <p>
+   * Returns a new instance of Location.
+   * </p>
+   *
+   * @param child to be appended to this location.
+   * @return A new instance of {@link Location}
+   * @throws IOException
+   */
+  Location append(String child) throws IOException;
+
+  /**
+   * Returns a unique location for a temporary file to be placed near this location.
+   * Allows all temp files to follow the same pattern for easier management.
+   * @param suffix part of the file name to include in the temp file name
+   * @return location of the temp file
+   * @throws IOException
+   */
+  Location getTempFile(String suffix) throws IOException;
+
+  /**
+   * @return A {@link java.net.URI} for this location.
+   */
+  URI toURI();
+
+  /**
+   * Deletes the file or directory denoted by this abstract pathname. If this
+   * pathname denotes a directory, then the directory must be empty in order
+   * to be deleted.
+   *
+   * @return true if and only if the file or directory is successfully deleted; false otherwise.
+   */
+  boolean delete() throws IOException;
+
+  /**
+   * Deletes the file or directory denoted by this abstract pathname. If this
+   * pathname denotes a directory and {@code recursive} is {@code true}, then content of the
+   * directory will be deleted recursively, otherwise the directory must be empty in order to be deleted.
+   * Note that when calling this method with {@code recursive = true} for a directory, a
+   * failure during deletion may leave some entries inside the directory deleted while others are not.
+   *
+   * @param recursive Whether to delete a directory recursively. Ignored if the pathname represents a file.
+   * @return true if and only if the file or directory is successfully deleted; false otherwise.
+   */
+  boolean delete(boolean recursive) throws IOException;
+
+  /**
+   * Moves the file or directory denoted by this abstract pathname.
+   *
+   * @param destination destination location
+   * @return new location if and only if the file or directory is successfully moved; null otherwise.
+   */
+  @Nullable
+  Location renameTo(Location destination) throws IOException;
+
+  /**
+   * Creates the directory named by this abstract pathname, including any necessary
+   * but nonexistent parent directories.
+   *
+   * @return true if and only if the directory was created; false otherwise
+   */
+  boolean mkdirs() throws IOException;
+
+  /**
+   * @return Length of file.
+   */
+  long length() throws IOException;
+
+  /**
+   * @return Last modified time of file.
+   */
+  long lastModified() throws IOException;
+}
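
[Editor's note] A minimal sketch of the Location API in use, backed by the LocalLocationFactory from this patch and rooted at the system temp directory; the file names below are arbitrary:

// Illustrative sketch only.
import com.google.common.base.Charsets;
import com.google.common.io.CharStreams;
import java.io.File;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.io.Writer;
import org.apache.twill.filesystem.LocalLocationFactory;
import org.apache.twill.filesystem.Location;
import org.apache.twill.filesystem.LocationFactory;

public class LocationExample {
  public static void main(String[] args) throws IOException {
    LocationFactory factory = new LocalLocationFactory(new File(System.getProperty("java.io.tmpdir")));
    Location location = factory.create("example").append("greeting.txt");

    // Write then read back through the Location abstraction.
    Writer writer = new OutputStreamWriter(location.getOutputStream(), Charsets.UTF_8);
    try {
      writer.write("hello twill");
    } finally {
      writer.close();
    }

    String content = CharStreams.toString(new InputStreamReader(location.getInputStream(), Charsets.UTF_8));
    System.out.println(content + " (" + location.length() + " bytes at " + location.toURI() + ")");

    location.delete();
  }
}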

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-common/src/main/java/org/apache/twill/filesystem/LocationFactories.java
----------------------------------------------------------------------
diff --git a/twill-common/src/main/java/org/apache/twill/filesystem/LocationFactories.java b/twill-common/src/main/java/org/apache/twill/filesystem/LocationFactories.java
new file mode 100644
index 0000000..751a632
--- /dev/null
+++ b/twill-common/src/main/java/org/apache/twill/filesystem/LocationFactories.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.filesystem;
+
+import com.google.common.base.Throwables;
+
+import java.io.IOException;
+import java.net.URI;
+
+/**
+ * Provides helper methods for creating different {@link LocationFactory} instances.
+ */
+public final class LocationFactories {
+
+  /**
+   * Creates a {@link LocationFactory} that always applies the given namespace prefix.
+   */
+  public static LocationFactory namespace(LocationFactory delegate, final String namespace) {
+    return new ForwardingLocationFactory(delegate) {
+      @Override
+      public Location create(String path) {
+        try {
+          Location base = getDelegate().create(namespace);
+          return base.append(path);
+        } catch (IOException e) {
+          throw Throwables.propagate(e);
+        }
+      }
+
+      @Override
+      public Location create(URI uri) {
+        if (uri.isAbsolute()) {
+          return getDelegate().create(uri);
+        }
+        try {
+          Location base = getDelegate().create(namespace);
+          return base.append(uri.getPath());
+        } catch (IOException e) {
+          throw Throwables.propagate(e);
+        }
+      }
+
+      @Override
+      public Location getHomeLocation() {
+        return getDelegate().getHomeLocation();
+      }
+    };
+  }
+
+  private LocationFactories() {
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-common/src/main/java/org/apache/twill/filesystem/LocationFactory.java
----------------------------------------------------------------------
diff --git a/twill-common/src/main/java/org/apache/twill/filesystem/LocationFactory.java b/twill-common/src/main/java/org/apache/twill/filesystem/LocationFactory.java
new file mode 100644
index 0000000..f88d94d
--- /dev/null
+++ b/twill-common/src/main/java/org/apache/twill/filesystem/LocationFactory.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.filesystem;
+
+import java.net.URI;
+
+/**
+ * Factory for creating instances of {@link Location}.
+ */
+public interface LocationFactory {
+
+  /**
+   * Creates an instance of {@link Location} for the given path.
+   * @param path The path representing the location.
+   * @return An instance of {@link Location}.
+   */
+  Location create(String path);
+
+  /**
+   * Creates an instance of {@link Location} based on {@link java.net.URI} <code>uri</code>.
+   *
+   * @param uri to the resource on the filesystem.
+   * @return An instance of {@link Location}
+   */
+  Location create(URI uri);
+
+  /**
+   * Returns the home location.
+   */
+  Location getHomeLocation();
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-common/src/test/java/org/apache/twill/common/ServicesTest.java
----------------------------------------------------------------------
diff --git a/twill-common/src/test/java/org/apache/twill/common/ServicesTest.java b/twill-common/src/test/java/org/apache/twill/common/ServicesTest.java
new file mode 100644
index 0000000..c0aa7ee
--- /dev/null
+++ b/twill-common/src/test/java/org/apache/twill/common/ServicesTest.java
@@ -0,0 +1,106 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.common;
+
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.AbstractIdleService;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.Service;
+import org.junit.Assert;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+/**
+ * Unit test for {@link Services} methods.
+ */
+public class ServicesTest {
+
+  private static final Logger LOG = LoggerFactory.getLogger(ServicesTest.class);
+
+  @Test
+  public void testChain() throws ExecutionException, InterruptedException {
+    AtomicBoolean transiting = new AtomicBoolean(false);
+    Service s1 = new DummyService("s1", transiting);
+    Service s2 = new DummyService("s2", transiting);
+    Service s3 = new DummyService("s3", transiting);
+
+    Futures.allAsList(Services.chainStart(s1, s2, s3).get()).get();
+    Futures.allAsList(Services.chainStop(s3, s2, s1).get()).get();
+  }
+
+  @Test
+  public void testCompletion() throws ExecutionException, InterruptedException {
+    Service service = new DummyService("s1", new AtomicBoolean());
+    ListenableFuture<Service.State> completion = Services.getCompletionFuture(service);
+
+    service.start();
+    service.stop();
+
+    completion.get();
+
+    AtomicBoolean transiting = new AtomicBoolean();
+    service = new DummyService("s2", transiting);
+    completion = Services.getCompletionFuture(service);
+
+    service.startAndWait();
+    transiting.set(true);
+    service.stop();
+
+    try {
+      completion.get();
+      Assert.fail("Expected the completion future to fail.");
+    } catch (ExecutionException e) {
+      // Expected
+    }
+  }
+
+  private static final class DummyService extends AbstractIdleService {
+
+    private final String name;
+    private final AtomicBoolean transiting;
+
+    private DummyService(String name, AtomicBoolean transiting) {
+      this.name = name;
+      this.transiting = transiting;
+    }
+
+    @Override
+    protected void startUp() throws Exception {
+      Preconditions.checkState(transiting.compareAndSet(false, true));
+      LOG.info("Starting: " + name);
+      TimeUnit.MILLISECONDS.sleep(500);
+      LOG.info("Started: " + name);
+      Preconditions.checkState(transiting.compareAndSet(true, false));
+    }
+
+    @Override
+    protected void shutDown() throws Exception {
+      Preconditions.checkState(transiting.compareAndSet(false, true));
+      LOG.info("Stopping: " + name);
+      TimeUnit.MILLISECONDS.sleep(500);
+      LOG.info("Stopped: " + name);
+      Preconditions.checkState(transiting.compareAndSet(true, false));
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-common/src/test/java/org/apache/twill/filesystem/LocalLocationTest.java
----------------------------------------------------------------------
diff --git a/twill-common/src/test/java/org/apache/twill/filesystem/LocalLocationTest.java b/twill-common/src/test/java/org/apache/twill/filesystem/LocalLocationTest.java
new file mode 100644
index 0000000..198f77f
--- /dev/null
+++ b/twill-common/src/test/java/org/apache/twill/filesystem/LocalLocationTest.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.filesystem;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.File;
+import java.io.IOException;
+import java.net.URI;
+
+/**
+ * Unit tests for the local filesystem {@link Location} implementation.
+ */
+public class LocalLocationTest {
+
+  @Test
+  public void testDelete() throws IOException {
+    LocationFactory factory = new LocalLocationFactory(new File(System.getProperty("java.io.tmpdir")));
+
+    Location base = factory.create("test").getTempFile(".tmp");
+    Assert.assertTrue(base.mkdirs());
+
+    Assert.assertTrue(base.append("test1").getTempFile(".tmp").createNew());
+    Assert.assertTrue(base.append("test2").getTempFile(".tmp").createNew());
+
+    Location subDir = base.append("test3");
+    Assert.assertTrue(subDir.mkdirs());
+
+    Assert.assertTrue(subDir.append("test4").getTempFile(".tmp").createNew());
+    Assert.assertTrue(subDir.append("test5").getTempFile(".tmp").createNew());
+
+    Assert.assertTrue(base.delete(true));
+    Assert.assertFalse(base.exists());
+  }
+
+  @Test
+  public void testHelper() {
+    LocationFactory factory = LocationFactories.namespace(
+                                new LocalLocationFactory(new File(System.getProperty("java.io.tmpdir"))),
+                                "testhelper");
+
+    Location location = factory.create("test");
+    Assert.assertTrue(location.toURI().getPath().endsWith("testhelper/test"));
+
+    location = factory.create(URI.create("test2"));
+    Assert.assertTrue(location.toURI().getPath().endsWith("testhelper/test2"));
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/pom.xml
----------------------------------------------------------------------
diff --git a/twill-core/pom.xml b/twill-core/pom.xml
new file mode 100644
index 0000000..faff711
--- /dev/null
+++ b/twill-core/pom.xml
@@ -0,0 +1,89 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <parent>
+        <artifactId>twill-parent</artifactId>
+        <groupId>org.apache.twill</groupId>
+        <version>0.1.0-SNAPSHOT</version>
+    </parent>
+    <modelVersion>4.0.0</modelVersion>
+
+    <artifactId>twill-core</artifactId>
+    <name>Twill core library</name>
+
+    <dependencies>
+        <dependency>
+            <groupId>${project.groupId}</groupId>
+            <artifactId>twill-api</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>${project.groupId}</groupId>
+            <artifactId>twill-zookeeper</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>${project.groupId}</groupId>
+            <artifactId>twill-discovery-core</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>com.google.guava</groupId>
+            <artifactId>guava</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>com.google.code.gson</groupId>
+            <artifactId>gson</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>io.netty</groupId>
+            <artifactId>netty</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.xerial.snappy</groupId>
+            <artifactId>snappy-java</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.ow2.asm</groupId>
+            <artifactId>asm-all</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.slf4j</groupId>
+            <artifactId>slf4j-api</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>ch.qos.logback</groupId>
+            <artifactId>logback-core</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>ch.qos.logback</groupId>
+            <artifactId>logback-classic</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>junit</groupId>
+            <artifactId>junit</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.commons</groupId>
+            <artifactId>commons-compress</artifactId>
+        </dependency>
+    </dependencies>
+</project>


[15/28] Making maven site works.

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/test/resources/logback-test.xml
----------------------------------------------------------------------
diff --git a/twill-core/src/test/resources/logback-test.xml b/twill-core/src/test/resources/logback-test.xml
new file mode 100644
index 0000000..3c36660
--- /dev/null
+++ b/twill-core/src/test/resources/logback-test.xml
@@ -0,0 +1,18 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<!-- Default logback configuration for twill library -->
+<configuration>
+    <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
+        <encoder>
+            <pattern>%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n</pattern>
+        </encoder>
+    </appender>
+
+    <logger name="org.apache.hadoop" level="WARN" />
+    <logger name="org.apache.zookeeper" level="WARN" />
+
+    <root level="INFO">
+        <appender-ref ref="STDOUT"/>
+    </root>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-discovery-api/pom.xml
----------------------------------------------------------------------
diff --git a/twill-discovery-api/pom.xml b/twill-discovery-api/pom.xml
new file mode 100644
index 0000000..e41b214
--- /dev/null
+++ b/twill-discovery-api/pom.xml
@@ -0,0 +1,39 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <parent>
+        <artifactId>twill-parent</artifactId>
+        <groupId>org.apache.twill</groupId>
+        <version>0.1.0-SNAPSHOT</version>
+    </parent>
+    <modelVersion>4.0.0</modelVersion>
+
+    <artifactId>twill-discovery-api</artifactId>
+    <name>Twill discovery service API</name>
+
+    <dependencies>
+        <dependency>
+            <groupId>${project.groupId}</groupId>
+            <artifactId>twill-common</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+    </dependencies>
+</project>

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-discovery-api/src/main/java/org/apache/twill/discovery/Discoverable.java
----------------------------------------------------------------------
diff --git a/twill-discovery-api/src/main/java/org/apache/twill/discovery/Discoverable.java b/twill-discovery-api/src/main/java/org/apache/twill/discovery/Discoverable.java
new file mode 100644
index 0000000..a5529fe
--- /dev/null
+++ b/twill-discovery-api/src/main/java/org/apache/twill/discovery/Discoverable.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.twill.discovery;
+
+import java.net.InetSocketAddress;
+
+/**
+ * Discoverable defines the attributes of a service to be discovered.
+ */
+public interface Discoverable {
+
+  /**
+   * @return Name of the service
+   */
+  String getName();
+
+  /**
+   * @return An {@link InetSocketAddress} representing the host+port of the service.
+   */
+  InetSocketAddress getSocketAddress();
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-discovery-api/src/main/java/org/apache/twill/discovery/DiscoveryService.java
----------------------------------------------------------------------
diff --git a/twill-discovery-api/src/main/java/org/apache/twill/discovery/DiscoveryService.java b/twill-discovery-api/src/main/java/org/apache/twill/discovery/DiscoveryService.java
new file mode 100644
index 0000000..a26fff8
--- /dev/null
+++ b/twill-discovery-api/src/main/java/org/apache/twill/discovery/DiscoveryService.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.discovery;
+
+
+import org.apache.twill.common.Cancellable;
+
+/**
+ * DiscoveryService defines the interface for registering a {@link Discoverable}.
+ */
+public interface DiscoveryService {
+
+  /**
+   * Registers a {@link Discoverable} service.
+   * @param discoverable Information of the service provider that could be discovered.
+   * @return A {@link Cancellable} for un-registration.
+   */
+  Cancellable register(Discoverable discoverable);
+}
+

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-discovery-api/src/main/java/org/apache/twill/discovery/DiscoveryServiceClient.java
----------------------------------------------------------------------
diff --git a/twill-discovery-api/src/main/java/org/apache/twill/discovery/DiscoveryServiceClient.java b/twill-discovery-api/src/main/java/org/apache/twill/discovery/DiscoveryServiceClient.java
new file mode 100644
index 0000000..89cf269
--- /dev/null
+++ b/twill-discovery-api/src/main/java/org/apache/twill/discovery/DiscoveryServiceClient.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.discovery;
+
+/**
+ * Interface for discovering services registered with a {@link DiscoveryService}.
+ */
+public interface DiscoveryServiceClient {
+
+  /**
+   * Retrieves a list of {@link Discoverable} for the service with the given name.
+   *
+   * @param name Name of the service
+   * @return A live {@link Iterable} that on each call to {@link Iterable#iterator()} returns
+   *         an {@link java.util.Iterator Iterator} that reflects the latest set of
+   *         available {@link Discoverable} services.
+   */
+  Iterable<Discoverable> discover(String name);
+}
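
A note on usage: the Iterable returned by discover(name) is live, meaning each call to iterator() reflects
the registrations at that moment, so a caller can keep a single reference and re-iterate it. Below is a
minimal, illustrative sketch of that behaviour using the InMemoryDiscoveryService added by this same commit
(the service name, host and port are made up for the example):

    import org.apache.twill.common.Cancellable;
    import org.apache.twill.discovery.Discoverable;
    import org.apache.twill.discovery.DiscoveryServiceClient;
    import org.apache.twill.discovery.InMemoryDiscoveryService;

    import java.net.InetSocketAddress;

    public final class LiveDiscoverExample {
      public static void main(String[] args) {
        InMemoryDiscoveryService discovery = new InMemoryDiscoveryService();
        DiscoveryServiceClient client = discovery;

        // Obtain the Iterable once, before anything is registered; it is empty at this point.
        Iterable<Discoverable> endpoints = client.discover("echo");

        Cancellable registration = discovery.register(new Discoverable() {
          @Override
          public String getName() {
            return "echo";
          }

          @Override
          public InetSocketAddress getSocketAddress() {
            return new InetSocketAddress("localhost", 12345);
          }
        });

        // Re-iterating the same Iterable now shows the endpoint registered above.
        for (Discoverable discoverable : endpoints) {
          System.out.println(discoverable.getName() + " -> " + discoverable.getSocketAddress());
        }

        registration.cancel();
      }
    }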

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-discovery-core/pom.xml
----------------------------------------------------------------------
diff --git a/twill-discovery-core/pom.xml b/twill-discovery-core/pom.xml
new file mode 100644
index 0000000..2612138
--- /dev/null
+++ b/twill-discovery-core/pom.xml
@@ -0,0 +1,52 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <parent>
+        <artifactId>twill-parent</artifactId>
+        <groupId>org.apache.twill</groupId>
+        <version>0.1.0-SNAPSHOT</version>
+    </parent>
+    <modelVersion>4.0.0</modelVersion>
+
+    <artifactId>twill-discovery-core</artifactId>
+    <name>Twill discovery service implementations</name>
+
+    <dependencies>
+        <dependency>
+            <groupId>${project.groupId}</groupId>
+            <artifactId>twill-discovery-api</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>${project.groupId}</groupId>
+            <artifactId>twill-zookeeper</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>com.google.code.gson</groupId>
+            <artifactId>gson</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>junit</groupId>
+            <artifactId>junit</artifactId>
+        </dependency>
+    </dependencies>
+</project>

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-discovery-core/src/main/java/org/apache/twill/discovery/DiscoverableWrapper.java
----------------------------------------------------------------------
diff --git a/twill-discovery-core/src/main/java/org/apache/twill/discovery/DiscoverableWrapper.java b/twill-discovery-core/src/main/java/org/apache/twill/discovery/DiscoverableWrapper.java
new file mode 100644
index 0000000..5fa97d1
--- /dev/null
+++ b/twill-discovery-core/src/main/java/org/apache/twill/discovery/DiscoverableWrapper.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.discovery;
+
+import java.net.InetSocketAddress;
+
+/**
+ * Wrapper for a discoverable.
+ */
+final class DiscoverableWrapper implements Discoverable {
+  private final String name;
+  private final InetSocketAddress address;
+
+  DiscoverableWrapper(Discoverable discoverable) {
+    this.name = discoverable.getName();
+    this.address = discoverable.getSocketAddress();
+  }
+
+  @Override
+  public String getName() {
+    return name;
+  }
+
+  @Override
+  public InetSocketAddress getSocketAddress() {
+    return address;
+  }
+
+  @Override
+  public String toString() {
+    return "{name=" + name + ", address=" + address + "}";
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+
+    Discoverable other = (Discoverable) o;
+
+    return name.equals(other.getName()) && address.equals(other.getSocketAddress());
+  }
+
+  @Override
+  public int hashCode() {
+    int result = name.hashCode();
+    result = 31 * result + address.hashCode();
+    return result;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-discovery-core/src/main/java/org/apache/twill/discovery/InMemoryDiscoveryService.java
----------------------------------------------------------------------
diff --git a/twill-discovery-core/src/main/java/org/apache/twill/discovery/InMemoryDiscoveryService.java b/twill-discovery-core/src/main/java/org/apache/twill/discovery/InMemoryDiscoveryService.java
new file mode 100644
index 0000000..7a9e984
--- /dev/null
+++ b/twill-discovery-core/src/main/java/org/apache/twill/discovery/InMemoryDiscoveryService.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.discovery;
+
+import org.apache.twill.common.Cancellable;
+import com.google.common.collect.HashMultimap;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Multimap;
+
+import java.util.Iterator;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
+
+/**
+ * A simple in-memory implementation of {@link DiscoveryService} and {@link DiscoveryServiceClient}.
+ */
+public class InMemoryDiscoveryService implements DiscoveryService, DiscoveryServiceClient {
+
+  private final Multimap<String, Discoverable> services = HashMultimap.create();
+  private final Lock lock = new ReentrantLock();
+
+  @Override
+  public Cancellable register(final Discoverable discoverable) {
+    lock.lock();
+    try {
+      final Discoverable wrapper = new DiscoverableWrapper(discoverable);
+      services.put(wrapper.getName(), wrapper);
+      return new Cancellable() {
+        @Override
+        public void cancel() {
+          lock.lock();
+          try {
+            services.remove(wrapper.getName(), wrapper);
+          } finally {
+            lock.unlock();
+          }
+        }
+      };
+    } finally {
+      lock.unlock();
+    }
+  }
+
+  @Override
+  public Iterable<Discoverable> discover(final String name) {
+    return new Iterable<Discoverable>() {
+      @Override
+      public Iterator<Discoverable> iterator() {
+        lock.lock();
+        try {
+          return ImmutableList.copyOf(services.get(name)).iterator();
+        } finally {
+          lock.unlock();
+        }
+      }
+    };
+  }
+}
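
A quick, illustrative sketch of the in-memory implementation above: two providers register under the same
service name, and cancelling one registration removes only that provider (names and ports are made up):

    import org.apache.twill.common.Cancellable;
    import org.apache.twill.discovery.Discoverable;
    import org.apache.twill.discovery.InMemoryDiscoveryService;

    import java.net.InetSocketAddress;

    public final class InMemoryDiscoveryExample {

      private static Discoverable endpoint(final String name, final int port) {
        return new Discoverable() {
          @Override
          public String getName() {
            return name;
          }

          @Override
          public InetSocketAddress getSocketAddress() {
            return new InetSocketAddress("localhost", port);
          }
        };
      }

      private static int count(Iterable<Discoverable> discoverables) {
        int size = 0;
        for (Discoverable ignored : discoverables) {
          size++;
        }
        return size;
      }

      public static void main(String[] args) {
        InMemoryDiscoveryService discovery = new InMemoryDiscoveryService();

        // Two providers of the same service show up under the same name.
        Cancellable first = discovery.register(endpoint("worker", 7001));
        Cancellable second = discovery.register(endpoint("worker", 7002));
        System.out.println(count(discovery.discover("worker")));  // 2

        // Cancelling a registration removes only that provider.
        first.cancel();
        System.out.println(count(discovery.discover("worker")));  // 1

        second.cancel();
        System.out.println(count(discovery.discover("worker")));  // 0
      }
    }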

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-discovery-core/src/main/java/org/apache/twill/discovery/ZKDiscoveryService.java
----------------------------------------------------------------------
diff --git a/twill-discovery-core/src/main/java/org/apache/twill/discovery/ZKDiscoveryService.java b/twill-discovery-core/src/main/java/org/apache/twill/discovery/ZKDiscoveryService.java
new file mode 100644
index 0000000..e2f9bc0
--- /dev/null
+++ b/twill-discovery-core/src/main/java/org/apache/twill/discovery/ZKDiscoveryService.java
@@ -0,0 +1,511 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.discovery;
+
+import org.apache.twill.common.Cancellable;
+import org.apache.twill.common.Threads;
+import org.apache.twill.zookeeper.NodeChildren;
+import org.apache.twill.zookeeper.NodeData;
+import org.apache.twill.zookeeper.OperationFuture;
+import org.apache.twill.zookeeper.ZKClient;
+import org.apache.twill.zookeeper.ZKClients;
+import org.apache.twill.zookeeper.ZKOperations;
+import com.google.common.base.Charsets;
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.CacheLoader;
+import com.google.common.cache.LoadingCache;
+import com.google.common.collect.HashMultimap;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Multimap;
+import com.google.common.hash.Hashing;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.SettableFuture;
+import com.google.gson.GsonBuilder;
+import com.google.gson.JsonDeserializationContext;
+import com.google.gson.JsonDeserializer;
+import com.google.gson.JsonElement;
+import com.google.gson.JsonObject;
+import com.google.gson.JsonParseException;
+import com.google.gson.JsonSerializationContext;
+import com.google.gson.JsonSerializer;
+import org.apache.zookeeper.CreateMode;
+import org.apache.zookeeper.KeeperException;
+import org.apache.zookeeper.WatchedEvent;
+import org.apache.zookeeper.Watcher;
+import org.apache.zookeeper.data.Stat;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.lang.reflect.Type;
+import java.net.InetSocketAddress;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
+
+/**
+ * Zookeeper implementation of {@link DiscoveryService} and {@link DiscoveryServiceClient}.
+ * <p>
+ *   Discoverable services are registered within Zookeeper under the namespace '/discoverable' by default.
+ *   To register services under a different namespace, pass the namespace to the
+ *   {@link ZKDiscoveryService} constructor.
+ * </p>
+ *
+ * <p>
+ *   Following is a simple example of how {@link ZKDiscoveryService} can be used for registering services
+ *   and also for discovering the registered services.
+ *   <blockquote>
+ *    <pre>
+ *      {@code
+ *
+ *      ZKDiscoveryService service = new ZKDiscoveryService(zkClient);
+ *      service.register(new Discoverable() {
+ *        @Override
+ *        public String getName() {
+ *          return "service-name";
+ *        }
+ *
+ *        @Override
+ *        public InetSocketAddress getSocketAddress() {
+ *          return new InetSocketAddress(hostname, port);
+ *        }
+ *      });
+ *      ...
+ *      ...
+ *      Iterable<Discoverable> services = service.discover("service-name");
+ *      ...
+ *      }
+ *    </pre>
+ *   </blockquote>
+ * </p>
+ */
+public class ZKDiscoveryService implements DiscoveryService, DiscoveryServiceClient {
+  private static final Logger LOG = LoggerFactory.getLogger(ZKDiscoveryService.class);
+  private static final String NAMESPACE = "/discoverable";
+
+  private static final long RETRY_MILLIS = 1000;
+
+  // In memory map for recreating ephemeral nodes after session expires.
+  // It maps from a discoverable to the corresponding Cancellable.
+  private final Multimap<Discoverable, DiscoveryCancellable> discoverables;
+  private final Lock lock;
+
+  private final LoadingCache<String, Iterable<Discoverable>> services;
+  private final ZKClient zkClient;
+  private final ScheduledExecutorService retryExecutor;
+
+  /**
+   * Constructs ZKDiscoveryService using the provided zookeeper client for storing service registry.
+   * @param zkClient The {@link ZKClient} for interacting with zookeeper.
+   */
+  public ZKDiscoveryService(ZKClient zkClient) {
+    this(zkClient, NAMESPACE);
+  }
+
+  /**
+   * Constructs ZKDiscoveryService using the provided zookeeper client for storing the service registry under a namespace.
+   * @param zkClient of zookeeper quorum
+   * @param namespace Namespace under which the registered services are stored in zookeeper.
+   *                  If namespace is {@code null}, no namespace will be used.
+   */
+  public ZKDiscoveryService(ZKClient zkClient, String namespace) {
+    this.discoverables = HashMultimap.create();
+    this.lock = new ReentrantLock();
+    this.retryExecutor = Executors.newSingleThreadScheduledExecutor(
+      Threads.createDaemonThreadFactory("zk-discovery-retry"));
+    this.zkClient = namespace == null ? zkClient : ZKClients.namespace(zkClient, namespace);
+    this.services = CacheBuilder.newBuilder().build(createServiceLoader());
+    this.zkClient.addConnectionWatcher(createConnectionWatcher());
+  }
+
+  /**
+   * Registers a {@link Discoverable} in zookeeper.
+   * <p>
+   *   Registering a {@link Discoverable} will create an ephemeral node at <base>/<service-name>
+   *   in zookeeper. If the node already exists (e.g. an ephemeral node from an expired session that has not
+   *   yet timed out), registration is retried or a runtime exception is thrown, to make sure that a service
+   *   with an intent to register is not started without registering. When a runtime exception is thrown, the
+   *   expectation is that the process being started will fail and will be started again by the monitoring service.
+   * </p>
+   * @param discoverable Information of the service provider that could be discovered.
+   * @return An instance of {@link Cancellable}
+   */
+  @Override
+  public Cancellable register(final Discoverable discoverable) {
+    final Discoverable wrapper = new DiscoverableWrapper(discoverable);
+    final SettableFuture<String> future = SettableFuture.create();
+    final DiscoveryCancellable cancellable = new DiscoveryCancellable(wrapper);
+
+    // Create the zk ephemeral node.
+    Futures.addCallback(doRegister(wrapper), new FutureCallback<String>() {
+      @Override
+      public void onSuccess(String result) {
+        // Set the sequence node path to cancellable for future cancellation.
+        cancellable.setPath(result);
+        lock.lock();
+        try {
+          discoverables.put(wrapper, cancellable);
+        } finally {
+          lock.unlock();
+        }
+        LOG.debug("Service registered: {} {}", wrapper, result);
+        future.set(result);
+      }
+
+      @Override
+      public void onFailure(Throwable t) {
+        if (t instanceof KeeperException.NodeExistsException) {
+          handleRegisterFailure(discoverable, future, this, t);
+        } else {
+          LOG.warn("Failed to register: {}", wrapper, t);
+          future.setException(t);
+        }
+      }
+    }, Threads.SAME_THREAD_EXECUTOR);
+
+    Futures.getUnchecked(future);
+    return cancellable;
+  }
+
+  @Override
+  public Iterable<Discoverable> discover(String service) {
+    return services.getUnchecked(service);
+  }
+
+  /**
+   * Handle registration failure.
+   *
+   * @param discoverable The discoverable to register.
+   * @param completion A settable future to set when registration is completed / failed.
+   * @param creationCallback A future callback for path creation.
+   * @param failureCause The original cause of failure.
+   */
+  private void handleRegisterFailure(final Discoverable discoverable,
+                                     final SettableFuture<String> completion,
+                                     final FutureCallback<String> creationCallback,
+                                     final Throwable failureCause) {
+
+    final String path = getNodePath(discoverable);
+    Futures.addCallback(zkClient.exists(path), new FutureCallback<Stat>() {
+      @Override
+      public void onSuccess(Stat result) {
+        if (result == null) {
+          // If the node is gone, simply retry.
+          LOG.info("Node {} is gone. Retry registration for {}.", path, discoverable);
+          retryRegister(discoverable, creationCallback);
+          return;
+        }
+
+        long ephemeralOwner = result.getEphemeralOwner();
+        if (ephemeralOwner == 0) {
+          // It is not an ephemeral node; something is wrong.
+          LOG.error("Node {} already exists and is not an ephemeral node. Discoverable registration failed: {}.",
+                    path, discoverable);
+          completion.setException(failureCause);
+          return;
+        }
+        Long sessionId = zkClient.getSessionId();
+        if (sessionId == null || ephemeralOwner != sessionId) {
+          // This zkClient is not valid or doesn't own the ephemeral node, simply keep retrying.
+          LOG.info("Owner of {} is different. Retry registration for {}.", path, discoverable);
+          retryRegister(discoverable, creationCallback);
+        } else {
+          // This client owned the node, treat the registration as completed.
+          // This could happen if same client tries to register twice (due to mistake or failure race condition).
+          completion.set(path);
+        }
+      }
+
+      @Override
+      public void onFailure(Throwable t) {
+        // If exists call failed, simply retry creation.
+        LOG.warn("Error when getting stats on {}. Retry registration for {}.", path, discoverable);
+        retryRegister(discoverable, creationCallback);
+      }
+    }, Threads.SAME_THREAD_EXECUTOR);
+  }
+
+  private OperationFuture<String> doRegister(Discoverable discoverable) {
+    byte[] discoverableBytes = encode(discoverable);
+    return zkClient.create(getNodePath(discoverable), discoverableBytes, CreateMode.EPHEMERAL, true);
+  }
+
+  private void retryRegister(final Discoverable discoverable, final FutureCallback<String> creationCallback) {
+    retryExecutor.schedule(new Runnable() {
+
+      @Override
+      public void run() {
+        Futures.addCallback(doRegister(discoverable), creationCallback, Threads.SAME_THREAD_EXECUTOR);
+      }
+    }, RETRY_MILLIS, TimeUnit.MILLISECONDS);
+  }
+
+
+  /**
+   * Generates a unique node path for a given {@link Discoverable}.
+   * @param discoverable An instance of {@link Discoverable}.
+   * @return A node name based on the discoverable.
+   */
+  private String getNodePath(Discoverable discoverable) {
+    InetSocketAddress socketAddress = discoverable.getSocketAddress();
+    String node = Hashing.md5()
+                         .newHasher()
+                         .putBytes(socketAddress.getAddress().getAddress())
+                         .putInt(socketAddress.getPort())
+                         .hash().toString();
+
+    return String.format("/%s/%s", discoverable.getName(), node);
+  }
+
+  private Watcher createConnectionWatcher() {
+    return new Watcher() {
+      // Watcher is invoked from single event thread, hence safe to use normal mutable variable.
+      private boolean expired;
+
+      @Override
+      public void process(WatchedEvent event) {
+        if (event.getState() == Event.KeeperState.Expired) {
+          LOG.warn("ZK Session expired: {}", zkClient.getConnectString());
+          expired = true;
+        } else if (event.getState() == Event.KeeperState.SyncConnected && expired) {
+          LOG.info("Reconnected after expiration: {}", zkClient.getConnectString());
+          expired = false;
+
+          // Re-register all services
+          lock.lock();
+          try {
+            for (final Map.Entry<Discoverable, DiscoveryCancellable> entry : discoverables.entries()) {
+              LOG.info("Re-registering service: {}", entry.getKey());
+
+              // Must be non-blocking in here.
+              Futures.addCallback(doRegister(entry.getKey()), new FutureCallback<String>() {
+                @Override
+                public void onSuccess(String result) {
+                  // Updates the cancellable to the newly created sequential node.
+                  entry.getValue().setPath(result);
+                  LOG.debug("Service re-registered: {} {}", entry.getKey(), result);
+                }
+
+                @Override
+                public void onFailure(Throwable t) {
+                  // When creation of the node fails, there is no retry; simply make the cancellable do nothing.
+                  entry.getValue().setPath(null);
+                  LOG.error("Failed to re-register service: {}", entry.getKey(), t);
+                }
+              }, Threads.SAME_THREAD_EXECUTOR);
+            }
+          } finally {
+            lock.unlock();
+          }
+        }
+      }
+    };
+  }
+
+  /**
+   * Creates a CacheLoader that creates a live Iterable for watching instance changes of a given service.
+   */
+  private CacheLoader<String, Iterable<Discoverable>> createServiceLoader() {
+    return new CacheLoader<String, Iterable<Discoverable>>() {
+      @Override
+      public Iterable<Discoverable> load(String service) throws Exception {
+        // The atomic reference is to keep the resulting Iterable live. It always contains an
+        // immutable snapshot of the latest detected set of Discoverable.
+        final AtomicReference<Iterable<Discoverable>> iterable =
+              new AtomicReference<Iterable<Discoverable>>(ImmutableList.<Discoverable>of());
+        final String serviceBase = "/" + service;
+
+        // Watch for children changes in /service
+        ZKOperations.watchChildren(zkClient, serviceBase, new ZKOperations.ChildrenCallback() {
+          @Override
+          public void updated(NodeChildren nodeChildren) {
+            // Fetch data of all children nodes in parallel.
+            List<String> children = nodeChildren.getChildren();
+            List<OperationFuture<NodeData>> dataFutures = Lists.newArrayListWithCapacity(children.size());
+            for (String child : children) {
+              dataFutures.add(zkClient.getData(serviceBase + "/" + child));
+            }
+
+            // Update the service map when all fetching are done.
+            final ListenableFuture<List<NodeData>> fetchFuture = Futures.successfulAsList(dataFutures);
+            fetchFuture.addListener(new Runnable() {
+              @Override
+              public void run() {
+                ImmutableList.Builder<Discoverable> builder = ImmutableList.builder();
+                for (NodeData nodeData : Futures.getUnchecked(fetchFuture)) {
+                  // For successful fetch, decode the content.
+                  if (nodeData != null) {
+                    Discoverable discoverable = decode(nodeData.getData());
+                    if (discoverable != null) {
+                      builder.add(discoverable);
+                    }
+                  }
+                }
+                iterable.set(builder.build());
+              }
+            }, Threads.SAME_THREAD_EXECUTOR);
+          }
+        });
+
+        return new Iterable<Discoverable>() {
+          @Override
+          public Iterator<Discoverable> iterator() {
+            return iterable.get().iterator();
+          }
+        };
+      }
+    };
+  }
+
+  /**
+   * Static helper function for decoding an array of bytes into a {@link DiscoverableWrapper} object.
+   * @param bytes representing serialized {@link DiscoverableWrapper}
+   * @return null if bytes are null; else an instance of {@link DiscoverableWrapper}
+   */
+  private static Discoverable decode(byte[] bytes) {
+    if (bytes == null) {
+      return null;
+    }
+    String content = new String(bytes, Charsets.UTF_8);
+    return new GsonBuilder().registerTypeAdapter(Discoverable.class, new DiscoverableCodec())
+      .create()
+      .fromJson(content, Discoverable.class);
+  }
+
+  /**
+   * Static helper function for encoding an instance of {@link DiscoverableWrapper} into an array of bytes.
+   * @param discoverable An instance of {@link Discoverable}
+   * @return array of bytes representing an instance of <code>discoverable</code>
+   */
+  private static byte[] encode(Discoverable discoverable) {
+    return new GsonBuilder().registerTypeAdapter(DiscoverableWrapper.class, new DiscoverableCodec())
+      .create()
+      .toJson(discoverable, DiscoverableWrapper.class)
+      .getBytes(Charsets.UTF_8);
+  }
+
+  /**
+   * Inner class for cancelling (un-register) discovery service.
+   */
+  private final class DiscoveryCancellable implements Cancellable {
+
+    private final Discoverable discoverable;
+    private final AtomicBoolean cancelled;
+    private volatile String path;
+
+    DiscoveryCancellable(Discoverable discoverable) {
+      this.discoverable = discoverable;
+      this.cancelled = new AtomicBoolean();
+    }
+
+    /**
+     * Sets the zk node path representing the ephemeral sequence node of this registered discoverable.
+     * Called from the ZK event thread when creation of the node has completed, either from normal registration
+     * or from re-registration due to session expiration.
+     *
+     * @param path The path to ephemeral sequence node.
+     */
+    void setPath(String path) {
+      this.path = path;
+      if (cancelled.get() && path != null) {
+        // Simply delete the path if it's already cancelled.
+        // This handles the case where the session expired and re-registration completed after this has been cancelled.
+        // Don't bother with the result; if there is an error, there is not much we can do.
+        zkClient.delete(path);
+      }
+    }
+
+    @Override
+    public void cancel() {
+      if (!cancelled.compareAndSet(false, true)) {
+        return;
+      }
+
+      // Take a snapshot of the volatile path.
+      String path = this.path;
+
+      // If it is null, cancel() was called before the ephemeral node was created; setPath() will be
+      // called later (through the zk callback when creation completes), and the deletion will be
+      // done in setPath().
+      if (path == null) {
+        return;
+      }
+
+      // Remove this Cancellable from the map so that re-registration won't be attempted upon session expiration.
+      lock.lock();
+      try {
+        discoverables.remove(discoverable, this);
+      } finally {
+        lock.unlock();
+      }
+
+      // Delete the path. It's ok if the path does not exist
+      // (e.g. when the session expired and the node has not been re-created yet).
+      Futures.getUnchecked(ZKOperations.ignoreError(zkClient.delete(path),
+                                                    KeeperException.NoNodeException.class, path));
+      LOG.debug("Service unregistered: {} {}", discoverable, path);
+    }
+  }
+
+  /**
+   * SerDe for converting a {@link DiscoverableWrapper} into a JSON object
+   * or from a JSON object into {@link DiscoverableWrapper}.
+   */
+  private static final class DiscoverableCodec implements JsonSerializer<Discoverable>, JsonDeserializer<Discoverable> {
+
+    @Override
+    public Discoverable deserialize(JsonElement json, Type typeOfT,
+                                    JsonDeserializationContext context) throws JsonParseException {
+      JsonObject jsonObj = json.getAsJsonObject();
+      final String service = jsonObj.get("service").getAsString();
+      String hostname = jsonObj.get("hostname").getAsString();
+      int port = jsonObj.get("port").getAsInt();
+      final InetSocketAddress address = new InetSocketAddress(hostname, port);
+      return new Discoverable() {
+        @Override
+        public String getName() {
+          return service;
+        }
+
+        @Override
+        public InetSocketAddress getSocketAddress() {
+          return address;
+        }
+      };
+    }
+
+    @Override
+    public JsonElement serialize(Discoverable src, Type typeOfSrc, JsonSerializationContext context) {
+      JsonObject jsonObj = new JsonObject();
+      jsonObj.addProperty("service", src.getName());
+      jsonObj.addProperty("hostname", src.getSocketAddress().getHostName());
+      jsonObj.addProperty("port", src.getSocketAddress().getPort());
+      return jsonObj;
+    }
+  }
+}
+
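
For reference, the DiscoverableCodec above writes each Discoverable into the ZK node data as a flat JSON
object with "service", "hostname" and "port" fields. A minimal sketch of the equivalent Gson construction,
with illustrative values, and the payload it produces:

    import com.google.gson.JsonObject;

    public final class DiscoverablePayloadExample {
      public static void main(String[] args) {
        // Mirrors DiscoverableCodec.serialize: service name plus host and port of the socket address.
        JsonObject json = new JsonObject();
        json.addProperty("service", "echo");
        json.addProperty("hostname", "localhost");
        json.addProperty("port", 12345);

        // Prints: {"service":"echo","hostname":"localhost","port":12345}
        System.out.println(json);
      }
    }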

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-discovery-core/src/main/java/org/apache/twill/discovery/package-info.java
----------------------------------------------------------------------
diff --git a/twill-discovery-core/src/main/java/org/apache/twill/discovery/package-info.java b/twill-discovery-core/src/main/java/org/apache/twill/discovery/package-info.java
new file mode 100644
index 0000000..a1d6e0c
--- /dev/null
+++ b/twill-discovery-core/src/main/java/org/apache/twill/discovery/package-info.java
@@ -0,0 +1,21 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * Classes in this package provide service discovery implementations.
+ */
+package org.apache.twill.discovery;

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-discovery-core/src/test/java/org/apache/twill/discovery/InMemoryDiscoveryServiceTest.java
----------------------------------------------------------------------
diff --git a/twill-discovery-core/src/test/java/org/apache/twill/discovery/InMemoryDiscoveryServiceTest.java b/twill-discovery-core/src/test/java/org/apache/twill/discovery/InMemoryDiscoveryServiceTest.java
new file mode 100644
index 0000000..d8cc375
--- /dev/null
+++ b/twill-discovery-core/src/test/java/org/apache/twill/discovery/InMemoryDiscoveryServiceTest.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.twill.discovery;
+
+import org.apache.twill.common.Cancellable;
+import com.google.common.collect.Iterables;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.net.InetSocketAddress;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Tests for the in-memory service discovery service.
+ */
+public class InMemoryDiscoveryServiceTest {
+  private Cancellable register(DiscoveryService service, final String name, final String host, final int port) {
+    return service.register(new Discoverable() {
+      @Override
+      public String getName() {
+        return name;
+      }
+
+      @Override
+      public InetSocketAddress getSocketAddress() {
+        return new InetSocketAddress(host, port);
+      }
+    });
+  }
+
+  @Test
+  public void simpleDiscoverable() throws Exception {
+    DiscoveryService discoveryService = new InMemoryDiscoveryService();
+    DiscoveryServiceClient discoveryServiceClient = (DiscoveryServiceClient) discoveryService;
+
+    // Register one service running on one host:port
+    Cancellable cancellable = register(discoveryService, "foo", "localhost", 8090);
+    Iterable<Discoverable> discoverables = discoveryServiceClient.discover("foo");
+
+    // Discover that registered host:port.
+    Assert.assertTrue(Iterables.size(discoverables) == 1);
+
+    // Remove the service
+    cancellable.cancel();
+
+    // There should be no service.
+    discoverables = discoveryServiceClient.discover("foo");
+    TimeUnit.MILLISECONDS.sleep(100);
+    Assert.assertTrue(Iterables.size(discoverables) == 0);
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-discovery-core/src/test/java/org/apache/twill/discovery/ZKDiscoveryServiceTest.java
----------------------------------------------------------------------
diff --git a/twill-discovery-core/src/test/java/org/apache/twill/discovery/ZKDiscoveryServiceTest.java b/twill-discovery-core/src/test/java/org/apache/twill/discovery/ZKDiscoveryServiceTest.java
new file mode 100644
index 0000000..feee8db
--- /dev/null
+++ b/twill-discovery-core/src/test/java/org/apache/twill/discovery/ZKDiscoveryServiceTest.java
@@ -0,0 +1,253 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.discovery;
+
+import org.apache.twill.common.Cancellable;
+import org.apache.twill.common.Services;
+import org.apache.twill.internal.zookeeper.InMemoryZKServer;
+import org.apache.twill.internal.zookeeper.KillZKSession;
+import org.apache.twill.zookeeper.RetryStrategies;
+import org.apache.twill.zookeeper.ZKClientService;
+import org.apache.twill.zookeeper.ZKClientServices;
+import org.apache.twill.zookeeper.ZKClients;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Lists;
+import com.google.common.util.concurrent.Futures;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.net.InetSocketAddress;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Tests for the Zookeeper-based discovery service.
+ */
+public class ZKDiscoveryServiceTest {
+  private static final Logger LOG = LoggerFactory.getLogger(ZKDiscoveryServiceTest.class);
+
+  private static InMemoryZKServer zkServer;
+  private static ZKClientService zkClient;
+
+  @BeforeClass
+  public static void beforeClass() {
+    zkServer = InMemoryZKServer.builder().setTickTime(100000).build();
+    zkServer.startAndWait();
+
+    zkClient = ZKClientServices.delegate(
+      ZKClients.retryOnFailure(
+        ZKClients.reWatchOnExpire(
+          ZKClientService.Builder.of(zkServer.getConnectionStr()).build()),
+        RetryStrategies.fixDelay(1, TimeUnit.SECONDS)));
+    zkClient.startAndWait();
+  }
+
+  @AfterClass
+  public static void afterClass() {
+    Futures.getUnchecked(Services.chainStop(zkClient, zkServer));
+  }
+
+  private Cancellable register(DiscoveryService service, final String name, final String host, final int port) {
+    return service.register(new Discoverable() {
+      @Override
+      public String getName() {
+        return name;
+      }
+
+      @Override
+      public InetSocketAddress getSocketAddress() {
+        return new InetSocketAddress(host, port);
+      }
+    });
+  }
+
+
+  private boolean waitTillExpected(int expected, Iterable<Discoverable> discoverables) throws Exception {
+    for (int i = 0; i < 10; ++i) {
+      TimeUnit.MILLISECONDS.sleep(10);
+      if (Iterables.size(discoverables) == expected) {
+        return true;
+      }
+    }
+    return (Iterables.size(discoverables) == expected);
+  }
+
+  @Test (timeout = 5000)
+  public void testDoubleRegister() throws Exception {
+    ZKDiscoveryService discoveryService = new ZKDiscoveryService(zkClient);
+    DiscoveryServiceClient discoveryServiceClient = discoveryService;
+
+    // Register on the same host port, it shouldn't fail.
+    Cancellable cancellable = register(discoveryService, "test_double_reg", "localhost", 54321);
+    Cancellable cancellable2 = register(discoveryService, "test_double_reg", "localhost", 54321);
+
+    Iterable<Discoverable> discoverables = discoveryServiceClient.discover("test_double_reg");
+
+    Assert.assertTrue(waitTillExpected(1, discoverables));
+
+    cancellable.cancel();
+    cancellable2.cancel();
+
+    // Register again with two different clients, but killing session of the first one.
+    final ZKClientService zkClient2 = ZKClientServices.delegate(
+      ZKClients.retryOnFailure(
+        ZKClients.reWatchOnExpire(
+          ZKClientService.Builder.of(zkServer.getConnectionStr()).build()),
+        RetryStrategies.fixDelay(1, TimeUnit.SECONDS)));
+    zkClient2.startAndWait();
+
+    try {
+      ZKDiscoveryService discoveryService2 = new ZKDiscoveryService(zkClient2);
+      cancellable2 = register(discoveryService2, "test_multi_client", "localhost", 54321);
+
+      // Schedule a thread to shutdown zkClient2.
+      new Thread() {
+        @Override
+        public void run() {
+          try {
+            TimeUnit.SECONDS.sleep(2);
+            zkClient2.stopAndWait();
+          } catch (InterruptedException e) {
+            LOG.error(e.getMessage(), e);
+          }
+        }
+      }.start();
+
+      // This call would block until zkClient2 is shutdown.
+      cancellable = register(discoveryService, "test_multi_client", "localhost", 54321);
+      cancellable.cancel();
+
+    } finally {
+      zkClient2.stopAndWait();
+    }
+  }
+
+  @Test
+  public void testSessionExpires() throws Exception {
+    ZKDiscoveryService discoveryService = new ZKDiscoveryService(zkClient);
+    DiscoveryServiceClient discoveryServiceClient = discoveryService;
+
+    Cancellable cancellable = register(discoveryService, "test_expires", "localhost", 54321);
+
+    Iterable<Discoverable> discoverables = discoveryServiceClient.discover("test_expires");
+
+    // Discover that registered host:port.
+    Assert.assertTrue(waitTillExpected(1, discoverables));
+
+    KillZKSession.kill(zkClient.getZooKeeperSupplier().get(), zkServer.getConnectionStr(), 5000);
+
+    // Register one more endpoint to make sure state has been reflected after reconnection
+    Cancellable cancellable2 = register(discoveryService, "test_expires", "localhost", 54322);
+
+    // Reconnection would trigger re-registration.
+    Assert.assertTrue(waitTillExpected(2, discoverables));
+
+    cancellable.cancel();
+    cancellable2.cancel();
+
+    // Verify that both are now gone.
+    Assert.assertTrue(waitTillExpected(0, discoverables));
+  }
+
+  @Test
+  public void simpleDiscoverable() throws Exception {
+    DiscoveryService discoveryService = new ZKDiscoveryService(zkClient);
+    DiscoveryServiceClient discoveryServiceClient = new ZKDiscoveryService(zkClient);
+
+    // Register one service running on one host:port
+    Cancellable cancellable = register(discoveryService, "foo", "localhost", 8090);
+    Iterable<Discoverable> discoverables = discoveryServiceClient.discover("foo");
+
+    // Discover that registered host:port.
+    Assert.assertTrue(waitTillExpected(1, discoverables));
+
+    // Remove the service
+    cancellable.cancel();
+
+    // There should be no service.
+
+    discoverables = discoveryServiceClient.discover("foo");
+
+    Assert.assertTrue(waitTillExpected(0, discoverables));
+  }
+
+  @Test
+  public void manySameDiscoverable() throws Exception {
+    List<Cancellable> cancellables = Lists.newArrayList();
+    DiscoveryService discoveryService = new ZKDiscoveryService(zkClient);
+    DiscoveryServiceClient discoveryServiceClient = new ZKDiscoveryService(zkClient);
+
+    cancellables.add(register(discoveryService, "manyDiscoverable", "localhost", 1));
+    cancellables.add(register(discoveryService, "manyDiscoverable", "localhost", 2));
+    cancellables.add(register(discoveryService, "manyDiscoverable", "localhost", 3));
+    cancellables.add(register(discoveryService, "manyDiscoverable", "localhost", 4));
+    cancellables.add(register(discoveryService, "manyDiscoverable", "localhost", 5));
+
+    Iterable<Discoverable> discoverables = discoveryServiceClient.discover("manyDiscoverable");
+    Assert.assertTrue(waitTillExpected(5, discoverables));
+
+    for (int i = 0; i < 5; i++) {
+      cancellables.get(i).cancel();
+      Assert.assertTrue(waitTillExpected(4 - i, discoverables));
+    }
+  }
+
+  @Test
+  public void multiServiceDiscoverable() throws Exception {
+    List<Cancellable> cancellables = Lists.newArrayList();
+    DiscoveryService discoveryService = new ZKDiscoveryService(zkClient);
+    DiscoveryServiceClient discoveryServiceClient = new ZKDiscoveryService(zkClient);
+
+    cancellables.add(register(discoveryService, "service1", "localhost", 1));
+    cancellables.add(register(discoveryService, "service1", "localhost", 2));
+    cancellables.add(register(discoveryService, "service1", "localhost", 3));
+    cancellables.add(register(discoveryService, "service1", "localhost", 4));
+    cancellables.add(register(discoveryService, "service1", "localhost", 5));
+
+    cancellables.add(register(discoveryService, "service2", "localhost", 1));
+    cancellables.add(register(discoveryService, "service2", "localhost", 2));
+    cancellables.add(register(discoveryService, "service2", "localhost", 3));
+
+    cancellables.add(register(discoveryService, "service3", "localhost", 1));
+    cancellables.add(register(discoveryService, "service3", "localhost", 2));
+
+    Iterable<Discoverable> discoverables = discoveryServiceClient.discover("service1");
+    Assert.assertTrue(waitTillExpected(5, discoverables));
+
+    discoverables = discoveryServiceClient.discover("service2");
+    Assert.assertTrue(waitTillExpected(3, discoverables));
+
+    discoverables = discoveryServiceClient.discover("service3");
+    Assert.assertTrue(waitTillExpected(2, discoverables));
+
+    cancellables.add(register(discoveryService, "service3", "localhost", 3));
+    Assert.assertTrue(waitTillExpected(3, discoverables)); // Shows live iterator.
+
+    for (Cancellable cancellable : cancellables) {
+      cancellable.cancel();
+    }
+
+    Assert.assertTrue(waitTillExpected(0, discoveryServiceClient.discover("service1")));
+    Assert.assertTrue(waitTillExpected(0, discoveryServiceClient.discover("service2")));
+    Assert.assertTrue(waitTillExpected(0, discoveryServiceClient.discover("service3")));
+  }
+}
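
The tests above exercise registration, lookup, and cancellation against a single ZKDiscoveryService (the register(...) calls go through a test helper defined earlier in this file). As a rough, hedged sketch of the same round trip outside of a test, assuming the usual Twill Discoverable/Cancellable contract and a ZKClientService built from a connection string (imports and error handling elided):

    ZKClientService zkClientService = ZKClientService.Builder.of("localhost:2181").build();
    zkClientService.startAndWait();

    ZKDiscoveryService discovery = new ZKDiscoveryService(zkClientService);

    // Announce an endpoint; cancelling the returned handle withdraws the registration.
    Cancellable cancellable = discovery.register(new Discoverable() {
      @Override
      public String getName() {
        return "web";
      }

      @Override
      public InetSocketAddress getSocketAddress() {
        return new InetSocketAddress("localhost", 8080);
      }
    });

    // The Iterable returned by discover() is live, as the multiServiceDiscoverable test shows:
    // it reflects registrations and cancellations that happen after the call.
    for (Discoverable discoverable : discovery.discover("web")) {
      System.out.println(discoverable.getSocketAddress());
    }

    cancellable.cancel();
    zkClientService.stopAndWait();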

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-discovery-core/src/test/resources/logback-test.xml
----------------------------------------------------------------------
diff --git a/twill-discovery-core/src/test/resources/logback-test.xml b/twill-discovery-core/src/test/resources/logback-test.xml
new file mode 100644
index 0000000..2615cb4
--- /dev/null
+++ b/twill-discovery-core/src/test/resources/logback-test.xml
@@ -0,0 +1,17 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<!-- Default logback configuration for twill library -->
+<configuration>
+    <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
+        <encoder>
+            <pattern>%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n</pattern>
+        </encoder>
+    </appender>
+
+    <logger name="org.apache.twill" level="DEBUG" />
+
+    <root level="WARN">
+        <appender-ref ref="STDOUT"/>
+    </root>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/pom.xml
----------------------------------------------------------------------
diff --git a/twill-yarn/pom.xml b/twill-yarn/pom.xml
new file mode 100644
index 0000000..b11bc7a
--- /dev/null
+++ b/twill-yarn/pom.xml
@@ -0,0 +1,127 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <parent>
+        <artifactId>twill-parent</artifactId>
+        <groupId>org.apache.twill</groupId>
+        <version>0.1.0-SNAPSHOT</version>
+    </parent>
+    <modelVersion>4.0.0</modelVersion>
+
+    <artifactId>twill-yarn</artifactId>
+    <name>Twill Apache Hadoop YARN library</name>
+
+    <properties>
+        <output.dir>target/classes</output.dir>
+    </properties>
+
+    <dependencies>
+        <dependency>
+            <groupId>${project.groupId}</groupId>
+            <artifactId>twill-core</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>${project.groupId}</groupId>
+            <artifactId>twill-discovery-core</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>com.google.guava</groupId>
+            <artifactId>guava</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.slf4j</groupId>
+            <artifactId>slf4j-api</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.slf4j</groupId>
+            <artifactId>jcl-over-slf4j</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.hadoop</groupId>
+            <artifactId>hadoop-yarn-api</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.hadoop</groupId>
+            <artifactId>hadoop-yarn-common</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.hadoop</groupId>
+            <artifactId>hadoop-yarn-client</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.hadoop</groupId>
+            <artifactId>hadoop-common</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.hadoop</groupId>
+            <artifactId>hadoop-hdfs</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.hadoop</groupId>
+            <artifactId>hadoop-minicluster</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>junit</groupId>
+            <artifactId>junit</artifactId>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <outputDirectory>${output.dir}</outputDirectory>
+    </build>
+
+    <profiles>
+        <profile>
+            <id>hadoop-2.0</id>
+            <properties>
+                <output.dir>${hadoop20.output.dir}</output.dir>
+            </properties>
+        </profile>
+        <profile>
+            <id>hadoop-2.1</id>
+            <build>
+                <resources>
+                    <resource>
+                        <directory>${hadoop20.output.dir}</directory>
+                    </resource>
+                    <resource>
+                        <directory>src/main/resources</directory>
+                    </resource>
+                </resources>
+            </build>
+        </profile>
+        <profile>
+            <id>hadoop-2.2</id>
+            <build>
+                <resources>
+                    <resource>
+                        <directory>${hadoop20.output.dir}</directory>
+                    </resource>
+                    <resource>
+                        <directory>src/main/resources</directory>
+                    </resource>
+                </resources>
+            </build>
+        </profile>
+    </profiles>
+</project>

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/main/hadoop20/org/apache/twill/internal/yarn/Hadoop20YarnAMClient.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/main/hadoop20/org/apache/twill/internal/yarn/Hadoop20YarnAMClient.java b/twill-yarn/src/main/hadoop20/org/apache/twill/internal/yarn/Hadoop20YarnAMClient.java
new file mode 100644
index 0000000..d98dee1
--- /dev/null
+++ b/twill-yarn/src/main/hadoop20/org/apache/twill/internal/yarn/Hadoop20YarnAMClient.java
@@ -0,0 +1,213 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.yarn;
+
+import org.apache.twill.internal.ProcessLauncher;
+import org.apache.twill.internal.appmaster.RunnableProcessLauncher;
+import org.apache.twill.internal.yarn.ports.AMRMClient;
+import org.apache.twill.internal.yarn.ports.AMRMClientImpl;
+import org.apache.twill.internal.yarn.ports.AllocationResponse;
+import com.google.common.base.Function;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ArrayListMultimap;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Multimap;
+import com.google.common.util.concurrent.AbstractIdleService;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.ApplicationConstants;
+import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.ipc.YarnRPC;
+import org.apache.hadoop.yarn.util.ConverterUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.net.InetSocketAddress;
+import java.net.URL;
+import java.util.List;
+import java.util.UUID;
+
+/**
+ * An implementation of {@link YarnAMClient} for Hadoop 2.0.
+ */
+public final class Hadoop20YarnAMClient extends AbstractIdleService implements YarnAMClient {
+
+  private static final Logger LOG = LoggerFactory.getLogger(Hadoop20YarnAMClient.class);
+  private static final Function<ContainerStatus, YarnContainerStatus> STATUS_TRANSFORM;
+
+  static {
+    STATUS_TRANSFORM = new Function<ContainerStatus, YarnContainerStatus>() {
+      @Override
+      public YarnContainerStatus apply(ContainerStatus status) {
+        return new Hadoop20YarnContainerStatus(status);
+      }
+    };
+  }
+
+  private final ContainerId containerId;
+  private final Multimap<String, AMRMClient.ContainerRequest> containerRequests;
+  private final AMRMClient amrmClient;
+  private final YarnNMClient nmClient;
+  private InetSocketAddress trackerAddr;
+  private URL trackerUrl;
+  private Resource maxCapability;
+  private Resource minCapability;
+
+  public Hadoop20YarnAMClient(Configuration conf) {
+    String masterContainerId = System.getenv().get(ApplicationConstants.AM_CONTAINER_ID_ENV);
+    Preconditions.checkArgument(masterContainerId != null,
+                                "Missing %s from environment", ApplicationConstants.AM_CONTAINER_ID_ENV);
+    this.containerId = ConverterUtils.toContainerId(masterContainerId);
+    this.containerRequests = ArrayListMultimap.create();
+
+    this.amrmClient = new AMRMClientImpl(containerId.getApplicationAttemptId());
+    this.amrmClient.init(conf);
+    this.nmClient = new Hadoop20YarnNMClient(YarnRPC.create(conf), conf);
+  }
+
+  @Override
+  protected void startUp() throws Exception {
+    Preconditions.checkNotNull(trackerAddr, "Tracker address not set.");
+    Preconditions.checkNotNull(trackerUrl, "Tracker URL not set.");
+
+    amrmClient.start();
+
+    RegisterApplicationMasterResponse response = amrmClient.registerApplicationMaster(trackerAddr.getHostName(),
+                                                                                      trackerAddr.getPort(),
+                                                                                      trackerUrl.toString());
+    maxCapability = response.getMaximumResourceCapability();
+    minCapability = response.getMinimumResourceCapability();
+  }
+
+  @Override
+  protected void shutDown() throws Exception {
+    amrmClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED, null, trackerUrl.toString());
+    amrmClient.stop();
+  }
+
+  @Override
+  public ContainerId getContainerId() {
+    return containerId;
+  }
+
+  @Override
+  public String getHost() {
+    return System.getenv().get(ApplicationConstants.NM_HOST_ENV);
+  }
+
+  @Override
+  public void setTracker(InetSocketAddress trackerAddr, URL trackerUrl) {
+    this.trackerAddr = trackerAddr;
+    this.trackerUrl = trackerUrl;
+  }
+
+  @Override
+  public synchronized void allocate(float progress, AllocateHandler handler) throws Exception {
+    AllocationResponse response = amrmClient.allocate(progress);
+    List<ProcessLauncher<YarnContainerInfo>> launchers
+      = Lists.newArrayListWithCapacity(response.getAllocatedContainers().size());
+
+    for (Container container : response.getAllocatedContainers()) {
+      launchers.add(new RunnableProcessLauncher(new Hadoop20YarnContainerInfo(container), nmClient));
+    }
+
+    if (!launchers.isEmpty()) {
+      handler.acquired(launchers);
+
+      // If no process has been launched through the given launcher, return the container.
+      for (ProcessLauncher<YarnContainerInfo> l : launchers) {
+        // This cast always works.
+        RunnableProcessLauncher launcher = (RunnableProcessLauncher) l;
+        if (!launcher.isLaunched()) {
+          Container container = launcher.getContainerInfo().getContainer();
+          LOG.info("Nothing to run in container, releasing it: {}", container);
+          amrmClient.releaseAssignedContainer(container.getId());
+        }
+      }
+    }
+
+    List<YarnContainerStatus> completed = ImmutableList.copyOf(
+      Iterables.transform(response.getCompletedContainersStatuses(), STATUS_TRANSFORM));
+    if (!completed.isEmpty()) {
+      handler.completed(completed);
+    }
+  }
+
+  @Override
+  public ContainerRequestBuilder addContainerRequest(Resource capability) {
+    return addContainerRequest(capability, 1);
+  }
+
+  @Override
+  public ContainerRequestBuilder addContainerRequest(Resource capability, int count) {
+    return new ContainerRequestBuilder(adjustCapability(capability), count) {
+      @Override
+      public String apply() {
+        synchronized (Hadoop20YarnAMClient.this) {
+          String id = UUID.randomUUID().toString();
+
+          String[] hosts = this.hosts.isEmpty() ? null : this.hosts.toArray(new String[this.hosts.size()]);
+          String[] racks = this.racks.isEmpty() ? null : this.racks.toArray(new String[this.racks.size()]);
+
+          for (int i = 0; i < count; i++) {
+            AMRMClient.ContainerRequest request = new AMRMClient.ContainerRequest(capability, hosts, racks,
+                                                                                  priority, 1);
+            containerRequests.put(id, request);
+            amrmClient.addContainerRequest(request);
+          }
+
+          return id;
+        }
+      }
+    };
+  }
+
+  @Override
+  public synchronized void completeContainerRequest(String id) {
+    for (AMRMClient.ContainerRequest request : containerRequests.removeAll(id)) {
+      amrmClient.removeContainerRequest(request);
+    }
+  }
+
+  private Resource adjustCapability(Resource resource) {
+    int cores = YarnUtils.getVirtualCores(resource);
+    int updatedCores = Math.max(Math.min(cores, YarnUtils.getVirtualCores(maxCapability)),
+                                YarnUtils.getVirtualCores(minCapability));
+    // Try to set the virtual cores; older versions of YARN do not support this.
+    if (cores != updatedCores && YarnUtils.setVirtualCores(resource, updatedCores)) {
+      LOG.info("Adjust virtual cores requirement from {} to {}.", cores, updatedCores);
+    }
+
+    int updatedMemory = Math.min(resource.getMemory(), maxCapability.getMemory());
+    int minMemory = minCapability.getMemory();
+    updatedMemory = (int) Math.ceil(((double) updatedMemory / minMemory)) * minMemory;
+
+    if (resource.getMemory() != updatedMemory) {
+      LOG.info("Adjust memory requirement from {} to {} MB.", resource.getMemory(), updatedMemory);
+      resource.setMemory(updatedMemory);
+    }
+
+    return resource;
+  }
+}
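
For orientation, here is a minimal, hedged sketch of how an application master loop might drive this client. The AllocateHandler callback type is inferred from the allocate(...) signature above, and Configuration/Records come from the Hadoop YARN API; the sketch is only meaningful inside an AM container, since the constructor reads the container id from the environment.

    // Only valid inside an AM container: the constructor requires AM_CONTAINER_ID_ENV.
    YarnAMClient amClient = new Hadoop20YarnAMClient(new Configuration());
    amClient.setTracker(new InetSocketAddress("localhost", 0), new URL("http://localhost:8080/"));
    amClient.startAndWait();

    // Request one container with 512 MB; adjustCapability() rounds this up to the cluster's
    // minimum allocation if needed. The returned id is used to retire the request later.
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(512);
    String requestId = amClient.addContainerRequest(capability, 1).apply();

    // One heartbeat: report progress and react to acquired or completed containers.
    amClient.allocate(0.0f, new YarnAMClient.AllocateHandler() {
      @Override
      public void acquired(List<ProcessLauncher<YarnContainerInfo>> launchers) {
        // Launch a runnable in each container; launchers left unused cause the
        // container to be released back to the ResourceManager.
      }

      @Override
      public void completed(List<YarnContainerStatus> completed) {
        // Inspect the exit status of finished containers.
      }
    });

    amClient.completeContainerRequest(requestId);
    amClient.stopAndWait();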

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/main/hadoop20/org/apache/twill/internal/yarn/Hadoop20YarnAppClient.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/main/hadoop20/org/apache/twill/internal/yarn/Hadoop20YarnAppClient.java b/twill-yarn/src/main/hadoop20/org/apache/twill/internal/yarn/Hadoop20YarnAppClient.java
new file mode 100644
index 0000000..bfec34e
--- /dev/null
+++ b/twill-yarn/src/main/hadoop20/org/apache/twill/internal/yarn/Hadoop20YarnAppClient.java
@@ -0,0 +1,197 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.yarn;
+
+import org.apache.twill.api.TwillSpecification;
+import org.apache.twill.internal.ProcessController;
+import org.apache.twill.internal.ProcessLauncher;
+import org.apache.twill.internal.appmaster.ApplicationMasterProcessLauncher;
+import org.apache.twill.internal.appmaster.ApplicationSubmitter;
+import com.google.common.base.Throwables;
+import com.google.common.util.concurrent.AbstractIdleService;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+import org.apache.hadoop.yarn.api.records.DelegationToken;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.client.YarnClient;
+import org.apache.hadoop.yarn.client.YarnClientImpl;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+import org.apache.hadoop.yarn.util.Records;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.net.InetSocketAddress;
+
+/**
+ * An implementation of {@link YarnAppClient} for Hadoop 2.0.
+ */
+public final class Hadoop20YarnAppClient extends AbstractIdleService implements YarnAppClient {
+
+  private static final Logger LOG = LoggerFactory.getLogger(Hadoop20YarnAppClient.class);
+  private final YarnClient yarnClient;
+  private String user;
+
+  public Hadoop20YarnAppClient(Configuration configuration) {
+    this.yarnClient = new YarnClientImpl();
+    yarnClient.init(configuration);
+    this.user = System.getProperty("user.name");
+  }
+
+  @Override
+  public ProcessLauncher<ApplicationId> createLauncher(TwillSpecification twillSpec) throws Exception {
+    // Request for new application
+    final GetNewApplicationResponse response = yarnClient.getNewApplication();
+    final ApplicationId appId = response.getApplicationId();
+
+    // Setup the context for application submission
+    final ApplicationSubmissionContext appSubmissionContext = Records.newRecord(ApplicationSubmissionContext.class);
+    appSubmissionContext.setApplicationId(appId);
+    appSubmissionContext.setApplicationName(twillSpec.getName());
+    appSubmissionContext.setUser(user);
+
+    ApplicationSubmitter submitter = new ApplicationSubmitter() {
+
+      @Override
+      public ProcessController<YarnApplicationReport> submit(YarnLaunchContext launchContext, Resource capability) {
+        ContainerLaunchContext context = launchContext.getLaunchContext();
+        addRMToken(context);
+        context.setUser(appSubmissionContext.getUser());
+        context.setResource(adjustMemory(response, capability));
+        appSubmissionContext.setAMContainerSpec(context);
+
+        try {
+          yarnClient.submitApplication(appSubmissionContext);
+          return new ProcessControllerImpl(yarnClient, appId);
+        } catch (YarnRemoteException e) {
+          LOG.error("Failed to submit application {}", appId, e);
+          throw Throwables.propagate(e);
+        }
+      }
+    };
+
+    return new ApplicationMasterProcessLauncher(appId, submitter);
+  }
+
+  private Resource adjustMemory(GetNewApplicationResponse response, Resource capability) {
+    int minMemory = response.getMinimumResourceCapability().getMemory();
+
+    int updatedMemory = Math.min(capability.getMemory(), response.getMaximumResourceCapability().getMemory());
+    updatedMemory = (int) Math.ceil(((double) updatedMemory / minMemory)) * minMemory;
+
+    if (updatedMemory != capability.getMemory()) {
+      capability.setMemory(updatedMemory);
+    }
+
+    return capability;
+  }
+
+  private void addRMToken(ContainerLaunchContext context) {
+    if (!UserGroupInformation.isSecurityEnabled()) {
+      return;
+    }
+
+    try {
+      Credentials credentials = YarnUtils.decodeCredentials(context.getContainerTokens());
+
+      Configuration config = yarnClient.getConfig();
+      Token<TokenIdentifier> token = convertToken(
+        yarnClient.getRMDelegationToken(new Text(YarnUtils.getYarnTokenRenewer(config))),
+        YarnUtils.getRMAddress(config));
+
+      LOG.info("Added RM delegation token {}", token);
+      credentials.addToken(token.getService(), token);
+
+      context.setContainerTokens(YarnUtils.encodeCredentials(credentials));
+
+    } catch (Exception e) {
+      LOG.error("Fails to create credentials.", e);
+      throw Throwables.propagate(e);
+    }
+  }
+
+  private <T extends TokenIdentifier> Token<T> convertToken(DelegationToken protoToken, InetSocketAddress serviceAddr) {
+    Token<T> token = new Token<T>(protoToken.getIdentifier().array(),
+                                  protoToken.getPassword().array(),
+                                  new Text(protoToken.getKind()),
+                                  new Text(protoToken.getService()));
+    if (serviceAddr != null) {
+      SecurityUtil.setTokenService(token, serviceAddr);
+    }
+    return token;
+  }
+
+  @Override
+  public ProcessLauncher<ApplicationId> createLauncher(String user, TwillSpecification twillSpec) throws Exception {
+    this.user = user;
+    return createLauncher(twillSpec);
+  }
+
+  @Override
+  public ProcessController<YarnApplicationReport> createProcessController(ApplicationId appId) {
+    return new ProcessControllerImpl(yarnClient, appId);
+  }
+
+  @Override
+  protected void startUp() throws Exception {
+    yarnClient.start();
+  }
+
+  @Override
+  protected void shutDown() throws Exception {
+    yarnClient.stop();
+  }
+
+  private static final class ProcessControllerImpl implements ProcessController<YarnApplicationReport> {
+    private final YarnClient yarnClient;
+    private final ApplicationId appId;
+
+    public ProcessControllerImpl(YarnClient yarnClient, ApplicationId appId) {
+      this.yarnClient = yarnClient;
+      this.appId = appId;
+    }
+
+    @Override
+    public YarnApplicationReport getReport() {
+      try {
+        return new Hadoop20YarnApplicationReport(yarnClient.getApplicationReport(appId));
+      } catch (YarnRemoteException e) {
+        LOG.error("Failed to get application report {}", appId, e);
+        throw Throwables.propagate(e);
+      }
+    }
+
+    @Override
+    public void cancel() {
+      try {
+        yarnClient.killApplication(appId);
+      } catch (YarnRemoteException e) {
+        LOG.error("Failed to kill application {}", appId, e);
+        throw Throwables.propagate(e);
+      }
+    }
+  }
+}
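
As a hedged sketch of the client side, assuming a TwillSpecification has already been built elsewhere (twillSpec and appId below are illustrative placeholders, not code from this commit):

    YarnAppClient appClient = new Hadoop20YarnAppClient(new Configuration());
    appClient.startAndWait();

    // Bind a launcher to a fresh application id; the actual AM submission goes through the
    // ApplicationSubmitter wired into the ApplicationMasterProcessLauncher above.
    ProcessLauncher<ApplicationId> launcher = appClient.createLauncher(twillSpec);

    // For an already submitted application, the controller exposes the report and a kill switch.
    ProcessController<YarnApplicationReport> controller = appClient.createProcessController(appId);
    YarnApplicationReport report = controller.getReport();
    System.out.println(report.getYarnApplicationState());

    appClient.stopAndWait();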

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/main/hadoop20/org/apache/twill/internal/yarn/Hadoop20YarnApplicationReport.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/main/hadoop20/org/apache/twill/internal/yarn/Hadoop20YarnApplicationReport.java b/twill-yarn/src/main/hadoop20/org/apache/twill/internal/yarn/Hadoop20YarnApplicationReport.java
new file mode 100644
index 0000000..6c1b764
--- /dev/null
+++ b/twill-yarn/src/main/hadoop20/org/apache/twill/internal/yarn/Hadoop20YarnApplicationReport.java
@@ -0,0 +1,107 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.yarn;
+
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
+import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
+import org.apache.hadoop.yarn.api.records.YarnApplicationState;
+
+/**
+ * An implementation of {@link YarnApplicationReport} that wraps a Hadoop 2.0 {@link ApplicationReport}.
+ */
+public final class Hadoop20YarnApplicationReport implements YarnApplicationReport {
+
+  private final ApplicationReport report;
+
+  public Hadoop20YarnApplicationReport(ApplicationReport report) {
+    this.report = report;
+  }
+
+  @Override
+  public ApplicationId getApplicationId() {
+    return report.getApplicationId();
+  }
+
+  @Override
+  public ApplicationAttemptId getCurrentApplicationAttemptId() {
+    return report.getCurrentApplicationAttemptId();
+  }
+
+  @Override
+  public String getQueue() {
+    return report.getQueue();
+  }
+
+  @Override
+  public String getName() {
+    return report.getName();
+  }
+
+  @Override
+  public String getHost() {
+    return report.getHost();
+  }
+
+  @Override
+  public int getRpcPort() {
+    return report.getRpcPort();
+  }
+
+  @Override
+  public YarnApplicationState getYarnApplicationState() {
+    return report.getYarnApplicationState();
+  }
+
+  @Override
+  public String getDiagnostics() {
+    return report.getDiagnostics();
+  }
+
+  @Override
+  public String getTrackingUrl() {
+    return report.getTrackingUrl();
+  }
+
+  @Override
+  public String getOriginalTrackingUrl() {
+    return report.getOriginalTrackingUrl();
+  }
+
+  @Override
+  public long getStartTime() {
+    return report.getStartTime();
+  }
+
+  @Override
+  public long getFinishTime() {
+    return report.getFinishTime();
+  }
+
+  @Override
+  public FinalApplicationStatus getFinalApplicationStatus() {
+    return report.getFinalApplicationStatus();
+  }
+
+  @Override
+  public ApplicationResourceUsageReport getApplicationResourceUsageReport() {
+    return report.getApplicationResourceUsageReport();
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/main/hadoop20/org/apache/twill/internal/yarn/Hadoop20YarnContainerInfo.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/main/hadoop20/org/apache/twill/internal/yarn/Hadoop20YarnContainerInfo.java b/twill-yarn/src/main/hadoop20/org/apache/twill/internal/yarn/Hadoop20YarnContainerInfo.java
new file mode 100644
index 0000000..79b2cb5
--- /dev/null
+++ b/twill-yarn/src/main/hadoop20/org/apache/twill/internal/yarn/Hadoop20YarnContainerInfo.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.yarn;
+
+import com.google.common.base.Throwables;
+import org.apache.hadoop.yarn.api.records.Container;
+
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+
+/**
+ * An implementation of {@link YarnContainerInfo} that wraps a Hadoop 2.0 {@link Container}.
+ */
+public final class Hadoop20YarnContainerInfo implements YarnContainerInfo {
+
+  private final Container container;
+
+  public Hadoop20YarnContainerInfo(Container container) {
+    this.container = container;
+  }
+
+  @Override
+  public <T> T getContainer() {
+    return (T) container;
+  }
+
+  @Override
+  public String getId() {
+    return container.getId().toString();
+  }
+
+  @Override
+  public InetAddress getHost() {
+    try {
+      return InetAddress.getByName(container.getNodeId().getHost());
+    } catch (UnknownHostException e) {
+      throw Throwables.propagate(e);
+    }
+  }
+
+  @Override
+  public int getPort() {
+    return container.getNodeId().getPort();
+  }
+
+  @Override
+  public int getMemoryMB() {
+    return container.getResource().getMemory();
+  }
+
+  @Override
+  public int getVirtualCores() {
+    return YarnUtils.getVirtualCores(container.getResource());
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/main/hadoop20/org/apache/twill/internal/yarn/Hadoop20YarnContainerStatus.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/main/hadoop20/org/apache/twill/internal/yarn/Hadoop20YarnContainerStatus.java b/twill-yarn/src/main/hadoop20/org/apache/twill/internal/yarn/Hadoop20YarnContainerStatus.java
new file mode 100644
index 0000000..cc61856
--- /dev/null
+++ b/twill-yarn/src/main/hadoop20/org/apache/twill/internal/yarn/Hadoop20YarnContainerStatus.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.yarn;
+
+import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+
+/**
+ * An implementation of {@link YarnContainerStatus} that wraps a Hadoop 2.0 {@link ContainerStatus}.
+ */
+public final class Hadoop20YarnContainerStatus implements YarnContainerStatus {
+
+  private final ContainerStatus containerStatus;
+
+  public Hadoop20YarnContainerStatus(ContainerStatus containerStatus) {
+    this.containerStatus = containerStatus;
+  }
+
+  @Override
+  public String getContainerId() {
+    return containerStatus.getContainerId().toString();
+  }
+
+  @Override
+  public ContainerState getState() {
+    return containerStatus.getState();
+  }
+
+  @Override
+  public int getExitStatus() {
+    return containerStatus.getExitStatus();
+  }
+
+  @Override
+  public String getDiagnostics() {
+    return containerStatus.getDiagnostics();
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/main/hadoop20/org/apache/twill/internal/yarn/Hadoop20YarnLaunchContext.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/main/hadoop20/org/apache/twill/internal/yarn/Hadoop20YarnLaunchContext.java b/twill-yarn/src/main/hadoop20/org/apache/twill/internal/yarn/Hadoop20YarnLaunchContext.java
new file mode 100644
index 0000000..b1f6d66
--- /dev/null
+++ b/twill-yarn/src/main/hadoop20/org/apache/twill/internal/yarn/Hadoop20YarnLaunchContext.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.yarn;
+
+import com.google.common.base.Function;
+import com.google.common.collect.Maps;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
+import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+import org.apache.hadoop.yarn.api.records.LocalResource;
+import org.apache.hadoop.yarn.util.Records;
+
+import java.nio.ByteBuffer;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * An implementation of {@link YarnLaunchContext} that wraps a Hadoop 2.0 {@link ContainerLaunchContext}.
+ */
+public final class Hadoop20YarnLaunchContext implements YarnLaunchContext {
+
+  private static final Function<YarnLocalResource, LocalResource> RESOURCE_TRANSFORM;
+
+  static {
+    // Creates transform function from YarnLocalResource -> LocalResource
+    RESOURCE_TRANSFORM = new Function<YarnLocalResource, LocalResource>() {
+      @Override
+      public LocalResource apply(YarnLocalResource input) {
+        return input.getLocalResource();
+      }
+    };
+  }
+
+  private final ContainerLaunchContext launchContext;
+
+  public Hadoop20YarnLaunchContext() {
+    launchContext = Records.newRecord(ContainerLaunchContext.class);
+  }
+
+  @Override
+  public <T> T getLaunchContext() {
+    return (T) launchContext;
+  }
+
+  @Override
+  public void setCredentials(Credentials credentials) {
+    launchContext.setContainerTokens(YarnUtils.encodeCredentials(credentials));
+  }
+
+  @Override
+  public void setLocalResources(Map<String, YarnLocalResource> localResources) {
+    launchContext.setLocalResources(Maps.transformValues(localResources, RESOURCE_TRANSFORM));
+  }
+
+  @Override
+  public void setServiceData(Map<String, ByteBuffer> serviceData) {
+    launchContext.setServiceData(serviceData);
+  }
+
+  @Override
+  public Map<String, String> getEnvironment() {
+    return launchContext.getEnvironment();
+  }
+
+  @Override
+  public void setEnvironment(Map<String, String> environment) {
+    launchContext.setEnvironment(environment);
+  }
+
+  @Override
+  public List<String> getCommands() {
+    return launchContext.getCommands();
+  }
+
+  @Override
+  public void setCommands(List<String> commands) {
+    launchContext.setCommands(commands);
+  }
+
+  @Override
+  public void setApplicationACLs(Map<ApplicationAccessType, String> acls) {
+    launchContext.setApplicationACLs(acls);
+  }
+}
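
A small, hedged usage sketch of this adapter; the environment and command values below are placeholders. It populates the wrapped record and hands it back through getLaunchContext() when the container is submitted.

    YarnLaunchContext launchContext = new Hadoop20YarnLaunchContext();
    launchContext.setEnvironment(ImmutableMap.of("TWILL_APP_NAME", "example"));
    launchContext.setCommands(ImmutableList.of("java", "-version"));

    // The underlying Hadoop 2.0 record is what actually gets submitted with the container.
    ContainerLaunchContext context = launchContext.getLaunchContext();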


[18/28] Making maven site works.

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/ZKServiceDecorator.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/ZKServiceDecorator.java b/twill-core/src/main/java/org/apache/twill/internal/ZKServiceDecorator.java
new file mode 100644
index 0000000..7313d33
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/ZKServiceDecorator.java
@@ -0,0 +1,482 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal;
+
+import org.apache.twill.api.RunId;
+import org.apache.twill.api.ServiceController;
+import org.apache.twill.common.ServiceListenerAdapter;
+import org.apache.twill.common.Threads;
+import org.apache.twill.internal.json.StackTraceElementCodec;
+import org.apache.twill.internal.json.StateNodeCodec;
+import org.apache.twill.internal.state.Message;
+import org.apache.twill.internal.state.MessageCallback;
+import org.apache.twill.internal.state.MessageCodec;
+import org.apache.twill.internal.state.StateNode;
+import org.apache.twill.internal.state.SystemMessages;
+import org.apache.twill.zookeeper.NodeChildren;
+import org.apache.twill.zookeeper.NodeData;
+import org.apache.twill.zookeeper.OperationFuture;
+import org.apache.twill.zookeeper.ZKClient;
+import org.apache.twill.zookeeper.ZKOperations;
+import com.google.common.base.Charsets;
+import com.google.common.base.Supplier;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
+import com.google.common.util.concurrent.AbstractService;
+import com.google.common.util.concurrent.AsyncFunction;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.MoreExecutors;
+import com.google.common.util.concurrent.Service;
+import com.google.gson.Gson;
+import com.google.gson.GsonBuilder;
+import com.google.gson.JsonElement;
+import com.google.gson.JsonObject;
+import org.apache.zookeeper.CreateMode;
+import org.apache.zookeeper.KeeperException;
+import org.apache.zookeeper.WatchedEvent;
+import org.apache.zookeeper.Watcher;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.annotation.Nullable;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.Executor;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+
+/**
+ * A {@link Service} decorator that wraps another {@link Service}, with the service states reflected
+ * in ZooKeeper.
+ */
+public final class ZKServiceDecorator extends AbstractService {
+
+  private static final Logger LOG = LoggerFactory.getLogger(ZKServiceDecorator.class);
+
+  private final ZKClient zkClient;
+  private final RunId id;
+  private final Supplier<? extends JsonElement> liveNodeData;
+  private final Service decoratedService;
+  private final MessageCallbackCaller messageCallback;
+  private ExecutorService callbackExecutor;
+
+
+  public ZKServiceDecorator(ZKClient zkClient, RunId id, Supplier<? extends JsonElement> liveNodeData,
+                            Service decoratedService) {
+    this(zkClient, id, liveNodeData, decoratedService, null);
+  }
+
+  /**
+   * Creates a ZKServiceDecorator.
+   * @param zkClient ZooKeeper client
+   * @param id The run id of the service
+   * @param liveNodeData A supplier that provides the content written to the live node.
+   * @param decoratedService The Service for monitoring state changes
+   * @param finalizer An optional Runnable to run when this decorator has terminated.
+   */
+  public ZKServiceDecorator(ZKClient zkClient, RunId id, Supplier<? extends JsonElement> liveNodeData,
+                            Service decoratedService, @Nullable Runnable finalizer) {
+    this.zkClient = zkClient;
+    this.id = id;
+    this.liveNodeData = liveNodeData;
+    this.decoratedService = decoratedService;
+    if (decoratedService instanceof MessageCallback) {
+      this.messageCallback = new MessageCallbackCaller((MessageCallback) decoratedService, zkClient);
+    } else {
+      this.messageCallback = new MessageCallbackCaller(zkClient);
+    }
+    if (finalizer != null) {
+      addFinalizer(finalizer);
+    }
+  }
+
+  /**
+   * Deletes the given ZK path recursively and create the path again.
+   */
+  private ListenableFuture<String> deleteAndCreate(final String path, final byte[] data, final CreateMode mode) {
+    return Futures.transform(ZKOperations.ignoreError(ZKOperations.recursiveDelete(zkClient, path),
+                                                      KeeperException.NoNodeException.class, null),
+                             new AsyncFunction<String, String>() {
+      @Override
+      public ListenableFuture<String> apply(String input) throws Exception {
+        return zkClient.create(path, data, mode);
+      }
+    }, Threads.SAME_THREAD_EXECUTOR);
+  }
+
+  @Override
+  protected void doStart() {
+    callbackExecutor = Executors.newSingleThreadExecutor(Threads.createDaemonThreadFactory("message-callback"));
+    Futures.addCallback(createLiveNode(), new FutureCallback<String>() {
+      @Override
+      public void onSuccess(String result) {
+        // Create nodes for states and messaging
+        StateNode stateNode = new StateNode(ServiceController.State.STARTING);
+
+        final ListenableFuture<List<String>> createFuture = Futures.allAsList(
+          deleteAndCreate(getZKPath("messages"), null, CreateMode.PERSISTENT),
+          deleteAndCreate(getZKPath("state"), encodeStateNode(stateNode), CreateMode.PERSISTENT)
+        );
+
+        createFuture.addListener(new Runnable() {
+          @Override
+          public void run() {
+            try {
+              createFuture.get();
+              // Starts the decorated service
+              decoratedService.addListener(createListener(), Threads.SAME_THREAD_EXECUTOR);
+              decoratedService.start();
+            } catch (Exception e) {
+              notifyFailed(e);
+            }
+          }
+        }, Threads.SAME_THREAD_EXECUTOR);
+      }
+
+      @Override
+      public void onFailure(Throwable t) {
+        notifyFailed(t);
+      }
+    });
+  }
+
+  @Override
+  protected void doStop() {
+    // Stops the decorated service
+    decoratedService.stop();
+    callbackExecutor.shutdownNow();
+  }
+
+  private void addFinalizer(final Runnable finalizer) {
+    addListener(new ServiceListenerAdapter() {
+      @Override
+      public void terminated(State from) {
+        try {
+          finalizer.run();
+        } catch (Throwable t) {
+          LOG.warn("Exception when running finalizer.", t);
+        }
+      }
+
+      @Override
+      public void failed(State from, Throwable failure) {
+        try {
+          finalizer.run();
+        } catch (Throwable t) {
+          LOG.warn("Exception when running finalizer.", t);
+        }
+      }
+    }, Threads.SAME_THREAD_EXECUTOR);
+  }
+
+  private OperationFuture<String> createLiveNode() {
+    String liveNode = getLiveNodePath();
+    LOG.info("Create live node {}{}", zkClient.getConnectString(), liveNode);
+
+    JsonObject content = new JsonObject();
+    content.add("data", liveNodeData.get());
+    return ZKOperations.ignoreError(zkClient.create(liveNode, encodeJson(content), CreateMode.EPHEMERAL),
+                                    KeeperException.NodeExistsException.class, liveNode);
+  }
+
+  private OperationFuture<String> removeLiveNode() {
+    String liveNode = getLiveNodePath();
+    LOG.info("Remove live node {}{}", zkClient.getConnectString(), liveNode);
+    return ZKOperations.ignoreError(zkClient.delete(liveNode), KeeperException.NoNodeException.class, liveNode);
+  }
+
+  private OperationFuture<String> removeServiceNode() {
+    String serviceNode = String.format("/%s", id.getId());
+    LOG.info("Remove service node {}{}", zkClient.getConnectString(), serviceNode);
+    return ZKOperations.recursiveDelete(zkClient, serviceNode);
+  }
+
+  private void watchMessages() {
+    final String messagesPath = getZKPath("messages");
+    Futures.addCallback(zkClient.getChildren(messagesPath, new Watcher() {
+      @Override
+      public void process(WatchedEvent event) {
+        // TODO: Do we need to deal with other types of events?
+        if (event.getType() == Event.EventType.NodeChildrenChanged && decoratedService.isRunning()) {
+          watchMessages();
+        }
+      }
+    }), new FutureCallback<NodeChildren>() {
+      @Override
+      public void onSuccess(NodeChildren result) {
+        // Sort by the name, which is the messageId. The assumption is that message ids are ordered by time.
+        List<String> messages = Lists.newArrayList(result.getChildren());
+        Collections.sort(messages);
+        for (String messageId : messages) {
+          processMessage(messagesPath + "/" + messageId, messageId);
+        }
+      }
+
+      @Override
+      public void onFailure(Throwable t) {
+        // TODO: what could be done besides just logging?
+        LOG.error("Failed to watch messages.", t);
+      }
+    });
+  }
+
+  private void processMessage(final String path, final String messageId) {
+    Futures.addCallback(zkClient.getData(path), new FutureCallback<NodeData>() {
+      @Override
+      public void onSuccess(NodeData result) {
+        Message message = MessageCodec.decode(result.getData());
+        if (message == null) {
+          LOG.error("Failed to decode message for " + messageId + " in " + path);
+          listenFailure(zkClient.delete(path, result.getStat().getVersion()));
+          return;
+        }
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Message received from " + path + ": " + new String(MessageCodec.encode(message), Charsets.UTF_8));
+        }
+        if (handleStopMessage(message, getDeleteSupplier(path, result.getStat().getVersion()))) {
+          return;
+        }
+        messageCallback.onReceived(callbackExecutor, path, result.getStat().getVersion(), messageId, message);
+      }
+
+      @Override
+      public void onFailure(Throwable t) {
+        LOG.error("Failed to fetch message content.", t);
+      }
+    });
+  }
+
+  private <V> boolean handleStopMessage(Message message, final Supplier<OperationFuture<V>> postHandleSupplier) {
+    if (message.getType() == Message.Type.SYSTEM && SystemMessages.STOP_COMMAND.equals(message.getCommand())) {
+      callbackExecutor.execute(new Runnable() {
+        @Override
+        public void run() {
+          decoratedService.stop().addListener(new Runnable() {
+
+            @Override
+            public void run() {
+              stopServiceOnComplete(postHandleSupplier.get(), ZKServiceDecorator.this);
+            }
+          }, MoreExecutors.sameThreadExecutor());
+        }
+      });
+      return true;
+    }
+    return false;
+  }
+
+
+  private Supplier<OperationFuture<String>> getDeleteSupplier(final String path, final int version) {
+    return new Supplier<OperationFuture<String>>() {
+      @Override
+      public OperationFuture<String> get() {
+        return zkClient.delete(path, version);
+      }
+    };
+  }
+
+  private Listener createListener() {
+    return new DecoratedServiceListener();
+  }
+
+  private <V> byte[] encode(V data, Class<? extends V> clz) {
+    return new GsonBuilder().registerTypeAdapter(StateNode.class, new StateNodeCodec())
+                            .registerTypeAdapter(StackTraceElement.class, new StackTraceElementCodec())
+                            .create()
+      .toJson(data, clz).getBytes(Charsets.UTF_8);
+  }
+
+  private byte[] encodeStateNode(StateNode stateNode) {
+    return encode(stateNode, StateNode.class);
+  }
+
+  private <V extends JsonElement> byte[] encodeJson(V json) {
+    return new Gson().toJson(json).getBytes(Charsets.UTF_8);
+  }
+
+  private String getZKPath(String path) {
+    return String.format("/%s/%s", id, path);
+  }
+
+  private String getLiveNodePath() {
+    return "/instances/" + id;
+  }
+
+  private static <V> OperationFuture<V> listenFailure(final OperationFuture<V> operationFuture) {
+    operationFuture.addListener(new Runnable() {
+
+      @Override
+      public void run() {
+        try {
+          if (!operationFuture.isCancelled()) {
+            operationFuture.get();
+          }
+        } catch (Exception e) {
+          // TODO: what could be done besides just logging?
+          LOG.error("Operation execution failed for " + operationFuture.getRequestPath(), e);
+        }
+      }
+    }, Threads.SAME_THREAD_EXECUTOR);
+    return operationFuture;
+  }
+
+  private static final class MessageCallbackCaller {
+    private final MessageCallback callback;
+    private final ZKClient zkClient;
+
+    private MessageCallbackCaller(ZKClient zkClient) {
+      this(null, zkClient);
+    }
+
+    private MessageCallbackCaller(MessageCallback callback, ZKClient zkClient) {
+      this.callback = callback;
+      this.zkClient = zkClient;
+    }
+
+    public void onReceived(Executor executor, final String path,
+                           final int version, final String id, final Message message) {
+      if (callback == null) {
+        // Simply delete the message
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Ignoring incoming message from " + path + ": " + message);
+        }
+        listenFailure(zkClient.delete(path, version));
+        return;
+      }
+
+      executor.execute(new Runnable() {
+        @Override
+        public void run() {
+          try {
+            // Message processing is synchronous for now. Making it asynchronous needs more thought about race conditions.
+            // The executor is the callbackExecutor which is a single thread executor.
+            callback.onReceived(id, message).get();
+          } catch (Throwable t) {
+            LOG.error("Exception when processing message: {}, {}, {}", id, message, path, t);
+          } finally {
+            listenFailure(zkClient.delete(path, version));
+          }
+        }
+      });
+    }
+  }
+
+  private final class DecoratedServiceListener implements Listener {
+    private volatile boolean zkFailure = false;
+
+    @Override
+    public void starting() {
+      LOG.info("Starting: " + id);
+      saveState(ServiceController.State.STARTING);
+    }
+
+    @Override
+    public void running() {
+      LOG.info("Running: " + id);
+      notifyStarted();
+      watchMessages();
+      saveState(ServiceController.State.RUNNING);
+    }
+
+    @Override
+    public void stopping(State from) {
+      LOG.info("Stopping: " + id);
+      saveState(ServiceController.State.STOPPING);
+    }
+
+    @Override
+    public void terminated(State from) {
+      LOG.info("Terminated: " + from + " " + id);
+      if (zkFailure) {
+        return;
+      }
+
+      ImmutableList<OperationFuture<String>> futures = ImmutableList.of(removeLiveNode(), removeServiceNode());
+      final ListenableFuture<List<String>> future = Futures.allAsList(futures);
+      Futures.successfulAsList(futures).addListener(new Runnable() {
+        @Override
+        public void run() {
+          try {
+            future.get();
+            LOG.info("Service and state node removed");
+            notifyStopped();
+          } catch (Exception e) {
+            LOG.warn("Failed to remove ZK nodes.", e);
+            notifyFailed(e);
+          }
+        }
+      }, Threads.SAME_THREAD_EXECUTOR);
+    }
+
+    @Override
+    public void failed(State from, final Throwable failure) {
+      LOG.info("Failed: {} {}.", from, id, failure);
+      if (zkFailure) {
+        return;
+      }
+
+      ImmutableList<OperationFuture<String>> futures = ImmutableList.of(removeLiveNode(), removeServiceNode());
+      Futures.successfulAsList(futures).addListener(new Runnable() {
+        @Override
+        public void run() {
+          LOG.info("Service and state node removed");
+          notifyFailed(failure);
+        }
+      }, Threads.SAME_THREAD_EXECUTOR);
+    }
+
+    private void saveState(ServiceController.State state) {
+      if (zkFailure) {
+        return;
+      }
+      StateNode stateNode = new StateNode(state);
+      stopOnFailure(zkClient.setData(getZKPath("state"), encodeStateNode(stateNode)));
+    }
+
+    private <V> void stopOnFailure(final OperationFuture<V> future) {
+      future.addListener(new Runnable() {
+        @Override
+        public void run() {
+          try {
+            future.get();
+          } catch (final Exception e) {
+            LOG.error("ZK operation failed", e);
+            zkFailure = true;
+            decoratedService.stop().addListener(new Runnable() {
+              @Override
+              public void run() {
+                notifyFailed(e);
+              }
+            }, Threads.SAME_THREAD_EXECUTOR);
+          }
+        }
+      }, Threads.SAME_THREAD_EXECUTOR);
+    }
+  }
+
+  private <V> ListenableFuture<State> stopServiceOnComplete(ListenableFuture<V> future, final Service service) {
+    return Futures.transform(future, new AsyncFunction<V, State>() {
+      @Override
+      public ListenableFuture<State> apply(V input) throws Exception {
+        return service.stop();
+      }
+    });
+  }
+}
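
A minimal, hedged sketch of decorating a Guava Service so its lifecycle is mirrored into ZooKeeper. The ZKClientService builder and RunIds.generate() are assumptions based on other parts of this commit, and the live-node payload here is just an empty JSON object.

    ZKClientService zkClientService = ZKClientService.Builder.of("localhost:2181").build();
    zkClientService.startAndWait();

    Service worker = new AbstractIdleService() {
      @Override
      protected void startUp() throws Exception {
        // Start the real work here.
      }

      @Override
      protected void shutDown() throws Exception {
        // Clean up here.
      }
    };

    // The supplier's JSON is written into the ephemeral live node created for this run id.
    Supplier<JsonElement> liveNodeData = new Supplier<JsonElement>() {
      @Override
      public JsonElement get() {
        return new JsonObject();
      }
    };

    ZKServiceDecorator decorator =
      new ZKServiceDecorator(zkClientService, RunIds.generate(), liveNodeData, worker);
    decorator.startAndWait();
    // ... later: stopping the decorator also stops the wrapped service and removes the ZK nodes.
    decorator.stopAndWait();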

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/json/ArgumentsCodec.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/json/ArgumentsCodec.java b/twill-core/src/main/java/org/apache/twill/internal/json/ArgumentsCodec.java
new file mode 100644
index 0000000..07d4c1d
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/json/ArgumentsCodec.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.json;
+
+import org.apache.twill.internal.Arguments;
+import com.google.common.collect.ImmutableMultimap;
+import com.google.common.io.InputSupplier;
+import com.google.common.io.OutputSupplier;
+import com.google.common.reflect.TypeToken;
+import com.google.gson.Gson;
+import com.google.gson.GsonBuilder;
+import com.google.gson.JsonDeserializationContext;
+import com.google.gson.JsonDeserializer;
+import com.google.gson.JsonElement;
+import com.google.gson.JsonObject;
+import com.google.gson.JsonParseException;
+import com.google.gson.JsonSerializationContext;
+import com.google.gson.JsonSerializer;
+
+import java.io.IOException;
+import java.io.Reader;
+import java.io.Writer;
+import java.lang.reflect.Type;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Gson serializer and deserializer for {@link Arguments}, with static helpers for
+ * encoding to and decoding from character streams.
+ */
+public final class ArgumentsCodec implements JsonSerializer<Arguments>, JsonDeserializer<Arguments> {
+
+  private static final Gson GSON = new GsonBuilder().registerTypeAdapter(Arguments.class, new ArgumentsCodec())
+                                                    .create();
+
+  public static void encode(Arguments arguments, OutputSupplier<? extends Writer> writerSupplier) throws IOException {
+    Writer writer = writerSupplier.getOutput();
+    try {
+      GSON.toJson(arguments, writer);
+    } finally {
+      writer.close();
+    }
+  }
+
+
+  public static Arguments decode(InputSupplier<? extends Reader> readerSupplier) throws IOException {
+    Reader reader = readerSupplier.getInput();
+    try {
+      return GSON.fromJson(reader, Arguments.class);
+    } finally {
+      reader.close();
+    }
+  }
+
+  @Override
+  public JsonElement serialize(Arguments src, Type typeOfSrc,
+                               JsonSerializationContext context) {
+    JsonObject json = new JsonObject();
+    json.add("arguments", context.serialize(src.getArguments()));
+    json.add("runnableArguments", context.serialize(src.getRunnableArguments().asMap()));
+
+    return json;
+  }
+
+  @Override
+  public Arguments deserialize(JsonElement json, Type typeOfT,
+                              JsonDeserializationContext context) throws JsonParseException {
+    JsonObject jsonObj = json.getAsJsonObject();
+    List<String> arguments = context.deserialize(jsonObj.get("arguments"), new TypeToken<List<String>>() {}.getType());
+    Map<String, Collection<String>> args = context.deserialize(jsonObj.get("runnableArguments"),
+                                                               new TypeToken<Map<String, Collection<String>>>(){
+                                                               }.getType());
+
+    ImmutableMultimap.Builder<String, String> builder = ImmutableMultimap.builder();
+    for (Map.Entry<String, Collection<String>> entry : args.entrySet()) {
+      builder.putAll(entry.getKey(), entry.getValue());
+    }
+    return new Arguments(arguments, builder.build());
+  }
+}
\ No newline at end of file
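
For illustration, a minimal round-trip sketch of the static encode/decode helpers above. The Arguments constructor shape (a List of application arguments plus a Multimap of per-runnable arguments) is only inferred from the deserializer, the file name is hypothetical, and Files/Charsets are the Guava helpers; IOExceptions are elided:

    // Hypothetical round trip through a JSON file (sketch, not part of the patch).
    Arguments args = new Arguments(
        ImmutableList.of("--verbose"),
        ImmutableMultimap.of("worker", "--threads=4"));

    File file = new File("arguments.json");   // hypothetical location
    ArgumentsCodec.encode(args, Files.newWriterSupplier(file, Charsets.UTF_8));
    Arguments decoded = ArgumentsCodec.decode(Files.newReaderSupplier(file, Charsets.UTF_8));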

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/json/JsonUtils.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/json/JsonUtils.java b/twill-core/src/main/java/org/apache/twill/internal/json/JsonUtils.java
new file mode 100644
index 0000000..9556ad8
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/json/JsonUtils.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.json;
+
+import com.google.gson.JsonElement;
+import com.google.gson.JsonObject;
+
+/**
+ * Collection of helper functions for JSON codecs.
+ */
+public final class JsonUtils {
+
+  private JsonUtils() {
+  }
+
+  /**
+   * Returns a String representation of the given property.
+   */
+  public static String getAsString(JsonObject json, String property) {
+    JsonElement jsonElement = json.get(property);
+    if (jsonElement.isJsonNull()) {
+      return null;
+    }
+    if (jsonElement.isJsonPrimitive()) {
+      return jsonElement.getAsString();
+    }
+    return jsonElement.toString();
+  }
+
+  /**
+   * Returns a long representation of the given property.
+   */
+  public static long getAsLong(JsonObject json, String property, long defaultValue) {
+    try {
+      return json.get(property).getAsLong();
+    } catch (Exception e) {
+      return defaultValue;
+    }
+  }
+
+  /**
+   * Returns an int representation of the given property, or {@code defaultValue} if it is missing or not numeric.
+   */
+  public static int getAsInt(JsonObject json, String property, int defaultValue) {
+    try {
+      return json.get(property).getAsInt();
+    } catch (Exception e) {
+      return defaultValue;
+    }
+  }
+}
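
A small sketch of the fallback behaviour; the sample JSON is made up for illustration:

    // getAsLong/getAsInt swallow parse failures and return the supplied default.
    JsonObject json = new JsonParser()
        .parse("{\"host\":\"example.org\",\"port\":\"not-a-number\"}")
        .getAsJsonObject();

    String host = JsonUtils.getAsString(json, "host");     // "example.org"
    long port = JsonUtils.getAsLong(json, "port", 9092L);  // parse fails, returns 9092
    int workers = JsonUtils.getAsInt(json, "workers", 1);  // key missing, returns 1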

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/json/LocalFileCodec.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/json/LocalFileCodec.java b/twill-core/src/main/java/org/apache/twill/internal/json/LocalFileCodec.java
new file mode 100644
index 0000000..680a36c
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/json/LocalFileCodec.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.json;
+
+import org.apache.twill.api.LocalFile;
+import org.apache.twill.internal.DefaultLocalFile;
+import com.google.gson.JsonDeserializationContext;
+import com.google.gson.JsonDeserializer;
+import com.google.gson.JsonElement;
+import com.google.gson.JsonObject;
+import com.google.gson.JsonParseException;
+import com.google.gson.JsonSerializationContext;
+import com.google.gson.JsonSerializer;
+
+import java.lang.reflect.Type;
+import java.net.URI;
+
+/**
+ * Gson codec for serializing and deserializing {@link LocalFile}.
+ */
+public final class LocalFileCodec implements JsonSerializer<LocalFile>, JsonDeserializer<LocalFile> {
+
+  @Override
+  public JsonElement serialize(LocalFile src, Type typeOfSrc, JsonSerializationContext context) {
+    JsonObject json = new JsonObject();
+
+    json.addProperty("name", src.getName());
+    json.addProperty("uri", src.getURI().toASCIIString());
+    json.addProperty("lastModified", src.getLastModified());
+    json.addProperty("size", src.getSize());
+    json.addProperty("archive", src.isArchive());
+    json.addProperty("pattern", src.getPattern());
+
+    return json;
+  }
+
+  @Override
+  public LocalFile deserialize(JsonElement json, Type typeOfT,
+                               JsonDeserializationContext context) throws JsonParseException {
+    JsonObject jsonObj = json.getAsJsonObject();
+
+    String name = jsonObj.get("name").getAsString();
+    URI uri = URI.create(jsonObj.get("uri").getAsString());
+    long lastModified = jsonObj.get("lastModified").getAsLong();
+    long size = jsonObj.get("size").getAsLong();
+    boolean archive = jsonObj.get("archive").getAsBoolean();
+    JsonElement pattern = jsonObj.get("pattern");
+
+    return new DefaultLocalFile(name, uri, lastModified, size,
+                                archive, (pattern == null || pattern.isJsonNull()) ? null : pattern.getAsString());
+  }
+}
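
A possible wiring sketch; the DefaultLocalFile constructor order is taken from the deserializer above, and the file name, URI and size are invented for illustration:

    Gson gson = new GsonBuilder()
        .registerTypeAdapter(LocalFile.class, new LocalFileCodec())
        .create();

    LocalFile file = new DefaultLocalFile("app.jar", URI.create("hdfs:///twill/app.jar"),
                                          System.currentTimeMillis(), 1024L, false, null);
    String json = gson.toJson(file, LocalFile.class);
    LocalFile decoded = gson.fromJson(json, LocalFile.class);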

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/json/ResourceReportAdapter.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/json/ResourceReportAdapter.java b/twill-core/src/main/java/org/apache/twill/internal/json/ResourceReportAdapter.java
new file mode 100644
index 0000000..e473fe7
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/json/ResourceReportAdapter.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.json;
+
+import org.apache.twill.api.ResourceReport;
+import org.apache.twill.api.TwillRunResources;
+import com.google.gson.Gson;
+import com.google.gson.GsonBuilder;
+
+import java.io.Reader;
+import java.io.Writer;
+
+/**
+ * This class provides utility to help encode/decode {@link ResourceReport} to/from Json.
+ */
+public final class ResourceReportAdapter {
+
+  private final Gson gson;
+
+  public static ResourceReportAdapter create() {
+    return new ResourceReportAdapter();
+  }
+
+  private ResourceReportAdapter() {
+    gson = new GsonBuilder()
+              .serializeNulls()
+              .registerTypeAdapter(TwillRunResources.class, new TwillRunResourcesCodec())
+              .registerTypeAdapter(ResourceReport.class, new ResourceReportCodec())
+              .create();
+  }
+
+  public String toJson(ResourceReport report) {
+    return gson.toJson(report, ResourceReport.class);
+  }
+
+  public void toJson(ResourceReport report, Writer writer) {
+    gson.toJson(report, ResourceReport.class, writer);
+  }
+
+  public ResourceReport fromJson(String json) {
+    return gson.fromJson(json, ResourceReport.class);
+  }
+
+  public ResourceReport fromJson(Reader reader) {
+    return gson.fromJson(reader, ResourceReport.class);
+  }
+}
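
Typical usage is a simple round trip; the report instance here is assumed to be obtained elsewhere (for example from the application master):

    ResourceReportAdapter adapter = ResourceReportAdapter.create();

    String json = adapter.toJson(report);          // 'report' obtained elsewhere
    ResourceReport copy = adapter.fromJson(json);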

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/json/ResourceReportCodec.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/json/ResourceReportCodec.java b/twill-core/src/main/java/org/apache/twill/internal/json/ResourceReportCodec.java
new file mode 100644
index 0000000..884d889
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/json/ResourceReportCodec.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.json;
+
+import org.apache.twill.api.ResourceReport;
+import org.apache.twill.api.TwillRunResources;
+import org.apache.twill.internal.DefaultResourceReport;
+import com.google.gson.JsonDeserializationContext;
+import com.google.gson.JsonDeserializer;
+import com.google.gson.JsonElement;
+import com.google.gson.JsonObject;
+import com.google.gson.JsonParseException;
+import com.google.gson.JsonSerializationContext;
+import com.google.gson.JsonSerializer;
+import com.google.gson.reflect.TypeToken;
+
+import java.lang.reflect.Type;
+import java.util.Collection;
+import java.util.Map;
+
+/**
+ * Codec for serializing and deserializing a {@link ResourceReport} object using json.
+ */
+public final class ResourceReportCodec implements JsonSerializer<ResourceReport>,
+                                           JsonDeserializer<ResourceReport> {
+
+  @Override
+  public JsonElement serialize(ResourceReport src, Type typeOfSrc, JsonSerializationContext context) {
+    JsonObject json = new JsonObject();
+
+    json.addProperty("appMasterId", src.getApplicationId());
+    json.add("appMasterResources", context.serialize(
+      src.getAppMasterResources(), new TypeToken<TwillRunResources>(){}.getType()));
+    json.add("runnableResources", context.serialize(
+      src.getResources(), new TypeToken<Map<String, Collection<TwillRunResources>>>(){}.getType()));
+
+    return json;
+  }
+
+  @Override
+  public ResourceReport deserialize(JsonElement json, Type typeOfT,
+                                           JsonDeserializationContext context) throws JsonParseException {
+    JsonObject jsonObj = json.getAsJsonObject();
+    String appMasterId = jsonObj.get("appMasterId").getAsString();
+    TwillRunResources masterResources = context.deserialize(
+      jsonObj.get("appMasterResources"), TwillRunResources.class);
+    Map<String, Collection<TwillRunResources>> resources = context.deserialize(
+      jsonObj.get("runnableResources"), new TypeToken<Map<String, Collection<TwillRunResources>>>(){}.getType());
+
+    return new DefaultResourceReport(appMasterId, masterResources, resources);
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/json/ResourceSpecificationCodec.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/json/ResourceSpecificationCodec.java b/twill-core/src/main/java/org/apache/twill/internal/json/ResourceSpecificationCodec.java
new file mode 100644
index 0000000..bea73c4
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/json/ResourceSpecificationCodec.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.json;
+
+import org.apache.twill.api.ResourceSpecification;
+import org.apache.twill.internal.DefaultResourceSpecification;
+import com.google.gson.JsonDeserializationContext;
+import com.google.gson.JsonDeserializer;
+import com.google.gson.JsonElement;
+import com.google.gson.JsonObject;
+import com.google.gson.JsonParseException;
+import com.google.gson.JsonSerializationContext;
+import com.google.gson.JsonSerializer;
+
+import java.lang.reflect.Type;
+
+/**
+ * Gson codec for serializing and deserializing {@link ResourceSpecification}.
+ */
+final class ResourceSpecificationCodec implements JsonSerializer<ResourceSpecification>,
+                                                  JsonDeserializer<ResourceSpecification> {
+
+  @Override
+  public JsonElement serialize(ResourceSpecification src, Type typeOfSrc, JsonSerializationContext context) {
+    JsonObject json = new JsonObject();
+
+    json.addProperty("cores", src.getVirtualCores());
+    json.addProperty("memorySize", src.getMemorySize());
+    json.addProperty("instances", src.getInstances());
+    json.addProperty("uplink", src.getUplink());
+    json.addProperty("downlink", src.getDownlink());
+
+    return json;
+  }
+
+  @Override
+  public ResourceSpecification deserialize(JsonElement json, Type typeOfT,
+                                           JsonDeserializationContext context) throws JsonParseException {
+    JsonObject jsonObj = json.getAsJsonObject();
+    return new DefaultResourceSpecification(jsonObj.get("cores").getAsInt(),
+                                            jsonObj.get("memorySize").getAsInt(),
+                                            jsonObj.get("instances").getAsInt(),
+                                            jsonObj.get("uplink").getAsInt(),
+                                            jsonObj.get("downlink").getAsInt());
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/json/RuntimeSpecificationCodec.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/json/RuntimeSpecificationCodec.java b/twill-core/src/main/java/org/apache/twill/internal/json/RuntimeSpecificationCodec.java
new file mode 100644
index 0000000..867f4a8
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/json/RuntimeSpecificationCodec.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.json;
+
+import org.apache.twill.api.LocalFile;
+import org.apache.twill.api.ResourceSpecification;
+import org.apache.twill.api.RuntimeSpecification;
+import org.apache.twill.api.TwillRunnableSpecification;
+import org.apache.twill.internal.DefaultRuntimeSpecification;
+import com.google.common.reflect.TypeToken;
+import com.google.gson.JsonDeserializationContext;
+import com.google.gson.JsonDeserializer;
+import com.google.gson.JsonElement;
+import com.google.gson.JsonObject;
+import com.google.gson.JsonParseException;
+import com.google.gson.JsonSerializationContext;
+import com.google.gson.JsonSerializer;
+
+import java.lang.reflect.Type;
+import java.util.Collection;
+
+/**
+ * Gson codec for serializing and deserializing {@link RuntimeSpecification}.
+ */
+final class RuntimeSpecificationCodec implements JsonSerializer<RuntimeSpecification>,
+                                                 JsonDeserializer<RuntimeSpecification> {
+
+  @Override
+  public JsonElement serialize(RuntimeSpecification src, Type typeOfSrc, JsonSerializationContext context) {
+    JsonObject json = new JsonObject();
+    json.addProperty("name", src.getName());
+    json.add("runnable", context.serialize(src.getRunnableSpecification(), TwillRunnableSpecification.class));
+    json.add("resources", context.serialize(src.getResourceSpecification(), ResourceSpecification.class));
+    json.add("files", context.serialize(src.getLocalFiles(), new TypeToken<Collection<LocalFile>>(){}.getType()));
+
+    return json;
+  }
+
+  @Override
+  public RuntimeSpecification deserialize(JsonElement json, Type typeOfT,
+                                          JsonDeserializationContext context) throws JsonParseException {
+    JsonObject jsonObj = json.getAsJsonObject();
+
+    String name = jsonObj.get("name").getAsString();
+    TwillRunnableSpecification runnable = context.deserialize(jsonObj.get("runnable"),
+                                                               TwillRunnableSpecification.class);
+    ResourceSpecification resources = context.deserialize(jsonObj.get("resources"),
+                                                          ResourceSpecification.class);
+    Collection<LocalFile> files = context.deserialize(jsonObj.get("files"),
+                                                      new TypeToken<Collection<LocalFile>>(){}.getType());
+
+    return new DefaultRuntimeSpecification(name, runnable, resources, files);
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/json/StackTraceElementCodec.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/json/StackTraceElementCodec.java b/twill-core/src/main/java/org/apache/twill/internal/json/StackTraceElementCodec.java
new file mode 100644
index 0000000..9a57b46
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/json/StackTraceElementCodec.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.json;
+
+import com.google.gson.JsonDeserializationContext;
+import com.google.gson.JsonDeserializer;
+import com.google.gson.JsonElement;
+import com.google.gson.JsonObject;
+import com.google.gson.JsonParseException;
+import com.google.gson.JsonSerializationContext;
+import com.google.gson.JsonSerializer;
+
+import java.lang.reflect.Type;
+
+/**
+ * Gson codec for serializing and deserializing {@link StackTraceElement}.
+ */
+public final class StackTraceElementCodec implements JsonSerializer<StackTraceElement>,
+                                                     JsonDeserializer<StackTraceElement> {
+
+  @Override
+  public StackTraceElement deserialize(JsonElement json, Type typeOfT,
+                                       JsonDeserializationContext context) throws JsonParseException {
+    JsonObject jsonObj = json.getAsJsonObject();
+    return new StackTraceElement(JsonUtils.getAsString(jsonObj, "className"),
+                                 JsonUtils.getAsString(jsonObj, "method"),
+                                 JsonUtils.getAsString(jsonObj, "file"),
+                                 JsonUtils.getAsInt(jsonObj, "line", -1));
+  }
+
+  @Override
+  public JsonElement serialize(StackTraceElement src, Type typeOfSrc, JsonSerializationContext context) {
+    JsonObject jsonObj = new JsonObject();
+    jsonObj.addProperty("className", src.getClassName());
+    jsonObj.addProperty("method", src.getMethodName());
+    jsonObj.addProperty("file", src.getFileName());
+    jsonObj.addProperty("line", src.getLineNumber());
+
+    return jsonObj;
+  }
+}
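
A quick sketch of registering the codec and round-tripping a stack trace; the exception is created purely to obtain sample data:

    Gson gson = new GsonBuilder()
        .registerTypeAdapter(StackTraceElement.class, new StackTraceElementCodec())
        .create();

    StackTraceElement[] trace = new Exception("boom").getStackTrace();
    String json = gson.toJson(trace);
    StackTraceElement[] decoded = gson.fromJson(json, StackTraceElement[].class);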

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/json/StateNodeCodec.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/json/StateNodeCodec.java b/twill-core/src/main/java/org/apache/twill/internal/json/StateNodeCodec.java
new file mode 100644
index 0000000..c1e9d1c
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/json/StateNodeCodec.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.json;
+
+import org.apache.twill.api.ServiceController;
+import org.apache.twill.internal.state.StateNode;
+import com.google.gson.JsonDeserializationContext;
+import com.google.gson.JsonDeserializer;
+import com.google.gson.JsonElement;
+import com.google.gson.JsonObject;
+import com.google.gson.JsonParseException;
+import com.google.gson.JsonSerializationContext;
+import com.google.gson.JsonSerializer;
+
+import java.lang.reflect.Type;
+
+/**
+ * Gson codec for serializing and deserializing {@link StateNode}.
+ */
+public final class StateNodeCodec implements JsonSerializer<StateNode>, JsonDeserializer<StateNode> {
+
+  @Override
+  public StateNode deserialize(JsonElement json, Type typeOfT,
+                               JsonDeserializationContext context) throws JsonParseException {
+    JsonObject jsonObj = json.getAsJsonObject();
+    ServiceController.State state = ServiceController.State.valueOf(jsonObj.get("state").getAsString());
+    String errorMessage = jsonObj.has("errorMessage") ? jsonObj.get("errorMessage").getAsString() : null;
+
+    return new StateNode(state, errorMessage,
+                         context.<StackTraceElement[]>deserialize(jsonObj.get("stackTraces"), StackTraceElement[].class));
+  }
+
+  @Override
+  public JsonElement serialize(StateNode src, Type typeOfSrc, JsonSerializationContext context) {
+    JsonObject jsonObj = new JsonObject();
+    jsonObj.addProperty("state", src.getState().name());
+    if (src.getErrorMessage() != null) {
+      jsonObj.addProperty("errorMessage", src.getErrorMessage());
+    }
+    if (src.getStackTraces() != null) {
+      jsonObj.add("stackTraces", context.serialize(src.getStackTraces(), StackTraceElement[].class));
+    }
+    return jsonObj;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/json/TwillRunResourcesCodec.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/json/TwillRunResourcesCodec.java b/twill-core/src/main/java/org/apache/twill/internal/json/TwillRunResourcesCodec.java
new file mode 100644
index 0000000..8951173
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/json/TwillRunResourcesCodec.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.json;
+
+import org.apache.twill.api.TwillRunResources;
+import org.apache.twill.internal.DefaultTwillRunResources;
+import com.google.gson.JsonDeserializationContext;
+import com.google.gson.JsonDeserializer;
+import com.google.gson.JsonElement;
+import com.google.gson.JsonObject;
+import com.google.gson.JsonParseException;
+import com.google.gson.JsonSerializationContext;
+import com.google.gson.JsonSerializer;
+
+import java.lang.reflect.Type;
+
+/**
+ * Codec for serializing and deserializing a {@link org.apache.twill.api.TwillRunResources} object using json.
+ */
+public final class TwillRunResourcesCodec implements JsonSerializer<TwillRunResources>,
+                                              JsonDeserializer<TwillRunResources> {
+
+  @Override
+  public JsonElement serialize(TwillRunResources src, Type typeOfSrc, JsonSerializationContext context) {
+    JsonObject json = new JsonObject();
+
+    json.addProperty("containerId", src.getContainerId());
+    json.addProperty("instanceId", src.getInstanceId());
+    json.addProperty("host", src.getHost());
+    json.addProperty("memoryMB", src.getMemoryMB());
+    json.addProperty("virtualCores", src.getVirtualCores());
+
+    return json;
+  }
+
+  @Override
+  public TwillRunResources deserialize(JsonElement json, Type typeOfT,
+                                           JsonDeserializationContext context) throws JsonParseException {
+    JsonObject jsonObj = json.getAsJsonObject();
+    return new DefaultTwillRunResources(jsonObj.get("instanceId").getAsInt(),
+                                        jsonObj.get("containerId").getAsString(),
+                                        jsonObj.get("virtualCores").getAsInt(),
+                                        jsonObj.get("memoryMB").getAsInt(),
+                                        jsonObj.get("host").getAsString());
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/json/TwillRunnableSpecificationCodec.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/json/TwillRunnableSpecificationCodec.java b/twill-core/src/main/java/org/apache/twill/internal/json/TwillRunnableSpecificationCodec.java
new file mode 100644
index 0000000..f37c1e8
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/json/TwillRunnableSpecificationCodec.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.json;
+
+import org.apache.twill.api.TwillRunnableSpecification;
+import org.apache.twill.internal.DefaultTwillRunnableSpecification;
+import com.google.common.reflect.TypeToken;
+import com.google.gson.JsonDeserializationContext;
+import com.google.gson.JsonDeserializer;
+import com.google.gson.JsonElement;
+import com.google.gson.JsonObject;
+import com.google.gson.JsonParseException;
+import com.google.gson.JsonSerializationContext;
+import com.google.gson.JsonSerializer;
+
+import java.lang.reflect.Type;
+import java.util.Map;
+
+/**
+ * Gson codec for serializing and deserializing {@link TwillRunnableSpecification}.
+ */
+final class TwillRunnableSpecificationCodec implements JsonSerializer<TwillRunnableSpecification>,
+                                                       JsonDeserializer<TwillRunnableSpecification> {
+
+  @Override
+  public JsonElement serialize(TwillRunnableSpecification src, Type typeOfSrc, JsonSerializationContext context) {
+    JsonObject json = new JsonObject();
+
+    json.addProperty("classname", src.getClassName());
+    json.addProperty("name", src.getName());
+    json.add("arguments", context.serialize(src.getConfigs(), new TypeToken<Map<String, String>>(){}.getType()));
+
+    return json;
+  }
+
+  @Override
+  public TwillRunnableSpecification deserialize(JsonElement json, Type typeOfT,
+                                                JsonDeserializationContext context) throws JsonParseException {
+    JsonObject jsonObj = json.getAsJsonObject();
+
+    String className = jsonObj.get("classname").getAsString();
+    String name = jsonObj.get("name").getAsString();
+    Map<String, String> arguments = context.deserialize(jsonObj.get("arguments"),
+                                                        new TypeToken<Map<String, String>>(){}.getType());
+
+    return new DefaultTwillRunnableSpecification(className, name, arguments);
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/json/TwillSpecificationAdapter.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/json/TwillSpecificationAdapter.java b/twill-core/src/main/java/org/apache/twill/internal/json/TwillSpecificationAdapter.java
new file mode 100644
index 0000000..67c15a2
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/json/TwillSpecificationAdapter.java
@@ -0,0 +1,163 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.json;
+
+import com.google.common.base.Charsets;
+import com.google.common.collect.Maps;
+import com.google.common.io.Files;
+import com.google.gson.Gson;
+import com.google.gson.GsonBuilder;
+import com.google.gson.TypeAdapter;
+import com.google.gson.TypeAdapterFactory;
+import com.google.gson.reflect.TypeToken;
+import com.google.gson.stream.JsonReader;
+import com.google.gson.stream.JsonToken;
+import com.google.gson.stream.JsonWriter;
+import org.apache.twill.api.EventHandlerSpecification;
+import org.apache.twill.api.LocalFile;
+import org.apache.twill.api.ResourceSpecification;
+import org.apache.twill.api.RuntimeSpecification;
+import org.apache.twill.api.TwillRunnableSpecification;
+import org.apache.twill.api.TwillSpecification;
+import org.apache.twill.internal.json.TwillSpecificationCodec.EventHandlerSpecificationCoder;
+import org.apache.twill.internal.json.TwillSpecificationCodec.TwillSpecificationOrderCoder;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.Reader;
+import java.io.Writer;
+import java.lang.reflect.ParameterizedType;
+import java.lang.reflect.Type;
+import java.util.Map;
+
+/**
+ * This class provides utility to help encode/decode {@link TwillSpecification} to/from Json.
+ */
+public final class TwillSpecificationAdapter {
+
+  private final Gson gson;
+
+  public static TwillSpecificationAdapter create() {
+    return new TwillSpecificationAdapter();
+  }
+
+  private TwillSpecificationAdapter() {
+    gson = new GsonBuilder()
+              .serializeNulls()
+              .registerTypeAdapter(TwillSpecification.class, new TwillSpecificationCodec())
+              .registerTypeAdapter(TwillSpecification.Order.class, new TwillSpecificationOrderCoder())
+              .registerTypeAdapter(EventHandlerSpecification.class, new EventHandlerSpecificationCoder())
+              .registerTypeAdapter(RuntimeSpecification.class, new RuntimeSpecificationCodec())
+              .registerTypeAdapter(TwillRunnableSpecification.class, new TwillRunnableSpecificationCodec())
+              .registerTypeAdapter(ResourceSpecification.class, new ResourceSpecificationCodec())
+              .registerTypeAdapter(LocalFile.class, new LocalFileCodec())
+              .registerTypeAdapterFactory(new TwillSpecificationTypeAdapterFactory())
+              .create();
+  }
+
+  public String toJson(TwillSpecification spec) {
+    return gson.toJson(spec, TwillSpecification.class);
+  }
+
+  public void toJson(TwillSpecification spec, Writer writer) {
+    gson.toJson(spec, TwillSpecification.class, writer);
+  }
+
+  public void toJson(TwillSpecification spec, File file) throws IOException {
+    Writer writer = Files.newWriter(file, Charsets.UTF_8);
+    try {
+      toJson(spec, writer);
+    } finally {
+      writer.close();
+    }
+  }
+
+  public TwillSpecification fromJson(String json) {
+    return gson.fromJson(json, TwillSpecification.class);
+  }
+
+  public TwillSpecification fromJson(Reader reader) {
+    return gson.fromJson(reader, TwillSpecification.class);
+  }
+
+  public TwillSpecification fromJson(File file) throws IOException {
+    Reader reader = Files.newReader(file, Charsets.UTF_8);
+    try {
+      return fromJson(reader);
+    } finally {
+      reader.close();
+    }
+  }
+
+  // This is to get around gson ignoring inner classes.
+  private static final class TwillSpecificationTypeAdapterFactory implements TypeAdapterFactory {
+
+    @Override
+    public <T> TypeAdapter<T> create(Gson gson, TypeToken<T> type) {
+      Class<?> rawType = type.getRawType();
+      if (!Map.class.isAssignableFrom(rawType)) {
+        return null;
+      }
+      Type[] typeArgs = ((ParameterizedType) type.getType()).getActualTypeArguments();
+      TypeToken<?> keyType = TypeToken.get(typeArgs[0]);
+      TypeToken<?> valueType = TypeToken.get(typeArgs[1]);
+      if (keyType.getRawType() != String.class) {
+        return null;
+      }
+      return (TypeAdapter<T>) mapAdapter(gson, valueType);
+    }
+
+    private <V> TypeAdapter<Map<String, V>> mapAdapter(Gson gson, TypeToken<V> valueType) {
+      final TypeAdapter<V> valueAdapter = gson.getAdapter(valueType);
+
+      return new TypeAdapter<Map<String, V>>() {
+        @Override
+        public void write(JsonWriter writer, Map<String, V> map) throws IOException {
+          if (map == null) {
+            writer.nullValue();
+            return;
+          }
+          writer.beginObject();
+          for (Map.Entry<String, V> entry : map.entrySet()) {
+            writer.name(entry.getKey());
+            valueAdapter.write(writer, entry.getValue());
+          }
+          writer.endObject();
+        }
+
+        @Override
+        public Map<String, V> read(JsonReader reader) throws IOException {
+          if (reader.peek() == JsonToken.NULL) {
+            reader.nextNull();
+            return null;
+          }
+          if (reader.peek() != JsonToken.BEGIN_OBJECT) {
+            return null;
+          }
+          Map<String, V> map = Maps.newHashMap();
+          reader.beginObject();
+          while (reader.peek() != JsonToken.END_OBJECT) {
+            map.put(reader.nextName(), valueAdapter.read(reader));
+          }
+          reader.endObject();
+          return map;
+        }
+      };
+    }
+  }
+}
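
Usage mirrors ResourceReportAdapter; the file path is hypothetical, the spec is assumed to be built elsewhere, and IOExceptions are elided:

    TwillSpecificationAdapter adapter = TwillSpecificationAdapter.create();

    adapter.toJson(spec, new File("twillSpec.json"));                  // 'spec' built elsewhere
    TwillSpecification copy = adapter.fromJson(new File("twillSpec.json"));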

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/json/TwillSpecificationCodec.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/json/TwillSpecificationCodec.java b/twill-core/src/main/java/org/apache/twill/internal/json/TwillSpecificationCodec.java
new file mode 100644
index 0000000..5d88350
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/json/TwillSpecificationCodec.java
@@ -0,0 +1,127 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.json;
+
+import org.apache.twill.api.EventHandlerSpecification;
+import org.apache.twill.api.RuntimeSpecification;
+import org.apache.twill.api.TwillSpecification;
+import org.apache.twill.internal.DefaultEventHandlerSpecification;
+import org.apache.twill.internal.DefaultTwillSpecification;
+import com.google.common.reflect.TypeToken;
+import com.google.gson.JsonDeserializationContext;
+import com.google.gson.JsonDeserializer;
+import com.google.gson.JsonElement;
+import com.google.gson.JsonObject;
+import com.google.gson.JsonParseException;
+import com.google.gson.JsonSerializationContext;
+import com.google.gson.JsonSerializer;
+
+import java.lang.reflect.Type;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * An implementation of gson serializer/deserializer for {@link org.apache.twill.api.TwillSpecification}.
+ */
+final class TwillSpecificationCodec implements JsonSerializer<TwillSpecification>,
+                                               JsonDeserializer<TwillSpecification> {
+
+  @Override
+  public JsonElement serialize(TwillSpecification src, Type typeOfSrc, JsonSerializationContext context) {
+    JsonObject json = new JsonObject();
+    json.addProperty("name", src.getName());
+    json.add("runnables", context.serialize(src.getRunnables(),
+                                            new TypeToken<Map<String, RuntimeSpecification>>(){}.getType()));
+    json.add("orders", context.serialize(src.getOrders(),
+                                         new TypeToken<List<TwillSpecification.Order>>(){}.getType()));
+    EventHandlerSpecification eventHandler = src.getEventHandler();
+    if (eventHandler != null) {
+      json.add("handler", context.serialize(eventHandler, EventHandlerSpecification.class));
+    }
+
+    return json;
+  }
+
+  @Override
+  public TwillSpecification deserialize(JsonElement json, Type typeOfT,
+                                        JsonDeserializationContext context) throws JsonParseException {
+    JsonObject jsonObj = json.getAsJsonObject();
+
+    String name = jsonObj.get("name").getAsString();
+    Map<String, RuntimeSpecification> runnables = context.deserialize(
+      jsonObj.get("runnables"), new TypeToken<Map<String, RuntimeSpecification>>(){}.getType());
+    List<TwillSpecification.Order> orders = context.deserialize(
+      jsonObj.get("orders"), new TypeToken<List<TwillSpecification.Order>>(){}.getType());
+
+    JsonElement handler = jsonObj.get("handler");
+    EventHandlerSpecification eventHandler = null;
+    if (handler != null && !handler.isJsonNull()) {
+      eventHandler = context.deserialize(handler, EventHandlerSpecification.class);
+    }
+
+    return new DefaultTwillSpecification(name, runnables, orders, eventHandler);
+  }
+
+  static final class TwillSpecificationOrderCoder implements JsonSerializer<TwillSpecification.Order>,
+                                                             JsonDeserializer<TwillSpecification.Order> {
+
+    @Override
+    public JsonElement serialize(TwillSpecification.Order src, Type typeOfSrc, JsonSerializationContext context) {
+      JsonObject json = new JsonObject();
+      json.add("names", context.serialize(src.getNames(), new TypeToken<Set<String>>(){}.getType()));
+      json.addProperty("type", src.getType().name());
+      return json;
+    }
+
+    @Override
+    public TwillSpecification.Order deserialize(JsonElement json, Type typeOfT,
+                                                JsonDeserializationContext context) throws JsonParseException {
+      JsonObject jsonObj = json.getAsJsonObject();
+
+      Set<String> names = context.deserialize(jsonObj.get("names"), new TypeToken<Set<String>>(){}.getType());
+      TwillSpecification.Order.Type type = TwillSpecification.Order.Type.valueOf(jsonObj.get("type").getAsString());
+
+      return new DefaultTwillSpecification.DefaultOrder(names, type);
+    }
+  }
+
+  static final class EventHandlerSpecificationCoder implements JsonSerializer<EventHandlerSpecification>,
+                                                               JsonDeserializer<EventHandlerSpecification> {
+
+    @Override
+    public JsonElement serialize(EventHandlerSpecification src, Type typeOfSrc, JsonSerializationContext context) {
+      JsonObject json = new JsonObject();
+      json.addProperty("classname", src.getClassName());
+      json.add("configs", context.serialize(src.getConfigs(), new TypeToken<Map<String, String>>(){}.getType()));
+      return json;
+    }
+
+    @Override
+    public EventHandlerSpecification deserialize(JsonElement json, Type typeOfT,
+                                                 JsonDeserializationContext context) throws JsonParseException {
+      JsonObject jsonObj = json.getAsJsonObject();
+      String className = jsonObj.get("classname").getAsString();
+      Map<String, String> configs = context.deserialize(jsonObj.get("configs"),
+                                                        new TypeToken<Map<String, String>>() {
+                                                        }.getType());
+
+      return new DefaultEventHandlerSpecification(className, configs);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/kafka/EmbeddedKafkaServer.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/kafka/EmbeddedKafkaServer.java b/twill-core/src/main/java/org/apache/twill/internal/kafka/EmbeddedKafkaServer.java
new file mode 100644
index 0000000..14dfc70
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/kafka/EmbeddedKafkaServer.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.kafka;
+
+import com.google.common.base.Throwables;
+import com.google.common.collect.Lists;
+import com.google.common.util.concurrent.AbstractIdleService;
+
+import java.io.File;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.net.URLClassLoader;
+import java.util.List;
+import java.util.Properties;
+
+/**
+ * A service for running an embedded Kafka server, loading the Kafka classes through a separate ClassLoader.
+ */
+public final class EmbeddedKafkaServer extends AbstractIdleService {
+
+  private static final String KAFKA_CONFIG_CLASS = "kafka.server.KafkaConfig";
+  private static final String KAFKA_SERVER_CLASS = "kafka.server.KafkaServerStartable";
+
+  private final Object server;
+
+  public EmbeddedKafkaServer(File kafkaDir, Properties properties) {
+    this(createClassLoader(kafkaDir), properties);
+  }
+
+  public EmbeddedKafkaServer(ClassLoader classLoader, Properties properties) {
+    try {
+      Class<?> configClass = classLoader.loadClass(KAFKA_CONFIG_CLASS);
+      Object config = configClass.getConstructor(Properties.class).newInstance(properties);
+
+      Class<?> serverClass = classLoader.loadClass(KAFKA_SERVER_CLASS);
+      server = serverClass.getConstructor(configClass).newInstance(config);
+    } catch (Exception e) {
+      throw Throwables.propagate(e);
+    }
+  }
+
+  @Override
+  protected void startUp() throws Exception {
+    server.getClass().getMethod("startup").invoke(server);
+  }
+
+  @Override
+  protected void shutDown() throws Exception {
+    server.getClass().getMethod("shutdown").invoke(server);
+    server.getClass().getMethod("awaitShutdown").invoke(server);
+  }
+
+  private static ClassLoader createClassLoader(File kafkaDir) {
+    ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
+    ClassLoader thisClassLoader = EmbeddedKafkaServer.class.getClassLoader();
+    ClassLoader parent = contextClassLoader != null
+                            ? contextClassLoader
+                            : thisClassLoader != null
+                                ? thisClassLoader : ClassLoader.getSystemClassLoader();
+
+    return new URLClassLoader(findJars(kafkaDir, Lists.<URL>newArrayList()).toArray(new URL[0]), parent);
+  }
+
+  private static List<URL> findJars(File dir, List<URL> urls) {
+    try {
+      for (File file : dir.listFiles()) {
+        if (file.isDirectory()) {
+          findJars(file, urls);
+        } else if (file.getName().endsWith(".jar")) {
+          urls.add(file.toURI().toURL());
+        }
+      }
+      return urls;
+    } catch (MalformedURLException e) {
+      throw Throwables.propagate(e);
+    }
+  }
+}
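
A start/stop sketch; the property names follow the Kafka 0.7 style this wrapper appears to target and should be treated as assumptions, as should the directory locations. startAndWait/stopAndWait come from Guava's AbstractIdleService:

    Properties props = new Properties();
    props.setProperty("brokerid", "1");                 // assumed Kafka 0.7 property names
    props.setProperty("port", "9092");
    props.setProperty("log.dir", "/tmp/kafka-logs");
    props.setProperty("zk.connect", "localhost:2181");

    EmbeddedKafkaServer kafka = new EmbeddedKafkaServer(new File("/opt/kafka"), props);
    kafka.startAndWait();
    try {
      // publish / consume messages here
    } finally {
      kafka.stopAndWait();
    }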

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/kafka/client/AbstractCompressedMessageSetEncoder.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/kafka/client/AbstractCompressedMessageSetEncoder.java b/twill-core/src/main/java/org/apache/twill/internal/kafka/client/AbstractCompressedMessageSetEncoder.java
new file mode 100644
index 0000000..a9c3381
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/kafka/client/AbstractCompressedMessageSetEncoder.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.kafka.client;
+
+import com.google.common.base.Throwables;
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.jboss.netty.buffer.ChannelBufferOutputStream;
+import org.jboss.netty.buffer.ChannelBuffers;
+
+import java.io.IOException;
+import java.io.OutputStream;
+
+/**
+ * A base implementation of {@link MessageSetEncoder} that does message compression.
+ */
+abstract class AbstractCompressedMessageSetEncoder extends AbstractMessageSetEncoder {
+
+  private final Compression compression;
+  private ChannelBufferOutputStream os;
+  private OutputStream compressedOutput;
+
+
+  protected AbstractCompressedMessageSetEncoder(Compression compression) {
+    this.compression = compression;
+    try {
+      this.os = new ChannelBufferOutputStream(ChannelBuffers.dynamicBuffer());
+      this.compressedOutput = createCompressedStream(os);
+    } catch (IOException e) {
+      // Should never happen
+      throw Throwables.propagate(e);
+    }
+  }
+
+  @Override
+  public final MessageSetEncoder add(ChannelBuffer payload) {
+    try {
+      ChannelBuffer encoded = encodePayload(payload);
+      encoded.readBytes(compressedOutput, encoded.readableBytes());
+    } catch (IOException e) {
+      throw Throwables.propagate(e);
+    }
+    return this;
+
+  }
+
+  @Override
+  public final ChannelBuffer finish() {
+    try {
+      compressedOutput.close();
+      ChannelBuffer buf = prefixLength(encodePayload(os.buffer(), compression));
+      compressedOutput = createCompressedStream(os);
+      os.buffer().clear();
+
+      return buf;
+
+    } catch (IOException e) {
+      throw Throwables.propagate(e);
+    }
+
+  }
+
+  protected abstract OutputStream createCompressedStream(OutputStream os) throws IOException;
+}
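
A minimal subclass sketch, assuming the Compression enum (not shown in this patch) defines a GZIP constant; since the base class is package-private, such a subclass would have to live in the same package:

    final class GzipMessageSetEncoder extends AbstractCompressedMessageSetEncoder {

      GzipMessageSetEncoder() {
        super(Compression.GZIP);   // assumes Compression.GZIP exists
      }

      @Override
      protected OutputStream createCompressedStream(OutputStream os) throws IOException {
        // Wrap the buffer output in a standard JDK GZIP stream.
        return new java.util.zip.GZIPOutputStream(os);
      }
    }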

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/kafka/client/AbstractMessageSetEncoder.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/kafka/client/AbstractMessageSetEncoder.java b/twill-core/src/main/java/org/apache/twill/internal/kafka/client/AbstractMessageSetEncoder.java
new file mode 100644
index 0000000..9955d6a
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/kafka/client/AbstractMessageSetEncoder.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.kafka.client;
+
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.jboss.netty.buffer.ChannelBuffers;
+
+import java.util.zip.CRC32;
+
+/**
+ * A base implementation of {@link MessageSetEncoder}.
+ */
+abstract class AbstractMessageSetEncoder implements MessageSetEncoder {
+
+  private static final ThreadLocal<CRC32> CRC32_LOCAL = new ThreadLocal<CRC32>() {
+    @Override
+    protected CRC32 initialValue() {
+      return new CRC32();
+    }
+  };
+
+  protected final int computeCRC32(ChannelBuffer buffer) {
+    CRC32 crc32 = CRC32_LOCAL.get();
+    crc32.reset();
+
+    if (buffer.hasArray()) {
+      crc32.update(buffer.array(), buffer.arrayOffset() + buffer.readerIndex(), buffer.readableBytes());
+    } else {
+      byte[] bytes = new byte[buffer.readableBytes()];
+      buffer.getBytes(buffer.readerIndex(), bytes);
+      crc32.update(bytes);
+    }
+    return (int) crc32.getValue();
+  }
+
+  protected final ChannelBuffer encodePayload(ChannelBuffer payload) {
+    return encodePayload(payload, Compression.NONE);
+  }
+
+  protected final ChannelBuffer encodePayload(ChannelBuffer payload, Compression compression) {
+    ChannelBuffer header = ChannelBuffers.buffer(10);
+
+    int crc = computeCRC32(payload);
+
+    int magic = ((compression == Compression.NONE) ? 0 : 1);
+
+    // Message length = 1 byte magic + (optional 1 compression byte) + 4 bytes crc + payload length
+    header.writeInt(5 + magic + payload.readableBytes());
+    // Magic number = 0 for non-compressed data
+    header.writeByte(magic);
+    if (magic > 0) {
+      header.writeByte(compression.getCode());
+    }
+    header.writeInt(crc);
+
+    return ChannelBuffers.wrappedBuffer(header, payload);
+  }
+
+  protected final ChannelBuffer prefixLength(ChannelBuffer buffer) {
+    ChannelBuffer sizeBuf = ChannelBuffers.buffer(4);
+    sizeBuf.writeInt(buffer.readableBytes());
+    return ChannelBuffers.wrappedBuffer(sizeBuf, buffer);
+  }
+}
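
As a concrete, non-compressing example of how these helpers compose, a minimal encoder could frame
each payload with encodePayload() and length-prefix the accumulated set in finish(). This is a
sketch with a hypothetical class name, not necessarily the encoder the client actually uses.

package org.apache.twill.internal.kafka.client;

import org.jboss.netty.buffer.ChannelBuffer;
import org.jboss.netty.buffer.ChannelBuffers;

// Sketch: every added payload becomes [length][magic=0][crc32][payload], and
// finish() prefixes the whole message set with its total length.
final class IdentityMessageSetEncoderSketch extends AbstractMessageSetEncoder {

  private ChannelBuffer messageSet = ChannelBuffers.EMPTY_BUFFER;

  @Override
  public MessageSetEncoder add(ChannelBuffer payload) {
    messageSet = ChannelBuffers.wrappedBuffer(messageSet, encodePayload(payload));
    return this;
  }

  @Override
  public ChannelBuffer finish() {
    ChannelBuffer result = prefixLength(messageSet);
    messageSet = ChannelBuffers.EMPTY_BUFFER;
    return result;
  }
}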

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/kafka/client/BasicFetchedMessage.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/kafka/client/BasicFetchedMessage.java b/twill-core/src/main/java/org/apache/twill/internal/kafka/client/BasicFetchedMessage.java
new file mode 100644
index 0000000..286bf82
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/kafka/client/BasicFetchedMessage.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.kafka.client;
+
+import org.apache.twill.kafka.client.FetchedMessage;
+
+import java.nio.ByteBuffer;
+
+/**
+ * A straightforward {@link FetchedMessage} implementation backed by an offset and a payload {@link ByteBuffer}.
+ */
+final class BasicFetchedMessage implements FetchedMessage {
+
+  private final long offset;
+  private final ByteBuffer buffer;
+
+  BasicFetchedMessage(long offset, ByteBuffer buffer) {
+    this.offset = offset;
+    this.buffer = buffer;
+  }
+
+  @Override
+  public long getOffset() {
+    return offset;
+  }
+
+  @Override
+  public ByteBuffer getBuffer() {
+    return buffer;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/kafka/client/Bufferer.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/kafka/client/Bufferer.java b/twill-core/src/main/java/org/apache/twill/internal/kafka/client/Bufferer.java
new file mode 100644
index 0000000..c1fb4f2
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/kafka/client/Bufferer.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.kafka.client;
+
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.jboss.netty.buffer.ChannelBuffers;
+
+/**
+ * A helper class for buffering data in the format [len][payload-of-len].
+ */
+final class Bufferer {
+
+  private ChannelBuffer currentBuffer = null;
+  private int currentSize = -1;
+
+  void apply(ChannelBuffer buffer) {
+    currentBuffer = concatBuffer(currentBuffer, buffer);
+  }
+
+  /**
+   * Returns the next complete buffer if its data is ready to be consumed,
+   * otherwise returns {@link ChannelBuffers#EMPTY_BUFFER}.
+   */
+  ChannelBuffer getNext() {
+    if (currentSize < 0) {
+      if (currentBuffer.readableBytes() < 4) {
+        return ChannelBuffers.EMPTY_BUFFER;
+      }
+      currentSize = currentBuffer.readInt();
+    }
+
+    // Keep buffering if fewer than the required number of bytes are available
+    if (currentBuffer.readableBytes() < currentSize) {
+      return ChannelBuffers.EMPTY_BUFFER;
+    }
+
+    ChannelBuffer result = currentBuffer.readSlice(currentSize);
+    currentSize = -1;
+
+    return result;
+  }
+
+  private ChannelBuffer concatBuffer(ChannelBuffer current, ChannelBuffer buffer) {
+    return current == null ? buffer : ChannelBuffers.wrappedBuffer(current, buffer);
+  }
+}
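
A short usage sketch for the class above (hypothetical helper in the same package, since Bufferer
is package-private): raw buffers are applied as they arrive from the network, and complete frames
are drained until getNext() returns the empty buffer.

package org.apache.twill.internal.kafka.client;

import org.jboss.netty.buffer.ChannelBuffer;

import java.util.ArrayList;
import java.util.List;

// Sketch: collect every complete [len][payload] frame that becomes available
// after appending a newly received buffer.
final class BuffererUsageSketch {

  static List<ChannelBuffer> drain(Bufferer bufferer, ChannelBuffer received) {
    List<ChannelBuffer> frames = new ArrayList<ChannelBuffer>();
    bufferer.apply(received);
    for (ChannelBuffer frame = bufferer.getNext(); frame.readable(); frame = bufferer.getNext()) {
      frames.add(frame);
    }
    return frames;
  }

  private BuffererUsageSketch() { }
}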

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/kafka/client/Compression.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/kafka/client/Compression.java b/twill-core/src/main/java/org/apache/twill/internal/kafka/client/Compression.java
new file mode 100644
index 0000000..3355b9f
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/kafka/client/Compression.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.kafka.client;
+
+/**
+ * Enum for indicating compression method.
+ */
+public enum Compression {
+  NONE(0),
+  GZIP(1),
+  SNAPPY(2);
+
+  private final int code;
+
+  Compression(int code) {
+    this.code = code;
+  }
+
+  public int getCode() {
+    return code;
+  }
+
+  public static Compression fromCode(int code) {
+    switch (code) {
+      case 0:
+        return NONE;
+      case 1:
+        return GZIP;
+      case 2:
+        return SNAPPY;
+    }
+    throw new IllegalArgumentException("Unknown compression code.");
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/kafka/client/ConnectionPool.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/kafka/client/ConnectionPool.java b/twill-core/src/main/java/org/apache/twill/internal/kafka/client/ConnectionPool.java
new file mode 100644
index 0000000..c2865ba
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/kafka/client/ConnectionPool.java
@@ -0,0 +1,125 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.kafka.client;
+
+import com.google.common.collect.Maps;
+import org.jboss.netty.bootstrap.ClientBootstrap;
+import org.jboss.netty.channel.ChannelFuture;
+import org.jboss.netty.channel.ChannelFutureListener;
+import org.jboss.netty.channel.group.ChannelGroup;
+import org.jboss.netty.channel.group.ChannelGroupFuture;
+import org.jboss.netty.channel.group.ChannelGroupFutureListener;
+import org.jboss.netty.channel.group.DefaultChannelGroup;
+
+import java.net.InetSocketAddress;
+import java.util.Queue;
+import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.concurrent.ConcurrentMap;
+
+/**
+ * Provides Netty socket connection reuse.
+ */
+final class ConnectionPool {
+
+  private final ClientBootstrap bootstrap;
+  private final ChannelGroup channelGroup;
+  private final ConcurrentMap<InetSocketAddress, Queue<ChannelFuture>> connections;
+
+  /**
+   * For releasing a connection back to the pool.
+   */
+  interface ConnectionReleaser {
+    void release();
+  }
+
+  /**
+   * Result of a connect request.
+   */
+  interface ConnectResult extends ConnectionReleaser {
+    ChannelFuture getChannelFuture();
+  }
+
+  ConnectionPool(ClientBootstrap bootstrap) {
+    this.bootstrap = bootstrap;
+    this.channelGroup = new DefaultChannelGroup();
+    this.connections = Maps.newConcurrentMap();
+  }
+
+  ConnectResult connect(InetSocketAddress address) {
+    Queue<ChannelFuture> channelFutures = connections.get(address);
+    if (channelFutures == null) {
+      channelFutures = new ConcurrentLinkedQueue<ChannelFuture>();
+      Queue<ChannelFuture> result = connections.putIfAbsent(address, channelFutures);
+      channelFutures = result == null ? channelFutures : result;
+    }
+
+    ChannelFuture channelFuture = channelFutures.poll();
+    while (channelFuture != null) {
+      if (channelFuture.isSuccess() && channelFuture.getChannel().isConnected()) {
+        return new SimpleConnectResult(address, channelFuture);
+      }
+      channelFuture = channelFutures.poll();
+    }
+
+    channelFuture = bootstrap.connect(address);
+    channelFuture.addListener(new ChannelFutureListener() {
+      @Override
+      public void operationComplete(ChannelFuture future) throws Exception {
+        if (future.isSuccess()) {
+          channelGroup.add(future.getChannel());
+        }
+      }
+    });
+    return new SimpleConnectResult(address, channelFuture);
+  }
+
+  ChannelGroupFuture close() {
+    ChannelGroupFuture result = channelGroup.close();
+    result.addListener(new ChannelGroupFutureListener() {
+      @Override
+      public void operationComplete(ChannelGroupFuture future) throws Exception {
+        bootstrap.releaseExternalResources();
+      }
+    });
+    return result;
+  }
+
+  private final class SimpleConnectResult implements ConnectResult {
+
+    private final InetSocketAddress address;
+    private final ChannelFuture future;
+
+
+    private SimpleConnectResult(InetSocketAddress address, ChannelFuture future) {
+      this.address = address;
+      this.future = future;
+    }
+
+    @Override
+    public ChannelFuture getChannelFuture() {
+      return future;
+    }
+
+    @Override
+    public void release() {
+      if (future.isSuccess()) {
+        connections.get(address).offer(future);
+      }
+    }
+  }
+}
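
A usage sketch for the pool (hypothetical helper in the same package, since ConnectionPool is
package-private): connect() hands back either a pooled channel or a freshly opened one, and
release() should be called once the caller is done so a healthy connection can be reused for the
next request to the same broker.

package org.apache.twill.internal.kafka.client;

import org.jboss.netty.channel.ChannelFuture;
import org.jboss.netty.channel.ChannelFutureListener;

import java.net.InetSocketAddress;

// Sketch: write one message to a broker address using a pooled connection.
final class ConnectionPoolUsageSketch {

  static void writeOnce(ConnectionPool pool, InetSocketAddress broker, final Object message) {
    final ConnectionPool.ConnectResult connection = pool.connect(broker);
    connection.getChannelFuture().addListener(new ChannelFutureListener() {
      @Override
      public void operationComplete(ChannelFuture future) throws Exception {
        try {
          if (future.isSuccess()) {
            future.getChannel().write(message);
          }
        } finally {
          connection.release();
        }
      }
    });
  }

  private ConnectionPoolUsageSketch() { }
}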


[19/28] Making maven site works.

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/AbstractExecutionServiceController.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/AbstractExecutionServiceController.java b/twill-core/src/main/java/org/apache/twill/internal/AbstractExecutionServiceController.java
new file mode 100644
index 0000000..974639d
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/AbstractExecutionServiceController.java
@@ -0,0 +1,207 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal;
+
+import org.apache.twill.api.RunId;
+import org.apache.twill.api.ServiceController;
+import org.apache.twill.common.Threads;
+import com.google.common.util.concurrent.AbstractIdleService;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.Service;
+
+import java.util.Queue;
+import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.concurrent.Executor;
+import java.util.concurrent.atomic.AtomicReference;
+
+/**
+ * An abstract base implementation of {@link ServiceController} that delegates its lifecycle to an
+ * internal Guava service and replays the latest state change to listeners registered after the fact.
+ */
+public abstract class AbstractExecutionServiceController implements ServiceController {
+
+  private final RunId runId;
+  private final ListenerExecutors listenerExecutors;
+  private final Service serviceDelegate;
+
+  protected AbstractExecutionServiceController(RunId runId) {
+    this.runId = runId;
+    this.listenerExecutors = new ListenerExecutors();
+    this.serviceDelegate = new ServiceDelegate();
+  }
+
+  protected abstract void startUp();
+
+  protected abstract void shutDown();
+
+  @Override
+  public final RunId getRunId() {
+    return runId;
+  }
+
+  @Override
+  public final void addListener(Listener listener, Executor executor) {
+    listenerExecutors.addListener(new ListenerExecutor(listener, executor));
+  }
+
+  @Override
+  public final ListenableFuture<State> start() {
+    serviceDelegate.addListener(listenerExecutors, Threads.SAME_THREAD_EXECUTOR);
+    return serviceDelegate.start();
+  }
+
+  @Override
+  public final State startAndWait() {
+    return Futures.getUnchecked(start());
+  }
+
+  @Override
+  public final boolean isRunning() {
+    return serviceDelegate.isRunning();
+  }
+
+  @Override
+  public final State state() {
+    return serviceDelegate.state();
+  }
+
+  @Override
+  public final State stopAndWait() {
+    return Futures.getUnchecked(stop());
+  }
+
+  @Override
+  public final ListenableFuture<State> stop() {
+    return serviceDelegate.stop();
+  }
+
+  protected Executor executor(final State state) {
+    return new Executor() {
+      @Override
+      public void execute(Runnable command) {
+        Thread t = new Thread(command, getClass().getSimpleName() + " " + state);
+        t.setDaemon(true);
+        t.start();
+      }
+    };
+  }
+
+
+  private final class ServiceDelegate extends AbstractIdleService {
+    @Override
+    protected void startUp() throws Exception {
+      AbstractExecutionServiceController.this.startUp();
+    }
+
+    @Override
+    protected void shutDown() throws Exception {
+      AbstractExecutionServiceController.this.shutDown();
+    }
+
+    @Override
+    protected Executor executor(State state) {
+      return AbstractExecutionServiceController.this.executor(state);
+    }
+  }
+
+  /**
+   * Inner class for dispatching listener callbacks to a list of listeners.
+   */
+  private static final class ListenerExecutors implements Listener {
+
+    private interface Callback {
+      void call(Listener listener);
+    }
+
+    private final Queue<ListenerExecutor> listeners = new ConcurrentLinkedQueue<ListenerExecutor>();
+    private final AtomicReference<Callback> lastState = new AtomicReference<Callback>();
+
+    private synchronized void addListener(final ListenerExecutor listener) {
+      listeners.add(listener);
+      Callback callback = lastState.get();
+      if (callback != null) {
+        callback.call(listener);
+      }
+    }
+
+    @Override
+    public synchronized void starting() {
+      lastState.set(new Callback() {
+        @Override
+        public void call(Listener listener) {
+          listener.starting();
+        }
+      });
+      for (ListenerExecutor listener : listeners) {
+        listener.starting();
+      }
+    }
+
+    @Override
+    public synchronized void running() {
+      lastState.set(new Callback() {
+        @Override
+        public void call(Listener listener) {
+          listener.running();
+        }
+      });
+      for (ListenerExecutor listener : listeners) {
+        listener.running();
+      }
+    }
+
+    @Override
+    public synchronized void stopping(final State from) {
+      lastState.set(new Callback() {
+        @Override
+        public void call(Listener listener) {
+          listener.stopping(from);
+        }
+      });
+      for (ListenerExecutor listener : listeners) {
+        listener.stopping(from);
+      }
+    }
+
+    @Override
+    public synchronized void terminated(final State from) {
+      lastState.set(new Callback() {
+        @Override
+        public void call(Listener listener) {
+          listener.terminated(from);
+        }
+      });
+      for (ListenerExecutor listener : listeners) {
+        listener.terminated(from);
+      }
+    }
+
+    @Override
+    public synchronized void failed(final State from, final Throwable failure) {
+      lastState.set(new Callback() {
+        @Override
+        public void call(Listener listener) {
+          listener.failed(from, failure);
+        }
+      });
+      for (ListenerExecutor listener : listeners) {
+        listener.failed(from, failure);
+      }
+    }
+  }
+}
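
The listener bookkeeping above replays the most recent state to listeners that register late, so
late registration is safe. A usage sketch, assuming Listener and State here resolve to Guava's
Service.Listener and Service.State (as the delegation to AbstractIdleService suggests) and that
controller is an existing instance of this class:

import com.google.common.util.concurrent.Service;
import org.apache.twill.common.Threads;

// Sketch: even if the controller is already running when this listener is
// added, the running() callback is still delivered once.
controller.addListener(new Service.Listener() {
  @Override public void starting() { }
  @Override public void running() { System.out.println("controller running"); }
  @Override public void stopping(Service.State from) { }
  @Override public void terminated(Service.State from) { }
  @Override public void failed(Service.State from, Throwable failure) { }
}, Threads.SAME_THREAD_EXECUTOR);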

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/AbstractTwillController.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/AbstractTwillController.java b/twill-core/src/main/java/org/apache/twill/internal/AbstractTwillController.java
new file mode 100644
index 0000000..5806f9d
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/AbstractTwillController.java
@@ -0,0 +1,180 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal;
+
+import org.apache.twill.api.RunId;
+import org.apache.twill.api.TwillController;
+import org.apache.twill.api.logging.LogEntry;
+import org.apache.twill.api.logging.LogHandler;
+import org.apache.twill.discovery.Discoverable;
+import org.apache.twill.discovery.DiscoveryServiceClient;
+import org.apache.twill.discovery.ZKDiscoveryService;
+import org.apache.twill.internal.json.StackTraceElementCodec;
+import org.apache.twill.internal.kafka.client.SimpleKafkaClient;
+import org.apache.twill.internal.logging.LogEntryDecoder;
+import org.apache.twill.internal.state.SystemMessages;
+import org.apache.twill.kafka.client.FetchedMessage;
+import org.apache.twill.kafka.client.KafkaClient;
+import org.apache.twill.zookeeper.ZKClient;
+import org.apache.twill.zookeeper.ZKClients;
+import com.google.common.base.Charsets;
+import com.google.common.collect.Iterables;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.gson.Gson;
+import com.google.gson.GsonBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Iterator;
+import java.util.Queue;
+import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * An abstract base class for {@link org.apache.twill.api.TwillController} implementations that use ZooKeeper to control a
+ * running Twill application.
+ */
+public abstract class AbstractTwillController extends AbstractZKServiceController implements TwillController {
+
+  private static final Logger LOG = LoggerFactory.getLogger(AbstractTwillController.class);
+  private static final int MAX_KAFKA_FETCH_SIZE = 1048576;
+  private static final long SHUTDOWN_TIMEOUT_MS = 2000;
+  private static final long LOG_FETCH_TIMEOUT_MS = 5000;
+
+  private final Queue<LogHandler> logHandlers;
+  private final KafkaClient kafkaClient;
+  private final DiscoveryServiceClient discoveryServiceClient;
+  private final LogPollerThread logPoller;
+
+  public AbstractTwillController(RunId runId, ZKClient zkClient, Iterable<LogHandler> logHandlers) {
+    super(runId, zkClient);
+    this.logHandlers = new ConcurrentLinkedQueue<LogHandler>();
+    this.kafkaClient = new SimpleKafkaClient(ZKClients.namespace(zkClient, "/" + runId.getId() + "/kafka"));
+    this.discoveryServiceClient = new ZKDiscoveryService(zkClient);
+    Iterables.addAll(this.logHandlers, logHandlers);
+    this.logPoller = new LogPollerThread(runId, kafkaClient, logHandlers);
+  }
+
+  @Override
+  protected void doStartUp() {
+    if (!logHandlers.isEmpty()) {
+      logPoller.start();
+    }
+  }
+
+  @Override
+  protected void doShutDown() {
+    logPoller.terminate();
+    try {
+      // Wait for the poller thread to stop.
+      logPoller.join(SHUTDOWN_TIMEOUT_MS);
+    } catch (InterruptedException e) {
+      LOG.warn("Joining of log poller thread interrupted.", e);
+    }
+  }
+
+  @Override
+  public final synchronized void addLogHandler(LogHandler handler) {
+    logHandlers.add(handler);
+    if (!logPoller.isAlive()) {
+      logPoller.start();
+    }
+  }
+
+  @Override
+  public final Iterable<Discoverable> discoverService(String serviceName) {
+    return discoveryServiceClient.discover(serviceName);
+  }
+
+  @Override
+  public final ListenableFuture<Integer> changeInstances(String runnable, int newCount) {
+    return sendMessage(SystemMessages.setInstances(runnable, newCount), newCount);
+  }
+
+  private static final class LogPollerThread extends Thread {
+
+    private final KafkaClient kafkaClient;
+    private final Iterable<LogHandler> logHandlers;
+    private volatile boolean running = true;
+
+    LogPollerThread(RunId runId, KafkaClient kafkaClient, Iterable<LogHandler> logHandlers) {
+      super("twill-log-poller-" + runId.getId());
+      setDaemon(true);
+      this.kafkaClient = kafkaClient;
+      this.logHandlers = logHandlers;
+    }
+
+    @Override
+    public void run() {
+      LOG.info("Twill log poller thread '{}' started.", getName());
+      kafkaClient.startAndWait();
+      Gson gson = new GsonBuilder().registerTypeAdapter(LogEntry.class, new LogEntryDecoder())
+        .registerTypeAdapter(StackTraceElement.class, new StackTraceElementCodec())
+        .create();
+
+      while (running && !isInterrupted()) {
+        long offset;
+        try {
+          // Get the earliest offset
+          long[] offsets = kafkaClient.getOffset(Constants.LOG_TOPIC, 0, -2, 1).get(LOG_FETCH_TIMEOUT_MS,
+                                                                                    TimeUnit.MILLISECONDS);
+          // Should have one entry
+          offset = offsets[0];
+        } catch (Throwable t) {
+          // Keep retrying
+          LOG.warn("Failed to fetch offsets from Kafka. Retrying.", t);
+          continue;
+        }
+
+        // Now fetch log messages from Kafka
+        Iterator<FetchedMessage> messageIterator = kafkaClient.consume(Constants.LOG_TOPIC, 0,
+                                                                       offset, MAX_KAFKA_FETCH_SIZE);
+        try {
+          while (messageIterator.hasNext()) {
+            String json = Charsets.UTF_8.decode(messageIterator.next().getBuffer()).toString();
+            try {
+              LogEntry entry = gson.fromJson(json, LogEntry.class);
+              if (entry != null) {
+                invokeHandlers(entry);
+              }
+            } catch (Exception e) {
+              LOG.error("Failed to decode log entry {}", json, e);
+            }
+          }
+        } catch (Throwable t) {
+          LOG.warn("Exception while fetching log message from Kafka. Retrying.", t);
+          continue;
+        }
+      }
+
+      kafkaClient.stopAndWait();
+      LOG.info("Twill log poller thread stopped.");
+    }
+
+    void terminate() {
+      running = false;
+      interrupt();
+    }
+
+    private void invokeHandlers(LogEntry entry) {
+      for (LogHandler handler : logHandlers) {
+        handler.onLog(entry);
+      }
+    }
+  }
+}
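
Since the log poller thread is only started once a handler is present, attaching a handler is what
turns on log fetching from the application's Kafka topic. A minimal sketch, given an existing
TwillController instance named controller (the handler body is illustrative only):

import org.apache.twill.api.logging.LogEntry;
import org.apache.twill.api.logging.LogHandler;

// Sketch: print every log entry fetched from the application's Kafka log topic.
controller.addLogHandler(new LogHandler() {
  @Override
  public void onLog(LogEntry logEntry) {
    System.out.println(logEntry);
  }
});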

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/AbstractZKServiceController.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/AbstractZKServiceController.java b/twill-core/src/main/java/org/apache/twill/internal/AbstractZKServiceController.java
new file mode 100644
index 0000000..98cc2b8
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/AbstractZKServiceController.java
@@ -0,0 +1,314 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal;
+
+import org.apache.twill.api.Command;
+import org.apache.twill.api.RunId;
+import org.apache.twill.api.ServiceController;
+import org.apache.twill.common.Threads;
+import org.apache.twill.internal.json.StackTraceElementCodec;
+import org.apache.twill.internal.json.StateNodeCodec;
+import org.apache.twill.internal.state.Message;
+import org.apache.twill.internal.state.Messages;
+import org.apache.twill.internal.state.StateNode;
+import org.apache.twill.internal.state.SystemMessages;
+import org.apache.twill.zookeeper.NodeData;
+import org.apache.twill.zookeeper.ZKClient;
+import com.google.common.base.Charsets;
+import com.google.common.collect.Lists;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.gson.GsonBuilder;
+import org.apache.zookeeper.KeeperException;
+import org.apache.zookeeper.WatchedEvent;
+import org.apache.zookeeper.Watcher;
+import org.apache.zookeeper.data.Stat;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.List;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+/**
+ * An abstract base class for implementing a {@link ServiceController} using ZooKeeper as a means for
+ * communicating with the remote service. This is designed to work in tandem with the {@link ZKServiceDecorator}.
+ */
+public abstract class AbstractZKServiceController extends AbstractExecutionServiceController {
+
+  private static final Logger LOG = LoggerFactory.getLogger(AbstractZKServiceController.class);
+
+  private final ZKClient zkClient;
+  private final InstanceNodeDataCallback instanceNodeDataCallback;
+  private final StateNodeDataCallback stateNodeDataCallback;
+  private final List<ListenableFuture<?>> messageFutures;
+  private ListenableFuture<State> stopMessageFuture;
+
+  protected AbstractZKServiceController(RunId runId, ZKClient zkClient) {
+    super(runId);
+    this.zkClient = zkClient;
+    this.instanceNodeDataCallback = new InstanceNodeDataCallback();
+    this.stateNodeDataCallback = new StateNodeDataCallback();
+    this.messageFutures = Lists.newLinkedList();
+  }
+
+  @Override
+  public final ListenableFuture<Command> sendCommand(Command command) {
+    return sendMessage(Messages.createForAll(command), command);
+  }
+
+  @Override
+  public final ListenableFuture<Command> sendCommand(String runnableName, Command command) {
+    return sendMessage(Messages.createForRunnable(runnableName, command), command);
+  }
+
+  @Override
+  protected final void startUp() {
+    // Watch for instance node existence.
+    actOnExists(getInstancePath(), new Runnable() {
+      @Override
+      public void run() {
+        watchInstanceNode();
+      }
+    });
+
+    // Watch for state node data
+    actOnExists(getZKPath("state"), new Runnable() {
+      @Override
+      public void run() {
+        watchStateNode();
+      }
+    });
+
+    doStartUp();
+  }
+
+  @Override
+  protected final synchronized void shutDown() {
+    if (stopMessageFuture == null) {
+      stopMessageFuture = ZKMessages.sendMessage(zkClient, getMessagePrefix(),
+                                                 SystemMessages.stopApplication(), State.TERMINATED);
+    }
+
+    // Cancel all pending message futures.
+    for (ListenableFuture<?> future : messageFutures) {
+      future.cancel(true);
+    }
+
+    doShutDown();
+  }
+
+  /**
+   * Sends a {@link Message} to the remote service. Returns a future that will be completed when the message
+   * has been processed.
+   * @param message The message to send.
+   * @param result Object to set into the future when the message has been processed.
+   * @param <V> Type of the result.
+   * @return A {@link ListenableFuture} that will be completed when the message has been processed.
+   */
+  protected final synchronized <V> ListenableFuture<V> sendMessage(Message message, V result) {
+    if (!isRunning()) {
+      return Futures.immediateFailedFuture(new IllegalStateException("Cannot send message to non-running application"));
+    }
+    final ListenableFuture<V> messageFuture = ZKMessages.sendMessage(zkClient, getMessagePrefix(), message, result);
+    messageFutures.add(messageFuture);
+    messageFuture.addListener(new Runnable() {
+      @Override
+      public void run() {
+        // If the completion is triggered when stopping, do nothing.
+        if (state() == State.STOPPING) {
+          return;
+        }
+        synchronized (AbstractZKServiceController.this) {
+          messageFutures.remove(messageFuture);
+        }
+      }
+    }, Threads.SAME_THREAD_EXECUTOR);
+
+    return messageFuture;
+  }
+
+  protected final ListenableFuture<State> getStopMessageFuture() {
+    return stopMessageFuture;
+  }
+
+  /**
+   * Called during startup. Executed in the startup thread.
+   */
+  protected abstract void doStartUp();
+
+  /**
+   * Called during shutdown. Executed in the shutdown thread.
+   */
+  protected abstract void doShutDown();
+
+  /**
+   * Called when an update on the live instance node is detected.
+   * @param nodeData The updated live instance node data or {@code null} if there is an error when fetching
+   *                 the node data.
+   */
+  protected abstract void instanceNodeUpdated(NodeData nodeData);
+
+  /**
+   * Called when an update on the state node is detected.
+   * @param stateNode The updated state node data or {@code null} if there is an error when fetching the node data.
+   */
+  protected abstract void stateNodeUpdated(StateNode stateNode);
+
+  protected synchronized void forceShutDown() {
+    if (stopMessageFuture == null) {
+      // In force shutdown, don't send message.
+      stopMessageFuture = Futures.immediateFuture(State.TERMINATED);
+    }
+    stop();
+  }
+
+
+  private void actOnExists(final String path, final Runnable action) {
+    // Watch for node existence.
+    final AtomicBoolean nodeExists = new AtomicBoolean(false);
+    Futures.addCallback(zkClient.exists(path, new Watcher() {
+      @Override
+      public void process(WatchedEvent event) {
+        // When node is created, call the action.
+        // Other event type would be handled by the action.
+        if (event.getType() == Event.EventType.NodeCreated && nodeExists.compareAndSet(false, true)) {
+          action.run();
+        }
+      }
+    }), new FutureCallback<Stat>() {
+      @Override
+      public void onSuccess(Stat result) {
+        if (result != null && nodeExists.compareAndSet(false, true)) {
+          action.run();
+        }
+      }
+
+      @Override
+      public void onFailure(Throwable t) {
+        LOG.error("Failed in exists call to {}. Shutting down service.", path, t);
+        forceShutDown();
+      }
+    }, Threads.SAME_THREAD_EXECUTOR);
+  }
+
+  private void watchInstanceNode() {
+    Futures.addCallback(zkClient.getData(getInstancePath(), new Watcher() {
+      @Override
+      public void process(WatchedEvent event) {
+        State state = state();
+        if (state != State.NEW && state != State.STARTING && state != State.RUNNING) {
+          // Ignore ZK node events when it is in stopping sequence.
+          return;
+        }
+        switch (event.getType()) {
+          case NodeDataChanged:
+            watchInstanceNode();
+            break;
+          case NodeDeleted:
+            // When the ephemeral node goes away, treat the remote service as stopped.
+            forceShutDown();
+            break;
+          default:
+            LOG.info("Ignore ZK event for instance node: {}", event);
+        }
+      }
+    }), instanceNodeDataCallback, Threads.SAME_THREAD_EXECUTOR);
+  }
+
+  private void watchStateNode() {
+    Futures.addCallback(zkClient.getData(getZKPath("state"), new Watcher() {
+      @Override
+      public void process(WatchedEvent event) {
+        State state = state();
+        if (state != State.NEW && state != State.STARTING && state != State.RUNNING) {
+          // Ignore ZK node events when it is in stopping sequence.
+          return;
+        }
+        switch (event.getType()) {
+          case NodeDataChanged:
+            watchStateNode();
+            break;
+          default:
+            LOG.info("Ignore ZK event for state node: {}", event);
+        }
+      }
+    }), stateNodeDataCallback, Threads.SAME_THREAD_EXECUTOR);
+  }
+
+  /**
+   * Returns the path prefix for creating sequential message nodes for the remote service.
+   */
+  private String getMessagePrefix() {
+    return getZKPath("messages/msg");
+  }
+
+  /**
+   * Returns the ZooKeeper node path of the ephemeral instance node for this runId.
+   */
+  private String getInstancePath() {
+    return String.format("/instances/%s", getRunId().getId());
+  }
+
+  private String getZKPath(String path) {
+    return String.format("/%s/%s", getRunId().getId(), path);
+  }
+
+  private final class InstanceNodeDataCallback implements FutureCallback<NodeData> {
+
+    @Override
+    public void onSuccess(NodeData result) {
+      instanceNodeUpdated(result);
+    }
+
+    @Override
+    public void onFailure(Throwable t) {
+      LOG.error("Failed in fetching instance node data.", t);
+      if (t instanceof KeeperException && ((KeeperException) t).code() == KeeperException.Code.NONODE) {
+        // If the node is gone, treat the remote service stopped.
+        forceShutDown();
+      } else {
+        instanceNodeUpdated(null);
+      }
+    }
+  }
+
+  private final class StateNodeDataCallback implements FutureCallback<NodeData> {
+
+    @Override
+    public void onSuccess(NodeData result) {
+      byte[] data = result.getData();
+      if (data == null) {
+        stateNodeUpdated(null);
+        return;
+      }
+      StateNode stateNode = new GsonBuilder().registerTypeAdapter(StateNode.class, new StateNodeCodec())
+        .registerTypeAdapter(StackTraceElement.class, new StackTraceElementCodec())
+        .create()
+        .fromJson(new String(data, Charsets.UTF_8), StateNode.class);
+
+      stateNodeUpdated(stateNode);
+    }
+
+    @Override
+    public void onFailure(Throwable t) {
+      LOG.error("Failed in fetching state node data.", t);
+      stateNodeUpdated(null);
+    }
+  }
+}
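
For reference, the ZooKeeper layout implied by the path helpers above is, for a given run id:

  /instances/<runId>        - ephemeral live-instance node watched to detect when the service stops
  /<runId>/state            - state node carrying the serialized StateNode JSON
  /<runId>/messages/msg...  - sequential message nodes created by sendMessage()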

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/ApplicationBundler.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/ApplicationBundler.java b/twill-core/src/main/java/org/apache/twill/internal/ApplicationBundler.java
new file mode 100644
index 0000000..a0e9a71
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/ApplicationBundler.java
@@ -0,0 +1,362 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal;
+
+import org.apache.twill.filesystem.Location;
+import org.apache.twill.internal.utils.Dependencies;
+import com.google.common.base.Function;
+import com.google.common.base.Splitter;
+import com.google.common.base.Throwables;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
+import com.google.common.io.ByteStreams;
+import com.google.common.io.Files;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.BufferedOutputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.net.URI;
+import java.net.URL;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.Queue;
+import java.util.Set;
+import java.util.jar.JarEntry;
+import java.util.jar.JarOutputStream;
+import java.util.zip.CRC32;
+import java.util.zip.CheckedOutputStream;
+
+/**
+ * This class builds jar files based on class dependencies.
+ */
+public final class ApplicationBundler {
+
+  private static final Logger LOG = LoggerFactory.getLogger(ApplicationBundler.class);
+  
+  public static final String SUBDIR_CLASSES = "classes/";
+  public static final String SUBDIR_LIB = "lib/";
+  public static final String SUBDIR_RESOURCES = "resources/";
+
+  private final List<String> excludePackages;
+  private final List<String> includePackages;
+  private final Set<String> bootstrapClassPaths;
+  private final CRC32 crc32;
+
+  /**
+   * Constructs an ApplicationBundler.
+   *
+   * @param excludePackages Class packages to exclude
+   */
+  public ApplicationBundler(Iterable<String> excludePackages) {
+    this(excludePackages, ImmutableList.<String>of());
+  }
+
+  /**
+   * Constructs an ApplicationBundler.
+   *
+   * @param excludePackages Class packages to exclude
+   * @param includePackages Class packages that should be included. Anything in this list overrides
+   *                        entries provided in excludePackages.
+   */
+  public ApplicationBundler(Iterable<String> excludePackages, Iterable<String> includePackages) {
+    this.excludePackages = ImmutableList.copyOf(excludePackages);
+    this.includePackages = ImmutableList.copyOf(includePackages);
+
+    ImmutableSet.Builder<String> builder = ImmutableSet.builder();
+    for (String classpath : Splitter.on(File.pathSeparatorChar).split(System.getProperty("sun.boot.class.path"))) {
+      File file = new File(classpath);
+      builder.add(file.getAbsolutePath());
+      try {
+        builder.add(file.getCanonicalPath());
+      } catch (IOException e) {
+        // Ignore the exception and proceed.
+      }
+    }
+    this.bootstrapClassPaths = builder.build();
+    this.crc32 = new CRC32();
+
+  }
+
+  public void createBundle(Location target, Iterable<Class<?>> classes) throws IOException {
+    createBundle(target, classes, ImmutableList.<URI>of());
+  }
+
+  /**
+   * Same as calling {@link #createBundle(Location, Iterable)}.
+   */
+  public void createBundle(Location target, Class<?> clz, Class<?>...classes) throws IOException {
+    createBundle(target, ImmutableSet.<Class<?>>builder().add(clz).add(classes).build());
+  }
+
+  /**
+   * Creates a jar file that includes all the given classes and all the classes they depend on.
+   * The jar also includes all classes and resources under the packages given as include packages
+   * in the constructor.
+   *
+   * @param target Where to save the target jar file.
+   * @param classes Set of classes to start the dependency traversal from.
+   * @param resources Extra resources to put into the jar file. If a resource is a jar file, it is put under
+   *                  the lib/ entry, otherwise under the resources/ entry.
+   * @throws IOException
+   */
+  public void createBundle(Location target, Iterable<Class<?>> classes, Iterable<URI> resources) throws IOException {
+    LOG.debug("start creating bundle {}. building a temporary file locally at first", target.getName());
+    // Write the jar to local tmp file first
+    File tmpJar = File.createTempFile(target.getName(), ".tmp");
+    try {
+      Set<String> entries = Sets.newHashSet();
+      JarOutputStream jarOut = new JarOutputStream(new FileOutputStream(tmpJar));
+      try {
+        // Find class dependencies
+        findDependencies(classes, entries, jarOut);
+
+        // Add extra resources
+        for (URI resource : resources) {
+          copyResource(resource, entries, jarOut);
+        }
+      } finally {
+        jarOut.close();
+      }
+      LOG.debug("copying temporary bundle to destination {} ({} bytes)", target.toURI(), tmpJar.length());
+      // Copy the tmp jar into destination.
+      OutputStream os = null; 
+      try {
+        os = new BufferedOutputStream(target.getOutputStream());
+        Files.copy(tmpJar, os);
+      } catch (IOException e) {
+        throw new IOException("failed to copy bundle from " + tmpJar.toURI() + " to " + target.toURI(), e);
+      } finally {
+        if (os != null) {
+          os.close();
+        }
+      }
+      LOG.debug("finished creating bundle at {}", target.toURI());
+    } finally {
+      tmpJar.delete();
+      LOG.debug("cleaned up local temporary for bundle {}", tmpJar.toURI());
+    }
+  }
+
+  private void findDependencies(Iterable<Class<?>> classes, final Set<String> entries,
+                                final JarOutputStream jarOut) throws IOException {
+
+    Iterable<String> classNames = Iterables.transform(classes, new Function<Class<?>, String>() {
+      @Override
+      public String apply(Class<?> input) {
+        return input.getName();
+      }
+    });
+
+    ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
+    if (classLoader == null) {
+      classLoader = getClass().getClassLoader();
+    }
+    Dependencies.findClassDependencies(classLoader, new Dependencies.ClassAcceptor() {
+      @Override
+      public boolean accept(String className, URL classUrl, URL classPathUrl) {
+        if (bootstrapClassPaths.contains(classPathUrl.getFile())) {
+          return false;
+        }
+
+        boolean shouldInclude = false;
+        for (String include : includePackages) {
+          if (className.startsWith(include)) {
+            shouldInclude = true;
+            break;
+          }
+        }
+
+        if (!shouldInclude) {
+          for (String exclude : excludePackages) {
+            if (className.startsWith(exclude)) {
+              return false;
+            }
+          }
+        }
+
+        putEntry(className, classUrl, classPathUrl, entries, jarOut);
+        return true;
+      }
+    }, classNames);
+  }
+
+  private void putEntry(String className, URL classUrl, URL classPathUrl, Set<String> entries, JarOutputStream jarOut) {
+    String classPath = classPathUrl.getFile();
+    if (classPath.endsWith(".jar")) {
+      saveDirEntry(SUBDIR_LIB, entries, jarOut);
+      saveEntry(SUBDIR_LIB + classPath.substring(classPath.lastIndexOf('/') + 1), classPathUrl, entries, jarOut, false);
+    } else {
+      // Class file, put it under the classes directory
+      saveDirEntry(SUBDIR_CLASSES, entries, jarOut);
+      if ("file".equals(classPathUrl.getProtocol())) {
+        // Copy every file under the classPath
+        try {
+          copyDir(new File(classPathUrl.toURI()), SUBDIR_CLASSES, entries, jarOut);
+        } catch (Exception e) {
+          throw Throwables.propagate(e);
+        }
+      } else {
+        String entry = SUBDIR_CLASSES + className.replace('.', '/') + ".class";
+        saveDirEntry(entry.substring(0, entry.lastIndexOf('/') + 1), entries, jarOut);
+        saveEntry(entry, classUrl, entries, jarOut, true);
+      }
+    }
+  }
+
+  /**
+   * Saves a directory entry to the jar output.
+   */
+  private void saveDirEntry(String path, Set<String> entries, JarOutputStream jarOut) {
+    if (entries.contains(path)) {
+      return;
+    }
+
+    try {
+      String entry = "";
+      for (String dir : Splitter.on('/').omitEmptyStrings().split(path)) {
+        entry += dir + '/';
+        if (entries.add(entry)) {
+          JarEntry jarEntry = new JarEntry(entry);
+          jarEntry.setMethod(JarOutputStream.STORED);
+          jarEntry.setSize(0L);
+          jarEntry.setCrc(0L);
+          jarOut.putNextEntry(jarEntry);
+          jarOut.closeEntry();
+        }
+      }
+    } catch (IOException e) {
+      throw Throwables.propagate(e);
+    }
+  }
+
+  /**
+   * Saves a class entry to the jar output.
+   */
+  private void saveEntry(String entry, URL url, Set<String> entries, JarOutputStream jarOut, boolean compress) {
+    LOG.debug("adding bundle entry " + entry);
+    if (!entries.add(entry)) {
+      return;
+    }
+    try {
+      JarEntry jarEntry = new JarEntry(entry);
+      InputStream is = url.openStream();
+
+      try {
+        if (compress) {
+          jarOut.putNextEntry(jarEntry);
+          ByteStreams.copy(is, jarOut);
+        } else {
+          crc32.reset();
+          TransferByteOutputStream os = new TransferByteOutputStream();
+          CheckedOutputStream checkedOut = new CheckedOutputStream(os, crc32);
+          ByteStreams.copy(is, checkedOut);
+          checkedOut.close();
+
+          long size = os.size();
+          jarEntry.setMethod(JarEntry.STORED);
+          jarEntry.setSize(size);
+          jarEntry.setCrc(checkedOut.getChecksum().getValue());
+          jarOut.putNextEntry(jarEntry);
+          os.transfer(jarOut);
+        }
+      } finally {
+        is.close();
+      }
+      jarOut.closeEntry();
+    } catch (Exception e) {
+      throw Throwables.propagate(e);
+    }
+  }
+
+
+  /**
+   * Copies all entries under the file path.
+   */
+  private void copyDir(File baseDir, String entryPrefix,
+                       Set<String> entries, JarOutputStream jarOut) throws IOException {
+    LOG.debug("adding whole dir {} to bundle at '{}'", baseDir, entryPrefix);
+    URI baseUri = baseDir.toURI();
+    Queue<File> queue = Lists.newLinkedList();
+    Collections.addAll(queue, baseDir.listFiles());
+    while (!queue.isEmpty()) {
+      File file = queue.remove();
+
+      String entry = entryPrefix + baseUri.relativize(file.toURI()).getPath();
+      if (entries.add(entry)) {
+        jarOut.putNextEntry(new JarEntry(entry));
+        if (file.isFile()) {
+          try {
+            Files.copy(file, jarOut);
+          } catch (IOException e) {
+            throw new IOException("failure copying from " + file.getAbsoluteFile() + " to JAR file entry " + entry, e);
+          }
+        }
+        jarOut.closeEntry();
+      }
+
+      if (file.isDirectory()) {
+        File[] files = file.listFiles();
+        if (files != null) {
+          queue.addAll(Arrays.asList(files));
+        }
+      }
+    }
+  }
+
+  private void copyResource(URI resource, Set<String> entries, JarOutputStream jarOut) throws IOException {
+    if ("file".equals(resource.getScheme())) {
+      File file = new File(resource);
+      if (file.isDirectory()) {
+        saveDirEntry(SUBDIR_RESOURCES, entries, jarOut);
+        copyDir(file, SUBDIR_RESOURCES, entries, jarOut);
+        return;
+      }
+    }
+
+    URL url = resource.toURL();
+    String path = url.getFile();
+    String prefix = path.endsWith(".jar") ? SUBDIR_LIB : SUBDIR_RESOURCES;
+    path = prefix + path.substring(path.lastIndexOf('/') + 1);
+
+    saveDirEntry(prefix, entries, jarOut);
+    jarOut.putNextEntry(new JarEntry(path));
+    InputStream is = url.openStream();
+    try {
+      ByteStreams.copy(is, jarOut);
+    } finally {
+      is.close();
+    }
+  }
+
+  private static final class TransferByteOutputStream extends ByteArrayOutputStream {
+
+    public void transfer(OutputStream os) throws IOException {
+      os.write(buf, 0, count);
+    }
+  }
+}
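
A usage sketch of the public API above: bundle a class and everything it depends on into a target
Location, excluding packages (here org.apache.hadoop, as an illustrative choice) that are expected
to be provided by the cluster classpath.

import org.apache.twill.filesystem.Location;
import org.apache.twill.internal.ApplicationBundler;
import com.google.common.collect.ImmutableList;

import java.io.IOException;

// Sketch: create a jar at the given location containing the class and its dependencies.
public final class BundleSketch {

  public static void createBundle(Location target, Class<?> mainClass) throws IOException {
    ApplicationBundler bundler = new ApplicationBundler(ImmutableList.of("org.apache.hadoop"));
    bundler.createBundle(target, mainClass);
  }

  private BundleSketch() { }
}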

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/Arguments.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/Arguments.java b/twill-core/src/main/java/org/apache/twill/internal/Arguments.java
new file mode 100644
index 0000000..a78547c
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/Arguments.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMultimap;
+import com.google.common.collect.Multimap;
+
+import java.util.List;
+
+/**
+ * Class that encapsulates application arguments and per-runnable arguments.
+ */
+public final class Arguments {
+
+  private final List<String> arguments;
+  private final Multimap<String, String> runnableArguments;
+
+  public Arguments(List<String> arguments, Multimap<String, String> runnableArguments) {
+    this.arguments = ImmutableList.copyOf(arguments);
+    this.runnableArguments = ImmutableMultimap.copyOf(runnableArguments);
+  }
+
+  public List<String> getArguments() {
+    return arguments;
+  }
+
+  public Multimap<String, String> getRunnableArguments() {
+    return runnableArguments;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/BasicTwillContext.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/BasicTwillContext.java b/twill-core/src/main/java/org/apache/twill/internal/BasicTwillContext.java
new file mode 100644
index 0000000..61bdaef
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/BasicTwillContext.java
@@ -0,0 +1,131 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal;
+
+import org.apache.twill.api.RunId;
+import org.apache.twill.api.TwillContext;
+import org.apache.twill.api.TwillRunnableSpecification;
+import org.apache.twill.common.Cancellable;
+import org.apache.twill.discovery.Discoverable;
+import org.apache.twill.discovery.DiscoveryService;
+
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+
+/**
+ * A basic implementation of {@link TwillContext} that exposes runtime information about the
+ * running runnable instance and allows announcing services for discovery.
+ */
+public final class BasicTwillContext implements TwillContext {
+
+  private final RunId runId;
+  private final RunId appRunId;
+  private final InetAddress host;
+  private final String[] args;
+  private final String[] appArgs;
+  private final TwillRunnableSpecification spec;
+  private final int instanceId;
+  private final DiscoveryService discoveryService;
+  private final int allowedMemoryMB;
+  private final int virtualCores;
+  private volatile int instanceCount;
+
+  public BasicTwillContext(RunId runId, RunId appRunId, InetAddress host, String[] args, String[] appArgs,
+                           TwillRunnableSpecification spec, int instanceId, DiscoveryService discoveryService,
+                           int instanceCount, int allowedMemoryMB, int virtualCores) {
+    this.runId = runId;
+    this.appRunId = appRunId;
+    this.host = host;
+    this.args = args;
+    this.appArgs = appArgs;
+    this.spec = spec;
+    this.instanceId = instanceId;
+    this.discoveryService = discoveryService;
+    this.instanceCount = instanceCount;
+    this.allowedMemoryMB = allowedMemoryMB;
+    this.virtualCores = virtualCores;
+  }
+
+  @Override
+  public RunId getRunId() {
+    return runId;
+  }
+
+  @Override
+  public RunId getApplicationRunId() {
+    return appRunId;
+  }
+
+  @Override
+  public int getInstanceCount() {
+    return instanceCount;
+  }
+
+  public void setInstanceCount(int count) {
+    this.instanceCount = count;
+  }
+
+  @Override
+  public InetAddress getHost() {
+    return host;
+  }
+
+  @Override
+  public String[] getArguments() {
+    return args;
+  }
+
+  @Override
+  public String[] getApplicationArguments() {
+    return appArgs;
+  }
+
+  @Override
+  public TwillRunnableSpecification getSpecification() {
+    return spec;
+  }
+
+  @Override
+  public int getInstanceId() {
+    return instanceId;
+  }
+
+  @Override
+  public int getVirtualCores() {
+    return virtualCores;
+  }
+
+  @Override
+  public int getMaxMemoryMB() {
+    return allowedMemoryMB;
+  }
+
+  @Override
+  public Cancellable announce(final String serviceName, final int port) {
+    return discoveryService.register(new Discoverable() {
+      @Override
+      public String getName() {
+        return serviceName;
+      }
+
+      @Override
+      public InetSocketAddress getSocketAddress() {
+        return new InetSocketAddress(getHost(), port);
+      }
+    });
+  }
+}
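
Usage note (not part of this patch): a runnable receives this context through TwillRunnable.initialize(TwillContext) and can announce itself for service discovery. A minimal sketch follows; the runnable name, service name and port are made up for illustration.

    import org.apache.twill.api.AbstractTwillRunnable;
    import org.apache.twill.api.TwillContext;
    import org.apache.twill.common.Cancellable;

    public final class EchoServer extends AbstractTwillRunnable {

      private Cancellable cancelDiscovery;

      @Override
      public void initialize(TwillContext context) {
        super.initialize(context);
        // Announce this instance under the hypothetical service name "echo" on port 8080.
        cancelDiscovery = context.announce("echo", 8080);
      }

      @Override
      public void run() {
        // Serve requests until stopped (omitted for brevity).
      }

      @Override
      public void stop() {
        // Withdraw the service announcement when asked to stop.
        if (cancelDiscovery != null) {
          cancelDiscovery.cancel();
        }
      }
    }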

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/Configs.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/Configs.java b/twill-core/src/main/java/org/apache/twill/internal/Configs.java
new file mode 100644
index 0000000..0fa1df8
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/Configs.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal;
+
+/**
+ * Defines configuration key names and default values used internally by Twill.
+ */
+public final class Configs {
+
+  public static final class Keys {
+    /**
+     * Size in MB of reserved memory for Java process (non-heap memory).
+     */
+    public static final String JAVA_RESERVED_MEMORY_MB = "twill.java.reserved.memory.mb";
+
+    private Keys() {
+    }
+  }
+
+  public static final class Defaults {
+    // By default have 200MB reserved for Java process.
+    public static final int JAVA_RESERVED_MEMORY_MB = 200;
+
+    private Defaults() {
+    }
+  }
+
+  private Configs() {
+  }
+}
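
For illustration only, a key/default pair like this would typically be consumed along the following lines; the Map-based helper below is hypothetical, lives in the same org.apache.twill.internal package, and is not an API introduced by this patch.

    import java.util.Map;

    final class ReservedMemoryUtil {

      // Hypothetical helper: resolve the reserved memory setting, falling back to the default.
      static int getReservedMemoryMB(Map<String, String> config) {
        String value = config.get(Configs.Keys.JAVA_RESERVED_MEMORY_MB);
        return value == null ? Configs.Defaults.JAVA_RESERVED_MEMORY_MB : Integer.parseInt(value);
      }

      private ReservedMemoryUtil() {
      }
    }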

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/Constants.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/Constants.java b/twill-core/src/main/java/org/apache/twill/internal/Constants.java
new file mode 100644
index 0000000..0387d3e
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/Constants.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal;
+
+/**
+ * This class contains collection of common constants used in Twill.
+ */
+public final class Constants {
+
+  public static final String LOG_TOPIC = "log";
+
+  /** Maximum number of seconds for AM to start. */
+  public static final int APPLICATION_MAX_START_SECONDS = 60;
+  /** Maximum number of seconds for AM to stop. */
+  public static final int APPLICATION_MAX_STOP_SECONDS = 60;
+
+  public static final long PROVISION_TIMEOUT = 30000;
+
+  /** Memory size in MB of the AM. */
+  public static final int APP_MASTER_MEMORY_MB = 512;
+
+  public static final int APP_MASTER_RESERVED_MEMORY_MB = 150;
+
+  public static final String STDOUT = "stdout";
+  public static final String STDERR = "stderr";
+
+  /**
+   * Constants for names of internal files that are shared between client, AM and containers.
+   */
+  public static final class Files {
+
+    public static final String LAUNCHER_JAR = "launcher.jar";
+    public static final String APP_MASTER_JAR = "appMaster.jar";
+    public static final String CONTAINER_JAR = "container.jar";
+    public static final String LOCALIZE_FILES = "localizeFiles.json";
+    public static final String TWILL_SPEC = "twillSpec.json";
+    public static final String ARGUMENTS = "arguments.json";
+    public static final String LOGBACK_TEMPLATE = "logback-template.xml";
+    public static final String KAFKA = "kafka.tgz";
+    public static final String JVM_OPTIONS = "jvm.opts";
+    public static final String CREDENTIALS = "credentials.store";
+
+    private Files() {
+    }
+  }
+
+  private Constants() {
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/ContainerInfo.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/ContainerInfo.java b/twill-core/src/main/java/org/apache/twill/internal/ContainerInfo.java
new file mode 100644
index 0000000..67c21d3
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/ContainerInfo.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal;
+
+import java.net.InetAddress;
+
+/**
+ * Represents information about the container that the process is or will be running in.
+ */
+public interface ContainerInfo {
+
+  String getId();
+
+  InetAddress getHost();
+
+  int getPort();
+
+  int getMemoryMB();
+
+  int getVirtualCores();
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/ContainerLiveNodeData.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/ContainerLiveNodeData.java b/twill-core/src/main/java/org/apache/twill/internal/ContainerLiveNodeData.java
new file mode 100644
index 0000000..705943c
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/ContainerLiveNodeData.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal;
+
+/**
+ * Live node data for a running container, carrying its container id and host name.
+ */
+public final class ContainerLiveNodeData {
+
+  private final String containerId;
+  private final String host;
+
+  public ContainerLiveNodeData(String containerId, String host) {
+    this.containerId = containerId;
+    this.host = host;
+  }
+
+  public String getContainerId() {
+    return containerId;
+  }
+
+  public String getHost() {
+    return host;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/EnvContainerInfo.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/EnvContainerInfo.java b/twill-core/src/main/java/org/apache/twill/internal/EnvContainerInfo.java
new file mode 100644
index 0000000..fd50028
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/EnvContainerInfo.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal;
+
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+
+/**
+ * A {@link ContainerInfo} based on information on the environment.
+ */
+public final class EnvContainerInfo implements ContainerInfo {
+  private final String id;
+  private final InetAddress host;
+  private final int port;
+  private final int virtualCores;
+  private final int memoryMB;
+
+  public EnvContainerInfo() throws UnknownHostException {
+    id = System.getenv(EnvKeys.YARN_CONTAINER_ID);
+    host = InetAddress.getByName(System.getenv(EnvKeys.YARN_CONTAINER_HOST));
+    port = Integer.parseInt(System.getenv(EnvKeys.YARN_CONTAINER_PORT));
+    virtualCores = Integer.parseInt(System.getenv(EnvKeys.YARN_CONTAINER_VIRTUAL_CORES));
+    memoryMB = Integer.parseInt(System.getenv(EnvKeys.YARN_CONTAINER_MEMORY_MB));
+  }
+
+  @Override
+  public String getId() {
+    return id;
+  }
+
+  @Override
+  public InetAddress getHost() {
+    return host;
+  }
+
+  @Override
+  public int getPort() {
+    return port;
+  }
+
+  @Override
+  public int getMemoryMB() {
+    return memoryMB;
+  }
+
+  @Override
+  public int getVirtualCores() {
+    return virtualCores;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/EnvKeys.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/EnvKeys.java b/twill-core/src/main/java/org/apache/twill/internal/EnvKeys.java
new file mode 100644
index 0000000..9bf6523
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/EnvKeys.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal;
+
+/**
+ * Place for defining common environment keys.
+ */
+public final class EnvKeys {
+
+  public static final String TWILL_ZK_CONNECT = "TWILL_ZK_CONNECT";
+  public static final String TWILL_APP_RUN_ID = "TWILL_APP_RUN_ID";
+  public static final String TWILL_RUN_ID = "TWILL_RUN_ID";
+  public static final String TWILL_INSTANCE_ID = "TWILL_INSTANCE_ID";
+  public static final String TWILL_INSTANCE_COUNT = "TWILL_INSTANCE_COUNT";
+  public static final String TWILL_RESERVED_MEMORY_MB = "TWILL_RESERVED_MEMORY_MB";
+
+  public static final String TWILL_FS_USER = "TWILL_FS_USER";
+
+  /**
+   * Cluster filesystem directory for storing twill app related files.
+   */
+  public static final String TWILL_APP_DIR = "TWILL_APP_DIR";
+
+  public static final String TWILL_APP_NAME = "TWILL_APP_NAME";
+  public static final String TWILL_RUNNABLE_NAME = "TWILL_RUNNABLE_NAME";
+
+  public static final String TWILL_LOG_KAFKA_ZK = "TWILL_LOG_KAFKA_ZK";
+
+  public static final String YARN_APP_ID = "YARN_APP_ID";
+  public static final String YARN_APP_ID_CLUSTER_TIME = "YARN_APP_ID_CLUSTER_TIME";
+  public static final String YARN_APP_ID_STR = "YARN_APP_ID_STR";
+
+  public static final String YARN_CONTAINER_ID = "YARN_CONTAINER_ID";
+  public static final String YARN_CONTAINER_HOST = "YARN_CONTAINER_HOST";
+  public static final String YARN_CONTAINER_PORT = "YARN_CONTAINER_PORT";
+  /**
+   * Used to inform runnables of their resource usage.
+   */
+  public static final String YARN_CONTAINER_VIRTUAL_CORES = "YARN_CONTAINER_VIRTUAL_CORES";
+  public static final String YARN_CONTAINER_MEMORY_MB = "YARN_CONTAINER_MEMORY_MB";
+
+  private EnvKeys() {
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/ListenerExecutor.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/ListenerExecutor.java b/twill-core/src/main/java/org/apache/twill/internal/ListenerExecutor.java
new file mode 100644
index 0000000..9d3e156
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/ListenerExecutor.java
@@ -0,0 +1,134 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal;
+
+import com.google.common.collect.Maps;
+import com.google.common.util.concurrent.Service;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.Executor;
+
+/**
+ * Wrapper for {@link Service.Listener} that executes callbacks on a given {@link Executor}.
+ * It also makes sure each callback method is called at most once.
+ */
+final class ListenerExecutor implements Service.Listener {
+
+  private static final Logger LOG = LoggerFactory.getLogger(ListenerExecutor.class);
+
+  private final Service.Listener delegate;
+  private final Executor executor;
+  private final ConcurrentMap<Service.State, Boolean> callStates = Maps.newConcurrentMap();
+
+  ListenerExecutor(Service.Listener delegate, Executor executor) {
+    this.delegate = delegate;
+    this.executor = executor;
+  }
+
+  @Override
+  public void starting() {
+    if (hasCalled(Service.State.STARTING)) {
+      return;
+    }
+    executor.execute(new Runnable() {
+      @Override
+      public void run() {
+        try {
+          delegate.starting();
+        } catch (Throwable t) {
+          LOG.warn("Exception thrown from listener", t);
+        }
+      }
+    });
+  }
+
+  @Override
+  public void running() {
+    if (hasCalled(Service.State.RUNNING)) {
+      return;
+    }
+    executor.execute(new Runnable() {
+      @Override
+      public void run() {
+        try {
+          delegate.running();
+        } catch (Throwable t) {
+          LOG.warn("Exception thrown from listener", t);
+        }
+      }
+    });
+  }
+
+  @Override
+  public void stopping(final Service.State from) {
+    if (hasCalled(Service.State.STOPPING)) {
+      return;
+    }
+    executor.execute(new Runnable() {
+      @Override
+      public void run() {
+        try {
+          delegate.stopping(from);
+        } catch (Throwable t) {
+          LOG.warn("Exception thrown from listener", t);
+        }
+      }
+    });
+  }
+
+  @Override
+  public void terminated(final Service.State from) {
+    if (hasCalled(Service.State.TERMINATED)) {
+      return;
+    }
+    executor.execute(new Runnable() {
+      @Override
+      public void run() {
+        try {
+          delegate.terminated(from);
+        } catch (Throwable t) {
+          LOG.warn("Exception thrown from listener", t);
+        }
+      }
+    });
+  }
+
+  @Override
+  public void failed(final Service.State from, final Throwable failure) {
+    // Both failed and terminate are using the same state for checking as only either one could be called.
+    if (hasCalled(Service.State.TERMINATED)) {
+      return;
+    }
+    executor.execute(new Runnable() {
+      @Override
+      public void run() {
+        try {
+          delegate.failed(from, failure);
+        } catch (Throwable t) {
+          LOG.warn("Exception thrown from listener", t);
+        }
+      }
+    });
+  }
+
+  private boolean hasCalled(Service.State state) {
+    return callStates.putIfAbsent(state, true) != null;
+  }
+}
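
A minimal sketch of how this wrapper is meant to be used, assuming code in the same org.apache.twill.internal package (ListenerExecutor is package-private) and the Guava version Twill bundled at the time (Service.startAndWait and MoreExecutors.sameThreadExecutor); the dummy service and printouts are illustrative only.

    import com.google.common.util.concurrent.AbstractIdleService;
    import com.google.common.util.concurrent.MoreExecutors;
    import com.google.common.util.concurrent.Service;

    import java.util.concurrent.Executors;

    final class ListenerExecutorExample {

      public static void main(String[] args) {
        Service service = new AbstractIdleService() {
          @Override
          protected void startUp() throws Exception {
            // Nothing to do for this dummy service.
          }

          @Override
          protected void shutDown() throws Exception {
            // Nothing to do for this dummy service.
          }
        };

        Service.Listener listener = new Service.Listener() {
          @Override
          public void starting() { System.out.println("starting"); }

          @Override
          public void running() { System.out.println("running"); }

          @Override
          public void stopping(Service.State from) { System.out.println("stopping from " + from); }

          @Override
          public void terminated(Service.State from) { System.out.println("terminated from " + from); }

          @Override
          public void failed(Service.State from, Throwable failure) { failure.printStackTrace(); }
        };

        // The wrapper relays each callback on the dedicated executor, at most once per state.
        service.addListener(new ListenerExecutor(listener, Executors.newSingleThreadExecutor()),
                            MoreExecutors.sameThreadExecutor());

        service.startAndWait();
        service.stopAndWait();
      }
    }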

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/LogOnlyEventHandler.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/LogOnlyEventHandler.java b/twill-core/src/main/java/org/apache/twill/internal/LogOnlyEventHandler.java
new file mode 100644
index 0000000..4f71a05
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/LogOnlyEventHandler.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal;
+
+import org.apache.twill.api.EventHandler;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.concurrent.TimeUnit;
+
+/**
+ * An {@link EventHandler} implementation that simply logs launch timeout events and asks for a recheck.
+ */
+public final class LogOnlyEventHandler extends EventHandler {
+
+  private static final Logger LOG = LoggerFactory.getLogger(LogOnlyEventHandler.class);
+
+  @Override
+  public TimeoutAction launchTimeout(Iterable<TimeoutEvent> timeoutEvents) {
+    for (TimeoutEvent event : timeoutEvents) {
+      LOG.info("Requested {} containers for runnable {}, only got {} after {} ms.",
+               event.getExpectedInstances(), event.getRunnableName(),
+               event.getActualInstances(), System.currentTimeMillis() - event.getRequestTime());
+    }
+    return TimeoutAction.recheck(Constants.PROVISION_TIMEOUT, TimeUnit.MILLISECONDS);
+
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/ProcessController.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/ProcessController.java b/twill-core/src/main/java/org/apache/twill/internal/ProcessController.java
new file mode 100644
index 0000000..4453838
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/ProcessController.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal;
+
+import org.apache.twill.common.Cancellable;
+
+/**
+ * For controlling a launched YARN process.
+ *
+ * @param <R> Report type.
+ */
+public interface ProcessController<R> extends Cancellable {
+
+  R getReport();
+
+  /**
+   * Request to stop the running process.
+   */
+  void cancel();
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/ProcessLauncher.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/ProcessLauncher.java b/twill-core/src/main/java/org/apache/twill/internal/ProcessLauncher.java
new file mode 100644
index 0000000..e48a226
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/ProcessLauncher.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal;
+
+import org.apache.twill.api.LocalFile;
+
+import java.util.Map;
+
+/**
+ * Interface for launching a container process.
+ *
+ * @param <T> Type of the object that contains information about the container in which the process will be launched.
+ */
+public interface ProcessLauncher<T> {
+
+  /**
+   * Returns information about the container that this launcher will launch the process in.
+   */
+  T getContainerInfo();
+
+  /**
+   * Returns a preparer with the given default set of environment variables, resources and credentials.
+   */
+  <C> PrepareLaunchContext prepareLaunch(Map<String, String> environments,
+                                         Iterable<LocalFile> resources, C credentials);
+
+  /**
+   * For setting up the launcher.
+   */
+  interface PrepareLaunchContext {
+
+    ResourcesAdder withResources();
+
+    AfterResources noResources();
+
+    interface ResourcesAdder {
+      MoreResources add(LocalFile localFile);
+    }
+
+    interface AfterResources {
+      EnvironmentAdder withEnvironment();
+
+      AfterEnvironment noEnvironment();
+    }
+
+    interface EnvironmentAdder {
+      <V> MoreEnvironment add(String key, V value);
+    }
+
+    interface MoreEnvironment extends EnvironmentAdder, AfterEnvironment {
+    }
+
+    interface AfterEnvironment {
+      CommandAdder withCommands();
+    }
+
+    interface MoreResources extends ResourcesAdder, AfterResources { }
+
+    interface CommandAdder {
+      StdOutSetter add(String cmd, String...args);
+    }
+
+    interface StdOutSetter {
+      StdErrSetter redirectOutput(String stdout);
+
+      StdErrSetter noOutput();
+    }
+
+    interface StdErrSetter {
+      MoreCommand redirectError(String stderr);
+
+      MoreCommand noError();
+    }
+
+    interface MoreCommand extends CommandAdder {
+      <R> ProcessController<R> launch();
+    }
+  }
+}
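
Reading the fluent interfaces above together, a prepared launch can be chained roughly as follows; the environment variable, command and wrapper class are made up for illustration and are not part of this patch.

    import org.apache.twill.internal.ProcessController;
    import org.apache.twill.internal.ProcessLauncher;

    final class LaunchExample {

      // Illustrative only: chain the prepare-launch fluent API from resources to launch.
      static ProcessController<Void> launch(ProcessLauncher.PrepareLaunchContext launchContext) {
        ProcessController<Void> controller = launchContext
          .noResources()
          .withEnvironment()
            .add("EXAMPLE_KEY", "example-value")
          .withCommands()
            .add("java", "-version")
          .redirectOutput("stdout").redirectError("stderr")
          .launch();
        return controller;
      }

      private LaunchExample() {
      }
    }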

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/SingleRunnableApplication.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/SingleRunnableApplication.java b/twill-core/src/main/java/org/apache/twill/internal/SingleRunnableApplication.java
new file mode 100644
index 0000000..a52afe1
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/SingleRunnableApplication.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal;
+
+import org.apache.twill.api.ResourceSpecification;
+import org.apache.twill.api.TwillApplication;
+import org.apache.twill.api.TwillRunnable;
+import org.apache.twill.api.TwillRunnableSpecification;
+import org.apache.twill.api.TwillSpecification;
+
+/**
+ * A simple {@link org.apache.twill.api.TwillApplication} that contains only one {@link org.apache.twill.api.TwillRunnable}.
+ */
+public class SingleRunnableApplication implements TwillApplication {
+
+  private final TwillRunnable runnable;
+  private final ResourceSpecification resourceSpec;
+
+  public SingleRunnableApplication(TwillRunnable runnable, ResourceSpecification resourceSpec) {
+    this.runnable = runnable;
+    this.resourceSpec = resourceSpec;
+  }
+
+  @Override
+  public TwillSpecification configure() {
+    TwillRunnableSpecification runnableSpec = runnable.configure();
+    return TwillSpecification.Builder.with()
+      .setName(runnableSpec.getName())
+      .withRunnable().add(runnableSpec.getName(), runnable, resourceSpec)
+      .noLocalFiles()
+      .anyOrder()
+      .build();
+  }
+}
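
For illustration, wrapping a single runnable would look roughly like this; the resource numbers are hypothetical, and the ResourceSpecification builder calls are assumed from the public Twill API rather than anything added in this patch.

    import org.apache.twill.api.ResourceSpecification;
    import org.apache.twill.api.TwillApplication;
    import org.apache.twill.api.TwillRunnable;
    import org.apache.twill.api.TwillSpecification;
    import org.apache.twill.internal.SingleRunnableApplication;

    final class WrapExample {

      // The runnable passed in is any TwillRunnable defined elsewhere.
      static TwillSpecification wrap(TwillRunnable runnable) {
        ResourceSpecification resources = ResourceSpecification.Builder.with()
          .setVirtualCores(1)
          .setMemory(512, ResourceSpecification.SizeUnit.MEGA)
          .setInstances(2)
          .build();

        TwillApplication app = new SingleRunnableApplication(runnable, resources);
        return app.configure();
      }

      private WrapExample() {
      }
    }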

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/TwillContainerController.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/TwillContainerController.java b/twill-core/src/main/java/org/apache/twill/internal/TwillContainerController.java
new file mode 100644
index 0000000..8b090bd
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/TwillContainerController.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal;
+
+import org.apache.twill.api.ServiceController;
+import org.apache.twill.internal.state.Message;
+import com.google.common.util.concurrent.ListenableFuture;
+
+/**
+ * A {@link ServiceController} that allows sending a message directly. Internal use only.
+ */
+public interface TwillContainerController extends ServiceController {
+
+  ListenableFuture<Message> sendMessage(Message message);
+
+  /**
+   * Called to indicate that the container this controller is associated with has completed.
+   * Any resources it holds will be released and all pending futures will be cancelled.
+   */
+  void completed(int exitStatus);
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/TwillContainerLauncher.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/TwillContainerLauncher.java b/twill-core/src/main/java/org/apache/twill/internal/TwillContainerLauncher.java
new file mode 100644
index 0000000..63f8732
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/TwillContainerLauncher.java
@@ -0,0 +1,181 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal;
+
+import org.apache.twill.api.LocalFile;
+import org.apache.twill.api.RunId;
+import org.apache.twill.api.RuntimeSpecification;
+import org.apache.twill.filesystem.Location;
+import org.apache.twill.internal.state.Message;
+import org.apache.twill.internal.state.StateNode;
+import org.apache.twill.launcher.TwillLauncher;
+import org.apache.twill.zookeeper.NodeData;
+import org.apache.twill.zookeeper.ZKClient;
+import com.google.common.util.concurrent.ListenableFuture;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+
+/**
+ * This class helps launching a container.
+ */
+public final class TwillContainerLauncher {
+
+  private static final Logger LOG = LoggerFactory.getLogger(TwillContainerLauncher.class);
+
+  private static final double HEAP_MIN_RATIO = 0.7d;
+
+  private final RuntimeSpecification runtimeSpec;
+  private final ProcessLauncher.PrepareLaunchContext launchContext;
+  private final ZKClient zkClient;
+  private final int instanceCount;
+  private final String jvmOpts;
+  private final int reservedMemory;
+  private final Location secureStoreLocation;
+
+  public TwillContainerLauncher(RuntimeSpecification runtimeSpec, ProcessLauncher.PrepareLaunchContext launchContext,
+                                ZKClient zkClient, int instanceCount, String jvmOpts, int reservedMemory,
+                                Location secureStoreLocation) {
+    this.runtimeSpec = runtimeSpec;
+    this.launchContext = launchContext;
+    this.zkClient = zkClient;
+    this.instanceCount = instanceCount;
+    this.jvmOpts = jvmOpts;
+    this.reservedMemory = reservedMemory;
+    this.secureStoreLocation = secureStoreLocation;
+  }
+
+  public TwillContainerController start(RunId runId, int instanceId, Class<?> mainClass, String classPath) {
+    ProcessLauncher.PrepareLaunchContext.AfterResources afterResources = null;
+    ProcessLauncher.PrepareLaunchContext.ResourcesAdder resourcesAdder = null;
+
+    // Adds all files to be localized to the container
+    if (!runtimeSpec.getLocalFiles().isEmpty()) {
+      resourcesAdder = launchContext.withResources();
+
+      for (LocalFile localFile : runtimeSpec.getLocalFiles()) {
+        afterResources = resourcesAdder.add(localFile);
+      }
+    }
+
+    // Optionally localize secure store.
+    try {
+      if (secureStoreLocation != null && secureStoreLocation.exists()) {
+        if (resourcesAdder == null) {
+          resourcesAdder = launchContext.withResources();
+        }
+        afterResources = resourcesAdder.add(new DefaultLocalFile(Constants.Files.CREDENTIALS,
+                                                                 secureStoreLocation.toURI(),
+                                                                 secureStoreLocation.lastModified(),
+                                                                 secureStoreLocation.length(), false, null));
+      }
+    } catch (IOException e) {
+      LOG.warn("Failed to launch container with secure store {}.", secureStoreLocation.toURI());
+    }
+
+    if (afterResources == null) {
+      afterResources = launchContext.noResources();
+    }
+
+    int memory = runtimeSpec.getResourceSpecification().getMemorySize();
+    if (((double) (memory - reservedMemory) / memory) >= HEAP_MIN_RATIO) {
+      // Reduce -Xmx by the reserved memory size.
+      memory = runtimeSpec.getResourceSpecification().getMemorySize() - reservedMemory;
+    } else {
+      // If it is a small VM, just discount it by the min ratio.
+      memory = (int) Math.ceil(memory * HEAP_MIN_RATIO);
+    }
+
+    // Currently no reporting is supported for runnable containers
+    ProcessController<Void> processController = afterResources
+      .withEnvironment()
+      .add(EnvKeys.TWILL_RUN_ID, runId.getId())
+      .add(EnvKeys.TWILL_RUNNABLE_NAME, runtimeSpec.getName())
+      .add(EnvKeys.TWILL_INSTANCE_ID, Integer.toString(instanceId))
+      .add(EnvKeys.TWILL_INSTANCE_COUNT, Integer.toString(instanceCount))
+      .withCommands()
+      .add("java",
+           "-Djava.io.tmpdir=tmp",
+           "-Dyarn.container=$" + EnvKeys.YARN_CONTAINER_ID,
+           "-Dtwill.runnable=$" + EnvKeys.TWILL_APP_NAME + ".$" + EnvKeys.TWILL_RUNNABLE_NAME,
+           "-cp", Constants.Files.LAUNCHER_JAR + ":" + classPath,
+           "-Xmx" + memory + "m",
+           jvmOpts,
+           TwillLauncher.class.getName(),
+           Constants.Files.CONTAINER_JAR,
+           mainClass.getName(),
+           Boolean.TRUE.toString())
+      .redirectOutput(Constants.STDOUT).redirectError(Constants.STDERR)
+      .launch();
+
+    TwillContainerControllerImpl controller = new TwillContainerControllerImpl(zkClient, runId, processController);
+    controller.start();
+    return controller;
+  }
+
+  private static final class TwillContainerControllerImpl extends AbstractZKServiceController
+                                                          implements TwillContainerController {
+
+    private final ProcessController<Void> processController;
+
+    protected TwillContainerControllerImpl(ZKClient zkClient, RunId runId,
+                                           ProcessController<Void> processController) {
+      super(runId, zkClient);
+      this.processController = processController;
+    }
+
+    @Override
+    protected void doStartUp() {
+      // No-op
+    }
+
+    @Override
+    protected void doShutDown() {
+      // No-op
+    }
+
+    @Override
+    protected void instanceNodeUpdated(NodeData nodeData) {
+      // No-op
+    }
+
+    @Override
+    protected void stateNodeUpdated(StateNode stateNode) {
+      // No-op
+    }
+
+    @Override
+    public ListenableFuture<Message> sendMessage(Message message) {
+      return sendMessage(message, message);
+    }
+
+    @Override
+    public synchronized void completed(int exitStatus) {
+      if (exitStatus != 0) {  // If a container terminated with exit code != 0, treat it as error
+//        fireStateChange(new StateNode(State.FAILED, new StackTraceElement[0]));
+      }
+      forceShutDown();
+    }
+
+    @Override
+    public void kill() {
+      processController.cancel();
+    }
+  }
+}
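
To make the heap sizing above concrete: with a 1024 MB container and the default 200 MB of reserved non-heap memory, (1024 - 200) / 1024 is about 0.80, which is at least the 0.7 minimum ratio, so the JVM is started with -Xmx824m. With a 256 MB container, (256 - 200) / 256 is about 0.22, so the launcher instead uses ceil(256 * 0.7) = 180 and starts the JVM with -Xmx180m.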

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/ZKMessages.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/ZKMessages.java b/twill-core/src/main/java/org/apache/twill/internal/ZKMessages.java
new file mode 100644
index 0000000..03575dd
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/ZKMessages.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal;
+
+import org.apache.twill.internal.state.Message;
+import org.apache.twill.internal.state.MessageCodec;
+import org.apache.twill.zookeeper.ZKClient;
+import org.apache.twill.zookeeper.ZKOperations;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.SettableFuture;
+import org.apache.zookeeper.CreateMode;
+
+/**
+ * Utility class for sending messages to a running service through ZooKeeper message nodes.
+ */
+public final class ZKMessages {
+
+  /**
+   * Creates a message node in zookeeper. The message node created is a PERSISTENT_SEQUENTIAL node.
+   *
+   * @param zkClient The ZooKeeper client for interacting with ZooKeeper.
+   * @param messagePathPrefix ZooKeeper path prefix for the message node.
+   * @param message The {@link Message} object for the content of the message node.
+   * @param completionResult Object to set to the result future when the message is processed.
+   * @param <V> Type of the completion result.
+   * @return A {@link ListenableFuture} that will be completed when the message is consumed, which is indicated
+   *         by deletion of the node. If there is an exception during the process, it will be reflected
+   *         in the returned future.
+   */
+  public static <V> ListenableFuture<V> sendMessage(final ZKClient zkClient, String messagePathPrefix,
+                                                    Message message, final V completionResult) {
+    SettableFuture<V> result = SettableFuture.create();
+    sendMessage(zkClient, messagePathPrefix, message, result, completionResult);
+    return result;
+  }
+
+  /**
+   * Creates a message node in zookeeper. The message node created is a PERSISTENT_SEQUENTIAL node.
+   *
+   * @param zkClient The ZooKeeper client for interacting with ZooKeeper.
+   * @param messagePathPrefix ZooKeeper path prefix for the message node.
+   * @param message The {@link Message} object for the content of the message node.
+   * @param completion A {@link SettableFuture} to reflect the result of message process completion.
+   * @param completionResult Object to set to the result future when the message is processed.
+   * @param <V> Type of the completion result.
+   */
+  public static <V> void sendMessage(final ZKClient zkClient, String messagePathPrefix, Message message,
+                                     final SettableFuture<V> completion, final V completionResult) {
+
+    // Creates a message and watch for its deletion for completion.
+    Futures.addCallback(zkClient.create(messagePathPrefix, MessageCodec.encode(message),
+                                        CreateMode.PERSISTENT_SEQUENTIAL), new FutureCallback<String>() {
+      @Override
+      public void onSuccess(String path) {
+        Futures.addCallback(ZKOperations.watchDeleted(zkClient, path), new FutureCallback<String>() {
+          @Override
+          public void onSuccess(String result) {
+            completion.set(completionResult);
+          }
+
+          @Override
+          public void onFailure(Throwable t) {
+            completion.setException(t);
+          }
+        });
+      }
+
+      @Override
+      public void onFailure(Throwable t) {
+        completion.setException(t);
+      }
+    });
+  }
+
+  private ZKMessages() {
+  }
+}
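
A minimal usage sketch, assuming an already-connected ZKClient and a Message built elsewhere; the message path prefix and completion value are made up for illustration.

    import com.google.common.util.concurrent.FutureCallback;
    import com.google.common.util.concurrent.Futures;
    import com.google.common.util.concurrent.ListenableFuture;
    import org.apache.twill.internal.ZKMessages;
    import org.apache.twill.internal.state.Message;
    import org.apache.twill.zookeeper.ZKClient;

    final class SendMessageExample {

      static void send(ZKClient zkClient, Message message) {
        // The future completes once the message node has been deleted, i.e. consumed.
        ListenableFuture<String> done =
          ZKMessages.sendMessage(zkClient, "/messages/msg", message, "processed");

        Futures.addCallback(done, new FutureCallback<String>() {
          @Override
          public void onSuccess(String result) {
            // result is the completion value, "processed".
          }

          @Override
          public void onFailure(Throwable t) {
            // The message node could not be created, or watching for its deletion failed.
          }
        });
      }

      private SendMessageExample() {
      }
    }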


[26/28] Making maven site works.

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/common/src/main/java/org/apache/twill/filesystem/Location.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/twill/filesystem/Location.java b/common/src/main/java/org/apache/twill/filesystem/Location.java
deleted file mode 100644
index dee9546..0000000
--- a/common/src/main/java/org/apache/twill/filesystem/Location.java
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.filesystem;
-
-import javax.annotation.Nullable;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.net.URI;
-
-/**
- * This interface defines the location and operations of a resource on the filesystem.
- * <p>
- * {@link Location} is agnostic to the type of file system the resource is on.
- * </p>
- */
-public interface Location {
-  /**
-   * Suffix added to every temp file name generated with {@link #getTempFile(String)}.
-   */
-  static final String TEMP_FILE_SUFFIX = ".tmp";
-
-  /**
-   * Checks if this location exists.
-   *
-   * @return true if found; false otherwise.
-   * @throws IOException
-   */
-  boolean exists() throws IOException;
-
-  /**
-   * @return The name of the file or directory denoted by this abstract pathname.
-   */
-  String getName();
-
-  /**
-   * Atomically creates a new, empty file named by this abstract pathname if and only if a file with this name
-   * does not yet exist.
-   * @return {@code true} if the file is successfully created, {@code false} otherwise.
-   * @throws IOException
-   */
-  boolean createNew() throws IOException;
-
-  /**
-   * @return An {@link java.io.InputStream} for this location.
-   * @throws IOException
-   */
-  InputStream getInputStream() throws IOException;
-
-  /**
-   * @return An {@link java.io.OutputStream} for this location.
-   * @throws IOException
-   */
-  OutputStream getOutputStream() throws IOException;
-
-  /**
-   * Creates an {@link OutputStream} for this location with the given permission. The actual permission supported
-   * depends on implementation.
-   *
-   * @param permission A POSIX permission string.
-   * @return An {@link OutputStream} for writing to this location.
-   * @throws IOException If failed to create the {@link OutputStream}.
-   */
-  OutputStream getOutputStream(String permission) throws IOException;
-
-  /**
-   * Appends the child to the current {@link Location}.
-   * <p>
-   * Returns a new instance of Location.
-   * </p>
-   *
-   * @param child to be appended to this location.
-   * @return A new instance of {@link Location}
-   * @throws IOException
-   */
-  Location append(String child) throws IOException;
-
-  /**
-   * Returns a unique location for a temporary file to be placed near this location.
-   * Allows all temp files to follow the same pattern for easier management.
-   * @param suffix part of the file name to include in the temp file name
-   * @return location of the temp file
-   * @throws IOException
-   */
-  Location getTempFile(String suffix) throws IOException;
-
-  /**
-   * @return A {@link java.net.URI} for this location.
-   */
-  URI toURI();
-
-  /**
-   * Deletes the file or directory denoted by this abstract pathname. If this
-   * pathname denotes a directory, then the directory must be empty in order
-   * to be deleted.
-   *
-   * @return true if and only if the file or directory is successfully deleted; false otherwise.
-   */
-  boolean delete() throws IOException;
-
-  /**
-   * Deletes the file or directory denoted by this abstract pathname. If this
-   * pathname denotes a directory and {@code recursive} is {@code true}, then content of the
-   * directory will be deleted recursively, otherwise the directory must be empty in order to be deleted.
-   * Note that when calling this method with {@code recursive = true} for a directory, any
-   * failure during deletion will have some entries inside the directory being deleted while some are not.
-   *
-   * @param recursive Indicate if recursively delete a directory. Ignored if the pathname represents a file.
-   * @return true if and only if the file or directory is successfully deleted; false otherwise.
-   */
-  boolean delete(boolean recursive) throws IOException;
-
-  /**
-   * Moves the file or directory denoted by this abstract pathname.
-   *
-   * @param destination destination location
-   * @return new location if and only if the file or directory is successfully moved; null otherwise.
-   */
-  @Nullable
-  Location renameTo(Location destination) throws IOException;
-
-  /**
-   * Creates the directory named by this abstract pathname, including any necessary
-   * but nonexistent parent directories.
-   *
-   * @return true if and only if the directory was created, along with all necessary parent directories; false otherwise
-   */
-  boolean mkdirs() throws IOException;
-
-  /**
-   * @return Length of file.
-   */
-  long length() throws IOException;
-
-  /**
-   * @return Last modified time of file.
-   */
-  long lastModified() throws IOException;
-}
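
Although this file is removed from the common module in this part of the change, the interface itself is easy to illustrate; the sketch below uses LocalLocationFactory and the system temp directory purely as an example.

    import org.apache.twill.filesystem.LocalLocationFactory;
    import org.apache.twill.filesystem.Location;
    import org.apache.twill.filesystem.LocationFactory;

    import java.io.File;
    import java.io.IOException;
    import java.io.OutputStream;

    public final class LocationExample {

      public static void main(String[] args) throws IOException {
        LocationFactory factory = new LocalLocationFactory(new File(System.getProperty("java.io.tmpdir")));

        // Create a unique temp location, write to it, then clean up.
        Location location = factory.create("example").getTempFile(".tmp");
        OutputStream out = location.getOutputStream();
        try {
          out.write("hello".getBytes());
        } finally {
          out.close();
        }
        location.delete();
      }
    }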

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/common/src/main/java/org/apache/twill/filesystem/LocationFactories.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/twill/filesystem/LocationFactories.java b/common/src/main/java/org/apache/twill/filesystem/LocationFactories.java
deleted file mode 100644
index 751a632..0000000
--- a/common/src/main/java/org/apache/twill/filesystem/LocationFactories.java
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.filesystem;
-
-import com.google.common.base.Throwables;
-
-import java.io.IOException;
-import java.net.URI;
-
-/**
- * Provides helper methods for creating different {@link LocationFactory} instances.
- */
-public final class LocationFactories {
-
-  /**
-   * Creates a {@link LocationFactory} that always applies the given namespace prefix.
-   */
-  public static LocationFactory namespace(LocationFactory delegate, final String namespace) {
-    return new ForwardingLocationFactory(delegate) {
-      @Override
-      public Location create(String path) {
-        try {
-          Location base = getDelegate().create(namespace);
-          return base.append(path);
-        } catch (IOException e) {
-          throw Throwables.propagate(e);
-        }
-      }
-
-      @Override
-      public Location create(URI uri) {
-        if (uri.isAbsolute()) {
-          return getDelegate().create(uri);
-        }
-        try {
-          Location base = getDelegate().create(namespace);
-          return base.append(uri.getPath());
-        } catch (IOException e) {
-          throw Throwables.propagate(e);
-        }
-      }
-
-      @Override
-      public Location getHomeLocation() {
-        return getDelegate().getHomeLocation();
-      }
-    };
-  }
-
-  private LocationFactories() {
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/common/src/main/java/org/apache/twill/filesystem/LocationFactory.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/twill/filesystem/LocationFactory.java b/common/src/main/java/org/apache/twill/filesystem/LocationFactory.java
deleted file mode 100644
index f88d94d..0000000
--- a/common/src/main/java/org/apache/twill/filesystem/LocationFactory.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.filesystem;
-
-import java.net.URI;
-
-/**
- * Factory for creating instance of {@link Location}.
- */
-public interface LocationFactory {
-
-  /**
-   * Creates an instance of {@link Location} of the given path.
-   * @param path The path representing the location.
-   * @return An instance of {@link Location}.
-   */
-  Location create(String path);
-
-  /**
-   * Creates an instance of {@link Location} based on {@link java.net.URI} <code>uri</code>.
-   *
-   * @param uri to the resource on the filesystem.
-   * @return An instance of {@link Location}
-   */
-  Location create(URI uri);
-
-  /**
-   * Returns the home location.
-   */
-  Location getHomeLocation();
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/common/src/test/java/org/apache/twill/common/ServicesTest.java
----------------------------------------------------------------------
diff --git a/common/src/test/java/org/apache/twill/common/ServicesTest.java b/common/src/test/java/org/apache/twill/common/ServicesTest.java
deleted file mode 100644
index c0aa7ee..0000000
--- a/common/src/test/java/org/apache/twill/common/ServicesTest.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.common;
-
-import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.AbstractIdleService;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.Service;
-import org.junit.Assert;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-/**
- * Unit test for {@link Services} methods.
- */
-public class ServicesTest {
-
-  private static final Logger LOG = LoggerFactory.getLogger(ServicesTest.class);
-
-  @Test
-  public void testChain() throws ExecutionException, InterruptedException {
-    AtomicBoolean transiting = new AtomicBoolean(false);
-    Service s1 = new DummyService("s1", transiting);
-    Service s2 = new DummyService("s2", transiting);
-    Service s3 = new DummyService("s3", transiting);
-
-    Futures.allAsList(Services.chainStart(s1, s2, s3).get()).get();
-    Futures.allAsList(Services.chainStop(s3, s2, s1).get()).get();
-  }
-
-  @Test
-  public void testCompletion() throws ExecutionException, InterruptedException {
-    Service service = new DummyService("s1", new AtomicBoolean());
-    ListenableFuture<Service.State> completion = Services.getCompletionFuture(service);
-
-    service.start();
-    service.stop();
-
-    completion.get();
-
-    AtomicBoolean transiting = new AtomicBoolean();
-    service = new DummyService("s2", transiting);
-    completion = Services.getCompletionFuture(service);
-
-    service.startAndWait();
-    transiting.set(true);
-    service.stop();
-
-    try {
-      completion.get();
-      Assert.assertTrue(false);
-    } catch (ExecutionException e) {
-      // Expected
-    }
-  }
-
-  private static final class DummyService extends AbstractIdleService {
-
-    private final String name;
-    private final AtomicBoolean transiting;
-
-    private DummyService(String name, AtomicBoolean transiting) {
-      this.name = name;
-      this.transiting = transiting;
-    }
-
-    @Override
-    protected void startUp() throws Exception {
-      Preconditions.checkState(transiting.compareAndSet(false, true));
-      LOG.info("Starting: " + name);
-      TimeUnit.MILLISECONDS.sleep(500);
-      LOG.info("Started: " + name);
-      Preconditions.checkState(transiting.compareAndSet(true, false));
-    }
-
-    @Override
-    protected void shutDown() throws Exception {
-      Preconditions.checkState(transiting.compareAndSet(false, true));
-      LOG.info("Stopping: " + name);
-      TimeUnit.MILLISECONDS.sleep(500);
-      LOG.info("Stopped: " + name);
-      Preconditions.checkState(transiting.compareAndSet(true, false));
-    }
-  }
-}
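
For reference, a minimal sketch of the chained start/stop helpers exercised by this test, assuming plain Guava AbstractIdleService implementations; the NoopService class below is illustrative only, not part of Twill:

    import com.google.common.util.concurrent.AbstractIdleService;
    import com.google.common.util.concurrent.Futures;
    import com.google.common.util.concurrent.Service;
    import org.apache.twill.common.Services;

    public class ChainExample {

      // A trivial service whose startUp/shutDown only print.
      private static final class NoopService extends AbstractIdleService {
        private final String name;
        NoopService(String name) { this.name = name; }
        @Override protected void startUp() { System.out.println("Started " + name); }
        @Override protected void shutDown() { System.out.println("Stopped " + name); }
      }

      public static void main(String[] args) throws Exception {
        Service s1 = new NoopService("s1");
        Service s2 = new NoopService("s2");
        Service s3 = new NoopService("s3");

        // Start s1, then s2, then s3; wait for all start futures to complete.
        Futures.allAsList(Services.chainStart(s1, s2, s3).get()).get();

        // Stop in reverse order, mirroring the test above.
        Futures.allAsList(Services.chainStop(s3, s2, s1).get()).get();
      }
    }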

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/common/src/test/java/org/apache/twill/filesystem/LocalLocationTest.java
----------------------------------------------------------------------
diff --git a/common/src/test/java/org/apache/twill/filesystem/LocalLocationTest.java b/common/src/test/java/org/apache/twill/filesystem/LocalLocationTest.java
deleted file mode 100644
index 198f77f..0000000
--- a/common/src/test/java/org/apache/twill/filesystem/LocalLocationTest.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.filesystem;
-
-import org.junit.Assert;
-import org.junit.Test;
-
-import java.io.File;
-import java.io.IOException;
-import java.net.URI;
-
-/**
- *
- */
-public class LocalLocationTest {
-
-  @Test
-  public void testDelete() throws IOException {
-    LocationFactory factory = new LocalLocationFactory(new File(System.getProperty("java.io.tmpdir")));
-
-    Location base = factory.create("test").getTempFile(".tmp");
-    Assert.assertTrue(base.mkdirs());
-
-    Assert.assertTrue(base.append("test1").getTempFile(".tmp").createNew());
-    Assert.assertTrue(base.append("test2").getTempFile(".tmp").createNew());
-
-    Location subDir = base.append("test3");
-    Assert.assertTrue(subDir.mkdirs());
-
-    Assert.assertTrue(subDir.append("test4").getTempFile(".tmp").createNew());
-    Assert.assertTrue(subDir.append("test5").getTempFile(".tmp").createNew());
-
-    Assert.assertTrue(base.delete(true));
-    Assert.assertFalse(base.exists());
-  }
-
-  @Test
-  public void testHelper() {
-    LocationFactory factory = LocationFactories.namespace(
-                                new LocalLocationFactory(new File(System.getProperty("java.io.tmpdir"))),
-                                "testhelper");
-
-    Location location = factory.create("test");
-    Assert.assertTrue(location.toURI().getPath().endsWith("testhelper/test"));
-
-    location = factory.create(URI.create("test2"));
-    Assert.assertTrue(location.toURI().getPath().endsWith("testhelper/test2"));
-  }
-}
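
A short sketch of the Location API exercised by this test, assuming a writable system temp directory; the "demo" namespace and file names are only illustrative:

    import org.apache.twill.filesystem.LocalLocationFactory;
    import org.apache.twill.filesystem.Location;
    import org.apache.twill.filesystem.LocationFactories;
    import org.apache.twill.filesystem.LocationFactory;

    import java.io.File;
    import java.io.IOException;

    public class LocationExample {
      public static void main(String[] args) throws IOException {
        // Factory rooted at the system temp directory, namespaced under "demo".
        LocationFactory factory = LocationFactories.namespace(
            new LocalLocationFactory(new File(System.getProperty("java.io.tmpdir"))), "demo");

        // Create a directory, then a temporary file underneath it.
        Location dir = factory.create("work");
        System.out.println("Created dirs: " + dir.mkdirs());
        Location file = dir.append("data").getTempFile(".tmp");
        System.out.println("Created file: " + file.createNew());

        // Recursive delete, as in the testDelete case above.
        System.out.println("Deleted: " + dir.delete(true));
      }
    }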

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/pom.xml
----------------------------------------------------------------------
diff --git a/core/pom.xml b/core/pom.xml
deleted file mode 100644
index faff711..0000000
--- a/core/pom.xml
+++ /dev/null
@@ -1,89 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-    <parent>
-        <artifactId>twill-parent</artifactId>
-        <groupId>org.apache.twill</groupId>
-        <version>0.1.0-SNAPSHOT</version>
-    </parent>
-    <modelVersion>4.0.0</modelVersion>
-
-    <artifactId>twill-core</artifactId>
-    <name>Twill core library</name>
-
-    <dependencies>
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>twill-api</artifactId>
-            <version>${project.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>twill-zookeeper</artifactId>
-            <version>${project.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>twill-discovery-core</artifactId>
-            <version>${project.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>com.google.guava</groupId>
-            <artifactId>guava</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>com.google.code.gson</groupId>
-            <artifactId>gson</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>io.netty</groupId>
-            <artifactId>netty</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.xerial.snappy</groupId>
-            <artifactId>snappy-java</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.ow2.asm</groupId>
-            <artifactId>asm-all</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.slf4j</groupId>
-            <artifactId>slf4j-api</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>ch.qos.logback</groupId>
-            <artifactId>logback-core</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>ch.qos.logback</groupId>
-            <artifactId>logback-classic</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>junit</groupId>
-            <artifactId>junit</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.apache.commons</groupId>
-            <artifactId>commons-compress</artifactId>
-        </dependency>
-    </dependencies>
-</project>

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/AbstractExecutionServiceController.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/AbstractExecutionServiceController.java b/core/src/main/java/org/apache/twill/internal/AbstractExecutionServiceController.java
deleted file mode 100644
index 974639d..0000000
--- a/core/src/main/java/org/apache/twill/internal/AbstractExecutionServiceController.java
+++ /dev/null
@@ -1,207 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal;
-
-import org.apache.twill.api.RunId;
-import org.apache.twill.api.ServiceController;
-import org.apache.twill.common.Threads;
-import com.google.common.util.concurrent.AbstractIdleService;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.Service;
-
-import java.util.Queue;
-import java.util.concurrent.ConcurrentLinkedQueue;
-import java.util.concurrent.Executor;
-import java.util.concurrent.atomic.AtomicReference;
-
-/**
- *
- */
-public abstract class AbstractExecutionServiceController implements ServiceController {
-
-  private final RunId runId;
-  private final ListenerExecutors listenerExecutors;
-  private final Service serviceDelegate;
-
-  protected AbstractExecutionServiceController(RunId runId) {
-    this.runId = runId;
-    this.listenerExecutors = new ListenerExecutors();
-    this.serviceDelegate = new ServiceDelegate();
-  }
-
-  protected abstract void startUp();
-
-  protected abstract void shutDown();
-
-  @Override
-  public final RunId getRunId() {
-    return runId;
-  }
-
-  @Override
-  public final void addListener(Listener listener, Executor executor) {
-    listenerExecutors.addListener(new ListenerExecutor(listener, executor));
-  }
-
-  @Override
-  public final ListenableFuture<State> start() {
-    serviceDelegate.addListener(listenerExecutors, Threads.SAME_THREAD_EXECUTOR);
-    return serviceDelegate.start();
-  }
-
-  @Override
-  public final State startAndWait() {
-    return Futures.getUnchecked(start());
-  }
-
-  @Override
-  public final boolean isRunning() {
-    return serviceDelegate.isRunning();
-  }
-
-  @Override
-  public final State state() {
-    return serviceDelegate.state();
-  }
-
-  @Override
-  public final State stopAndWait() {
-    return Futures.getUnchecked(stop());
-  }
-
-  @Override
-  public final ListenableFuture<State> stop() {
-    return serviceDelegate.stop();
-  }
-
-  protected Executor executor(final State state) {
-    return new Executor() {
-      @Override
-      public void execute(Runnable command) {
-        Thread t = new Thread(command, getClass().getSimpleName() + " " + state);
-        t.setDaemon(true);
-        t.start();
-      }
-    };
-  }
-
-
-  private final class ServiceDelegate extends AbstractIdleService {
-    @Override
-    protected void startUp() throws Exception {
-      AbstractExecutionServiceController.this.startUp();
-    }
-
-    @Override
-    protected void shutDown() throws Exception {
-      AbstractExecutionServiceController.this.shutDown();
-    }
-
-    @Override
-    protected Executor executor(State state) {
-      return AbstractExecutionServiceController.this.executor(state);
-    }
-  }
-
-  /**
-   * Inner class for dispatching listener callbacks to a list of listeners.
-   */
-  private static final class ListenerExecutors implements Listener {
-
-    private interface Callback {
-      void call(Listener listener);
-    }
-
-    private final Queue<ListenerExecutor> listeners = new ConcurrentLinkedQueue<ListenerExecutor>();
-    private final AtomicReference<Callback> lastState = new AtomicReference<Callback>();
-
-    private synchronized void addListener(final ListenerExecutor listener) {
-      listeners.add(listener);
-      Callback callback = lastState.get();
-      if (callback != null) {
-        callback.call(listener);
-      }
-    }
-
-    @Override
-    public synchronized void starting() {
-      lastState.set(new Callback() {
-        @Override
-        public void call(Listener listener) {
-          listener.starting();
-        }
-      });
-      for (ListenerExecutor listener : listeners) {
-        listener.starting();
-      }
-    }
-
-    @Override
-    public synchronized void running() {
-      lastState.set(new Callback() {
-        @Override
-        public void call(Listener listener) {
-          listener.running();
-        }
-      });
-      for (ListenerExecutor listener : listeners) {
-        listener.running();
-      }
-    }
-
-    @Override
-    public synchronized void stopping(final State from) {
-      lastState.set(new Callback() {
-        @Override
-        public void call(Listener listener) {
-          listener.stopping(from);
-        }
-      });
-      for (ListenerExecutor listener : listeners) {
-        listener.stopping(from);
-      }
-    }
-
-    @Override
-    public synchronized void terminated(final State from) {
-      lastState.set(new Callback() {
-        @Override
-        public void call(Listener listener) {
-          listener.terminated(from);
-        }
-      });
-      for (ListenerExecutor listener : listeners) {
-        listener.terminated(from);
-      }
-    }
-
-    @Override
-    public synchronized void failed(final State from, final Throwable failure) {
-      lastState.set(new Callback() {
-        @Override
-        public void call(Listener listener) {
-          listener.failed(from, failure);
-        }
-      });
-      for (ListenerExecutor listener : listeners) {
-        listener.failed(from, failure);
-      }
-    }
-  }
-}
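
The ListenerExecutors class above records the most recent state transition so that listeners added afterwards still receive it. A standalone sketch of that replay idea, framework-free and not part of the Twill API, using a Java 8 Consumer in place of the Callback interface:

    import java.util.Queue;
    import java.util.concurrent.ConcurrentLinkedQueue;
    import java.util.concurrent.atomic.AtomicReference;
    import java.util.function.Consumer;

    // Dispatches events to listeners and replays the latest event to late subscribers.
    public final class ReplayingDispatcher<L> {

      private final Queue<L> listeners = new ConcurrentLinkedQueue<>();
      private final AtomicReference<Consumer<L>> lastEvent = new AtomicReference<>();

      public synchronized void addListener(L listener) {
        listeners.add(listener);
        Consumer<L> replay = lastEvent.get();
        if (replay != null) {
          // A late listener immediately observes the latest state.
          replay.accept(listener);
        }
      }

      public synchronized void dispatch(Consumer<L> event) {
        lastEvent.set(event);
        for (L listener : listeners) {
          event.accept(listener);
        }
      }
    }

A call such as dispatch(listener -> listener.running()) corresponds to the synchronized running() method above.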

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/AbstractTwillController.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/AbstractTwillController.java b/core/src/main/java/org/apache/twill/internal/AbstractTwillController.java
deleted file mode 100644
index 5806f9d..0000000
--- a/core/src/main/java/org/apache/twill/internal/AbstractTwillController.java
+++ /dev/null
@@ -1,180 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal;
-
-import org.apache.twill.api.RunId;
-import org.apache.twill.api.TwillController;
-import org.apache.twill.api.logging.LogEntry;
-import org.apache.twill.api.logging.LogHandler;
-import org.apache.twill.discovery.Discoverable;
-import org.apache.twill.discovery.DiscoveryServiceClient;
-import org.apache.twill.discovery.ZKDiscoveryService;
-import org.apache.twill.internal.json.StackTraceElementCodec;
-import org.apache.twill.internal.kafka.client.SimpleKafkaClient;
-import org.apache.twill.internal.logging.LogEntryDecoder;
-import org.apache.twill.internal.state.SystemMessages;
-import org.apache.twill.kafka.client.FetchedMessage;
-import org.apache.twill.kafka.client.KafkaClient;
-import org.apache.twill.zookeeper.ZKClient;
-import org.apache.twill.zookeeper.ZKClients;
-import com.google.common.base.Charsets;
-import com.google.common.collect.Iterables;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.gson.Gson;
-import com.google.gson.GsonBuilder;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.Iterator;
-import java.util.Queue;
-import java.util.concurrent.ConcurrentLinkedQueue;
-import java.util.concurrent.TimeUnit;
-
-/**
- * An abstract base class for {@link org.apache.twill.api.TwillController} implementations that use ZooKeeper to control
- * a running Twill application.
- */
-public abstract class AbstractTwillController extends AbstractZKServiceController implements TwillController {
-
-  private static final Logger LOG = LoggerFactory.getLogger(AbstractTwillController.class);
-  private static final int MAX_KAFKA_FETCH_SIZE = 1048576;
-  private static final long SHUTDOWN_TIMEOUT_MS = 2000;
-  private static final long LOG_FETCH_TIMEOUT_MS = 5000;
-
-  private final Queue<LogHandler> logHandlers;
-  private final KafkaClient kafkaClient;
-  private final DiscoveryServiceClient discoveryServiceClient;
-  private final LogPollerThread logPoller;
-
-  public AbstractTwillController(RunId runId, ZKClient zkClient, Iterable<LogHandler> logHandlers) {
-    super(runId, zkClient);
-    this.logHandlers = new ConcurrentLinkedQueue<LogHandler>();
-    this.kafkaClient = new SimpleKafkaClient(ZKClients.namespace(zkClient, "/" + runId.getId() + "/kafka"));
-    this.discoveryServiceClient = new ZKDiscoveryService(zkClient);
-    Iterables.addAll(this.logHandlers, logHandlers);
-    this.logPoller = new LogPollerThread(runId, kafkaClient, logHandlers);
-  }
-
-  @Override
-  protected void doStartUp() {
-    if (!logHandlers.isEmpty()) {
-      logPoller.start();
-    }
-  }
-
-  @Override
-  protected void doShutDown() {
-    logPoller.terminate();
-    try {
-      // Wait for the poller thread to stop.
-      logPoller.join(SHUTDOWN_TIMEOUT_MS);
-    } catch (InterruptedException e) {
-      LOG.warn("Joining of log poller thread interrupted.", e);
-    }
-  }
-
-  @Override
-  public final synchronized void addLogHandler(LogHandler handler) {
-    logHandlers.add(handler);
-    if (!logPoller.isAlive()) {
-      logPoller.start();
-    }
-  }
-
-  @Override
-  public final Iterable<Discoverable> discoverService(String serviceName) {
-    return discoveryServiceClient.discover(serviceName);
-  }
-
-  @Override
-  public final ListenableFuture<Integer> changeInstances(String runnable, int newCount) {
-    return sendMessage(SystemMessages.setInstances(runnable, newCount), newCount);
-  }
-
-  private static final class LogPollerThread extends Thread {
-
-    private final KafkaClient kafkaClient;
-    private final Iterable<LogHandler> logHandlers;
-    private volatile boolean running = true;
-
-    LogPollerThread(RunId runId, KafkaClient kafkaClient, Iterable<LogHandler> logHandlers) {
-      super("twill-log-poller-" + runId.getId());
-      setDaemon(true);
-      this.kafkaClient = kafkaClient;
-      this.logHandlers = logHandlers;
-    }
-
-    @Override
-    public void run() {
-      LOG.info("Twill log poller thread '{}' started.", getName());
-      kafkaClient.startAndWait();
-      Gson gson = new GsonBuilder().registerTypeAdapter(LogEntry.class, new LogEntryDecoder())
-        .registerTypeAdapter(StackTraceElement.class, new StackTraceElementCodec())
-        .create();
-
-      while (running && !isInterrupted()) {
-        long offset;
-        try {
-          // Get the earliest offset
-          long[] offsets = kafkaClient.getOffset(Constants.LOG_TOPIC, 0, -2, 1).get(LOG_FETCH_TIMEOUT_MS,
-                                                                                    TimeUnit.MILLISECONDS);
-          // Should have one entry
-          offset = offsets[0];
-        } catch (Throwable t) {
-          // Keep retrying
-          LOG.warn("Failed to fetch offsets from Kafka. Retrying.", t);
-          continue;
-        }
-
-        // Now fetch log messages from Kafka
-        Iterator<FetchedMessage> messageIterator = kafkaClient.consume(Constants.LOG_TOPIC, 0,
-                                                                       offset, MAX_KAFKA_FETCH_SIZE);
-        try {
-          while (messageIterator.hasNext()) {
-            String json = Charsets.UTF_8.decode(messageIterator.next().getBuffer()).toString();
-            try {
-              LogEntry entry = gson.fromJson(json, LogEntry.class);
-              if (entry != null) {
-                invokeHandlers(entry);
-              }
-            } catch (Exception e) {
-              LOG.error("Failed to decode log entry {}", json, e);
-            }
-          }
-        } catch (Throwable t) {
-          LOG.warn("Exception while fetching log message from Kafka. Retrying.", t);
-          continue;
-        }
-      }
-
-      kafkaClient.stopAndWait();
-      LOG.info("Twill log poller thread stopped.");
-    }
-
-    void terminate() {
-      running = false;
-      interrupt();
-    }
-
-    private void invokeHandlers(LogEntry entry) {
-      for (LogHandler handler : logHandlers) {
-        handler.onLog(entry);
-      }
-    }
-  }
-}
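
A short usage sketch against the controller surface implemented above, assuming a TwillController obtained elsewhere (for example from a TwillRunner); the handler simply prints each entry to keep the sketch minimal:

    import org.apache.twill.api.TwillController;
    import org.apache.twill.api.logging.LogEntry;
    import org.apache.twill.api.logging.LogHandler;

    public final class ControllerUsage {

      // Attaches a simple log handler and rescales one runnable.
      public static void attachAndScale(TwillController controller, String runnableName) {
        controller.addLogHandler(new LogHandler() {
          @Override
          public void onLog(LogEntry entry) {
            // A real handler would format the entry's fields; printing keeps this minimal.
            System.out.println(entry);
          }
        });

        // Ask the application master to run three instances of the given runnable.
        controller.changeInstances(runnableName, 3);
      }
    }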

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/AbstractZKServiceController.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/AbstractZKServiceController.java b/core/src/main/java/org/apache/twill/internal/AbstractZKServiceController.java
deleted file mode 100644
index 98cc2b8..0000000
--- a/core/src/main/java/org/apache/twill/internal/AbstractZKServiceController.java
+++ /dev/null
@@ -1,314 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal;
-
-import org.apache.twill.api.Command;
-import org.apache.twill.api.RunId;
-import org.apache.twill.api.ServiceController;
-import org.apache.twill.common.Threads;
-import org.apache.twill.internal.json.StackTraceElementCodec;
-import org.apache.twill.internal.json.StateNodeCodec;
-import org.apache.twill.internal.state.Message;
-import org.apache.twill.internal.state.Messages;
-import org.apache.twill.internal.state.StateNode;
-import org.apache.twill.internal.state.SystemMessages;
-import org.apache.twill.zookeeper.NodeData;
-import org.apache.twill.zookeeper.ZKClient;
-import com.google.common.base.Charsets;
-import com.google.common.collect.Lists;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.gson.GsonBuilder;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.WatchedEvent;
-import org.apache.zookeeper.Watcher;
-import org.apache.zookeeper.data.Stat;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.List;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-/**
- * An abstract base class for implementing a {@link ServiceController} using ZooKeeper as a means for
- * communicating with the remote service. It is designed to work in tandem with the {@link ZKServiceDecorator}.
- */
-public abstract class AbstractZKServiceController extends AbstractExecutionServiceController {
-
-  private static final Logger LOG = LoggerFactory.getLogger(AbstractZKServiceController.class);
-
-  private final ZKClient zkClient;
-  private final InstanceNodeDataCallback instanceNodeDataCallback;
-  private final StateNodeDataCallback stateNodeDataCallback;
-  private final List<ListenableFuture<?>> messageFutures;
-  private ListenableFuture<State> stopMessageFuture;
-
-  protected AbstractZKServiceController(RunId runId, ZKClient zkClient) {
-    super(runId);
-    this.zkClient = zkClient;
-    this.instanceNodeDataCallback = new InstanceNodeDataCallback();
-    this.stateNodeDataCallback = new StateNodeDataCallback();
-    this.messageFutures = Lists.newLinkedList();
-  }
-
-  @Override
-  public final ListenableFuture<Command> sendCommand(Command command) {
-    return sendMessage(Messages.createForAll(command), command);
-  }
-
-  @Override
-  public final ListenableFuture<Command> sendCommand(String runnableName, Command command) {
-    return sendMessage(Messages.createForRunnable(runnableName, command), command);
-  }
-
-  @Override
-  protected final void startUp() {
-    // Watch for instance node existence.
-    actOnExists(getInstancePath(), new Runnable() {
-      @Override
-      public void run() {
-        watchInstanceNode();
-      }
-    });
-
-    // Watch for state node data
-    actOnExists(getZKPath("state"), new Runnable() {
-      @Override
-      public void run() {
-        watchStateNode();
-      }
-    });
-
-    doStartUp();
-  }
-
-  @Override
-  protected final synchronized void shutDown() {
-    if (stopMessageFuture == null) {
-      stopMessageFuture = ZKMessages.sendMessage(zkClient, getMessagePrefix(),
-                                                 SystemMessages.stopApplication(), State.TERMINATED);
-    }
-
-    // Cancel all pending message futures.
-    for (ListenableFuture<?> future : messageFutures) {
-      future.cancel(true);
-    }
-
-    doShutDown();
-  }
-
-  /**
-   * Sends a {@link Message} to the remote service. Returns a future that will be completed when the message
-   * has been processed.
-   * @param message The message to send.
-   * @param result Object to set into the future when the message has been processed.
-   * @param <V> Type of the result.
-   * @return A {@link ListenableFuture} that will be completed when the message has been processed.
-   */
-  protected final synchronized <V> ListenableFuture<V> sendMessage(Message message, V result) {
-    if (!isRunning()) {
-      return Futures.immediateFailedFuture(new IllegalStateException("Cannot send message to non-running application"));
-    }
-    final ListenableFuture<V> messageFuture = ZKMessages.sendMessage(zkClient, getMessagePrefix(), message, result);
-    messageFutures.add(messageFuture);
-    messageFuture.addListener(new Runnable() {
-      @Override
-      public void run() {
-        // If the completion is triggered when stopping, do nothing.
-        if (state() == State.STOPPING) {
-          return;
-        }
-        synchronized (AbstractZKServiceController.this) {
-          messageFutures.remove(messageFuture);
-        }
-      }
-    }, Threads.SAME_THREAD_EXECUTOR);
-
-    return messageFuture;
-  }
-
-  protected final ListenableFuture<State> getStopMessageFuture() {
-    return stopMessageFuture;
-  }
-
-  /**
-   * Called during startup. Executed in the startup thread.
-   */
-  protected abstract void doStartUp();
-
-  /**
-   * Called during shutdown. Executed in the shutdown thread.
-   */
-  protected abstract void doShutDown();
-
-  /**
-   * Called when an update on the live instance node is detected.
-   * @param nodeData The updated live instance node data or {@code null} if there is an error when fetching
-   *                 the node data.
-   */
-  protected abstract void instanceNodeUpdated(NodeData nodeData);
-
-  /**
-   * Called when an update on the state node is detected.
-   * @param stateNode The updated state node data or {@code null} if there is an error when fetching the node data.
-   */
-  protected abstract void stateNodeUpdated(StateNode stateNode);
-
-  protected synchronized void forceShutDown() {
-    if (stopMessageFuture == null) {
-      // In force shutdown, don't send message.
-      stopMessageFuture = Futures.immediateFuture(State.TERMINATED);
-    }
-    stop();
-  }
-
-
-  private void actOnExists(final String path, final Runnable action) {
-    // Watch for node existence.
-    final AtomicBoolean nodeExists = new AtomicBoolean(false);
-    Futures.addCallback(zkClient.exists(path, new Watcher() {
-      @Override
-      public void process(WatchedEvent event) {
-        // When the node is created, call the action.
-        // Other event types are handled by the action.
-        if (event.getType() == Event.EventType.NodeCreated && nodeExists.compareAndSet(false, true)) {
-          action.run();
-        }
-      }
-    }), new FutureCallback<Stat>() {
-      @Override
-      public void onSuccess(Stat result) {
-        if (result != null && nodeExists.compareAndSet(false, true)) {
-          action.run();
-        }
-      }
-
-      @Override
-      public void onFailure(Throwable t) {
-        LOG.error("Failed in exists call to {}. Shutting down service.", path, t);
-        forceShutDown();
-      }
-    }, Threads.SAME_THREAD_EXECUTOR);
-  }
-
-  private void watchInstanceNode() {
-    Futures.addCallback(zkClient.getData(getInstancePath(), new Watcher() {
-      @Override
-      public void process(WatchedEvent event) {
-        State state = state();
-        if (state != State.NEW && state != State.STARTING && state != State.RUNNING) {
-          // Ignore ZK node events when it is in stopping sequence.
-          return;
-        }
-        switch (event.getType()) {
-          case NodeDataChanged:
-            watchInstanceNode();
-            break;
-          case NodeDeleted:
-            // When the ephemeral node goes away, treat the remote service as stopped.
-            forceShutDown();
-            break;
-          default:
-            LOG.info("Ignore ZK event for instance node: {}", event);
-        }
-      }
-    }), instanceNodeDataCallback, Threads.SAME_THREAD_EXECUTOR);
-  }
-
-  private void watchStateNode() {
-    Futures.addCallback(zkClient.getData(getZKPath("state"), new Watcher() {
-      @Override
-      public void process(WatchedEvent event) {
-        State state = state();
-        if (state != State.NEW && state != State.STARTING && state != State.RUNNING) {
-          // Ignore ZK node events when it is in stopping sequence.
-          return;
-        }
-        switch (event.getType()) {
-          case NodeDataChanged:
-            watchStateNode();
-            break;
-          default:
-            LOG.info("Ignore ZK event for state node: {}", event);
-        }
-      }
-    }), stateNodeDataCallback, Threads.SAME_THREAD_EXECUTOR);
-  }
-
-  /**
-   * Returns the path prefix for creating sequential message nodes for the remote service.
-   */
-  private String getMessagePrefix() {
-    return getZKPath("messages/msg");
-  }
-
-  /**
-   * Returns the zookeeper node path for the ephemeral instance node for this runId.
-   */
-  private String getInstancePath() {
-    return String.format("/instances/%s", getRunId().getId());
-  }
-
-  private String getZKPath(String path) {
-    return String.format("/%s/%s", getRunId().getId(), path);
-  }
-
-  private final class InstanceNodeDataCallback implements FutureCallback<NodeData> {
-
-    @Override
-    public void onSuccess(NodeData result) {
-      instanceNodeUpdated(result);
-    }
-
-    @Override
-    public void onFailure(Throwable t) {
-      LOG.error("Failed in fetching instance node data.", t);
-      if (t instanceof KeeperException && ((KeeperException) t).code() == KeeperException.Code.NONODE) {
-        // If the node is gone, treat the remote service as stopped.
-        forceShutDown();
-      } else {
-        instanceNodeUpdated(null);
-      }
-    }
-  }
-
-  private final class StateNodeDataCallback implements FutureCallback<NodeData> {
-
-    @Override
-    public void onSuccess(NodeData result) {
-      byte[] data = result.getData();
-      if (data == null) {
-        stateNodeUpdated(null);
-        return;
-      }
-      StateNode stateNode = new GsonBuilder().registerTypeAdapter(StateNode.class, new StateNodeCodec())
-        .registerTypeAdapter(StackTraceElement.class, new StackTraceElementCodec())
-        .create()
-        .fromJson(new String(data, Charsets.UTF_8), StateNode.class);
-
-      stateNodeUpdated(stateNode);
-    }
-
-    @Override
-    public void onFailure(Throwable t) {
-      LOG.error("Failed in fetching state node data.", t);
-      stateNodeUpdated(null);
-    }
-  }
-}
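
The actOnExists helper above waits for a ZooKeeper node to appear before acting, covering both the node-already-exists and node-created-later races. A rough standalone sketch of the same idea, written against the plain ZooKeeper client rather than the Twill ZKClient API:

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    import java.util.concurrent.atomic.AtomicBoolean;

    public final class ActOnExists {

      // Runs the action at most once, either immediately if the node exists or once it is created.
      public static void actOnExists(final ZooKeeper zk, final String path, final Runnable action)
          throws Exception {
        final AtomicBoolean fired = new AtomicBoolean(false);
        Stat stat = zk.exists(path, new Watcher() {
          @Override
          public void process(WatchedEvent event) {
            if (event.getType() == Event.EventType.NodeCreated && fired.compareAndSet(false, true)) {
              action.run();
            }
          }
        });
        // The node may already exist; the AtomicBoolean guarantees the action runs at most once.
        if (stat != null && fired.compareAndSet(false, true)) {
          action.run();
        }
      }
    }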

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/ApplicationBundler.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/ApplicationBundler.java b/core/src/main/java/org/apache/twill/internal/ApplicationBundler.java
deleted file mode 100644
index a0e9a71..0000000
--- a/core/src/main/java/org/apache/twill/internal/ApplicationBundler.java
+++ /dev/null
@@ -1,362 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal;
-
-import org.apache.twill.filesystem.Location;
-import org.apache.twill.internal.utils.Dependencies;
-import com.google.common.base.Function;
-import com.google.common.base.Splitter;
-import com.google.common.base.Throwables;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.Iterables;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Sets;
-import com.google.common.io.ByteStreams;
-import com.google.common.io.Files;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.BufferedOutputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.net.URI;
-import java.net.URL;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.List;
-import java.util.Queue;
-import java.util.Set;
-import java.util.jar.JarEntry;
-import java.util.jar.JarOutputStream;
-import java.util.zip.CRC32;
-import java.util.zip.CheckedOutputStream;
-
-/**
- * This class builds jar files based on class dependencies.
- */
-public final class ApplicationBundler {
-
-  private static final Logger LOG = LoggerFactory.getLogger(ApplicationBundler.class);
-  
-  public static final String SUBDIR_CLASSES = "classes/";
-  public static final String SUBDIR_LIB = "lib/";
-  public static final String SUBDIR_RESOURCES = "resources/";
-
-  private final List<String> excludePackages;
-  private final List<String> includePackages;
-  private final Set<String> bootstrapClassPaths;
-  private final CRC32 crc32;
-
-  /**
-   * Constructs an ApplicationBundler.
-   *
-   * @param excludePackages Class packages to exclude
-   */
-  public ApplicationBundler(Iterable<String> excludePackages) {
-    this(excludePackages, ImmutableList.<String>of());
-  }
-
-  /**
-   * Constructs an ApplicationBundler.
-   *
-   * @param excludePackages Class packages to exclude
-   * @param includePackages Class packages that should be included. Anything in this list overrides the
-   *                        entries provided in excludePackages.
-   */
-  public ApplicationBundler(Iterable<String> excludePackages, Iterable<String> includePackages) {
-    this.excludePackages = ImmutableList.copyOf(excludePackages);
-    this.includePackages = ImmutableList.copyOf(includePackages);
-
-    ImmutableSet.Builder<String> builder = ImmutableSet.builder();
-    for (String classpath : Splitter.on(File.pathSeparatorChar).split(System.getProperty("sun.boot.class.path"))) {
-      File file = new File(classpath);
-      builder.add(file.getAbsolutePath());
-      try {
-        builder.add(file.getCanonicalPath());
-      } catch (IOException e) {
-        // Ignore the exception and proceed.
-      }
-    }
-    this.bootstrapClassPaths = builder.build();
-    this.crc32 = new CRC32();
-
-  }
-
-  public void createBundle(Location target, Iterable<Class<?>> classes) throws IOException {
-    createBundle(target, classes, ImmutableList.<URI>of());
-  }
-
-  /**
-   * Same as calling {@link #createBundle(Location, Iterable)}.
-   */
-  public void createBundle(Location target, Class<?> clz, Class<?>...classes) throws IOException {
-    createBundle(target, ImmutableSet.<Class<?>>builder().add(clz).add(classes).build());
-  }
-
-  /**
-   * Creates a jar file which includes all the given classes and all the classes that they depend on.
-   * The jar also includes all classes and resources under the packages given as include packages
-   * in the constructor.
-   *
-   * @param target Where to save the target jar file.
-   * @param resources Extra resources to put into the jar file. If a resource is a jar file, it is put under
-   *                  the lib/ entry, otherwise under the resources/ entry.
-   * @param classes Set of classes to start the dependency traversal.
-   * @throws IOException
-   */
-  public void createBundle(Location target, Iterable<Class<?>> classes, Iterable<URI> resources) throws IOException {
-    LOG.debug("start creating bundle {}. building a temporary file locally at first", target.getName());
-    // Write the jar to local tmp file first
-    File tmpJar = File.createTempFile(target.getName(), ".tmp");
-    try {
-      Set<String> entries = Sets.newHashSet();
-      JarOutputStream jarOut = new JarOutputStream(new FileOutputStream(tmpJar));
-      try {
-        // Find class dependencies
-        findDependencies(classes, entries, jarOut);
-
-        // Add extra resources
-        for (URI resource : resources) {
-          copyResource(resource, entries, jarOut);
-        }
-      } finally {
-        jarOut.close();
-      }
-      LOG.debug("copying temporary bundle to destination {} ({} bytes)", target.toURI(), tmpJar.length());
-      // Copy the tmp jar into destination.
-      OutputStream os = null; 
-      try {
-        os = new BufferedOutputStream(target.getOutputStream());
-        Files.copy(tmpJar, os);
-      } catch (IOException e) {
-        throw new IOException("failed to copy bundle from " + tmpJar.toURI() + " to " + target.toURI(), e);
-      } finally {
-        if (os != null) {
-          os.close();
-        }
-      }
-      LOG.debug("finished creating bundle at {}", target.toURI());
-    } finally {
-      tmpJar.delete();
-      LOG.debug("cleaned up local temporary for bundle {}", tmpJar.toURI());
-    }
-  }
-
-  private void findDependencies(Iterable<Class<?>> classes, final Set<String> entries,
-                                final JarOutputStream jarOut) throws IOException {
-
-    Iterable<String> classNames = Iterables.transform(classes, new Function<Class<?>, String>() {
-      @Override
-      public String apply(Class<?> input) {
-        return input.getName();
-      }
-    });
-
-    ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
-    if (classLoader == null) {
-      classLoader = getClass().getClassLoader();
-    }
-    Dependencies.findClassDependencies(classLoader, new Dependencies.ClassAcceptor() {
-      @Override
-      public boolean accept(String className, URL classUrl, URL classPathUrl) {
-        if (bootstrapClassPaths.contains(classPathUrl.getFile())) {
-          return false;
-        }
-
-        boolean shouldInclude = false;
-        for (String include : includePackages) {
-          if (className.startsWith(include)) {
-            shouldInclude = true;
-            break;
-          }
-        }
-
-        if (!shouldInclude) {
-          for (String exclude : excludePackages) {
-            if (className.startsWith(exclude)) {
-              return false;
-            }
-          }
-        }
-
-        putEntry(className, classUrl, classPathUrl, entries, jarOut);
-        return true;
-      }
-    }, classNames);
-  }
-
-  private void putEntry(String className, URL classUrl, URL classPathUrl, Set<String> entries, JarOutputStream jarOut) {
-    String classPath = classPathUrl.getFile();
-    if (classPath.endsWith(".jar")) {
-      saveDirEntry(SUBDIR_LIB, entries, jarOut);
-      saveEntry(SUBDIR_LIB + classPath.substring(classPath.lastIndexOf('/') + 1), classPathUrl, entries, jarOut, false);
-    } else {
-      // Class file, put it under the classes directory
-      saveDirEntry(SUBDIR_CLASSES, entries, jarOut);
-      if ("file".equals(classPathUrl.getProtocol())) {
-        // Copy every file under the classpath
-        try {
-          copyDir(new File(classPathUrl.toURI()), SUBDIR_CLASSES, entries, jarOut);
-        } catch (Exception e) {
-          throw Throwables.propagate(e);
-        }
-      } else {
-        String entry = SUBDIR_CLASSES + className.replace('.', '/') + ".class";
-        saveDirEntry(entry.substring(0, entry.lastIndexOf('/') + 1), entries, jarOut);
-        saveEntry(entry, classUrl, entries, jarOut, true);
-      }
-    }
-  }
-
-  /**
-   * Saves a directory entry to the jar output.
-   */
-  private void saveDirEntry(String path, Set<String> entries, JarOutputStream jarOut) {
-    if (entries.contains(path)) {
-      return;
-    }
-
-    try {
-      String entry = "";
-      for (String dir : Splitter.on('/').omitEmptyStrings().split(path)) {
-        entry += dir + '/';
-        if (entries.add(entry)) {
-          JarEntry jarEntry = new JarEntry(entry);
-          jarEntry.setMethod(JarOutputStream.STORED);
-          jarEntry.setSize(0L);
-          jarEntry.setCrc(0L);
-          jarOut.putNextEntry(jarEntry);
-          jarOut.closeEntry();
-        }
-      }
-    } catch (IOException e) {
-      throw Throwables.propagate(e);
-    }
-  }
-
-  /**
-   * Saves a class entry to the jar output.
-   */
-  private void saveEntry(String entry, URL url, Set<String> entries, JarOutputStream jarOut, boolean compress) {
-    LOG.debug("adding bundle entry " + entry);
-    if (!entries.add(entry)) {
-      return;
-    }
-    try {
-      JarEntry jarEntry = new JarEntry(entry);
-      InputStream is = url.openStream();
-
-      try {
-        if (compress) {
-          jarOut.putNextEntry(jarEntry);
-          ByteStreams.copy(is, jarOut);
-        } else {
-          crc32.reset();
-          TransferByteOutputStream os = new TransferByteOutputStream();
-          CheckedOutputStream checkedOut = new CheckedOutputStream(os, crc32);
-          ByteStreams.copy(is, checkedOut);
-          checkedOut.close();
-
-          long size = os.size();
-          jarEntry.setMethod(JarEntry.STORED);
-          jarEntry.setSize(size);
-          jarEntry.setCrc(checkedOut.getChecksum().getValue());
-          jarOut.putNextEntry(jarEntry);
-          os.transfer(jarOut);
-        }
-      } finally {
-        is.close();
-      }
-      jarOut.closeEntry();
-    } catch (Exception e) {
-      throw Throwables.propagate(e);
-    }
-  }
-
-
-  /**
-   * Copies all entries under the file path.
-   */
-  private void copyDir(File baseDir, String entryPrefix,
-                       Set<String> entries, JarOutputStream jarOut) throws IOException {
-    LOG.debug("adding whole dir {} to bundle at '{}'", baseDir, entryPrefix);
-    URI baseUri = baseDir.toURI();
-    Queue<File> queue = Lists.newLinkedList();
-    Collections.addAll(queue, baseDir.listFiles());
-    while (!queue.isEmpty()) {
-      File file = queue.remove();
-
-      String entry = entryPrefix + baseUri.relativize(file.toURI()).getPath();
-      if (entries.add(entry)) {
-        jarOut.putNextEntry(new JarEntry(entry));
-        if (file.isFile()) {
-          try {
-            Files.copy(file, jarOut);
-          } catch (IOException e) {
-            throw new IOException("failure copying from " + file.getAbsoluteFile() + " to JAR file entry " + entry, e);
-          }
-        }
-        jarOut.closeEntry();
-      }
-
-      if (file.isDirectory()) {
-        File[] files = file.listFiles();
-        if (files != null) {
-          queue.addAll(Arrays.asList(files));
-        }
-      }
-    }
-  }
-
-  private void copyResource(URI resource, Set<String> entries, JarOutputStream jarOut) throws IOException {
-    if ("file".equals(resource.getScheme())) {
-      File file = new File(resource);
-      if (file.isDirectory()) {
-        saveDirEntry(SUBDIR_RESOURCES, entries, jarOut);
-        copyDir(file, SUBDIR_RESOURCES, entries, jarOut);
-        return;
-      }
-    }
-
-    URL url = resource.toURL();
-    String path = url.getFile();
-    String prefix = path.endsWith(".jar") ? SUBDIR_LIB : SUBDIR_RESOURCES;
-    path = prefix + path.substring(path.lastIndexOf('/') + 1);
-
-    saveDirEntry(prefix, entries, jarOut);
-    jarOut.putNextEntry(new JarEntry(path));
-    InputStream is = url.openStream();
-    try {
-      ByteStreams.copy(is, jarOut);
-    } finally {
-      is.close();
-    }
-  }
-
-  private static final class TransferByteOutputStream extends ByteArrayOutputStream {
-
-    public void transfer(OutputStream os) throws IOException {
-      os.write(buf, 0, count);
-    }
-  }
-}
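
A minimal usage sketch for the bundler above: it builds a dependency jar for one class into a local Location, reusing the LocalLocationFactory shown earlier in this change; the excluded package and the target file name are only examples:

    import com.google.common.collect.ImmutableList;
    import org.apache.twill.filesystem.LocalLocationFactory;
    import org.apache.twill.filesystem.Location;
    import org.apache.twill.internal.ApplicationBundler;

    import java.io.File;
    import java.io.IOException;

    public final class BundleExample {

      public static void main(String[] args) throws IOException {
        // Exclude Hadoop classes from the bundle; they are expected on the cluster classpath.
        ApplicationBundler bundler = new ApplicationBundler(ImmutableList.of("org.apache.hadoop"));

        Location target = new LocalLocationFactory(
            new File(System.getProperty("java.io.tmpdir"))).create("bundle.jar");

        // Traverses the class dependencies of BundleExample and writes classes/ and lib/ entries.
        bundler.createBundle(target, BundleExample.class);
      }
    }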

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/Arguments.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/Arguments.java b/core/src/main/java/org/apache/twill/internal/Arguments.java
deleted file mode 100644
index a78547c..0000000
--- a/core/src/main/java/org/apache/twill/internal/Arguments.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal;
-
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMultimap;
-import com.google.common.collect.Multimap;
-
-import java.util.List;
-
-/**
- * Class that encapsulates application arguments and per-runnable arguments.
- */
-public final class Arguments {
-
-  private final List<String> arguments;
-  private final Multimap<String, String> runnableArguments;
-
-  public Arguments(List<String> arguments, Multimap<String, String> runnableArguments) {
-    this.arguments = ImmutableList.copyOf(arguments);
-    this.runnableArguments = ImmutableMultimap.copyOf(runnableArguments);
-  }
-
-  public List<String> getArguments() {
-    return arguments;
-  }
-
-  public Multimap<String, String> getRunnableArguments() {
-    return runnableArguments;
-  }
-}
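
A tiny construction sketch for the holder above, using the same Guava immutable collections; the argument values are purely illustrative:

    import com.google.common.collect.ImmutableList;
    import com.google.common.collect.ImmutableMultimap;
    import org.apache.twill.internal.Arguments;

    public final class ArgumentsExample {
      public static void main(String[] args) {
        // Application-level arguments plus per-runnable arguments keyed by runnable name.
        Arguments arguments = new Arguments(
            ImmutableList.of("--verbose"),
            ImmutableMultimap.of("worker", "--threads", "worker", "4"));

        System.out.println(arguments.getArguments());
        System.out.println(arguments.getRunnableArguments());
      }
    }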

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/BasicTwillContext.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/BasicTwillContext.java b/core/src/main/java/org/apache/twill/internal/BasicTwillContext.java
deleted file mode 100644
index 61bdaef..0000000
--- a/core/src/main/java/org/apache/twill/internal/BasicTwillContext.java
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal;
-
-import org.apache.twill.api.RunId;
-import org.apache.twill.api.TwillContext;
-import org.apache.twill.api.TwillRunnableSpecification;
-import org.apache.twill.common.Cancellable;
-import org.apache.twill.discovery.Discoverable;
-import org.apache.twill.discovery.DiscoveryService;
-
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
-
-/**
- *
- */
-public final class BasicTwillContext implements TwillContext {
-
-  private final RunId runId;
-  private final RunId appRunId;
-  private final InetAddress host;
-  private final String[] args;
-  private final String[] appArgs;
-  private final TwillRunnableSpecification spec;
-  private final int instanceId;
-  private final DiscoveryService discoveryService;
-  private final int allowedMemoryMB;
-  private final int virtualCores;
-  private volatile int instanceCount;
-
-  public BasicTwillContext(RunId runId, RunId appRunId, InetAddress host, String[] args, String[] appArgs,
-                           TwillRunnableSpecification spec, int instanceId, DiscoveryService discoveryService,
-                           int instanceCount, int allowedMemoryMB, int virtualCores) {
-    this.runId = runId;
-    this.appRunId = appRunId;
-    this.host = host;
-    this.args = args;
-    this.appArgs = appArgs;
-    this.spec = spec;
-    this.instanceId = instanceId;
-    this.discoveryService = discoveryService;
-    this.instanceCount = instanceCount;
-    this.allowedMemoryMB = allowedMemoryMB;
-    this.virtualCores = virtualCores;
-  }
-
-  @Override
-  public RunId getRunId() {
-    return runId;
-  }
-
-  @Override
-  public RunId getApplicationRunId() {
-    return appRunId;
-  }
-
-  @Override
-  public int getInstanceCount() {
-    return instanceCount;
-  }
-
-  public void setInstanceCount(int count) {
-    this.instanceCount = count;
-  }
-
-  @Override
-  public InetAddress getHost() {
-    return host;
-  }
-
-  @Override
-  public String[] getArguments() {
-    return args;
-  }
-
-  @Override
-  public String[] getApplicationArguments() {
-    return appArgs;
-  }
-
-  @Override
-  public TwillRunnableSpecification getSpecification() {
-    return spec;
-  }
-
-  @Override
-  public int getInstanceId() {
-    return instanceId;
-  }
-
-  @Override
-  public int getVirtualCores() {
-    return virtualCores;
-  }
-
-  @Override
-  public int getMaxMemoryMB() {
-    return allowedMemoryMB;
-  }
-
-  @Override
-  public Cancellable announce(final String serviceName, final int port) {
-    return discoveryService.register(new Discoverable() {
-      @Override
-      public String getName() {
-        return serviceName;
-      }
-
-      @Override
-      public InetSocketAddress getSocketAddress() {
-        return new InetSocketAddress(getHost(), port);
-      }
-    });
-  }
-}
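
The announce method above is what backs service discovery from inside a running container. A minimal helper sketch, assuming the TwillContext is handed in by the runnable that owns it; the service name "web" is only illustrative:

    import org.apache.twill.api.TwillContext;
    import org.apache.twill.common.Cancellable;

    public final class AnnounceExample {

      // Registers this container's host and the given port under a service name for discovery.
      // The caller keeps the Cancellable and invokes cancel() on shutdown to unregister.
      public static Cancellable announceWebService(TwillContext context, int port) {
        return context.announce("web", port);
      }
    }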

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/Configs.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/Configs.java b/core/src/main/java/org/apache/twill/internal/Configs.java
deleted file mode 100644
index 0fa1df8..0000000
--- a/core/src/main/java/org/apache/twill/internal/Configs.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal;
-
-/**
- *
- */
-public final class Configs {
-
-  public static final class Keys {
-    /**
-     * Size in MB of reserved memory for Java process (non-heap memory).
-     */
-    public static final String JAVA_RESERVED_MEMORY_MB = "twill.java.reserved.memory.mb";
-
-    private Keys() {
-    }
-  }
-
-  public static final class Defaults {
-    // By default, reserve 200MB for the Java process.
-    public static final int JAVA_RESERVED_MEMORY_MB = 200;
-
-    private Defaults() {
-    }
-  }
-
-  private Configs() {
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/Constants.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/Constants.java b/core/src/main/java/org/apache/twill/internal/Constants.java
deleted file mode 100644
index 0387d3e..0000000
--- a/core/src/main/java/org/apache/twill/internal/Constants.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal;
-
-/**
- * This class contains a collection of common constants used in Twill.
- */
-public final class Constants {
-
-  public static final String LOG_TOPIC = "log";
-
-  /** Maximum number of seconds for AM to start. */
-  public static final int APPLICATION_MAX_START_SECONDS = 60;
-  /** Maximum number of seconds for AM to stop. */
-  public static final int APPLICATION_MAX_STOP_SECONDS = 60;
-
-  public static final long PROVISION_TIMEOUT = 30000;
-
-  /** Memory size of AM */
-  public static final int APP_MASTER_MEMORY_MB = 512;
-
-  public static final int APP_MASTER_RESERVED_MEMORY_MB = 150;
-
-  public static final String STDOUT = "stdout";
-  public static final String STDERR = "stderr";
-
-  /**
-   * Constants for names of internal files that are shared between client, AM and containers.
-   */
-  public static final class Files {
-
-    public static final String LAUNCHER_JAR = "launcher.jar";
-    public static final String APP_MASTER_JAR = "appMaster.jar";
-    public static final String CONTAINER_JAR = "container.jar";
-    public static final String LOCALIZE_FILES = "localizeFiles.json";
-    public static final String TWILL_SPEC = "twillSpec.json";
-    public static final String ARGUMENTS = "arguments.json";
-    public static final String LOGBACK_TEMPLATE = "logback-template.xml";
-    public static final String KAFKA = "kafka.tgz";
-    public static final String JVM_OPTIONS = "jvm.opts";
-    public static final String CREDENTIALS = "credentials.store";
-
-    private Files() {
-    }
-  }
-
-  private Constants() {
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/ContainerInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/ContainerInfo.java b/core/src/main/java/org/apache/twill/internal/ContainerInfo.java
deleted file mode 100644
index 67c21d3..0000000
--- a/core/src/main/java/org/apache/twill/internal/ContainerInfo.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal;
-
-import java.net.InetAddress;
-
-/**
- * Represents information about the container that the process is, or will be, running in.
- */
-public interface ContainerInfo {
-
-  String getId();
-
-  InetAddress getHost();
-
-  int getPort();
-
-  int getMemoryMB();
-
-  int getVirtualCores();
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/ContainerLiveNodeData.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/ContainerLiveNodeData.java b/core/src/main/java/org/apache/twill/internal/ContainerLiveNodeData.java
deleted file mode 100644
index 705943c..0000000
--- a/core/src/main/java/org/apache/twill/internal/ContainerLiveNodeData.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal;
-
-/**
- * Live node data published by a running container, carrying its container id and host name.
- */
-public final class ContainerLiveNodeData {
-
-  private final String containerId;
-  private final String host;
-
-  public ContainerLiveNodeData(String containerId, String host) {
-    this.containerId = containerId;
-    this.host = host;
-  }
-
-  public String getContainerId() {
-    return containerId;
-  }
-
-  public String getHost() {
-    return host;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/EnvContainerInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/EnvContainerInfo.java b/core/src/main/java/org/apache/twill/internal/EnvContainerInfo.java
deleted file mode 100644
index fd50028..0000000
--- a/core/src/main/java/org/apache/twill/internal/EnvContainerInfo.java
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal;
-
-import java.net.InetAddress;
-import java.net.UnknownHostException;
-
-/**
- * A {@link ContainerInfo} based on information from the environment.
- */
-public final class EnvContainerInfo implements ContainerInfo {
-  private final String id;
-  private final InetAddress host;
-  private final int port;
-  private final int virtualCores;
-  private final int memoryMB;
-
-  public EnvContainerInfo() throws UnknownHostException {
-    id = System.getenv(EnvKeys.YARN_CONTAINER_ID);
-    host = InetAddress.getByName(System.getenv(EnvKeys.YARN_CONTAINER_HOST));
-    port = Integer.parseInt(System.getenv(EnvKeys.YARN_CONTAINER_PORT));
-    virtualCores = Integer.parseInt(System.getenv(EnvKeys.YARN_CONTAINER_VIRTUAL_CORES));
-    memoryMB = Integer.parseInt(System.getenv(EnvKeys.YARN_CONTAINER_MEMORY_MB));
-  }
-
-  @Override
-  public String getId() {
-    return id;
-  }
-
-  @Override
-  public InetAddress getHost() {
-    return host;
-  }
-
-  @Override
-  public int getPort() {
-    return port;
-  }
-
-  @Override
-  public int getMemoryMB() {
-    return memoryMB;
-  }
-
-  @Override
-  public int getVirtualCores() {
-    return virtualCores;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/EnvKeys.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/EnvKeys.java b/core/src/main/java/org/apache/twill/internal/EnvKeys.java
deleted file mode 100644
index 9bf6523..0000000
--- a/core/src/main/java/org/apache/twill/internal/EnvKeys.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal;
-
-/**
- * Place for defining common environment keys.
- */
-public final class EnvKeys {
-
-  public static final String TWILL_ZK_CONNECT = "TWILL_ZK_CONNECT";
-  public static final String TWILL_APP_RUN_ID = "TWILL_APP_RUN_ID";
-  public static final String TWILL_RUN_ID = "TWILL_RUN_ID";
-  public static final String TWILL_INSTANCE_ID = "TWILL_INSTANCE_ID";
-  public static final String TWILL_INSTANCE_COUNT = "TWILL_INSTANCE_COUNT";
-  public static final String TWILL_RESERVED_MEMORY_MB = "TWILL_RESERVED_MEMORY_MB";
-
-  public static final String TWILL_FS_USER = "TWILL_FS_USER";
-
-  /**
-   * Cluster filesystem directory for storing twill app related files.
-   */
-  public static final String TWILL_APP_DIR = "TWILL_APP_DIR";
-
-  public static final String TWILL_APP_NAME = "TWILL_APP_NAME";
-  public static final String TWILL_RUNNABLE_NAME = "TWILL_RUNNABLE_NAME";
-
-  public static final String TWILL_LOG_KAFKA_ZK = "TWILL_LOG_KAFKA_ZK";
-
-  public static final String YARN_APP_ID = "YARN_APP_ID";
-  public static final String YARN_APP_ID_CLUSTER_TIME = "YARN_APP_ID_CLUSTER_TIME";
-  public static final String YARN_APP_ID_STR = "YARN_APP_ID_STR";
-
-  public static final String YARN_CONTAINER_ID = "YARN_CONTAINER_ID";
-  public static final String YARN_CONTAINER_HOST = "YARN_CONTAINER_HOST";
-  public static final String YARN_CONTAINER_PORT = "YARN_CONTAINER_PORT";
-  /**
-   * Used to inform runnables of their resource usage.
-   */
-  public static final String YARN_CONTAINER_VIRTUAL_CORES = "YARN_CONTAINER_VIRTUAL_CORES";
-  public static final String YARN_CONTAINER_MEMORY_MB = "YARN_CONTAINER_MEMORY_MB";
-
-  private EnvKeys() {
-  }
-}
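
A hedged sketch of how a runnable might read its identity from these variables; the values are only populated inside a Twill-launched container, and the class name below is made up for illustration.

import org.apache.twill.internal.EnvKeys;

public final class EnvKeysExample {
  public static void main(String[] args) {
    // Set by the launcher; outside a Twill container these lookups return null.
    String appName = System.getenv(EnvKeys.TWILL_APP_NAME);
    String runnableName = System.getenv(EnvKeys.TWILL_RUNNABLE_NAME);
    String instanceId = System.getenv(EnvKeys.TWILL_INSTANCE_ID);
    String memoryMB = System.getenv(EnvKeys.YARN_CONTAINER_MEMORY_MB);

    System.out.println("Runnable " + runnableName + " (instance " + instanceId + ") of application "
                         + appName + " with " + memoryMB + " MB of container memory");
  }
}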

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/ListenerExecutor.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/ListenerExecutor.java b/core/src/main/java/org/apache/twill/internal/ListenerExecutor.java
deleted file mode 100644
index 9d3e156..0000000
--- a/core/src/main/java/org/apache/twill/internal/ListenerExecutor.java
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal;
-
-import com.google.common.collect.Maps;
-import com.google.common.util.concurrent.Service;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.Executor;
-
-/**
- * Wrapper for {@link Service.Listener} to have callbacks executed on a given {@link Executor}.
- * Also makes sure each method is called at most once.
- */
-final class ListenerExecutor implements Service.Listener {
-
-  private static final Logger LOG = LoggerFactory.getLogger(ListenerExecutor.class);
-
-  private final Service.Listener delegate;
-  private final Executor executor;
-  private final ConcurrentMap<Service.State, Boolean> callStates = Maps.newConcurrentMap();
-
-  ListenerExecutor(Service.Listener delegate, Executor executor) {
-    this.delegate = delegate;
-    this.executor = executor;
-  }
-
-  @Override
-  public void starting() {
-    if (hasCalled(Service.State.STARTING)) {
-      return;
-    }
-    executor.execute(new Runnable() {
-      @Override
-      public void run() {
-        try {
-          delegate.starting();
-        } catch (Throwable t) {
-          LOG.warn("Exception thrown from listener", t);
-        }
-      }
-    });
-  }
-
-  @Override
-  public void running() {
-    if (hasCalled(Service.State.RUNNING)) {
-      return;
-    }
-    executor.execute(new Runnable() {
-      @Override
-      public void run() {
-        try {
-          delegate.running();
-        } catch (Throwable t) {
-          LOG.warn("Exception thrown from listener", t);
-        }
-      }
-    });
-  }
-
-  @Override
-  public void stopping(final Service.State from) {
-    if (hasCalled(Service.State.STOPPING)) {
-      return;
-    }
-    executor.execute(new Runnable() {
-      @Override
-      public void run() {
-        try {
-          delegate.stopping(from);
-        } catch (Throwable t) {
-          LOG.warn("Exception thrown from listener", t);
-        }
-      }
-    });
-  }
-
-  @Override
-  public void terminated(final Service.State from) {
-    if (hasCalled(Service.State.TERMINATED)) {
-      return;
-    }
-    executor.execute(new Runnable() {
-      @Override
-      public void run() {
-        try {
-          delegate.terminated(from);
-        } catch (Throwable t) {
-          LOG.warn("Exception thrown from listener", t);
-        }
-      }
-    });
-  }
-
-  @Override
-  public void failed(final Service.State from, final Throwable failure) {
-    // Both failed and terminated use the same state for checking, since only one of them can ever be called.
-    if (hasCalled(Service.State.TERMINATED)) {
-      return;
-    }
-    executor.execute(new Runnable() {
-      @Override
-      public void run() {
-        try {
-          delegate.failed(from, failure);
-        } catch (Throwable t) {
-          LOG.warn("Exception thrown from listener", t);
-        }
-      }
-    });
-  }
-
-  private boolean hasCalled(Service.State state) {
-    return callStates.putIfAbsent(state, true) != null;
-  }
-}
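
A rough usage sketch, assuming the Guava version of the day in which Service.Listener is an interface: the wrapper moves listener callbacks onto the supplied executor, so the listener itself can be registered with a same-thread executor. The example class and the no-op service are illustrative only; ListenerExecutor is package-private, hence the package declaration.

package org.apache.twill.internal;

import com.google.common.util.concurrent.AbstractIdleService;
import com.google.common.util.concurrent.MoreExecutors;
import com.google.common.util.concurrent.Service;

import java.util.concurrent.Executors;

public final class ListenerExecutorExample {
  public static void main(String[] args) {
    Service service = new AbstractIdleService() {
      @Override
      protected void startUp() { /* no-op */ }
      @Override
      protected void shutDown() { /* no-op */ }
    };

    Service.Listener listener = new Service.Listener() {
      @Override
      public void starting() { System.out.println("starting"); }
      @Override
      public void running() { System.out.println("running"); }
      @Override
      public void stopping(Service.State from) { System.out.println("stopping from " + from); }
      @Override
      public void terminated(Service.State from) { System.out.println("terminated from " + from); }
      @Override
      public void failed(Service.State from, Throwable failure) { failure.printStackTrace(); }
    };

    // Callbacks run on the single-thread executor, each state reported at most once.
    service.addListener(new ListenerExecutor(listener, Executors.newSingleThreadExecutor()),
                        MoreExecutors.sameThreadExecutor());

    service.startAndWait();
    service.stopAndWait();
  }
}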

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/LogOnlyEventHandler.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/LogOnlyEventHandler.java b/core/src/main/java/org/apache/twill/internal/LogOnlyEventHandler.java
deleted file mode 100644
index 4f71a05..0000000
--- a/core/src/main/java/org/apache/twill/internal/LogOnlyEventHandler.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal;
-
-import org.apache.twill.api.EventHandler;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.concurrent.TimeUnit;
-
-/**
- * An {@link EventHandler} that logs timeout events and requests a recheck after the provision timeout.
- */
-public final class LogOnlyEventHandler extends EventHandler {
-
-  private static final Logger LOG = LoggerFactory.getLogger(LogOnlyEventHandler.class);
-
-  @Override
-  public TimeoutAction launchTimeout(Iterable<TimeoutEvent> timeoutEvents) {
-    for (TimeoutEvent event : timeoutEvents) {
-      LOG.info("Requested {} containers for runnable {}, only got {} after {} ms.",
-               event.getExpectedInstances(), event.getRunnableName(),
-               event.getActualInstances(), System.currentTimeMillis() - event.getRequestTime());
-    }
-    return TimeoutAction.recheck(Constants.PROVISION_TIMEOUT, TimeUnit.MILLISECONDS);
-
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/ProcessController.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/ProcessController.java b/core/src/main/java/org/apache/twill/internal/ProcessController.java
deleted file mode 100644
index 4453838..0000000
--- a/core/src/main/java/org/apache/twill/internal/ProcessController.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal;
-
-import org.apache.twill.common.Cancellable;
-
-/**
- * For controlling a launched YARN process.
- *
- * @param <R> Report type.
- */
-public interface ProcessController<R> extends Cancellable {
-
-  R getReport();
-
-  /**
-   * Request to stop the running process.
-   */
-  void cancel();
-}
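
To make the contract concrete, a minimal hypothetical implementation sketch; a real controller would talk to YARN, and the class name here is invented for the example.

import org.apache.twill.internal.ProcessController;

final class InMemoryProcessController implements ProcessController<String> {

  private volatile boolean cancelled;

  @Override
  public String getReport() {
    // A real implementation would query the launched YARN process for its status.
    return cancelled ? "CANCELLED" : "RUNNING";
  }

  @Override
  public void cancel() {
    // A real implementation would ask YARN to stop the running process.
    cancelled = true;
  }
}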


[24/28] Making maven site works.

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/kafka/EmbeddedKafkaServer.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/kafka/EmbeddedKafkaServer.java b/core/src/main/java/org/apache/twill/internal/kafka/EmbeddedKafkaServer.java
deleted file mode 100644
index 14dfc70..0000000
--- a/core/src/main/java/org/apache/twill/internal/kafka/EmbeddedKafkaServer.java
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.kafka;
-
-import com.google.common.base.Throwables;
-import com.google.common.collect.Lists;
-import com.google.common.util.concurrent.AbstractIdleService;
-
-import java.io.File;
-import java.net.MalformedURLException;
-import java.net.URL;
-import java.net.URLClassLoader;
-import java.util.List;
-import java.util.Properties;
-
-/**
- * An embedded Kafka server that loads the Kafka server classes from a dedicated class loader.
- */
-public final class EmbeddedKafkaServer extends AbstractIdleService {
-
-  private static final String KAFAK_CONFIG_CLASS = "kafka.server.KafkaConfig";
-  private static final String KAFKA_SERVER_CLASS = "kafka.server.KafkaServerStartable";
-
-  private final Object server;
-
-  public EmbeddedKafkaServer(File kafkaDir, Properties properties) {
-    this(createClassLoader(kafkaDir), properties);
-  }
-
-  public EmbeddedKafkaServer(ClassLoader classLoader, Properties properties) {
-    try {
-      Class<?> configClass = classLoader.loadClass(KAFAK_CONFIG_CLASS);
-      Object config = configClass.getConstructor(Properties.class).newInstance(properties);
-
-      Class<?> serverClass = classLoader.loadClass(KAFKA_SERVER_CLASS);
-      server = serverClass.getConstructor(configClass).newInstance(config);
-    } catch (Exception e) {
-      throw Throwables.propagate(e);
-    }
-  }
-
-  @Override
-  protected void startUp() throws Exception {
-    server.getClass().getMethod("startup").invoke(server);
-  }
-
-  @Override
-  protected void shutDown() throws Exception {
-    server.getClass().getMethod("shutdown").invoke(server);
-    server.getClass().getMethod("awaitShutdown").invoke(server);
-  }
-
-  private static ClassLoader createClassLoader(File kafkaDir) {
-    ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
-    ClassLoader thisClassLoader = EmbeddedKafkaServer.class.getClassLoader();
-    ClassLoader parent = contextClassLoader != null
-                            ? contextClassLoader
-                            : thisClassLoader != null
-                                ? thisClassLoader : ClassLoader.getSystemClassLoader();
-
-    return new URLClassLoader(findJars(kafkaDir, Lists.<URL>newArrayList()).toArray(new URL[0]), parent);
-  }
-
-  private static List<URL> findJars(File dir, List<URL> urls) {
-    try {
-      for (File file : dir.listFiles()) {
-        if (file.isDirectory()) {
-          findJars(file, urls);
-        } else if (file.getName().endsWith(".jar")) {
-          urls.add(file.toURI().toURL());
-        }
-      }
-      return urls;
-    } catch (MalformedURLException e) {
-      throw Throwables.propagate(e);
-    }
-  }
-}
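
A hedged usage sketch: the server is constructed from a directory of Kafka jars plus broker properties and driven through the AbstractIdleService lifecycle. The property names follow the Kafka 0.7 convention and, like the paths, are illustrative only.

import org.apache.twill.internal.kafka.EmbeddedKafkaServer;

import java.io.File;
import java.util.Properties;

public final class EmbeddedKafkaExample {
  public static void main(String[] args) {
    Properties props = new Properties();
    props.setProperty("brokerid", "1");                    // illustrative Kafka 0.7 settings
    props.setProperty("port", "9092");
    props.setProperty("log.dir", "/tmp/kafka-logs");
    props.setProperty("zk.connect", "localhost:2181");

    EmbeddedKafkaServer server = new EmbeddedKafkaServer(new File("/path/to/kafka/libs"), props);
    server.startAndWait();        // blocks until startUp() has reflectively started the broker
    try {
      // ... publish and consume messages against the embedded broker ...
    } finally {
      server.stopAndWait();
    }
  }
}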

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/kafka/client/AbstractCompressedMessageSetEncoder.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/kafka/client/AbstractCompressedMessageSetEncoder.java b/core/src/main/java/org/apache/twill/internal/kafka/client/AbstractCompressedMessageSetEncoder.java
deleted file mode 100644
index a9c3381..0000000
--- a/core/src/main/java/org/apache/twill/internal/kafka/client/AbstractCompressedMessageSetEncoder.java
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.kafka.client;
-
-import com.google.common.base.Throwables;
-import org.jboss.netty.buffer.ChannelBuffer;
-import org.jboss.netty.buffer.ChannelBufferOutputStream;
-import org.jboss.netty.buffer.ChannelBuffers;
-
-import java.io.IOException;
-import java.io.OutputStream;
-
-/**
- * A base implementation of {@link MessageSetEncoder} that does message compression.
- */
-abstract class AbstractCompressedMessageSetEncoder extends AbstractMessageSetEncoder {
-
-  private final Compression compression;
-  private ChannelBufferOutputStream os;
-  private OutputStream compressedOutput;
-
-
-  protected AbstractCompressedMessageSetEncoder(Compression compression) {
-    this.compression = compression;
-    try {
-      this.os = new ChannelBufferOutputStream(ChannelBuffers.dynamicBuffer());
-      this.compressedOutput = createCompressedStream(os);
-    } catch (IOException e) {
-      // Should never happen
-      throw Throwables.propagate(e);
-    }
-  }
-
-  @Override
-  public final MessageSetEncoder add(ChannelBuffer payload) {
-    try {
-      ChannelBuffer encoded = encodePayload(payload);
-      encoded.readBytes(compressedOutput, encoded.readableBytes());
-    } catch (IOException e) {
-      throw Throwables.propagate(e);
-    }
-    return this;
-
-  }
-
-  @Override
-  public final ChannelBuffer finish() {
-    try {
-      compressedOutput.close();
-      ChannelBuffer buf = prefixLength(encodePayload(os.buffer(), compression));
-      compressedOutput = createCompressedStream(os);
-      os.buffer().clear();
-
-      return buf;
-
-    } catch (IOException e) {
-      throw Throwables.propagate(e);
-    }
-
-  }
-
-  protected abstract OutputStream createCompressedStream(OutputStream os) throws IOException;
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/kafka/client/AbstractMessageSetEncoder.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/kafka/client/AbstractMessageSetEncoder.java b/core/src/main/java/org/apache/twill/internal/kafka/client/AbstractMessageSetEncoder.java
deleted file mode 100644
index 9955d6a..0000000
--- a/core/src/main/java/org/apache/twill/internal/kafka/client/AbstractMessageSetEncoder.java
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.kafka.client;
-
-import org.jboss.netty.buffer.ChannelBuffer;
-import org.jboss.netty.buffer.ChannelBuffers;
-
-import java.util.zip.CRC32;
-
-/**
- * A base implementation of {@link MessageSetEncoder}.
- */
-abstract class AbstractMessageSetEncoder implements MessageSetEncoder {
-
-  private static final ThreadLocal<CRC32> CRC32_LOCAL = new ThreadLocal<CRC32>() {
-    @Override
-    protected CRC32 initialValue() {
-      return new CRC32();
-    }
-  };
-
-  protected final int computeCRC32(ChannelBuffer buffer) {
-    CRC32 crc32 = CRC32_LOCAL.get();
-    crc32.reset();
-
-    if (buffer.hasArray()) {
-      crc32.update(buffer.array(), buffer.arrayOffset() + buffer.readerIndex(), buffer.readableBytes());
-    } else {
-      byte[] bytes = new byte[buffer.readableBytes()];
-      buffer.getBytes(buffer.readerIndex(), bytes);
-      crc32.update(bytes);
-    }
-    return (int) crc32.getValue();
-  }
-
-  protected final ChannelBuffer encodePayload(ChannelBuffer payload) {
-    return encodePayload(payload, Compression.NONE);
-  }
-
-  protected final ChannelBuffer encodePayload(ChannelBuffer payload, Compression compression) {
-    ChannelBuffer header = ChannelBuffers.buffer(10);
-
-    int crc = computeCRC32(payload);
-
-    int magic = ((compression == Compression.NONE) ? 0 : 1);
-
-    // Message length = 1 byte magic + (optional 1 compression byte) + 4 bytes crc + payload length
-    header.writeInt(5 + magic + payload.readableBytes());
-    // Magic number = 0 for non-compressed data
-    header.writeByte(magic);
-    if (magic > 0) {
-      header.writeByte(compression.getCode());
-    }
-    header.writeInt(crc);
-
-    return ChannelBuffers.wrappedBuffer(header, payload);
-  }
-
-  protected final ChannelBuffer prefixLength(ChannelBuffer buffer) {
-    ChannelBuffer sizeBuf = ChannelBuffers.buffer(4);
-    sizeBuf.writeInt(buffer.readableBytes());
-    return ChannelBuffers.wrappedBuffer(sizeBuf, buffer);
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/kafka/client/BasicFetchedMessage.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/kafka/client/BasicFetchedMessage.java b/core/src/main/java/org/apache/twill/internal/kafka/client/BasicFetchedMessage.java
deleted file mode 100644
index 286bf82..0000000
--- a/core/src/main/java/org/apache/twill/internal/kafka/client/BasicFetchedMessage.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.kafka.client;
-
-import org.apache.twill.kafka.client.FetchedMessage;
-
-import java.nio.ByteBuffer;
-
-/**
- * A simple {@link FetchedMessage} implementation carrying an offset and a payload buffer.
- */
-final class BasicFetchedMessage implements FetchedMessage {
-
-  private final long offset;
-  private final ByteBuffer buffer;
-
-  BasicFetchedMessage(long offset, ByteBuffer buffer) {
-    this.offset = offset;
-    this.buffer = buffer;
-  }
-
-  @Override
-  public long getOffset() {
-    return offset;
-  }
-
-  @Override
-  public ByteBuffer getBuffer() {
-    return buffer;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/kafka/client/Bufferer.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/kafka/client/Bufferer.java b/core/src/main/java/org/apache/twill/internal/kafka/client/Bufferer.java
deleted file mode 100644
index c1fb4f2..0000000
--- a/core/src/main/java/org/apache/twill/internal/kafka/client/Bufferer.java
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.kafka.client;
-
-import org.jboss.netty.buffer.ChannelBuffer;
-import org.jboss.netty.buffer.ChannelBuffers;
-
-/**
- * A helper class for buffering data in the format [len][payload-of-len].
- */
-final class Bufferer {
-
-  private ChannelBuffer currentBuffer = null;
-  private int currentSize = -1;
-
-  void apply(ChannelBuffer buffer) {
-    currentBuffer = concatBuffer(currentBuffer, buffer);
-  }
-
-  /**
-   * Returns the buffered payload if a complete one is ready to be consumed,
-   * otherwise returns {@link ChannelBuffers#EMPTY_BUFFER}.
-   */
-  ChannelBuffer getNext() {
-    if (currentSize < 0) {
-      if (currentBuffer.readableBytes() < 4) {
-        return ChannelBuffers.EMPTY_BUFFER;
-      }
-      currentSize = currentBuffer.readInt();
-    }
-
-    // Keep buffering if there are fewer than the required number of bytes
-    if (currentBuffer.readableBytes() < currentSize) {
-      return ChannelBuffers.EMPTY_BUFFER;
-    }
-
-    ChannelBuffer result = currentBuffer.readSlice(currentSize);
-    currentSize = -1;
-
-    return result;
-  }
-
-  private ChannelBuffer concatBuffer(ChannelBuffer current, ChannelBuffer buffer) {
-    return current == null ? buffer : ChannelBuffers.wrappedBuffer(current, buffer);
-  }
-}
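
A small sketch of the intended usage, assuming the Netty 3 buffer API used throughout this package: fragments are fed in with apply(), and getNext() only hands back a payload once a full [len][payload] frame has accumulated. The example class is illustrative; Bufferer is package-private, hence the package declaration.

package org.apache.twill.internal.kafka.client;

import org.jboss.netty.buffer.ChannelBuffer;
import org.jboss.netty.buffer.ChannelBuffers;

public final class BuffererExample {
  public static void main(String[] args) {
    Bufferer bufferer = new Bufferer();

    // An 8-byte payload framed as [len][payload], arriving in two fragments.
    ChannelBuffer framed = ChannelBuffers.buffer(12);
    framed.writeInt(8);
    framed.writeLong(42L);

    bufferer.apply(framed.readSlice(6));                      // first 6 bytes: frame incomplete
    System.out.println(bufferer.getNext().readableBytes());   // prints 0 (EMPTY_BUFFER)

    bufferer.apply(framed);                                   // remaining 6 bytes arrive
    System.out.println(bufferer.getNext().readableBytes());   // prints 8, a complete payload
  }
}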

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/kafka/client/Compression.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/kafka/client/Compression.java b/core/src/main/java/org/apache/twill/internal/kafka/client/Compression.java
deleted file mode 100644
index 3355b9f..0000000
--- a/core/src/main/java/org/apache/twill/internal/kafka/client/Compression.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.kafka.client;
-
-/**
- * Enum for indicating compression method.
- */
-public enum Compression {
-  NONE(0),
-  GZIP(1),
-  SNAPPY(2);
-
-  private final int code;
-
-  Compression(int code) {
-    this.code = code;
-  }
-
-  public int getCode() {
-    return code;
-  }
-
-  public static Compression fromCode(int code) {
-    switch (code) {
-      case 0:
-        return NONE;
-      case 1:
-        return GZIP;
-      case 2:
-        return SNAPPY;
-    }
-    throw new IllegalArgumentException("Unknown compression code.");
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/kafka/client/ConnectionPool.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/kafka/client/ConnectionPool.java b/core/src/main/java/org/apache/twill/internal/kafka/client/ConnectionPool.java
deleted file mode 100644
index c2865ba..0000000
--- a/core/src/main/java/org/apache/twill/internal/kafka/client/ConnectionPool.java
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.kafka.client;
-
-import com.google.common.collect.Maps;
-import org.jboss.netty.bootstrap.ClientBootstrap;
-import org.jboss.netty.channel.ChannelFuture;
-import org.jboss.netty.channel.ChannelFutureListener;
-import org.jboss.netty.channel.group.ChannelGroup;
-import org.jboss.netty.channel.group.ChannelGroupFuture;
-import org.jboss.netty.channel.group.ChannelGroupFutureListener;
-import org.jboss.netty.channel.group.DefaultChannelGroup;
-
-import java.net.InetSocketAddress;
-import java.util.Queue;
-import java.util.concurrent.ConcurrentLinkedQueue;
-import java.util.concurrent.ConcurrentMap;
-
-/**
- * Provides netty socket connection reuse.
- */
-final class ConnectionPool {
-
-  private final ClientBootstrap bootstrap;
-  private final ChannelGroup channelGroup;
-  private final ConcurrentMap<InetSocketAddress, Queue<ChannelFuture>> connections;
-
-  /**
-   * For releasing a connection back to the pool.
-   */
-  interface ConnectionReleaser {
-    void release();
-  }
-
-  /**
-   * Result of a connect request.
-   */
-  interface ConnectResult extends ConnectionReleaser {
-    ChannelFuture getChannelFuture();
-  }
-
-  ConnectionPool(ClientBootstrap bootstrap) {
-    this.bootstrap = bootstrap;
-    this.channelGroup = new DefaultChannelGroup();
-    this.connections = Maps.newConcurrentMap();
-  }
-
-  ConnectResult connect(InetSocketAddress address) {
-    Queue<ChannelFuture> channelFutures = connections.get(address);
-    if (channelFutures == null) {
-      channelFutures = new ConcurrentLinkedQueue<ChannelFuture>();
-      Queue<ChannelFuture> result = connections.putIfAbsent(address, channelFutures);
-      channelFutures = result == null ? channelFutures : result;
-    }
-
-    ChannelFuture channelFuture = channelFutures.poll();
-    while (channelFuture != null) {
-      if (channelFuture.isSuccess() && channelFuture.getChannel().isConnected()) {
-        return new SimpleConnectResult(address, channelFuture);
-      }
-      channelFuture = channelFutures.poll();
-    }
-
-    channelFuture = bootstrap.connect(address);
-    channelFuture.addListener(new ChannelFutureListener() {
-      @Override
-      public void operationComplete(ChannelFuture future) throws Exception {
-        if (future.isSuccess()) {
-          channelGroup.add(future.getChannel());
-        }
-      }
-    });
-    return new SimpleConnectResult(address, channelFuture);
-  }
-
-  ChannelGroupFuture close() {
-    ChannelGroupFuture result = channelGroup.close();
-    result.addListener(new ChannelGroupFutureListener() {
-      @Override
-      public void operationComplete(ChannelGroupFuture future) throws Exception {
-        bootstrap.releaseExternalResources();
-      }
-    });
-    return result;
-  }
-
-  private final class SimpleConnectResult implements ConnectResult {
-
-    private final InetSocketAddress address;
-    private final ChannelFuture future;
-
-
-    private SimpleConnectResult(InetSocketAddress address, ChannelFuture future) {
-      this.address = address;
-      this.future = future;
-    }
-
-    @Override
-    public ChannelFuture getChannelFuture() {
-      return future;
-    }
-
-    @Override
-    public void release() {
-      if (future.isSuccess()) {
-        connections.get(address).offer(future);
-      }
-    }
-  }
-}
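
A hedged sketch of how the pool is meant to be driven with a plain Netty 3 ClientBootstrap; the pipeline is left empty, the broker address is a placeholder, and the example class is invented for illustration (ConnectionPool is package-private, hence the package declaration).

package org.apache.twill.internal.kafka.client;

import org.jboss.netty.bootstrap.ClientBootstrap;
import org.jboss.netty.channel.ChannelFuture;
import org.jboss.netty.channel.ChannelPipeline;
import org.jboss.netty.channel.ChannelPipelineFactory;
import org.jboss.netty.channel.Channels;
import org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory;

import java.net.InetSocketAddress;
import java.util.concurrent.Executors;

public final class ConnectionPoolExample {
  public static void main(String[] args) {
    ClientBootstrap bootstrap = new ClientBootstrap(
      new NioClientSocketChannelFactory(Executors.newCachedThreadPool(), Executors.newCachedThreadPool()));
    bootstrap.setPipelineFactory(new ChannelPipelineFactory() {
      @Override
      public ChannelPipeline getPipeline() {
        return Channels.pipeline();   // request/response handlers omitted in this sketch
      }
    });

    ConnectionPool pool = new ConnectionPool(bootstrap);
    ConnectionPool.ConnectResult result = pool.connect(new InetSocketAddress("localhost", 9092));
    ChannelFuture future = result.getChannelFuture().awaitUninterruptibly();
    try {
      if (future.isSuccess()) {
        // ... write requests to future.getChannel() ...
      }
    } finally {
      result.release();                      // hand the connection back for reuse
      pool.close().awaitUninterruptibly();   // shut everything down
    }
  }
}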

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/kafka/client/GZipMessageSetEncoder.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/kafka/client/GZipMessageSetEncoder.java b/core/src/main/java/org/apache/twill/internal/kafka/client/GZipMessageSetEncoder.java
deleted file mode 100644
index daa0c2c..0000000
--- a/core/src/main/java/org/apache/twill/internal/kafka/client/GZipMessageSetEncoder.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.kafka.client;
-
-import java.io.IOException;
-import java.io.OutputStream;
-import java.util.zip.GZIPOutputStream;
-
-/**
- * A {@link MessageSetEncoder} that compress message set using GZIP.
- */
-final class GZipMessageSetEncoder extends AbstractCompressedMessageSetEncoder {
-
-  GZipMessageSetEncoder() {
-    super(Compression.GZIP);
-  }
-
-  @Override
-  protected OutputStream createCompressedStream(OutputStream os) throws IOException {
-    return new GZIPOutputStream(os);
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/kafka/client/IdentityMessageSetEncoder.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/kafka/client/IdentityMessageSetEncoder.java b/core/src/main/java/org/apache/twill/internal/kafka/client/IdentityMessageSetEncoder.java
deleted file mode 100644
index 51dc746..0000000
--- a/core/src/main/java/org/apache/twill/internal/kafka/client/IdentityMessageSetEncoder.java
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.kafka.client;
-
-import org.jboss.netty.buffer.ChannelBuffer;
-import org.jboss.netty.buffer.ChannelBuffers;
-
-/**
- * A pass-through {@link MessageSetEncoder}.
- */
-final class IdentityMessageSetEncoder extends AbstractMessageSetEncoder {
-
-  private ChannelBuffer messageSets = ChannelBuffers.EMPTY_BUFFER;
-
-  @Override
-  public MessageSetEncoder add(ChannelBuffer payload) {
-    messageSets = ChannelBuffers.wrappedBuffer(messageSets, encodePayload(payload));
-    return this;
-  }
-
-  @Override
-  public ChannelBuffer finish() {
-    ChannelBuffer buf = prefixLength(messageSets);
-    messageSets = ChannelBuffers.EMPTY_BUFFER;
-    return buf;
-  }
-}
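
A worked sketch of the wire framing produced by the encoders above for a single uncompressed message; the byte counts in the comments follow directly from encodePayload() and prefixLength(). The example class is illustrative and lives in the same package because the encoders are package-private.

package org.apache.twill.internal.kafka.client;

import com.google.common.base.Charsets;
import org.jboss.netty.buffer.ChannelBuffer;
import org.jboss.netty.buffer.ChannelBuffers;

public final class MessageFramingExample {
  public static void main(String[] args) {
    ChannelBuffer payload = ChannelBuffers.copiedBuffer("hello world", Charsets.UTF_8);  // 11 bytes

    MessageSetEncoder encoder = new IdentityMessageSetEncoder();
    ChannelBuffer messageSet = encoder.add(payload).finish();

    // Layout of the 24 encoded bytes:
    //   4 bytes  message-set length            = 20
    //   4 bytes  message length (5 + payload)  = 16
    //   1 byte   magic (0 = uncompressed, so no compression byte follows)
    //   4 bytes  CRC32 of the payload
    //  11 bytes  payload
    System.out.println("Encoded size: " + messageSet.readableBytes() + " bytes");  // prints 24
  }
}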

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/kafka/client/KafkaBrokerCache.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/kafka/client/KafkaBrokerCache.java b/core/src/main/java/org/apache/twill/internal/kafka/client/KafkaBrokerCache.java
deleted file mode 100644
index f2bb815..0000000
--- a/core/src/main/java/org/apache/twill/internal/kafka/client/KafkaBrokerCache.java
+++ /dev/null
@@ -1,326 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.kafka.client;
-
-import org.apache.twill.common.Threads;
-import org.apache.twill.zookeeper.NodeChildren;
-import org.apache.twill.zookeeper.NodeData;
-import org.apache.twill.zookeeper.ZKClient;
-import com.google.common.base.Charsets;
-import com.google.common.base.Function;
-import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.ImmutableSortedMap;
-import com.google.common.collect.Iterables;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-import com.google.common.collect.Sets;
-import com.google.common.util.concurrent.AbstractIdleService;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.WatchedEvent;
-import org.apache.zookeeper.Watcher;
-import org.apache.zookeeper.data.Stat;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.net.InetSocketAddress;
-import java.util.List;
-import java.util.Map;
-import java.util.Random;
-import java.util.Set;
-import java.util.SortedMap;
-
-/**
- * A Service to cache kafka broker information by subscribing to ZooKeeper.
- */
-final class KafkaBrokerCache extends AbstractIdleService {
-
-  private static final Logger LOG = LoggerFactory.getLogger(KafkaBrokerCache.class);
-
-  private static final String BROKERS_PATH = "/brokers";
-
-  private final ZKClient zkClient;
-  private final Map<String, InetSocketAddress> brokers;
-  // topicBrokers is from topic->partition size->brokerId
-  private final Map<String, SortedMap<Integer, Set<String>>> topicBrokers;
-  private final Runnable invokeGetBrokers = new Runnable() {
-    @Override
-    public void run() {
-      getBrokers();
-    }
-  };
-  private final Runnable invokeGetTopics = new Runnable() {
-    @Override
-    public void run() {
-      getTopics();
-    }
-  };
-
-  KafkaBrokerCache(ZKClient zkClient) {
-    this.zkClient = zkClient;
-    this.brokers = Maps.newConcurrentMap();
-    this.topicBrokers = Maps.newConcurrentMap();
-  }
-
-  @Override
-  protected void startUp() throws Exception {
-    getBrokers();
-    getTopics();
-  }
-
-  @Override
-  protected void shutDown() throws Exception {
-    // No-op
-  }
-
-  public int getPartitionSize(String topic) {
-    SortedMap<Integer, Set<String>> partitionBrokers = topicBrokers.get(topic);
-    if (partitionBrokers == null || partitionBrokers.isEmpty()) {
-      return 1;
-    }
-    return partitionBrokers.lastKey();
-  }
-
-  public TopicBroker getBrokerAddress(String topic, int partition) {
-    SortedMap<Integer, Set<String>> partitionBrokers = topicBrokers.get(topic);
-    if (partitionBrokers == null || partitionBrokers.isEmpty()) {
-      return pickRandomBroker(topic);
-    }
-
-    // If the requested partition is greater than or equal to the supported partition size, randomly pick one
-    if (partition >= partitionBrokers.lastKey()) {
-      return pickRandomBroker(topic);
-    }
-
-    // Randomly pick a partition size and randomly pick a broker from it
-    Random random = new Random();
-    partitionBrokers = partitionBrokers.tailMap(partition + 1);
-    List<Integer> sizes = Lists.newArrayList(partitionBrokers.keySet());
-    Integer partitionSize = pickRandomItem(sizes, random);
-    List<String> ids = Lists.newArrayList(partitionBrokers.get(partitionSize));
-    InetSocketAddress address = brokers.get(ids.get(new Random().nextInt(ids.size())));
-    return address == null ? pickRandomBroker(topic) : new TopicBroker(topic, address, partitionSize);
-  }
-
-  private TopicBroker pickRandomBroker(String topic) {
-    Map.Entry<String, InetSocketAddress> entry = Iterables.getFirst(brokers.entrySet(), null);
-    if (entry == null) {
-      return null;
-    }
-    InetSocketAddress address = entry.getValue();
-    return new TopicBroker(topic, address, 0);
-  }
-
-  private <T> T pickRandomItem(List<T> list, Random random) {
-    return list.get(random.nextInt(list.size()));
-  }
-
-  private void getBrokers() {
-    final String idsPath = BROKERS_PATH + "/ids";
-
-    Futures.addCallback(zkClient.getChildren(idsPath, new Watcher() {
-      @Override
-      public void process(WatchedEvent event) {
-        getBrokers();
-      }
-    }), new ExistsOnFailureFutureCallback<NodeChildren>(idsPath, invokeGetBrokers) {
-      @Override
-      public void onSuccess(NodeChildren result) {
-        Set<String> children = ImmutableSet.copyOf(result.getChildren());
-        for (String child : children) {
-          getBrokenData(idsPath + "/" + child, child);
-        }
-        // Remove all removed brokers
-        removeDiff(children, brokers);
-      }
-    });
-  }
-
-  private void getTopics() {
-    final String topicsPath = BROKERS_PATH + "/topics";
-    Futures.addCallback(zkClient.getChildren(topicsPath, new Watcher() {
-      @Override
-      public void process(WatchedEvent event) {
-        getTopics();
-      }
-    }), new ExistsOnFailureFutureCallback<NodeChildren>(topicsPath, invokeGetTopics) {
-      @Override
-      public void onSuccess(NodeChildren result) {
-        Set<String> children = ImmutableSet.copyOf(result.getChildren());
-
-        // Process new children
-        for (String topic : ImmutableSet.copyOf(Sets.difference(children, topicBrokers.keySet()))) {
-          getTopic(topicsPath + "/" + topic, topic);
-        }
-
-        // Remove old children
-        removeDiff(children, topicBrokers);
-      }
-    });
-  }
-
-  private void getBrokenData(String path, final String brokerId) {
-    Futures.addCallback(zkClient.getData(path), new FutureCallback<NodeData>() {
-      @Override
-      public void onSuccess(NodeData result) {
-        String data = new String(result.getData(), Charsets.UTF_8);
-        String hostPort = data.substring(data.indexOf(':') + 1);
-        int idx = hostPort.indexOf(':');
-        brokers.put(brokerId, new InetSocketAddress(hostPort.substring(0, idx),
-                                                    Integer.parseInt(hostPort.substring(idx + 1))));
-      }
-
-      @Override
-      public void onFailure(Throwable t) {
-        // No-op, the watch on the parent node will handle it.
-      }
-    });
-  }
-
-  private void getTopic(final String path, final String topic) {
-    Futures.addCallback(zkClient.getChildren(path, new Watcher() {
-      @Override
-      public void process(WatchedEvent event) {
-        // Other event types can either be ignored or are handled by the parent watcher
-        if (event.getType() == Event.EventType.NodeChildrenChanged) {
-          getTopic(path, topic);
-        }
-      }
-    }), new FutureCallback<NodeChildren>() {
-      @Override
-      public void onSuccess(NodeChildren result) {
-        List<String> children = result.getChildren();
-        final List<ListenableFuture<BrokerPartition>> futures = Lists.newArrayListWithCapacity(children.size());
-
-        // Fetch data from each broker node
-        for (final String brokerId : children) {
-          futures.add(Futures.transform(zkClient.getData(path + "/" + brokerId),
-                                        new Function<NodeData, BrokerPartition>() {
-            @Override
-            public BrokerPartition apply(NodeData input) {
-              return new BrokerPartition(brokerId, Integer.parseInt(new String(input.getData(), Charsets.UTF_8)));
-            }
-          }));
-        }
-
-        // When all fetching is done, build the partition size->broker map for this topic
-        Futures.successfulAsList(futures).addListener(new Runnable() {
-          @Override
-          public void run() {
-            Map<Integer, Set<String>> partitionBrokers = Maps.newHashMap();
-            for (ListenableFuture<BrokerPartition> future : futures) {
-              try {
-                BrokerPartition info = future.get();
-                Set<String> brokerSet = partitionBrokers.get(info.getPartitionSize());
-                if (brokerSet == null) {
-                  brokerSet = Sets.newHashSet();
-                  partitionBrokers.put(info.getPartitionSize(), brokerSet);
-                }
-                brokerSet.add(info.getBrokerId());
-              } catch (Exception e) {
-                // Exception is ignored, as it will be handled by parent watcher
-              }
-            }
-            topicBrokers.put(topic, ImmutableSortedMap.copyOf(partitionBrokers));
-          }
-        }, Threads.SAME_THREAD_EXECUTOR);
-      }
-
-      @Override
-      public void onFailure(Throwable t) {
-        // No-op. Failure is already handled by the parent watcher (e.g. node does not exist -> children change in parent)
-      }
-    });
-  }
-
-  private <K, V> void removeDiff(Set<K> keys, Map<K, V> map) {
-    for (K key : ImmutableSet.copyOf(Sets.difference(map.keySet(), keys))) {
-      map.remove(key);
-    }
-  }
-
-  private abstract class ExistsOnFailureFutureCallback<V> implements FutureCallback<V> {
-
-    private final String path;
-    private final Runnable action;
-
-    protected ExistsOnFailureFutureCallback(String path, Runnable action) {
-      this.path = path;
-      this.action = action;
-    }
-
-    @Override
-    public final void onFailure(Throwable t) {
-      if (!isNotExists(t)) {
-        LOG.error("Fail to watch for kafka brokers: " + path, t);
-        return;
-      }
-
-      waitExists(path);
-    }
-
-    private boolean isNotExists(Throwable t) {
-      return ((t instanceof KeeperException) && ((KeeperException) t).code() == KeeperException.Code.NONODE);
-    }
-
-    private void waitExists(String path) {
-      LOG.info("Path " + path + " not exists. Watch for creation.");
-
-      // If the node doesn't exist, use the "exists" call to watch for node creation.
-      Futures.addCallback(zkClient.exists(path, new Watcher() {
-        @Override
-        public void process(WatchedEvent event) {
-          if (event.getType() == Event.EventType.NodeCreated || event.getType() == Event.EventType.NodeDeleted) {
-            action.run();
-          }
-        }
-      }), new FutureCallback<Stat>() {
-        @Override
-        public void onSuccess(Stat result) {
-          // If path exists, get children again, otherwise wait for watch to get triggered
-          if (result != null) {
-            action.run();
-          }
-        }
-        @Override
-        public void onFailure(Throwable t) {
-          action.run();
-        }
-      });
-    }
-  }
-
-  private static final class BrokerPartition {
-    private final String brokerId;
-    private final int partitionSize;
-
-    private BrokerPartition(String brokerId, int partitionSize) {
-      this.brokerId = brokerId;
-      this.partitionSize = partitionSize;
-    }
-
-    public String getBrokerId() {
-      return brokerId;
-    }
-
-    public int getPartitionSize() {
-      return partitionSize;
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/kafka/client/KafkaRequest.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/kafka/client/KafkaRequest.java b/core/src/main/java/org/apache/twill/internal/kafka/client/KafkaRequest.java
deleted file mode 100644
index 7b43f8a..0000000
--- a/core/src/main/java/org/apache/twill/internal/kafka/client/KafkaRequest.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.kafka.client;
-
-import org.jboss.netty.buffer.ChannelBuffer;
-
-/**
- * Represents a single request to a Kafka broker, carrying the request type, topic, partition and request body.
- */
-final class KafkaRequest {
-
-  public enum Type {
-    PRODUCE(0),
-    FETCH(1),
-    MULTI_FETCH(2),
-    MULTI_PRODUCE(3),
-    OFFSETS(4);
-
-    private final short id;
-
-    private Type(int id) {
-      this.id = (short) id;
-    }
-
-    public short getId() {
-      return id;
-    }
-  }
-
-  private final Type type;
-  private final String topic;
-  private final int partition;
-  private final ChannelBuffer body;
-  private final ResponseHandler responseHandler;
-
-
-  public static KafkaRequest createProduce(String topic, int partition, ChannelBuffer body) {
-    return new KafkaRequest(Type.PRODUCE, topic, partition, body, ResponseHandler.NO_OP);
-  }
-
-  public static KafkaRequest createFetch(String topic, int partition, ChannelBuffer body, ResponseHandler handler) {
-    return new KafkaRequest(Type.FETCH, topic, partition, body, handler);
-  }
-
-  public static KafkaRequest createOffsets(String topic, int partition, ChannelBuffer body, ResponseHandler handler) {
-    return new KafkaRequest(Type.OFFSETS, topic, partition, body, handler);
-  }
-
-  private KafkaRequest(Type type, String topic, int partition, ChannelBuffer body, ResponseHandler responseHandler) {
-    this.type = type;
-    this.topic = topic;
-    this.partition = partition;
-    this.body = body;
-    this.responseHandler = responseHandler;
-  }
-
-  Type getType() {
-    return type;
-  }
-
-  String getTopic() {
-    return topic;
-  }
-
-  int getPartition() {
-    return partition;
-  }
-
-  ChannelBuffer getBody() {
-    return body;
-  }
-
-  ResponseHandler getResponseHandler() {
-    return responseHandler;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/kafka/client/KafkaRequestEncoder.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/kafka/client/KafkaRequestEncoder.java b/core/src/main/java/org/apache/twill/internal/kafka/client/KafkaRequestEncoder.java
deleted file mode 100644
index ef78c76..0000000
--- a/core/src/main/java/org/apache/twill/internal/kafka/client/KafkaRequestEncoder.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.kafka.client;
-
-import com.google.common.base.Charsets;
-import org.jboss.netty.buffer.ChannelBuffer;
-import org.jboss.netty.buffer.ChannelBuffers;
-import org.jboss.netty.channel.Channel;
-import org.jboss.netty.channel.ChannelHandlerContext;
-import org.jboss.netty.handler.codec.oneone.OneToOneEncoder;
-
-import java.nio.ByteBuffer;
-
-/**
- * A netty encoder that serializes a {@link KafkaRequest} into the Kafka wire format.
- */
-final class KafkaRequestEncoder extends OneToOneEncoder {
-
-  @Override
-  protected Object encode(ChannelHandlerContext ctx, Channel channel, Object msg) throws Exception {
-    if (!(msg instanceof KafkaRequest)) {
-      return msg;
-    }
-    KafkaRequest req = (KafkaRequest) msg;
-    ByteBuffer topic = Charsets.UTF_8.encode(req.getTopic());
-
-    ChannelBuffer buffer = ChannelBuffers.dynamicBuffer(16 + topic.remaining() + req.getBody().readableBytes());
-    int writerIdx = buffer.writerIndex();
-    buffer.writerIndex(writerIdx + 4);    // Reserves 4 bytes for message length
-
-    // Write out <REQUEST_TYPE>, <TOPIC_LENGTH>, <TOPIC>, <PARTITION>
-    buffer.writeShort(req.getType().getId());
-    buffer.writeShort(topic.remaining());
-    buffer.writeBytes(topic);
-    buffer.writeInt(req.getPartition());
-
-    // Write out the size of the whole buffer (excluding the size field) at the beginning
-    buffer.setInt(writerIdx, buffer.readableBytes() - 4 + req.getBody().readableBytes());
-
-    ChannelBuffer buf = ChannelBuffers.wrappedBuffer(buffer, req.getBody());
-    buf = buf.readBytes(buf.readableBytes());
-
-    return buf;
-  }
-}
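
For reference, the frame produced by the encoder above follows the Kafka 0.7 request layout: a 4-byte length (excluding the length field itself), a 2-byte request type, a 2-byte topic length, the UTF-8 topic bytes, a 4-byte partition id, and finally the message-set body. Below is a minimal sketch of the same layout using plain java.nio; the class and method names are illustrative only and are not part of this change.

  import java.nio.ByteBuffer;
  import java.nio.charset.StandardCharsets;

  final class KafkaFrameSketch {
    // Layout written by KafkaRequestEncoder:
    // [4-byte length][2-byte type][2-byte topic length][topic bytes][4-byte partition][message-set body]
    static ByteBuffer encode(short type, String topic, int partition, byte[] body) {
      byte[] topicBytes = topic.getBytes(StandardCharsets.UTF_8);
      int length = 2 + 2 + topicBytes.length + 4 + body.length;   // everything after the length field
      ByteBuffer buffer = ByteBuffer.allocate(4 + length);
      buffer.putInt(length);                        // size of the request, excluding this field
      buffer.putShort(type);                        // request type id, e.g. 0 = PRODUCE, 1 = FETCH
      buffer.putShort((short) topicBytes.length);   // topic length in bytes
      buffer.put(topicBytes);
      buffer.putInt(partition);
      buffer.put(body);                             // the encoded message set
      buffer.flip();
      return buffer;
    }
  }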

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/kafka/client/KafkaRequestSender.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/kafka/client/KafkaRequestSender.java b/core/src/main/java/org/apache/twill/internal/kafka/client/KafkaRequestSender.java
deleted file mode 100644
index fbc552c..0000000
--- a/core/src/main/java/org/apache/twill/internal/kafka/client/KafkaRequestSender.java
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.kafka.client;
-
-/**
- * Represents a sender that writes {@link KafkaRequest} instances to a Kafka broker connection.
- */
-interface KafkaRequestSender {
-
-  void send(KafkaRequest request);
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/kafka/client/KafkaResponse.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/kafka/client/KafkaResponse.java b/core/src/main/java/org/apache/twill/internal/kafka/client/KafkaResponse.java
deleted file mode 100644
index 68c1bd8..0000000
--- a/core/src/main/java/org/apache/twill/internal/kafka/client/KafkaResponse.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.kafka.client;
-
-import org.apache.twill.kafka.client.FetchException;
-import org.jboss.netty.buffer.ChannelBuffer;
-
-/**
- * Represents a response received from a Kafka broker.
- */
-final class KafkaResponse {
-
-  private final FetchException.ErrorCode errorCode;
-  private final ChannelBuffer body;
-  private final int size;
-
-  KafkaResponse(FetchException.ErrorCode errorCode, ChannelBuffer body, int size) {
-    this.errorCode = errorCode;
-    this.body = body;
-    this.size = size;
-  }
-
-  public int getSize() {
-    return size;
-  }
-
-  public FetchException.ErrorCode getErrorCode() {
-    return errorCode;
-  }
-
-  public ChannelBuffer getBody() {
-    return body;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/kafka/client/KafkaResponseDispatcher.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/kafka/client/KafkaResponseDispatcher.java b/core/src/main/java/org/apache/twill/internal/kafka/client/KafkaResponseDispatcher.java
deleted file mode 100644
index 47f70ce..0000000
--- a/core/src/main/java/org/apache/twill/internal/kafka/client/KafkaResponseDispatcher.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.kafka.client;
-
-import org.jboss.netty.channel.ChannelHandlerContext;
-import org.jboss.netty.channel.ExceptionEvent;
-import org.jboss.netty.channel.MessageEvent;
-import org.jboss.netty.channel.SimpleChannelHandler;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.net.SocketException;
-import java.nio.channels.ClosedChannelException;
-
-/**
- * A netty handler that dispatches each {@link KafkaResponse} to the {@link ResponseHandler} attached for the originating request.
- */
-final class KafkaResponseDispatcher extends SimpleChannelHandler {
-
-  private static final Logger LOG = LoggerFactory.getLogger(KafkaResponseDispatcher.class);
-
-  @Override
-  public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) throws Exception {
-    Object attachment = ctx.getAttachment();
-    if (e.getMessage() instanceof KafkaResponse && attachment instanceof ResponseHandler) {
-      ((ResponseHandler) attachment).received((KafkaResponse) e.getMessage());
-    } else {
-      super.messageReceived(ctx, e);
-    }
-  }
-
-  @Override
-  public void writeRequested(ChannelHandlerContext ctx, MessageEvent e) throws Exception {
-    if (e.getMessage() instanceof KafkaRequest) {
-      ctx.setAttachment(((KafkaRequest) e.getMessage()).getResponseHandler());
-    }
-    super.writeRequested(ctx, e);
-  }
-
-  @Override
-  public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e) throws Exception {
-    if (e.getCause() instanceof ClosedChannelException || e.getCause() instanceof SocketException) {
-      // No need to log socket exceptions, as the client has retry logic.
-      return;
-    }
-    LOG.warn("Exception caught in kafka client connection.", e.getCause());
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/kafka/client/KafkaResponseHandler.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/kafka/client/KafkaResponseHandler.java b/core/src/main/java/org/apache/twill/internal/kafka/client/KafkaResponseHandler.java
deleted file mode 100644
index 5251e65..0000000
--- a/core/src/main/java/org/apache/twill/internal/kafka/client/KafkaResponseHandler.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.kafka.client;
-
-import org.apache.twill.kafka.client.FetchException;
-import org.jboss.netty.buffer.ChannelBuffer;
-import org.jboss.netty.channel.ChannelHandlerContext;
-import org.jboss.netty.channel.Channels;
-import org.jboss.netty.channel.MessageEvent;
-import org.jboss.netty.channel.SimpleChannelHandler;
-
-/**
- * A netty handler that buffers incoming bytes and emits complete {@link KafkaResponse} objects upstream.
- */
-final class KafkaResponseHandler extends SimpleChannelHandler {
-
-  private final Bufferer bufferer = new Bufferer();
-
-  @Override
-  public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) throws Exception {
-    Object msg = e.getMessage();
-    if (!(msg instanceof ChannelBuffer)) {
-      super.messageReceived(ctx, e);
-      return;
-    }
-
-    bufferer.apply((ChannelBuffer) msg);
-    ChannelBuffer buffer = bufferer.getNext();
-    while (buffer.readable()) {
-      // Send the response object upstream
-      Channels.fireMessageReceived(ctx, new KafkaResponse(FetchException.ErrorCode.fromCode(buffer.readShort()),
-                                                          buffer, buffer.readableBytes() + 6));
-      buffer = bufferer.getNext();
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/kafka/client/MessageFetcher.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/kafka/client/MessageFetcher.java b/core/src/main/java/org/apache/twill/internal/kafka/client/MessageFetcher.java
deleted file mode 100644
index 0814917..0000000
--- a/core/src/main/java/org/apache/twill/internal/kafka/client/MessageFetcher.java
+++ /dev/null
@@ -1,243 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.kafka.client;
-
-import org.apache.twill.common.Threads;
-import org.apache.twill.kafka.client.FetchException;
-import org.apache.twill.kafka.client.FetchedMessage;
-import com.google.common.base.Throwables;
-import com.google.common.collect.AbstractIterator;
-import com.google.common.io.ByteStreams;
-import org.jboss.netty.buffer.ChannelBuffer;
-import org.jboss.netty.buffer.ChannelBufferInputStream;
-import org.jboss.netty.buffer.ChannelBufferOutputStream;
-import org.jboss.netty.buffer.ChannelBuffers;
-import org.xerial.snappy.SnappyInputStream;
-
-import java.io.IOException;
-import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.Executors;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.zip.GZIPInputStream;
-
-/**
- * This class consumes messages from a Kafka topic partition.
- */
-final class MessageFetcher extends AbstractIterator<FetchedMessage> implements ResponseHandler {
-
-  private static final long BACKOFF_INTERVAL_MS = 100;
-
-  private final KafkaRequestSender sender;
-  private final String topic;
-  private final int partition;
-  private final int maxSize;
-  private final AtomicLong offset;
-  private final BlockingQueue<FetchResult> messages;
-  private final ScheduledExecutorService scheduler;
-  private volatile long backoffMillis;
-  private final Runnable sendFetchRequest = new Runnable() {
-    @Override
-    public void run() {
-      sendFetchRequest();
-    }
-  };
-
-  MessageFetcher(String topic, int partition, long offset, int maxSize, KafkaRequestSender sender) {
-    this.topic = topic;
-    this.partition = partition;
-    this.sender = sender;
-    this.offset = new AtomicLong(offset);
-    this.maxSize = maxSize;
-    this.messages = new LinkedBlockingQueue<FetchResult>();
-    this.scheduler = Executors.newSingleThreadScheduledExecutor(
-                        Threads.createDaemonThreadFactory("kafka-" + topic + "-consumer"));
-  }
-
-  @Override
-  public void received(KafkaResponse response) {
-    if (response.getErrorCode() != FetchException.ErrorCode.OK) {
-      messages.add(FetchResult.failure(new FetchException("Error in fetching: " + response.getErrorCode(),
-                                                          response.getErrorCode())));
-      return;
-    }
-
-    try {
-      if (decodeResponse(response.getBody(), -1)) {
-        backoffMillis = 0;
-      } else {
-        backoffMillis = Math.min(backoffMillis + BACKOFF_INTERVAL_MS, 1000);
-        scheduler.schedule(sendFetchRequest, backoffMillis, TimeUnit.MILLISECONDS);
-      }
-    } catch (Throwable t) {
-      messages.add(FetchResult.failure(t));
-    }
-  }
-
-  private boolean decodeResponse(ChannelBuffer buffer, long nextOffset) {
-    boolean hasMessage = false;
-    boolean computeOffset = nextOffset < 0;
-    while (buffer.readableBytes() >= 4) {
-      int size = buffer.readInt();
-      if (buffer.readableBytes() < size) {
-        if (!hasMessage) {
-          throw new IllegalStateException("Size too small");
-        }
-        break;
-      }
-      nextOffset = computeOffset ? offset.addAndGet(size + 4) : nextOffset;
-      decodeMessage(size, buffer, nextOffset);
-      hasMessage = true;
-    }
-    return hasMessage;
-
-  }
-
-  private void decodeMessage(int size, ChannelBuffer buffer, long nextOffset) {
-    int readerIdx = buffer.readerIndex();
-    int magic = buffer.readByte();
-    Compression compression = magic == 0 ? Compression.NONE : Compression.fromCode(buffer.readByte());
-    int crc = buffer.readInt();
-
-    ChannelBuffer payload = buffer.readSlice(size - (buffer.readerIndex() - readerIdx));
-
-    // Verify CRC?
-    enqueueMessage(compression, payload, nextOffset);
-  }
-
-  private void enqueueMessage(Compression compression, ChannelBuffer payload, long nextOffset) {
-    switch (compression) {
-      case NONE:
-        messages.add(FetchResult.success(new BasicFetchedMessage(nextOffset, payload.toByteBuffer())));
-        break;
-      case GZIP:
-        decodeResponse(gunzip(payload), nextOffset);
-        break;
-      case SNAPPY:
-        decodeResponse(unsnappy(payload), nextOffset);
-        break;
-    }
-  }
-
-  private ChannelBuffer gunzip(ChannelBuffer source) {
-    ChannelBufferOutputStream output = new ChannelBufferOutputStream(
-                                              ChannelBuffers.dynamicBuffer(source.readableBytes() * 2));
-    try {
-      try {
-        GZIPInputStream gzipInput = new GZIPInputStream(new ChannelBufferInputStream(source));
-        try {
-          ByteStreams.copy(gzipInput, output);
-          return output.buffer();
-        } finally {
-          gzipInput.close();
-        }
-      } finally {
-        output.close();
-      }
-    } catch (IOException e) {
-      throw Throwables.propagate(e);
-    }
-  }
-
-  private ChannelBuffer unsnappy(ChannelBuffer source) {
-    ChannelBufferOutputStream output = new ChannelBufferOutputStream(
-                                              ChannelBuffers.dynamicBuffer(source.readableBytes() * 2));
-    try {
-      try {
-        SnappyInputStream snappyInput = new SnappyInputStream(new ChannelBufferInputStream(source));
-        try {
-          ByteStreams.copy(snappyInput, output);
-          return output.buffer();
-        } finally {
-          snappyInput.close();
-        }
-      } finally {
-        output.close();
-      }
-    } catch (IOException e) {
-      throw Throwables.propagate(e);
-    }
-  }
-
-  private void sendFetchRequest() {
-    ChannelBuffer fetchBody = ChannelBuffers.buffer(12);
-    fetchBody.writeLong(offset.get());
-    fetchBody.writeInt(maxSize);
-    sender.send(KafkaRequest.createFetch(topic, partition, fetchBody, MessageFetcher.this));
-  }
-
-  @Override
-  protected FetchedMessage computeNext() {
-    FetchResult result = messages.poll();
-    if (result != null) {
-      return getMessage(result);
-    }
-
-    try {
-      sendFetchRequest();
-      return getMessage(messages.take());
-    } catch (InterruptedException e) {
-      scheduler.shutdownNow();
-      return endOfData();
-    }
-  }
-
-  private FetchedMessage getMessage(FetchResult result) {
-    try {
-      if (result.isSuccess()) {
-        return result.getMessage();
-      } else {
-        throw result.getErrorCause();
-      }
-    } catch (Throwable t) {
-      throw Throwables.propagate(t);
-    }
-  }
-
-  private static final class FetchResult {
-    private final FetchedMessage message;
-    private final Throwable errorCause;
-
-    static FetchResult success(FetchedMessage message) {
-      return new FetchResult(message, null);
-    }
-
-    static FetchResult failure(Throwable cause) {
-      return new FetchResult(null, cause);
-    }
-
-    private FetchResult(FetchedMessage message, Throwable errorCause) {
-      this.message = message;
-      this.errorCause = errorCause;
-    }
-
-    public FetchedMessage getMessage() {
-      return message;
-    }
-
-    public Throwable getErrorCause() {
-      return errorCause;
-    }
-
-    public boolean isSuccess() {
-      return message != null;
-    }
-  }
-}
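
The decodeResponse/decodeMessage pair above expects each message in a fetched message set to be framed as a 4-byte size, a 1-byte magic value, a 1-byte compression code when the magic value is non-zero, a 4-byte CRC and then the payload. Below is a minimal sketch of writing one plain (magic = 0) message in that layout; the class name and the use of java.util.zip.CRC32 are illustrative assumptions, not part of this change.

  import java.nio.ByteBuffer;
  import java.util.zip.CRC32;

  final class MessageFrameSketch {
    // Frames a single uncompressed message in the layout read by MessageFetcher.decodeMessage:
    // [4-byte size][1-byte magic][4-byte crc][payload]  (no compression byte when magic == 0)
    static ByteBuffer frame(byte[] payload) {
      CRC32 crc = new CRC32();
      crc.update(payload);
      int size = 1 + 4 + payload.length;            // magic + crc + payload
      ByteBuffer buffer = ByteBuffer.allocate(4 + size);
      buffer.putInt(size);                          // size prefix consumed by decodeResponse
      buffer.put((byte) 0);                         // magic 0: plain payload, no compression byte follows
      buffer.putInt((int) crc.getValue());          // CRC of the payload (not verified by the fetcher)
      buffer.put(payload);
      buffer.flip();
      return buffer;
    }
  }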

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/kafka/client/MessageSetEncoder.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/kafka/client/MessageSetEncoder.java b/core/src/main/java/org/apache/twill/internal/kafka/client/MessageSetEncoder.java
deleted file mode 100644
index 49008cc..0000000
--- a/core/src/main/java/org/apache/twill/internal/kafka/client/MessageSetEncoder.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.kafka.client;
-
-import org.jboss.netty.buffer.ChannelBuffer;
-
-/**
- * Represents a set of messages that go into the same message set and get encoded as a
- * single Kafka message set.
- */
-interface MessageSetEncoder {
-
-  MessageSetEncoder add(ChannelBuffer payload);
-
-  ChannelBuffer finish();
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/kafka/client/ResponseHandler.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/kafka/client/ResponseHandler.java b/core/src/main/java/org/apache/twill/internal/kafka/client/ResponseHandler.java
deleted file mode 100644
index f681b85..0000000
--- a/core/src/main/java/org/apache/twill/internal/kafka/client/ResponseHandler.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.kafka.client;
-
-/**
- * Represents a handler for Kafka responses.
- */
-interface ResponseHandler {
-
-  ResponseHandler NO_OP = new ResponseHandler() {
-    @Override
-    public void received(KafkaResponse response) {
-      // No-op
-    }
-  };
-
-  void received(KafkaResponse response);
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/kafka/client/SimpleKafkaClient.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/kafka/client/SimpleKafkaClient.java b/core/src/main/java/org/apache/twill/internal/kafka/client/SimpleKafkaClient.java
deleted file mode 100644
index 8ff4856..0000000
--- a/core/src/main/java/org/apache/twill/internal/kafka/client/SimpleKafkaClient.java
+++ /dev/null
@@ -1,304 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.kafka.client;
-
-import org.apache.twill.common.Threads;
-import org.apache.twill.kafka.client.FetchException;
-import org.apache.twill.kafka.client.FetchedMessage;
-import org.apache.twill.kafka.client.KafkaClient;
-import org.apache.twill.kafka.client.PreparePublish;
-import org.apache.twill.zookeeper.ZKClient;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-import com.google.common.primitives.Ints;
-import com.google.common.primitives.Longs;
-import com.google.common.util.concurrent.AbstractIdleService;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.SettableFuture;
-import org.jboss.netty.bootstrap.ClientBootstrap;
-import org.jboss.netty.buffer.ChannelBuffer;
-import org.jboss.netty.buffer.ChannelBuffers;
-import org.jboss.netty.channel.Channel;
-import org.jboss.netty.channel.ChannelFuture;
-import org.jboss.netty.channel.ChannelFutureListener;
-import org.jboss.netty.channel.ChannelPipeline;
-import org.jboss.netty.channel.ChannelPipelineFactory;
-import org.jboss.netty.channel.Channels;
-import org.jboss.netty.channel.socket.nio.NioClientBossPool;
-import org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory;
-import org.jboss.netty.channel.socket.nio.NioWorkerPool;
-import org.jboss.netty.util.HashedWheelTimer;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.nio.ByteBuffer;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ThreadFactory;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicReference;
-
-/**
- * Basic implementation of {@link KafkaClient}.
- */
-public final class SimpleKafkaClient extends AbstractIdleService implements KafkaClient {
-
-  private static final Logger LOG = LoggerFactory.getLogger(SimpleKafkaClient.class);
-  private static final int BROKER_POLL_INTERVAL = 100;
-
-  private final KafkaBrokerCache brokerCache;
-  private ClientBootstrap bootstrap;
-  private ConnectionPool connectionPool;
-
-  public SimpleKafkaClient(ZKClient zkClient) {
-    this.brokerCache = new KafkaBrokerCache(zkClient);
-  }
-
-  @Override
-  protected void startUp() throws Exception {
-    brokerCache.startAndWait();
-    ThreadFactory threadFactory = Threads.createDaemonThreadFactory("kafka-client-netty-%d");
-    NioClientBossPool bossPool = new NioClientBossPool(Executors.newSingleThreadExecutor(threadFactory), 1,
-                                                       new HashedWheelTimer(threadFactory), null);
-    NioWorkerPool workerPool = new NioWorkerPool(Executors.newFixedThreadPool(4, threadFactory), 4);
-
-    bootstrap = new ClientBootstrap(new NioClientSocketChannelFactory(bossPool, workerPool));
-    bootstrap.setPipelineFactory(new KafkaChannelPipelineFactory());
-    connectionPool = new ConnectionPool(bootstrap);
-  }
-
-  @Override
-  protected void shutDown() throws Exception {
-    connectionPool.close();
-    bootstrap.releaseExternalResources();
-    brokerCache.stopAndWait();
-  }
-
-  @Override
-  public PreparePublish preparePublish(final String topic, final Compression compression) {
-    final Map<Integer, MessageSetEncoder> encoders = Maps.newHashMap();
-
-    return new PreparePublish() {
-      @Override
-      public PreparePublish add(byte[] payload, Object partitionKey) {
-        return add(ByteBuffer.wrap(payload), partitionKey);
-      }
-
-      @Override
-      public PreparePublish add(ByteBuffer payload, Object partitionKey) {
-        // TODO: Partition
-        int partition = 0;
-
-        MessageSetEncoder encoder = encoders.get(partition);
-        if (encoder == null) {
-          encoder = getEncoder(compression);
-          encoders.put(partition, encoder);
-        }
-        encoder.add(ChannelBuffers.wrappedBuffer(payload));
-
-        return this;
-      }
-
-      @Override
-      public ListenableFuture<?> publish() {
-        List<ListenableFuture<?>> futures = Lists.newArrayListWithCapacity(encoders.size());
-        for (Map.Entry<Integer, MessageSetEncoder> entry : encoders.entrySet()) {
-          futures.add(doPublish(topic, entry.getKey(), entry.getValue().finish()));
-        }
-        encoders.clear();
-        return Futures.allAsList(futures);
-      }
-
-      private ListenableFuture<?> doPublish(String topic, int partition, ChannelBuffer messageSet) {
-        final KafkaRequest request = KafkaRequest.createProduce(topic, partition, messageSet);
-        final SettableFuture<?> result = SettableFuture.create();
-        final ConnectionPool.ConnectResult connection =
-              connectionPool.connect(getTopicBroker(topic, partition).getAddress());
-
-        connection.getChannelFuture().addListener(new ChannelFutureListener() {
-          @Override
-          public void operationComplete(ChannelFuture future) throws Exception {
-            try {
-              future.getChannel().write(request).addListener(getPublishChannelFutureListener(result, null, connection));
-            } catch (Exception e) {
-              result.setException(e);
-            }
-          }
-        });
-
-        return result;
-      }
-    };
-  }
-
-  @Override
-  public Iterator<FetchedMessage> consume(final String topic, final int partition, long offset, int maxSize) {
-    Preconditions.checkArgument(maxSize >= 10, "Message size cannot be smaller than 10.");
-
-    // Connect to broker. Consumer connections are long-lived, so there is no need to worry about reuse.
-    final AtomicReference<ChannelFuture> channelFutureRef = new AtomicReference<ChannelFuture>(
-          connectionPool.connect(getTopicBroker(topic, partition).getAddress()).getChannelFuture());
-
-    return new MessageFetcher(topic, partition, offset, maxSize, new KafkaRequestSender() {
-
-      @Override
-      public void send(final KafkaRequest request) {
-        if (!isRunning()) {
-          return;
-        }
-        try {
-          // Try to send the request
-          Channel channel = channelFutureRef.get().getChannel();
-          if (!channel.write(request).await().isSuccess()) {
-            // If failed, retry
-            channel.close();
-            ChannelFuture channelFuture = connectionPool.connect(
-                                              getTopicBroker(topic, partition).getAddress()).getChannelFuture();
-            channelFutureRef.set(channelFuture);
-            channelFuture.addListener(new ChannelFutureListener() {
-              @Override
-              public void operationComplete(ChannelFuture channelFuture) throws Exception {
-                send(request);
-              }
-            });
-          }
-        } catch (InterruptedException e) {
-          // Ignore it
-          LOG.info("Interrupted when sending consume request", e);
-        }
-      }
-    });
-  }
-
-  @Override
-  public ListenableFuture<long[]> getOffset(final String topic, final int partition, long time, int maxOffsets) {
-    final SettableFuture<long[]> resultFuture = SettableFuture.create();
-    final ChannelBuffer body = ChannelBuffers.buffer(Longs.BYTES + Ints.BYTES);
-    body.writeLong(time);
-    body.writeInt(maxOffsets);
-
-    connectionPool.connect(getTopicBroker(topic, partition).getAddress())
-                  .getChannelFuture().addListener(new ChannelFutureListener() {
-      @Override
-      public void operationComplete(ChannelFuture future) throws Exception {
-        if (checkFailure(future)) {
-          return;
-        }
-
-        future.getChannel().write(KafkaRequest.createOffsets(topic, partition, body, new ResponseHandler() {
-          @Override
-          public void received(KafkaResponse response) {
-            if (response.getErrorCode() != FetchException.ErrorCode.OK) {
-              resultFuture.setException(new FetchException("Failed to fetch offset.", response.getErrorCode()));
-            } else {
-              // Decode the offset response, which contains a 4-byte count of offsets, followed by that many
-              // offsets, each 8 bytes in size.
-              ChannelBuffer resultBuffer = response.getBody();
-              int size = resultBuffer.readInt();
-              long[] result = new long[size];
-              for (int i = 0; i < size; i++) {
-                result[i] = resultBuffer.readLong();
-              }
-              resultFuture.set(result);
-            }
-          }
-        })).addListener(new ChannelFutureListener() {
-          @Override
-          public void operationComplete(ChannelFuture future) throws Exception {
-            checkFailure(future);
-          }
-        });
-      }
-
-      private boolean checkFailure(ChannelFuture future) {
-        if (!future.isSuccess()) {
-          if (future.isCancelled()) {
-            resultFuture.cancel(true);
-          } else {
-            resultFuture.setException(future.getCause());
-          }
-          return true;
-        }
-        return false;
-      }
-    });
-
-    return resultFuture;
-  }
-
-  private TopicBroker getTopicBroker(String topic, int partition) {
-    TopicBroker topicBroker = brokerCache.getBrokerAddress(topic, partition);
-    while (topicBroker == null) {
-      try {
-        TimeUnit.MILLISECONDS.sleep(BROKER_POLL_INTERVAL);
-      } catch (InterruptedException e) {
-        return null;
-      }
-      topicBroker = brokerCache.getBrokerAddress(topic, partition);
-    }
-    return topicBroker;
-  }
-
-  private MessageSetEncoder getEncoder(Compression compression) {
-    switch (compression) {
-      case GZIP:
-        return new GZipMessageSetEncoder();
-      case SNAPPY:
-        return new SnappyMessageSetEncoder();
-      default:
-        return new IdentityMessageSetEncoder();
-    }
-  }
-
-  private <V> ChannelFutureListener getPublishChannelFutureListener(final SettableFuture<V> result, final V resultObj,
-                                                                    final ConnectionPool.ConnectionReleaser releaser) {
-    return new ChannelFutureListener() {
-      @Override
-      public void operationComplete(ChannelFuture future) throws Exception {
-        try {
-          if (future.isSuccess()) {
-            result.set(resultObj);
-          } else if (future.isCancelled()) {
-            result.cancel(true);
-          } else {
-            result.setException(future.getCause());
-          }
-        } finally {
-          releaser.release();
-        }
-      }
-    };
-  }
-
-  private static final class KafkaChannelPipelineFactory implements ChannelPipelineFactory {
-
-    @Override
-    public ChannelPipeline getPipeline() throws Exception {
-      ChannelPipeline pipeline = Channels.pipeline();
-
-      pipeline.addLast("encoder", new KafkaRequestEncoder());
-      pipeline.addLast("decoder", new KafkaResponseHandler());
-      pipeline.addLast("dispatcher", new KafkaResponseDispatcher());
-      return pipeline;
-    }
-  }
-}
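
For context, here is a minimal usage sketch of the client above. The ZooKeeper connect string, topic name and payloads are placeholders, error handling is omitted, and the sketch assumes the package locations shown in this tree; it is not part of this change.

  import com.google.common.base.Charsets;
  import org.apache.twill.internal.kafka.client.Compression;
  import org.apache.twill.internal.kafka.client.SimpleKafkaClient;
  import org.apache.twill.kafka.client.FetchedMessage;
  import org.apache.twill.zookeeper.ZKClientService;

  import java.util.Iterator;

  public final class KafkaClientSketch {
    public static void main(String[] args) throws Exception {
      ZKClientService zkClient = ZKClientService.Builder.of("localhost:2181").build();
      zkClient.startAndWait();

      SimpleKafkaClient kafka = new SimpleKafkaClient(zkClient);
      kafka.startAndWait();

      // Publish two messages to partition 0 of the "logs" topic without compression.
      kafka.preparePublish("logs", Compression.NONE)
           .add("hello".getBytes(Charsets.UTF_8), 0)
           .add("world".getBytes(Charsets.UTF_8), 0)
           .publish().get();

      // Consume from offset 0, fetching up to 1 MB per request; the iterator blocks until messages arrive.
      Iterator<FetchedMessage> messages = kafka.consume("logs", 0, 0, 1048576);
      for (int i = 0; i < 2 && messages.hasNext(); i++) {
        FetchedMessage message = messages.next();   // carries the payload buffer and the next offset
        // process message here
      }

      kafka.stopAndWait();
      zkClient.stopAndWait();
    }
  }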

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/kafka/client/SnappyMessageSetEncoder.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/kafka/client/SnappyMessageSetEncoder.java b/core/src/main/java/org/apache/twill/internal/kafka/client/SnappyMessageSetEncoder.java
deleted file mode 100644
index bf18c08..0000000
--- a/core/src/main/java/org/apache/twill/internal/kafka/client/SnappyMessageSetEncoder.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.kafka.client;
-
-import org.xerial.snappy.SnappyOutputStream;
-
-import java.io.IOException;
-import java.io.OutputStream;
-
-/**
- * A {@link MessageSetEncoder} that compresses messages using Snappy.
- */
-final class SnappyMessageSetEncoder extends AbstractCompressedMessageSetEncoder {
-
-  SnappyMessageSetEncoder() {
-    super(Compression.SNAPPY);
-  }
-
-  @Override
-  protected OutputStream createCompressedStream(OutputStream os) throws IOException {
-    return new SnappyOutputStream(os);
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/kafka/client/TopicBroker.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/kafka/client/TopicBroker.java b/core/src/main/java/org/apache/twill/internal/kafka/client/TopicBroker.java
deleted file mode 100644
index fd4bf03..0000000
--- a/core/src/main/java/org/apache/twill/internal/kafka/client/TopicBroker.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.kafka.client;
-
-import java.net.InetSocketAddress;
-
-/**
- * Represents broker information for a given topic.
- */
-final class TopicBroker {
-
-  private final String topic;
-  private final InetSocketAddress address;
-  private final int partitionSize;
-
-  TopicBroker(String topic, InetSocketAddress address, int partitionSize) {
-    this.topic = topic;
-    this.address = address;
-    this.partitionSize = partitionSize;
-  }
-
-  String getTopic() {
-    return topic;
-  }
-
-  InetSocketAddress getAddress() {
-    return address;
-  }
-
-  int getPartitionSize() {
-    return partitionSize;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/kafka/client/package-info.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/kafka/client/package-info.java b/core/src/main/java/org/apache/twill/internal/kafka/client/package-info.java
deleted file mode 100644
index f3f615c..0000000
--- a/core/src/main/java/org/apache/twill/internal/kafka/client/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * This package provides a pure Java Kafka client implementation.
- */
-package org.apache.twill.internal.kafka.client;


[05/28] Making maven site works.

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/main/java/org/apache/twill/internal/container/TwillContainerMain.java
----------------------------------------------------------------------
diff --git a/yarn/src/main/java/org/apache/twill/internal/container/TwillContainerMain.java b/yarn/src/main/java/org/apache/twill/internal/container/TwillContainerMain.java
deleted file mode 100644
index bbd6c10..0000000
--- a/yarn/src/main/java/org/apache/twill/internal/container/TwillContainerMain.java
+++ /dev/null
@@ -1,182 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.container;
-
-import org.apache.twill.api.LocalFile;
-import org.apache.twill.api.RunId;
-import org.apache.twill.api.RuntimeSpecification;
-import org.apache.twill.api.TwillRunnableSpecification;
-import org.apache.twill.api.TwillSpecification;
-import org.apache.twill.discovery.DiscoveryService;
-import org.apache.twill.discovery.ZKDiscoveryService;
-import org.apache.twill.internal.Arguments;
-import org.apache.twill.internal.BasicTwillContext;
-import org.apache.twill.internal.Constants;
-import org.apache.twill.internal.ContainerInfo;
-import org.apache.twill.internal.EnvContainerInfo;
-import org.apache.twill.internal.EnvKeys;
-import org.apache.twill.internal.RunIds;
-import org.apache.twill.internal.ServiceMain;
-import org.apache.twill.internal.json.ArgumentsCodec;
-import org.apache.twill.internal.json.TwillSpecificationAdapter;
-import org.apache.twill.zookeeper.RetryStrategies;
-import org.apache.twill.zookeeper.ZKClient;
-import org.apache.twill.zookeeper.ZKClientService;
-import org.apache.twill.zookeeper.ZKClientServices;
-import org.apache.twill.zookeeper.ZKClients;
-import com.google.common.base.Charsets;
-import com.google.common.base.Preconditions;
-import com.google.common.io.Files;
-import com.google.common.util.concurrent.Service;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.security.Credentials;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.DataInputStream;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.IOException;
-import java.io.Reader;
-import java.util.concurrent.TimeUnit;
-
-/**
- * Main class for launching a {@link TwillContainerService} inside a YARN container.
- */
-public final class TwillContainerMain extends ServiceMain {
-
-  private static final Logger LOG = LoggerFactory.getLogger(TwillContainerMain.class);
-
-  /**
-   * Main method for launching a {@link TwillContainerService} which runs
-   * a {@link org.apache.twill.api.TwillRunnable}.
-   */
-  public static void main(final String[] args) throws Exception {
-    // Try to load the secure store from the localized file, which the AM requested the RM to localize for this container.
-    loadSecureStore();
-
-    String zkConnectStr = System.getenv(EnvKeys.TWILL_ZK_CONNECT);
-    File twillSpecFile = new File(Constants.Files.TWILL_SPEC);
-    RunId appRunId = RunIds.fromString(System.getenv(EnvKeys.TWILL_APP_RUN_ID));
-    RunId runId = RunIds.fromString(System.getenv(EnvKeys.TWILL_RUN_ID));
-    String runnableName = System.getenv(EnvKeys.TWILL_RUNNABLE_NAME);
-    int instanceId = Integer.parseInt(System.getenv(EnvKeys.TWILL_INSTANCE_ID));
-    int instanceCount = Integer.parseInt(System.getenv(EnvKeys.TWILL_INSTANCE_COUNT));
-
-    ZKClientService zkClientService = ZKClientServices.delegate(
-      ZKClients.reWatchOnExpire(
-        ZKClients.retryOnFailure(ZKClientService.Builder.of(zkConnectStr).build(),
-                                 RetryStrategies.fixDelay(1, TimeUnit.SECONDS))));
-
-    DiscoveryService discoveryService = new ZKDiscoveryService(zkClientService);
-
-    TwillSpecification twillSpec = loadTwillSpec(twillSpecFile);
-    renameLocalFiles(twillSpec.getRunnables().get(runnableName));
-    
-    TwillRunnableSpecification runnableSpec = twillSpec.getRunnables().get(runnableName).getRunnableSpecification();
-    ContainerInfo containerInfo = new EnvContainerInfo();
-    Arguments arguments = decodeArgs();
-    BasicTwillContext context = new BasicTwillContext(
-      runId, appRunId, containerInfo.getHost(),
-      arguments.getRunnableArguments().get(runnableName).toArray(new String[0]),
-      arguments.getArguments().toArray(new String[0]),
-      runnableSpec, instanceId, discoveryService, instanceCount,
-      containerInfo.getMemoryMB(), containerInfo.getVirtualCores()
-    );
-
-    Configuration conf = new YarnConfiguration(new HdfsConfiguration(new Configuration()));
-    Service service = new TwillContainerService(context, containerInfo,
-                                                getContainerZKClient(zkClientService, appRunId, runnableName),
-                                                runId, runnableSpec, getClassLoader(),
-                                                createAppLocation(conf));
-    new TwillContainerMain().doMain(zkClientService, service);
-  }
-
-  private static void loadSecureStore() throws IOException {
-    if (!UserGroupInformation.isSecurityEnabled()) {
-      return;
-    }
-
-    File file = new File(Constants.Files.CREDENTIALS);
-    if (file.exists()) {
-      Credentials credentials = new Credentials();
-      DataInputStream input = new DataInputStream(new FileInputStream(file));
-      try {
-        credentials.readTokenStorageStream(input);
-      } finally {
-        input.close();
-      }
-
-      UserGroupInformation.getCurrentUser().addCredentials(credentials);
-      LOG.info("Secure store updated from {}", file);
-    }
-  }
-
-  private static void renameLocalFiles(RuntimeSpecification runtimeSpec) {
-    for (LocalFile file : runtimeSpec.getLocalFiles()) {
-      if (file.isArchive()) {
-        String path = file.getURI().toString();
-        String name = file.getName() + (path.endsWith(".tar.gz") ? ".tar.gz" : path.substring(path.lastIndexOf('.')));
-        Preconditions.checkState(new File(name).renameTo(new File(file.getName())),
-                                 "Failed to rename file from %s to %s.",
-                                 name, file.getName());
-      }
-    }
-  }
-
-  private static ZKClient getContainerZKClient(ZKClient zkClient, RunId appRunId, String runnableName) {
-    return ZKClients.namespace(zkClient, String.format("/%s/runnables/%s", appRunId, runnableName));
-  }
-
-  /**
-   * Returns the ClassLoader for the runnable.
-   */
-  private static ClassLoader getClassLoader() {
-    ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
-    if (classLoader == null) {
-      return ClassLoader.getSystemClassLoader();
-    }
-    return classLoader;
-  }
-
-  private static TwillSpecification loadTwillSpec(File specFile) throws IOException {
-    Reader reader = Files.newReader(specFile, Charsets.UTF_8);
-    try {
-      return TwillSpecificationAdapter.create().fromJson(reader);
-    } finally {
-      reader.close();
-    }
-  }
-
-  private static Arguments decodeArgs() throws IOException {
-    return ArgumentsCodec.decode(Files.newReaderSupplier(new File(Constants.Files.ARGUMENTS), Charsets.UTF_8));
-  }
-
-  @Override
-  protected String getHostname() {
-    return System.getenv(EnvKeys.YARN_CONTAINER_HOST);
-  }
-
-  @Override
-  protected String getKafkaZKConnect() {
-    return System.getenv(EnvKeys.TWILL_LOG_KAFKA_ZK);
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/main/java/org/apache/twill/internal/container/TwillContainerService.java
----------------------------------------------------------------------
diff --git a/yarn/src/main/java/org/apache/twill/internal/container/TwillContainerService.java b/yarn/src/main/java/org/apache/twill/internal/container/TwillContainerService.java
deleted file mode 100644
index f5bc1f2..0000000
--- a/yarn/src/main/java/org/apache/twill/internal/container/TwillContainerService.java
+++ /dev/null
@@ -1,168 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.container;
-
-import org.apache.twill.api.Command;
-import org.apache.twill.api.RunId;
-import org.apache.twill.api.TwillRunnable;
-import org.apache.twill.api.TwillRunnableSpecification;
-import org.apache.twill.common.Threads;
-import org.apache.twill.filesystem.Location;
-import org.apache.twill.internal.AbstractTwillService;
-import org.apache.twill.internal.BasicTwillContext;
-import org.apache.twill.internal.ContainerInfo;
-import org.apache.twill.internal.ContainerLiveNodeData;
-import org.apache.twill.internal.ZKServiceDecorator;
-import org.apache.twill.internal.logging.Loggings;
-import org.apache.twill.internal.state.Message;
-import org.apache.twill.internal.state.MessageCallback;
-import org.apache.twill.internal.utils.Instances;
-import org.apache.twill.zookeeper.ZKClient;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Supplier;
-import com.google.common.util.concurrent.AbstractExecutionThreadService;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.Service;
-import com.google.common.util.concurrent.SettableFuture;
-import com.google.gson.Gson;
-import com.google.gson.JsonElement;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-
-/**
- * This class acts as a YARN container and runs a {@link org.apache.twill.api.TwillRunnable}.
- */
-public final class TwillContainerService extends AbstractTwillService {
-
-  private static final Logger LOG = LoggerFactory.getLogger(TwillContainerService.class);
-
-  private final TwillRunnableSpecification specification;
-  private final ClassLoader classLoader;
-  private final ContainerLiveNodeData containerLiveNode;
-  private final BasicTwillContext context;
-  private final ZKServiceDecorator serviceDelegate;
-  private ExecutorService commandExecutor;
-  private TwillRunnable runnable;
-
-  public TwillContainerService(BasicTwillContext context, ContainerInfo containerInfo, ZKClient zkClient,
-                               RunId runId, TwillRunnableSpecification specification, ClassLoader classLoader,
-                               Location applicationLocation) {
-    super(applicationLocation);
-
-    this.specification = specification;
-    this.classLoader = classLoader;
-    this.serviceDelegate = new ZKServiceDecorator(zkClient, runId, createLiveNodeSupplier(), new ServiceDelegate());
-    this.context = context;
-    this.containerLiveNode = new ContainerLiveNodeData(containerInfo.getId(),
-                                                       containerInfo.getHost().getCanonicalHostName());
-  }
-
-  private ListenableFuture<String> processMessage(final String messageId, final Message message) {
-    LOG.debug("Message received: {} {}.", messageId, message);
-
-    if (handleSecureStoreUpdate(message)) {
-      return Futures.immediateFuture(messageId);
-    }
-
-    final SettableFuture<String> result = SettableFuture.create();
-    Command command = message.getCommand();
-    if (message.getType() == Message.Type.SYSTEM
-          && "instances".equals(command.getCommand()) && command.getOptions().containsKey("count")) {
-      context.setInstanceCount(Integer.parseInt(command.getOptions().get("count")));
-    }
-
-    commandExecutor.execute(new Runnable() {
-
-      @Override
-      public void run() {
-        try {
-          runnable.handleCommand(message.getCommand());
-          result.set(messageId);
-        } catch (Exception e) {
-          result.setException(e);
-        }
-      }
-    });
-    return result;
-  }
-
-  private Supplier<? extends JsonElement> createLiveNodeSupplier() {
-    return new Supplier<JsonElement>() {
-      @Override
-      public JsonElement get() {
-        return new Gson().toJsonTree(containerLiveNode);
-      }
-    };
-  }
-
-  @Override
-  protected Service getServiceDelegate() {
-    return serviceDelegate;
-  }
-
-  private final class ServiceDelegate extends AbstractExecutionThreadService implements MessageCallback {
-
-    @Override
-    protected void startUp() throws Exception {
-      commandExecutor = Executors.newSingleThreadExecutor(
-        Threads.createDaemonThreadFactory("runnable-command-executor"));
-
-      Class<?> runnableClass = classLoader.loadClass(specification.getClassName());
-      Preconditions.checkArgument(TwillRunnable.class.isAssignableFrom(runnableClass),
-                                  "Class %s is not instance of TwillRunnable.", specification.getClassName());
-
-      runnable = Instances.newInstance((Class<TwillRunnable>) runnableClass);
-      runnable.initialize(context);
-    }
-
-    @Override
-    protected void triggerShutdown() {
-      try {
-        runnable.stop();
-      } catch (Throwable t) {
-        LOG.error("Exception when stopping runnable.", t);
-      }
-    }
-
-    @Override
-    protected void shutDown() throws Exception {
-      commandExecutor.shutdownNow();
-      runnable.destroy();
-      Loggings.forceFlush();
-    }
-
-    @Override
-    protected void run() throws Exception {
-      runnable.run();
-    }
-
-    @Override
-    public ListenableFuture<String> onReceived(String messageId, Message message) {
-      if (state() == State.RUNNING) {
-        // Only process message if the service is still alive
-        return processMessage(messageId, message);
-      }
-      return Futures.immediateFuture(messageId);
-    }
-  }
-}
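
For context, a minimal sketch of a runnable on the receiving end of the dispatch above: processMessage() hands user
commands to TwillRunnable.handleCommand() on the single-threaded command executor created in startUp(). EchoRunnable
and the "echo" command are hypothetical; only Command and AbstractTwillRunnable come from the Twill API.

import org.apache.twill.api.AbstractTwillRunnable;
import org.apache.twill.api.Command;

/**
 * Hypothetical runnable illustrating the command path above.
 */
public class EchoRunnable extends AbstractTwillRunnable {

  private volatile boolean stopped = false;

  @Override
  public void run() {
    while (!stopped) {
      // Application work goes here; a real runnable would block or sleep instead of spinning.
    }
  }

  @Override
  public void handleCommand(Command command) throws Exception {
    // Invoked from the "runnable-command-executor" thread shown above.
    if ("echo".equals(command.getCommand())) {
      System.out.println("echo: " + command.getOptions());
    }
  }

  @Override
  public void stop() {
    stopped = true;
  }
}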

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/main/java/org/apache/twill/internal/yarn/AbstractYarnProcessLauncher.java
----------------------------------------------------------------------
diff --git a/yarn/src/main/java/org/apache/twill/internal/yarn/AbstractYarnProcessLauncher.java b/yarn/src/main/java/org/apache/twill/internal/yarn/AbstractYarnProcessLauncher.java
deleted file mode 100644
index b810854..0000000
--- a/yarn/src/main/java/org/apache/twill/internal/yarn/AbstractYarnProcessLauncher.java
+++ /dev/null
@@ -1,220 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.yarn;
-
-import org.apache.twill.api.LocalFile;
-import org.apache.twill.internal.ProcessController;
-import org.apache.twill.internal.ProcessLauncher;
-import org.apache.twill.internal.utils.Paths;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-import org.apache.hadoop.security.Credentials;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.yarn.api.ApplicationConstants;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.List;
-import java.util.Map;
-
-/**
- * Abstract base class for creating different types of process launchers that launch processes on YARN.
- *
- * @param <T> Type of the object that contains information about the container that the process is going to launch.
- */
-public abstract class AbstractYarnProcessLauncher<T> implements ProcessLauncher<T> {
-
-  private static final Logger LOG = LoggerFactory.getLogger(AbstractYarnProcessLauncher.class);
-
-  private final T containerInfo;
-
-  protected AbstractYarnProcessLauncher(T containerInfo) {
-    this.containerInfo = containerInfo;
-  }
-
-  @Override
-  public T getContainerInfo() {
-    return containerInfo;
-  }
-
-  @Override
-  public <C> PrepareLaunchContext prepareLaunch(Map<String, String> environments,
-                                                Iterable<LocalFile> resources, C credentials) {
-    if (credentials != null) {
-      Preconditions.checkArgument(credentials instanceof Credentials, "Credentials should be of type %s",
-                                  Credentials.class.getName());
-    }
-    return new PrepareLaunchContextImpl(environments, resources, (Credentials) credentials);
-  }
-
-  /**
-   * Tells whether to append a suffix to the localized resource name for archive file types. Default is true.
-   */
-  protected boolean useArchiveSuffix() {
-    return true;
-  }
-
-  /**
-   * For child classes to override to perform the actual process launching.
-   */
-  protected abstract <R> ProcessController<R> doLaunch(YarnLaunchContext launchContext);
-
-  /**
-   * Implementation for the {@link PrepareLaunchContext}.
-   */
-  private final class PrepareLaunchContextImpl implements PrepareLaunchContext {
-
-    private final Credentials credentials;
-    private final YarnLaunchContext launchContext;
-    private final Map<String, YarnLocalResource> localResources;
-    private final Map<String, String> environment;
-    private final List<String> commands;
-
-    private PrepareLaunchContextImpl(Map<String, String> env, Iterable<LocalFile> localFiles, Credentials credentials) {
-      this.credentials = credentials;
-      this.launchContext = YarnUtils.createLaunchContext();
-      this.localResources = Maps.newHashMap();
-      this.environment = Maps.newHashMap(env);
-      this.commands = Lists.newLinkedList();
-
-      for (LocalFile localFile : localFiles) {
-        addLocalFile(localFile);
-      }
-    }
-
-    private void addLocalFile(LocalFile localFile) {
-      String name = localFile.getName();
-      // Always append the file extension to the resource name so that archive expansion by YARN works.
-      // Renaming would happen by the Container Launcher.
-      if (localFile.isArchive() && useArchiveSuffix()) {
-        String path = localFile.getURI().toString();
-        String suffix = Paths.getExtension(path);
-        if (!suffix.isEmpty()) {
-          name += '.' + suffix;
-        }
-      }
-      localResources.put(name, YarnUtils.createLocalResource(localFile));
-    }
-
-    @Override
-    public ResourcesAdder withResources() {
-      return new MoreResourcesImpl();
-    }
-
-    @Override
-    public AfterResources noResources() {
-      return new MoreResourcesImpl();
-    }
-
-    private final class MoreResourcesImpl implements MoreResources {
-
-      @Override
-      public MoreResources add(LocalFile localFile) {
-        addLocalFile(localFile);
-        return this;
-      }
-
-      @Override
-      public EnvironmentAdder withEnvironment() {
-        return finish();
-      }
-
-      @Override
-      public AfterEnvironment noEnvironment() {
-        return finish();
-      }
-
-      private MoreEnvironmentImpl finish() {
-        launchContext.setLocalResources(localResources);
-        return new MoreEnvironmentImpl();
-      }
-    }
-
-    private final class MoreEnvironmentImpl implements MoreEnvironment {
-
-      @Override
-      public CommandAdder withCommands() {
-        launchContext.setEnvironment(environment);
-        return new MoreCommandImpl();
-      }
-
-      @Override
-      public <V> MoreEnvironment add(String key, V value) {
-        environment.put(key, value.toString());
-        return this;
-      }
-    }
-
-    private final class MoreCommandImpl implements MoreCommand, StdOutSetter, StdErrSetter {
-
-      private final StringBuilder commandBuilder = new StringBuilder();
-
-      @Override
-      public StdOutSetter add(String cmd, String... args) {
-        commandBuilder.append(cmd);
-        for (String arg : args) {
-          commandBuilder.append(' ').append(arg);
-        }
-        return this;
-      }
-
-      @Override
-      public <R> ProcessController<R> launch() {
-        if (credentials != null && !credentials.getAllTokens().isEmpty()) {
-          for (Token<?> token : credentials.getAllTokens()) {
-            LOG.info("Launch with delegation token {}", token);
-          }
-          launchContext.setCredentials(credentials);
-        }
-        launchContext.setCommands(commands);
-        return doLaunch(launchContext);
-      }
-
-      @Override
-      public MoreCommand redirectError(String stderr) {
-        redirect(2, stderr);
-        return noError();
-      }
-
-      @Override
-      public MoreCommand noError() {
-        commands.add(commandBuilder.toString());
-        commandBuilder.setLength(0);
-        return this;
-      }
-
-      @Override
-      public StdErrSetter redirectOutput(String stdout) {
-        redirect(1, stdout);
-        return this;
-      }
-
-      @Override
-      public StdErrSetter noOutput() {
-        return this;
-      }
-
-      private void redirect(int type, String out) {
-        commandBuilder.append(' ')
-                      .append(type).append('>')
-                      .append(ApplicationConstants.LOG_DIR_EXPANSION_VAR).append('/').append(out);
-      }
-    }
-  }
-}
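
A caller-side sketch of the fluent PrepareLaunchContext chain implemented above. The launcher instance, localFiles,
launcherJar, credentials, and the command are all assumptions for illustration; the chained interfaces
(ResourcesAdder, EnvironmentAdder, CommandAdder, StdOutSetter, StdErrSetter) are the ones implemented above.

// launcher is an AbstractYarnProcessLauncher subclass instance; launcherJar is a hypothetical LocalFile.
launcher.prepareLaunch(ImmutableMap.of("TWILL_APP_NAME", "demo"), localFiles, credentials)
    .withResources()
      .add(launcherJar)
    .withEnvironment()
      .add("JAVA_HEAP_MAX", "-Xmx512m")
    .withCommands()
      .add("java", "-cp", "launcher.jar", "com.example.DemoMain")
      .redirectOutput("stdout")
      .redirectError("stderr")
    .launch();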

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/main/java/org/apache/twill/internal/yarn/VersionDetectYarnAMClientFactory.java
----------------------------------------------------------------------
diff --git a/yarn/src/main/java/org/apache/twill/internal/yarn/VersionDetectYarnAMClientFactory.java b/yarn/src/main/java/org/apache/twill/internal/yarn/VersionDetectYarnAMClientFactory.java
deleted file mode 100644
index 6f47b6c..0000000
--- a/yarn/src/main/java/org/apache/twill/internal/yarn/VersionDetectYarnAMClientFactory.java
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.yarn;
-
-import com.google.common.base.Throwables;
-import org.apache.hadoop.conf.Configuration;
-
-/**
- * Factory that creates a {@link YarnAMClient} matching the Hadoop version found on the classpath.
- */
-public final class VersionDetectYarnAMClientFactory implements YarnAMClientFactory {
-
-  private final Configuration conf;
-
-  public VersionDetectYarnAMClientFactory(Configuration conf) {
-    this.conf = conf;
-  }
-
-  @Override
-  @SuppressWarnings("unchecked")
-  public YarnAMClient create() {
-    try {
-      Class<YarnAMClient> clz;
-      if (YarnUtils.isHadoop20()) {
-        // Uses hadoop-2.0 class
-        String clzName = getClass().getPackage().getName() + ".Hadoop20YarnAMClient";
-        clz = (Class<YarnAMClient>) Class.forName(clzName);
-      } else {
-        // Uses hadoop-2.1 class
-        String clzName = getClass().getPackage().getName() + ".Hadoop21YarnAMClient";
-        clz = (Class<YarnAMClient>) Class.forName(clzName);
-      }
-
-      return clz.getConstructor(Configuration.class).newInstance(conf);
-
-    } catch (Exception e) {
-      throw Throwables.propagate(e);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/main/java/org/apache/twill/internal/yarn/VersionDetectYarnAppClientFactory.java
----------------------------------------------------------------------
diff --git a/yarn/src/main/java/org/apache/twill/internal/yarn/VersionDetectYarnAppClientFactory.java b/yarn/src/main/java/org/apache/twill/internal/yarn/VersionDetectYarnAppClientFactory.java
deleted file mode 100644
index f9db959..0000000
--- a/yarn/src/main/java/org/apache/twill/internal/yarn/VersionDetectYarnAppClientFactory.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.yarn;
-
-import com.google.common.base.Throwables;
-import org.apache.hadoop.conf.Configuration;
-
-/**
- * Factory that creates a {@link YarnAppClient} matching the Hadoop version found on the classpath.
- */
-public final class VersionDetectYarnAppClientFactory implements YarnAppClientFactory {
-
-  @Override
-  @SuppressWarnings("unchecked")
-  public YarnAppClient create(Configuration configuration) {
-    try {
-      Class<YarnAppClient> clz;
-
-      if (YarnUtils.isHadoop20()) {
-        // Uses hadoop-2.0 class.
-        String clzName = getClass().getPackage().getName() + ".Hadoop20YarnAppClient";
-        clz = (Class<YarnAppClient>) Class.forName(clzName);
-      } else {
-        // Uses hadoop-2.1 class
-        String clzName = getClass().getPackage().getName() + ".Hadoop21YarnAppClient";
-        clz = (Class<YarnAppClient>) Class.forName(clzName);
-      }
-
-      return clz.getConstructor(Configuration.class).newInstance(configuration);
-
-    } catch (Exception e) {
-      throw Throwables.propagate(e);
-    }
-  }
-}
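
A short usage sketch of the version-detecting factory, assuming a plain YarnConfiguration; the factory loads
Hadoop20YarnAppClient or Hadoop21YarnAppClient by reflection as shown above.

Configuration conf = new YarnConfiguration();
YarnAppClient appClient = new VersionDetectYarnAppClientFactory().create(conf);
appClient.startAndWait();               // YarnAppClient extends Guava's Service
try {
  // Submit applications via appClient.createLauncher(twillSpec) here.
} finally {
  appClient.stopAndWait();
}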

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/main/java/org/apache/twill/internal/yarn/YarnAMClient.java
----------------------------------------------------------------------
diff --git a/yarn/src/main/java/org/apache/twill/internal/yarn/YarnAMClient.java b/yarn/src/main/java/org/apache/twill/internal/yarn/YarnAMClient.java
deleted file mode 100644
index 83ba6a8..0000000
--- a/yarn/src/main/java/org/apache/twill/internal/yarn/YarnAMClient.java
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.yarn;
-
-import org.apache.twill.internal.ProcessLauncher;
-import com.google.common.collect.Sets;
-import com.google.common.util.concurrent.Service;
-import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.Priority;
-import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.util.Records;
-
-import java.net.InetSocketAddress;
-import java.net.URL;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.List;
-import java.util.Set;
-
-/**
- * This interface provides an abstraction for the AM to interact with YARN, hiding YARN version-specific
- * code and making multi-version compatibility easier.
- */
-public interface YarnAMClient extends Service {
-
-  /**
-   * Builder for creating a container request.
-   */
-  abstract class ContainerRequestBuilder {
-
-    protected final Resource capability;
-    protected final int count;
-    protected final Set<String> hosts = Sets.newHashSet();
-    protected final Set<String> racks = Sets.newHashSet();
-    protected final Priority priority = Records.newRecord(Priority.class);
-
-    protected ContainerRequestBuilder(Resource capability, int count) {
-      this.capability = capability;
-      this.count = count;
-    }
-
-    public ContainerRequestBuilder addHosts(String firstHost, String...moreHosts) {
-      return add(hosts, firstHost, moreHosts);
-    }
-
-    public ContainerRequestBuilder addRacks(String firstRack, String...moreRacks) {
-      return add(racks, firstRack, moreRacks);
-    }
-
-    public ContainerRequestBuilder setPriority(int prio) {
-      priority.setPriority(prio);
-      return this;
-    }
-
-    /**
-     * Adds a container request. Returns a unique ID for the request.
-     */
-    public abstract String apply();
-
-    private <T> ContainerRequestBuilder add(Collection<T> collection, T first, T... more) {
-      collection.add(first);
-      Collections.addAll(collection, more);
-      return this;
-    }
-  }
-
-  ContainerId getContainerId();
-
-  String getHost();
-
-  /**
-   * Sets the tracker address and tracker url. This method should be called before calling {@link #start()}.
-   */
-  void setTracker(InetSocketAddress trackerAddr, URL trackerUrl);
-
-  /**
-   * Callback for allocate call.
-   */
-  // TODO: Move AM heartbeat logic into this interface so AM only needs to handle callback.
-  interface AllocateHandler {
-    void acquired(List<ProcessLauncher<YarnContainerInfo>> launchers);
-
-    void completed(List<YarnContainerStatus> completed);
-  }
-
-  void allocate(float progress, AllocateHandler handler) throws Exception;
-
-  ContainerRequestBuilder addContainerRequest(Resource capability);
-
-  ContainerRequestBuilder addContainerRequest(Resource capability, int count);
-
-  /**
-   * Notifies that a container request has been fulfilled.
-   *
-   * Note: This method is needed to work around an apparent bug in the YARN AMRMClient implementation: if
-   * a container is requested after a previous container with the same capability was acquired, multiple
-   * containers may get allocated instead of one.
-   *
-   * @param id The ID returned by {@link YarnAMClient.ContainerRequestBuilder#apply()}.
-   */
-  void completeContainerRequest(String id);
-}
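
A hedged sketch of how an application master could drive this interface. The amClient reference is assumed to be a
final field obtained from a YarnAMClientFactory; the host name is hypothetical and checked-exception handling
(allocate throws Exception) is omitted.

Resource capability = Records.newRecord(Resource.class);
capability.setMemory(512);

final String requestId = amClient.addContainerRequest(capability, 2)
    .addHosts("worker-1.example.com")
    .setPriority(0)
    .apply();

// Each AM heartbeat reports progress and receives acquired/completed containers.
amClient.allocate(0.0f, new YarnAMClient.AllocateHandler() {
  @Override
  public void acquired(List<ProcessLauncher<YarnContainerInfo>> launchers) {
    // Launch runnables in the acquired containers, then mark the request fulfilled.
    amClient.completeContainerRequest(requestId);
  }

  @Override
  public void completed(List<YarnContainerStatus> completed) {
    // React to containers that have finished.
  }
});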

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/main/java/org/apache/twill/internal/yarn/YarnAMClientFactory.java
----------------------------------------------------------------------
diff --git a/yarn/src/main/java/org/apache/twill/internal/yarn/YarnAMClientFactory.java b/yarn/src/main/java/org/apache/twill/internal/yarn/YarnAMClientFactory.java
deleted file mode 100644
index b2a1194..0000000
--- a/yarn/src/main/java/org/apache/twill/internal/yarn/YarnAMClientFactory.java
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.yarn;
-
-/**
- * Factory for creating {@link YarnAMClient} instances.
- */
-public interface YarnAMClientFactory {
-
-  YarnAMClient create();
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/main/java/org/apache/twill/internal/yarn/YarnAppClient.java
----------------------------------------------------------------------
diff --git a/yarn/src/main/java/org/apache/twill/internal/yarn/YarnAppClient.java b/yarn/src/main/java/org/apache/twill/internal/yarn/YarnAppClient.java
deleted file mode 100644
index 71a9e68..0000000
--- a/yarn/src/main/java/org/apache/twill/internal/yarn/YarnAppClient.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.yarn;
-
-import org.apache.twill.api.TwillSpecification;
-import org.apache.twill.internal.ProcessController;
-import org.apache.twill.internal.ProcessLauncher;
-import com.google.common.util.concurrent.Service;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-
-/**
- * Interface for launching Yarn application from client.
- */
-public interface YarnAppClient extends Service {
-
-  /**
-   * Creates a {@link ProcessLauncher} for launching the application represented by the given spec.
-   */
-  ProcessLauncher<ApplicationId> createLauncher(TwillSpecification twillSpec) throws Exception;
-
-  /**
-   * Creates a {@link ProcessLauncher} for launching application with the given user and spec.
-   *
-   * @deprecated This method will get removed.
-   */
-  @Deprecated
-  ProcessLauncher<ApplicationId> createLauncher(String user, TwillSpecification twillSpec) throws Exception;
-
-  ProcessController<YarnApplicationReport> createProcessController(ApplicationId appId);
-}
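
A client-side sketch tying the two methods together; twillSpec is a hypothetical TwillSpecification, and the checked
exception thrown by createLauncher is left unhandled.

ProcessLauncher<ApplicationId> launcher = appClient.createLauncher(twillSpec);
ApplicationId appId = launcher.getContainerInfo();
// ... prepareLaunch(...).launch() on the launcher to start the application master ...
ProcessController<YarnApplicationReport> controller = appClient.createProcessController(appId);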

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/main/java/org/apache/twill/internal/yarn/YarnAppClientFactory.java
----------------------------------------------------------------------
diff --git a/yarn/src/main/java/org/apache/twill/internal/yarn/YarnAppClientFactory.java b/yarn/src/main/java/org/apache/twill/internal/yarn/YarnAppClientFactory.java
deleted file mode 100644
index 70cecad..0000000
--- a/yarn/src/main/java/org/apache/twill/internal/yarn/YarnAppClientFactory.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.yarn;
-
-import org.apache.hadoop.conf.Configuration;
-
-/**
- * Factory for creating {@link YarnAppClient} instances from a Hadoop configuration.
- */
-public interface YarnAppClientFactory {
-
-  YarnAppClient create(Configuration configuration);
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/main/java/org/apache/twill/internal/yarn/YarnApplicationReport.java
----------------------------------------------------------------------
diff --git a/yarn/src/main/java/org/apache/twill/internal/yarn/YarnApplicationReport.java b/yarn/src/main/java/org/apache/twill/internal/yarn/YarnApplicationReport.java
deleted file mode 100644
index 4dbb1d1..0000000
--- a/yarn/src/main/java/org/apache/twill/internal/yarn/YarnApplicationReport.java
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.yarn;
-
-import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
-import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
-import org.apache.hadoop.yarn.api.records.YarnApplicationState;
-
-/**
- * This interface adapts differences in ApplicationReport across Hadoop versions.
- */
-public interface YarnApplicationReport {
-
-  /**
-   * Get the <code>ApplicationId</code> of the application.
-   * @return <code>ApplicationId</code> of the application
-   */
-  ApplicationId getApplicationId();
-
-  /**
-   * Get the <code>ApplicationAttemptId</code> of the current
-   * attempt of the application
-   * @return <code>ApplicationAttemptId</code> of the attempt
-   */
-  ApplicationAttemptId getCurrentApplicationAttemptId();
-
-  /**
-   * Get the <em>queue</em> to which the application was submitted.
-   * @return <em>queue</em> to which the application was submitted
-   */
-  String getQueue();
-
-  /**
-   * Get the user-defined <em>name</em> of the application.
-   * @return <em>name</em> of the application
-   */
-  String getName();
-
-  /**
-   * Get the <em>host</em> on which the <code>ApplicationMaster</code>
-   * is running.
-   * @return <em>host</em> on which the <code>ApplicationMaster</code>
-   *         is running
-   */
-  String getHost();
-
-  /**
-   * Get the <em>RPC port</em> of the <code>ApplicationMaster</code>.
-   * @return <em>RPC port</em> of the <code>ApplicationMaster</code>
-   */
-  int getRpcPort();
-
-
-  /**
-   * Get the <code>YarnApplicationState</code> of the application.
-   * @return <code>YarnApplicationState</code> of the application
-   */
-  YarnApplicationState getYarnApplicationState();
-
-
-  /**
-   * Get the <em>diagnostic information</em> of the application in case of
-   * errors.
-   * @return <em>diagnostic information</em> of the application in case
-   *         of errors
-   */
-  String getDiagnostics();
-
-
-  /**
-   * Get the <em>tracking url</em> for the application.
-   * @return <em>tracking url</em> for the application
-   */
-  String getTrackingUrl();
-
-
-  /**
-   * Get the original not-proxied <em>tracking url</em> for the application.
-   * This is intended to only be used by the proxy itself.
-   * @return the original not-proxied <em>tracking url</em> for the application
-   */
-  String getOriginalTrackingUrl();
-
-  /**
-   * Get the <em>start time</em> of the application.
-   * @return <em>start time</em> of the application
-   */
-  long getStartTime();
-
-
-  /**
-   * Get the <em>finish time</em> of the application.
-   * @return <em>finish time</em> of the application
-   */
-  long getFinishTime();
-
-
-  /**
-   * Get the <em>final finish status</em> of the application.
-   * @return <em>final finish status</em> of the application
-   */
-  FinalApplicationStatus getFinalApplicationStatus();
-
-  /**
-   * Retrieve the structure containing the job resources for this application
-   * @return the job resources structure for this application
-   */
-  ApplicationResourceUsageReport getApplicationResourceUsageReport();
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/main/java/org/apache/twill/internal/yarn/YarnContainerInfo.java
----------------------------------------------------------------------
diff --git a/yarn/src/main/java/org/apache/twill/internal/yarn/YarnContainerInfo.java b/yarn/src/main/java/org/apache/twill/internal/yarn/YarnContainerInfo.java
deleted file mode 100644
index e806da7..0000000
--- a/yarn/src/main/java/org/apache/twill/internal/yarn/YarnContainerInfo.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.yarn;
-
-import org.apache.twill.internal.ContainerInfo;
-
-/**
- * A YARN-specific extension of {@link ContainerInfo} that exposes the underlying container record.
- */
-public interface YarnContainerInfo extends ContainerInfo {
-
-  <T> T getContainer();
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/main/java/org/apache/twill/internal/yarn/YarnContainerStatus.java
----------------------------------------------------------------------
diff --git a/yarn/src/main/java/org/apache/twill/internal/yarn/YarnContainerStatus.java b/yarn/src/main/java/org/apache/twill/internal/yarn/YarnContainerStatus.java
deleted file mode 100644
index 57e712c..0000000
--- a/yarn/src/main/java/org/apache/twill/internal/yarn/YarnContainerStatus.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.yarn;
-
-import org.apache.hadoop.yarn.api.records.ContainerState;
-
-/**
- * This interface adapts differences in ContainerStatus between Hadoop 2.0 and 2.1.
- */
-public interface YarnContainerStatus {
-
-  String getContainerId();
-
-  ContainerState getState();
-
-  int getExitStatus();
-
-  String getDiagnostics();
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/main/java/org/apache/twill/internal/yarn/YarnLaunchContext.java
----------------------------------------------------------------------
diff --git a/yarn/src/main/java/org/apache/twill/internal/yarn/YarnLaunchContext.java b/yarn/src/main/java/org/apache/twill/internal/yarn/YarnLaunchContext.java
deleted file mode 100644
index 984a1be..0000000
--- a/yarn/src/main/java/org/apache/twill/internal/yarn/YarnLaunchContext.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.yarn;
-
-import org.apache.hadoop.security.Credentials;
-import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
-
-import java.nio.ByteBuffer;
-import java.util.List;
-import java.util.Map;
-
-/**
- * This interface adapts ContainerLaunchContext across Hadoop versions.
- */
-public interface YarnLaunchContext {
-
-  <T> T getLaunchContext();
-
-  void setCredentials(Credentials credentials);
-
-  void setLocalResources(Map<String, YarnLocalResource> localResources);
-
-  void setServiceData(Map<String, ByteBuffer> serviceData);
-
-  Map<String, String> getEnvironment();
-
-  void setEnvironment(Map<String, String> environment);
-
-  List<String> getCommands();
-
-  void setCommands(List<String> commands);
-
-  void setApplicationACLs(Map<ApplicationAccessType, String> acls);
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/main/java/org/apache/twill/internal/yarn/YarnLocalResource.java
----------------------------------------------------------------------
diff --git a/yarn/src/main/java/org/apache/twill/internal/yarn/YarnLocalResource.java b/yarn/src/main/java/org/apache/twill/internal/yarn/YarnLocalResource.java
deleted file mode 100644
index 9bfc224..0000000
--- a/yarn/src/main/java/org/apache/twill/internal/yarn/YarnLocalResource.java
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.yarn;
-
-import org.apache.hadoop.yarn.api.records.LocalResourceType;
-import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
-import org.apache.hadoop.yarn.api.records.URL;
-
-/**
- * An adapter interface for the LocalResource class/interface across Hadoop versions.
- */
-public interface YarnLocalResource {
-
-  /**
-   * Returns the actual LocalResource object in Yarn.
-   */
-  <T> T getLocalResource();
-
-  /**
-   * Get the <em>location</em> of the resource to be localized.
-   * @return <em>location</em> of the resource to be localized
-   */
-  URL getResource();
-
-  /**
-   * Set <em>location</em> of the resource to be localized.
-   * @param resource <em>location</em> of the resource to be localized
-   */
-  void setResource(URL resource);
-
-  /**
-   * Get the <em>size</em> of the resource to be localized.
-   * @return <em>size</em> of the resource to be localized
-   */
-  long getSize();
-
-  /**
-   * Set the <em>size</em> of the resource to be localized.
-   * @param size <em>size</em> of the resource to be localized
-   */
-  void setSize(long size);
-
-  /**
-   * Get the original <em>timestamp</em> of the resource to be localized, used
-   * for verification.
-   * @return <em>timestamp</em> of the resource to be localized
-   */
-  long getTimestamp();
-
-  /**
-   * Set the <em>timestamp</em> of the resource to be localized, used
-   * for verification.
-   * @param timestamp <em>timestamp</em> of the resource to be localized
-   */
-  void setTimestamp(long timestamp);
-
-  /**
-   * Get the <code>LocalResourceType</code> of the resource to be localized.
-   * @return <code>LocalResourceType</code> of the resource to be localized
-   */
-  LocalResourceType getType();
-
-  /**
-   * Set the <code>LocalResourceType</code> of the resource to be localized.
-   * @param type <code>LocalResourceType</code> of the resource to be localized
-   */
-  void setType(LocalResourceType type);
-
-  /**
-   * Get the <code>LocalResourceVisibility</code> of the resource to be
-   * localized.
-   * @return <code>LocalResourceVisibility</code> of the resource to be
-   *         localized
-   */
-  LocalResourceVisibility getVisibility();
-
-  /**
-   * Set the <code>LocalResourceVisibility</code> of the resource to be
-   * localized.
-   * @param visibility <code>LocalResourceVisibility</code> of the resource to be
-   *                   localized
-   */
-  void setVisibility(LocalResourceVisibility visibility);
-
-  /**
-   * Get the <em>pattern</em> that should be used to extract entries from the
-   * archive (only used when type is <code>PATTERN</code>).
-   * @return <em>pattern</em> that should be used to extract entries from the
-   * archive.
-   */
-  String getPattern();
-
-  /**
-   * Set the <em>pattern</em> that should be used to extract entries from the
-   * archive (only used when type is <code>PATTERN</code>).
-   * @param pattern <em>pattern</em> that should be used to extract entries
-   * from the archive.
-   */
-  void setPattern(String pattern);
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/main/java/org/apache/twill/internal/yarn/YarnNMClient.java
----------------------------------------------------------------------
diff --git a/yarn/src/main/java/org/apache/twill/internal/yarn/YarnNMClient.java b/yarn/src/main/java/org/apache/twill/internal/yarn/YarnNMClient.java
deleted file mode 100644
index d863c91..0000000
--- a/yarn/src/main/java/org/apache/twill/internal/yarn/YarnNMClient.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.yarn;
-
-import org.apache.twill.common.Cancellable;
-
-/**
- * Abstraction for dealing with API differences across Hadoop YARN versions.
- */
-public interface YarnNMClient {
-
-  /**
-   * Starts a process based on the given launch context.
-   *
-   * @param containerInfo The container that the new process will launch in.
-   * @param launchContext Contains information about the process about to start.
-   * @return A {@link Cancellable} that, when {@link Cancellable#cancel()} is invoked,
-   *         will try to shut down the process.
-   *
-   */
-  Cancellable start(YarnContainerInfo containerInfo, YarnLaunchContext launchContext);
-}
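
A sketch of the start/stop lifecycle; nmClient, containerInfo, and launchContext are assumed to come from the
allocation and launch-preparation steps shown earlier.

Cancellable stopper = nmClient.start(containerInfo, launchContext);
// ... later, when the runnable should be stopped, ask YARN to stop the container process:
stopper.cancel();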

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/main/java/org/apache/twill/internal/yarn/YarnUtils.java
----------------------------------------------------------------------
diff --git a/yarn/src/main/java/org/apache/twill/internal/yarn/YarnUtils.java b/yarn/src/main/java/org/apache/twill/internal/yarn/YarnUtils.java
deleted file mode 100644
index 4f7597b..0000000
--- a/yarn/src/main/java/org/apache/twill/internal/yarn/YarnUtils.java
+++ /dev/null
@@ -1,279 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.yarn;
-
-import org.apache.twill.api.LocalFile;
-import org.apache.twill.filesystem.ForwardingLocationFactory;
-import org.apache.twill.filesystem.HDFSLocationFactory;
-import org.apache.twill.filesystem.LocationFactory;
-import com.google.common.base.Function;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Throwables;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Maps;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.io.DataInputByteBuffer;
-import org.apache.hadoop.io.DataOutputBuffer;
-import org.apache.hadoop.security.Credentials;
-import org.apache.hadoop.security.SecurityUtil;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.api.records.LocalResourceType;
-import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
-import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.util.ConverterUtils;
-import org.apache.hadoop.yarn.util.Records;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.lang.reflect.Method;
-import java.net.InetSocketAddress;
-import java.nio.ByteBuffer;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.atomic.AtomicReference;
-
-/**
- * Collection of helper methods to simplify YARN calls.
- */
-public class YarnUtils {
-
-  private static final Logger LOG = LoggerFactory.getLogger(YarnUtils.class);
-  private static final AtomicReference<Boolean> HADOOP_20 = new AtomicReference<Boolean>();
-
-  public static YarnLocalResource createLocalResource(LocalFile localFile) {
-    Preconditions.checkArgument(localFile.getLastModified() >= 0, "Last modified time should be >= 0.");
-    Preconditions.checkArgument(localFile.getSize() >= 0, "File size should be >= 0.");
-
-    YarnLocalResource resource = createAdapter(YarnLocalResource.class);
-    resource.setVisibility(LocalResourceVisibility.APPLICATION);
-    resource.setResource(ConverterUtils.getYarnUrlFromURI(localFile.getURI()));
-    resource.setTimestamp(localFile.getLastModified());
-    resource.setSize(localFile.getSize());
-    return setLocalResourceType(resource, localFile);
-  }
-
-  public static YarnLaunchContext createLaunchContext() {
-    return createAdapter(YarnLaunchContext.class);
-  }
-
-  // temporary workaround since older versions of hadoop don't have the getVirtualCores method.
-  public static int getVirtualCores(Resource resource) {
-    try {
-      Method getVirtualCores = Resource.class.getMethod("getVirtualCores");
-      return (Integer) getVirtualCores.invoke(resource);
-    } catch (Exception e) {
-      return 0;
-    }
-  }
-
-  /**
-   * Temporary workaround since older versions of Hadoop don't have the setVirtualCores method.
-   *
-   * @param resource the resource record to modify
-   * @param cores the number of virtual cores to set
-   * @return true if virtual cores were set, false if not.
-   */
-  public static boolean setVirtualCores(Resource resource, int cores) {
-    try {
-      Method setVirtualCores = Resource.class.getMethod("setVirtualCores", int.class);
-      setVirtualCores.invoke(resource, cores);
-    } catch (Exception e) {
-      // It's ok to ignore this exception, as it's using older version of API.
-      return false;
-    }
-    return true;
-  }
-
-  /**
-   * Creates {@link ApplicationId} from the given cluster timestamp and id.
-   */
-  public static ApplicationId createApplicationId(long timestamp, int id) {
-    try {
-      try {
-        // For Hadoop-2.1
-        Method method = ApplicationId.class.getMethod("newInstance", long.class, int.class);
-        return (ApplicationId) method.invoke(null, timestamp, id);
-      } catch (NoSuchMethodException e) {
-        // Try with Hadoop-2.0 way
-        ApplicationId appId = Records.newRecord(ApplicationId.class);
-
-        Method setClusterTimestamp = ApplicationId.class.getMethod("setClusterTimestamp", long.class);
-        Method setId = ApplicationId.class.getMethod("setId", int.class);
-
-        setClusterTimestamp.invoke(appId, timestamp);
-        setId.invoke(appId, id);
-
-        return appId;
-      }
-    } catch (Exception e) {
-      throw Throwables.propagate(e);
-    }
-  }
-
-  /**
-   * Helper method to get delegation tokens for the given LocationFactory.
-   * @param config The hadoop configuration.
-   * @param locationFactory The LocationFactory for generating tokens.
-   * @param credentials Credentials for storing tokens acquired.
-   * @return List of delegation Tokens acquired.
-   */
-  public static List<Token<?>> addDelegationTokens(Configuration config,
-                                                   LocationFactory locationFactory,
-                                                   Credentials credentials) throws IOException {
-    if (!UserGroupInformation.isSecurityEnabled()) {
-      LOG.debug("Security is not enabled");
-      return ImmutableList.of();
-    }
-
-    FileSystem fileSystem = getFileSystem(locationFactory);
-
-    if (fileSystem == null) {
-      LOG.debug("LocationFactory is not HDFS");
-      return ImmutableList.of();
-    }
-
-    String renewer = getYarnTokenRenewer(config);
-
-    Token<?>[] tokens = fileSystem.addDelegationTokens(renewer, credentials);
-    return tokens == null ? ImmutableList.<Token<?>>of() : ImmutableList.copyOf(tokens);
-  }
-
-  public static ByteBuffer encodeCredentials(Credentials credentials) {
-    try {
-      DataOutputBuffer out = new DataOutputBuffer();
-      credentials.writeTokenStorageToStream(out);
-      return ByteBuffer.wrap(out.getData(), 0, out.getLength());
-    } catch (IOException e) {
-      // Shouldn't throw
-      LOG.error("Failed to encode Credentials.", e);
-      throw Throwables.propagate(e);
-    }
-  }
-
-  /**
-   * Decodes {@link Credentials} from the given buffer.
-   * If the buffer is null or empty, it returns an empty Credentials.
-   */
-  public static Credentials decodeCredentials(ByteBuffer buffer) throws IOException {
-    Credentials credentials = new Credentials();
-    if (buffer != null && buffer.hasRemaining()) {
-      DataInputByteBuffer in = new DataInputByteBuffer();
-      in.reset(buffer);
-      credentials.readTokenStorageStream(in);
-    }
-    return credentials;
-  }
-
-  public static String getYarnTokenRenewer(Configuration config) throws IOException {
-    String rmHost = getRMAddress(config).getHostName();
-    String renewer = SecurityUtil.getServerPrincipal(config.get(YarnConfiguration.RM_PRINCIPAL), rmHost);
-
-    if (renewer == null || renewer.length() == 0) {
-      throw new IOException("No Kerberos principal for Yarn RM to use as renewer");
-    }
-
-    return renewer;
-  }
-
-  public static InetSocketAddress getRMAddress(Configuration config) {
-    return config.getSocketAddr(YarnConfiguration.RM_ADDRESS,
-                                YarnConfiguration.DEFAULT_RM_ADDRESS,
-                                YarnConfiguration.DEFAULT_RM_PORT);
-  }
-
-  /**
-   * Returns true if Hadoop-2.0 classes are in the classpath.
-   */
-  public static boolean isHadoop20() {
-    Boolean hadoop20 = HADOOP_20.get();
-    if (hadoop20 != null) {
-      return hadoop20;
-    }
-    try {
-      Class.forName("org.apache.hadoop.yarn.client.api.NMClient");
-      HADOOP_20.set(false);
-      return false;
-    } catch (ClassNotFoundException e) {
-      HADOOP_20.set(true);
-      return true;
-    }
-  }
-
-  /**
-   * Helper method to create an adapter class for bridging between Hadoop 2.0 and 2.1.
-   */
-  private static <T> T createAdapter(Class<T> clz) {
-    String className = clz.getPackage().getName();
-
-    if (isHadoop20()) {
-      className += ".Hadoop20" + clz.getSimpleName();
-    } else {
-      className += ".Hadoop21" + clz.getSimpleName();
-    }
-
-    try {
-      return (T) Class.forName(className).newInstance();
-    } catch (Exception e) {
-      throw Throwables.propagate(e);
-    }
-  }
-
-  private static YarnLocalResource setLocalResourceType(YarnLocalResource localResource, LocalFile localFile) {
-    if (localFile.isArchive()) {
-      if (localFile.getPattern() == null) {
-        localResource.setType(LocalResourceType.ARCHIVE);
-      } else {
-        localResource.setType(LocalResourceType.PATTERN);
-        localResource.setPattern(localFile.getPattern());
-      }
-    } else {
-      localResource.setType(LocalResourceType.FILE);
-    }
-    return localResource;
-  }
-
-  private static <T> Map<String, T> transformResource(Map<String, YarnLocalResource> from) {
-    return Maps.transformValues(from, new Function<YarnLocalResource, T>() {
-      @Override
-      public T apply(YarnLocalResource resource) {
-        return resource.getLocalResource();
-      }
-    });
-  }
-
-  /**
-   * Gets the Hadoop FileSystem from LocationFactory.
-   */
-  private static FileSystem getFileSystem(LocationFactory locationFactory) {
-    if (locationFactory instanceof HDFSLocationFactory) {
-      return ((HDFSLocationFactory) locationFactory).getFileSystem();
-    }
-    if (locationFactory instanceof ForwardingLocationFactory) {
-      return getFileSystem(((ForwardingLocationFactory) locationFactory).getDelegate());
-    }
-    return null;
-  }
-
-  private YarnUtils() {
-  }
-}
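
A sketch of the credentials round trip these helpers support when shipping delegation tokens to containers; the
config and locationFactory references are assumptions, and the IOExceptions thrown here are left unhandled.

Credentials credentials = new Credentials();
YarnUtils.addDelegationTokens(config, locationFactory, credentials);   // no-op when security is disabled
ByteBuffer encoded = YarnUtils.encodeCredentials(credentials);
// The encoded buffer travels inside the container launch context ...
Credentials decoded = YarnUtils.decodeCredentials(encoded);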

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/main/java/org/apache/twill/internal/yarn/package-info.java
----------------------------------------------------------------------
diff --git a/yarn/src/main/java/org/apache/twill/internal/yarn/package-info.java b/yarn/src/main/java/org/apache/twill/internal/yarn/package-info.java
deleted file mode 100644
index d6ec9f7..0000000
--- a/yarn/src/main/java/org/apache/twill/internal/yarn/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * This package contains classes for interacting with YARN.
- */
-package org.apache.twill.internal.yarn;

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/main/java/org/apache/twill/yarn/LocationSecureStoreUpdater.java
----------------------------------------------------------------------
diff --git a/yarn/src/main/java/org/apache/twill/yarn/LocationSecureStoreUpdater.java b/yarn/src/main/java/org/apache/twill/yarn/LocationSecureStoreUpdater.java
deleted file mode 100644
index 4d20c9c..0000000
--- a/yarn/src/main/java/org/apache/twill/yarn/LocationSecureStoreUpdater.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.yarn;
-
-import org.apache.twill.api.RunId;
-import org.apache.twill.api.SecureStore;
-import org.apache.twill.api.SecureStoreUpdater;
-import org.apache.twill.filesystem.LocationFactory;
-import org.apache.twill.internal.yarn.YarnUtils;
-import com.google.common.base.Throwables;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.security.Credentials;
-
-import java.io.IOException;
-
-/**
- * Package-private class for updating the location-related secure store.
- */
-final class LocationSecureStoreUpdater implements SecureStoreUpdater {
-
-  private final Configuration configuration;
-  private final LocationFactory locationFactory;
-
-  LocationSecureStoreUpdater(Configuration configuration, LocationFactory locationFactory) {
-    this.configuration = configuration;
-    this.locationFactory = locationFactory;
-  }
-
-  @Override
-  public SecureStore update(String application, RunId runId) {
-    try {
-      Credentials credentials = new Credentials();
-      YarnUtils.addDelegationTokens(configuration, locationFactory, credentials);
-      return YarnSecureStore.create(credentials);
-    } catch (IOException e) {
-      throw Throwables.propagate(e);
-    }
-  }
-}
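
The deleted updater above simply rebuilds a Credentials object with fresh HDFS delegation tokens each time update() is called. A minimal sketch of driving such an updater on a schedule, using only the SecureStoreUpdater interface shown in this diff; the class name, the executor wiring, and the refresh period are illustrative and not part of the original patch:

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    import org.apache.twill.api.RunId;
    import org.apache.twill.api.SecureStore;
    import org.apache.twill.api.SecureStoreUpdater;

    final class SecureStoreRefreshSketch {

      // Re-creates the secure store periodically so fresh delegation tokens can be
      // pushed to the running application before the old ones expire.
      static void scheduleRefresh(final SecureStoreUpdater updater, final String application,
                                  final RunId runId, long periodHours) {
        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
        scheduler.scheduleWithFixedDelay(new Runnable() {
          @Override
          public void run() {
            SecureStore store = updater.update(application, runId);
            // Hand "store" to whatever pushes credentials to the live run.
            System.out.println("Refreshed secure store for " + application + "/" + runId);
          }
        }, 0, periodHours, TimeUnit.HOURS);
      }

      private SecureStoreRefreshSketch() { }
    }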

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/main/java/org/apache/twill/yarn/ResourceReportClient.java
----------------------------------------------------------------------
diff --git a/yarn/src/main/java/org/apache/twill/yarn/ResourceReportClient.java b/yarn/src/main/java/org/apache/twill/yarn/ResourceReportClient.java
deleted file mode 100644
index 2974c3f..0000000
--- a/yarn/src/main/java/org/apache/twill/yarn/ResourceReportClient.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.yarn;
-
-import org.apache.twill.api.ResourceReport;
-import org.apache.twill.internal.json.ResourceReportAdapter;
-import com.google.common.base.Charsets;
-import com.google.common.io.Closeables;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.BufferedReader;
-import java.io.InputStreamReader;
-import java.io.Reader;
-import java.net.URL;
-
-/**
- * Package private class to get {@link ResourceReport} from the application master.
- */
-final class ResourceReportClient {
-  private static final Logger LOG = LoggerFactory.getLogger(ResourceReportClient.class);
-
-  private final ResourceReportAdapter reportAdapter;
-  private final URL resourceUrl;
-
-  ResourceReportClient(URL resourceUrl) {
-    this.resourceUrl = resourceUrl;
-    this.reportAdapter = ResourceReportAdapter.create();
-  }
-
-  /**
-   * Returns the resource usage of the application fetched from the resource endpoint URL.
-   * @return A {@link ResourceReport} or {@code null} if failed to fetch the report.
-   */
-  public ResourceReport get() {
-    try {
-      Reader reader = new BufferedReader(new InputStreamReader(resourceUrl.openStream(), Charsets.UTF_8));
-      try {
-        return reportAdapter.fromJson(reader);
-      } finally {
-        Closeables.closeQuietly(reader);
-      }
-    } catch (Exception e) {
-      LOG.error("Exception getting resource report from {}.", resourceUrl, e);
-      return null;
-    }
-  }
-}
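
Since ResourceReportClient is package private and just decodes the JSON served by the application master's tracker endpoint, a caller inside org.apache.twill.yarn could use it as sketched below; the host, port, and path values are placeholders:

    import java.net.URL;

    import org.apache.twill.api.ResourceReport;

    final class ResourceReportPollSketch {

      // Fetches the current resource report once; get() returns null if the
      // endpoint is unreachable or the JSON cannot be decoded.
      static void printReport(String trackerHost, int trackerPort, String trackerPath) throws Exception {
        URL url = new URL("http", trackerHost, trackerPort, trackerPath);
        ResourceReportClient client = new ResourceReportClient(url);

        ResourceReport report = client.get();
        if (report == null) {
          System.out.println("Resource report not available yet");
        } else {
          System.out.println("Fetched resource report: " + report);
        }
      }

      private ResourceReportPollSketch() { }
    }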

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/main/java/org/apache/twill/yarn/YarnSecureStore.java
----------------------------------------------------------------------
diff --git a/yarn/src/main/java/org/apache/twill/yarn/YarnSecureStore.java b/yarn/src/main/java/org/apache/twill/yarn/YarnSecureStore.java
deleted file mode 100644
index e6f461a..0000000
--- a/yarn/src/main/java/org/apache/twill/yarn/YarnSecureStore.java
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.yarn;
-
-import org.apache.twill.api.SecureStore;
-import org.apache.hadoop.security.Credentials;
-
-/**
- * A {@link SecureStore} for hadoop credentials.
- */
-public final class YarnSecureStore implements SecureStore {
-
-  private final Credentials credentials;
-
-  public static SecureStore create(Credentials credentials) {
-    return new YarnSecureStore(credentials);
-  }
-
-  private YarnSecureStore(Credentials credentials) {
-    this.credentials = credentials;
-  }
-
-  @Override
-  public Credentials getStore() {
-    return credentials;
-  }
-}
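
A short usage sketch for the store above, assuming the delegation tokens have already been obtained and added to the current user's credentials; the helper name is illustrative:

    import java.io.IOException;

    import org.apache.hadoop.security.Credentials;
    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.twill.api.SecureStore;

    public final class YarnSecureStoreSketch {

      // Copies the current user's Hadoop credentials into the SecureStore
      // abstraction that Twill passes around.
      public static SecureStore fromCurrentUser() throws IOException {
        Credentials credentials = new Credentials(UserGroupInformation.getCurrentUser().getCredentials());
        return YarnSecureStore.create(credentials);
      }

      private YarnSecureStoreSketch() { }
    }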

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/main/java/org/apache/twill/yarn/YarnTwillController.java
----------------------------------------------------------------------
diff --git a/yarn/src/main/java/org/apache/twill/yarn/YarnTwillController.java b/yarn/src/main/java/org/apache/twill/yarn/YarnTwillController.java
deleted file mode 100644
index 4c240fb..0000000
--- a/yarn/src/main/java/org/apache/twill/yarn/YarnTwillController.java
+++ /dev/null
@@ -1,208 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.yarn;
-
-import org.apache.twill.api.ResourceReport;
-import org.apache.twill.api.RunId;
-import org.apache.twill.api.TwillController;
-import org.apache.twill.api.logging.LogHandler;
-import org.apache.twill.internal.AbstractTwillController;
-import org.apache.twill.internal.Constants;
-import org.apache.twill.internal.ProcessController;
-import org.apache.twill.internal.appmaster.TrackerService;
-import org.apache.twill.internal.state.StateNode;
-import org.apache.twill.internal.state.SystemMessages;
-import org.apache.twill.internal.yarn.YarnApplicationReport;
-import org.apache.twill.zookeeper.NodeData;
-import org.apache.twill.zookeeper.ZKClient;
-import com.google.common.base.Throwables;
-import com.google.common.collect.ImmutableList;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.Uninterruptibles;
-import org.apache.commons.lang.time.StopWatch;
-import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
-import org.apache.hadoop.yarn.api.records.YarnApplicationState;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.net.URI;
-import java.net.URL;
-import java.util.concurrent.Callable;
-import java.util.concurrent.TimeUnit;
-
-/**
- * A {@link org.apache.twill.api.TwillController} that controls applications running on Hadoop YARN.
- */
-final class YarnTwillController extends AbstractTwillController implements TwillController {
-
-  private static final Logger LOG = LoggerFactory.getLogger(YarnTwillController.class);
-
-  private final Callable<ProcessController<YarnApplicationReport>> startUp;
-  private ProcessController<YarnApplicationReport> processController;
-  private ResourceReportClient resourcesClient;
-
-  /**
-   * Creates an instance without any {@link LogHandler}.
-   */
-  YarnTwillController(RunId runId, ZKClient zkClient, Callable<ProcessController<YarnApplicationReport>> startUp) {
-    this(runId, zkClient, ImmutableList.<LogHandler>of(), startUp);
-  }
-
-  YarnTwillController(RunId runId, ZKClient zkClient, Iterable<LogHandler> logHandlers,
-                      Callable<ProcessController<YarnApplicationReport>> startUp) {
-    super(runId, zkClient, logHandlers);
-    this.startUp = startUp;
-  }
-
-
-  /**
-   * Sends a message to the application to notify that the secure store has been updated.
-   */
-  ListenableFuture<Void> secureStoreUpdated() {
-    return sendMessage(SystemMessages.SECURE_STORE_UPDATED, null);
-  }
-
-  @Override
-  protected void doStartUp() {
-    super.doStartUp();
-
-    // Submit and poll the status of the yarn application
-    try {
-      processController = startUp.call();
-
-      YarnApplicationReport report = processController.getReport();
-      LOG.debug("Application {} submit", report.getApplicationId());
-
-      YarnApplicationState state = report.getYarnApplicationState();
-      StopWatch stopWatch = new StopWatch();
-      stopWatch.start();
-      stopWatch.split();
-      long maxTime = TimeUnit.MILLISECONDS.convert(Constants.APPLICATION_MAX_START_SECONDS, TimeUnit.SECONDS);
-
-      LOG.info("Checking yarn application status");
-      while (!hasRun(state) && stopWatch.getSplitTime() < maxTime) {
-        report = processController.getReport();
-        state = report.getYarnApplicationState();
-        LOG.debug("Yarn application status: {}", state);
-        TimeUnit.SECONDS.sleep(1);
-        stopWatch.split();
-      }
-      LOG.info("Yarn application is in state {}", state);
-      if (state != YarnApplicationState.RUNNING) {
-        LOG.info("Yarn application is not in running state. Shutting down controller.",
-                 Constants.APPLICATION_MAX_START_SECONDS);
-        forceShutDown();
-      } else {
-        try {
-          URL resourceUrl = URI.create(String.format("http://%s:%d", report.getHost(), report.getRpcPort()))
-                               .resolve(TrackerService.PATH).toURL();
-          resourcesClient = new ResourceReportClient(resourceUrl);
-        } catch (IOException e) {
-          resourcesClient = null;
-        }
-      }
-    } catch (Exception e) {
-      throw Throwables.propagate(e);
-    }
-  }
-
-  @Override
-  protected void doShutDown() {
-    if (processController == null) {
-      LOG.warn("No process controller for application that is not submitted.");
-      return;
-    }
-
-    // Wait for the stop message being processed
-    try {
-      Uninterruptibles.getUninterruptibly(getStopMessageFuture(),
-                                          Constants.APPLICATION_MAX_STOP_SECONDS, TimeUnit.SECONDS);
-    } catch (Exception e) {
-      LOG.error("Failed to wait for stop message being processed.", e);
-      // Kill the application through yarn
-      kill();
-    }
-
-    // Poll application status from yarn
-    try {
-      StopWatch stopWatch = new StopWatch();
-      stopWatch.start();
-      stopWatch.split();
-      long maxTime = TimeUnit.MILLISECONDS.convert(Constants.APPLICATION_MAX_STOP_SECONDS, TimeUnit.SECONDS);
-
-      YarnApplicationReport report = processController.getReport();
-      FinalApplicationStatus finalStatus = report.getFinalApplicationStatus();
-      while (finalStatus == FinalApplicationStatus.UNDEFINED && stopWatch.getSplitTime() < maxTime) {
-        LOG.debug("Yarn application final status for {} {}", report.getApplicationId(), finalStatus);
-        TimeUnit.SECONDS.sleep(1);
-        stopWatch.split();
-        finalStatus = processController.getReport().getFinalApplicationStatus();
-      }
-      LOG.debug("Yarn application final status is {}", finalStatus);
-
-      // Application not finished after max stop time, kill the application
-      if (finalStatus == FinalApplicationStatus.UNDEFINED) {
-        kill();
-      }
-    } catch (Exception e) {
-      LOG.warn("Exception while waiting for application report: {}", e.getMessage(), e);
-      kill();
-    }
-
-    super.doShutDown();
-  }
-
-  @Override
-  public void kill() {
-    if (processController != null) {
-      YarnApplicationReport report = processController.getReport();
-      LOG.info("Killing application {}", report.getApplicationId());
-      processController.cancel();
-    } else {
-      LOG.warn("No process controller for application that is not submitted.");
-    }
-  }
-
-  @Override
-  protected void instanceNodeUpdated(NodeData nodeData) {
-
-  }
-
-  @Override
-  protected void stateNodeUpdated(StateNode stateNode) {
-
-  }
-
-  private boolean hasRun(YarnApplicationState state) {
-    switch (state) {
-      case RUNNING:
-      case FINISHED:
-      case FAILED:
-      case KILLED:
-        return true;
-    }
-    return false;
-  }
-
-  @Override
-  public ResourceReport getResourceReport() {
-    // in case the user calls this before starting, return null
-    return (resourcesClient == null) ? null : resourcesClient.get();
-  }
-}
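
Both doStartUp() and doShutDown() above follow the same pattern: poll the YARN application report once per second until a target state is reached or a maximum wait elapses, then fall back to forceShutDown() or kill(). A self-contained sketch of that polling pattern, with illustrative names:

    import java.util.concurrent.Callable;
    import java.util.concurrent.TimeUnit;

    final class PollUntilSketch {

      // Polls the condition once per second until it returns true or the deadline
      // passes; returns the final outcome so the caller can decide to kill the app.
      static boolean poll(Callable<Boolean> condition, long maxSeconds) throws Exception {
        long deadline = System.nanoTime() + TimeUnit.SECONDS.toNanos(maxSeconds);
        while (System.nanoTime() < deadline) {
          if (condition.call()) {
            return true;
          }
          TimeUnit.SECONDS.sleep(1);
        }
        return condition.call();
      }

      private PollUntilSketch() { }
    }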

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/main/java/org/apache/twill/yarn/YarnTwillControllerFactory.java
----------------------------------------------------------------------
diff --git a/yarn/src/main/java/org/apache/twill/yarn/YarnTwillControllerFactory.java b/yarn/src/main/java/org/apache/twill/yarn/YarnTwillControllerFactory.java
deleted file mode 100644
index 11c2ae6..0000000
--- a/yarn/src/main/java/org/apache/twill/yarn/YarnTwillControllerFactory.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.yarn;
-
-import org.apache.twill.api.RunId;
-import org.apache.twill.api.logging.LogHandler;
-import org.apache.twill.internal.ProcessController;
-import org.apache.twill.internal.yarn.YarnApplicationReport;
-
-import java.util.concurrent.Callable;
-
-/**
- * Factory for creating {@link YarnTwillController}.
- */
-interface YarnTwillControllerFactory {
-
-  YarnTwillController create(RunId runId, Iterable<LogHandler> logHandlers,
-                             Callable<ProcessController<YarnApplicationReport>> startUp);
-}


[14/28] Making maven site works.

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/main/hadoop20/org/apache/twill/internal/yarn/Hadoop20YarnLocalResource.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/main/hadoop20/org/apache/twill/internal/yarn/Hadoop20YarnLocalResource.java b/twill-yarn/src/main/hadoop20/org/apache/twill/internal/yarn/Hadoop20YarnLocalResource.java
new file mode 100644
index 0000000..b327b94
--- /dev/null
+++ b/twill-yarn/src/main/hadoop20/org/apache/twill/internal/yarn/Hadoop20YarnLocalResource.java
@@ -0,0 +1,101 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.yarn;
+
+import org.apache.hadoop.yarn.api.records.LocalResource;
+import org.apache.hadoop.yarn.api.records.LocalResourceType;
+import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
+import org.apache.hadoop.yarn.api.records.URL;
+import org.apache.hadoop.yarn.util.Records;
+
+/**
+ *
+ */
+public final class Hadoop20YarnLocalResource implements YarnLocalResource {
+
+  private final LocalResource localResource;
+
+  public Hadoop20YarnLocalResource() {
+    this.localResource = Records.newRecord(LocalResource.class);
+  }
+
+  @Override
+  public <T> T getLocalResource() {
+    return (T) localResource;
+  }
+
+  @Override
+  public URL getResource() {
+    return localResource.getResource();
+  }
+
+  @Override
+  public void setResource(URL resource) {
+    localResource.setResource(resource);
+  }
+
+  @Override
+  public long getSize() {
+    return localResource.getSize();
+  }
+
+  @Override
+  public void setSize(long size) {
+    localResource.setSize(size);
+  }
+
+  @Override
+  public long getTimestamp() {
+    return localResource.getTimestamp();
+  }
+
+  @Override
+  public void setTimestamp(long timestamp) {
+    localResource.setTimestamp(timestamp);
+  }
+
+  @Override
+  public LocalResourceType getType() {
+    return localResource.getType();
+  }
+
+  @Override
+  public void setType(LocalResourceType type) {
+    localResource.setType(type);
+  }
+
+  @Override
+  public LocalResourceVisibility getVisibility() {
+    return localResource.getVisibility();
+  }
+
+  @Override
+  public void setVisibility(LocalResourceVisibility visibility) {
+    localResource.setVisibility(visibility);
+  }
+
+  @Override
+  public String getPattern() {
+    return localResource.getPattern();
+  }
+
+  @Override
+  public void setPattern(String pattern) {
+    localResource.setPattern(pattern);
+  }
+}
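
The adapter above is a thin wrapper over a Records-created LocalResource. A sketch of filling one in for a jar already copied to HDFS; it assumes the sketch lives in the same package as the adapter and that ConverterUtils.getYarnUrlFromPath is available in the targeted Hadoop version:

    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.yarn.api.records.LocalResourceType;
    import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
    import org.apache.hadoop.yarn.util.ConverterUtils;

    public final class LocalResourceSketch {

      // Describes a jar on HDFS so YARN can localize it into the container
      // working directory. Size and timestamp must match the file exactly,
      // otherwise localization is rejected.
      public static YarnLocalResource forJar(FileSystem fs, Path jarPath) throws Exception {
        FileStatus status = fs.getFileStatus(jarPath);

        YarnLocalResource resource = new Hadoop20YarnLocalResource();
        resource.setResource(ConverterUtils.getYarnUrlFromPath(fs.makeQualified(jarPath)));
        resource.setSize(status.getLen());
        resource.setTimestamp(status.getModificationTime());
        resource.setType(LocalResourceType.FILE);
        resource.setVisibility(LocalResourceVisibility.APPLICATION);
        return resource;
      }

      private LocalResourceSketch() { }
    }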

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/main/hadoop20/org/apache/twill/internal/yarn/Hadoop20YarnNMClient.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/main/hadoop20/org/apache/twill/internal/yarn/Hadoop20YarnNMClient.java b/twill-yarn/src/main/hadoop20/org/apache/twill/internal/yarn/Hadoop20YarnNMClient.java
new file mode 100644
index 0000000..98ecc67
--- /dev/null
+++ b/twill-yarn/src/main/hadoop20/org/apache/twill/internal/yarn/Hadoop20YarnNMClient.java
@@ -0,0 +1,121 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.yarn;
+
+import org.apache.twill.common.Cancellable;
+import com.google.common.base.Throwables;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.yarn.api.ContainerManager;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.StopContainerRequest;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+import org.apache.hadoop.yarn.ipc.YarnRPC;
+import org.apache.hadoop.yarn.util.Records;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.net.InetSocketAddress;
+
+/**
+ *
+ */
+public final class Hadoop20YarnNMClient implements YarnNMClient {
+
+  private static final Logger LOG = LoggerFactory.getLogger(Hadoop20YarnNMClient.class);
+
+  private final YarnRPC yarnRPC;
+  private final Configuration yarnConf;
+
+  public Hadoop20YarnNMClient(YarnRPC yarnRPC, Configuration yarnConf) {
+    this.yarnRPC = yarnRPC;
+    this.yarnConf = yarnConf;
+  }
+
+  @Override
+  public Cancellable start(YarnContainerInfo containerInfo, YarnLaunchContext launchContext) {
+    ContainerLaunchContext context = launchContext.getLaunchContext();
+    context.setUser(System.getProperty("user.name"));
+
+    Container container = containerInfo.getContainer();
+
+    context.setContainerId(container.getId());
+    context.setResource(container.getResource());
+
+    StartContainerRequest startRequest = Records.newRecord(StartContainerRequest.class);
+    startRequest.setContainerLaunchContext(context);
+
+    ContainerManager manager = connectContainerManager(container);
+    try {
+      manager.startContainer(startRequest);
+      return new ContainerTerminator(container, manager);
+    } catch (YarnRemoteException e) {
+      LOG.error("Error in launching process", e);
+      throw Throwables.propagate(e);
+    }
+
+  }
+
+  /**
+   * Helper to connect to container manager (node manager).
+   */
+  private ContainerManager connectContainerManager(Container container) {
+    String cmIpPortStr = String.format("%s:%d", container.getNodeId().getHost(), container.getNodeId().getPort());
+    InetSocketAddress cmAddress = NetUtils.createSocketAddr(cmIpPortStr);
+    return ((ContainerManager) yarnRPC.getProxy(ContainerManager.class, cmAddress, yarnConf));
+  }
+
+  private static final class ContainerTerminator implements Cancellable {
+
+    private final Container container;
+    private final ContainerManager manager;
+
+    private ContainerTerminator(Container container, ContainerManager manager) {
+      this.container = container;
+      this.manager = manager;
+    }
+
+    @Override
+    public void cancel() {
+      LOG.info("Request to stop container {}.", container.getId());
+      StopContainerRequest stopRequest = Records.newRecord(StopContainerRequest.class);
+      stopRequest.setContainerId(container.getId());
+      try {
+        manager.stopContainer(stopRequest);
+        boolean completed = false;
+        while (!completed) {
+          GetContainerStatusRequest statusRequest = Records.newRecord(GetContainerStatusRequest.class);
+          statusRequest.setContainerId(container.getId());
+          GetContainerStatusResponse statusResponse = manager.getContainerStatus(statusRequest);
+          LOG.info("Container status: {} {}", statusResponse.getStatus(), statusResponse.getStatus().getDiagnostics());
+
+          completed = (statusResponse.getStatus().getState() == ContainerState.COMPLETE);
+        }
+        LOG.info("Container {} stopped.", container.getId());
+      } catch (YarnRemoteException e) {
+        LOG.error("Fail to stop container {}", container.getId(), e);
+        throw Throwables.propagate(e);
+      }
+    }
+  }
+}
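
A usage sketch for the client above: start() launches the process on the allocated container and hands back a Cancellable whose cancel() stops the container and blocks until it reports COMPLETE. The containerInfo and launchContext parameters are assumed to be prepared by the application master's allocation path:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;
    import org.apache.hadoop.yarn.ipc.YarnRPC;
    import org.apache.twill.common.Cancellable;

    public final class NMClientSketch {

      public static Cancellable launch(YarnContainerInfo containerInfo, YarnLaunchContext launchContext) {
        Configuration conf = new YarnConfiguration();
        Hadoop20YarnNMClient nmClient = new Hadoop20YarnNMClient(YarnRPC.create(conf), conf);

        Cancellable processHandle = nmClient.start(containerInfo, launchContext);
        // Later, when the runnable should be stopped:
        // processHandle.cancel();   // blocks until the container state is COMPLETE
        return processHandle;
      }

      private NMClientSketch() { }
    }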

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/main/hadoop20/org/apache/twill/internal/yarn/ports/AMRMClient.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/main/hadoop20/org/apache/twill/internal/yarn/ports/AMRMClient.java b/twill-yarn/src/main/hadoop20/org/apache/twill/internal/yarn/ports/AMRMClient.java
new file mode 100644
index 0000000..26b6fa2
--- /dev/null
+++ b/twill-yarn/src/main/hadoop20/org/apache/twill/internal/yarn/ports/AMRMClient.java
@@ -0,0 +1,149 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.yarn.ports;
+
+import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+import org.apache.hadoop.yarn.service.Service;
+
+/**
+ * Ported from Apache Hadoop YARN.
+ */
+public interface AMRMClient extends Service {
+
+  /**
+   * Value used to define no locality.
+   */
+  static final String ANY = "*";
+
+  /**
+   * Object to represent container request for resources.
+   * Resources may be localized to nodes and racks.
+   * Resources may be assigned priorities.
+   * Can ask for multiple containers of a given type.
+   */
+  public static class ContainerRequest {
+    Resource capability;
+    String[] hosts;
+    String[] racks;
+    Priority priority;
+    int containerCount;
+
+    public ContainerRequest(Resource capability, String[] hosts,
+                            String[] racks, Priority priority, int containerCount) {
+      this.capability = capability;
+      this.hosts = (hosts != null ? hosts.clone() : null);
+      this.racks = (racks != null ? racks.clone() : null);
+      this.priority = priority;
+      this.containerCount = containerCount;
+    }
+    public String toString() {
+      StringBuilder sb = new StringBuilder();
+      sb.append("Capability[").append(capability).append("]");
+      sb.append("Priority[").append(priority).append("]");
+      sb.append("ContainerCount[").append(containerCount).append("]");
+      return sb.toString();
+    }
+  }
+
+  /**
+   * Register the application master. This must be called before any
+   * other interaction
+   * @param appHostName Name of the host on which master is running
+   * @param appHostPort Port master is listening on
+   * @param appTrackingUrl URL at which the master info can be seen
+   * @return <code>RegisterApplicationMasterResponse</code>
+   * @throws org.apache.hadoop.yarn.exceptions.YarnRemoteException
+   */
+  public RegisterApplicationMasterResponse
+  registerApplicationMaster(String appHostName,
+                            int appHostPort,
+                            String appTrackingUrl)
+    throws YarnRemoteException;
+
+  /**
+   * Request additional containers and receive new container allocations.
+   * Requests made via <code>addContainerRequest</code> are sent to the
+   * <code>ResourceManager</code>. New containers assigned to the master are
+   * retrieved. Status of completed containers and node health updates are
+   * also retrieved.
+   * This also doubles as a heartbeat to the ResourceManager and must be
+   * made periodically.
+   * The call may not always return any new allocations of containers.
+   * App should not make concurrent allocate requests. May cause request loss.
+   * @param progressIndicator Indicates progress made by the master
+   * @return the response of the allocate request
+   * @throws YarnRemoteException
+   */
+  public AllocationResponse allocate(float progressIndicator)
+    throws YarnRemoteException;
+
+  /**
+   * Unregister the Application Master. This must be called in the end.
+   * @param appStatus Success/Failure status of the master
+   * @param appMessage Diagnostics message on failure
+   * @param appTrackingUrl New URL to get master info
+   * @throws YarnRemoteException
+   */
+  public void unregisterApplicationMaster(FinalApplicationStatus appStatus,
+                                          String appMessage,
+                                          String appTrackingUrl)
+    throws YarnRemoteException;
+
+  /**
+   * Request containers for resources before calling <code>allocate</code>.
+   * @param req Resource request
+   */
+  public void addContainerRequest(ContainerRequest req);
+
+  /**
+   * Remove a previous container request. The request may already have been sent to the
+   * ResourceManager, so even after removing it the app must be prepared to receive an
+   * allocation for the removed request.
+   * @param req Resource request
+   */
+  public void removeContainerRequest(ContainerRequest req);
+
+  /**
+   * Release containers assigned by the Resource Manager. If the app cannot use
+   * the container or wants to give up the container then it can release it.
+   * The app needs to make new requests for the released resource capability if
+   * it still needs it, for example when it has released non-local resources.
+   * @param containerId
+   */
+  public void releaseAssignedContainer(ContainerId containerId);
+
+  /**
+   * Get the currently available resources in the cluster.
+   * A valid value is available after a call to allocate has been made
+   * @return Currently available resources
+   */
+  public Resource getClusterAvailableResources();
+
+  /**
+   * Get the current number of nodes in the cluster.
+   * A valid value is available after a call to allocate has been made
+   * @return Current number of nodes in the cluster
+   */
+  public int getClusterNodeCount();
+}
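
The javadoc above spells out the protocol: register once, describe resource needs with addContainerRequest, heartbeat through allocate() to pick up assignments, and unregister at the end. A condensed sketch of that lifecycle against this ported interface, assuming the client has already been init()'ed and start()'ed and that the sketch sits next to these classes:

    import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
    import org.apache.hadoop.yarn.api.records.Priority;
    import org.apache.hadoop.yarn.api.records.Resource;
    import org.apache.hadoop.yarn.util.Records;

    public final class AMRMLifecycleSketch {

      public static void run(AMRMClient client, String host, int port, String trackingUrl) throws Exception {
        client.registerApplicationMaster(host, port, trackingUrl);

        Resource capability = Records.newRecord(Resource.class);
        capability.setMemory(512);
        Priority priority = Records.newRecord(Priority.class);
        priority.setPriority(0);

        // Ask for two containers anywhere in the cluster (hosts and racks left null).
        client.addContainerRequest(new AMRMClient.ContainerRequest(capability, null, null, priority, 2));

        // allocate() doubles as the heartbeat; real code calls it periodically and
        // inspects each response for newly assigned containers.
        AllocationResponse response = client.allocate(0.0f);
        System.out.println("Allocated containers: " + response.getAllocatedContainers().size());

        client.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED, null, trackingUrl);
      }

      private AMRMLifecycleSketch() { }
    }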

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/main/hadoop20/org/apache/twill/internal/yarn/ports/AMRMClientImpl.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/main/hadoop20/org/apache/twill/internal/yarn/ports/AMRMClientImpl.java b/twill-yarn/src/main/hadoop20/org/apache/twill/internal/yarn/ports/AMRMClientImpl.java
new file mode 100644
index 0000000..c1bd75a
--- /dev/null
+++ b/twill-yarn/src/main/hadoop20/org/apache/twill/internal/yarn/ports/AMRMClientImpl.java
@@ -0,0 +1,412 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.yarn.ports;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.yarn.YarnException;
+import org.apache.hadoop.yarn.api.AMRMProtocol;
+import org.apache.hadoop.yarn.api.ApplicationConstants;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.ipc.YarnRPC;
+import org.apache.hadoop.yarn.service.AbstractService;
+import org.apache.hadoop.yarn.util.BuilderUtils;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.security.PrivilegedAction;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+import java.util.TreeSet;
+
+/**
+ * Ported from Apache Hadoop YARN.
+ */
+public final class AMRMClientImpl extends AbstractService implements AMRMClient {
+
+  private static final Log LOG = LogFactory.getLog(AMRMClientImpl.class);
+
+  private final RecordFactory recordFactory =
+    RecordFactoryProvider.getRecordFactory(null);
+
+  private int lastResponseId = 0;
+
+  protected AMRMProtocol rmClient;
+  protected final ApplicationAttemptId appAttemptId;
+  protected Resource clusterAvailableResources;
+  protected int clusterNodeCount;
+
+  //Key -> Priority
+  //Value -> Map
+  //Key->ResourceName (e.g., hostname, rackname, *)
+  //Value->Map
+  //Key->Resource Capability
+  //Value->ResourceRequest
+  protected final
+  Map<Priority, Map<String, Map<Resource, ResourceRequest>>>
+    remoteRequestsTable =
+    new TreeMap<Priority, Map<String, Map<Resource, ResourceRequest>>>();
+
+  protected final Set<ResourceRequest> ask = new TreeSet<ResourceRequest>(
+    new org.apache.hadoop.yarn.util.BuilderUtils.ResourceRequestComparator());
+  protected final Set<ContainerId> release = new TreeSet<ContainerId>();
+
+  public AMRMClientImpl(ApplicationAttemptId appAttemptId) {
+    super(AMRMClientImpl.class.getName());
+    this.appAttemptId = appAttemptId;
+  }
+
+  @Override
+  public synchronized void init(Configuration conf) {
+    super.init(conf);
+  }
+
+  @Override
+  public synchronized void start() {
+    final YarnConfiguration conf = new YarnConfiguration(getConfig());
+    final YarnRPC rpc = YarnRPC.create(conf);
+    final InetSocketAddress rmAddress = conf.getSocketAddr(
+      YarnConfiguration.RM_SCHEDULER_ADDRESS,
+      YarnConfiguration.DEFAULT_RM_SCHEDULER_ADDRESS,
+      YarnConfiguration.DEFAULT_RM_SCHEDULER_PORT);
+
+    UserGroupInformation currentUser;
+    try {
+      currentUser = UserGroupInformation.getCurrentUser();
+    } catch (IOException e) {
+      throw new YarnException(e);
+    }
+
+    if (UserGroupInformation.isSecurityEnabled()) {
+      String tokenURLEncodedStr = System.getenv().get(
+        ApplicationConstants.APPLICATION_MASTER_TOKEN_ENV_NAME);
+      Token<? extends TokenIdentifier> token = new Token<TokenIdentifier>();
+
+      try {
+        token.decodeFromUrlString(tokenURLEncodedStr);
+      } catch (IOException e) {
+        throw new YarnException(e);
+      }
+
+      SecurityUtil.setTokenService(token, rmAddress);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("AppMasterToken is " + token);
+      }
+      currentUser.addToken(token);
+    }
+
+    rmClient = currentUser.doAs(new PrivilegedAction<AMRMProtocol>() {
+      @Override
+      public AMRMProtocol run() {
+        return (AMRMProtocol) rpc.getProxy(AMRMProtocol.class, rmAddress,
+                                           conf);
+      }
+    });
+    LOG.debug("Connecting to ResourceManager at " + rmAddress);
+    super.start();
+  }
+
+  @Override
+  public synchronized void stop() {
+    if (this.rmClient != null) {
+      RPC.stopProxy(this.rmClient);
+    }
+    super.stop();
+  }
+
+  @Override
+  public RegisterApplicationMasterResponse registerApplicationMaster(
+    String appHostName, int appHostPort, String appTrackingUrl)
+    throws YarnRemoteException {
+    // do this only once ???
+    RegisterApplicationMasterRequest request = recordFactory
+      .newRecordInstance(RegisterApplicationMasterRequest.class);
+    synchronized (this) {
+      request.setApplicationAttemptId(appAttemptId);
+    }
+    request.setHost(appHostName);
+    request.setRpcPort(appHostPort);
+    if (appTrackingUrl != null) {
+      request.setTrackingUrl(appTrackingUrl);
+    }
+    RegisterApplicationMasterResponse response = rmClient
+      .registerApplicationMaster(request);
+    return response;
+  }
+
+  @Override
+  public AllocationResponse allocate(float progressIndicator)
+    throws YarnRemoteException {
+    AllocateResponse allocateResponse = null;
+    ArrayList<ResourceRequest> askList = null;
+    ArrayList<ContainerId> releaseList = null;
+    AllocateRequest allocateRequest = null;
+
+    try {
+      synchronized (this) {
+        askList = new ArrayList<ResourceRequest>(ask);
+        releaseList = new ArrayList<ContainerId>(release);
+        // optimistically clear this collection assuming no RPC failure
+        ask.clear();
+        release.clear();
+        allocateRequest = BuilderUtils
+          .newAllocateRequest(appAttemptId, lastResponseId, progressIndicator,
+                              askList, releaseList);
+      }
+
+      allocateResponse = rmClient.allocate(allocateRequest);
+      AllocationResponse response = AllocationResponses.create(allocateResponse);
+
+      synchronized (this) {
+        // update these on successful RPC
+        clusterNodeCount = allocateResponse.getNumClusterNodes();
+        lastResponseId = response.getResponseId();
+        clusterAvailableResources = response.getAvailableResources();
+      }
+
+      return response;
+    } finally {
+      // TODO how to differentiate remote YARN exception vs error in RPC
+      if (allocateResponse == null) {
+        // We hit an exception in allocate()
+        // Preserve ask and release for next call to allocate()
+        synchronized (this) {
+          release.addAll(releaseList);
+          // Requests could have been added or deleted during call to allocate.
+          // If requests were added/removed then there is nothing to do since
+          // the ResourceRequest object in ask would have the actual new value.
+          // If ask does not have this ResourceRequest then it was unchanged and
+          // so we can add the value back safely.
+          // This assumes that there will no concurrent calls to allocate() and
+          // so we don't have to worry about ask being changed in the
+          // synchronized block at the beginning of this method.
+          for (ResourceRequest oldAsk : askList) {
+            if (!ask.contains(oldAsk)) {
+              ask.add(oldAsk);
+            }
+          }
+        }
+      }
+    }
+  }
+
+  @Override
+  public void unregisterApplicationMaster(FinalApplicationStatus appStatus,
+                                          String appMessage, String appTrackingUrl) throws YarnRemoteException {
+    FinishApplicationMasterRequest request = recordFactory
+      .newRecordInstance(FinishApplicationMasterRequest.class);
+    request.setAppAttemptId(appAttemptId);
+    request.setFinishApplicationStatus(appStatus);
+    if (appMessage != null) {
+      request.setDiagnostics(appMessage);
+    }
+    if (appTrackingUrl != null) {
+      request.setTrackingUrl(appTrackingUrl);
+    }
+    rmClient.finishApplicationMaster(request);
+  }
+
+  @Override
+  public synchronized void addContainerRequest(ContainerRequest req) {
+    // Create resource requests
+    if (req.hosts != null) {
+      for (String host : req.hosts) {
+        addResourceRequest(req.priority, host, req.capability, req.containerCount);
+      }
+    }
+
+    if (req.racks != null) {
+      for (String rack : req.racks) {
+        addResourceRequest(req.priority, rack, req.capability, req.containerCount);
+      }
+    }
+
+    // Off switch
+    addResourceRequest(req.priority, ANY, req.capability, req.containerCount);
+  }
+
+  @Override
+  public synchronized void removeContainerRequest(ContainerRequest req) {
+    // Update resource requests
+    if (req.hosts != null) {
+      for (String hostName : req.hosts) {
+        decResourceRequest(req.priority, hostName, req.capability, req.containerCount);
+      }
+    }
+
+    if (req.racks != null) {
+      for (String rack : req.racks) {
+        decResourceRequest(req.priority, rack, req.capability, req.containerCount);
+      }
+    }
+
+    decResourceRequest(req.priority, ANY, req.capability, req.containerCount);
+  }
+
+  @Override
+  public synchronized void releaseAssignedContainer(ContainerId containerId) {
+    release.add(containerId);
+  }
+
+  @Override
+  public synchronized Resource getClusterAvailableResources() {
+    return clusterAvailableResources;
+  }
+
+  @Override
+  public synchronized int getClusterNodeCount() {
+    return clusterNodeCount;
+  }
+
+  private void addResourceRequestToAsk(ResourceRequest remoteRequest) {
+    // This code looks weird but is needed because of the following scenario.
+    // A ResourceRequest is removed from the remoteRequestTable. A 0 container 
+    // request is added to 'ask' to notify the RM about not needing it any more.
+    // Before the call to allocate, the user now requests more containers. If 
+    // the locations of the 0 size request and the new request are the same
+    // (with the difference being only container count), then the set comparator
+    // will consider both to be the same and not add the new request to ask. So 
+    // we need to check for the "same" request being present and remove it and 
+    // then add it back. The comparator is container count agnostic.
+    // This should happen only rarely but we do need to guard against it.
+    if (ask.contains(remoteRequest)) {
+      ask.remove(remoteRequest);
+    }
+    ask.add(remoteRequest);
+  }
+
+  private void addResourceRequest(Priority priority, String resourceName,
+                                  Resource capability, int containerCount) {
+    Map<String, Map<Resource, ResourceRequest>> remoteRequests =
+      this.remoteRequestsTable.get(priority);
+    if (remoteRequests == null) {
+      remoteRequests = new HashMap<String, Map<Resource, ResourceRequest>>();
+      this.remoteRequestsTable.put(priority, remoteRequests);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Added priority=" + priority);
+      }
+    }
+    Map<Resource, ResourceRequest> reqMap = remoteRequests.get(resourceName);
+    if (reqMap == null) {
+      reqMap = new HashMap<Resource, ResourceRequest>();
+      remoteRequests.put(resourceName, reqMap);
+    }
+    ResourceRequest remoteRequest = reqMap.get(capability);
+    if (remoteRequest == null) {
+      remoteRequest = BuilderUtils.
+        newResourceRequest(priority, resourceName, capability, 0);
+      reqMap.put(capability, remoteRequest);
+    }
+
+    remoteRequest.setNumContainers(remoteRequest.getNumContainers() + containerCount);
+
+    // Note this down for next interaction with ResourceManager
+    addResourceRequestToAsk(remoteRequest);
+
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("addResourceRequest:" + " applicationId="
+                  + appAttemptId + " priority=" + priority.getPriority()
+                  + " resourceName=" + resourceName + " numContainers="
+                  + remoteRequest.getNumContainers() + " #asks=" + ask.size());
+    }
+  }
+
+  private void decResourceRequest(Priority priority, String resourceName,
+                                  Resource capability, int containerCount) {
+    Map<String, Map<Resource, ResourceRequest>> remoteRequests =
+      this.remoteRequestsTable.get(priority);
+
+    if (remoteRequests == null) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Not decrementing resource as priority " + priority
+                    + " is not present in request table");
+      }
+      return;
+    }
+
+    Map<Resource, ResourceRequest> reqMap = remoteRequests.get(resourceName);
+    if (reqMap == null) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Not decrementing resource as " + resourceName
+                    + " is not present in request table");
+      }
+      return;
+    }
+    ResourceRequest remoteRequest = reqMap.get(capability);
+
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("BEFORE decResourceRequest:" + " applicationId="
+                  + appAttemptId + " priority=" + priority.getPriority()
+                  + " resourceName=" + resourceName + " numContainers="
+                  + remoteRequest.getNumContainers() + " #asks=" + ask.size());
+    }
+
+    remoteRequest.
+      setNumContainers(remoteRequest.getNumContainers() - containerCount);
+    if (remoteRequest.getNumContainers() < 0) {
+      // guard against spurious removals
+      remoteRequest.setNumContainers(0);
+    }
+    // Send the ResourceRequest to RM even if is 0 because it needs to override
+    // a previously sent value. If ResourceRequest was not sent previously then
+    // sending 0 ought to be a no-op on RM.
+    addResourceRequestToAsk(remoteRequest);
+
+    // Delete entries from map if no longer needed.
+    if (remoteRequest.getNumContainers() == 0) {
+      reqMap.remove(capability);
+      if (reqMap.size() == 0) {
+        remoteRequests.remove(resourceName);
+      }
+      if (remoteRequests.size() == 0) {
+        remoteRequestsTable.remove(priority);
+      }
+    }
+
+    if (LOG.isDebugEnabled()) {
+      LOG.info("AFTER decResourceRequest:" + " applicationId="
+                 + appAttemptId + " priority=" + priority.getPriority()
+                 + " resourceName=" + resourceName + " numContainers="
+                 + remoteRequest.getNumContainers() + " #asks=" + ask.size());
+    }
+  }
+
+}
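
One detail worth noting in the implementation above: addContainerRequest() fans a single request out into host-level, rack-level, and ANY-level ResourceRequests, so removeContainerRequest() must be called with an equivalent request to decrement all three levels again. A small sketch, with placeholder host and rack names:

    import org.apache.hadoop.yarn.api.records.Priority;
    import org.apache.hadoop.yarn.api.records.Resource;
    import org.apache.hadoop.yarn.util.Records;

    public final class ContainerRequestBookkeepingSketch {

      public static void addThenRemove(AMRMClient client) {
        Resource capability = Records.newRecord(Resource.class);
        capability.setMemory(1024);
        Priority priority = Records.newRecord(Priority.class);
        priority.setPriority(1);

        AMRMClient.ContainerRequest request = new AMRMClient.ContainerRequest(
          capability, new String[] {"host1.example.com"}, new String[] {"/rack1"}, priority, 1);

        client.addContainerRequest(request);
        // If the containers are no longer needed before they are granted:
        client.removeContainerRequest(request);
      }

      private ContainerRequestBookkeepingSketch() { }
    }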

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/main/hadoop20/org/apache/twill/internal/yarn/ports/AllocationResponse.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/main/hadoop20/org/apache/twill/internal/yarn/ports/AllocationResponse.java b/twill-yarn/src/main/hadoop20/org/apache/twill/internal/yarn/ports/AllocationResponse.java
new file mode 100644
index 0000000..89734fc
--- /dev/null
+++ b/twill-yarn/src/main/hadoop20/org/apache/twill/internal/yarn/ports/AllocationResponse.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.yarn.ports;
+
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.Resource;
+
+import java.util.List;
+
+/**
+ * This interface abstracts the differences between vanilla Hadoop YARN 2.0 and CDH 4.4.
+ */
+public interface AllocationResponse {
+
+  int getResponseId();
+
+  Resource getAvailableResources();
+
+  List<Container> getAllocatedContainers();
+
+  List<ContainerStatus> getCompletedContainersStatuses();
+}
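
A sketch of how one heartbeat's response is typically consumed through this abstraction: launch work on the newly allocated containers and react to the ones that have completed. The printing is a stand-in for the real handling logic:

    import org.apache.hadoop.yarn.api.records.Container;
    import org.apache.hadoop.yarn.api.records.ContainerStatus;

    public final class AllocationResponseSketch {

      public static void handle(AllocationResponse response) {
        for (Container container : response.getAllocatedContainers()) {
          System.out.println("Allocated " + container.getId() + " on " + container.getNodeId());
        }
        for (ContainerStatus status : response.getCompletedContainersStatuses()) {
          System.out.println("Completed " + status.getContainerId() + " exit=" + status.getExitStatus());
        }
      }

      private AllocationResponseSketch() { }
    }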

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/main/hadoop20/org/apache/twill/internal/yarn/ports/AllocationResponses.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/main/hadoop20/org/apache/twill/internal/yarn/ports/AllocationResponses.java b/twill-yarn/src/main/hadoop20/org/apache/twill/internal/yarn/ports/AllocationResponses.java
new file mode 100644
index 0000000..ea46c3b
--- /dev/null
+++ b/twill-yarn/src/main/hadoop20/org/apache/twill/internal/yarn/ports/AllocationResponses.java
@@ -0,0 +1,111 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.yarn.ports;
+
+import com.google.common.base.Throwables;
+import com.google.common.reflect.TypeToken;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.Resource;
+
+import java.util.List;
+
+/**
+ * Factory for building instance of {@link AllocationResponse} based on the response type.
+ */
+public final class AllocationResponses {
+
+  /**
+   * A hack for CDH 4.4.0, as its AllocateResponse class was rewritten and diverged from YARN 2.0.
+   */
+  private static final boolean IS_CDH_4_4;
+
+  static {
+    boolean result = false;
+    try {
+      try {
+        // See if it is standard YARN 2.0 AllocateResponse object.
+        AllocateResponse.class.getMethod("getAMResponse");
+      } catch (NoSuchMethodException e) {
+        // See if it is the CDH 4.4 AllocateResponse object.
+        AllocateResponse.class.getMethod("getAllocatedContainers");
+        result = true;
+      }
+    } catch (Exception e) {
+      // Something very wrong in here, as it shouldn't arrive here.
+      e.printStackTrace();
+      throw Throwables.propagate(e);
+    }
+
+    IS_CDH_4_4 = result;
+  }
+
+  public static AllocationResponse create(Object response) {
+    if (IS_CDH_4_4) {
+      return new ReflectionAllocationResponse(response);
+    }
+
+    try {
+      Object amResponse = response.getClass().getMethod("getAMResponse").invoke(response);
+      return new ReflectionAllocationResponse(amResponse);
+    } catch (Exception e) {
+      throw Throwables.propagate(e);
+    }
+  }
+
+  private static final class ReflectionAllocationResponse implements AllocationResponse {
+
+    private final Object response;
+
+    private ReflectionAllocationResponse(Object response) {
+      this.response = response;
+    }
+
+    @Override
+    public int getResponseId() {
+      return call("getResponseId", TypeToken.of(Integer.class));
+    }
+
+    @Override
+    public Resource getAvailableResources() {
+      return call("getAvailableResources", TypeToken.of(Resource.class));
+    }
+
+    @Override
+    public List<Container> getAllocatedContainers() {
+      return call("getAllocatedContainers", new TypeToken<List<Container>>() {});
+    }
+
+    @Override
+    public List<ContainerStatus> getCompletedContainersStatuses() {
+      return call("getCompletedContainersStatuses", new TypeToken<List<ContainerStatus>>() {});
+    }
+
+    private <T> T call(String methodName, TypeToken<T> resultType) {
+      try {
+        return (T) resultType.getRawType().cast(response.getClass().getMethod(methodName).invoke(response));
+      } catch (Exception e) {
+        throw Throwables.propagate(e);
+      }
+    }
+  }
+
+  private AllocationResponses() {
+  }
+}
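
Usage of the factory is a one-liner: whatever the underlying allocate() call returned is normalized into the common view. A hedged sketch:

    import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;

    public final class AllocationResponsesSketch {

      // Vanilla YARN 2.0 nests the data inside an AMResponse while CDH 4.4 exposes it
      // directly; create() hides that difference behind AllocationResponse.
      public static AllocationResponse normalize(AllocateResponse raw) {
        return AllocationResponses.create(raw);
      }

      private AllocationResponsesSketch() { }
    }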

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/main/hadoop21/org/apache/twill/internal/yarn/Hadoop21YarnAMClient.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/main/hadoop21/org/apache/twill/internal/yarn/Hadoop21YarnAMClient.java b/twill-yarn/src/main/hadoop21/org/apache/twill/internal/yarn/Hadoop21YarnAMClient.java
new file mode 100644
index 0000000..ce8f90f
--- /dev/null
+++ b/twill-yarn/src/main/hadoop21/org/apache/twill/internal/yarn/Hadoop21YarnAMClient.java
@@ -0,0 +1,207 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.yarn;
+
+import org.apache.twill.internal.ProcessLauncher;
+import org.apache.twill.internal.appmaster.RunnableProcessLauncher;
+import com.google.common.base.Function;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ArrayListMultimap;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Multimap;
+import com.google.common.util.concurrent.AbstractIdleService;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.ApplicationConstants;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.client.api.AMRMClient;
+import org.apache.hadoop.yarn.util.ConverterUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.net.InetSocketAddress;
+import java.net.URL;
+import java.util.List;
+import java.util.UUID;
+
+/**
+ *
+ */
+public final class Hadoop21YarnAMClient extends AbstractIdleService implements YarnAMClient {
+
+  private static final Logger LOG = LoggerFactory.getLogger(Hadoop21YarnAMClient.class);
+
+  private static final Function<ContainerStatus, YarnContainerStatus> STATUS_TRANSFORM;
+
+  static {
+    STATUS_TRANSFORM = new Function<ContainerStatus, YarnContainerStatus>() {
+      @Override
+      public YarnContainerStatus apply(ContainerStatus status) {
+        return new Hadoop21YarnContainerStatus(status);
+      }
+    };
+  }
+
+  private final ContainerId containerId;
+  private final Multimap<String, AMRMClient.ContainerRequest> containerRequests;
+  private final AMRMClient<AMRMClient.ContainerRequest> amrmClient;
+  private final Hadoop21YarnNMClient nmClient;
+  private InetSocketAddress trackerAddr;
+  private URL trackerUrl;
+  private Resource maxCapability;
+
+  public Hadoop21YarnAMClient(Configuration conf) {
+    String masterContainerId = System.getenv().get(ApplicationConstants.Environment.CONTAINER_ID.name());
+    Preconditions.checkArgument(masterContainerId != null,
+                                "Missing %s from environment", ApplicationConstants.Environment.CONTAINER_ID.name());
+    this.containerId = ConverterUtils.toContainerId(masterContainerId);
+    this.containerRequests = ArrayListMultimap.create();
+
+    this.amrmClient = AMRMClient.createAMRMClient();
+    this.amrmClient.init(conf);
+    this.nmClient = new Hadoop21YarnNMClient(conf);
+  }
+
+  @Override
+  protected void startUp() throws Exception {
+    Preconditions.checkNotNull(trackerAddr, "Tracker address not set.");
+    Preconditions.checkNotNull(trackerUrl, "Tracker URL not set.");
+
+    amrmClient.start();
+    RegisterApplicationMasterResponse response = amrmClient.registerApplicationMaster(trackerAddr.getHostName(),
+                                                                                      trackerAddr.getPort(),
+                                                                                      trackerUrl.toString());
+    maxCapability = response.getMaximumResourceCapability();
+    nmClient.startAndWait();
+  }
+
+  @Override
+  protected void shutDown() throws Exception {
+    nmClient.stopAndWait();
+    amrmClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED, null, trackerUrl.toString());
+    amrmClient.stop();
+  }
+
+  @Override
+  public ContainerId getContainerId() {
+    return containerId;
+  }
+
+  @Override
+  public String getHost() {
+    return System.getenv().get(ApplicationConstants.Environment.NM_HOST.name());
+  }
+
+  @Override
+  public void setTracker(InetSocketAddress trackerAddr, URL trackerUrl) {
+    this.trackerAddr = trackerAddr;
+    this.trackerUrl = trackerUrl;
+  }
+
+  @Override
+  public synchronized void allocate(float progress, AllocateHandler handler) throws Exception {
+    AllocateResponse allocateResponse = amrmClient.allocate(progress);
+    List<ProcessLauncher<YarnContainerInfo>> launchers
+      = Lists.newArrayListWithCapacity(allocateResponse.getAllocatedContainers().size());
+
+    for (Container container : allocateResponse.getAllocatedContainers()) {
+      launchers.add(new RunnableProcessLauncher(new Hadoop21YarnContainerInfo(container), nmClient));
+    }
+
+    if (!launchers.isEmpty()) {
+      handler.acquired(launchers);
+
+      // If no process has been launched through the given launcher, return the container.
+      for (ProcessLauncher<YarnContainerInfo> l : launchers) {
+        // This cast always works.
+        RunnableProcessLauncher launcher = (RunnableProcessLauncher) l;
+        if (!launcher.isLaunched()) {
+          Container container = launcher.getContainerInfo().getContainer();
+          LOG.info("Nothing to run in container, releasing it: {}", container);
+          amrmClient.releaseAssignedContainer(container.getId());
+        }
+      }
+    }
+
+    List<YarnContainerStatus> completed = ImmutableList.copyOf(
+      Iterables.transform(allocateResponse.getCompletedContainersStatuses(), STATUS_TRANSFORM));
+    if (!completed.isEmpty()) {
+      handler.completed(completed);
+    }
+  }
+
+  @Override
+  public ContainerRequestBuilder addContainerRequest(Resource capability) {
+    return addContainerRequest(capability, 1);
+  }
+
+  @Override
+  public ContainerRequestBuilder addContainerRequest(Resource capability, int count) {
+    return new ContainerRequestBuilder(adjustCapability(capability), count) {
+      @Override
+      public String apply() {
+        synchronized (Hadoop21YarnAMClient.this) {
+          String id = UUID.randomUUID().toString();
+
+          String[] hosts = this.hosts.isEmpty() ? null : this.hosts.toArray(new String[this.hosts.size()]);
+          String[] racks = this.racks.isEmpty() ? null : this.racks.toArray(new String[this.racks.size()]);
+
+          for (int i = 0; i < count; i++) {
+            AMRMClient.ContainerRequest request = new AMRMClient.ContainerRequest(capability, hosts, racks, priority);
+            containerRequests.put(id, request);
+            amrmClient.addContainerRequest(request);
+          }
+
+          return id;
+        }
+      }
+    };
+  }
+
+  @Override
+  public synchronized void completeContainerRequest(String id) {
+    for (AMRMClient.ContainerRequest request : containerRequests.removeAll(id)) {
+      amrmClient.removeContainerRequest(request);
+    }
+  }
+
+  private Resource adjustCapability(Resource resource) {
+    int cores = resource.getVirtualCores();
+    int updatedCores = Math.min(resource.getVirtualCores(), maxCapability.getVirtualCores());
+
+    if (cores != updatedCores) {
+      resource.setVirtualCores(updatedCores);
+      LOG.info("Adjust virtual cores requirement from {} to {}.", cores, updatedCores);
+    }
+
+    int updatedMemory = Math.min(resource.getMemory(), maxCapability.getMemory());
+    if (resource.getMemory() != updatedMemory) {
+      LOG.info("Adjust memory requirement from {} to {} MB.", resource.getMemory(), updatedMemory);
+      resource.setMemory(updatedMemory);
+    }
+
+    return resource;
+  }
+}
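
A minimal usage sketch (not part of this patch) of the request flow an application master could drive with the client above. It assumes the process runs inside a YARN-launched container, since the constructor requires the CONTAINER_ID environment variable; the tracker address, URL and resource sizes are placeholders, and Resource.newInstance plus the builder's apply() are assumed from the Hadoop 2.1 records API and the code shown above.

import java.net.InetSocketAddress;
import java.net.URL;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.twill.internal.yarn.Hadoop21YarnAMClient;

public class AMClientSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder tracker endpoint; a real AM would publish its own address.
    Hadoop21YarnAMClient amClient = new Hadoop21YarnAMClient(new Configuration());
    amClient.setTracker(new InetSocketAddress("localhost", 12345),
                        new URL("http://localhost:12345/tracker"));
    amClient.startAndWait();
    try {
      // Ask for two containers of 512 MB / 1 vcore. apply() submits the request and
      // returns an id that can later withdraw it via completeContainerRequest().
      String requestId = amClient.addContainerRequest(Resource.newInstance(512, 1), 2).apply();

      // A real application master would now call allocate(progress, handler) in its
      // heartbeat loop and launch processes from the acquired containers.

      amClient.completeContainerRequest(requestId);
    } finally {
      amClient.stopAndWait();
    }
  }
}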

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/main/hadoop21/org/apache/twill/internal/yarn/Hadoop21YarnAppClient.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/main/hadoop21/org/apache/twill/internal/yarn/Hadoop21YarnAppClient.java b/twill-yarn/src/main/hadoop21/org/apache/twill/internal/yarn/Hadoop21YarnAppClient.java
new file mode 100644
index 0000000..50b212d
--- /dev/null
+++ b/twill-yarn/src/main/hadoop21/org/apache/twill/internal/yarn/Hadoop21YarnAppClient.java
@@ -0,0 +1,177 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.yarn;
+
+import org.apache.twill.api.TwillSpecification;
+import org.apache.twill.internal.ProcessController;
+import org.apache.twill.internal.ProcessLauncher;
+import org.apache.twill.internal.appmaster.ApplicationMasterProcessLauncher;
+import org.apache.twill.internal.appmaster.ApplicationSubmitter;
+import com.google.common.base.Throwables;
+import com.google.common.util.concurrent.AbstractIdleService;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.client.api.YarnClient;
+import org.apache.hadoop.yarn.client.api.YarnClientApplication;
+import org.apache.hadoop.yarn.util.ConverterUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * {@link YarnAppClient} implementation for Hadoop 2.1, backed by {@link YarnClient}.
+ */
+public final class Hadoop21YarnAppClient extends AbstractIdleService implements YarnAppClient {
+
+  private static final Logger LOG = LoggerFactory.getLogger(Hadoop21YarnAppClient.class);
+  private final YarnClient yarnClient;
+
+  public Hadoop21YarnAppClient(Configuration configuration) {
+    this.yarnClient = YarnClient.createYarnClient();
+    yarnClient.init(configuration);
+  }
+
+  @Override
+  public ProcessLauncher<ApplicationId> createLauncher(TwillSpecification twillSpec) throws Exception {
+    // Request for new application
+    YarnClientApplication application = yarnClient.createApplication();
+    final GetNewApplicationResponse response = application.getNewApplicationResponse();
+    final ApplicationId appId = response.getApplicationId();
+
+    // Setup the context for application submission
+    final ApplicationSubmissionContext appSubmissionContext = application.getApplicationSubmissionContext();
+    appSubmissionContext.setApplicationId(appId);
+    appSubmissionContext.setApplicationName(twillSpec.getName());
+
+    ApplicationSubmitter submitter = new ApplicationSubmitter() {
+      @Override
+      public ProcessController<YarnApplicationReport> submit(YarnLaunchContext context, Resource capability) {
+        ContainerLaunchContext launchContext = context.getLaunchContext();
+
+        addRMToken(launchContext);
+        appSubmissionContext.setAMContainerSpec(launchContext);
+        appSubmissionContext.setResource(adjustMemory(response, capability));
+        appSubmissionContext.setMaxAppAttempts(2);
+
+        try {
+          yarnClient.submitApplication(appSubmissionContext);
+          return new ProcessControllerImpl(yarnClient, appId);
+        } catch (Exception e) {
+          LOG.error("Failed to submit application {}", appId, e);
+          throw Throwables.propagate(e);
+        }
+      }
+    };
+
+    return new ApplicationMasterProcessLauncher(appId, submitter);
+  }
+
+  private Resource adjustMemory(GetNewApplicationResponse response, Resource capability) {
+    int maxMemory = response.getMaximumResourceCapability().getMemory();
+    int updatedMemory = capability.getMemory();
+
+    if (updatedMemory > maxMemory) {
+      capability.setMemory(maxMemory);
+    }
+
+    return capability;
+  }
+
+  private void addRMToken(ContainerLaunchContext context) {
+    if (!UserGroupInformation.isSecurityEnabled()) {
+      return;
+    }
+
+    try {
+      Credentials credentials = YarnUtils.decodeCredentials(context.getTokens());
+
+      Configuration config = yarnClient.getConfig();
+      Token<TokenIdentifier> token = ConverterUtils.convertFromYarn(
+        yarnClient.getRMDelegationToken(new Text(YarnUtils.getYarnTokenRenewer(config))),
+        YarnUtils.getRMAddress(config));
+
+      LOG.info("Added RM delegation token {}", token);
+      credentials.addToken(token.getService(), token);
+
+      context.setTokens(YarnUtils.encodeCredentials(credentials));
+
+    } catch (Exception e) {
+      LOG.error("Fails to create credentials.", e);
+      throw Throwables.propagate(e);
+    }
+  }
+
+  @Override
+  public ProcessLauncher<ApplicationId> createLauncher(String user, TwillSpecification twillSpec) throws Exception {
+    // Ignore user
+    return createLauncher(twillSpec);
+  }
+
+  @Override
+  public ProcessController<YarnApplicationReport> createProcessController(ApplicationId appId) {
+    return new ProcessControllerImpl(yarnClient, appId);
+  }
+
+  @Override
+  protected void startUp() throws Exception {
+    yarnClient.start();
+  }
+
+  @Override
+  protected void shutDown() throws Exception {
+    yarnClient.stop();
+  }
+
+  private static final class ProcessControllerImpl implements ProcessController<YarnApplicationReport> {
+    private final YarnClient yarnClient;
+    private final ApplicationId appId;
+
+    public ProcessControllerImpl(YarnClient yarnClient, ApplicationId appId) {
+      this.yarnClient = yarnClient;
+      this.appId = appId;
+    }
+
+    @Override
+    public YarnApplicationReport getReport() {
+      try {
+        return new Hadoop21YarnApplicationReport(yarnClient.getApplicationReport(appId));
+      } catch (Exception e) {
+        LOG.error("Failed to get application report {}", appId, e);
+        throw Throwables.propagate(e);
+      }
+    }
+
+    @Override
+    public void cancel() {
+      try {
+        yarnClient.killApplication(appId);
+      } catch (Exception e) {
+        LOG.error("Failed to kill application {}", appId, e);
+        throw Throwables.propagate(e);
+      }
+    }
+  }
+}
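
As a hedged illustration of the controller returned above, the sketch below polls the report of an already-submitted application. The application id value is a placeholder, and ApplicationId.newInstance is assumed from the Hadoop 2.1 records API.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.twill.internal.ProcessController;
import org.apache.twill.internal.yarn.Hadoop21YarnAppClient;
import org.apache.twill.internal.yarn.YarnApplicationReport;

public class AppReportSketch {
  public static void main(String[] args) throws Exception {
    Hadoop21YarnAppClient appClient = new Hadoop21YarnAppClient(new Configuration());
    appClient.startAndWait();
    try {
      // Placeholder id; normally this comes back from createLauncher()/submit().
      ApplicationId appId = ApplicationId.newInstance(1386888000000L, 1);
      ProcessController<YarnApplicationReport> controller = appClient.createProcessController(appId);
      YarnApplicationReport report = controller.getReport();
      System.out.println(report.getYarnApplicationState() + " " + report.getTrackingUrl());
      // controller.cancel() would ask the ResourceManager to kill the application.
    } finally {
      appClient.stopAndWait();
    }
  }
}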

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/main/hadoop21/org/apache/twill/internal/yarn/Hadoop21YarnApplicationReport.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/main/hadoop21/org/apache/twill/internal/yarn/Hadoop21YarnApplicationReport.java b/twill-yarn/src/main/hadoop21/org/apache/twill/internal/yarn/Hadoop21YarnApplicationReport.java
new file mode 100644
index 0000000..6e614f5
--- /dev/null
+++ b/twill-yarn/src/main/hadoop21/org/apache/twill/internal/yarn/Hadoop21YarnApplicationReport.java
@@ -0,0 +1,107 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.yarn;
+
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
+import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
+import org.apache.hadoop.yarn.api.records.YarnApplicationState;
+
+/**
+ * {@link YarnApplicationReport} implementation for Hadoop 2.1, wrapping {@link ApplicationReport}.
+ */
+public final class Hadoop21YarnApplicationReport implements YarnApplicationReport {
+
+  private final ApplicationReport report;
+
+  public Hadoop21YarnApplicationReport(ApplicationReport report) {
+    this.report = report;
+  }
+
+  @Override
+  public ApplicationId getApplicationId() {
+    return report.getApplicationId();
+  }
+
+  @Override
+  public ApplicationAttemptId getCurrentApplicationAttemptId() {
+    return report.getCurrentApplicationAttemptId();
+  }
+
+  @Override
+  public String getQueue() {
+    return report.getQueue();
+  }
+
+  @Override
+  public String getName() {
+    return report.getName();
+  }
+
+  @Override
+  public String getHost() {
+    return report.getHost();
+  }
+
+  @Override
+  public int getRpcPort() {
+    return report.getRpcPort();
+  }
+
+  @Override
+  public YarnApplicationState getYarnApplicationState() {
+    return report.getYarnApplicationState();
+  }
+
+  @Override
+  public String getDiagnostics() {
+    return report.getDiagnostics();
+  }
+
+  @Override
+  public String getTrackingUrl() {
+    return report.getTrackingUrl();
+  }
+
+  @Override
+  public String getOriginalTrackingUrl() {
+    return report.getOriginalTrackingUrl();
+  }
+
+  @Override
+  public long getStartTime() {
+    return report.getStartTime();
+  }
+
+  @Override
+  public long getFinishTime() {
+    return report.getFinishTime();
+  }
+
+  @Override
+  public FinalApplicationStatus getFinalApplicationStatus() {
+    return report.getFinalApplicationStatus();
+  }
+
+  @Override
+  public ApplicationResourceUsageReport getApplicationResourceUsageReport() {
+    return report.getApplicationResourceUsageReport();
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/main/hadoop21/org/apache/twill/internal/yarn/Hadoop21YarnContainerInfo.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/main/hadoop21/org/apache/twill/internal/yarn/Hadoop21YarnContainerInfo.java b/twill-yarn/src/main/hadoop21/org/apache/twill/internal/yarn/Hadoop21YarnContainerInfo.java
new file mode 100644
index 0000000..86903c1
--- /dev/null
+++ b/twill-yarn/src/main/hadoop21/org/apache/twill/internal/yarn/Hadoop21YarnContainerInfo.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.yarn;
+
+import com.google.common.base.Throwables;
+import org.apache.hadoop.yarn.api.records.Container;
+
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+
+/**
+ * {@link YarnContainerInfo} implementation for Hadoop 2.1, wrapping {@link Container}.
+ */
+public final class Hadoop21YarnContainerInfo implements YarnContainerInfo {
+
+  private final Container container;
+
+  public Hadoop21YarnContainerInfo(Container container) {
+    this.container = container;
+  }
+
+  @Override
+  public <T> T getContainer() {
+    return (T) container;
+  }
+
+  @Override
+  public String getId() {
+    return container.getId().toString();
+  }
+
+  @Override
+  public InetAddress getHost() {
+    try {
+      return InetAddress.getByName(container.getNodeId().getHost());
+    } catch (UnknownHostException e) {
+      throw Throwables.propagate(e);
+    }
+  }
+
+  @Override
+  public int getPort() {
+    return container.getNodeId().getPort();
+  }
+
+  @Override
+  public int getMemoryMB() {
+    return container.getResource().getMemory();
+  }
+
+  @Override
+  public int getVirtualCores() {
+    return container.getResource().getVirtualCores();
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/main/hadoop21/org/apache/twill/internal/yarn/Hadoop21YarnContainerStatus.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/main/hadoop21/org/apache/twill/internal/yarn/Hadoop21YarnContainerStatus.java b/twill-yarn/src/main/hadoop21/org/apache/twill/internal/yarn/Hadoop21YarnContainerStatus.java
new file mode 100644
index 0000000..f5758c7
--- /dev/null
+++ b/twill-yarn/src/main/hadoop21/org/apache/twill/internal/yarn/Hadoop21YarnContainerStatus.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.yarn;
+
+import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+
+/**
+ * {@link YarnContainerStatus} implementation for Hadoop 2.1, wrapping {@link ContainerStatus}.
+ */
+public final class Hadoop21YarnContainerStatus implements YarnContainerStatus {
+
+  private final ContainerStatus containerStatus;
+
+  public Hadoop21YarnContainerStatus(ContainerStatus containerStatus) {
+    this.containerStatus = containerStatus;
+  }
+
+  @Override
+  public String getContainerId() {
+    return containerStatus.getContainerId().toString();
+  }
+
+  @Override
+  public ContainerState getState() {
+    return containerStatus.getState();
+  }
+
+  @Override
+  public int getExitStatus() {
+    return containerStatus.getExitStatus();
+  }
+
+  @Override
+  public String getDiagnostics() {
+    return containerStatus.getDiagnostics();
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/main/hadoop21/org/apache/twill/internal/yarn/Hadoop21YarnLaunchContext.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/main/hadoop21/org/apache/twill/internal/yarn/Hadoop21YarnLaunchContext.java b/twill-yarn/src/main/hadoop21/org/apache/twill/internal/yarn/Hadoop21YarnLaunchContext.java
new file mode 100644
index 0000000..8621f93
--- /dev/null
+++ b/twill-yarn/src/main/hadoop21/org/apache/twill/internal/yarn/Hadoop21YarnLaunchContext.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.yarn;
+
+import com.google.common.base.Function;
+import com.google.common.collect.Maps;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
+import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+import org.apache.hadoop.yarn.api.records.LocalResource;
+import org.apache.hadoop.yarn.util.Records;
+
+import java.nio.ByteBuffer;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * {@link YarnLaunchContext} implementation for Hadoop 2.1, wrapping {@link ContainerLaunchContext}.
+ */
+public final class Hadoop21YarnLaunchContext implements YarnLaunchContext {
+
+  private static final Function<YarnLocalResource, LocalResource> RESOURCE_TRANSFORM;
+
+  static {
+    // Creates transform function from YarnLocalResource -> LocalResource
+    RESOURCE_TRANSFORM = new Function<YarnLocalResource, LocalResource>() {
+      @Override
+      public LocalResource apply(YarnLocalResource input) {
+        return input.getLocalResource();
+      }
+    };
+  }
+
+  private final ContainerLaunchContext launchContext;
+
+  public Hadoop21YarnLaunchContext() {
+    launchContext = Records.newRecord(ContainerLaunchContext.class);
+  }
+
+  @Override
+  public <T> T getLaunchContext() {
+    return (T) launchContext;
+  }
+
+  @Override
+  public void setCredentials(Credentials credentials) {
+    launchContext.setTokens(YarnUtils.encodeCredentials(credentials));
+  }
+
+  @Override
+  public void setLocalResources(Map<String, YarnLocalResource> localResources) {
+    launchContext.setLocalResources(Maps.transformValues(localResources, RESOURCE_TRANSFORM));
+  }
+
+  @Override
+  public void setServiceData(Map<String, ByteBuffer> serviceData) {
+    launchContext.setServiceData(serviceData);
+  }
+
+  @Override
+  public Map<String, String> getEnvironment() {
+    return launchContext.getEnvironment();
+  }
+
+  @Override
+  public void setEnvironment(Map<String, String> environment) {
+    launchContext.setEnvironment(environment);
+  }
+
+  @Override
+  public List<String> getCommands() {
+    return launchContext.getCommands();
+  }
+
+  @Override
+  public void setCommands(List<String> commands) {
+    launchContext.setCommands(commands);
+  }
+
+  @Override
+  public void setApplicationACLs(Map<ApplicationAccessType, String> acls) {
+    launchContext.setApplicationACLs(acls);
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/main/hadoop21/org/apache/twill/internal/yarn/Hadoop21YarnLocalResource.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/main/hadoop21/org/apache/twill/internal/yarn/Hadoop21YarnLocalResource.java b/twill-yarn/src/main/hadoop21/org/apache/twill/internal/yarn/Hadoop21YarnLocalResource.java
new file mode 100644
index 0000000..3f756bd
--- /dev/null
+++ b/twill-yarn/src/main/hadoop21/org/apache/twill/internal/yarn/Hadoop21YarnLocalResource.java
@@ -0,0 +1,101 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.yarn;
+
+import org.apache.hadoop.yarn.api.records.LocalResource;
+import org.apache.hadoop.yarn.api.records.LocalResourceType;
+import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
+import org.apache.hadoop.yarn.api.records.URL;
+import org.apache.hadoop.yarn.util.Records;
+
+/**
+ * {@link YarnLocalResource} implementation for Hadoop 2.1, wrapping {@link LocalResource}.
+ */
+public final class Hadoop21YarnLocalResource implements YarnLocalResource {
+
+  private final LocalResource localResource;
+
+  public Hadoop21YarnLocalResource() {
+    this.localResource = Records.newRecord(LocalResource.class);
+  }
+
+  @Override
+  public <T> T getLocalResource() {
+    return (T) localResource;
+  }
+
+  @Override
+  public URL getResource() {
+    return localResource.getResource();
+  }
+
+  @Override
+  public void setResource(URL resource) {
+    localResource.setResource(resource);
+  }
+
+  @Override
+  public long getSize() {
+    return localResource.getSize();
+  }
+
+  @Override
+  public void setSize(long size) {
+    localResource.setSize(size);
+  }
+
+  @Override
+  public long getTimestamp() {
+    return localResource.getTimestamp();
+  }
+
+  @Override
+  public void setTimestamp(long timestamp) {
+    localResource.setTimestamp(timestamp);
+  }
+
+  @Override
+  public LocalResourceType getType() {
+    return localResource.getType();
+  }
+
+  @Override
+  public void setType(LocalResourceType type) {
+    localResource.setType(type);
+  }
+
+  @Override
+  public LocalResourceVisibility getVisibility() {
+    return localResource.getVisibility();
+  }
+
+  @Override
+  public void setVisibility(LocalResourceVisibility visibility) {
+    localResource.setVisibility(visibility);
+  }
+
+  @Override
+  public String getPattern() {
+    return localResource.getPattern();
+  }
+
+  @Override
+  public void setPattern(String pattern) {
+    localResource.setPattern(pattern);
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/main/hadoop21/org/apache/twill/internal/yarn/Hadoop21YarnNMClient.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/main/hadoop21/org/apache/twill/internal/yarn/Hadoop21YarnNMClient.java b/twill-yarn/src/main/hadoop21/org/apache/twill/internal/yarn/Hadoop21YarnNMClient.java
new file mode 100644
index 0000000..d3a6a80
--- /dev/null
+++ b/twill-yarn/src/main/hadoop21/org/apache/twill/internal/yarn/Hadoop21YarnNMClient.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.yarn;
+
+import org.apache.twill.common.Cancellable;
+import com.google.common.base.Throwables;
+import com.google.common.util.concurrent.AbstractIdleService;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.client.api.NMClient;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * {@link YarnNMClient} implementation for Hadoop 2.1, backed by {@link NMClient}.
+ */
+public final class Hadoop21YarnNMClient extends AbstractIdleService implements YarnNMClient {
+
+  private static final Logger LOG = LoggerFactory.getLogger(Hadoop21YarnNMClient.class);
+
+  private final NMClient nmClient;
+
+  public Hadoop21YarnNMClient(Configuration configuration) {
+    this.nmClient = NMClient.createNMClient();
+    nmClient.init(configuration);
+  }
+
+  @Override
+  public Cancellable start(YarnContainerInfo containerInfo, YarnLaunchContext launchContext) {
+    try {
+      Container container = containerInfo.getContainer();
+      nmClient.startContainer(container, launchContext.<ContainerLaunchContext>getLaunchContext());
+      return new ContainerTerminator(container, nmClient);
+    } catch (Exception e) {
+      LOG.error("Error in launching process", e);
+      throw Throwables.propagate(e);
+    }
+
+  }
+
+  @Override
+  protected void startUp() throws Exception {
+    nmClient.start();
+  }
+
+  @Override
+  protected void shutDown() throws Exception {
+    nmClient.stop();
+  }
+
+  private static final class ContainerTerminator implements Cancellable {
+
+    private final Container container;
+    private final NMClient nmClient;
+
+    private ContainerTerminator(Container container, NMClient nmClient) {
+      this.container = container;
+      this.nmClient = nmClient;
+    }
+
+    @Override
+    public void cancel() {
+      LOG.info("Request to stop container {}.", container.getId());
+
+      try {
+        nmClient.stopContainer(container.getId(), container.getNodeId());
+        boolean completed = false;
+        while (!completed) {
+          ContainerStatus status = nmClient.getContainerStatus(container.getId(), container.getNodeId());
+          LOG.info("Container status: {} {}", status, status.getDiagnostics());
+
+          completed = (status.getState() == ContainerState.COMPLETE);
+        }
+        LOG.info("Container {} stopped.", container.getId());
+      } catch (Exception e) {
+        LOG.error("Fail to stop container {}", container.getId(), e);
+        throw Throwables.propagate(e);
+      }
+    }
+  }
+}
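
A small sketch (assumptions noted in comments) of how a caller might drive the NM client above: the Container is assumed to have come from a prior AMRMClient allocation, and the command is a placeholder.

import com.google.common.collect.ImmutableList;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.twill.common.Cancellable;
import org.apache.twill.internal.yarn.Hadoop21YarnContainerInfo;
import org.apache.twill.internal.yarn.Hadoop21YarnLaunchContext;
import org.apache.twill.internal.yarn.Hadoop21YarnNMClient;

public class NMClientSketch {
  // The container is assumed to have been granted by an earlier allocate() call.
  public static void runAndStop(Container allocatedContainer) {
    Hadoop21YarnNMClient nmClient = new Hadoop21YarnNMClient(new Configuration());
    nmClient.startAndWait();
    try {
      Hadoop21YarnLaunchContext launchContext = new Hadoop21YarnLaunchContext();
      // Placeholder command; Twill would set up the full runnable launch command here.
      launchContext.setCommands(ImmutableList.of("sleep", "60"));

      Cancellable stopper = nmClient.start(new Hadoop21YarnContainerInfo(allocatedContainer), launchContext);
      // cancel() asks the node manager to stop the container and waits until it reports COMPLETE.
      stopper.cancel();
    } finally {
      nmClient.stopAndWait();
    }
  }
}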

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/main/java/org/apache/twill/filesystem/HDFSLocation.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/main/java/org/apache/twill/filesystem/HDFSLocation.java b/twill-yarn/src/main/java/org/apache/twill/filesystem/HDFSLocation.java
new file mode 100644
index 0000000..b0eeb43
--- /dev/null
+++ b/twill-yarn/src/main/java/org/apache/twill/filesystem/HDFSLocation.java
@@ -0,0 +1,193 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.filesystem;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Options;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.net.URI;
+import java.util.UUID;
+
+/**
+ * A concrete implementation of {@link Location} for the HDFS filesystem.
+ */
+final class HDFSLocation implements Location {
+  private final FileSystem fs;
+  private final Path path;
+
+  /**
+   * Constructs an HDFSLocation.
+   *
+   * @param fs  An instance of {@link FileSystem}
+   * @param path of the file.
+   */
+  HDFSLocation(FileSystem fs, Path path) {
+    this.fs = fs;
+    this.path = path;
+  }
+
+  /**
+   * Checks if this location exists on HDFS.
+   *
+   * @return true if found; false otherwise.
+   * @throws IOException
+   */
+  @Override
+  public boolean exists() throws IOException {
+    return fs.exists(path);
+  }
+
+  /**
+   * @return An {@link InputStream} for this location on HDFS.
+   * @throws IOException
+   */
+  @Override
+  public InputStream getInputStream() throws IOException {
+    return fs.open(path);
+  }
+
+  /**
+   * @return An {@link OutputStream} for this location on HDFS.
+   * @throws IOException
+   */
+  @Override
+  public OutputStream getOutputStream() throws IOException {
+    return fs.create(path);
+  }
+
+  @Override
+  public OutputStream getOutputStream(String permission) throws IOException {
+    Configuration conf = fs.getConf();
+    return fs.create(path,
+                     new FsPermission(permission),
+                     true,
+                     conf.getInt("io.file.buffer.size", 4096),
+                     fs.getDefaultReplication(path),
+                     fs.getDefaultBlockSize(path),
+                     null);
+  }
+
+  /**
+   * Appends the child to the current {@link Location} on HDFS.
+   * <p>
+   * Returns a new instance of Location.
+   * </p>
+   *
+   * @param child to be appended to this location.
+   * @return A new instance of {@link Location}
+   * @throws IOException
+   */
+  @Override
+  public Location append(String child) throws IOException {
+    if (child.startsWith("/")) {
+      child = child.substring(1);
+    }
+    return new HDFSLocation(fs, new Path(URI.create(path.toUri() + "/" + child)));
+  }
+
+  @Override
+  public Location getTempFile(String suffix) throws IOException {
+    Path path = new Path(
+      URI.create(this.path.toUri() + "." + UUID.randomUUID() + (suffix == null ? TEMP_FILE_SUFFIX : suffix)));
+    return new HDFSLocation(fs, path);
+  }
+
+  /**
+   * @return The name of the file or directory denoted by this abstract pathname.
+   */
+  @Override
+  public String getName() {
+    return path.getName();
+  }
+
+  @Override
+  public boolean createNew() throws IOException {
+    return fs.createNewFile(path);
+  }
+
+  /**
+   * @return A {@link URI} for this location on HDFS.
+   */
+  @Override
+  public URI toURI() {
+    return path.toUri();
+  }
+
+  /**
+   * Deletes the file or directory denoted by this abstract pathname. If this
+   * pathname denotes a directory, then the directory must be empty in order
+   * to be deleted.
+   *
+   * @return true if and only if the file or directory is successfully deleted; false otherwise.
+   */
+  @Override
+  public boolean delete() throws IOException {
+    return fs.delete(path, false);
+  }
+
+  @Override
+  public boolean delete(boolean recursive) throws IOException {
+    return fs.delete(path, recursive);
+  }
+
+  @Override
+  public Location renameTo(Location destination) throws IOException {
+    // Destination will always be of the same type as this location.
+    if (fs instanceof DistributedFileSystem) {
+      ((DistributedFileSystem) fs).rename(path, ((HDFSLocation) destination).path, Options.Rename.OVERWRITE);
+      return new HDFSLocation(fs, new Path(destination.toURI()));
+    }
+
+    if (fs.rename(path, ((HDFSLocation) destination).path)) {
+      return new HDFSLocation(fs, new Path(destination.toURI()));
+    } else {
+      return null;
+    }
+  }
+
+  /**
+   * Creates the directory named by this abstract pathname, including any necessary
+   * but nonexistent parent directories.
+   *
+   * @return true if and only if the directory was created, along with any necessary parent directories; false otherwise
+   */
+  @Override
+  public boolean mkdirs() throws IOException {
+    return fs.mkdirs(path);
+  }
+
+  /**
+   * @return Length of file.
+   */
+  @Override
+  public long length() throws IOException {
+    return fs.getFileStatus(path).getLen();
+  }
+
+  @Override
+  public long lastModified() throws IOException {
+    return fs.getFileStatus(path).getModificationTime();
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/main/java/org/apache/twill/filesystem/HDFSLocationFactory.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/main/java/org/apache/twill/filesystem/HDFSLocationFactory.java b/twill-yarn/src/main/java/org/apache/twill/filesystem/HDFSLocationFactory.java
new file mode 100644
index 0000000..fa79391
--- /dev/null
+++ b/twill-yarn/src/main/java/org/apache/twill/filesystem/HDFSLocationFactory.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.filesystem;
+
+import com.google.common.base.Throwables;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+
+import java.io.IOException;
+import java.net.URI;
+
+/**
+ * A {@link LocationFactory} that creates HDFS {@link Location}.
+ */
+public final class HDFSLocationFactory implements LocationFactory {
+
+  private final FileSystem fileSystem;
+  private final String pathBase;
+
+  public HDFSLocationFactory(Configuration configuration) {
+    this(getFileSystem(configuration));
+  }
+  
+  public HDFSLocationFactory(Configuration configuration, String pathBase) {
+    this(getFileSystem(configuration), pathBase);
+  }
+
+  public HDFSLocationFactory(FileSystem fileSystem) {
+    this(fileSystem, "/");
+  }
+
+  public HDFSLocationFactory(FileSystem fileSystem, String pathBase) {
+    String base = pathBase.equals("/") ? "" : pathBase;
+    base = base.endsWith("/") ? base.substring(0, base.length() - 1) : base;
+
+    this.fileSystem = fileSystem;
+    this.pathBase = base;
+  }
+
+  @Override
+  public Location create(String path) {
+    if (path.startsWith("/")) {
+      path = path.substring(1);
+    }
+    return new HDFSLocation(fileSystem, new Path(fileSystem.getUri() + "/" + pathBase + "/" + path));
+  }
+
+  @Override
+  public Location create(URI uri) {
+    if (!uri.toString().startsWith(fileSystem.getUri().toString())) {
+      // It's a full URI
+      return new HDFSLocation(fileSystem, new Path(uri));
+    }
+    if (uri.isAbsolute()) {
+      return new HDFSLocation(fileSystem, new Path(fileSystem.getUri() + uri.getPath()));
+    }
+    return new HDFSLocation(fileSystem, new Path(fileSystem.getUri() + "/" + pathBase + "/" + uri.getPath()));
+  }
+
+  @Override
+  public Location getHomeLocation() {
+    return new HDFSLocation(fileSystem, fileSystem.getHomeDirectory());
+  }
+
+  /**
+   * Returns the underlying {@link FileSystem} object.
+   */
+  public FileSystem getFileSystem() {
+    return fileSystem;
+  }
+
+  private static FileSystem getFileSystem(Configuration configuration) {
+    try {
+      return FileSystem.get(configuration);
+    } catch (IOException e) {
+      throw Throwables.propagate(e);
+    }
+  }
+}
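
A minimal sketch of the two classes above working together; the base path, file name and contents are placeholders, and the filesystem is whatever the Hadoop Configuration on the classpath points at.

import java.io.OutputStream;
import java.nio.charset.Charset;

import org.apache.hadoop.conf.Configuration;
import org.apache.twill.filesystem.HDFSLocationFactory;
import org.apache.twill.filesystem.Location;
import org.apache.twill.filesystem.LocationFactory;

public class LocationSketch {
  public static void main(String[] args) throws Exception {
    LocationFactory factory = new HDFSLocationFactory(new Configuration(), "/tmp/twill");
    Location dir = factory.create("demo");
    dir.mkdirs();

    // Write a small file under the directory location.
    Location file = dir.append("hello.txt");
    OutputStream out = file.getOutputStream();
    try {
      out.write("hello hdfs".getBytes(Charset.forName("UTF-8")));
    } finally {
      out.close();
    }

    System.out.println(file.toURI() + " exists=" + file.exists() + " length=" + file.length());
    file.delete();
  }
}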

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/main/java/org/apache/twill/filesystem/package-info.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/main/java/org/apache/twill/filesystem/package-info.java b/twill-yarn/src/main/java/org/apache/twill/filesystem/package-info.java
new file mode 100644
index 0000000..2ca09fd
--- /dev/null
+++ b/twill-yarn/src/main/java/org/apache/twill/filesystem/package-info.java
@@ -0,0 +1,21 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * Contains HDFS location classes.
+ */
+package org.apache.twill.filesystem;


[27/28] Making maven site works.

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/api/src/main/java/org/apache/twill/api/TwillRunnableSpecification.java
----------------------------------------------------------------------
diff --git a/api/src/main/java/org/apache/twill/api/TwillRunnableSpecification.java b/api/src/main/java/org/apache/twill/api/TwillRunnableSpecification.java
deleted file mode 100644
index bbcc5d7..0000000
--- a/api/src/main/java/org/apache/twill/api/TwillRunnableSpecification.java
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.api;
-
-import org.apache.twill.internal.DefaultTwillRunnableSpecification;
-import com.google.common.collect.ImmutableMap;
-
-import java.util.Map;
-
-/**
- * Represents a specification of a {@link TwillRunnable}.
- */
-public interface TwillRunnableSpecification {
-
-  String getClassName();
-
-  String getName();
-
-  Map<String, String> getConfigs();
-
-  /**
-   * Builder for constructing {@link TwillRunnableSpecification}.
-   */
-  static final class Builder {
-
-    private String name;
-    private Map<String, String> args;
-
-    public static NameSetter with() {
-      return new Builder().new NameSetter();
-    }
-
-    public final class NameSetter {
-      public AfterName setName(String name) {
-        Builder.this.name = name;
-        return new AfterName();
-      }
-    }
-
-    public final class AfterName {
-      public AfterConfigs withConfigs(Map<String, String> args) {
-        Builder.this.args = args;
-        return new AfterConfigs();
-      }
-
-      public AfterConfigs noConfigs() {
-        Builder.this.args = ImmutableMap.of();
-        return new AfterConfigs();
-      }
-    }
-
-    public final class AfterConfigs {
-      public TwillRunnableSpecification build() {
-        return new DefaultTwillRunnableSpecification(null, name, args);
-      }
-    }
-
-    private Builder() {
-    }
-  }
-}
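
For reference, the fluent builder defined in the file removed above is used along these lines; the runnable name and config values below are placeholders.

import com.google.common.collect.ImmutableMap;
import org.apache.twill.api.TwillRunnableSpecification;

public class RunnableSpecSketch {
  public static void main(String[] args) {
    // Builds a specification with a name and an arbitrary config map.
    TwillRunnableSpecification spec = TwillRunnableSpecification.Builder.with()
      .setName("worker")
      .withConfigs(ImmutableMap.of("threads", "2"))
      .build();
    System.out.println(spec.getName() + " " + spec.getConfigs());
  }
}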

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/api/src/main/java/org/apache/twill/api/TwillRunner.java
----------------------------------------------------------------------
diff --git a/api/src/main/java/org/apache/twill/api/TwillRunner.java b/api/src/main/java/org/apache/twill/api/TwillRunner.java
deleted file mode 100644
index 0393a85..0000000
--- a/api/src/main/java/org/apache/twill/api/TwillRunner.java
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.api;
-
-import org.apache.twill.common.Cancellable;
-
-import java.util.concurrent.TimeUnit;
-
-/**
- * This interface prepares execution of {@link TwillRunnable} and {@link TwillApplication}.
- */
-public interface TwillRunner {
-
-  /**
-   * Interface to represents information of a live application.
-   */
-  interface LiveInfo {
-
-    /**
-     * Returns name of the application.
-     * @return Application name as a {@link String}.
-     */
-    String getApplicationName();
-
-    /**
-     * Returns {@link TwillController}s for all live instances of the application.
-     * @return An {@link Iterable} of {@link TwillController}.
-     */
-    Iterable<TwillController> getControllers();
-  }
-
-  /**
-   * Prepares to run the given {@link TwillRunnable} with {@link ResourceSpecification#BASIC} resource specification.
-   * @param runnable The runnable to run through Twill when {@link TwillPreparer#start()} is called.
-   * @return A {@link TwillPreparer} for setting up runtime options.
-   */
-  TwillPreparer prepare(TwillRunnable runnable);
-
-  /**
-   * Prepares to run the given {@link TwillRunnable} with the given resource specification.
-   * @param runnable The runnable to run through Twill when {@link TwillPreparer#start()} is called.
-   * @param resourceSpecification The resource specification for running the runnable.
-   * @return A {@link TwillPreparer} for setting up runtime options.
-   */
-  TwillPreparer prepare(TwillRunnable runnable, ResourceSpecification resourceSpecification);
-
-  /**
-   * Prepares to run the given {@link TwillApplication} as specified by the application.
-   * @param application The application to run through Twill when {@link TwillPreparer#start()} is called.
-   * @return A {@link TwillPreparer} for setting up runtime options.
-   */
-  TwillPreparer prepare(TwillApplication application);
-
-  /**
-   * Gets a {@link TwillController} for the given application and runId.
-   * @param applicationName Name of the application.
-   * @param runId The runId of the running application.
-   * @return A {@link TwillController} to interact with the application or null if no such runId is found.
-   */
-  TwillController lookup(String applicationName, RunId runId);
-
-  /**
-   * Gets an {@link Iterable} of {@link TwillController} for all running instances of the given application.
-   * @param applicationName Name of the application.
-   * @return A live {@link Iterable} that gives the latest {@link TwillController} set for all running
-   *         instances of the application when {@link Iterable#iterator()} is invoked.
-   */
-  Iterable<TwillController> lookup(String applicationName);
-
-  /**
-   * Gets an {@link Iterable} of {@link LiveInfo}.
-   * @return A live {@link Iterable} that gives the latest information on the set of applications that
-   *         have running instances when {@link Iterable#iterator()}} is invoked.
-   */
-  Iterable<LiveInfo> lookupLive();
-
-  /**
-   * Schedules a periodic update of SecureStore. The first call to the given {@link SecureStoreUpdater} will be made
-   * after {@code initialDelay}, and subsequently with the given {@code delay} between completion of one update
-   * and starting of the next. If exception is thrown on call
-   * {@link SecureStoreUpdater#update(String, RunId)}, the exception will only get logged
-   * and won't suppress the next update call.
-   *
-   * @param updater A {@link SecureStoreUpdater} for creating new SecureStore.
-   * @param initialDelay Delay before the first call to update method.
-   * @param delay Delay between completion of one update call to the next one.
-   * @param unit time unit for the initialDelay and delay.
-   * @return A {@link Cancellable} for cancelling the scheduled update.
-   */
-  Cancellable scheduleSecureStoreUpdate(final SecureStoreUpdater updater,
-                                        long initialDelay, long delay, TimeUnit unit);
-}
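
A hedged sketch of how the interface removed above is used by client code: it assumes TwillPreparer.start() returns a TwillController (the prepare() javadoc only implies this), and the runnable instance is supplied by the caller.

import org.apache.twill.api.TwillController;
import org.apache.twill.api.TwillRunnable;
import org.apache.twill.api.TwillRunner;

public class RunnerSketch {
  public static TwillController launch(TwillRunner runner, TwillRunnable runnable) {
    // Prepare and start the runnable with the BASIC resource specification.
    TwillController controller = runner.prepare(runnable).start();

    // Running applications can later be discovered by name.
    for (TwillRunner.LiveInfo info : runner.lookupLive()) {
      System.out.println("Live application: " + info.getApplicationName());
    }
    return controller;
  }
}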

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/api/src/main/java/org/apache/twill/api/TwillRunnerService.java
----------------------------------------------------------------------
diff --git a/api/src/main/java/org/apache/twill/api/TwillRunnerService.java b/api/src/main/java/org/apache/twill/api/TwillRunnerService.java
deleted file mode 100644
index 76ec136..0000000
--- a/api/src/main/java/org/apache/twill/api/TwillRunnerService.java
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.api;
-
-import com.google.common.util.concurrent.Service;
-
-/**
- * A {@link TwillRunner} that extends {@link Service} to provide lifecycle management functions.
- * The {@link #start()} method needs to be called before calling any other method of this interface.
- * When done with this service, call {@link #stop()} to release any resources that it holds.
- */
-public interface TwillRunnerService extends TwillRunner, Service {
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/api/src/main/java/org/apache/twill/api/TwillSpecification.java
----------------------------------------------------------------------
diff --git a/api/src/main/java/org/apache/twill/api/TwillSpecification.java b/api/src/main/java/org/apache/twill/api/TwillSpecification.java
deleted file mode 100644
index 00d171d..0000000
--- a/api/src/main/java/org/apache/twill/api/TwillSpecification.java
+++ /dev/null
@@ -1,327 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.api;
-
-import org.apache.twill.internal.DefaultLocalFile;
-import org.apache.twill.internal.DefaultRuntimeSpecification;
-import org.apache.twill.internal.DefaultTwillRunnableSpecification;
-import org.apache.twill.internal.DefaultTwillSpecification;
-import com.google.common.base.Function;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-import com.google.common.collect.Sets;
-
-import javax.annotation.Nullable;
-import java.io.File;
-import java.net.URI;
-import java.util.Collection;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-/**
- * Represents specification of a {@link TwillApplication}.
- */
-public interface TwillSpecification {
-
-  /**
-   * Defines execution order.
-   */
-  interface Order {
-
-    enum Type {
-      STARTED,
-      COMPLETED
-    }
-
-    /**
-     * @return Set of {@link TwillRunnable} name that belongs to this order.
-     */
-    Set<String> getNames();
-
-    Type getType();
-  }
-
-  /**
-   * @return Name of the application.
-   */
-  String getName();
-
-  /**
-   * @return A map from {@link TwillRunnable} name to {@link RuntimeSpecification}.
-   */
-  Map<String, RuntimeSpecification> getRunnables();
-
-  /**
-   * @return Returns a list of runnable names that should be executed in the given order.
-   */
-  List<Order> getOrders();
-
-  /**
-   * @return The {@link EventHandlerSpecification} for the {@link EventHandler} to be used for this application,
-   *         or {@code null} if no event handler has been provided.
-   */
-  @Nullable
-  EventHandlerSpecification getEventHandler();
-
-  /**
-   * Builder for constructing instances of {@link TwillSpecification}.
-   */
-  static final class Builder {
-
-    private String name;
-    private Map<String, RuntimeSpecification> runnables = Maps.newHashMap();
-    private List<Order> orders = Lists.newArrayList();
-    private EventHandlerSpecification eventHandler;
-
-    public static NameSetter with() {
-      return new Builder().new NameSetter();
-    }
-
-    public final class NameSetter {
-      public AfterName setName(String name) {
-        Builder.this.name = name;
-        return new AfterName();
-      }
-    }
-
-    public final class AfterName {
-      public MoreRunnable withRunnable() {
-        return new RunnableSetter();
-      }
-    }
-
-    public interface MoreRunnable {
-      RuntimeSpecificationAdder add(TwillRunnable runnable);
-
-      RuntimeSpecificationAdder add(TwillRunnable runnable, ResourceSpecification resourceSpec);
-
-      /**
-       * Adds a {@link TwillRunnable} with {@link ResourceSpecification#BASIC} resource specification.
-       * @param name Name of runnable
-       * @param runnable {@link TwillRunnable} to be run
-       * @return instance of {@link RuntimeSpecificationAdder}
-       */
-      RuntimeSpecificationAdder add(String name, TwillRunnable runnable);
-
-      RuntimeSpecificationAdder add(String name, TwillRunnable runnable, ResourceSpecification resourceSpec);
-    }
-
-    public interface AfterRunnable {
-      FirstOrder withOrder();
-
-      AfterOrder anyOrder();
-    }
-
-    public final class RunnableSetter implements MoreRunnable, AfterRunnable {
-
-      @Override
-      public RuntimeSpecificationAdder add(TwillRunnable runnable) {
-        return add(runnable.configure().getName(), runnable);
-      }
-
-      @Override
-      public RuntimeSpecificationAdder add(TwillRunnable runnable, ResourceSpecification resourceSpec) {
-        return add(runnable.configure().getName(), runnable, resourceSpec);
-      }
-
-      @Override
-      public RuntimeSpecificationAdder add(String name, TwillRunnable runnable) {
-        return add(name, runnable, ResourceSpecification.BASIC);
-      }
-
-      @Override
-      public RuntimeSpecificationAdder add(String name, TwillRunnable runnable,
-                                           final ResourceSpecification resourceSpec) {
-        final TwillRunnableSpecification spec = new DefaultTwillRunnableSpecification(
-                                            runnable.getClass().getName(), name, runnable.configure().getConfigs());
-        return new RuntimeSpecificationAdder(new Function<Collection<LocalFile>, RunnableSetter>() {
-          @Override
-          public RunnableSetter apply(Collection<LocalFile> files) {
-            runnables.put(spec.getName(), new DefaultRuntimeSpecification(spec.getName(), spec, resourceSpec, files));
-            return RunnableSetter.this;
-          }
-        });
-      }
-
-      @Override
-      public FirstOrder withOrder() {
-        return new OrderSetter();
-      }
-
-      @Override
-      public AfterOrder anyOrder() {
-        return new OrderSetter();
-      }
-    }
-
-    /**
-     * For setting runtime-specific settings of a runnable.
-     */
-    public final class RuntimeSpecificationAdder {
-
-      private final Function<Collection<LocalFile>, RunnableSetter> completer;
-
-      RuntimeSpecificationAdder(Function<Collection<LocalFile>, RunnableSetter> completer) {
-        this.completer = completer;
-      }
-
-      public LocalFileAdder withLocalFiles() {
-        return new MoreFile(completer);
-      }
-
-      public RunnableSetter noLocalFiles() {
-        return completer.apply(ImmutableList.<LocalFile>of());
-      }
-    }
-
-    public interface LocalFileAdder {
-      MoreFile add(String name, File file);
-
-      MoreFile add(String name, URI uri);
-
-      MoreFile add(String name, File file, boolean archive);
-
-      MoreFile add(String name, URI uri, boolean archive);
-
-      MoreFile add(String name, File file, String pattern);
-
-      MoreFile add(String name, URI uri, String pattern);
-    }
-
-    public final class MoreFile implements LocalFileAdder {
-
-      private final Function<Collection<LocalFile>, RunnableSetter> completer;
-      private final List<LocalFile> files = Lists.newArrayList();
-
-      public MoreFile(Function<Collection<LocalFile>, RunnableSetter> completer) {
-        this.completer = completer;
-      }
-
-      @Override
-      public MoreFile add(String name, File file) {
-        return add(name, file, false);
-      }
-
-      @Override
-      public MoreFile add(String name, URI uri) {
-        return add(name, uri, false);
-      }
-
-      @Override
-      public MoreFile add(String name, File file, boolean archive) {
-        return add(name, file.toURI(), archive);
-      }
-
-      @Override
-      public MoreFile add(String name, URI uri, boolean archive) {
-        files.add(new DefaultLocalFile(name, uri, -1, -1, archive, null));
-        return this;
-      }
-
-      @Override
-      public MoreFile add(String name, File file, String pattern) {
-        return add(name, file.toURI(), pattern);
-      }
-
-      @Override
-      public MoreFile add(String name, URI uri, String pattern) {
-        files.add(new DefaultLocalFile(name, uri, -1, -1, true, pattern));
-        return this;
-      }
-
-      public RunnableSetter apply() {
-        return completer.apply(files);
-      }
-    }
-
-    public interface FirstOrder {
-      NextOrder begin(String name, String...names);
-    }
-
-    public interface NextOrder extends AfterOrder {
-      NextOrder nextWhenStarted(String name, String...names);
-
-      NextOrder nextWhenCompleted(String name, String...names);
-    }
-
-    public interface AfterOrder {
-      AfterOrder withEventHandler(EventHandler handler);
-
-      TwillSpecification build();
-    }
-
-    public final class OrderSetter implements FirstOrder, NextOrder {
-      @Override
-      public NextOrder begin(String name, String... names) {
-        addOrder(Order.Type.STARTED, name, names);
-        return this;
-      }
-
-      @Override
-      public NextOrder nextWhenStarted(String name, String... names) {
-        addOrder(Order.Type.STARTED, name, names);
-        return this;
-      }
-
-      @Override
-      public NextOrder nextWhenCompleted(String name, String... names) {
-        addOrder(Order.Type.COMPLETED, name, names);
-        return this;
-      }
-
-      @Override
-      public AfterOrder withEventHandler(EventHandler handler) {
-        eventHandler = handler.configure();
-        return this;
-      }
-
-      @Override
-      public TwillSpecification build() {
-        // Set to track which runnables haven't been assigned an order.
-        Set<String> runnableNames = Sets.newHashSet(runnables.keySet());
-        for (Order order : orders) {
-          runnableNames.removeAll(order.getNames());
-        }
-
-        // For all unordered runnables, add them to the end of the orders list.
-        orders.add(new DefaultTwillSpecification.DefaultOrder(runnableNames, Order.Type.STARTED));
-
-        return new DefaultTwillSpecification(name, runnables, orders, eventHandler);
-      }
-
-      private void addOrder(final Order.Type type, String name, String...names) {
-        Preconditions.checkArgument(name != null, "Name cannot be null.");
-        Preconditions.checkArgument(runnables.containsKey(name), "Runnable does not exist.");
-
-        Set<String> runnableNames = Sets.newHashSet(name);
-        for (String runnableName : names) {
-          Preconditions.checkArgument(runnableName != null, "Name cannot be null.");
-          Preconditions.checkArgument(runnables.containsKey(runnableName), "Runnable does not exist.");
-          runnableNames.add(runnableName);
-        }
-
-        orders.add(new DefaultTwillSpecification.DefaultOrder(runnableNames, type));
-      }
-    }
-
-    private Builder() {}
-  }
-}
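
The fluent Builder above is driven in stages: name, runnables (with optional local
files), then execution order. A minimal sketch of how an application would use it
(DriverRunnable, WorkerRunnable and the local file name are hypothetical):

    TwillSpecification spec = TwillSpecification.Builder.with()
      .setName("MyApp")
      .withRunnable()
        .add("driver", new DriverRunnable())
          .noLocalFiles()
        .add("worker", new WorkerRunnable(), ResourceSpecification.BASIC)
          .withLocalFiles()
            .add("config", new File("worker-config.xml"))
          .apply()
      .withOrder()
        .begin("driver")
        .nextWhenStarted("worker")
      .build();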

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/api/src/main/java/org/apache/twill/api/logging/LogEntry.java
----------------------------------------------------------------------
diff --git a/api/src/main/java/org/apache/twill/api/logging/LogEntry.java b/api/src/main/java/org/apache/twill/api/logging/LogEntry.java
deleted file mode 100644
index 4995328..0000000
--- a/api/src/main/java/org/apache/twill/api/logging/LogEntry.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.api.logging;
-
-/**
- * Represents a log entry emitted by the application.
- */
-public interface LogEntry {
-
-  /**
-   * Log level.
-   */
-  enum Level {
-    FATAL,
-    ERROR,
-    WARN,
-    INFO,
-    DEBUG,
-    TRACE
-  }
-
-  String getLoggerName();
-
-  String getHost();
-
-  long getTimestamp();
-
-  Level getLogLevel();
-
-  String getSourceClassName();
-
-  String getSourceMethodName();
-
-  String getFileName();
-
-  int getLineNumber();
-
-  String getThreadName();
-
-  String getMessage();
-
-  StackTraceElement[] getStackTraces();
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/api/src/main/java/org/apache/twill/api/logging/LogHandler.java
----------------------------------------------------------------------
diff --git a/api/src/main/java/org/apache/twill/api/logging/LogHandler.java b/api/src/main/java/org/apache/twill/api/logging/LogHandler.java
deleted file mode 100644
index afded19..0000000
--- a/api/src/main/java/org/apache/twill/api/logging/LogHandler.java
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.api.logging;
-
-/**
- * Handler for receiving {@link LogEntry} emitted by the running application.
- */
-public interface LogHandler {
-
-  void onLog(LogEntry logEntry);
-}
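
LogHandler is the callback an application registers to receive LogEntry instances.
A trivial implementation (the class name is hypothetical) could look like:

    import java.util.concurrent.atomic.AtomicInteger;

    public final class ErrorCountingLogHandler implements LogHandler {
      private final AtomicInteger errors = new AtomicInteger();

      @Override
      public void onLog(LogEntry logEntry) {
        // Count entries emitted at ERROR level.
        if (logEntry.getLogLevel() == LogEntry.Level.ERROR) {
          errors.incrementAndGet();
        }
      }

      public int getErrorCount() {
        return errors.get();
      }
    }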

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/api/src/main/java/org/apache/twill/api/logging/PrinterLogHandler.java
----------------------------------------------------------------------
diff --git a/api/src/main/java/org/apache/twill/api/logging/PrinterLogHandler.java b/api/src/main/java/org/apache/twill/api/logging/PrinterLogHandler.java
deleted file mode 100644
index 71a2bca..0000000
--- a/api/src/main/java/org/apache/twill/api/logging/PrinterLogHandler.java
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.api.logging;
-
-import com.google.common.base.Splitter;
-
-import java.io.PrintWriter;
-import java.text.DateFormat;
-import java.text.SimpleDateFormat;
-import java.util.Date;
-import java.util.Formatter;
-import java.util.TimeZone;
-
-/**
- * A {@link LogHandler} that prints the {@link LogEntry} through a {@link PrintWriter}.
- */
-public final class PrinterLogHandler implements LogHandler {
-
-  private static final ThreadLocal<DateFormat> DATE_FORMAT = new ThreadLocal<DateFormat>() {
-    @Override
-    protected DateFormat initialValue() {
-      DateFormat format = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss,SSS'Z'");
-      format.setTimeZone(TimeZone.getTimeZone("UTC"));
-      return format;
-    }
-  };
-
-  private final PrintWriter writer;
-  private final Formatter formatter;
-
-  /**
-   * Creates a {@link PrinterLogHandler} that writes {@link LogEntry} to the given {@link PrintWriter}.
-   * @param writer The writer that log entries will be written to.
-   */
-  public PrinterLogHandler(PrintWriter writer) {
-    this.writer = writer;
-    this.formatter = new Formatter(writer);
-  }
-
-  @Override
-  public void onLog(LogEntry logEntry) {
-    String utc = timestampToUTC(logEntry.getTimestamp());
-
-    formatter.format("%s %-5s %s [%s] [%s] %s:%s(%s:%d) - %s\n",
-                     utc,
-                     logEntry.getLogLevel().name(),
-                     getShortenLoggerName(logEntry.getLoggerName()),
-                     logEntry.getHost(),
-                     logEntry.getThreadName(),
-                     getSimpleClassName(logEntry.getSourceClassName()),
-                     logEntry.getSourceMethodName(),
-                     logEntry.getFileName(),
-                     logEntry.getLineNumber(),
-                     logEntry.getMessage());
-    formatter.flush();
-
-    StackTraceElement[] stackTraces = logEntry.getStackTraces();
-    if (stackTraces != null) {
-      for (StackTraceElement stackTrace : stackTraces) {
-        writer.append("\tat ").append(stackTrace.toString());
-        writer.println();
-      }
-      writer.flush();
-    }
-  }
-
-  private String timestampToUTC(long timestamp) {
-    return DATE_FORMAT.get().format(new Date(timestamp));
-  }
-
-  private String getShortenLoggerName(String loggerName) {
-    StringBuilder builder = new StringBuilder();
-    String previous = null;
-    for (String part : Splitter.on('.').split(loggerName)) {
-      if (previous != null) {
-        builder.append(previous.charAt(0)).append('.');
-      }
-      previous = part;
-    }
-    return builder.append(previous).toString();
-  }
-
-  private String getSimpleClassName(String className) {
-    return className.substring(className.lastIndexOf('.') + 1);
-  }
-}
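
As a usage sketch, the handler is typically attached when preparing an application
so that collected log entries are printed to the local console (MyRunnable is
hypothetical; addLogHandler is a method on TwillPreparer):

    TwillController controller = runner.prepare(new MyRunnable())
      .addLogHandler(new PrinterLogHandler(new PrintWriter(System.out, true)))
      .start();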

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/api/src/main/java/org/apache/twill/api/logging/package-info.java
----------------------------------------------------------------------
diff --git a/api/src/main/java/org/apache/twill/api/logging/package-info.java b/api/src/main/java/org/apache/twill/api/logging/package-info.java
deleted file mode 100644
index e325c18..0000000
--- a/api/src/main/java/org/apache/twill/api/logging/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * This package contains classes for handling logging events.
- */
-package org.apache.twill.api.logging;

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/api/src/main/java/org/apache/twill/api/package-info.java
----------------------------------------------------------------------
diff --git a/api/src/main/java/org/apache/twill/api/package-info.java b/api/src/main/java/org/apache/twill/api/package-info.java
deleted file mode 100644
index 5d9df6b..0000000
--- a/api/src/main/java/org/apache/twill/api/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * Classes in this package provide the core functionality of the Twill library.
- */
-package org.apache.twill.api;

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/api/src/main/java/org/apache/twill/internal/DefaultEventHandlerSpecification.java
----------------------------------------------------------------------
diff --git a/api/src/main/java/org/apache/twill/internal/DefaultEventHandlerSpecification.java b/api/src/main/java/org/apache/twill/internal/DefaultEventHandlerSpecification.java
deleted file mode 100644
index df21400..0000000
--- a/api/src/main/java/org/apache/twill/internal/DefaultEventHandlerSpecification.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal;
-
-import org.apache.twill.api.EventHandler;
-import org.apache.twill.api.EventHandlerSpecification;
-import com.google.common.collect.ImmutableMap;
-
-import java.util.Map;
-
-/**
- * Straightforward implementation of {@link EventHandlerSpecification}.
- */
-public class DefaultEventHandlerSpecification implements EventHandlerSpecification {
-
-  private final String className;
-  private final Map<String, String> configs;
-
-  public DefaultEventHandlerSpecification(String className, Map<String, String> configs) {
-    this.className = className;
-    this.configs = configs;
-  }
-
-  public DefaultEventHandlerSpecification(EventHandler eventHandler) {
-    EventHandlerSpecification spec = eventHandler.configure();
-    this.className = eventHandler.getClass().getName();
-    this.configs = ImmutableMap.copyOf(spec.getConfigs());
-  }
-
-  @Override
-  public String getClassName() {
-    return className;
-  }
-
-  @Override
-  public Map<String, String> getConfigs() {
-    return configs;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/api/src/main/java/org/apache/twill/internal/DefaultLocalFile.java
----------------------------------------------------------------------
diff --git a/api/src/main/java/org/apache/twill/internal/DefaultLocalFile.java b/api/src/main/java/org/apache/twill/internal/DefaultLocalFile.java
deleted file mode 100644
index e43c0c0..0000000
--- a/api/src/main/java/org/apache/twill/internal/DefaultLocalFile.java
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal;
-
-import org.apache.twill.api.LocalFile;
-
-import javax.annotation.Nullable;
-import java.net.URI;
-
-/**
- * A straightforward implementation of {@link LocalFile}.
- */
-public final class DefaultLocalFile implements LocalFile {
-
-  private final String name;
-  private final URI uri;
-  private final long lastModified;
-  private final long size;
-  private final boolean archive;
-  private final String pattern;
-
-  public DefaultLocalFile(String name, URI uri, long lastModified,
-                          long size, boolean archive, @Nullable String pattern) {
-    this.name = name;
-    this.uri = uri;
-    this.lastModified = lastModified;
-    this.size = size;
-    this.archive = archive;
-    this.pattern = pattern;
-  }
-
-  @Override
-  public String getName() {
-    return name;
-  }
-
-  @Override
-  public URI getURI() {
-    return uri;
-  }
-
-  @Override
-  public long getLastModified() {
-    return lastModified;
-  }
-
-  @Override
-  public long getSize() {
-    return size;
-  }
-
-  @Override
-  public boolean isArchive() {
-    return archive;
-  }
-
-  @Override
-  public String getPattern() {
-    return pattern;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/api/src/main/java/org/apache/twill/internal/DefaultResourceReport.java
----------------------------------------------------------------------
diff --git a/api/src/main/java/org/apache/twill/internal/DefaultResourceReport.java b/api/src/main/java/org/apache/twill/internal/DefaultResourceReport.java
deleted file mode 100644
index c4c8a29..0000000
--- a/api/src/main/java/org/apache/twill/internal/DefaultResourceReport.java
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal;
-
-import org.apache.twill.api.ResourceReport;
-import org.apache.twill.api.TwillRunResources;
-import com.google.common.collect.HashMultimap;
-import com.google.common.collect.Multimaps;
-import com.google.common.collect.SetMultimap;
-
-import java.util.Collection;
-import java.util.Map;
-
-/**
- * Implementation of {@link org.apache.twill.api.ResourceReport} with some
- * additional methods for maintaining the report.
- */
-public final class DefaultResourceReport implements ResourceReport {
-  private final SetMultimap<String, TwillRunResources> usedResources;
-  private final TwillRunResources appMasterResources;
-  private final String applicationId;
-
-  public DefaultResourceReport(String applicationId, TwillRunResources masterResources) {
-    this.applicationId = applicationId;
-    this.appMasterResources = masterResources;
-    this.usedResources = HashMultimap.create();
-  }
-
-  public DefaultResourceReport(String applicationId, TwillRunResources masterResources,
-                               Map<String, Collection<TwillRunResources>> resources) {
-    this.applicationId = applicationId;
-    this.appMasterResources = masterResources;
-    this.usedResources = HashMultimap.create();
-    for (Map.Entry<String, Collection<TwillRunResources>> entry : resources.entrySet()) {
-      this.usedResources.putAll(entry.getKey(), entry.getValue());
-    }
-  }
-
-  /**
-   * Add resources used by an instance of the runnable.
-   *
-   * @param runnableName name of runnable.
-   * @param resources resources to add.
-   */
-  public void addRunResources(String runnableName, TwillRunResources resources) {
-    usedResources.put(runnableName, resources);
-  }
-
-  /**
-   * Remove the resource corresponding to the given runnable and container.
-   *
-   * @param runnableName name of runnable.
-   * @param containerId container id of the runnable.
-   */
-  public void removeRunnableResources(String runnableName, String containerId) {
-    TwillRunResources toRemove = null;
-    // could be faster if usedResources was a Table, but that makes returning the
-    // report a little more complex, and this does not need to be terribly fast.
-    for (TwillRunResources resources : usedResources.get(runnableName)) {
-      if (resources.getContainerId().equals(containerId)) {
-        toRemove = resources;
-        break;
-      }
-    }
-    usedResources.remove(runnableName, toRemove);
-  }
-
-  /**
-   * Get all the run resources being used by all instances of the specified runnable.
-   *
-   * @param runnableName the runnable name.
-   * @return resources being used by all instances of the runnable.
-   */
-  @Override
-  public Collection<TwillRunResources> getRunnableResources(String runnableName) {
-    return usedResources.get(runnableName);
-  }
-
-  /**
-   * Get all the run resources being used across all runnables.
-   *
-   * @return all run resources used by all instances of all runnables.
-   */
-  @Override
-  public Map<String, Collection<TwillRunResources>> getResources() {
-    return Multimaps.unmodifiableSetMultimap(usedResources).asMap();
-  }
-
-  /**
-   * Get the resources application master is using.
-   *
-   * @return resources being used by the application master.
-   */
-  @Override
-  public TwillRunResources getAppMasterResources() {
-    return appMasterResources;
-  }
-
-  /**
-   * Get the id of the application master.
-   *
-   * @return id of the application master.
-   */
-  @Override
-  public String getApplicationId() {
-    return applicationId;
-  }
-}
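
The report is maintained by the application master: resources are added when a
container starts and removed when it completes. A minimal sketch (the ids and
resource values are made up; appMasterResources is assumed to exist):

    DefaultResourceReport report =
      new DefaultResourceReport("application_1234_0001", appMasterResources);
    report.addRunResources("worker",
      new DefaultTwillRunResources(0, "container_01", 1, 512, "host1.example.com"));
    // later, when the container completes:
    report.removeRunnableResources("worker", "container_01");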

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/api/src/main/java/org/apache/twill/internal/DefaultResourceSpecification.java
----------------------------------------------------------------------
diff --git a/api/src/main/java/org/apache/twill/internal/DefaultResourceSpecification.java b/api/src/main/java/org/apache/twill/internal/DefaultResourceSpecification.java
deleted file mode 100644
index 1327ce5..0000000
--- a/api/src/main/java/org/apache/twill/internal/DefaultResourceSpecification.java
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal;
-
-import org.apache.twill.api.ResourceSpecification;
-
-/**
- * Straightforward implementation of {@link org.apache.twill.api.ResourceSpecification}.
- */
-public final class DefaultResourceSpecification implements ResourceSpecification {
-  private final int virtualCores;
-  private final int memorySize;
-  private final int instances;
-  private final int uplink;
-  private final int downlink;
-
-  public DefaultResourceSpecification(int virtualCores, int memorySize, int instances, int uplink, int downlink) {
-    this.virtualCores = virtualCores;
-    this.memorySize = memorySize;
-    this.instances = instances;
-    this.uplink = uplink;
-    this.downlink = downlink;
-  }
-
-  @Deprecated
-  @Override
-  public int getCores() {
-    return virtualCores;
-  }
-
-  @Override
-  public int getVirtualCores() {
-    return virtualCores;
-  }
-
-  @Override
-  public int getMemorySize() {
-    return memorySize;
-  }
-
-  @Override
-  public int getInstances() {
-    return instances;
-  }
-
-  @Override
-  public int getUplink() {
-    return uplink;
-  }
-
-  @Override
-  public int getDownlink() {
-    return downlink;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/api/src/main/java/org/apache/twill/internal/DefaultRuntimeSpecification.java
----------------------------------------------------------------------
diff --git a/api/src/main/java/org/apache/twill/internal/DefaultRuntimeSpecification.java b/api/src/main/java/org/apache/twill/internal/DefaultRuntimeSpecification.java
deleted file mode 100644
index c4f496e..0000000
--- a/api/src/main/java/org/apache/twill/internal/DefaultRuntimeSpecification.java
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal;
-
-import org.apache.twill.api.LocalFile;
-import org.apache.twill.api.ResourceSpecification;
-import org.apache.twill.api.RuntimeSpecification;
-import org.apache.twill.api.TwillRunnableSpecification;
-import com.google.common.collect.ImmutableList;
-
-import java.util.Collection;
-
-/**
- * Straightforward implementation of {@link RuntimeSpecification}.
- */
-public final class DefaultRuntimeSpecification implements RuntimeSpecification {
-
-  private final String name;
-  private final TwillRunnableSpecification runnableSpec;
-  private final ResourceSpecification resourceSpec;
-  private final Collection<LocalFile> localFiles;
-
-  public DefaultRuntimeSpecification(String name,
-                                     TwillRunnableSpecification runnableSpec,
-                                     ResourceSpecification resourceSpec,
-                                     Collection<LocalFile> localFiles) {
-    this.name = name;
-    this.runnableSpec = runnableSpec;
-    this.resourceSpec = resourceSpec;
-    this.localFiles = ImmutableList.copyOf(localFiles);
-  }
-
-  @Override
-  public String getName() {
-    return name;
-  }
-
-  @Override
-  public TwillRunnableSpecification getRunnableSpecification() {
-    return runnableSpec;
-  }
-
-  @Override
-  public ResourceSpecification getResourceSpecification() {
-    return resourceSpec;
-  }
-
-  @Override
-  public Collection<LocalFile> getLocalFiles() {
-    return localFiles;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/api/src/main/java/org/apache/twill/internal/DefaultTwillRunResources.java
----------------------------------------------------------------------
diff --git a/api/src/main/java/org/apache/twill/internal/DefaultTwillRunResources.java b/api/src/main/java/org/apache/twill/internal/DefaultTwillRunResources.java
deleted file mode 100644
index bd8f8f5..0000000
--- a/api/src/main/java/org/apache/twill/internal/DefaultTwillRunResources.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal;
-
-import org.apache.twill.api.TwillRunResources;
-
-/**
- *  Straightforward implementation of {@link org.apache.twill.api.TwillRunResources}.
- */
-public class DefaultTwillRunResources implements TwillRunResources {
-  private final String containerId;
-  private final int instanceId;
-  private final int virtualCores;
-  private final int memoryMB;
-  private final String host;
-
-  public DefaultTwillRunResources(int instanceId, String containerId,
-                                  int cores, int memoryMB, String host) {
-    this.instanceId = instanceId;
-    this.containerId = containerId;
-    this.virtualCores = cores;
-    this.memoryMB = memoryMB;
-    this.host = host;
-  }
-
-  /**
-   * @return instance id of the runnable.
-   */
-  @Override
-  public int getInstanceId() {
-    return instanceId;
-  }
-
-  /**
-   * @return id of the container the runnable is running in.
-   */
-  @Override
-  public String getContainerId() {
-    return containerId;
-  }
-
-  /**
-   * @return number of virtual cores the runnable is allowed to use. YARN must be at least v2.1.0 and
-   *   configured to use cgroups for this limit to actually be enforced.
-   */
-  @Override
-  public int getVirtualCores() {
-    return virtualCores;
-  }
-
-  /**
-   * @return amount of memory in MB the runnable is allowed to use.
-   */
-  @Override
-  public int getMemoryMB() {
-    return memoryMB;
-  }
-
-  /**
-   * @return the host the runnable is running on.
-   */
-  @Override
-  public String getHost() {
-    return host;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (!(o instanceof TwillRunResources)) {
-      return false;
-    }
-    TwillRunResources other = (TwillRunResources) o;
-    return (instanceId == other.getInstanceId()) &&
-      containerId.equals(other.getContainerId()) &&
-      host.equals(other.getHost()) &&
-      (virtualCores == other.getVirtualCores()) &&
-      (memoryMB == other.getMemoryMB());
-  }
-
-  @Override
-  public int hashCode() {
-    int hash = 17;
-    hash = 31 * hash + containerId.hashCode();
-    hash = 31 * hash + host.hashCode();
-    hash = 31 * hash + instanceId;
-    hash = 31 * hash + virtualCores;
-    hash = 31 * hash + memoryMB;
-    return hash;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/api/src/main/java/org/apache/twill/internal/DefaultTwillRunnableSpecification.java
----------------------------------------------------------------------
diff --git a/api/src/main/java/org/apache/twill/internal/DefaultTwillRunnableSpecification.java b/api/src/main/java/org/apache/twill/internal/DefaultTwillRunnableSpecification.java
deleted file mode 100644
index 14ea7f5..0000000
--- a/api/src/main/java/org/apache/twill/internal/DefaultTwillRunnableSpecification.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal;
-
-import org.apache.twill.api.TwillRunnableSpecification;
-import com.google.common.collect.ImmutableMap;
-
-import java.util.Map;
-
-/**
- * Straightforward implementation of {@link org.apache.twill.api.TwillRunnableSpecification}.
- */
-public final class DefaultTwillRunnableSpecification implements TwillRunnableSpecification {
-
-  private final String className;
-  private final String name;
-  private final Map<String, String> arguments;
-
-  public DefaultTwillRunnableSpecification(String className, String name, Map<String, String> arguments) {
-    this.className = className;
-    this.name = name;
-    this.arguments = ImmutableMap.copyOf(arguments);
-  }
-
-  public DefaultTwillRunnableSpecification(String className, TwillRunnableSpecification other) {
-    this.className = className;
-    this.name = other.getName();
-    this.arguments = ImmutableMap.copyOf(other.getConfigs());
-  }
-
-  @Override
-  public String getClassName() {
-    return className;
-  }
-
-  @Override
-  public String getName() {
-    return name;
-  }
-
-  @Override
-  public Map<String, String> getConfigs() {
-    return arguments;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/api/src/main/java/org/apache/twill/internal/DefaultTwillSpecification.java
----------------------------------------------------------------------
diff --git a/api/src/main/java/org/apache/twill/internal/DefaultTwillSpecification.java b/api/src/main/java/org/apache/twill/internal/DefaultTwillSpecification.java
deleted file mode 100644
index 6bb2b15..0000000
--- a/api/src/main/java/org/apache/twill/internal/DefaultTwillSpecification.java
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal;
-
-import org.apache.twill.api.EventHandlerSpecification;
-import org.apache.twill.api.RuntimeSpecification;
-import org.apache.twill.api.TwillSpecification;
-import com.google.common.base.Objects;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.ImmutableSet;
-
-import javax.annotation.Nullable;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-/**
- * Straightforward implementation of {@link org.apache.twill.api.TwillSpecification}.
- */
-public final class DefaultTwillSpecification implements TwillSpecification {
-
-  private final String name;
-  private final Map<String, RuntimeSpecification> runnables;
-  private final List<Order> orders;
-  private final EventHandlerSpecification eventHandler;
-
-  public DefaultTwillSpecification(String name, Map<String, RuntimeSpecification> runnables,
-                                   List<Order> orders, EventHandlerSpecification eventHandler) {
-    this.name = name;
-    this.runnables = ImmutableMap.copyOf(runnables);
-    this.orders = ImmutableList.copyOf(orders);
-    this.eventHandler = eventHandler;
-  }
-
-  @Override
-  public String getName() {
-    return name;
-  }
-
-  @Override
-  public Map<String, RuntimeSpecification> getRunnables() {
-    return runnables;
-  }
-
-  @Override
-  public List<Order> getOrders() {
-    return orders;
-  }
-
-  @Nullable
-  @Override
-  public EventHandlerSpecification getEventHandler() {
-    return eventHandler;
-  }
-
-  /**
-   * Straightforward implementation of {@link Order}.
-   */
-  public static final class DefaultOrder implements Order {
-
-    private final Set<String> names;
-    private final Type type;
-
-    public DefaultOrder(Iterable<String> names, Type type) {
-      this.names = ImmutableSet.copyOf(names);
-      this.type = type;
-    }
-
-    @Override
-    public Set<String> getNames() {
-      return names;
-    }
-
-    @Override
-    public Type getType() {
-      return type;
-    }
-
-    @Override
-    public String toString() {
-      return Objects.toStringHelper(this)
-        .add("names", names)
-        .add("type", type)
-        .toString();
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/api/src/main/java/org/apache/twill/internal/RunIds.java
----------------------------------------------------------------------
diff --git a/api/src/main/java/org/apache/twill/internal/RunIds.java b/api/src/main/java/org/apache/twill/internal/RunIds.java
deleted file mode 100644
index 7249d81..0000000
--- a/api/src/main/java/org/apache/twill/internal/RunIds.java
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal;
-
-import org.apache.twill.api.RunId;
-import com.google.common.base.Preconditions;
-
-import java.util.UUID;
-
-/**
- * Factory class for creating instances of {@link org.apache.twill.api.RunId}.
- */
-public final class RunIds {
-
-  public static RunId generate() {
-    return new RunIdImpl(UUID.randomUUID().toString());
-  }
-
-  public static RunId fromString(String str) {
-    return new RunIdImpl(str);
-  }
-
-  private RunIds() {
-  }
-
-  private static final class RunIdImpl implements RunId {
-
-    final String id;
-
-    private RunIdImpl(String id) {
-      Preconditions.checkArgument(id != null, "RunId cannot be null.");
-      this.id = id;
-    }
-
-    @Override
-    public String getId() {
-      return id;
-    }
-
-    @Override
-    public String toString() {
-      return getId();
-    }
-
-    @Override
-    public boolean equals(Object other) {
-      if (this == other) {
-        return true;
-      }
-      if (other == null || !(other instanceof RunId)) {
-        return false;
-      }
-      return id.equals(((RunId)other).getId());
-    }
-
-    @Override
-    public int hashCode() {
-      return id.hashCode();
-    }
-  }
-}
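
RunIds is the factory used to mint new run identifiers and to rebuild them from
their string form, for example:

    RunId runId = RunIds.generate();
    RunId same = RunIds.fromString(runId.getId());
    // runId.equals(same) is true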

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/api/src/main/java/org/apache/twill/internal/package-info.java
----------------------------------------------------------------------
diff --git a/api/src/main/java/org/apache/twill/internal/package-info.java b/api/src/main/java/org/apache/twill/internal/package-info.java
deleted file mode 100644
index 8af8362..0000000
--- a/api/src/main/java/org/apache/twill/internal/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Internal classes for Twill API.
- */
-package org.apache.twill.internal;

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/common/pom.xml
----------------------------------------------------------------------
diff --git a/common/pom.xml b/common/pom.xml
deleted file mode 100644
index a4372f6..0000000
--- a/common/pom.xml
+++ /dev/null
@@ -1,51 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-    <parent>
-        <artifactId>twill-parent</artifactId>
-        <groupId>org.apache.twill</groupId>
-        <version>0.1.0-SNAPSHOT</version>
-    </parent>
-    <modelVersion>4.0.0</modelVersion>
-
-    <artifactId>twill-common</artifactId>
-    <name>Twill common library</name>
-
-    <dependencies>
-        <dependency>
-            <groupId>com.google.guava</groupId>
-            <artifactId>guava</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>com.google.code.findbugs</groupId>
-            <artifactId>jsr305</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.slf4j</groupId>
-            <artifactId>slf4j-api</artifactId>
-            <scope>test</scope>
-        </dependency>
-        <dependency>
-            <groupId>junit</groupId>
-            <artifactId>junit</artifactId>
-        </dependency>
-    </dependencies>
-</project>

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/common/src/main/java/org/apache/twill/common/Cancellable.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/twill/common/Cancellable.java b/common/src/main/java/org/apache/twill/common/Cancellable.java
deleted file mode 100644
index 08f22d3..0000000
--- a/common/src/main/java/org/apache/twill/common/Cancellable.java
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.twill.common;
-
-/**
- * Something, usually a task, that can be cancelled. Cancellation is performed by the cancel method.
- */
-public interface Cancellable {
-  /**
-   * Attempts to cancel execution of this task.
-   */
-  void cancel();
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/common/src/main/java/org/apache/twill/common/ServiceListenerAdapter.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/twill/common/ServiceListenerAdapter.java b/common/src/main/java/org/apache/twill/common/ServiceListenerAdapter.java
deleted file mode 100644
index 527ba7d..0000000
--- a/common/src/main/java/org/apache/twill/common/ServiceListenerAdapter.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.common;
-
-import com.google.common.util.concurrent.Service;
-
-/**
- * An adapter for implementing {@link Service.Listener} with all methods defaulting to no-op.
- */
-public abstract class ServiceListenerAdapter implements Service.Listener {
-  @Override
-  public void starting() {
-    // No-op
-  }
-
-  @Override
-  public void running() {
-    // No-op
-  }
-
-  @Override
-  public void stopping(Service.State from) {
-    // No-op
-  }
-
-  @Override
-  public void terminated(Service.State from) {
-    // No-op
-  }
-
-  @Override
-  public void failed(Service.State from, Throwable failure) {
-    // No-op
-  }
-}
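
The adapter lets callers override only the callbacks they care about, for example
(a sketch; the service instance is assumed to exist):

    service.addListener(new ServiceListenerAdapter() {
      @Override
      public void failed(Service.State from, Throwable failure) {
        // React only to failures; all other callbacks stay no-op.
        failure.printStackTrace();
      }
    }, Threads.SAME_THREAD_EXECUTOR);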

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/common/src/main/java/org/apache/twill/common/Services.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/twill/common/Services.java b/common/src/main/java/org/apache/twill/common/Services.java
deleted file mode 100644
index 7e294f0..0000000
--- a/common/src/main/java/org/apache/twill/common/Services.java
+++ /dev/null
@@ -1,140 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.common;
-
-import com.google.common.collect.Lists;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.Service;
-import com.google.common.util.concurrent.SettableFuture;
-
-import java.util.List;
-import java.util.concurrent.atomic.AtomicInteger;
-
-/**
- * Utility methods to help deal with {@link Service}.
- */
-public final class Services {
-
-  /**
-   * Starts a list of {@link Service} one by one. Starting of the next Service is triggered from the listener
-   * callback thread of the previous Service.
-   *
-   * @param firstService First service to start.
-   * @param moreServices The rest services to start.
-   * @return A {@link ListenableFuture} that will be completed when all services are started, with the
-   *         result carrying the completed {@link ListenableFuture} of each corresponding service, in the
-   *         same order as they are passed to this method.
-   */
-  public static ListenableFuture<List<ListenableFuture<Service.State>>> chainStart(Service firstService,
-                                                                                   Service...moreServices) {
-    return doChain(true, firstService, moreServices);
-  }
-
-  /**
-   * Stops a list of {@link Service} one by one. It behaves the same as
-   * {@link #chainStart(com.google.common.util.concurrent.Service, com.google.common.util.concurrent.Service...)}
-   * except {@link com.google.common.util.concurrent.Service#stop()} is called instead of start.
-   *
-   * @param firstService First service to stop.
-   * @param moreServices The rest services to stop.
-   * @return A {@link ListenableFuture} that will be completed when all services are stopped.
-   * @see #chainStart(com.google.common.util.concurrent.Service, com.google.common.util.concurrent.Service...)
-   */
-  public static ListenableFuture<List<ListenableFuture<Service.State>>> chainStop(Service firstService,
-                                                                                  Service...moreServices) {
-    return doChain(false, firstService, moreServices);
-  }
-
-  /**
-   * Returns a {@link ListenableFuture} that will be completed when the given service is stopped. If the service
-   * stops due to an error, the failure cause will be reflected in the future.
-   *
-   * @param service The {@link Service} to block on.
-   * @return A {@link ListenableFuture} that will be completed when the service is stopped.
-   */
-  public static ListenableFuture<Service.State> getCompletionFuture(Service service) {
-    final SettableFuture<Service.State> resultFuture = SettableFuture.create();
-
-    service.addListener(new ServiceListenerAdapter() {
-      @Override
-      public void terminated(Service.State from) {
-        resultFuture.set(Service.State.TERMINATED);
-      }
-
-      @Override
-      public void failed(Service.State from, Throwable failure) {
-        resultFuture.setException(failure);
-      }
-    }, Threads.SAME_THREAD_EXECUTOR);
-
-    Service.State state = service.state();
-    if (state == Service.State.TERMINATED) {
-      return Futures.immediateFuture(state);
-    } else if (state == Service.State.FAILED) {
-      return Futures.immediateFailedFuture(new IllegalStateException("Service failed with unknown exception."));
-    }
-
-    return resultFuture;
-  }
-
-  /**
-   * Performs the actual logic of chained Service start/stop.
-   */
-  private static ListenableFuture<List<ListenableFuture<Service.State>>> doChain(boolean doStart,
-                                                                                 Service firstService,
-                                                                                 Service...moreServices) {
-    SettableFuture<List<ListenableFuture<Service.State>>> resultFuture = SettableFuture.create();
-    List<ListenableFuture<Service.State>> result = Lists.newArrayListWithCapacity(moreServices.length + 1);
-
-    ListenableFuture<Service.State> future = doStart ? firstService.start() : firstService.stop();
-    future.addListener(createChainListener(future, moreServices, new AtomicInteger(0), result, resultFuture, doStart),
-                       Threads.SAME_THREAD_EXECUTOR);
-    return resultFuture;
-  }
-
-  /**
-   * Returns a {@link Runnable} that can be used as a {@link ListenableFuture} listener to trigger
-   * further service actions or to complete the result future. Used by
-   * {@link #doChain(boolean, com.google.common.util.concurrent.Service, com.google.common.util.concurrent.Service...)}
-   */
-  private static Runnable createChainListener(final ListenableFuture<Service.State> future, final Service[] services,
-                                              final AtomicInteger idx,
-                                              final List<ListenableFuture<Service.State>> result,
-                                              final SettableFuture<List<ListenableFuture<Service.State>>> resultFuture,
-                                              final boolean doStart) {
-    return new Runnable() {
-
-      @Override
-      public void run() {
-        result.add(future);
-        int nextIdx = idx.getAndIncrement();
-        if (nextIdx == services.length) {
-          resultFuture.set(result);
-          return;
-        }
-        ListenableFuture<Service.State> actionFuture = doStart ? services[nextIdx].start() : services[nextIdx].stop();
-        actionFuture.addListener(createChainListener(actionFuture, services, idx, result, resultFuture, doStart),
-                                 Threads.SAME_THREAD_EXECUTOR);
-      }
-    };
-  }
-
-  private Services() {
-  }
-}
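
For orientation, a minimal sketch (not part of this commit) of how the removed Services helper is typically used; it assumes the org.apache.twill.common.Services class shown above and a Guava version in which Service.start()/stop() return futures:

import com.google.common.util.concurrent.AbstractIdleService;
import com.google.common.util.concurrent.Service;
import org.apache.twill.common.Services;

public final class ServicesChainExample {

  // A trivial service that only logs its lifecycle transitions.
  private static final class NamedService extends AbstractIdleService {
    private final String name;

    NamedService(String name) {
      this.name = name;
    }

    @Override
    protected void startUp() {
      System.out.println(name + " started");
    }

    @Override
    protected void shutDown() {
      System.out.println(name + " stopped");
    }
  }

  public static void main(String[] args) throws Exception {
    Service zk = new NamedService("zk");
    Service kafka = new NamedService("kafka");

    // Starts "zk" first; "kafka" is only started from the listener callback of the first
    // start future. The returned future completes once every service has been started.
    Services.chainStart(zk, kafka).get();

    // Stops them one by one, in the given order.
    Services.chainStop(zk, kafka).get();
  }
}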

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/common/src/main/java/org/apache/twill/common/Threads.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/twill/common/Threads.java b/common/src/main/java/org/apache/twill/common/Threads.java
deleted file mode 100644
index e33a677..0000000
--- a/common/src/main/java/org/apache/twill/common/Threads.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.common;
-
-import com.google.common.util.concurrent.MoreExecutors;
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
-
-import java.util.concurrent.Executor;
-import java.util.concurrent.ThreadFactory;
-
-/**
- * Utility class for creating daemon thread factories and common executors.
- */
-public final class Threads {
-
-  /**
-   * An executor that executes tasks on the submitter's thread.
-   */
-  public static final Executor SAME_THREAD_EXECUTOR = MoreExecutors.sameThreadExecutor();
-
-  /**
-   * Handy method to create a {@link ThreadFactory} that creates daemon threads with the given name format.
-   *
-   * @param nameFormat Name format for the thread names
-   * @return A {@link ThreadFactory}.
-   * @see ThreadFactoryBuilder
-   */
-  public static ThreadFactory createDaemonThreadFactory(String nameFormat) {
-    return new ThreadFactoryBuilder()
-      .setDaemon(true)
-      .setNameFormat(nameFormat)
-      .build();
-  }
-
-  private Threads() {
-  }
-}
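
A hedged usage sketch (not from this commit) for the two helpers above; the thread pool and task are assumptions for illustration:

import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import org.apache.twill.common.Threads;

import java.util.concurrent.Callable;
import java.util.concurrent.Executors;

public final class ThreadsExample {
  public static void main(String[] args) throws Exception {
    // Daemon threads named "worker-0", "worker-1", ... will not block JVM exit.
    ListeningExecutorService executor = MoreExecutors.listeningDecorator(
        Executors.newFixedThreadPool(2, Threads.createDaemonThreadFactory("worker-%d")));

    ListenableFuture<String> future = executor.submit(new Callable<String>() {
      @Override
      public String call() {
        return Thread.currentThread().getName();
      }
    });

    // Run the completion callback directly on the thread that completes the future.
    future.addListener(new Runnable() {
      @Override
      public void run() {
        System.out.println("task finished");
      }
    }, Threads.SAME_THREAD_EXECUTOR);

    System.out.println("ran on: " + future.get());
    executor.shutdown();
  }
}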

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/common/src/main/java/org/apache/twill/filesystem/ForwardingLocationFactory.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/twill/filesystem/ForwardingLocationFactory.java b/common/src/main/java/org/apache/twill/filesystem/ForwardingLocationFactory.java
deleted file mode 100644
index d25ea20..0000000
--- a/common/src/main/java/org/apache/twill/filesystem/ForwardingLocationFactory.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.filesystem;
-
-/**
- * A {@link LocationFactory} that forwards operations to another (delegate) {@link LocationFactory}.
- */
-public abstract class ForwardingLocationFactory implements LocationFactory {
-
-  private final LocationFactory delegate;
-
-  protected ForwardingLocationFactory(LocationFactory delegate) {
-    this.delegate = delegate;
-  }
-
-  public LocationFactory getDelegate() {
-    return delegate;
-  }
-}
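
As a sketch of intended use (not part of this commit), a hypothetical subclass that namespaces path-based lookups while forwarding everything else to the wrapped factory; the class name and behaviour are made up for illustration:

import org.apache.twill.filesystem.ForwardingLocationFactory;
import org.apache.twill.filesystem.Location;
import org.apache.twill.filesystem.LocationFactory;

import java.net.URI;

public final class NamespacedLocationFactory extends ForwardingLocationFactory {

  private final String namespace;

  public NamespacedLocationFactory(LocationFactory delegate, String namespace) {
    super(delegate);
    this.namespace = namespace;
  }

  @Override
  public Location create(String path) {
    // Prefix relative paths with the namespace before delegating.
    return getDelegate().create(namespace + "/" + path);
  }

  @Override
  public Location create(URI uri) {
    return getDelegate().create(uri);
  }

  @Override
  public Location getHomeLocation() {
    return getDelegate().getHomeLocation();
  }
}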

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/common/src/main/java/org/apache/twill/filesystem/LocalLocation.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/twill/filesystem/LocalLocation.java b/common/src/main/java/org/apache/twill/filesystem/LocalLocation.java
deleted file mode 100644
index d107eac..0000000
--- a/common/src/main/java/org/apache/twill/filesystem/LocalLocation.java
+++ /dev/null
@@ -1,205 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.filesystem;
-
-import com.google.common.collect.Lists;
-
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.net.URI;
-import java.util.Collections;
-import java.util.Deque;
-import java.util.UUID;
-
-/**
- * A concrete implementation of {@link Location} for the Local filesystem.
- */
-final class LocalLocation implements Location {
-  private final File file;
-
-  /**
-   * Constructs a LocalLocation.
-   *
-   * @param file the file backing this location.
-   */
-  LocalLocation(File file) {
-    this.file = file;
-  }
-
-  /**
-   * Checks if this location exists on the local file system.
-   *
-   * @return true if found; false otherwise.
-   * @throws java.io.IOException
-   */
-  @Override
-  public boolean exists() throws IOException {
-    return file.exists();
-  }
-
-  /**
-   * @return An {@link java.io.InputStream} for this location on local filesystem.
-   * @throws IOException
-   */
-  @Override
-  public InputStream getInputStream() throws IOException {
-    File parent = file.getParentFile();
-    if (!parent.exists()) {
-      parent.mkdirs();
-    }
-    return new FileInputStream(file);
-  }
-
-  /**
-   * @return An {@link java.io.OutputStream} for this location on local filesystem.
-   * @throws IOException
-   */
-  @Override
-  public OutputStream getOutputStream() throws IOException {
-    File parent = file.getParentFile();
-    if (!parent.exists()) {
-      parent.mkdirs();
-    }
-    return new FileOutputStream(file);
-  }
-
-  /**
-   * Local location doesn't support permissions. It's the same as calling {@link #getOutputStream()}.
-   */
-  @Override
-  public OutputStream getOutputStream(String permission) throws IOException {
-    return getOutputStream();
-  }
-
-  /**
-   * @return The name of the file or directory denoted by this abstract pathname.
-   */
-  @Override
-  public String getName() {
-    return file.getName();
-  }
-
-  @Override
-  public boolean createNew() throws IOException {
-    return file.createNewFile();
-  }
-
-  /**
-   * Appends the child to the current {@link Location} on local filesystem.
-   * <p>
-   * Returns a new instance of Location.
-   * </p>
-   *
-   * @param child to be appended to this location.
-   * @return A new instance of {@link Location}
-   * @throws IOException
-   */
-  @Override
-  public Location append(String child) throws IOException {
-    return new LocalLocation(new File(file, child));
-  }
-
-  @Override
-  public Location getTempFile(String suffix) throws IOException {
-    return new LocalLocation(
-      new File(file.getAbsolutePath() + "." + UUID.randomUUID() + (suffix == null ? TEMP_FILE_SUFFIX : suffix)));
-  }
-
-  /**
-   * @return A {@link URI} for this location on local filesystem.
-   */
-  @Override
-  public URI toURI() {
-    return file.toURI();
-  }
-
-  /**
-   * Deletes the file or directory denoted by this abstract pathname. If this
-   * pathname denotes a directory, then the directory must be empty in order
-   * to be deleted.
-   *
-   * @return true if and only if the file or directory is successfully deleted; false otherwise.
-   */
-  @Override
-  public boolean delete() throws IOException {
-    return file.delete();
-  }
-
-  @Override
-  public boolean delete(boolean recursive) throws IOException {
-    if (!recursive) {
-      return delete();
-    }
-
-    Deque<File> stack = Lists.newLinkedList();
-    stack.add(file);
-    while (!stack.isEmpty()) {
-      File f = stack.peekLast();
-      File[] files = f.listFiles();
-
-      if (files != null && files.length != 0) {
-        Collections.addAll(stack, files);
-      } else {
-        if (!f.delete()) {
-          return false;
-        }
-        stack.pollLast();
-      }
-    }
-    return true;
-  }
-
-  @Override
-  public Location renameTo(Location destination) throws IOException {
-    // destination will always be of the same type as this location
-    boolean success = file.renameTo(((LocalLocation) destination).file);
-    if (success) {
-      return new LocalLocation(((LocalLocation) destination).file);
-    } else {
-      return null;
-    }
-  }
-
-  /**
-   * Creates the directory named by this abstract pathname, including any necessary
-   * but nonexistent parent directories.
-   *
-   * @return true if and only if the directory was created, along with all necessary parent directories; false otherwise
-   */
-  @Override
-  public boolean mkdirs() throws IOException {
-    return file.mkdirs();
-  }
-
-  /**
-   * @return Length of file.
-   */
-  @Override
-  public long length() throws IOException {
-    return file.length();
-  }
-
-  @Override
-  public long lastModified() {
-    return file.lastModified();
-  }
-}
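
A small, hedged sketch (not from this commit) of the write-to-temp-then-rename pattern the Location API supports; the paths under /tmp are assumptions for illustration:

import org.apache.twill.filesystem.LocalLocationFactory;
import org.apache.twill.filesystem.Location;

import java.io.File;
import java.io.OutputStream;

public final class LocalLocationExample {
  public static void main(String[] args) throws Exception {
    Location base = new LocalLocationFactory(new File("/tmp")).create("twill-example");
    base.mkdirs();

    // Write to a temp file first, then rename it into place; renameTo returns null on failure.
    Location target = base.append("spec.json");
    Location temp = target.getTempFile(".tmp");
    OutputStream os = temp.getOutputStream();
    try {
      os.write("{}".getBytes("UTF-8"));
    } finally {
      os.close();
    }
    System.out.println("renamed: " + (temp.renameTo(target) != null));

    // Recursive delete removes the directory contents before the directory itself.
    System.out.println("deleted: " + base.delete(true));
  }
}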

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/common/src/main/java/org/apache/twill/filesystem/LocalLocationFactory.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/twill/filesystem/LocalLocationFactory.java b/common/src/main/java/org/apache/twill/filesystem/LocalLocationFactory.java
deleted file mode 100644
index f44cd87..0000000
--- a/common/src/main/java/org/apache/twill/filesystem/LocalLocationFactory.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.filesystem;
-
-import java.io.File;
-import java.net.URI;
-
-/**
- * A {@link LocationFactory} for creating local file {@link Location}.
- */
-public final class LocalLocationFactory implements LocationFactory {
-
-  private final File basePath;
-
-  /**
-   * Constructs a LocalLocationFactory whose created Locations are relative to the system root.
-   */
-  public LocalLocationFactory() {
-    this(new File("/"));
-  }
-
-  public LocalLocationFactory(File basePath) {
-    this.basePath = basePath;
-  }
-
-  @Override
-  public Location create(String path) {
-    return new LocalLocation(new File(basePath, path));
-  }
-
-  @Override
-  public Location create(URI uri) {
-    if (uri.isAbsolute()) {
-      return new LocalLocation(new File(uri));
-    }
-    return new LocalLocation(new File(basePath, uri.getPath()));
-  }
-
-  @Override
-  public Location getHomeLocation() {
-    return new LocalLocation(new File(System.getProperty("user.home")));
-  }
-}
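
And a short sketch (again not part of this commit) of how the factory resolves paths and URIs; the base directory is made up:

import org.apache.twill.filesystem.LocalLocationFactory;
import org.apache.twill.filesystem.Location;
import org.apache.twill.filesystem.LocationFactory;

import java.io.File;
import java.net.URI;

public final class LocalLocationFactoryExample {
  public static void main(String[] args) throws Exception {
    LocationFactory factory = new LocalLocationFactory(new File("/tmp/twill"));

    // Relative paths and relative URIs resolve against the base path.
    System.out.println(factory.create("apps/demo").toURI());

    // Absolute file URIs bypass the base path entirely.
    Location absolute = factory.create(URI.create("file:///etc/hosts"));
    System.out.println(absolute.exists());

    // The home location is the current user's home directory.
    System.out.println(factory.getHomeLocation().toURI());
  }
}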


[04/28] Making maven site works.

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/main/java/org/apache/twill/yarn/YarnTwillPreparer.java
----------------------------------------------------------------------
diff --git a/yarn/src/main/java/org/apache/twill/yarn/YarnTwillPreparer.java b/yarn/src/main/java/org/apache/twill/yarn/YarnTwillPreparer.java
deleted file mode 100644
index 17425d4..0000000
--- a/yarn/src/main/java/org/apache/twill/yarn/YarnTwillPreparer.java
+++ /dev/null
@@ -1,600 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.yarn;
-
-import com.google.common.base.Charsets;
-import com.google.common.base.Joiner;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Predicates;
-import com.google.common.base.Supplier;
-import com.google.common.base.Throwables;
-import com.google.common.collect.ArrayListMultimap;
-import com.google.common.collect.HashMultimap;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.Iterables;
-import com.google.common.collect.ListMultimap;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-import com.google.common.collect.Multimap;
-import com.google.common.collect.Sets;
-import com.google.common.io.ByteStreams;
-import com.google.common.io.CharStreams;
-import com.google.common.io.OutputSupplier;
-import com.google.common.reflect.TypeToken;
-import com.google.gson.GsonBuilder;
-import org.apache.hadoop.security.Credentials;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.twill.api.EventHandlerSpecification;
-import org.apache.twill.api.LocalFile;
-import org.apache.twill.api.RunId;
-import org.apache.twill.api.RuntimeSpecification;
-import org.apache.twill.api.SecureStore;
-import org.apache.twill.api.TwillController;
-import org.apache.twill.api.TwillPreparer;
-import org.apache.twill.api.TwillSpecification;
-import org.apache.twill.api.logging.LogHandler;
-import org.apache.twill.filesystem.Location;
-import org.apache.twill.filesystem.LocationFactory;
-import org.apache.twill.internal.ApplicationBundler;
-import org.apache.twill.internal.Arguments;
-import org.apache.twill.internal.Configs;
-import org.apache.twill.internal.Constants;
-import org.apache.twill.internal.DefaultLocalFile;
-import org.apache.twill.internal.DefaultRuntimeSpecification;
-import org.apache.twill.internal.DefaultTwillSpecification;
-import org.apache.twill.internal.EnvKeys;
-import org.apache.twill.internal.LogOnlyEventHandler;
-import org.apache.twill.internal.ProcessController;
-import org.apache.twill.internal.ProcessLauncher;
-import org.apache.twill.internal.RunIds;
-import org.apache.twill.internal.appmaster.ApplicationMasterMain;
-import org.apache.twill.internal.container.TwillContainerMain;
-import org.apache.twill.internal.json.ArgumentsCodec;
-import org.apache.twill.internal.json.LocalFileCodec;
-import org.apache.twill.internal.json.TwillSpecificationAdapter;
-import org.apache.twill.internal.utils.Dependencies;
-import org.apache.twill.internal.utils.Paths;
-import org.apache.twill.internal.yarn.YarnAppClient;
-import org.apache.twill.internal.yarn.YarnApplicationReport;
-import org.apache.twill.internal.yarn.YarnUtils;
-import org.apache.twill.launcher.TwillLauncher;
-import org.apache.twill.zookeeper.ZKClient;
-import org.apache.twill.zookeeper.ZKClients;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.BufferedOutputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.io.OutputStreamWriter;
-import java.io.Writer;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.net.URL;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.Callable;
-import java.util.jar.JarEntry;
-import java.util.jar.JarOutputStream;
-
-/**
- * Implementation for {@link TwillPreparer} to prepare and launch distributed application on Hadoop YARN.
- */
-final class YarnTwillPreparer implements TwillPreparer {
-
-  private static final Logger LOG = LoggerFactory.getLogger(YarnTwillPreparer.class);
-  private static final String KAFKA_ARCHIVE = "kafka-0.7.2.tgz";
-
-  private final YarnConfiguration yarnConfig;
-  private final TwillSpecification twillSpec;
-  private final YarnAppClient yarnAppClient;
-  private final ZKClient zkClient;
-  private final LocationFactory locationFactory;
-  private final Supplier<String> jvmOpts;
-  private final YarnTwillControllerFactory controllerFactory;
-  private final RunId runId;
-
-  private final List<LogHandler> logHandlers = Lists.newArrayList();
-  private final List<String> arguments = Lists.newArrayList();
-  private final Set<Class<?>> dependencies = Sets.newIdentityHashSet();
-  private final List<URI> resources = Lists.newArrayList();
-  private final List<String> classPaths = Lists.newArrayList();
-  private final ListMultimap<String, String> runnableArgs = ArrayListMultimap.create();
-  private final Credentials credentials;
-  private final int reservedMemory;
-  private String user;
-
-  YarnTwillPreparer(YarnConfiguration yarnConfig, TwillSpecification twillSpec, YarnAppClient yarnAppClient,
-                    ZKClient zkClient, LocationFactory locationFactory, Supplier<String> jvmOpts,
-                    YarnTwillControllerFactory controllerFactory) {
-    this.yarnConfig = yarnConfig;
-    this.twillSpec = twillSpec;
-    this.yarnAppClient = yarnAppClient;
-    this.zkClient = ZKClients.namespace(zkClient, "/" + twillSpec.getName());
-    this.locationFactory = locationFactory;
-    this.jvmOpts = jvmOpts;
-    this.controllerFactory = controllerFactory;
-    this.runId = RunIds.generate();
-    this.credentials = createCredentials();
-    this.reservedMemory = yarnConfig.getInt(Configs.Keys.JAVA_RESERVED_MEMORY_MB,
-                                            Configs.Defaults.JAVA_RESERVED_MEMORY_MB);
-    this.user = System.getProperty("user.name");
-  }
-
-  @Override
-  public TwillPreparer addLogHandler(LogHandler handler) {
-    logHandlers.add(handler);
-    return this;
-  }
-
-  @Override
-  public TwillPreparer setUser(String user) {
-    this.user = user;
-    return this;
-  }
-
-  @Override
-  public TwillPreparer withApplicationArguments(String... args) {
-    return withApplicationArguments(ImmutableList.copyOf(args));
-  }
-
-  @Override
-  public TwillPreparer withApplicationArguments(Iterable<String> args) {
-    Iterables.addAll(arguments, args);
-    return this;
-  }
-
-  @Override
-  public TwillPreparer withArguments(String runnableName, String... args) {
-    return withArguments(runnableName, ImmutableList.copyOf(args));
-  }
-
-  @Override
-  public TwillPreparer withArguments(String runnableName, Iterable<String> args) {
-    runnableArgs.putAll(runnableName, args);
-    return this;
-  }
-
-  @Override
-  public TwillPreparer withDependencies(Class<?>... classes) {
-    return withDependencies(ImmutableList.copyOf(classes));
-  }
-
-  @Override
-  public TwillPreparer withDependencies(Iterable<Class<?>> classes) {
-    Iterables.addAll(dependencies, classes);
-    return this;
-  }
-
-  @Override
-  public TwillPreparer withResources(URI... resources) {
-    return withResources(ImmutableList.copyOf(resources));
-  }
-
-  @Override
-  public TwillPreparer withResources(Iterable<URI> resources) {
-    Iterables.addAll(this.resources, resources);
-    return this;
-  }
-
-  @Override
-  public TwillPreparer withClassPaths(String... classPaths) {
-    return withClassPaths(ImmutableList.copyOf(classPaths));
-  }
-
-  @Override
-  public TwillPreparer withClassPaths(Iterable<String> classPaths) {
-    Iterables.addAll(this.classPaths, classPaths);
-    return this;
-  }
-
-  @Override
-  public TwillPreparer addSecureStore(SecureStore secureStore) {
-    Object store = secureStore.getStore();
-    Preconditions.checkArgument(store instanceof Credentials, "Only Hadoop Credentials is supported.");
-    this.credentials.mergeAll((Credentials) store);
-    return this;
-  }
-
-  @Override
-  public TwillController start() {
-    try {
-      final ProcessLauncher<ApplicationId> launcher = yarnAppClient.createLauncher(user, twillSpec);
-      final ApplicationId appId = launcher.getContainerInfo();
-
-      Callable<ProcessController<YarnApplicationReport>> submitTask =
-        new Callable<ProcessController<YarnApplicationReport>>() {
-        @Override
-        public ProcessController<YarnApplicationReport> call() throws Exception {
-          String fsUser = locationFactory.getHomeLocation().getName();
-
-          // Local files needed by AM
-          Map<String, LocalFile> localFiles = Maps.newHashMap();
-          // Local files declared by runnables
-          Multimap<String, LocalFile> runnableLocalFiles = HashMultimap.create();
-
-          String vmOpts = jvmOpts.get();
-
-          createAppMasterJar(createBundler(), localFiles);
-          createContainerJar(createBundler(), localFiles);
-          populateRunnableLocalFiles(twillSpec, runnableLocalFiles);
-          saveSpecification(twillSpec, runnableLocalFiles, localFiles);
-          saveLogback(localFiles);
-          saveLauncher(localFiles);
-          saveKafka(localFiles);
-          saveVmOptions(vmOpts, localFiles);
-          saveArguments(new Arguments(arguments, runnableArgs), localFiles);
-          saveLocalFiles(localFiles, ImmutableSet.of(Constants.Files.TWILL_SPEC,
-                                                     Constants.Files.LOGBACK_TEMPLATE,
-                                                     Constants.Files.CONTAINER_JAR,
-                                                     Constants.Files.LAUNCHER_JAR,
-                                                     Constants.Files.ARGUMENTS));
-
-          LOG.debug("Submit AM container spec: {}", appId);
-          // java -Djava.io.tmpdir=tmp -cp launcher.jar:$HADOOP_CONF_DIR -XmxMemory
-          //     org.apache.twill.internal.TwillLauncher
-          //     appMaster.jar
-          //     org.apache.twill.internal.appmaster.ApplicationMasterMain
-          //     false
-          return launcher.prepareLaunch(
-            ImmutableMap.<String, String>builder()
-              .put(EnvKeys.TWILL_FS_USER, fsUser)
-              .put(EnvKeys.TWILL_APP_DIR, getAppLocation().toURI().toASCIIString())
-              .put(EnvKeys.TWILL_ZK_CONNECT, zkClient.getConnectString())
-              .put(EnvKeys.TWILL_RUN_ID, runId.getId())
-              .put(EnvKeys.TWILL_RESERVED_MEMORY_MB, Integer.toString(reservedMemory))
-              .put(EnvKeys.TWILL_APP_NAME, twillSpec.getName()).build(),
-            localFiles.values(), credentials)
-            .noResources()
-            .noEnvironment()
-            .withCommands().add(
-              "java",
-              "-Djava.io.tmpdir=tmp",
-              "-Dyarn.appId=$" + EnvKeys.YARN_APP_ID_STR,
-              "-Dtwill.app=$" + EnvKeys.TWILL_APP_NAME,
-              "-cp", Constants.Files.LAUNCHER_JAR + ":$HADOOP_CONF_DIR",
-              "-Xmx" + (Constants.APP_MASTER_MEMORY_MB - Constants.APP_MASTER_RESERVED_MEMORY_MB) + "m",
-              vmOpts,
-              TwillLauncher.class.getName(),
-              Constants.Files.APP_MASTER_JAR,
-              ApplicationMasterMain.class.getName(),
-              Boolean.FALSE.toString())
-            .redirectOutput(Constants.STDOUT)
-            .redirectError(Constants.STDERR)
-            .launch();
-        }
-      };
-
-      YarnTwillController controller = controllerFactory.create(runId, logHandlers, submitTask);
-      controller.start();
-      return controller;
-    } catch (Exception e) {
-      LOG.error("Failed to submit application {}", twillSpec.getName(), e);
-      throw Throwables.propagate(e);
-    }
-  }
-
-  private Credentials createCredentials() {
-    Credentials credentials = new Credentials();
-
-    try {
-      credentials.addAll(UserGroupInformation.getCurrentUser().getCredentials());
-
-      List<Token<?>> tokens = YarnUtils.addDelegationTokens(yarnConfig, locationFactory, credentials);
-      for (Token<?> token : tokens) {
-        LOG.debug("Delegation token acquired for {}, {}", locationFactory.getHomeLocation().toURI(), token);
-      }
-    } catch (IOException e) {
-      LOG.warn("Failed to check for secure login type. Not gathering any delegation token.", e);
-    }
-    return credentials;
-  }
-
-  private ApplicationBundler createBundler() {
-    return new ApplicationBundler(ImmutableList.<String>of());
-  }
-
-  private LocalFile createLocalFile(String name, Location location) throws IOException {
-    return createLocalFile(name, location, false);
-  }
-
-  private LocalFile createLocalFile(String name, Location location, boolean archive) throws IOException {
-    return new DefaultLocalFile(name, location.toURI(), location.lastModified(), location.length(), archive, null);
-  }
-
-  private void createAppMasterJar(ApplicationBundler bundler, Map<String, LocalFile> localFiles) throws IOException {
-    try {
-      LOG.debug("Create and copy {}", Constants.Files.APP_MASTER_JAR);
-      Location location = createTempLocation(Constants.Files.APP_MASTER_JAR);
-
-      List<Class<?>> classes = Lists.newArrayList();
-      classes.add(ApplicationMasterMain.class);
-
-      // Stick in the yarnAppClient class so that the bundler is able to pick up the right yarn-client version
-      classes.add(yarnAppClient.getClass());
-
-      // Add the TwillRunnableEventHandler class
-      if (twillSpec.getEventHandler() != null) {
-        classes.add(getClassLoader().loadClass(twillSpec.getEventHandler().getClassName()));
-      }
-
-      bundler.createBundle(location, classes);
-      LOG.debug("Done {}", Constants.Files.APP_MASTER_JAR);
-
-      localFiles.put(Constants.Files.APP_MASTER_JAR, createLocalFile(Constants.Files.APP_MASTER_JAR, location));
-    } catch (ClassNotFoundException e) {
-      throw Throwables.propagate(e);
-    }
-  }
-
-  private void createContainerJar(ApplicationBundler bundler, Map<String, LocalFile> localFiles) throws IOException {
-    try {
-      Set<Class<?>> classes = Sets.newIdentityHashSet();
-      classes.add(TwillContainerMain.class);
-      classes.addAll(dependencies);
-
-      ClassLoader classLoader = getClassLoader();
-      for (RuntimeSpecification spec : twillSpec.getRunnables().values()) {
-        classes.add(classLoader.loadClass(spec.getRunnableSpecification().getClassName()));
-      }
-
-      LOG.debug("Create and copy {}", Constants.Files.CONTAINER_JAR);
-      Location location = createTempLocation(Constants.Files.CONTAINER_JAR);
-      bundler.createBundle(location, classes, resources);
-      LOG.debug("Done {}", Constants.Files.CONTAINER_JAR);
-
-      localFiles.put(Constants.Files.CONTAINER_JAR, createLocalFile(Constants.Files.CONTAINER_JAR, location));
-
-    } catch (ClassNotFoundException e) {
-      throw Throwables.propagate(e);
-    }
-  }
-
-  /**
-   * Based on the given {@link TwillSpecification}, uploads LocalFiles to the YARN cluster.
-   * @param twillSpec The {@link TwillSpecification} for populating resources.
-   * @param localFiles A Multimap mapping runnable name to transformed LocalFiles.
-   * @throws IOException
-   */
-  private void populateRunnableLocalFiles(TwillSpecification twillSpec,
-                                          Multimap<String, LocalFile> localFiles) throws IOException {
-
-    LOG.debug("Populating Runnable LocalFiles");
-    for (Map.Entry<String, RuntimeSpecification> entry: twillSpec.getRunnables().entrySet()) {
-      String runnableName = entry.getKey();
-      for (LocalFile localFile : entry.getValue().getLocalFiles()) {
-        Location location;
-
-        URI uri = localFile.getURI();
-        if ("hdfs".equals(uri.getScheme())) {
-          // Assuming the location factory is an HDFS one. If it is not, it will fail, which is the correct behavior.
-          location = locationFactory.create(uri);
-        } else {
-          URL url = uri.toURL();
-          LOG.debug("Create and copy {} : {}", runnableName, url);
-          // Preserves original suffix for expansion.
-          location = copyFromURL(url, createTempLocation(Paths.appendSuffix(url.getFile(), localFile.getName())));
-          LOG.debug("Done {} : {}", runnableName, url);
-        }
-
-        localFiles.put(runnableName,
-                       new DefaultLocalFile(localFile.getName(), location.toURI(), location.lastModified(),
-                                            location.length(), localFile.isArchive(), localFile.getPattern()));
-      }
-    }
-    LOG.debug("Done Runnable LocalFiles");
-  }
-
-  private void saveSpecification(TwillSpecification spec, final Multimap<String, LocalFile> runnableLocalFiles,
-                                 Map<String, LocalFile> localFiles) throws IOException {
-    // Rewrite LocalFiles inside twillSpec
-    Map<String, RuntimeSpecification> runtimeSpec = Maps.transformEntries(
-      spec.getRunnables(), new Maps.EntryTransformer<String, RuntimeSpecification, RuntimeSpecification>() {
-      @Override
-      public RuntimeSpecification transformEntry(String key, RuntimeSpecification value) {
-        return new DefaultRuntimeSpecification(value.getName(), value.getRunnableSpecification(),
-                                               value.getResourceSpecification(), runnableLocalFiles.get(key));
-      }
-    });
-
-    // Serialize into a local temp file.
-    LOG.debug("Create and copy {}", Constants.Files.TWILL_SPEC);
-    Location location = createTempLocation(Constants.Files.TWILL_SPEC);
-    Writer writer = new OutputStreamWriter(location.getOutputStream(), Charsets.UTF_8);
-    try {
-      EventHandlerSpecification eventHandler = spec.getEventHandler();
-      if (eventHandler == null) {
-        eventHandler = new LogOnlyEventHandler().configure();
-      }
-
-      TwillSpecificationAdapter.create().toJson(
-        new DefaultTwillSpecification(spec.getName(), runtimeSpec, spec.getOrders(), eventHandler),
-        writer);
-    } finally {
-      writer.close();
-    }
-    LOG.debug("Done {}", Constants.Files.TWILL_SPEC);
-
-    localFiles.put(Constants.Files.TWILL_SPEC, createLocalFile(Constants.Files.TWILL_SPEC, location));
-  }
-
-  private void saveLogback(Map<String, LocalFile> localFiles) throws IOException {
-    LOG.debug("Create and copy {}", Constants.Files.LOGBACK_TEMPLATE);
-    Location location = copyFromURL(getClass().getClassLoader().getResource(Constants.Files.LOGBACK_TEMPLATE),
-                                    createTempLocation(Constants.Files.LOGBACK_TEMPLATE));
-    LOG.debug("Done {}", Constants.Files.LOGBACK_TEMPLATE);
-
-    localFiles.put(Constants.Files.LOGBACK_TEMPLATE, createLocalFile(Constants.Files.LOGBACK_TEMPLATE, location));
-  }
-
-  /**
-   * Creates the launcher.jar for launching the main application.
-   */
-  private void saveLauncher(Map<String, LocalFile> localFiles) throws URISyntaxException, IOException {
-
-    LOG.debug("Create and copy {}", Constants.Files.LAUNCHER_JAR);
-    Location location = createTempLocation(Constants.Files.LAUNCHER_JAR);
-
-    final String launcherName = TwillLauncher.class.getName();
-
-    // Create a jar file with the TwillLauncher and, optionally, a classpath entry in it.
-    final JarOutputStream jarOut = new JarOutputStream(location.getOutputStream());
-    ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
-    if (classLoader == null) {
-      classLoader = getClass().getClassLoader();
-    }
-    Dependencies.findClassDependencies(classLoader, new Dependencies.ClassAcceptor() {
-      @Override
-      public boolean accept(String className, URL classUrl, URL classPathUrl) {
-        Preconditions.checkArgument(className.startsWith(launcherName),
-                                    "Launcher jar should not have dependencies: %s", className);
-        try {
-          jarOut.putNextEntry(new JarEntry(className.replace('.', '/') + ".class"));
-          InputStream is = classUrl.openStream();
-          try {
-            ByteStreams.copy(is, jarOut);
-          } finally {
-            is.close();
-          }
-        } catch (IOException e) {
-          throw Throwables.propagate(e);
-        }
-        return true;
-      }
-    }, TwillLauncher.class.getName());
-
-    try {
-      if (!classPaths.isEmpty()) {
-        jarOut.putNextEntry(new JarEntry("classpath"));
-        jarOut.write(Joiner.on(':').join(classPaths).getBytes(Charsets.UTF_8));
-      }
-    } finally {
-      jarOut.close();
-    }
-    LOG.debug("Done {}", Constants.Files.LAUNCHER_JAR);
-
-    localFiles.put(Constants.Files.LAUNCHER_JAR, createLocalFile(Constants.Files.LAUNCHER_JAR, location));
-  }
-
-  private void saveKafka(Map<String, LocalFile> localFiles) throws IOException {
-    LOG.debug("Copy {}", Constants.Files.KAFKA);
-    Location location = copyFromURL(getClass().getClassLoader().getResource(KAFKA_ARCHIVE),
-                                    createTempLocation(Constants.Files.KAFKA));
-    LOG.debug("Done {}", Constants.Files.KAFKA);
-
-    localFiles.put(Constants.Files.KAFKA, createLocalFile(Constants.Files.KAFKA, location, true));
-  }
-
-  private void saveVmOptions(String opts, Map<String, LocalFile> localFiles) throws IOException {
-    if (opts.isEmpty()) {
-      // If no vm options, no need to localize the file.
-      return;
-    }
-    LOG.debug("Copy {}", Constants.Files.JVM_OPTIONS);
-    final Location location = createTempLocation(Constants.Files.JVM_OPTIONS);
-    CharStreams.write(opts, new OutputSupplier<Writer>() {
-      @Override
-      public Writer getOutput() throws IOException {
-        return new OutputStreamWriter(location.getOutputStream(), Charsets.UTF_8);
-      }
-    });
-    LOG.debug("Done {}", Constants.Files.JVM_OPTIONS);
-
-    localFiles.put(Constants.Files.JVM_OPTIONS, createLocalFile(Constants.Files.JVM_OPTIONS, location));
-  }
-
-  private void saveArguments(Arguments arguments, Map<String, LocalFile> localFiles) throws IOException {
-    LOG.debug("Create and copy {}", Constants.Files.ARGUMENTS);
-    final Location location = createTempLocation(Constants.Files.ARGUMENTS);
-    ArgumentsCodec.encode(arguments, new OutputSupplier<Writer>() {
-      @Override
-      public Writer getOutput() throws IOException {
-        return new OutputStreamWriter(location.getOutputStream(), Charsets.UTF_8);
-      }
-    });
-    LOG.debug("Done {}", Constants.Files.ARGUMENTS);
-
-    localFiles.put(Constants.Files.ARGUMENTS, createLocalFile(Constants.Files.ARGUMENTS, location));
-  }
-
-  /**
-   * Serializes the list of files that need to be localized from the AM to the containers.
-   */
-  private void saveLocalFiles(Map<String, LocalFile> localFiles, Set<String> includes) throws IOException {
-    Map<String, LocalFile> localize = ImmutableMap.copyOf(Maps.filterKeys(localFiles, Predicates.in(includes)));
-    LOG.debug("Create and copy {}", Constants.Files.LOCALIZE_FILES);
-    Location location = createTempLocation(Constants.Files.LOCALIZE_FILES);
-    Writer writer = new OutputStreamWriter(location.getOutputStream(), Charsets.UTF_8);
-    try {
-      new GsonBuilder().registerTypeAdapter(LocalFile.class, new LocalFileCodec())
-        .create().toJson(localize.values(), new TypeToken<List<LocalFile>>() {
-      }.getType(), writer);
-    } finally {
-      writer.close();
-    }
-    LOG.debug("Done {}", Constants.Files.LOCALIZE_FILES);
-    localFiles.put(Constants.Files.LOCALIZE_FILES, createLocalFile(Constants.Files.LOCALIZE_FILES, location));
-  }
-
-  private Location copyFromURL(URL url, Location target) throws IOException {
-    InputStream is = url.openStream();
-    try {
-      OutputStream os = new BufferedOutputStream(target.getOutputStream());
-      try {
-        ByteStreams.copy(is, os);
-      } finally {
-        os.close();
-      }
-    } finally {
-      is.close();
-    }
-    return target;
-  }
-
-  private Location createTempLocation(String fileName) {
-    String name;
-    String suffix = Paths.getExtension(fileName);
-
-    name = fileName.substring(0, fileName.length() - suffix.length() - 1);
-
-    try {
-      return getAppLocation().append(name).getTempFile('.' + suffix);
-    } catch (IOException e) {
-      throw Throwables.propagate(e);
-    }
-  }
-
-  private Location getAppLocation() {
-    return locationFactory.create(String.format("/%s/%s", twillSpec.getName(), runId.getId()));
-  }
-
-  /**
-   * Returns the context ClassLoader if there is one; otherwise, returns the ClassLoader of this class.
-   */
-  private ClassLoader getClassLoader() {
-    ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
-    return classLoader == null ? getClass().getClassLoader() : classLoader;
-  }
-}
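
To put the preparer in context, a minimal launch sketch (not part of this commit); it assumes org.apache.twill.api.AbstractTwillRunnable, a reachable YARN cluster and ZooKeeper quorum (the zkhost:2181 connect string is made up), and the Guava-Service style lifecycle methods this Twill version exposes on its runner and controller:

import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.twill.api.AbstractTwillRunnable;
import org.apache.twill.api.TwillController;
import org.apache.twill.yarn.YarnTwillRunnerService;

public final class HelloTwill {

  // A runnable whose only job is to print a message inside the YARN container.
  public static final class EchoRunnable extends AbstractTwillRunnable {
    @Override
    public void run() {
      System.out.println("Hello from a YARN container");
    }
  }

  public static void main(String[] args) {
    YarnTwillRunnerService runner =
        new YarnTwillRunnerService(new YarnConfiguration(), "zkhost:2181");
    runner.startAndWait();

    // prepare() builds a YarnTwillPreparer; start() submits the application master.
    TwillController controller = runner.prepare(new EchoRunnable())
        .withApplicationArguments("echo")
        .start();

    controller.stopAndWait();
    runner.stopAndWait();
  }
}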

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/main/java/org/apache/twill/yarn/YarnTwillRunnerService.java
----------------------------------------------------------------------
diff --git a/yarn/src/main/java/org/apache/twill/yarn/YarnTwillRunnerService.java b/yarn/src/main/java/org/apache/twill/yarn/YarnTwillRunnerService.java
deleted file mode 100644
index 9335465..0000000
--- a/yarn/src/main/java/org/apache/twill/yarn/YarnTwillRunnerService.java
+++ /dev/null
@@ -1,583 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.yarn;
-
-import org.apache.twill.api.ResourceSpecification;
-import org.apache.twill.api.RunId;
-import org.apache.twill.api.SecureStore;
-import org.apache.twill.api.SecureStoreUpdater;
-import org.apache.twill.api.TwillApplication;
-import org.apache.twill.api.TwillController;
-import org.apache.twill.api.TwillPreparer;
-import org.apache.twill.api.TwillRunnable;
-import org.apache.twill.api.TwillRunnerService;
-import org.apache.twill.api.TwillSpecification;
-import org.apache.twill.api.logging.LogHandler;
-import org.apache.twill.common.Cancellable;
-import org.apache.twill.common.ServiceListenerAdapter;
-import org.apache.twill.common.Threads;
-import org.apache.twill.filesystem.HDFSLocationFactory;
-import org.apache.twill.filesystem.Location;
-import org.apache.twill.filesystem.LocationFactory;
-import org.apache.twill.internal.Constants;
-import org.apache.twill.internal.ProcessController;
-import org.apache.twill.internal.RunIds;
-import org.apache.twill.internal.SingleRunnableApplication;
-import org.apache.twill.internal.appmaster.ApplicationMasterLiveNodeData;
-import org.apache.twill.internal.yarn.VersionDetectYarnAppClientFactory;
-import org.apache.twill.internal.yarn.YarnAppClient;
-import org.apache.twill.internal.yarn.YarnApplicationReport;
-import org.apache.twill.internal.yarn.YarnUtils;
-import org.apache.twill.zookeeper.NodeChildren;
-import org.apache.twill.zookeeper.NodeData;
-import org.apache.twill.zookeeper.RetryStrategies;
-import org.apache.twill.zookeeper.ZKClient;
-import org.apache.twill.zookeeper.ZKClientService;
-import org.apache.twill.zookeeper.ZKClientServices;
-import org.apache.twill.zookeeper.ZKClients;
-import org.apache.twill.zookeeper.ZKOperations;
-import com.google.common.base.Charsets;
-import com.google.common.base.Function;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Predicate;
-import com.google.common.base.Suppliers;
-import com.google.common.base.Throwables;
-import com.google.common.collect.HashBasedTable;
-import com.google.common.collect.HashMultimap;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.ImmutableTable;
-import com.google.common.collect.Iterables;
-import com.google.common.collect.Iterators;
-import com.google.common.collect.Maps;
-import com.google.common.collect.Multimap;
-import com.google.common.collect.Sets;
-import com.google.common.collect.Table;
-import com.google.common.util.concurrent.AbstractIdleService;
-import com.google.common.util.concurrent.Callables;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import com.google.gson.Gson;
-import com.google.gson.JsonElement;
-import com.google.gson.JsonObject;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.security.Credentials;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.KeeperException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.BufferedInputStream;
-import java.io.BufferedOutputStream;
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
-import java.io.IOException;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.Callable;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.ScheduledFuture;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-/**
- * An implementation of {@link org.apache.twill.api.TwillRunnerService} that runs application on a YARN cluster.
- */
-public final class YarnTwillRunnerService extends AbstractIdleService implements TwillRunnerService {
-
-  private static final Logger LOG = LoggerFactory.getLogger(YarnTwillRunnerService.class);
-
-  private static final int ZK_TIMEOUT = 10000;
-  private static final Function<String, RunId> STRING_TO_RUN_ID = new Function<String, RunId>() {
-    @Override
-    public RunId apply(String input) {
-      return RunIds.fromString(input);
-    }
-  };
-  private static final Function<YarnTwillController, TwillController> CAST_CONTROLLER =
-    new Function<YarnTwillController, TwillController>() {
-    @Override
-    public TwillController apply(YarnTwillController controller) {
-      return controller;
-    }
-  };
-
-  private final YarnConfiguration yarnConfig;
-  private final YarnAppClient yarnAppClient;
-  private final ZKClientService zkClientService;
-  private final LocationFactory locationFactory;
-  private final Table<String, RunId, YarnTwillController> controllers;
-  private ScheduledExecutorService secureStoreScheduler;
-
-  private Iterable<LiveInfo> liveInfos;
-  private Cancellable watchCancellable;
-  private volatile String jvmOptions = "";
-
-  public YarnTwillRunnerService(YarnConfiguration config, String zkConnect) {
-    this(config, zkConnect, new HDFSLocationFactory(getFileSystem(config), "/twill"));
-  }
-
-  public YarnTwillRunnerService(YarnConfiguration config, String zkConnect, LocationFactory locationFactory) {
-    this.yarnConfig = config;
-    this.yarnAppClient = new VersionDetectYarnAppClientFactory().create(config);
-    this.locationFactory = locationFactory;
-    this.zkClientService = getZKClientService(zkConnect);
-    this.controllers = HashBasedTable.create();
-  }
-
-  /**
-   * This method sets the extra JVM options that will be passed to the java command line for every application
-   * started through this {@link YarnTwillRunnerService} instance. It only affects applications that are started
-   * after the options are set.
-   *
-   * This is intended for advanced usage. All options will be passed unchanged to the java command line. Invalid
-   * options could prevent the application from starting.
-   *
-   * @param options extra JVM options.
-   */
-  public void setJVMOptions(String options) {
-    Preconditions.checkArgument(options != null, "JVM options cannot be null.");
-    this.jvmOptions = options;
-  }
-
-  @Override
-  public Cancellable scheduleSecureStoreUpdate(final SecureStoreUpdater updater,
-                                               long initialDelay, long delay, TimeUnit unit) {
-    if (!UserGroupInformation.isSecurityEnabled()) {
-      return new Cancellable() {
-        @Override
-        public void cancel() {
-          // No-op
-        }
-      };
-    }
-
-    synchronized (this) {
-      if (secureStoreScheduler == null) {
-        secureStoreScheduler = Executors.newSingleThreadScheduledExecutor(
-          Threads.createDaemonThreadFactory("secure-store-updater"));
-      }
-    }
-
-    final ScheduledFuture<?> future = secureStoreScheduler.scheduleWithFixedDelay(new Runnable() {
-      @Override
-      public void run() {
-        // Collects all <application, runId> pairs first
-        Multimap<String, RunId> liveApps = HashMultimap.create();
-        synchronized (YarnTwillRunnerService.this) {
-          for (Table.Cell<String, RunId, YarnTwillController> cell : controllers.cellSet()) {
-            liveApps.put(cell.getRowKey(), cell.getColumnKey());
-          }
-        }
-
-        // Collect all secure stores that need to be updated.
-        Table<String, RunId, SecureStore> secureStores = HashBasedTable.create();
-        for (Map.Entry<String, RunId> entry : liveApps.entries()) {
-          try {
-            secureStores.put(entry.getKey(), entry.getValue(), updater.update(entry.getKey(), entry.getValue()));
-          } catch (Throwable t) {
-            LOG.warn("Exception thrown by SecureStoreUpdater {}", updater, t);
-          }
-        }
-
-        // Update secure stores.
-        updateSecureStores(secureStores);
-      }
-    }, initialDelay, delay, unit);
-
-    return new Cancellable() {
-      @Override
-      public void cancel() {
-        future.cancel(false);
-      }
-    };
-  }
-
-  @Override
-  public TwillPreparer prepare(TwillRunnable runnable) {
-    return prepare(runnable, ResourceSpecification.BASIC);
-  }
-
-  @Override
-  public TwillPreparer prepare(TwillRunnable runnable, ResourceSpecification resourceSpecification) {
-    return prepare(new SingleRunnableApplication(runnable, resourceSpecification));
-  }
-
-  @Override
-  public TwillPreparer prepare(TwillApplication application) {
-    Preconditions.checkState(isRunning(), "Service not start. Please call start() first.");
-    final TwillSpecification twillSpec = application.configure();
-    final String appName = twillSpec.getName();
-
-    return new YarnTwillPreparer(yarnConfig, twillSpec, yarnAppClient, zkClientService, locationFactory,
-                                 Suppliers.ofInstance(jvmOptions),
-                                 new YarnTwillControllerFactory() {
-      @Override
-      public YarnTwillController create(RunId runId, Iterable<LogHandler> logHandlers,
-                                        Callable<ProcessController<YarnApplicationReport>> startUp) {
-        ZKClient zkClient = ZKClients.namespace(zkClientService, "/" + appName);
-        YarnTwillController controller = listenController(new YarnTwillController(runId, zkClient,
-                                                                                  logHandlers, startUp));
-        synchronized (YarnTwillRunnerService.this) {
-          Preconditions.checkArgument(!controllers.contains(appName, runId),
-                                      "Application %s with runId %s is already running.", appName, runId);
-          controllers.put(appName, runId, controller);
-        }
-        return controller;
-      }
-    });
-  }
-
-  @Override
-  public synchronized TwillController lookup(String applicationName, final RunId runId) {
-    return controllers.get(applicationName, runId);
-  }
-
-  @Override
-  public Iterable<TwillController> lookup(final String applicationName) {
-    return new Iterable<TwillController>() {
-      @Override
-      public Iterator<TwillController> iterator() {
-        synchronized (YarnTwillRunnerService.this) {
-          return Iterators.transform(ImmutableList.copyOf(controllers.row(applicationName).values()).iterator(),
-                                     CAST_CONTROLLER);
-        }
-      }
-    };
-  }
-
-  @Override
-  public Iterable<LiveInfo> lookupLive() {
-    return liveInfos;
-  }
-
-  @Override
-  protected void startUp() throws Exception {
-    yarnAppClient.startAndWait();
-    zkClientService.startAndWait();
-
-    // Create the root node, so that the namespace root would get created if it is missing
-    // If the exception is caused by node exists, then it's ok. Otherwise propagate the exception.
-    ZKOperations.ignoreError(zkClientService.create("/", null, CreateMode.PERSISTENT),
-                             KeeperException.NodeExistsException.class, null).get();
-
-    watchCancellable = watchLiveApps();
-    liveInfos = createLiveInfos();
-
-    // Schedule an updater for updating HDFS delegation tokens
-    if (UserGroupInformation.isSecurityEnabled()) {
-      long delay = yarnConfig.getLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY,
-                                      DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_DEFAULT);
-      scheduleSecureStoreUpdate(new LocationSecureStoreUpdater(yarnConfig, locationFactory),
-                                delay, delay, TimeUnit.MILLISECONDS);
-    }
-  }
-
-  @Override
-  protected void shutDown() throws Exception {
-    // Shutdown shouldn't stop any controllers, as stopping this client service should leave the remote containers
-    // running. However, this assumes that this TwillRunnerService is a long-running service and you only stop it
-    // when the JVM process is about to exit. Hence it is important that threads created in the controllers are
-    // daemon threads.
-    synchronized (this) {
-      if (secureStoreScheduler != null) {
-        secureStoreScheduler.shutdownNow();
-      }
-    }
-    watchCancellable.cancel();
-    zkClientService.stopAndWait();
-    yarnAppClient.stopAndWait();
-  }
-
-  private Cancellable watchLiveApps() {
-    final Map<String, Cancellable> watched = Maps.newConcurrentMap();
-
-    final AtomicBoolean cancelled = new AtomicBoolean(false);
-    // Watch child changes in the root, which gives all application names.
-    final Cancellable cancellable = ZKOperations.watchChildren(zkClientService, "/",
-                                                               new ZKOperations.ChildrenCallback() {
-      @Override
-      public void updated(NodeChildren nodeChildren) {
-        if (cancelled.get()) {
-          return;
-        }
-
-        Set<String> apps = ImmutableSet.copyOf(nodeChildren.getChildren());
-
-        // For each of the application names, watch for ephemeral nodes under /instances.
-        for (final String appName : apps) {
-          if (watched.containsKey(appName)) {
-            continue;
-          }
-
-          final String instancePath = String.format("/%s/instances", appName);
-          watched.put(appName,
-                      ZKOperations.watchChildren(zkClientService, instancePath, new ZKOperations.ChildrenCallback() {
-            @Override
-            public void updated(NodeChildren nodeChildren) {
-              if (cancelled.get()) {
-                return;
-              }
-              if (nodeChildren.getChildren().isEmpty()) {     // No more children means no live instances
-                Cancellable removed = watched.remove(appName);
-                if (removed != null) {
-                  removed.cancel();
-                }
-                return;
-              }
-              synchronized (YarnTwillRunnerService.this) {
-                // For each of the children, whose node name is the runId,
-                // fetch the application id and construct a TwillController.
-                for (final RunId runId : Iterables.transform(nodeChildren.getChildren(), STRING_TO_RUN_ID)) {
-                  if (controllers.contains(appName, runId)) {
-                    continue;
-                  }
-                  updateController(appName, runId, cancelled);
-                }
-              }
-            }
-          }));
-        }
-
-        // Remove app watches for apps that are gone. Removal of controller from controllers table is done
-        // in the state listener attached to the twill controller.
-        for (String removeApp : Sets.difference(watched.keySet(), apps)) {
-          watched.remove(removeApp).cancel();
-        }
-      }
-    });
-    return new Cancellable() {
-      @Override
-      public void cancel() {
-        cancelled.set(true);
-        cancellable.cancel();
-        for (Cancellable c : watched.values()) {
-          c.cancel();
-        }
-      }
-    };
-  }
-
-  private YarnTwillController listenController(final YarnTwillController controller) {
-    controller.addListener(new ServiceListenerAdapter() {
-      @Override
-      public void terminated(State from) {
-        removeController();
-      }
-
-      @Override
-      public void failed(State from, Throwable failure) {
-        removeController();
-      }
-
-      private void removeController() {
-        synchronized (YarnTwillRunnerService.this) {
-          Iterables.removeIf(controllers.values(),
-                             new Predicate<TwillController>() {
-             @Override
-             public boolean apply(TwillController input) {
-               return input == controller;
-             }
-           });
-        }
-      }
-    }, Threads.SAME_THREAD_EXECUTOR);
-    return controller;
-  }
-
-  private ZKClientService getZKClientService(String zkConnect) {
-    return ZKClientServices.delegate(
-      ZKClients.reWatchOnExpire(
-        ZKClients.retryOnFailure(ZKClientService.Builder.of(zkConnect)
-                                   .setSessionTimeout(ZK_TIMEOUT)
-                                   .build(), RetryStrategies.exponentialDelay(100, 2000, TimeUnit.MILLISECONDS))));
-  }
-
-  private Iterable<LiveInfo> createLiveInfos() {
-    return new Iterable<LiveInfo>() {
-
-      @Override
-      public Iterator<LiveInfo> iterator() {
-        Map<String, Map<RunId, YarnTwillController>> controllerMap = ImmutableTable.copyOf(controllers).rowMap();
-        return Iterators.transform(controllerMap.entrySet().iterator(),
-                                   new Function<Map.Entry<String, Map<RunId, YarnTwillController>>, LiveInfo>() {
-          @Override
-          public LiveInfo apply(final Map.Entry<String, Map<RunId, YarnTwillController>> entry) {
-            return new LiveInfo() {
-              @Override
-              public String getApplicationName() {
-                return entry.getKey();
-              }
-
-              @Override
-              public Iterable<TwillController> getControllers() {
-                return Iterables.transform(entry.getValue().values(), CAST_CONTROLLER);
-              }
-            };
-          }
-        });
-      }
-    };
-  }
-
-  private void updateController(final String appName, final RunId runId, final AtomicBoolean cancelled) {
-    String instancePath = String.format("/%s/instances/%s", appName, runId.getId());
-
-    // Fetch the content node.
-    Futures.addCallback(zkClientService.getData(instancePath), new FutureCallback<NodeData>() {
-      @Override
-      public void onSuccess(NodeData result) {
-        if (cancelled.get()) {
-          return;
-        }
-        ApplicationId appId = getApplicationId(result);
-        if (appId == null) {
-          return;
-        }
-
-        synchronized (YarnTwillRunnerService.this) {
-          if (!controllers.contains(appName, runId)) {
-            ZKClient zkClient = ZKClients.namespace(zkClientService, "/" + appName);
-            YarnTwillController controller = listenController(
-              new YarnTwillController(runId, zkClient,
-                                      Callables.returning(yarnAppClient.createProcessController(appId))));
-            controllers.put(appName, runId, controller);
-            controller.start();
-          }
-        }
-      }
-
-      @Override
-      public void onFailure(Throwable t) {
-        LOG.warn("Failed in fetching application instance node.", t);
-      }
-    }, Threads.SAME_THREAD_EXECUTOR);
-  }
-
-
-  /**
-   * Decodes application ID stored inside the node data.
-   * @param nodeData The node data to decode from. If it is {@code null}, this method would return {@code null}.
-   * @return The ApplicationId or {@code null} if failed to decode.
-   */
-  private ApplicationId getApplicationId(NodeData nodeData) {
-    byte[] data = nodeData == null ? null : nodeData.getData();
-    if (data == null) {
-      return null;
-    }
-
-    Gson gson = new Gson();
-    JsonElement json = gson.fromJson(new String(data, Charsets.UTF_8), JsonElement.class);
-    if (!json.isJsonObject()) {
-      LOG.warn("Unable to decode live data node.");
-      return null;
-    }
-
-    JsonObject jsonObj = json.getAsJsonObject();
-    json = jsonObj.get("data");
-    if (!json.isJsonObject()) {
-      LOG.warn("Property data not found in live data node.");
-      return null;
-    }
-
-    try {
-      ApplicationMasterLiveNodeData amLiveNode = gson.fromJson(json, ApplicationMasterLiveNodeData.class);
-      return YarnUtils.createApplicationId(amLiveNode.getAppIdClusterTime(), amLiveNode.getAppId());
-    } catch (Exception e) {
-      LOG.warn("Failed to decode application live node data.", e);
-      return null;
-    }
-  }
-
-  private void updateSecureStores(Table<String, RunId, SecureStore> secureStores) {
-    for (Table.Cell<String, RunId, SecureStore> cell : secureStores.cellSet()) {
-      Object store = cell.getValue().getStore();
-      if (!(store instanceof Credentials)) {
-        LOG.warn("Only Hadoop Credentials is supported. Ignore update for {}.", cell);
-        continue;
-      }
-
-      Credentials credentials = (Credentials) store;
-      if (credentials.getAllTokens().isEmpty()) {
-        // Nothing to update.
-        continue;
-      }
-
-      try {
-        updateCredentials(cell.getRowKey(), cell.getColumnKey(), credentials);
-        synchronized (YarnTwillRunnerService.this) {
-          // Notify the application for secure store updates if it is still running.
-          YarnTwillController controller = controllers.get(cell.getRowKey(), cell.getColumnKey());
-          if (controller != null) {
-            controller.secureStoreUpdated();
-          }
-        }
-      } catch (Throwable t) {
-        LOG.warn("Failed to update secure store for {}.", cell, t);
-      }
-    }
-  }
-
-  private void updateCredentials(String application, RunId runId, Credentials updates) throws IOException {
-    Location credentialsLocation = locationFactory.create(String.format("/%s/%s/%s", application, runId.getId(),
-                                                                        Constants.Files.CREDENTIALS));
-    // Try to read the old credentials.
-    Credentials credentials = new Credentials();
-    if (credentialsLocation.exists()) {
-      DataInputStream is = new DataInputStream(new BufferedInputStream(credentialsLocation.getInputStream()));
-      try {
-        credentials.readTokenStorageStream(is);
-      } finally {
-        is.close();
-      }
-    }
-
-    // Overwrite with the updates.
-    credentials.addAll(updates);
-
-    // Overwrite the credentials.
-    Location tmpLocation = credentialsLocation.getTempFile(Constants.Files.CREDENTIALS);
-
-    // Save the credentials store with user-only permission.
-    DataOutputStream os = new DataOutputStream(new BufferedOutputStream(tmpLocation.getOutputStream("600")));
-    try {
-      credentials.writeTokenStorageToStream(os);
-    } finally {
-      os.close();
-    }
-
-    // Rename the tmp file into the credentials location
-    tmpLocation.renameTo(credentialsLocation);
-
-    LOG.debug("Secure store for {} {} saved to {}.", application, runId, credentialsLocation.toURI());
-  }
-
-  private static FileSystem getFileSystem(YarnConfiguration configuration) {
-    try {
-      return FileSystem.get(configuration);
-    } catch (IOException e) {
-      throw Throwables.propagate(e);
-    }
-  }
-}

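As an aside for readers following the getApplicationId() method in the deleted YarnTwillRunnerService above: below is a minimal sketch of the live node payload it decodes from ZooKeeper. This is not part of the commit; the field names "appId" and "appIdClusterTime" are assumptions inferred from the ApplicationMasterLiveNodeData getters used in that method.

    import com.google.common.base.Charsets;
    import com.google.gson.Gson;
    import com.google.gson.JsonObject;

    public final class LiveNodeDataSketch {
      public static void main(String[] args) {
        // Hypothetical AM live node fields; the names mirror the getters used by getApplicationId().
        JsonObject amData = new JsonObject();
        amData.addProperty("appId", 3);
        amData.addProperty("appIdClusterTime", 1386885583000L);

        // The ZK instance node wraps these fields under a "data" property,
        // which is what getApplicationId() looks up before deserializing.
        JsonObject node = new JsonObject();
        node.add("data", amData);

        byte[] zkNodeBytes = new Gson().toJson(node).getBytes(Charsets.UTF_8);
        System.out.println(new String(zkNodeBytes, Charsets.UTF_8));
      }
    }
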
http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/main/java/org/apache/twill/yarn/package-info.java
----------------------------------------------------------------------
diff --git a/yarn/src/main/java/org/apache/twill/yarn/package-info.java b/yarn/src/main/java/org/apache/twill/yarn/package-info.java
deleted file mode 100644
index b3cbc5e..0000000
--- a/yarn/src/main/java/org/apache/twill/yarn/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * Classes in this package implement the Twill API for Apache Hadoop YARN.
- */
-package org.apache.twill.yarn;

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/main/resources/logback-template.xml
----------------------------------------------------------------------
diff --git a/yarn/src/main/resources/logback-template.xml b/yarn/src/main/resources/logback-template.xml
deleted file mode 100644
index 38cf6c8..0000000
--- a/yarn/src/main/resources/logback-template.xml
+++ /dev/null
@@ -1,11 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-
-<!-- Default logback configuration for twill library -->
-<configuration>
-
-    <logger name="org.apache.hadoop" level="WARN" />
-    <logger name="org.apache.zookeeper" level="WARN" />
-
-    <root level="INFO" />
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/test/java/org/apache/twill/yarn/BuggyServer.java
----------------------------------------------------------------------
diff --git a/yarn/src/test/java/org/apache/twill/yarn/BuggyServer.java b/yarn/src/test/java/org/apache/twill/yarn/BuggyServer.java
deleted file mode 100644
index bb1a583..0000000
--- a/yarn/src/test/java/org/apache/twill/yarn/BuggyServer.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.yarn;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.BufferedReader;
-import java.io.IOException;
-import java.io.PrintWriter;
-
-/**
- * Server for testing that will die if you give it a 0.
- */
-public final class BuggyServer extends SocketServer {
-
-  private static final Logger LOG = LoggerFactory.getLogger(BuggyServer.class);
-
-  @Override
-  public void handleRequest(BufferedReader reader, PrintWriter writer) throws IOException {
-    String line = reader.readLine();
-    LOG.info("Received: " + line + " going to divide by it");
-    Integer toDivide = Integer.valueOf(line);
-    writer.println(Integer.toString(100 / toDivide));
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/test/java/org/apache/twill/yarn/DistributeShellTestRun.java
----------------------------------------------------------------------
diff --git a/yarn/src/test/java/org/apache/twill/yarn/DistributeShellTestRun.java b/yarn/src/test/java/org/apache/twill/yarn/DistributeShellTestRun.java
deleted file mode 100644
index 1054ec9..0000000
--- a/yarn/src/test/java/org/apache/twill/yarn/DistributeShellTestRun.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.yarn;
-
-import org.apache.twill.api.TwillController;
-import org.apache.twill.api.TwillRunner;
-import org.apache.twill.api.logging.PrinterLogHandler;
-import org.apache.twill.common.ServiceListenerAdapter;
-import org.apache.twill.common.Threads;
-import com.google.common.util.concurrent.Service;
-import org.junit.Assert;
-import org.junit.Ignore;
-import org.junit.Test;
-
-import java.io.PrintWriter;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.TimeUnit;
-
-/**
- * This test is executed by {@link YarnTestSuite}.
- */
-public class DistributeShellTestRun {
-
-  @Ignore
-  @Test
-  public void testDistributedShell() throws InterruptedException {
-    TwillRunner twillRunner = YarnTestSuite.getTwillRunner();
-
-    TwillController controller = twillRunner.prepare(new DistributedShell("pwd", "ls -al"))
-                                            .addLogHandler(new PrinterLogHandler(new PrintWriter(System.out)))
-                                            .start();
-
-    final CountDownLatch stopLatch = new CountDownLatch(1);
-    controller.addListener(new ServiceListenerAdapter() {
-
-      @Override
-      public void terminated(Service.State from) {
-        stopLatch.countDown();
-      }
-
-      @Override
-      public void failed(Service.State from, Throwable failure) {
-        stopLatch.countDown();
-      }
-    }, Threads.SAME_THREAD_EXECUTOR);
-
-    Assert.assertTrue(stopLatch.await(10, TimeUnit.SECONDS));
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/test/java/org/apache/twill/yarn/DistributedShell.java
----------------------------------------------------------------------
diff --git a/yarn/src/test/java/org/apache/twill/yarn/DistributedShell.java b/yarn/src/test/java/org/apache/twill/yarn/DistributedShell.java
deleted file mode 100644
index c89371c..0000000
--- a/yarn/src/test/java/org/apache/twill/yarn/DistributedShell.java
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.yarn;
-
-import org.apache.twill.api.AbstractTwillRunnable;
-import com.google.common.base.Charsets;
-import com.google.common.base.Joiner;
-import com.google.common.base.Splitter;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMap;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.BufferedReader;
-import java.io.IOException;
-import java.io.InputStreamReader;
-
-/**
- * A TwillRunnable that runs a sequence of shell commands and logs their output.
- */
-public final class DistributedShell extends AbstractTwillRunnable {
-
-  private static final Logger LOG = LoggerFactory.getLogger(DistributedShell.class);
-
-  public DistributedShell(String...commands) {
-    super(ImmutableMap.of("cmds", Joiner.on(';').join(commands)));
-  }
-
-  @Override
-  public void run() {
-    for (String cmd : Splitter.on(';').split(getArgument("cmds"))) {
-      try {
-        Process process = new ProcessBuilder(ImmutableList.copyOf(Splitter.on(' ').split(cmd)))
-                              .redirectErrorStream(true).start();
-        BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream(), Charsets.US_ASCII));
-        try {
-          String line = reader.readLine();
-          while (line != null) {
-            LOG.info(line);
-            line = reader.readLine();
-          }
-        } finally {
-          reader.close();
-        }
-      } catch (IOException e) {
-        LOG.error("Fail to execute command " + cmd, e);
-      }
-    }
-  }
-
-  @Override
-  public void stop() {
-    // No-op
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/test/java/org/apache/twill/yarn/EchoServer.java
----------------------------------------------------------------------
diff --git a/yarn/src/test/java/org/apache/twill/yarn/EchoServer.java b/yarn/src/test/java/org/apache/twill/yarn/EchoServer.java
deleted file mode 100644
index 6b77e66..0000000
--- a/yarn/src/test/java/org/apache/twill/yarn/EchoServer.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.yarn;
-
-import org.apache.twill.api.Command;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.BufferedReader;
-import java.io.IOException;
-import java.io.PrintWriter;
-
-/**
- * Test server that echoes back what it receives.
- */
-public final class EchoServer extends SocketServer {
-
-  private static final Logger LOG = LoggerFactory.getLogger(EchoServer.class);
-
-  @Override
-  public void handleRequest(BufferedReader reader, PrintWriter writer) throws IOException {
-    String line = reader.readLine();
-    LOG.info("Received: " + line);
-    if (line != null) {
-      writer.println(line);
-    }
-  }
-
-  @Override
-  public void handleCommand(Command command) throws Exception {
-    LOG.info("Command received: " + command + " " + getContext().getInstanceCount());
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/test/java/org/apache/twill/yarn/EchoServerTestRun.java
----------------------------------------------------------------------
diff --git a/yarn/src/test/java/org/apache/twill/yarn/EchoServerTestRun.java b/yarn/src/test/java/org/apache/twill/yarn/EchoServerTestRun.java
deleted file mode 100644
index d868eef..0000000
--- a/yarn/src/test/java/org/apache/twill/yarn/EchoServerTestRun.java
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.yarn;
-
-import org.apache.twill.api.ResourceSpecification;
-import org.apache.twill.api.TwillController;
-import org.apache.twill.api.TwillRunner;
-import org.apache.twill.api.TwillRunnerService;
-import org.apache.twill.api.logging.PrinterLogHandler;
-import org.apache.twill.common.ServiceListenerAdapter;
-import org.apache.twill.common.Threads;
-import org.apache.twill.discovery.Discoverable;
-import com.google.common.base.Charsets;
-import com.google.common.io.LineReader;
-import org.junit.Assert;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.io.InputStreamReader;
-import java.io.OutputStreamWriter;
-import java.io.PrintWriter;
-import java.net.Socket;
-import java.net.URISyntaxException;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-
-/**
- * Using an echo server to test various behaviors of YarnTwillService.
- * This test is executed by {@link YarnTestSuite}.
- */
-public class EchoServerTestRun {
-
-  private static final Logger LOG = LoggerFactory.getLogger(EchoServerTestRun.class);
-
-  @Test
-  public void testEchoServer() throws InterruptedException, ExecutionException, IOException,
-    URISyntaxException, TimeoutException {
-    TwillRunner runner = YarnTestSuite.getTwillRunner();
-
-    TwillController controller = runner.prepare(new EchoServer(),
-                                                ResourceSpecification.Builder.with()
-                                                         .setVirtualCores(1)
-                                                         .setMemory(1, ResourceSpecification.SizeUnit.GIGA)
-                                                         .setInstances(2)
-                                                         .build())
-                                        .addLogHandler(new PrinterLogHandler(new PrintWriter(System.out, true)))
-                                        .withApplicationArguments("echo")
-                                        .withArguments("EchoServer", "echo2")
-                                        .start();
-
-    final CountDownLatch running = new CountDownLatch(1);
-    controller.addListener(new ServiceListenerAdapter() {
-      @Override
-      public void running() {
-        running.countDown();
-      }
-    }, Threads.SAME_THREAD_EXECUTOR);
-
-    Assert.assertTrue(running.await(30, TimeUnit.SECONDS));
-
-    Iterable<Discoverable> echoServices = controller.discoverService("echo");
-    Assert.assertTrue(YarnTestSuite.waitForSize(echoServices, 2, 60));
-
-    for (Discoverable discoverable : echoServices) {
-      String msg = "Hello: " + discoverable.getSocketAddress();
-
-      Socket socket = new Socket(discoverable.getSocketAddress().getAddress(),
-                                 discoverable.getSocketAddress().getPort());
-      try {
-        PrintWriter writer = new PrintWriter(new OutputStreamWriter(socket.getOutputStream(), Charsets.UTF_8), true);
-        LineReader reader = new LineReader(new InputStreamReader(socket.getInputStream(), Charsets.UTF_8));
-
-        writer.println(msg);
-        Assert.assertEquals(msg, reader.readLine());
-      } finally {
-        socket.close();
-      }
-    }
-
-    // Increase number of instances
-    controller.changeInstances("EchoServer", 3);
-    Assert.assertTrue(YarnTestSuite.waitForSize(echoServices, 3, 60));
-
-    echoServices = controller.discoverService("echo2");
-
-    // Decrease number of instances
-    controller.changeInstances("EchoServer", 1);
-    Assert.assertTrue(YarnTestSuite.waitForSize(echoServices, 1, 60));
-
-    // Increase number of instances again
-    controller.changeInstances("EchoServer", 2);
-    Assert.assertTrue(YarnTestSuite.waitForSize(echoServices, 2, 60));
-
-    // Make sure still only one app is running
-    Iterable<TwillRunner.LiveInfo> apps = runner.lookupLive();
-    Assert.assertTrue(YarnTestSuite.waitForSize(apps, 1, 60));
-
-    // Creates a new runner service to check it can regain control over running app.
-    TwillRunnerService runnerService = YarnTestSuite.createTwillRunnerService();
-    runnerService.startAndWait();
-
-    try {
-      Iterable <TwillController> controllers = runnerService.lookup("EchoServer");
-      Assert.assertTrue(YarnTestSuite.waitForSize(controllers, 1, 60));
-
-      for (TwillController c : controllers) {
-        LOG.info("Stopping application: " + c.getRunId());
-        c.stop().get(30, TimeUnit.SECONDS);
-      }
-
-      Assert.assertTrue(YarnTestSuite.waitForSize(apps, 0, 60));
-    } finally {
-      runnerService.stopAndWait();
-    }
-
-    // Sleep a bit before exiting.
-    TimeUnit.SECONDS.sleep(2);
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/test/java/org/apache/twill/yarn/EnvironmentEchoServer.java
----------------------------------------------------------------------
diff --git a/yarn/src/test/java/org/apache/twill/yarn/EnvironmentEchoServer.java b/yarn/src/test/java/org/apache/twill/yarn/EnvironmentEchoServer.java
deleted file mode 100644
index 4be2472..0000000
--- a/yarn/src/test/java/org/apache/twill/yarn/EnvironmentEchoServer.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.yarn;
-
-import java.io.BufferedReader;
-import java.io.IOException;
-import java.io.PrintWriter;
-
-/**
- * Test server that returns the value of the env key sent in. Used to check that the environment
- * for runnables is correctly set.
- */
-public class EnvironmentEchoServer extends SocketServer {
-
-  @Override
-  public void handleRequest(BufferedReader reader, PrintWriter writer) throws IOException {
-    String envKey = reader.readLine();
-    writer.println(System.getenv(envKey));
-  }
-}

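A usage illustration (a sketch only, not part of this commit): a bare client that asks a running EnvironmentEchoServer for the value of an environment variable. The host, port, and env key below are placeholders; in the test runs later in this commit they come from service discovery and EnvKeys respectively.

    import com.google.common.base.Charsets;
    import com.google.common.io.LineReader;

    import java.io.InputStreamReader;
    import java.io.OutputStreamWriter;
    import java.io.PrintWriter;
    import java.net.Socket;

    public final class EnvEchoClientSketch {
      public static void main(String[] args) throws Exception {
        // args[0] = host, args[1] = port of an announced EnvironmentEchoServer instance.
        Socket socket = new Socket(args[0], Integer.parseInt(args[1]));
        try {
          PrintWriter writer = new PrintWriter(new OutputStreamWriter(socket.getOutputStream(), Charsets.UTF_8), true);
          LineReader reader = new LineReader(new InputStreamReader(socket.getInputStream(), Charsets.UTF_8));
          writer.println("TWILL_INSTANCE_COUNT");    // placeholder env key
          System.out.println(reader.readLine());     // value as seen inside the container
        } finally {
          socket.close();
        }
      }
    }
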
http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/test/java/org/apache/twill/yarn/FailureRestartTestRun.java
----------------------------------------------------------------------
diff --git a/yarn/src/test/java/org/apache/twill/yarn/FailureRestartTestRun.java b/yarn/src/test/java/org/apache/twill/yarn/FailureRestartTestRun.java
deleted file mode 100644
index b3d3933..0000000
--- a/yarn/src/test/java/org/apache/twill/yarn/FailureRestartTestRun.java
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.yarn;
-
-import org.apache.twill.api.Command;
-import org.apache.twill.api.ResourceSpecification;
-import org.apache.twill.api.TwillController;
-import org.apache.twill.api.TwillRunner;
-import org.apache.twill.api.logging.PrinterLogHandler;
-import org.apache.twill.discovery.Discoverable;
-import com.google.common.base.Charsets;
-import com.google.common.collect.Sets;
-import com.google.common.io.LineReader;
-import org.junit.Assert;
-import org.junit.Test;
-
-import java.io.BufferedReader;
-import java.io.IOException;
-import java.io.InputStreamReader;
-import java.io.OutputStreamWriter;
-import java.io.PrintWriter;
-import java.net.InetSocketAddress;
-import java.net.Socket;
-import java.util.Set;
-import java.util.concurrent.TimeUnit;
-
-/**
- * Test that a killed runnable instance gets restarted and keeps its instance id.
- */
-public class FailureRestartTestRun {
-
-  @Test
-  public void testFailureRestart() throws Exception {
-    TwillRunner runner = YarnTestSuite.getTwillRunner();
-
-    ResourceSpecification resource = ResourceSpecification.Builder.with()
-      .setVirtualCores(1)
-      .setMemory(512, ResourceSpecification.SizeUnit.MEGA)
-      .setInstances(2)
-      .build();
-    TwillController controller = runner.prepare(new FailureRunnable(), resource)
-      .withApplicationArguments("failure")
-      .withArguments(FailureRunnable.class.getSimpleName(), "failure2")
-      .addLogHandler(new PrinterLogHandler(new PrintWriter(System.out, true)))
-      .start();
-
-    Iterable<Discoverable> discoverables = controller.discoverService("failure");
-    Assert.assertTrue(YarnTestSuite.waitForSize(discoverables, 2, 60));
-
-    // Make sure we see the right instance IDs
-    Assert.assertEquals(Sets.newHashSet(0, 1), getInstances(discoverables));
-
-    // Kill server with instanceId = 0
-    controller.sendCommand(FailureRunnable.class.getSimpleName(), Command.Builder.of("kill0").build());
-
-    // Do a short sleep to make sure the runnable is killed.
-    TimeUnit.SECONDS.sleep(5);
-
-    Assert.assertTrue(YarnTestSuite.waitForSize(discoverables, 2, 60));
-    // Make sure we see the right instance IDs
-    Assert.assertEquals(Sets.newHashSet(0, 1), getInstances(discoverables));
-
-    controller.stopAndWait();
-  }
-
-  private Set<Integer> getInstances(Iterable<Discoverable> discoverables) throws IOException {
-    Set<Integer> instances = Sets.newHashSet();
-    for (Discoverable discoverable : discoverables) {
-      InetSocketAddress socketAddress = discoverable.getSocketAddress();
-      Socket socket = new Socket(socketAddress.getAddress(), socketAddress.getPort());
-      try {
-        PrintWriter writer = new PrintWriter(new OutputStreamWriter(socket.getOutputStream(), Charsets.UTF_8), true);
-        LineReader reader = new LineReader(new InputStreamReader(socket.getInputStream(), Charsets.UTF_8));
-
-        String msg = "Failure";
-        writer.println(msg);
-
-        String line = reader.readLine();
-        Assert.assertTrue(line.endsWith(msg));
-        instances.add(Integer.parseInt(line.substring(0, line.length() - msg.length())));
-      } finally {
-        socket.close();
-      }
-    }
-    return instances;
-  }
-
-
-  public static final class FailureRunnable extends SocketServer {
-
-    private volatile boolean killed;
-
-    @Override
-    public void run() {
-      killed = false;
-      super.run();
-      if (killed) {
-        throw new RuntimeException("Exception");
-      }
-    }
-
-    @Override
-    public void handleCommand(Command command) throws Exception {
-      if (command.getCommand().equals("kill" + getContext().getInstanceId())) {
-        killed = true;
-        running = false;
-        serverSocket.close();
-      }
-    }
-
-    @Override
-    public void handleRequest(BufferedReader reader, PrintWriter writer) throws IOException {
-      String line = reader.readLine();
-      writer.println(getContext().getInstanceId() + line);
-      writer.flush();
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/test/java/org/apache/twill/yarn/LocalFileTestRun.java
----------------------------------------------------------------------
diff --git a/yarn/src/test/java/org/apache/twill/yarn/LocalFileTestRun.java b/yarn/src/test/java/org/apache/twill/yarn/LocalFileTestRun.java
deleted file mode 100644
index de2c74c..0000000
--- a/yarn/src/test/java/org/apache/twill/yarn/LocalFileTestRun.java
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.yarn;
-
-import org.apache.twill.api.TwillApplication;
-import org.apache.twill.api.TwillController;
-import org.apache.twill.api.TwillRunner;
-import org.apache.twill.api.TwillSpecification;
-import org.apache.twill.api.logging.PrinterLogHandler;
-import org.apache.twill.discovery.Discoverable;
-import com.google.common.base.Charsets;
-import com.google.common.base.Preconditions;
-import com.google.common.io.ByteStreams;
-import com.google.common.io.Files;
-import com.google.common.io.LineReader;
-import org.junit.Assert;
-import org.junit.ClassRule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.InputStreamReader;
-import java.io.OutputStreamWriter;
-import java.io.PrintWriter;
-import java.net.InetSocketAddress;
-import java.net.Socket;
-import java.util.concurrent.TimeUnit;
-import java.util.jar.JarEntry;
-import java.util.jar.JarOutputStream;
-
-/**
- * Test for local file transfer.
- */
-public class LocalFileTestRun {
-
-  @ClassRule
-  public static TemporaryFolder tmpFolder = new TemporaryFolder();
-
-  @Test
-  public void testLocalFile() throws Exception {
-    String header = Files.readFirstLine(new File(getClass().getClassLoader().getResource("header.txt").toURI()),
-                                        Charsets.UTF_8);
-
-    TwillRunner runner = YarnTestSuite.getTwillRunner();
-    if (runner instanceof YarnTwillRunnerService) {
-      ((YarnTwillRunnerService) runner).setJVMOptions("-verbose:gc -Xloggc:gc.log -XX:+PrintGCDetails");
-    }
-
-    TwillController controller = runner.prepare(new LocalFileApplication())
-      .withApplicationArguments("local")
-      .withArguments("LocalFileSocketServer", "local2")
-      .addLogHandler(new PrinterLogHandler(new PrintWriter(System.out, true)))
-      .start();
-
-    if (runner instanceof YarnTwillRunnerService) {
-      ((YarnTwillRunnerService) runner).setJVMOptions("");
-    }
-
-    Iterable<Discoverable> discoverables = controller.discoverService("local");
-    Assert.assertTrue(YarnTestSuite.waitForSize(discoverables, 1, 60));
-
-    InetSocketAddress socketAddress = discoverables.iterator().next().getSocketAddress();
-    Socket socket = new Socket(socketAddress.getAddress(), socketAddress.getPort());
-    try {
-      PrintWriter writer = new PrintWriter(new OutputStreamWriter(socket.getOutputStream(), Charsets.UTF_8), true);
-      LineReader reader = new LineReader(new InputStreamReader(socket.getInputStream(), Charsets.UTF_8));
-
-      String msg = "Local file test";
-      writer.println(msg);
-      Assert.assertEquals(header, reader.readLine());
-      Assert.assertEquals(msg, reader.readLine());
-    } finally {
-      socket.close();
-    }
-
-    controller.stopAndWait();
-
-    Assert.assertTrue(YarnTestSuite.waitForSize(discoverables, 0, 60));
-
-    TimeUnit.SECONDS.sleep(2);
-  }
-
-  public static final class LocalFileApplication implements TwillApplication {
-
-    private final File headerFile;
-
-    public LocalFileApplication() throws Exception {
-      // Create a jar file that contains the header.txt file inside.
-      headerFile = tmpFolder.newFile("header.jar");
-      JarOutputStream os = new JarOutputStream(new FileOutputStream(headerFile));
-      try {
-        os.putNextEntry(new JarEntry("header.txt"));
-        ByteStreams.copy(getClass().getClassLoader().getResourceAsStream("header.txt"), os);
-      } finally {
-        os.close();
-      }
-    }
-
-    @Override
-    public TwillSpecification configure() {
-      return TwillSpecification.Builder.with()
-        .setName("LocalFileApp")
-        .withRunnable()
-          .add(new LocalFileSocketServer())
-            .withLocalFiles()
-              .add("header", headerFile, true).apply()
-        .anyOrder()
-        .build();
-    }
-  }
-
-  public static final class LocalFileSocketServer extends SocketServer {
-
-    private static final Logger LOG = LoggerFactory.getLogger(LocalFileSocketServer.class);
-
-    @Override
-    public void handleRequest(BufferedReader reader, PrintWriter writer) throws IOException {
-      // Verify there is a gc.log file locally
-      Preconditions.checkState(new File("gc.log").exists());
-
-      LOG.info("handleRequest");
-      String header = Files.toString(new File("header/header.txt"), Charsets.UTF_8);
-      writer.write(header);
-      writer.println(reader.readLine());
-      LOG.info("Flushed response");
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/test/java/org/apache/twill/yarn/ProvisionTimeoutTestRun.java
----------------------------------------------------------------------
diff --git a/yarn/src/test/java/org/apache/twill/yarn/ProvisionTimeoutTestRun.java b/yarn/src/test/java/org/apache/twill/yarn/ProvisionTimeoutTestRun.java
deleted file mode 100644
index 0598ef1..0000000
--- a/yarn/src/test/java/org/apache/twill/yarn/ProvisionTimeoutTestRun.java
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.yarn;
-
-import org.apache.twill.api.AbstractTwillRunnable;
-import org.apache.twill.api.EventHandler;
-import org.apache.twill.api.EventHandlerContext;
-import org.apache.twill.api.ResourceSpecification;
-import org.apache.twill.api.TwillApplication;
-import org.apache.twill.api.TwillController;
-import org.apache.twill.api.TwillRunner;
-import org.apache.twill.api.TwillSpecification;
-import org.apache.twill.api.logging.PrinterLogHandler;
-import org.apache.twill.common.Services;
-import com.google.common.base.Throwables;
-import com.google.common.collect.ImmutableMap;
-import org.junit.Test;
-
-import java.io.PrintWriter;
-import java.util.Map;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-
-/**
- * Test aborting an application when container provisioning times out.
- */
-public class ProvisionTimeoutTestRun {
-
-  @Test
-  public void testProvisionTimeout() throws InterruptedException, ExecutionException, TimeoutException {
-    TwillRunner runner = YarnTestSuite.getTwillRunner();
-
-    TwillController controller = runner.prepare(new TimeoutApplication())
-                                       .addLogHandler(new PrinterLogHandler(new PrintWriter(System.out, true)))
-                                       .start();
-
-    // The provision should fail within 30 seconds after the AM starts, and the AM itself could take a while
-    // to start. Hence we give a maximum of 90 seconds here.
-    try {
-      Services.getCompletionFuture(controller).get(90, TimeUnit.SECONDS);
-    } finally {
-      // If it times out, kill the app as cleanup.
-      controller.kill();
-    }
-  }
-
-  public static final class Handler extends EventHandler {
-
-    private boolean abort;
-
-    @Override
-    protected Map<String, String> getConfigs() {
-      return ImmutableMap.of("abort", "true");
-    }
-
-    @Override
-    public void initialize(EventHandlerContext context) {
-      this.abort = Boolean.parseBoolean(context.getSpecification().getConfigs().get("abort"));
-    }
-
-    @Override
-    public TimeoutAction launchTimeout(Iterable<TimeoutEvent> timeoutEvents) {
-      if (abort) {
-        return TimeoutAction.abort();
-      } else {
-        return TimeoutAction.recheck(10, TimeUnit.SECONDS);
-      }
-    }
-  }
-
-  public static final class TimeoutApplication implements TwillApplication {
-
-    @Override
-    public TwillSpecification configure() {
-      return TwillSpecification.Builder.with()
-        .setName("TimeoutApplication")
-        .withRunnable()
-        .add(new TimeoutRunnable(),
-             ResourceSpecification.Builder.with()
-               .setVirtualCores(1)
-               .setMemory(8, ResourceSpecification.SizeUnit.GIGA).build())
-        .noLocalFiles()
-        .anyOrder()
-        .withEventHandler(new Handler())
-        .build();
-    }
-  }
-
-  /**
-   * A runnable that does nothing, as it's not expected to get provisioned.
-   */
-  public static final class TimeoutRunnable extends AbstractTwillRunnable {
-
-    private final CountDownLatch latch = new CountDownLatch(1);
-
-    @Override
-    public void stop() {
-      latch.countDown();
-    }
-
-    @Override
-    public void run() {
-      // Simply block here
-      try {
-        latch.await();
-      } catch (InterruptedException e) {
-        throw Throwables.propagate(e);
-      }
-    }
-  }
-}


[03/28] Making maven site works.

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/test/java/org/apache/twill/yarn/ResourceReportTestRun.java
----------------------------------------------------------------------
diff --git a/yarn/src/test/java/org/apache/twill/yarn/ResourceReportTestRun.java b/yarn/src/test/java/org/apache/twill/yarn/ResourceReportTestRun.java
deleted file mode 100644
index 131f90a..0000000
--- a/yarn/src/test/java/org/apache/twill/yarn/ResourceReportTestRun.java
+++ /dev/null
@@ -1,268 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.yarn;
-
-import org.apache.twill.api.ResourceReport;
-import org.apache.twill.api.ResourceSpecification;
-import org.apache.twill.api.TwillApplication;
-import org.apache.twill.api.TwillController;
-import org.apache.twill.api.TwillRunResources;
-import org.apache.twill.api.TwillRunner;
-import org.apache.twill.api.TwillSpecification;
-import org.apache.twill.api.logging.PrinterLogHandler;
-import org.apache.twill.common.ServiceListenerAdapter;
-import org.apache.twill.common.Threads;
-import org.apache.twill.discovery.Discoverable;
-import org.apache.twill.internal.EnvKeys;
-import com.google.common.base.Charsets;
-import com.google.common.collect.Maps;
-import com.google.common.io.LineReader;
-import org.junit.Assert;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.io.InputStreamReader;
-import java.io.OutputStreamWriter;
-import java.io.PrintWriter;
-import java.net.Socket;
-import java.net.URISyntaxException;
-import java.util.Collection;
-import java.util.Map;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-
-/**
- * Using echo server to test resource reports.
- * This test is executed by {@link org.apache.twill.yarn.YarnTestSuite}.
- */
-public class ResourceReportTestRun {
-
-  private static final Logger LOG = LoggerFactory.getLogger(ResourceReportTestRun.class);
-
-  private class ResourceApplication implements TwillApplication {
-    @Override
-    public TwillSpecification configure() {
-      return TwillSpecification.Builder.with()
-        .setName("ResourceApplication")
-        .withRunnable()
-          .add("echo1", new EchoServer(), ResourceSpecification.Builder.with()
-            .setVirtualCores(1)
-            .setMemory(128, ResourceSpecification.SizeUnit.MEGA)
-            .setInstances(2).build()).noLocalFiles()
-          .add("echo2", new EchoServer(), ResourceSpecification.Builder.with()
-            .setVirtualCores(2)
-            .setMemory(256, ResourceSpecification.SizeUnit.MEGA)
-            .setInstances(1).build()).noLocalFiles()
-        .anyOrder()
-        .build();
-    }
-  }
-
-  @Test
-  public void testRunnablesGetAllowedResourcesInEnv() throws InterruptedException, IOException,
-    TimeoutException, ExecutionException {
-    TwillRunner runner = YarnTestSuite.getTwillRunner();
-
-    ResourceSpecification resourceSpec = ResourceSpecification.Builder.with()
-      .setVirtualCores(1)
-      .setMemory(2048, ResourceSpecification.SizeUnit.MEGA)
-      .setInstances(1)
-      .build();
-    TwillController controller = runner.prepare(new EnvironmentEchoServer(), resourceSpec)
-      .addLogHandler(new PrinterLogHandler(new PrintWriter(System.out, true)))
-      .withApplicationArguments("envecho")
-      .withArguments("EnvironmentEchoServer", "echo2")
-      .start();
-
-    final CountDownLatch running = new CountDownLatch(1);
-    controller.addListener(new ServiceListenerAdapter() {
-      @Override
-      public void running() {
-        running.countDown();
-      }
-    }, Threads.SAME_THREAD_EXECUTOR);
-
-    Assert.assertTrue(running.await(30, TimeUnit.SECONDS));
-
-    Iterable<Discoverable> envEchoServices = controller.discoverService("envecho");
-    Assert.assertTrue(YarnTestSuite.waitForSize(envEchoServices, 1, 30));
-
-    // TODO: check virtual cores once yarn adds the ability
-    Map<String, String> expectedValues = Maps.newHashMap();
-    expectedValues.put(EnvKeys.YARN_CONTAINER_MEMORY_MB, "2048");
-    expectedValues.put(EnvKeys.TWILL_INSTANCE_COUNT, "1");
-
-    // check environment of the runnable.
-    Discoverable discoverable = envEchoServices.iterator().next();
-    for (Map.Entry<String, String> expected : expectedValues.entrySet()) {
-      Socket socket = new Socket(discoverable.getSocketAddress().getHostName(),
-                                 discoverable.getSocketAddress().getPort());
-      try {
-        PrintWriter writer = new PrintWriter(new OutputStreamWriter(socket.getOutputStream(), Charsets.UTF_8), true);
-        LineReader reader = new LineReader(new InputStreamReader(socket.getInputStream(), Charsets.UTF_8));
-        writer.println(expected.getKey());
-        Assert.assertEquals(expected.getValue(), reader.readLine());
-      } finally {
-        socket.close();
-      }
-    }
-
-    controller.stop().get(30, TimeUnit.SECONDS);
-    // Sleep a bit before exiting.
-    TimeUnit.SECONDS.sleep(2);
-  }
-
-  @Test
-  public void testResourceReportWithFailingContainers() throws InterruptedException, IOException,
-    TimeoutException, ExecutionException {
-    TwillRunner runner = YarnTestSuite.getTwillRunner();
-
-    ResourceSpecification resourceSpec = ResourceSpecification.Builder.with()
-      .setVirtualCores(1)
-      .setMemory(128, ResourceSpecification.SizeUnit.MEGA)
-      .setInstances(2)
-      .build();
-    TwillController controller = runner.prepare(new BuggyServer(), resourceSpec)
-      .addLogHandler(new PrinterLogHandler(new PrintWriter(System.out, true)))
-      .withApplicationArguments("echo")
-      .withArguments("BuggyServer", "echo2")
-      .start();
-
-    final CountDownLatch running = new CountDownLatch(1);
-    controller.addListener(new ServiceListenerAdapter() {
-      @Override
-      public void running() {
-        running.countDown();
-      }
-    }, Threads.SAME_THREAD_EXECUTOR);
-
-    Assert.assertTrue(running.await(30, TimeUnit.SECONDS));
-
-    Iterable<Discoverable> echoServices = controller.discoverService("echo");
-    Assert.assertTrue(YarnTestSuite.waitForSize(echoServices, 2, 60));
-    // check that we have 2 runnables.
-    ResourceReport report = controller.getResourceReport();
-    Assert.assertEquals(2, report.getRunnableResources("BuggyServer").size());
-
-    // cause a divide by 0 in one server
-    Discoverable discoverable = echoServices.iterator().next();
-    Socket socket = new Socket(discoverable.getSocketAddress().getAddress(),
-                               discoverable.getSocketAddress().getPort());
-    try {
-      PrintWriter writer = new PrintWriter(new OutputStreamWriter(socket.getOutputStream(), Charsets.UTF_8), true);
-      writer.println("0");
-    } finally {
-      socket.close();
-    }
-
-    // It takes some time for the app master to find out that the container completed...
-    TimeUnit.SECONDS.sleep(5);
-    // check that we have 1 runnable, not 2.
-    report = controller.getResourceReport();
-    Assert.assertEquals(1, report.getRunnableResources("BuggyServer").size());
-
-    controller.stop().get(30, TimeUnit.SECONDS);
-    // Sleep a bit before exiting.
-    TimeUnit.SECONDS.sleep(2);
-  }
-
-  @Test
-  public void testResourceReport() throws InterruptedException, ExecutionException, IOException,
-    URISyntaxException, TimeoutException {
-    TwillRunner runner = YarnTestSuite.getTwillRunner();
-
-    TwillController controller = runner.prepare(new ResourceApplication())
-                                        .addLogHandler(new PrinterLogHandler(new PrintWriter(System.out, true)))
-                                        .withApplicationArguments("echo")
-                                        .withArguments("echo1", "echo1")
-                                        .withArguments("echo2", "echo2")
-                                        .start();
-
-    final CountDownLatch running = new CountDownLatch(1);
-    controller.addListener(new ServiceListenerAdapter() {
-      @Override
-      public void running() {
-        running.countDown();
-      }
-    }, Threads.SAME_THREAD_EXECUTOR);
-
-    Assert.assertTrue(running.await(30, TimeUnit.SECONDS));
-
-    // wait for 3 echo servers to come up
-    Iterable<Discoverable> echoServices = controller.discoverService("echo");
-    Assert.assertTrue(YarnTestSuite.waitForSize(echoServices, 3, 60));
-    ResourceReport report = controller.getResourceReport();
-    // make sure resources for echo1 and echo2 are there
-    Map<String, Collection<TwillRunResources>> usedResources = report.getResources();
-    Assert.assertEquals(2, usedResources.keySet().size());
-    Assert.assertTrue(usedResources.containsKey("echo1"));
-    Assert.assertTrue(usedResources.containsKey("echo2"));
-
-    Collection<TwillRunResources> echo1Resources = usedResources.get("echo1");
-    // 2 instances of echo1
-    Assert.assertEquals(2, echo1Resources.size());
-    // TODO: check cores after hadoop-2.1.0
-    for (TwillRunResources resources : echo1Resources) {
-      Assert.assertEquals(128, resources.getMemoryMB());
-    }
-
-    Collection<TwillRunResources> echo2Resources = usedResources.get("echo2");
-    // 1 instance of echo2
-    Assert.assertEquals(1, echo2Resources.size());
-    // TODO: check cores after hadoop-2.1.0
-    for (TwillRunResources resources : echo2Resources) {
-      Assert.assertEquals(256, resources.getMemoryMB());
-    }
-
-    // Decrease number of instances of echo1 from 2 to 1
-    controller.changeInstances("echo1", 1);
-    echoServices = controller.discoverService("echo1");
-    Assert.assertTrue(YarnTestSuite.waitForSize(echoServices, 1, 60));
-    report = controller.getResourceReport();
-
-    // make sure resources for echo1 and echo2 are there
-    usedResources = report.getResources();
-    Assert.assertEquals(2, usedResources.keySet().size());
-    Assert.assertTrue(usedResources.containsKey("echo1"));
-    Assert.assertTrue(usedResources.containsKey("echo2"));
-
-    echo1Resources = usedResources.get("echo1");
-    // 1 instance of echo1 now
-    Assert.assertEquals(1, echo1Resources.size());
-    // TODO: check cores after hadoop-2.1.0
-    for (TwillRunResources resources : echo1Resources) {
-      Assert.assertEquals(128, resources.getMemoryMB());
-    }
-
-    echo2Resources = usedResources.get("echo2");
-    // 1 instance of echo2
-    Assert.assertEquals(1, echo2Resources.size());
-    // TODO: check cores after hadoop-2.1.0
-    for (TwillRunResources resources : echo2Resources) {
-      Assert.assertEquals(256, resources.getMemoryMB());
-    }
-
-    controller.stop().get(30, TimeUnit.SECONDS);
-    // Sleep a bit before exiting.
-    TimeUnit.SECONDS.sleep(2);
-  }
-}
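
A minimal sketch of the resource-report accessors exercised by the test above (getResourceReport, getResources, getMemoryMB). It assumes a TwillController that has already been started; the class name is illustrative only.

    import java.util.Collection;
    import java.util.Map;

    import org.apache.twill.api.ResourceReport;
    import org.apache.twill.api.TwillController;
    import org.apache.twill.api.TwillRunResources;

    /** Illustrative helper; the controller is assumed to be started already. */
    public final class ResourceReportPrinter {
      public static void print(TwillController controller) {
        ResourceReport report = controller.getResourceReport();
        // One entry per runnable name, each holding the resources of its live instances.
        for (Map.Entry<String, Collection<TwillRunResources>> entry : report.getResources().entrySet()) {
          for (TwillRunResources resources : entry.getValue()) {
            System.out.println(entry.getKey() + ": " + resources.getMemoryMB() + " MB");
          }
        }
      }
    }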

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/test/java/org/apache/twill/yarn/SocketServer.java
----------------------------------------------------------------------
diff --git a/yarn/src/test/java/org/apache/twill/yarn/SocketServer.java b/yarn/src/test/java/org/apache/twill/yarn/SocketServer.java
deleted file mode 100644
index 5148ed2..0000000
--- a/yarn/src/test/java/org/apache/twill/yarn/SocketServer.java
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.yarn;
-
-import org.apache.twill.api.AbstractTwillRunnable;
-import org.apache.twill.api.TwillContext;
-import org.apache.twill.common.Cancellable;
-import com.google.common.base.Charsets;
-import com.google.common.base.Throwables;
-import com.google.common.collect.ImmutableList;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.BufferedReader;
-import java.io.IOException;
-import java.io.InputStreamReader;
-import java.io.OutputStreamWriter;
-import java.io.PrintWriter;
-import java.net.ServerSocket;
-import java.net.Socket;
-import java.net.SocketException;
-import java.util.List;
-
-/**
- * Boilerplate for a server that announces itself and talks to clients through a socket.
- */
-public abstract class SocketServer extends AbstractTwillRunnable {
-
-  private static final Logger LOG = LoggerFactory.getLogger(SocketServer.class);
-
-  protected volatile boolean running;
-  protected volatile Thread runThread;
-  protected ServerSocket serverSocket;
-  protected Cancellable canceller;
-
-  @Override
-  public void initialize(TwillContext context) {
-    super.initialize(context);
-    running = true;
-    try {
-      serverSocket = new ServerSocket(0);
-      LOG.info("Server started: " + serverSocket.getLocalSocketAddress() +
-               ", id: " + context.getInstanceId() +
-               ", count: " + context.getInstanceCount());
-
-      final List<Cancellable> cancellables = ImmutableList.of(
-        context.announce(context.getApplicationArguments()[0], serverSocket.getLocalPort()),
-        context.announce(context.getArguments()[0], serverSocket.getLocalPort())
-      );
-      canceller = new Cancellable() {
-        @Override
-        public void cancel() {
-          for (Cancellable c : cancellables) {
-            c.cancel();
-          }
-        }
-      };
-    } catch (IOException e) {
-      throw Throwables.propagate(e);
-    }
-  }
-
-  @Override
-  public void run() {
-    try {
-      runThread = Thread.currentThread();
-      while (running) {
-        try {
-          Socket socket = serverSocket.accept();
-          try {
-            BufferedReader reader = new BufferedReader(new InputStreamReader(socket.getInputStream(), Charsets.UTF_8));
-            PrintWriter writer = new PrintWriter(new OutputStreamWriter(socket.getOutputStream()), true);
-            handleRequest(reader, writer);
-          } finally {
-            socket.close();
-          }
-        } catch (SocketException e) {
-          LOG.info("Socket exception: " + e);
-        }
-      }
-    } catch (Exception e) {
-      LOG.error(e.getMessage(), e);
-    }
-  }
-
-  @Override
-  public void stop() {
-    LOG.info("Stopping server");
-    canceller.cancel();
-    running = false;
-    Thread t = runThread;
-    if (t != null) {
-      t.interrupt();
-    }
-    try {
-      serverSocket.close();
-    } catch (IOException e) {
-      LOG.error("Exception while closing socket.", e);
-      throw Throwables.propagate(e);
-    }
-    serverSocket = null;
-  }
-
-  @Override
-  public void destroy() {
-    try {
-      if (serverSocket != null) {
-        serverSocket.close();
-      }
-    } catch (IOException e) {
-      LOG.error("Exception while closing socket.", e);
-      throw Throwables.propagate(e);
-    }
-  }
-
-  public abstract void handleRequest(BufferedReader reader, PrintWriter writer) throws IOException;
-}
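
A sketch of a client for runnables built on SocketServer above: it looks up an announced instance through the controller and exchanges one line over the socket. The Discoverable#getSocketAddress() accessor, the "echo" service name, and the class name are assumptions taken from the tests in this module.

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.io.OutputStreamWriter;
    import java.io.PrintWriter;
    import java.net.Socket;

    import com.google.common.base.Charsets;
    import org.apache.twill.api.TwillController;
    import org.apache.twill.discovery.Discoverable;

    /** Illustrative client; assumes at least one instance is announced under "echo". */
    public final class EchoClient {
      public static String echo(TwillController controller, String message) throws Exception {
        // Pick the first instance announced under the "echo" service name.
        Discoverable discoverable = controller.discoverService("echo").iterator().next();
        Socket socket = new Socket(discoverable.getSocketAddress().getAddress(),
                                   discoverable.getSocketAddress().getPort());
        try {
          PrintWriter writer =
            new PrintWriter(new OutputStreamWriter(socket.getOutputStream(), Charsets.UTF_8), true);
          BufferedReader reader =
            new BufferedReader(new InputStreamReader(socket.getInputStream(), Charsets.UTF_8));
          writer.println(message);
          return reader.readLine();
        } finally {
          socket.close();
        }
      }
    }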

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/test/java/org/apache/twill/yarn/TaskCompletedTestRun.java
----------------------------------------------------------------------
diff --git a/yarn/src/test/java/org/apache/twill/yarn/TaskCompletedTestRun.java b/yarn/src/test/java/org/apache/twill/yarn/TaskCompletedTestRun.java
deleted file mode 100644
index 5a93271..0000000
--- a/yarn/src/test/java/org/apache/twill/yarn/TaskCompletedTestRun.java
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.yarn;
-
-import org.apache.twill.api.AbstractTwillRunnable;
-import org.apache.twill.api.ResourceSpecification;
-import org.apache.twill.api.TwillController;
-import org.apache.twill.api.TwillRunner;
-import org.apache.twill.api.logging.PrinterLogHandler;
-import org.apache.twill.common.ServiceListenerAdapter;
-import org.apache.twill.common.Threads;
-import com.google.common.base.Throwables;
-import com.google.common.util.concurrent.Service;
-import org.junit.Assert;
-import org.junit.Test;
-
-import java.io.PrintWriter;
-import java.util.Random;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.TimeUnit;
-
-/**
- * Testing application master will shutdown itself when all tasks are completed.
- * This test is executed by {@link YarnTestSuite}.
- */
-public class TaskCompletedTestRun {
-
-  public static final class SleepTask extends AbstractTwillRunnable {
-
-    @Override
-    public void run() {
-      // Randomly sleep for 3-5 seconds.
-      try {
-        TimeUnit.SECONDS.sleep(new Random().nextInt(3) + 3);
-      } catch (InterruptedException e) {
-        throw Throwables.propagate(e);
-      }
-    }
-
-    @Override
-    public void stop() {
-      // No-op
-    }
-  }
-
-  @Test
-  public void testTaskCompleted() throws InterruptedException {
-    TwillRunner twillRunner = YarnTestSuite.getTwillRunner();
-    TwillController controller = twillRunner.prepare(new SleepTask(),
-                                                ResourceSpecification.Builder.with()
-                                                  .setVirtualCores(1)
-                                                  .setMemory(512, ResourceSpecification.SizeUnit.MEGA)
-                                                  .setInstances(3).build())
-                                            .addLogHandler(new PrinterLogHandler(new PrintWriter(System.out, true)))
-                                            .start();
-
-    final CountDownLatch runLatch = new CountDownLatch(1);
-    final CountDownLatch stopLatch = new CountDownLatch(1);
-    controller.addListener(new ServiceListenerAdapter() {
-
-      @Override
-      public void running() {
-        runLatch.countDown();
-      }
-
-      @Override
-      public void terminated(Service.State from) {
-        stopLatch.countDown();
-      }
-    }, Threads.SAME_THREAD_EXECUTOR);
-
-    Assert.assertTrue(runLatch.await(1, TimeUnit.MINUTES));
-
-    Assert.assertTrue(stopLatch.await(1, TimeUnit.MINUTES));
-
-    TimeUnit.SECONDS.sleep(2);
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/test/java/org/apache/twill/yarn/TwillSpecificationTest.java
----------------------------------------------------------------------
diff --git a/yarn/src/test/java/org/apache/twill/yarn/TwillSpecificationTest.java b/yarn/src/test/java/org/apache/twill/yarn/TwillSpecificationTest.java
deleted file mode 100644
index 8be907b..0000000
--- a/yarn/src/test/java/org/apache/twill/yarn/TwillSpecificationTest.java
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.yarn;
-
-import org.apache.twill.api.AbstractTwillRunnable;
-import org.apache.twill.api.TwillSpecification;
-import com.google.common.collect.ImmutableSet;
-import org.junit.Assert;
-import org.junit.Test;
-
-import java.util.List;
-
-/**
- *
- */
-public class TwillSpecificationTest {
-
-  /**
-   * Dummy for test.
-   */
-  public static final class DummyRunnable extends AbstractTwillRunnable {
-
-    @Override
-    public void stop() {
-      // no-op
-    }
-
-    @Override
-    public void run() {
-      // no-op
-    }
-  }
-
-  @Test
-  public void testAnyOrder() {
-    TwillSpecification spec =
-      TwillSpecification.Builder.with()
-        .setName("Testing")
-        .withRunnable()
-        .add("r1", new DummyRunnable()).noLocalFiles()
-        .add("r2", new DummyRunnable()).noLocalFiles()
-        .add("r3", new DummyRunnable()).noLocalFiles()
-        .anyOrder()
-        .build();
-
-    Assert.assertEquals(3, spec.getRunnables().size());
-    List<TwillSpecification.Order> orders = spec.getOrders();
-    Assert.assertEquals(1, orders.size());
-    Assert.assertEquals(ImmutableSet.of("r1", "r2", "r3"), orders.get(0).getNames());
-  }
-
-  @Test
-  public void testOrder() {
-    TwillSpecification spec =
-      TwillSpecification.Builder.with()
-        .setName("Testing")
-        .withRunnable()
-        .add("r1", new DummyRunnable()).noLocalFiles()
-        .add("r2", new DummyRunnable()).noLocalFiles()
-        .add("r3", new DummyRunnable()).noLocalFiles()
-        .add("r4", new DummyRunnable()).noLocalFiles()
-        .withOrder().begin("r1", "r2").nextWhenStarted("r3")
-        .build();
-
-    Assert.assertEquals(4, spec.getRunnables().size());
-    List<TwillSpecification.Order> orders = spec.getOrders();
-    Assert.assertEquals(3, orders.size());
-    Assert.assertEquals(ImmutableSet.of("r1", "r2"), orders.get(0).getNames());
-    Assert.assertEquals(ImmutableSet.of("r3"), orders.get(1).getNames());
-    Assert.assertEquals(ImmutableSet.of("r4"), orders.get(2).getNames());
-  }
-}
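
A sketch of how an application could return an ordered specification like the one verified above from its configure() method. The TwillApplication interface name is an assumption; the builder calls mirror the test, and DummyRunnable refers to the nested runnable defined in the test.

    import org.apache.twill.api.TwillApplication;
    import org.apache.twill.api.TwillSpecification;

    /** Illustrative application; assumes the TwillApplication interface with configure(). */
    public final class OrderedApplication implements TwillApplication {
      @Override
      public TwillSpecification configure() {
        return TwillSpecification.Builder.with()
          .setName("OrderedApplication")
          .withRunnable()
            .add("r1", new TwillSpecificationTest.DummyRunnable()).noLocalFiles()
            .add("r2", new TwillSpecificationTest.DummyRunnable()).noLocalFiles()
            .add("r3", new TwillSpecificationTest.DummyRunnable()).noLocalFiles()
          // r1 and r2 start first; r3 starts once both of them are running.
          .withOrder().begin("r1", "r2").nextWhenStarted("r3")
          .build();
      }
    }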

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/test/java/org/apache/twill/yarn/YarnTestSuite.java
----------------------------------------------------------------------
diff --git a/yarn/src/test/java/org/apache/twill/yarn/YarnTestSuite.java b/yarn/src/test/java/org/apache/twill/yarn/YarnTestSuite.java
deleted file mode 100644
index b55d620..0000000
--- a/yarn/src/test/java/org/apache/twill/yarn/YarnTestSuite.java
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.yarn;
-
-import org.apache.twill.api.TwillRunner;
-import org.apache.twill.api.TwillRunnerService;
-import org.apache.twill.filesystem.LocalLocationFactory;
-import org.apache.twill.internal.zookeeper.InMemoryZKServer;
-import org.apache.twill.internal.yarn.YarnUtils;
-import com.google.common.collect.Iterables;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.server.MiniYARNCluster;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.ClassRule;
-import org.junit.rules.TemporaryFolder;
-import org.junit.runner.RunWith;
-import org.junit.runners.Suite;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.concurrent.TimeUnit;
-
-/**
- * Test suite for all tests with mini yarn cluster.
- */
-@RunWith(Suite.class)
-@Suite.SuiteClasses({
-                      EchoServerTestRun.class,
-                      ResourceReportTestRun.class,
-                      TaskCompletedTestRun.class,
-                      DistributeShellTestRun.class,
-                      LocalFileTestRun.class,
-                      FailureRestartTestRun.class,
-                      ProvisionTimeoutTestRun.class
-                    })
-public class YarnTestSuite {
-  private static final Logger LOG = LoggerFactory.getLogger(YarnTestSuite.class);
-
-  @ClassRule
-  public static TemporaryFolder tmpFolder = new TemporaryFolder();
-
-  private static InMemoryZKServer zkServer;
-  private static MiniYARNCluster cluster;
-  private static TwillRunnerService runnerService;
-  private static YarnConfiguration config;
-
-  @BeforeClass
-  public static final void init() throws IOException {
-    // Starts Zookeeper
-    zkServer = InMemoryZKServer.builder().build();
-    zkServer.startAndWait();
-
-    // Start YARN mini cluster
-    config = new YarnConfiguration(new Configuration());
-
-    if (YarnUtils.isHadoop20()) {
-      config.set("yarn.resourcemanager.scheduler.class",
-                 "org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler");
-    } else {
-      config.set("yarn.resourcemanager.scheduler.class",
-                 "org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler");
-      config.set("yarn.scheduler.capacity.resource-calculator",
-                 "org.apache.hadoop.yarn.util.resource.DominantResourceCalculator");
-    }
-    config.set("yarn.minicluster.fixed.ports", "true");
-    config.set("yarn.nodemanager.vmem-pmem-ratio", "20.1");
-    config.set("yarn.nodemanager.vmem-check-enabled", "false");
-    config.set("yarn.scheduler.minimum-allocation-mb", "128");
-    config.set("yarn.nodemanager.delete.debug-delay-sec", "3600");
-
-    cluster = new MiniYARNCluster("test-cluster", 1, 1, 1);
-    cluster.init(config);
-    cluster.start();
-
-    runnerService = createTwillRunnerService();
-    runnerService.startAndWait();
-  }
-
-  @AfterClass
-  public static final void finish() {
-    runnerService.stopAndWait();
-    cluster.stop();
-    zkServer.stopAndWait();
-  }
-
-  public static final TwillRunner getTwillRunner() {
-    return runnerService;
-  }
-
-  /**
-   * Creates an unstarted instance of {@link org.apache.twill.api.TwillRunnerService}.
-   */
-  public static final TwillRunnerService createTwillRunnerService() throws IOException {
-    return new YarnTwillRunnerService(config, zkServer.getConnectionStr() + "/twill",
-                                      new LocalLocationFactory(tmpFolder.newFolder()));
-  }
-
-  public static final <T> boolean waitForSize(Iterable<T> iterable, int count, int limit) throws InterruptedException {
-    int trial = 0;
-    int size = Iterables.size(iterable);
-    while (size != count && trial < limit) {
-      LOG.info("Waiting for {} size {} == {}", iterable, size, count);
-      TimeUnit.SECONDS.sleep(1);
-      trial++;
-      size = Iterables.size(iterable);
-    }
-    return trial < limit;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/test/resources/header.txt
----------------------------------------------------------------------
diff --git a/yarn/src/test/resources/header.txt b/yarn/src/test/resources/header.txt
deleted file mode 100644
index b6e25e6..0000000
--- a/yarn/src/test/resources/header.txt
+++ /dev/null
@@ -1 +0,0 @@
-Local file header

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/test/resources/logback-test.xml
----------------------------------------------------------------------
diff --git a/yarn/src/test/resources/logback-test.xml b/yarn/src/test/resources/logback-test.xml
deleted file mode 100644
index 2615cb4..0000000
--- a/yarn/src/test/resources/logback-test.xml
+++ /dev/null
@@ -1,17 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-
-<!-- Default logback configuration for twill library -->
-<configuration>
-    <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
-        <encoder>
-            <pattern>%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n</pattern>
-        </encoder>
-    </appender>
-
-    <logger name="org.apache.twill" level="DEBUG" />
-
-    <root level="WARN">
-        <appender-ref ref="STDOUT"/>
-    </root>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/zookeeper/pom.xml
----------------------------------------------------------------------
diff --git a/zookeeper/pom.xml b/zookeeper/pom.xml
deleted file mode 100644
index e76ee50..0000000
--- a/zookeeper/pom.xml
+++ /dev/null
@@ -1,67 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-    <parent>
-        <artifactId>twill-parent</artifactId>
-        <groupId>org.apache.twill</groupId>
-        <version>0.1.0-SNAPSHOT</version>
-    </parent>
-    <modelVersion>4.0.0</modelVersion>
-
-    <artifactId>twill-zookeeper</artifactId>
-    <name>Twill ZooKeeper client library</name>
-
-    <dependencies>
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>twill-common</artifactId>
-            <version>${project.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>com.google.guava</groupId>
-            <artifactId>guava</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>com.google.code.findbugs</groupId>
-            <artifactId>jsr305</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.apache.zookeeper</groupId>
-            <artifactId>zookeeper</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.slf4j</groupId>
-            <artifactId>slf4j-api</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>ch.qos.logback</groupId>
-            <artifactId>logback-core</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>ch.qos.logback</groupId>
-            <artifactId>logback-classic</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>junit</groupId>
-            <artifactId>junit</artifactId>
-        </dependency>
-    </dependencies>
-</project>

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/zookeeper/src/main/java/org/apache/twill/internal/zookeeper/BasicNodeChildren.java
----------------------------------------------------------------------
diff --git a/zookeeper/src/main/java/org/apache/twill/internal/zookeeper/BasicNodeChildren.java b/zookeeper/src/main/java/org/apache/twill/internal/zookeeper/BasicNodeChildren.java
deleted file mode 100644
index 9e4f55f..0000000
--- a/zookeeper/src/main/java/org/apache/twill/internal/zookeeper/BasicNodeChildren.java
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.zookeeper;
-
-import org.apache.twill.zookeeper.NodeChildren;
-import com.google.common.base.Objects;
-import org.apache.zookeeper.data.Stat;
-
-import java.util.List;
-
-/**
- *
- */
-final class BasicNodeChildren implements NodeChildren {
-
-  private final Stat stat;
-  private final List<String> children;
-
-  BasicNodeChildren(List<String> children, Stat stat) {
-    this.stat = stat;
-    this.children = children;
-  }
-
-  @Override
-  public Stat getStat() {
-    return stat;
-  }
-
-  @Override
-  public List<String> getChildren() {
-    return children;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (o == null || !(o instanceof NodeChildren)) {
-      return false;
-    }
-
-    NodeChildren that = (NodeChildren) o;
-    return stat.equals(that.getStat()) && children.equals(that.getChildren());
-  }
-
-  @Override
-  public int hashCode() {
-    return Objects.hashCode(children, stat);
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/zookeeper/src/main/java/org/apache/twill/internal/zookeeper/BasicNodeData.java
----------------------------------------------------------------------
diff --git a/zookeeper/src/main/java/org/apache/twill/internal/zookeeper/BasicNodeData.java b/zookeeper/src/main/java/org/apache/twill/internal/zookeeper/BasicNodeData.java
deleted file mode 100644
index 98a3a66..0000000
--- a/zookeeper/src/main/java/org/apache/twill/internal/zookeeper/BasicNodeData.java
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.zookeeper;
-
-import org.apache.twill.zookeeper.NodeData;
-import com.google.common.base.Objects;
-import org.apache.zookeeper.data.Stat;
-
-import java.util.Arrays;
-
-/**
- * A straightforward implementation for {@link NodeData}.
- */
-final class BasicNodeData implements NodeData {
-
-  private final byte[] data;
-  private final Stat stat;
-
-  BasicNodeData(byte[] data, Stat stat) {
-    this.data = data;
-    this.stat = stat;
-  }
-
-  @Override
-  public Stat getStat() {
-    return stat;
-  }
-
-  @Override
-  public byte[] getData() {
-    return data;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (o == null || !(o instanceof NodeData)) {
-      return false;
-    }
-
-    BasicNodeData that = (BasicNodeData) o;
-
-    return stat.equals(that.getStat()) && Arrays.equals(data, that.getData());
-  }
-
-  @Override
-  public int hashCode() {
-    return Objects.hashCode(data, stat);
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/zookeeper/src/main/java/org/apache/twill/internal/zookeeper/DefaultZKClientService.java
----------------------------------------------------------------------
diff --git a/zookeeper/src/main/java/org/apache/twill/internal/zookeeper/DefaultZKClientService.java b/zookeeper/src/main/java/org/apache/twill/internal/zookeeper/DefaultZKClientService.java
deleted file mode 100644
index c52fb08..0000000
--- a/zookeeper/src/main/java/org/apache/twill/internal/zookeeper/DefaultZKClientService.java
+++ /dev/null
@@ -1,525 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.zookeeper;
-
-import org.apache.twill.common.Threads;
-import org.apache.twill.zookeeper.NodeChildren;
-import org.apache.twill.zookeeper.NodeData;
-import org.apache.twill.zookeeper.OperationFuture;
-import org.apache.twill.zookeeper.ZKClientService;
-import com.google.common.base.Function;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Supplier;
-import com.google.common.util.concurrent.AbstractService;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.Service;
-import org.apache.zookeeper.AsyncCallback;
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.WatchedEvent;
-import org.apache.zookeeper.Watcher;
-import org.apache.zookeeper.ZooDefs;
-import org.apache.zookeeper.ZooKeeper;
-import org.apache.zookeeper.data.ACL;
-import org.apache.zookeeper.data.Stat;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.annotation.Nullable;
-import java.io.IOException;
-import java.util.List;
-import java.util.concurrent.CopyOnWriteArrayList;
-import java.util.concurrent.Executor;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicReference;
-
-/**
- * The base implementation of {@link ZKClientService}.
- */
-public final class DefaultZKClientService implements ZKClientService {
-
-  private static final Logger LOG = LoggerFactory.getLogger(DefaultZKClientService.class);
-
-  private final String zkStr;
-  private final int sessionTimeout;
-  private final List<Watcher> connectionWatchers;
-  private final AtomicReference<ZooKeeper> zooKeeper;
-  private final Function<String, List<ACL>> aclMapper;
-  private final Service serviceDelegate;
-  private ExecutorService eventExecutor;
-
-  public DefaultZKClientService(String zkStr, int sessionTimeout, Watcher connectionWatcher) {
-    this.zkStr = zkStr;
-    this.sessionTimeout = sessionTimeout;
-    this.connectionWatchers = new CopyOnWriteArrayList<Watcher>();
-    addConnectionWatcher(connectionWatcher);
-
-    this.zooKeeper = new AtomicReference<ZooKeeper>();
-
-    // TODO (terence): Add ACL
-    aclMapper = new Function<String, List<ACL>>() {
-      @Override
-      public List<ACL> apply(String input) {
-        return ZooDefs.Ids.OPEN_ACL_UNSAFE;
-      }
-    };
-    serviceDelegate = new ServiceDelegate();
-  }
-
-  @Override
-  public Long getSessionId() {
-    ZooKeeper zk = zooKeeper.get();
-    return zk == null ? null : zk.getSessionId();
-  }
-
-  @Override
-  public String getConnectString() {
-    return zkStr;
-  }
-
-  @Override
-  public void addConnectionWatcher(Watcher watcher) {
-    if (watcher != null) {
-      connectionWatchers.add(wrapWatcher(watcher));
-    }
-  }
-
-  @Override
-  public OperationFuture<String> create(String path, byte[] data, CreateMode createMode) {
-    return create(path, data, createMode, true);
-  }
-
-  @Override
-  public OperationFuture<String> create(String path, @Nullable byte[] data,
-                                        CreateMode createMode, boolean createParent) {
-    return doCreate(path, data, createMode, createParent, false);
-  }
-
-  private OperationFuture<String> doCreate(final String path,
-                                        @Nullable final byte[] data,
-                                        final CreateMode createMode,
-                                        final boolean createParent,
-                                        final boolean ignoreNodeExists) {
-    final SettableOperationFuture<String> createFuture = SettableOperationFuture.create(path, eventExecutor);
-    getZooKeeper().create(path, data, aclMapper.apply(path), createMode, Callbacks.STRING, createFuture);
-    if (!createParent) {
-      return createFuture;
-    }
-
-    // If creating the parent is requested, return a different future
-    final SettableOperationFuture<String> result = SettableOperationFuture.create(path, eventExecutor);
-    // Watch for changes in the original future
-    Futures.addCallback(createFuture, new FutureCallback<String>() {
-      @Override
-      public void onSuccess(String path) {
-        // Propagate if creation was successful
-        result.set(path);
-      }
-
-      @Override
-      public void onFailure(Throwable t) {
-        // See if the failure can be handled
-        if (updateFailureResult(t, result, path, ignoreNodeExists)) {
-          return;
-        }
-        // Create the parent node
-        String parentPath = getParent(path);
-        if (parentPath.isEmpty()) {
-          result.setException(t);
-          return;
-        }
-        // Watch for parent creation complete
-        Futures.addCallback(
-          doCreate(parentPath, null, CreateMode.PERSISTENT, createParent, true), new FutureCallback<String>() {
-          @Override
-          public void onSuccess(String parentPath) {
-            // Create the requested path again
-            Futures.addCallback(
-              doCreate(path, data, createMode, false, ignoreNodeExists), new FutureCallback<String>() {
-              @Override
-              public void onSuccess(String pathResult) {
-                result.set(pathResult);
-              }
-
-              @Override
-              public void onFailure(Throwable t) {
-                // handle the failure
-                updateFailureResult(t, result, path, ignoreNodeExists);
-              }
-            });
-          }
-
-          @Override
-          public void onFailure(Throwable t) {
-            result.setException(t);
-          }
-        });
-      }
-
-      /**
-       * Updates the result future based on the given {@link Throwable}.
-       * @param t Cause of the failure
-       * @param result Future to be updated
-       * @param path Request path for the operation
-       * @return {@code true} if the failure was terminal and has been set on the result future, {@code false} otherwise.
-       */
-      private boolean updateFailureResult(Throwable t, SettableOperationFuture<String> result,
-                                          String path, boolean ignoreNodeExists) {
-        // Propagate if there is error
-        if (!(t instanceof KeeperException)) {
-          result.setException(t);
-          return true;
-        }
-        KeeperException.Code code = ((KeeperException) t).code();
-        // Node already exists; simply return success if ignoring NODEEXISTS is allowed (used for parent node creation).
-        if (ignoreNodeExists && code == KeeperException.Code.NODEEXISTS) {
-          // The requested path can be used as-is because this only applies to non-sequential nodes
-          result.set(path);
-          return false;
-        }
-        if (code != KeeperException.Code.NONODE) {
-          result.setException(t);
-          return true;
-        }
-        return false;
-      }
-
-      /**
-       * Gets the parent of the given path.
-       * @param path Path for computing its parent
-       * @return Parent of the given path, or empty string if the given path is the root path already.
-       */
-      private String getParent(String path) {
-        String parentPath = path.substring(0, path.lastIndexOf('/'));
-        return (parentPath.isEmpty() && !"/".equals(path)) ? "/" : parentPath;
-      }
-    });
-
-    return result;
-  }
-
-  @Override
-  public OperationFuture<Stat> exists(String path) {
-    return exists(path, null);
-  }
-
-  @Override
-  public OperationFuture<Stat> exists(String path, Watcher watcher) {
-    SettableOperationFuture<Stat> result = SettableOperationFuture.create(path, eventExecutor);
-    getZooKeeper().exists(path, wrapWatcher(watcher), Callbacks.STAT_NONODE, result);
-    return result;
-  }
-
-  @Override
-  public OperationFuture<NodeChildren> getChildren(String path) {
-    return getChildren(path, null);
-  }
-
-  @Override
-  public OperationFuture<NodeChildren> getChildren(String path, Watcher watcher) {
-    SettableOperationFuture<NodeChildren> result = SettableOperationFuture.create(path, eventExecutor);
-    getZooKeeper().getChildren(path, wrapWatcher(watcher), Callbacks.CHILDREN, result);
-    return result;
-  }
-
-  @Override
-  public OperationFuture<NodeData> getData(String path) {
-    return getData(path, null);
-  }
-
-  @Override
-  public OperationFuture<NodeData> getData(String path, Watcher watcher) {
-    SettableOperationFuture<NodeData> result = SettableOperationFuture.create(path, eventExecutor);
-    getZooKeeper().getData(path, wrapWatcher(watcher), Callbacks.DATA, result);
-
-    return result;
-  }
-
-  @Override
-  public OperationFuture<Stat> setData(String path, byte[] data) {
-    return setData(path, data, -1);
-  }
-
-  @Override
-  public OperationFuture<Stat> setData(String dataPath, byte[] data, int version) {
-    SettableOperationFuture<Stat> result = SettableOperationFuture.create(dataPath, eventExecutor);
-    getZooKeeper().setData(dataPath, data, version, Callbacks.STAT, result);
-    return result;
-  }
-
-  @Override
-  public OperationFuture<String> delete(String path) {
-    return delete(path, -1);
-  }
-
-  @Override
-  public OperationFuture<String> delete(String deletePath, int version) {
-    SettableOperationFuture<String> result = SettableOperationFuture.create(deletePath, eventExecutor);
-    getZooKeeper().delete(deletePath, version, Callbacks.VOID, result);
-    return result;
-  }
-
-  @Override
-  public Supplier<ZooKeeper> getZooKeeperSupplier() {
-    return new Supplier<ZooKeeper>() {
-      @Override
-      public ZooKeeper get() {
-        return getZooKeeper();
-      }
-    };
-  }
-
-  @Override
-  public ListenableFuture<State> start() {
-    return serviceDelegate.start();
-  }
-
-  @Override
-  public State startAndWait() {
-    return serviceDelegate.startAndWait();
-  }
-
-  @Override
-  public boolean isRunning() {
-    return serviceDelegate.isRunning();
-  }
-
-  @Override
-  public State state() {
-    return serviceDelegate.state();
-  }
-
-  @Override
-  public ListenableFuture<State> stop() {
-    return serviceDelegate.stop();
-  }
-
-  @Override
-  public State stopAndWait() {
-    return serviceDelegate.stopAndWait();
-  }
-
-  @Override
-  public void addListener(Listener listener, Executor executor) {
-    serviceDelegate.addListener(listener, executor);
-  }
-
-  /**
-   * @return Current {@link ZooKeeper} client.
-   */
-  private ZooKeeper getZooKeeper() {
-    ZooKeeper zk = zooKeeper.get();
-    Preconditions.checkArgument(zk != null, "Not connected to zooKeeper.");
-    return zk;
-  }
-
-  /**
-   * Wraps the given watcher to be called from the event executor.
-   * @param watcher Watcher to be wrapped
-   * @return The wrapped Watcher
-   */
-  private Watcher wrapWatcher(final Watcher watcher) {
-    if (watcher == null) {
-      return null;
-    }
-    return new Watcher() {
-      @Override
-      public void process(final WatchedEvent event) {
-        eventExecutor.execute(new Runnable() {
-          @Override
-          public void run() {
-            try {
-              watcher.process(event);
-            } catch (Throwable t) {
-              LOG.error("Watcher throws exception.", t);
-            }
-          }
-        });
-      }
-    };
-  }
-
-  private final class ServiceDelegate extends AbstractService implements Watcher {
-
-    @Override
-    protected void doStart() {
-      // A single thread executor
-      eventExecutor = new ThreadPoolExecutor(1, 1, 0L, TimeUnit.MILLISECONDS, new LinkedBlockingQueue<Runnable>(),
-                                             Threads.createDaemonThreadFactory("zk-client-EventThread")) {
-        @Override
-        protected void terminated() {
-          super.terminated();
-          notifyStopped();
-        }
-      };
-
-      try {
-        zooKeeper.set(new ZooKeeper(zkStr, sessionTimeout, this));
-      } catch (IOException e) {
-        notifyFailed(e);
-      }
-    }
-
-    @Override
-    protected void doStop() {
-      ZooKeeper zk = zooKeeper.getAndSet(null);
-      if (zk != null) {
-        try {
-          zk.close();
-        } catch (InterruptedException e) {
-          notifyFailed(e);
-        } finally {
-          eventExecutor.shutdown();
-        }
-      }
-    }
-
-    @Override
-    public void process(WatchedEvent event) {
-      try {
-        if (event.getState() == Event.KeeperState.SyncConnected && state() == State.STARTING) {
-          LOG.info("Connected to ZooKeeper: " + zkStr);
-          notifyStarted();
-          return;
-        }
-        if (event.getState() == Event.KeeperState.Expired) {
-          LOG.info("ZooKeeper session expired: " + zkStr);
-
-          // When the session expires, simply reconnect
-          Thread t = new Thread(new Runnable() {
-            @Override
-            public void run() {
-              try {
-                zooKeeper.set(new ZooKeeper(zkStr, sessionTimeout, ServiceDelegate.this));
-              } catch (IOException e) {
-                zooKeeper.set(null);
-                notifyFailed(e);
-              }
-            }
-          }, "zk-reconnect");
-          t.setDaemon(true);
-          t.start();
-        }
-      } finally {
-        if (event.getType() == Event.EventType.None && !connectionWatchers.isEmpty()) {
-          for (Watcher connectionWatcher : connectionWatchers) {
-            connectionWatcher.process(event);
-          }
-        }
-      }
-    }
-  }
-
-  /**
-   * Collection of generic callbacks that simply reflect results into OperationFuture.
-   */
-  private static final class Callbacks {
-    static final AsyncCallback.StringCallback STRING = new AsyncCallback.StringCallback() {
-      @Override
-      @SuppressWarnings("unchecked")
-      public void processResult(int rc, String path, Object ctx, String name) {
-        SettableOperationFuture<String> result = (SettableOperationFuture<String>) ctx;
-        KeeperException.Code code = KeeperException.Code.get(rc);
-        if (code == KeeperException.Code.OK) {
-          result.set((name == null || name.isEmpty()) ? path : name);
-          return;
-        }
-        result.setException(KeeperException.create(code, result.getRequestPath()));
-      }
-    };
-
-    static final AsyncCallback.StatCallback STAT = new AsyncCallback.StatCallback() {
-      @Override
-      @SuppressWarnings("unchecked")
-      public void processResult(int rc, String path, Object ctx, Stat stat) {
-        SettableOperationFuture<Stat> result = (SettableOperationFuture<Stat>) ctx;
-        KeeperException.Code code = KeeperException.Code.get(rc);
-        if (code == KeeperException.Code.OK) {
-          result.set(stat);
-          return;
-        }
-        result.setException(KeeperException.create(code, result.getRequestPath()));
-      }
-    };
-
-    /**
-     * A stat callback that treats NONODE as success.
-     */
-    static final AsyncCallback.StatCallback STAT_NONODE = new AsyncCallback.StatCallback() {
-      @Override
-      @SuppressWarnings("unchecked")
-      public void processResult(int rc, String path, Object ctx, Stat stat) {
-        SettableOperationFuture<Stat> result = (SettableOperationFuture<Stat>) ctx;
-        KeeperException.Code code = KeeperException.Code.get(rc);
-        if (code == KeeperException.Code.OK || code == KeeperException.Code.NONODE) {
-          result.set(stat);
-          return;
-        }
-        result.setException(KeeperException.create(code, result.getRequestPath()));
-      }
-    };
-
-    static final AsyncCallback.Children2Callback CHILDREN = new AsyncCallback.Children2Callback() {
-      @Override
-      @SuppressWarnings("unchecked")
-      public void processResult(int rc, String path, Object ctx, List<String> children, Stat stat) {
-        SettableOperationFuture<NodeChildren> result = (SettableOperationFuture<NodeChildren>) ctx;
-        KeeperException.Code code = KeeperException.Code.get(rc);
-        if (code == KeeperException.Code.OK) {
-          result.set(new BasicNodeChildren(children, stat));
-          return;
-        }
-        result.setException(KeeperException.create(code, result.getRequestPath()));
-      }
-    };
-
-    static final AsyncCallback.DataCallback DATA = new AsyncCallback.DataCallback() {
-      @Override
-      @SuppressWarnings("unchecked")
-      public void processResult(int rc, String path, Object ctx, byte[] data, Stat stat) {
-        SettableOperationFuture<NodeData> result = (SettableOperationFuture<NodeData>) ctx;
-        KeeperException.Code code = KeeperException.Code.get(rc);
-        if (code == KeeperException.Code.OK) {
-          result.set(new BasicNodeData(data, stat));
-          return;
-        }
-        result.setException(KeeperException.create(code, result.getRequestPath()));
-      }
-    };
-
-    static final AsyncCallback.VoidCallback VOID = new AsyncCallback.VoidCallback() {
-      @Override
-      @SuppressWarnings("unchecked")
-      public void processResult(int rc, String path, Object ctx) {
-        SettableOperationFuture<String> result = (SettableOperationFuture<String>) ctx;
-        KeeperException.Code code = KeeperException.Code.get(rc);
-        if (code == KeeperException.Code.OK) {
-          result.set(result.getRequestPath());
-          return;
-        }
-        // Otherwise, it is an error
-        result.setException(KeeperException.create(code, result.getRequestPath()));
-      }
-    };
-  }
-}
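
A sketch of direct usage of the client above: start it, create a node with automatic parent creation (the recursive path implemented in doCreate), and shut it down. The connect string and node path are placeholders; only the constructor and methods present in the source above are used.

    import org.apache.twill.internal.zookeeper.DefaultZKClientService;
    import org.apache.twill.zookeeper.ZKClientService;
    import org.apache.zookeeper.CreateMode;

    /** Illustrative usage; "localhost:2181" and the node path are placeholders. */
    public final class ZKCreateExample {
      public static void main(String[] args) throws Exception {
        ZKClientService client = new DefaultZKClientService("localhost:2181", 10000, null);
        client.startAndWait();
        try {
          // createParent = true triggers the recursive parent creation shown in doCreate().
          String created = client.create("/twill/example/node", null, CreateMode.PERSISTENT, true).get();
          System.out.println("Created " + created);
        } finally {
          client.stopAndWait();
        }
      }
    }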

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/zookeeper/src/main/java/org/apache/twill/internal/zookeeper/FailureRetryZKClient.java
----------------------------------------------------------------------
diff --git a/zookeeper/src/main/java/org/apache/twill/internal/zookeeper/FailureRetryZKClient.java b/zookeeper/src/main/java/org/apache/twill/internal/zookeeper/FailureRetryZKClient.java
deleted file mode 100644
index 65ceadb..0000000
--- a/zookeeper/src/main/java/org/apache/twill/internal/zookeeper/FailureRetryZKClient.java
+++ /dev/null
@@ -1,240 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.zookeeper;
-
-import org.apache.twill.common.Threads;
-import org.apache.twill.zookeeper.ForwardingZKClient;
-import org.apache.twill.zookeeper.NodeChildren;
-import org.apache.twill.zookeeper.NodeData;
-import org.apache.twill.zookeeper.OperationFuture;
-import org.apache.twill.zookeeper.RetryStrategy;
-import org.apache.twill.zookeeper.RetryStrategy.OperationType;
-import org.apache.twill.zookeeper.ZKClient;
-import com.google.common.base.Supplier;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.Watcher;
-import org.apache.zookeeper.data.Stat;
-
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
-
-/**
- * A {@link ZKClient} that will invoke {@link RetryStrategy} on operation failure.
- * This {@link ZKClient} works by delegating calls to another {@link ZKClient}
- * and listen for the result. If the result is a failure, and is
- * {@link RetryUtils#canRetry(org.apache.zookeeper.KeeperException.Code) retryable}, the given {@link RetryStrategy}
- * will be called to determine the next retry time, or give up, depending on the value returned by the strategy.
- */
-public final class FailureRetryZKClient extends ForwardingZKClient {
-
-  private static final ScheduledExecutorService SCHEDULER = Executors.newSingleThreadScheduledExecutor(
-                                                                Threads.createDaemonThreadFactory("retry-zkclient"));
-  private final RetryStrategy retryStrategy;
-
-  public FailureRetryZKClient(ZKClient delegate, RetryStrategy retryStrategy) {
-    super(delegate);
-    this.retryStrategy = retryStrategy;
-  }
-
-  @Override
-  public OperationFuture<String> create(String path, byte[] data, CreateMode createMode) {
-    return create(path, data, createMode, true);
-  }
-
-  @Override
-  public OperationFuture<String> create(final String path, final byte[] data,
-                                        final CreateMode createMode, final boolean createParent) {
-
-    // No retry for any SEQUENTIAL node, as some algorithms depends on only one sequential node being created.
-    if (createMode == CreateMode.PERSISTENT_SEQUENTIAL || createMode == CreateMode.EPHEMERAL_SEQUENTIAL) {
-      return super.create(path, data, createMode, createParent);
-    }
-
-    final SettableOperationFuture<String> result = SettableOperationFuture.create(path, Threads.SAME_THREAD_EXECUTOR);
-    Futures.addCallback(super.create(path, data, createMode, createParent),
-                        new OperationFutureCallback<String>(OperationType.CREATE, System.currentTimeMillis(),
-                                                            path, result, new Supplier<OperationFuture<String>>() {
-                          @Override
-                          public OperationFuture<String> get() {
-                            return FailureRetryZKClient.super.create(path, data, createMode, createParent);
-                          }
-                        }));
-    return result;
-  }
-
-  @Override
-  public OperationFuture<Stat> exists(String path) {
-    return exists(path, null);
-  }
-
-  @Override
-  public OperationFuture<Stat> exists(final String path, final Watcher watcher) {
-    final SettableOperationFuture<Stat> result = SettableOperationFuture.create(path, Threads.SAME_THREAD_EXECUTOR);
-    Futures.addCallback(super.exists(path, watcher),
-                        new OperationFutureCallback<Stat>(OperationType.EXISTS, System.currentTimeMillis(),
-                                                          path, result, new Supplier<OperationFuture<Stat>>() {
-                          @Override
-                          public OperationFuture<Stat> get() {
-                            return FailureRetryZKClient.super.exists(path, watcher);
-                          }
-                        }));
-    return result;
-  }
-
-  @Override
-  public OperationFuture<NodeChildren> getChildren(String path) {
-    return getChildren(path, null);
-  }
-
-  @Override
-  public OperationFuture<NodeChildren> getChildren(final String path, final Watcher watcher) {
-    final SettableOperationFuture<NodeChildren> result = SettableOperationFuture.create(path,
-                                                                                        Threads.SAME_THREAD_EXECUTOR);
-    Futures.addCallback(super.getChildren(path, watcher),
-                        new OperationFutureCallback<NodeChildren>(OperationType.GET_CHILDREN,
-                                                                  System.currentTimeMillis(), path, result,
-                                                                  new Supplier<OperationFuture<NodeChildren>>() {
-                          @Override
-                          public OperationFuture<NodeChildren> get() {
-                            return FailureRetryZKClient.super.getChildren(path, watcher);
-                          }
-                        }));
-    return result;
-  }
-
-  @Override
-  public OperationFuture<NodeData> getData(String path) {
-    return getData(path, null);
-  }
-
-  @Override
-  public OperationFuture<NodeData> getData(final String path, final Watcher watcher) {
-    final SettableOperationFuture<NodeData> result = SettableOperationFuture.create(path, Threads.SAME_THREAD_EXECUTOR);
-    Futures.addCallback(super.getData(path, watcher),
-                        new OperationFutureCallback<NodeData>(OperationType.GET_DATA, System.currentTimeMillis(),
-                                                              path, result, new Supplier<OperationFuture<NodeData>>() {
-                          @Override
-                          public OperationFuture<NodeData> get() {
-                            return FailureRetryZKClient.super.getData(path, watcher);
-                          }
-                        }));
-    return result;
-  }
-
-  @Override
-  public OperationFuture<Stat> setData(String path, byte[] data) {
-    return setData(path, data, -1);
-  }
-
-  @Override
-  public OperationFuture<Stat> setData(final String dataPath, final byte[] data, final int version) {
-    final SettableOperationFuture<Stat> result = SettableOperationFuture.create(dataPath, Threads.SAME_THREAD_EXECUTOR);
-    Futures.addCallback(super.setData(dataPath, data, version),
-                        new OperationFutureCallback<Stat>(OperationType.SET_DATA, System.currentTimeMillis(),
-                                                          dataPath, result, new Supplier<OperationFuture<Stat>>() {
-                          @Override
-                          public OperationFuture<Stat> get() {
-                            return FailureRetryZKClient.super.setData(dataPath, data, version);
-                          }
-                        }));
-    return result;
-  }
-
-  @Override
-  public OperationFuture<String> delete(String path) {
-    return delete(path, -1);
-  }
-
-  @Override
-  public OperationFuture<String> delete(final String deletePath, final int version) {
-    final SettableOperationFuture<String> result = SettableOperationFuture.create(deletePath,
-                                                                                  Threads.SAME_THREAD_EXECUTOR);
-    Futures.addCallback(super.delete(deletePath, version),
-                        new OperationFutureCallback<String>(OperationType.DELETE, System.currentTimeMillis(),
-                                                            deletePath, result, new Supplier<OperationFuture<String>>
-                          () {
-                          @Override
-                          public OperationFuture<String> get() {
-                            return FailureRetryZKClient.super.delete(deletePath, version);
-                          }
-                        }));
-    return result;
-  }
-
-  /**
-   * Callback to watch for operation result and trigger retry if necessary.
-   * @param <V> Type of operation result.
-   */
-  private final class OperationFutureCallback<V> implements FutureCallback<V> {
-
-    private final OperationType type;
-    private final long startTime;
-    private final String path;
-    private final SettableOperationFuture<V> result;
-    private final Supplier<OperationFuture<V>> retryAction;
-    private final AtomicInteger failureCount;
-
-    private OperationFutureCallback(OperationType type, long startTime, String path,
-                                    SettableOperationFuture<V> result, Supplier<OperationFuture<V>> retryAction) {
-      this.type = type;
-      this.startTime = startTime;
-      this.path = path;
-      this.result = result;
-      this.retryAction = retryAction;
-      this.failureCount = new AtomicInteger(0);
-    }
-
-    @Override
-    public void onSuccess(V result) {
-      this.result.set(result);
-    }
-
-    @Override
-    public void onFailure(Throwable t) {
-      if (!doRetry(t)) {
-        result.setException(t);
-      }
-    }
-
-    private boolean doRetry(Throwable t) {
-      if (!RetryUtils.canRetry(t)) {
-        return false;
-      }
-
-      // Determine the retry delay
-      long nextRetry = retryStrategy.nextRetry(failureCount.incrementAndGet(), startTime, type, path);
-      if (nextRetry < 0) {
-        return false;
-      }
-
-      // Schedule the retry.
-      SCHEDULER.schedule(new Runnable() {
-        @Override
-        public void run() {
-          Futures.addCallback(retryAction.get(), OperationFutureCallback.this);
-        }
-      }, nextRetry, TimeUnit.MILLISECONDS);
-
-      return true;
-    }
-  }
-}
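
The pattern above is straightforward: the callback relays a success into a settable future, and on a retryable failure it asks the RetryStrategy for a delay and re-schedules the same action. A minimal standalone sketch of that pattern follows; it is simplified to a fixed delay and a retry count, the class name is hypothetical, and it assumes a Guava version that still offers the two-argument Futures.addCallback overload used above.

import com.google.common.base.Supplier;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.SettableFuture;

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public final class RetryRelaySketch {

  private static final ScheduledExecutorService SCHEDULER = Executors.newSingleThreadScheduledExecutor();

  /**
   * Relays the result of the action into the given future, re-running the action
   * after a fixed delay whenever it fails, up to the given number of retries.
   */
  public static <V> void relayWithRetry(final Supplier<ListenableFuture<V>> action,
                                        final SettableFuture<V> result,
                                        final int retriesLeft, final long delayMs) {
    Futures.addCallback(action.get(), new FutureCallback<V>() {
      @Override
      public void onSuccess(V value) {
        result.set(value);
      }

      @Override
      public void onFailure(Throwable t) {
        if (retriesLeft <= 0) {
          result.setException(t);
          return;
        }
        // Schedule the retry, the same way the SCHEDULER is used in the class above.
        SCHEDULER.schedule(new Runnable() {
          @Override
          public void run() {
            relayWithRetry(action, result, retriesLeft - 1, delayMs);
          }
        }, delayMs, TimeUnit.MILLISECONDS);
      }
    });
  }

  private RetryRelaySketch() {
  }
}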

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/zookeeper/src/main/java/org/apache/twill/internal/zookeeper/InMemoryZKServer.java
----------------------------------------------------------------------
diff --git a/zookeeper/src/main/java/org/apache/twill/internal/zookeeper/InMemoryZKServer.java b/zookeeper/src/main/java/org/apache/twill/internal/zookeeper/InMemoryZKServer.java
deleted file mode 100644
index c4eed59..0000000
--- a/zookeeper/src/main/java/org/apache/twill/internal/zookeeper/InMemoryZKServer.java
+++ /dev/null
@@ -1,198 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.zookeeper;
-
-import com.google.common.base.Preconditions;
-import com.google.common.base.Throwables;
-import com.google.common.io.Files;
-import com.google.common.util.concurrent.AbstractIdleService;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.Service;
-import org.apache.zookeeper.server.ServerCnxnFactory;
-import org.apache.zookeeper.server.ZooKeeperServer;
-import org.apache.zookeeper.server.persistence.FileTxnSnapLog;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
-import java.net.UnknownHostException;
-import java.util.concurrent.Executor;
-
-/**
- * An in-memory ZooKeeper server, mainly for use in unit tests.
- */
-public final class InMemoryZKServer implements Service {
-
-  private static final Logger LOG = LoggerFactory.getLogger(InMemoryZKServer.class);
-
-  private final File dataDir;
-  private final int tickTime;
-  private final boolean autoClean;
-  private final int port;
-  private final Service delegateService = new AbstractIdleService() {
-    @Override
-    protected void startUp() throws Exception {
-      ZooKeeperServer zkServer = new ZooKeeperServer();
-      FileTxnSnapLog ftxn = new FileTxnSnapLog(dataDir, dataDir);
-      zkServer.setTxnLogFactory(ftxn);
-      zkServer.setTickTime(tickTime);
-
-      factory = ServerCnxnFactory.createFactory();
-      factory.configure(getAddress(port), -1);
-      factory.startup(zkServer);
-
-      LOG.info("In memory ZK started: " + getConnectionStr());
-    }
-
-    @Override
-    protected void shutDown() throws Exception {
-      try {
-        factory.shutdown();
-      } finally {
-        if (autoClean) {
-          cleanDir(dataDir);
-        }
-      }
-    }
-  };
-
-  private ServerCnxnFactory factory;
-
-  public static Builder builder() {
-    return new Builder();
-  }
-
-  private InMemoryZKServer(File dataDir, int tickTime, boolean autoClean, int port) {
-    if (dataDir == null) {
-      dataDir = Files.createTempDir();
-      autoClean = true;
-    } else {
-      Preconditions.checkArgument(dataDir.isDirectory() || dataDir.mkdirs() || dataDir.isDirectory());
-    }
-
-    this.dataDir = dataDir;
-    this.tickTime = tickTime;
-    this.autoClean = autoClean;
-    this.port = port;
-  }
-
-  public String getConnectionStr() {
-    InetSocketAddress addr = factory.getLocalAddress();
-    return String.format("%s:%d", addr.getHostName(), addr.getPort());
-  }
-
-  public InetSocketAddress getLocalAddress() {
-    return factory.getLocalAddress();
-  }
-
-  private InetSocketAddress getAddress(int port) {
-    try {
-//      return new InetSocketAddress(InetAddress.getByAddress(new byte[] {127, 0, 0, 1}), port < 0 ? 0 : port);
-      return new InetSocketAddress(InetAddress.getLocalHost(), port < 0 ? 0 : port);
-    } catch (UnknownHostException e) {
-      throw Throwables.propagate(e);
-    }
-  }
-
-  private void cleanDir(File dir) {
-    File[] files = dir.listFiles();
-    if (files == null) {
-      return;
-    }
-    for (File file : files) {
-      if (file.isDirectory()) {
-        cleanDir(file);
-      }
-      file.delete();
-    }
-  }
-
-  @Override
-  public ListenableFuture<State> start() {
-    return delegateService.start();
-  }
-
-  @Override
-  public State startAndWait() {
-    return delegateService.startAndWait();
-  }
-
-  @Override
-  public boolean isRunning() {
-    return delegateService.isRunning();
-  }
-
-  @Override
-  public State state() {
-    return delegateService.state();
-  }
-
-  @Override
-  public ListenableFuture<State> stop() {
-    return delegateService.stop();
-  }
-
-  @Override
-  public State stopAndWait() {
-    return delegateService.stopAndWait();
-  }
-
-  @Override
-  public void addListener(Listener listener, Executor executor) {
-    delegateService.addListener(listener, executor);
-  }
-
-  /**
-   * Builder for creating instance of {@link InMemoryZKServer}.
-   */
-  public static final class Builder {
-    private File dataDir;
-    private boolean autoCleanDataDir = false;
-    private int tickTime = ZooKeeperServer.DEFAULT_TICK_TIME;
-    private int port = -1;
-
-    public Builder setDataDir(File dataDir) {
-      this.dataDir = dataDir;
-      return this;
-    }
-
-    public Builder setAutoCleanDataDir(boolean auto) {
-      this.autoCleanDataDir = auto;
-      return this;
-    }
-
-    public Builder setTickTime(int tickTime) {
-      this.tickTime = tickTime;
-      return this;
-    }
-
-    public Builder setPort(int port) {
-      this.port = port;
-      return this;
-    }
-
-    public InMemoryZKServer build() {
-      return new InMemoryZKServer(dataDir, tickTime, autoCleanDataDir, port);
-    }
-
-    private Builder() {
-    }
-  }
-}
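
For a quick sense of how the builder above is used in a test, here is a minimal sketch; the tick time is arbitrary and the class name is only illustrative. Omitting setDataDir() makes the server create, and later clean up, a temporary data directory.

import org.apache.twill.internal.zookeeper.InMemoryZKServer;

public class InMemoryZKExample {
  public static void main(String[] args) {
    InMemoryZKServer zkServer = InMemoryZKServer.builder()
                                                .setTickTime(1000)
                                                .build();
    zkServer.startAndWait();
    try {
      // Hand this connection string to whatever client is under test.
      System.out.println("ZK running at " + zkServer.getConnectionStr());
    } finally {
      zkServer.stopAndWait();
    }
  }
}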

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/zookeeper/src/main/java/org/apache/twill/internal/zookeeper/KillZKSession.java
----------------------------------------------------------------------
diff --git a/zookeeper/src/main/java/org/apache/twill/internal/zookeeper/KillZKSession.java b/zookeeper/src/main/java/org/apache/twill/internal/zookeeper/KillZKSession.java
deleted file mode 100644
index bc01f08..0000000
--- a/zookeeper/src/main/java/org/apache/twill/internal/zookeeper/KillZKSession.java
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.zookeeper;
-
-import com.google.common.base.Preconditions;
-import org.apache.zookeeper.WatchedEvent;
-import org.apache.zookeeper.Watcher;
-import org.apache.zookeeper.ZooKeeper;
-
-import java.io.IOException;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.TimeUnit;
-
-/**
- * Utility class for killing ZK client to simulate failures during testing.
- */
-public final class KillZKSession {
-
-  /**
-   * Utility classes should not have a public or default constructor,
-   * hence the constructor is made private.
-   */
-  private KillZKSession() {}
-
-  /**
-   * Kills a ZooKeeper client to simulate failure scenarios during testing.
-   * The caller provides the maximum amount of time to wait before the kill
-   * attempt is considered a failure.
-   *
-   * @param client The client whose session should be killed.
-   * @param connectionString Connection string of the ZooKeeper quorum.
-   * @param maxMs Maximum time in milliseconds to wait for the kill to complete.
-   * @throws IOException When there is an IO error.
-   * @throws InterruptedException When the call has been interrupted.
-   */
-  public static void kill(ZooKeeper client, String connectionString,
-                          int maxMs) throws IOException, InterruptedException {
-    final CountDownLatch latch = new CountDownLatch(1);
-    ZooKeeper zk = new ZooKeeper(connectionString, maxMs, new Watcher() {
-      @Override
-      public void process(WatchedEvent event) {
-        if (event.getState() == Event.KeeperState.SyncConnected) {
-          latch.countDown();
-        }
-      }
-    }, client.getSessionId(), client.getSessionPasswd());
-
-    try {
-      Preconditions.checkState(latch.await(maxMs, TimeUnit.MILLISECONDS), "Failed to kill ZK connection.");
-    } finally {
-      zk.close();
-    }
-  }
-}
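
In a test this is used to force a session expiry on a live client. A minimal sketch against the raw ZooKeeper API follows; the connection string and timeout are placeholders.

import org.apache.twill.internal.zookeeper.KillZKSession;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class KillSessionExample {
  public static void main(String[] args) throws Exception {
    String connectStr = "localhost:2181";          // placeholder quorum address
    ZooKeeper client = new ZooKeeper(connectStr, 10000, new Watcher() {
      @Override
      public void process(WatchedEvent event) {
        // A real test would assert that an Expired event eventually arrives here.
      }
    });
    try {
      // Expire the client's session, waiting at most 10 seconds for it to happen.
      KillZKSession.kill(client, connectStr, 10000);
    } finally {
      client.close();
    }
  }
}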

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/zookeeper/src/main/java/org/apache/twill/internal/zookeeper/NamespaceZKClient.java
----------------------------------------------------------------------
diff --git a/zookeeper/src/main/java/org/apache/twill/internal/zookeeper/NamespaceZKClient.java b/zookeeper/src/main/java/org/apache/twill/internal/zookeeper/NamespaceZKClient.java
deleted file mode 100644
index 1a82e4b..0000000
--- a/zookeeper/src/main/java/org/apache/twill/internal/zookeeper/NamespaceZKClient.java
+++ /dev/null
@@ -1,163 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.zookeeper;
-
-import org.apache.twill.common.Threads;
-import org.apache.twill.zookeeper.ForwardingZKClient;
-import org.apache.twill.zookeeper.NodeChildren;
-import org.apache.twill.zookeeper.NodeData;
-import org.apache.twill.zookeeper.OperationFuture;
-import org.apache.twill.zookeeper.ZKClient;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.Watcher;
-import org.apache.zookeeper.data.Stat;
-
-import javax.annotation.Nullable;
-
-/**
- * A {@link ZKClient} that prefixes every path with a namespace.
- */
-public final class NamespaceZKClient extends ForwardingZKClient {
-  // This class extends ForwardingZKClient but overrides every method so that the
-  // delegation logic in ZKClientServices works.
-
-  private final String namespace;
-  private final ZKClient delegate;
-  private final String connectString;
-
-  public NamespaceZKClient(ZKClient delegate, String namespace) {
-    super(delegate);
-    this.namespace = namespace;
-    this.delegate = delegate;
-    this.connectString = delegate.getConnectString() + namespace;
-  }
-
-  @Override
-  public Long getSessionId() {
-    return delegate.getSessionId();
-  }
-
-  @Override
-  public String getConnectString() {
-    return connectString;
-  }
-
-  @Override
-  public void addConnectionWatcher(Watcher watcher) {
-    delegate.addConnectionWatcher(watcher);
-  }
-
-  @Override
-  public OperationFuture<String> create(String path, @Nullable byte[] data, CreateMode createMode) {
-    return relayPath(delegate.create(namespace + path, data, createMode), this.<String>createFuture(path));
-  }
-
-  @Override
-  public OperationFuture<String> create(String path, @Nullable byte[] data, CreateMode createMode,
-                                        boolean createParent) {
-    return relayPath(delegate.create(namespace + path, data, createMode, createParent),
-                     this.<String>createFuture(path));
-  }
-
-  @Override
-  public OperationFuture<Stat> exists(String path) {
-    return relayFuture(delegate.exists(namespace + path), this.<Stat>createFuture(path));
-  }
-
-  @Override
-  public OperationFuture<Stat> exists(String path, @Nullable Watcher watcher) {
-    return relayFuture(delegate.exists(namespace + path, watcher), this.<Stat>createFuture(path));
-  }
-
-  @Override
-  public OperationFuture<NodeChildren> getChildren(String path) {
-    return relayFuture(delegate.getChildren(namespace + path), this.<NodeChildren>createFuture(path));
-  }
-
-  @Override
-  public OperationFuture<NodeChildren> getChildren(String path, @Nullable Watcher watcher) {
-    return relayFuture(delegate.getChildren(namespace + path, watcher), this.<NodeChildren>createFuture(path));
-  }
-
-  @Override
-  public OperationFuture<NodeData> getData(String path) {
-    return relayFuture(delegate.getData(namespace + path), this.<NodeData>createFuture(path));
-  }
-
-  @Override
-  public OperationFuture<NodeData> getData(String path, @Nullable Watcher watcher) {
-    return relayFuture(delegate.getData(namespace + path, watcher), this.<NodeData>createFuture(path));
-  }
-
-  @Override
-  public OperationFuture<Stat> setData(String path, byte[] data) {
-    return relayFuture(delegate.setData(namespace + path, data), this.<Stat>createFuture(path));
-  }
-
-  @Override
-  public OperationFuture<Stat> setData(String dataPath, byte[] data, int version) {
-    return relayFuture(delegate.setData(namespace + dataPath, data, version), this.<Stat>createFuture(dataPath));
-  }
-
-  @Override
-  public OperationFuture<String> delete(String path) {
-    return relayPath(delegate.delete(namespace + path), this.<String>createFuture(path));
-  }
-
-  @Override
-  public OperationFuture<String> delete(String deletePath, int version) {
-    return relayPath(delegate.delete(namespace + deletePath, version), this.<String>createFuture(deletePath));
-  }
-
-  private <V> SettableOperationFuture<V> createFuture(String path) {
-    return SettableOperationFuture.create(namespace + path, Threads.SAME_THREAD_EXECUTOR);
-  }
-
-  private <V> OperationFuture<V> relayFuture(final OperationFuture<V> from, final SettableOperationFuture<V> to) {
-    Futures.addCallback(from, new FutureCallback<V>() {
-      @Override
-      public void onSuccess(V result) {
-        to.set(result);
-      }
-
-      @Override
-      public void onFailure(Throwable t) {
-        to.setException(t);
-      }
-    });
-    return to;
-  }
-
-  private OperationFuture<String> relayPath(final OperationFuture<String> from,
-                                            final SettableOperationFuture<String> to) {
-    from.addListener(new Runnable() {
-      @Override
-      public void run() {
-        try {
-          String path = from.get();
-          to.set(path.substring(namespace.length()));
-        } catch (Exception e) {
-          to.setException(e.getCause());
-        }
-      }
-    }, Threads.SAME_THREAD_EXECUTOR);
-    return to;
-  }
-}
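
The effect of the wrapper above is that callers work with paths relative to the namespace while the delegate sees the full path. A short sketch, with the namespace and method name chosen only for illustration and the delegate client obtained elsewhere:

import org.apache.twill.internal.zookeeper.NamespaceZKClient;
import org.apache.twill.zookeeper.OperationFuture;
import org.apache.twill.zookeeper.ZKClient;
import org.apache.zookeeper.CreateMode;

public class NamespaceExample {

  public static OperationFuture<String> createState(ZKClient delegate, byte[] data) {
    // Every path used through this client is silently prefixed with "/myapp".
    ZKClient zkClient = new NamespaceZKClient(delegate, "/myapp");
    // The delegate creates "/myapp/state", while the returned future yields "/state".
    return zkClient.create("/state", data, CreateMode.PERSISTENT);
  }
}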

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/zookeeper/src/main/java/org/apache/twill/internal/zookeeper/RetryUtils.java
----------------------------------------------------------------------
diff --git a/zookeeper/src/main/java/org/apache/twill/internal/zookeeper/RetryUtils.java b/zookeeper/src/main/java/org/apache/twill/internal/zookeeper/RetryUtils.java
deleted file mode 100644
index fb42491..0000000
--- a/zookeeper/src/main/java/org/apache/twill/internal/zookeeper/RetryUtils.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.zookeeper;
-
-import org.apache.zookeeper.KeeperException;
-
-/**
- * Utility class for help determining operation retry condition.
- */
-final class RetryUtils {
-
-  /**
-   * Tells if a given operation error code can be retried or not.
-   * @param code The error code of the operation.
-   * @return {@code true} if the operation can be retried.
-   */
-  public static boolean canRetry(KeeperException.Code code) {
-    return (code == KeeperException.Code.CONNECTIONLOSS
-          || code == KeeperException.Code.OPERATIONTIMEOUT
-          || code == KeeperException.Code.SESSIONEXPIRED
-          || code == KeeperException.Code.SESSIONMOVED);
-  }
-
-  /**
-   * Tells if a given operation exception can be retried or not.
-   * @param t The exception raised by an operation.
-   * @return {@code true} if the operation can be retried.
-   */
-  public static boolean canRetry(Throwable t) {
-    return t instanceof KeeperException && canRetry(((KeeperException) t).code());
-  }
-
-  private RetryUtils() {
-  }
-}


[02/28] Making maven site works.

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/zookeeper/src/main/java/org/apache/twill/internal/zookeeper/RewatchOnExpireWatcher.java
----------------------------------------------------------------------
diff --git a/zookeeper/src/main/java/org/apache/twill/internal/zookeeper/RewatchOnExpireWatcher.java b/zookeeper/src/main/java/org/apache/twill/internal/zookeeper/RewatchOnExpireWatcher.java
deleted file mode 100644
index 181ca2b..0000000
--- a/zookeeper/src/main/java/org/apache/twill/internal/zookeeper/RewatchOnExpireWatcher.java
+++ /dev/null
@@ -1,207 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.zookeeper;
-
-import org.apache.twill.zookeeper.NodeChildren;
-import org.apache.twill.zookeeper.NodeData;
-import org.apache.twill.zookeeper.ZKClient;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.WatchedEvent;
-import org.apache.zookeeper.Watcher;
-import org.apache.zookeeper.data.Stat;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.concurrent.atomic.AtomicMarkableReference;
-
-/**
- * A wrapper for {@link Watcher} that automatically re-sets the watch after session expiry, retrying until it succeeds.
- */
-final class RewatchOnExpireWatcher implements Watcher {
-
-  private static final Logger LOG = LoggerFactory.getLogger(RewatchOnExpireWatcher.class);
-
-  enum ActionType {
-    EXISTS,
-    CHILDREN,
-    DATA
-  }
-
-  private final ZKClient client;
-  private final ActionType actionType;
-  private final String path;
-  private final Watcher delegate;
-  private final AtomicMarkableReference<Object> lastResult;
-
-  RewatchOnExpireWatcher(ZKClient client, ActionType actionType, String path, Watcher delegate) {
-    this.client = client;
-    this.actionType = actionType;
-    this.path = path;
-    this.delegate = delegate;
-    this.lastResult = new AtomicMarkableReference<Object>(null, false);
-  }
-
-  /**
-   * Sets the result from the operation that caused this watcher to be set.
-   */
-  void setLastResult(Object result) {
-    lastResult.compareAndSet(null, result, false, true);
-  }
-
-  @Override
-  public void process(WatchedEvent event) {
-    if (delegate != null && event.getType() != Event.EventType.None) {
-      try {
-        delegate.process(event);
-      } catch (Throwable t) {
-        LOG.error("Watcher throws exception.", t);
-      }
-    }
-
-    if (event.getState() != Event.KeeperState.Expired) {
-      return;
-    }
-    switch (actionType) {
-      case EXISTS:
-        exists();
-        break;
-      case CHILDREN:
-        children();
-        break;
-      case DATA:
-        data();
-        break;
-    }
-  }
-
-  private void exists() {
-    Futures.addCallback(client.exists(path, this), new FutureCallback<Stat>() {
-      @Override
-      public void onSuccess(Stat stat) {
-        // Since all callbacks and watchers are triggered from a single event thread, there is no race condition.
-        Object oldResult = lastResult.getReference();
-        lastResult.compareAndSet(oldResult, null, true, false);
-
-        if (stat != oldResult && (stat == null || !stat.equals(oldResult))) {
-          if (stat == null) {
-            // previous stat is not null, means node deleted
-            process(new WatchedEvent(Event.EventType.NodeDeleted, Event.KeeperState.SyncConnected, path));
-          } else if (oldResult == null) {
-            // previous stat is null, means node created
-            process(new WatchedEvent(Event.EventType.NodeCreated, Event.KeeperState.SyncConnected, path));
-          } else {
-            // Otherwise, something changed on the node
-            process(new WatchedEvent(Event.EventType.NodeDataChanged, Event.KeeperState.SyncConnected, path));
-          }
-        }
-      }
-
-      @Override
-      public void onFailure(Throwable t) {
-        if (RetryUtils.canRetry(t)) {
-          exists();
-        } else {
-          lastResult.set(null, false);
-          LOG.error("Fail to re-set watch on exists for path " + path, t);
-        }
-      }
-    });
-  }
-
-  private void children() {
-    Futures.addCallback(client.getChildren(path, this), new FutureCallback<NodeChildren>() {
-      @Override
-      public void onSuccess(NodeChildren result) {
-        Object oldResult = lastResult.getReference();
-        lastResult.compareAndSet(oldResult, null, true, false);
-
-        if (result.equals(oldResult)) {
-          return;
-        }
-
-        if (!(oldResult instanceof NodeChildren)) {
-          // Something very wrong
-          LOG.error("The same watcher has been used for different event type.");
-          return;
-        }
-
-        NodeChildren oldNodeChildren = (NodeChildren) oldResult;
-        if (!result.getChildren().equals(oldNodeChildren.getChildren())) {
-          process(new WatchedEvent(Event.EventType.NodeChildrenChanged, Event.KeeperState.SyncConnected, path));
-        } else {
-          process(new WatchedEvent(Event.EventType.NodeDataChanged, Event.KeeperState.SyncConnected, path));
-        }
-      }
-
-      @Override
-      public void onFailure(Throwable t) {
-        if (RetryUtils.canRetry(t)) {
-          children();
-          return;
-        }
-
-        lastResult.set(null, false);
-        if (t instanceof KeeperException) {
-          KeeperException.Code code = ((KeeperException) t).code();
-          if (code == KeeperException.Code.NONODE) {
-            // Node deleted
-            process(new WatchedEvent(Event.EventType.NodeDeleted, Event.KeeperState.SyncConnected, path));
-            return;
-          }
-        }
-        LOG.error("Fail to re-set watch on getChildren for path " + path, t);
-      }
-    });
-  }
-
-  private void data() {
-    Futures.addCallback(client.getData(path, this), new FutureCallback<NodeData>() {
-      @Override
-      public void onSuccess(NodeData result) {
-        Object oldResult = lastResult.getReference();
-        lastResult.compareAndSet(oldResult, null, true, false);
-
-        if (!result.equals(oldResult)) {
-          // Whenever something changed, treat it as a data change.
-          process(new WatchedEvent(Event.EventType.NodeDataChanged, Event.KeeperState.SyncConnected, path));
-        }
-      }
-
-      @Override
-      public void onFailure(Throwable t) {
-        if (RetryUtils.canRetry(t)) {
-          data();
-          return;
-        }
-
-        lastResult.set(null, false);
-        if (t instanceof KeeperException) {
-          KeeperException.Code code = ((KeeperException) t).code();
-          if (code == KeeperException.Code.NONODE) {
-            // Node deleted
-            process(new WatchedEvent(Event.EventType.NodeDeleted, Event.KeeperState.SyncConnected, path));
-            return;
-          }
-        }
-        LOG.error("Fail to re-set watch on getData for path " + path, t);
-      }
-    });
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/zookeeper/src/main/java/org/apache/twill/internal/zookeeper/RewatchOnExpireZKClient.java
----------------------------------------------------------------------
diff --git a/zookeeper/src/main/java/org/apache/twill/internal/zookeeper/RewatchOnExpireZKClient.java b/zookeeper/src/main/java/org/apache/twill/internal/zookeeper/RewatchOnExpireZKClient.java
deleted file mode 100644
index 402c153..0000000
--- a/zookeeper/src/main/java/org/apache/twill/internal/zookeeper/RewatchOnExpireZKClient.java
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.zookeeper;
-
-import org.apache.twill.internal.zookeeper.RewatchOnExpireWatcher.ActionType;
-import org.apache.twill.zookeeper.ForwardingZKClient;
-import org.apache.twill.zookeeper.NodeChildren;
-import org.apache.twill.zookeeper.NodeData;
-import org.apache.twill.zookeeper.OperationFuture;
-import org.apache.twill.zookeeper.ZKClient;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import org.apache.zookeeper.Watcher;
-import org.apache.zookeeper.data.Stat;
-
-/**
- * A {@link ZKClient} that automatically re-sets watches after the session has expired and reconnected.
- * The rewatch logic is mainly done in {@link RewatchOnExpireWatcher}.
- */
-public final class RewatchOnExpireZKClient extends ForwardingZKClient {
-
-  public RewatchOnExpireZKClient(ZKClient delegate) {
-    super(delegate);
-  }
-
-  @Override
-  public OperationFuture<Stat> exists(String path, Watcher watcher) {
-    final RewatchOnExpireWatcher wrappedWatcher = new RewatchOnExpireWatcher(this, ActionType.EXISTS, path, watcher);
-    OperationFuture<Stat> result = super.exists(path, wrappedWatcher);
-    Futures.addCallback(result, new FutureCallback<Stat>() {
-      @Override
-      public void onSuccess(Stat result) {
-        wrappedWatcher.setLastResult(result);
-      }
-
-      @Override
-      public void onFailure(Throwable t) {
-        // No-op
-      }
-    });
-    return result;
-  }
-
-  @Override
-  public OperationFuture<NodeChildren> getChildren(String path, Watcher watcher) {
-    final RewatchOnExpireWatcher wrappedWatcher = new RewatchOnExpireWatcher(this, ActionType.CHILDREN, path, watcher);
-    OperationFuture<NodeChildren> result = super.getChildren(path, wrappedWatcher);
-    Futures.addCallback(result, new FutureCallback<NodeChildren>() {
-      @Override
-      public void onSuccess(NodeChildren result) {
-        wrappedWatcher.setLastResult(result);
-      }
-
-      @Override
-      public void onFailure(Throwable t) {
-        // No-op
-      }
-    });
-    return result;
-  }
-
-  @Override
-  public OperationFuture<NodeData> getData(String path, Watcher watcher) {
-    final RewatchOnExpireWatcher wrappedWatcher = new RewatchOnExpireWatcher(this, ActionType.DATA, path, watcher);
-    OperationFuture<NodeData> result = super.getData(path, wrappedWatcher);
-    Futures.addCallback(result, new FutureCallback<NodeData>() {
-      @Override
-      public void onSuccess(NodeData result) {
-        wrappedWatcher.setLastResult(result);
-      }
-
-      @Override
-      public void onFailure(Throwable t) {
-        // No-op
-      }
-    });
-    return result;
-
-  }
-}
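
Since the wrapper only intercepts the watch-setting calls, using it is just a matter of decorating an existing client. A short sketch, with the delegate and watcher supplied by the caller:

import org.apache.twill.internal.zookeeper.RewatchOnExpireZKClient;
import org.apache.twill.zookeeper.NodeData;
import org.apache.twill.zookeeper.OperationFuture;
import org.apache.twill.zookeeper.ZKClient;
import org.apache.zookeeper.Watcher;

public class RewatchExample {

  public static OperationFuture<NodeData> watchData(ZKClient delegate, String path, Watcher watcher) {
    // Watches set through this client survive session expiry: they are re-set after reconnect.
    ZKClient zkClient = new RewatchOnExpireZKClient(delegate);
    return zkClient.getData(path, watcher);
  }
}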

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/zookeeper/src/main/java/org/apache/twill/internal/zookeeper/SettableOperationFuture.java
----------------------------------------------------------------------
diff --git a/zookeeper/src/main/java/org/apache/twill/internal/zookeeper/SettableOperationFuture.java b/zookeeper/src/main/java/org/apache/twill/internal/zookeeper/SettableOperationFuture.java
deleted file mode 100644
index 7544e56..0000000
--- a/zookeeper/src/main/java/org/apache/twill/internal/zookeeper/SettableOperationFuture.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.zookeeper;
-
-import org.apache.twill.zookeeper.OperationFuture;
-import com.google.common.util.concurrent.AbstractFuture;
-
-import javax.annotation.Nullable;
-import java.util.concurrent.Executor;
-
-/**
- * An implementation of {@link OperationFuture} that allows setting the result directly.
- * All listener callbacks will be fired from the given executor.
- */
-public final class SettableOperationFuture<V> extends AbstractFuture<V> implements OperationFuture<V> {
-
-  private final String requestPath;
-  private final Executor executor;
-
-  public static <V> SettableOperationFuture<V> create(String path, Executor executor) {
-    return new SettableOperationFuture<V>(path, executor);
-  }
-
-  private SettableOperationFuture(String requestPath, Executor executor) {
-    this.requestPath = requestPath;
-    this.executor = executor;
-  }
-
-  @Override
-  public String getRequestPath() {
-    return requestPath;
-  }
-
-  @Override
-  public void addListener(final Runnable listener, final Executor exec) {
-    super.addListener(new Runnable() {
-      @Override
-      public void run() {
-        exec.execute(listener);
-      }
-    }, executor);
-  }
-
-  @Override
-  public boolean setException(Throwable throwable) {
-    return super.setException(throwable);
-  }
-
-  @Override
-  public boolean set(@Nullable V value) {
-    return super.set(value);
-  }
-}
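
A short sketch of how such a future is driven by hand, for example when relaying a result from another callback; the path is a placeholder and the same-thread executor matches how the surrounding code creates these futures:

import org.apache.twill.common.Threads;
import org.apache.twill.internal.zookeeper.SettableOperationFuture;

public class SettableFutureExample {
  public static void main(String[] args) throws Exception {
    final SettableOperationFuture<String> future =
      SettableOperationFuture.create("/some/path", Threads.SAME_THREAD_EXECUTOR);

    future.addListener(new Runnable() {
      @Override
      public void run() {
        // Fired through the executor given at creation time.
        System.out.println("Completed operation on " + future.getRequestPath());
      }
    }, Threads.SAME_THREAD_EXECUTOR);

    future.set("/some/path");   // completes the future and fires the listener
    System.out.println(future.get());
  }
}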

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/zookeeper/src/main/java/org/apache/twill/internal/zookeeper/package-info.java
----------------------------------------------------------------------
diff --git a/zookeeper/src/main/java/org/apache/twill/internal/zookeeper/package-info.java b/zookeeper/src/main/java/org/apache/twill/internal/zookeeper/package-info.java
deleted file mode 100644
index d2afa11..0000000
--- a/zookeeper/src/main/java/org/apache/twill/internal/zookeeper/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Internal classes for zookeeper.
- */
-package org.apache.twill.internal.zookeeper;

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/zookeeper/src/main/java/org/apache/twill/zookeeper/ForwardingZKClient.java
----------------------------------------------------------------------
diff --git a/zookeeper/src/main/java/org/apache/twill/zookeeper/ForwardingZKClient.java b/zookeeper/src/main/java/org/apache/twill/zookeeper/ForwardingZKClient.java
deleted file mode 100644
index 3f3003d..0000000
--- a/zookeeper/src/main/java/org/apache/twill/zookeeper/ForwardingZKClient.java
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.zookeeper;
-
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.Watcher;
-import org.apache.zookeeper.data.Stat;
-
-import javax.annotation.Nullable;
-
-/**
- * A {@link ZKClient} that forwards all method calls to a delegate {@link ZKClient}.
- */
-public abstract class ForwardingZKClient implements ZKClient {
-
-  private final ZKClient delegate;
-
-  protected ForwardingZKClient(ZKClient delegate) {
-    this.delegate = delegate;
-  }
-
-  public final ZKClient getDelegate() {
-    return delegate;
-  }
-
-  @Override
-  public Long getSessionId() {
-    return delegate.getSessionId();
-  }
-
-  @Override
-  public String getConnectString() {
-    return delegate.getConnectString();
-  }
-
-  @Override
-  public void addConnectionWatcher(Watcher watcher) {
-    delegate.addConnectionWatcher(watcher);
-  }
-
-  @Override
-  public OperationFuture<String> create(String path, @Nullable byte[] data, CreateMode createMode) {
-    return create(path, data, createMode, true);
-  }
-
-  @Override
-  public OperationFuture<String> create(String path, @Nullable byte[] data, CreateMode createMode,
-                                        boolean createParent) {
-    return delegate.create(path, data, createMode, createParent);
-  }
-
-  @Override
-  public OperationFuture<Stat> exists(String path) {
-    return exists(path, null);
-  }
-
-  @Override
-  public OperationFuture<Stat> exists(String path, @Nullable Watcher watcher) {
-    return delegate.exists(path, watcher);
-  }
-
-  @Override
-  public OperationFuture<NodeChildren> getChildren(String path) {
-    return getChildren(path, null);
-  }
-
-  @Override
-  public OperationFuture<NodeChildren> getChildren(String path, @Nullable Watcher watcher) {
-    return delegate.getChildren(path, watcher);
-  }
-
-  @Override
-  public OperationFuture<NodeData> getData(String path) {
-    return getData(path, null);
-  }
-
-  @Override
-  public OperationFuture<NodeData> getData(String path, @Nullable Watcher watcher) {
-    return delegate.getData(path, watcher);
-  }
-
-  @Override
-  public OperationFuture<Stat> setData(String path, byte[] data) {
-    return setData(path, data, -1);
-  }
-
-  @Override
-  public OperationFuture<Stat> setData(String dataPath, byte[] data, int version) {
-    return delegate.setData(dataPath, data, version);
-  }
-
-  @Override
-  public OperationFuture<String> delete(String path) {
-    return delete(path, -1);
-  }
-
-  @Override
-  public OperationFuture<String> delete(String deletePath, int version) {
-    return delegate.delete(deletePath, version);
-  }
-}
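
Subclasses only override what they need. For instance, a decorator that logs every data read could look roughly like the sketch below; the class name and log message are illustrative.

import org.apache.twill.zookeeper.ForwardingZKClient;
import org.apache.twill.zookeeper.NodeData;
import org.apache.twill.zookeeper.OperationFuture;
import org.apache.twill.zookeeper.ZKClient;
import org.apache.zookeeper.Watcher;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import javax.annotation.Nullable;

public final class LoggingZKClient extends ForwardingZKClient {

  private static final Logger LOG = LoggerFactory.getLogger(LoggingZKClient.class);

  public LoggingZKClient(ZKClient delegate) {
    super(delegate);
  }

  @Override
  public OperationFuture<NodeData> getData(String path, @Nullable Watcher watcher) {
    LOG.debug("getData on {}", path);
    return super.getData(path, watcher);
  }
}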

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/zookeeper/src/main/java/org/apache/twill/zookeeper/ForwardingZKClientService.java
----------------------------------------------------------------------
diff --git a/zookeeper/src/main/java/org/apache/twill/zookeeper/ForwardingZKClientService.java b/zookeeper/src/main/java/org/apache/twill/zookeeper/ForwardingZKClientService.java
deleted file mode 100644
index 10391b2..0000000
--- a/zookeeper/src/main/java/org/apache/twill/zookeeper/ForwardingZKClientService.java
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.zookeeper;
-
-import com.google.common.base.Supplier;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import org.apache.zookeeper.ZooKeeper;
-
-import java.util.concurrent.Executor;
-
-/**
- * A {@link ZKClientService} that forwards all method calls to a delegate {@link ZKClientService}.
- */
-public abstract class ForwardingZKClientService extends ForwardingZKClient implements ZKClientService {
-
-  private final ZKClientService delegate;
-
-  protected ForwardingZKClientService(ZKClientService delegate) {
-    super(delegate);
-    this.delegate = delegate;
-  }
-
-  @Override
-  public Supplier<ZooKeeper> getZooKeeperSupplier() {
-    return delegate.getZooKeeperSupplier();
-  }
-
-  @Override
-  public ListenableFuture<State> start() {
-    return delegate.start();
-  }
-
-  @Override
-  public State startAndWait() {
-    return Futures.getUnchecked(start());
-  }
-
-  @Override
-  public boolean isRunning() {
-    return delegate.isRunning();
-  }
-
-  @Override
-  public State state() {
-    return delegate.state();
-  }
-
-  @Override
-  public ListenableFuture<State> stop() {
-    return delegate.stop();
-  }
-
-  @Override
-  public State stopAndWait() {
-    return Futures.getUnchecked(stop());
-  }
-
-  @Override
-  public void addListener(Listener listener, Executor executor) {
-    delegate.addListener(listener, executor);
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/zookeeper/src/main/java/org/apache/twill/zookeeper/NodeChildren.java
----------------------------------------------------------------------
diff --git a/zookeeper/src/main/java/org/apache/twill/zookeeper/NodeChildren.java b/zookeeper/src/main/java/org/apache/twill/zookeeper/NodeChildren.java
deleted file mode 100644
index b432c01..0000000
--- a/zookeeper/src/main/java/org/apache/twill/zookeeper/NodeChildren.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.zookeeper;
-
-import org.apache.zookeeper.data.Stat;
-
-import java.util.List;
-
-/**
- * Represents the result of a call to the {@link ZKClientService#getChildren(String, org.apache.zookeeper.Watcher)} method.
- */
-public interface NodeChildren {
-
-  /**
-   * @return The {@link Stat} of the node.
-   */
-  Stat getStat();
-
-  /**
-   * @return List of children node names.
-   */
-  List<String> getChildren();
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/zookeeper/src/main/java/org/apache/twill/zookeeper/NodeData.java
----------------------------------------------------------------------
diff --git a/zookeeper/src/main/java/org/apache/twill/zookeeper/NodeData.java b/zookeeper/src/main/java/org/apache/twill/zookeeper/NodeData.java
deleted file mode 100644
index ac15957..0000000
--- a/zookeeper/src/main/java/org/apache/twill/zookeeper/NodeData.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.zookeeper;
-
-import org.apache.zookeeper.data.Stat;
-
-import javax.annotation.Nullable;
-
-/**
- * Represents the result of a call to {@link ZKClientService#getData(String, org.apache.zookeeper.Watcher)}.
- */
-public interface NodeData {
-
-  /**
-   * @return The {@link Stat} of the node.
-   */
-  Stat getStat();
-
-  /**
-   * @return Data stored in the node, or {@code null} if there is no data.
-   */
-  @Nullable
-  byte[] getData();
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/zookeeper/src/main/java/org/apache/twill/zookeeper/OperationFuture.java
----------------------------------------------------------------------
diff --git a/zookeeper/src/main/java/org/apache/twill/zookeeper/OperationFuture.java b/zookeeper/src/main/java/org/apache/twill/zookeeper/OperationFuture.java
deleted file mode 100644
index fafaa7a..0000000
--- a/zookeeper/src/main/java/org/apache/twill/zookeeper/OperationFuture.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.zookeeper;
-
-import com.google.common.util.concurrent.ListenableFuture;
-
-/**
- * A {@link ListenableFuture} that also provides the requested path for an operation.
- *
- * @param <V> The result type returned by this Future's {@link #get()} method.
- */
-public interface OperationFuture<V> extends ListenableFuture<V> {
-
-  /**
-   * @return The path being requested for the ZooKeeper operation.
-   */
-  String getRequestPath();
-}
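
The extra getRequestPath() accessor is mainly useful in shared callbacks, where one FutureCallback handles results for many nodes. A small sketch, assuming the same two-argument Futures.addCallback overload used elsewhere in this code:

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import org.apache.twill.zookeeper.NodeData;
import org.apache.twill.zookeeper.OperationFuture;

public final class OperationFutureExample {

  // Attaches a callback that reports which path the operation was for.
  public static void printWhenDone(final OperationFuture<NodeData> future) {
    Futures.addCallback(future, new FutureCallback<NodeData>() {
      @Override
      public void onSuccess(NodeData result) {
        System.out.println("Got data for " + future.getRequestPath());
      }

      @Override
      public void onFailure(Throwable t) {
        System.err.println("Failed on " + future.getRequestPath() + ": " + t);
      }
    });
  }

  private OperationFutureExample() {
  }
}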

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/zookeeper/src/main/java/org/apache/twill/zookeeper/RetryStrategies.java
----------------------------------------------------------------------
diff --git a/zookeeper/src/main/java/org/apache/twill/zookeeper/RetryStrategies.java b/zookeeper/src/main/java/org/apache/twill/zookeeper/RetryStrategies.java
deleted file mode 100644
index 56474b7..0000000
--- a/zookeeper/src/main/java/org/apache/twill/zookeeper/RetryStrategies.java
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.zookeeper;
-
-import com.google.common.base.Preconditions;
-
-import java.util.concurrent.TimeUnit;
-
-/**
- * Factory for creating common {@link RetryStrategy} implementations.
- */
-public final class RetryStrategies {
-
-  /**
-   * @return A {@link RetryStrategy} that doesn't do any retry.
-   */
-  public static RetryStrategy noRetry() {
-    return new RetryStrategy() {
-      @Override
-      public long nextRetry(int failureCount, long startTime, OperationType type, String path) {
-        return -1;
-      }
-    };
-  }
-
-  /**
-   * Creates a {@link RetryStrategy} that retries at most the given number of times, with the actual
-   * delay behavior delegated to another {@link RetryStrategy}.
-   * @param limit Maximum number of retries allowed.
-   * @param strategy When failure count is less than or equal to the limit, this strategy will be called.
-   * @return A {@link RetryStrategy}.
-   */
-  public static RetryStrategy limit(final int limit, final RetryStrategy strategy) {
-    Preconditions.checkArgument(limit >= 0, "limit must be >= 0");
-    return new RetryStrategy() {
-      @Override
-      public long nextRetry(int failureCount, long startTime, OperationType type, String path) {
-        return (failureCount <= limit) ? strategy.nextRetry(failureCount, startTime, type, path) : -1L;
-      }
-    };
-  }
-
-  /**
-   * Creates a {@link RetryStrategy} that imposes a fixed delay between retries.
-   * @param delay delay time
-   * @param delayUnit {@link TimeUnit} for the delay.
-   * @return A {@link RetryStrategy}.
-   */
-  public static RetryStrategy fixDelay(final long delay, final TimeUnit delayUnit) {
-    Preconditions.checkArgument(delay >= 0, "delay must be >= 0");
-    return new RetryStrategy() {
-      @Override
-      public long nextRetry(int failureCount, long startTime, OperationType type, String path) {
-        return TimeUnit.MILLISECONDS.convert(delay, delayUnit);
-      }
-    };
-  }
-
-  /**
-   * Creates a {@link RetryStrategy} that increases the delay exponentially between retries.
-   * @param baseDelay delay to start with.
-   * @param maxDelay cap of the delay.
-   * @param delayUnit {@link TimeUnit} for the delays.
-   * @return A {@link RetryStrategy}.
-   */
-  public static RetryStrategy exponentialDelay(final long baseDelay, final long maxDelay, final TimeUnit delayUnit) {
-    Preconditions.checkArgument(baseDelay >= 0, "base delay must be >= 0");
-    Preconditions.checkArgument(maxDelay >= 0, "max delay must be >= 0");
-    return new RetryStrategy() {
-      @Override
-      public long nextRetry(int failureCount, long startTime, OperationType type, String path) {
-        long power = failureCount > Long.SIZE ? Long.MAX_VALUE : (1L << (failureCount - 1));
-        long delay = Math.min(baseDelay * power, maxDelay);
-        delay = delay < 0 ? maxDelay : delay;
-        return TimeUnit.MILLISECONDS.convert(delay, delayUnit);
-      }
-    };
-  }
-
-  /**
-   * Creates a {@link RetryStrategy} that will retry until a maximum amount of time has passed since the request,
-   * with the actual delay behavior delegated to another {@link RetryStrategy}.
-   * @param maxElapseTime Maximum amount of time until giving up retry.
-   * @param timeUnit {@link TimeUnit} for the max elapse time.
-   * @param strategy When time elapsed is less than or equal to the limit, this strategy will be called.
-   * @return A {@link RetryStrategy}.
-   */
-  public static RetryStrategy timeLimit(long maxElapseTime, TimeUnit timeUnit, final RetryStrategy strategy) {
-    Preconditions.checkArgument(maxElapseTime >= 0, "max elapse time must be >= 0");
-    final long maxElapseMs = TimeUnit.MILLISECONDS.convert(maxElapseTime, timeUnit);
-    return new RetryStrategy() {
-      @Override
-      public long nextRetry(int failureCount, long startTime, OperationType type, String path) {
-        long elapseTime = System.currentTimeMillis() - startTime;
-        return elapseTime <= maxElapseMs ? strategy.nextRetry(failureCount, startTime, type, path) : -1L;
-      }
-    };
-  }
-
-  private RetryStrategies() {
-  }
-}
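
These factories compose, so a typical configuration bounds an exponential backoff both by attempt count and by elapsed time. A small sketch of such a combination; the concrete numbers are arbitrary:

import org.apache.twill.zookeeper.RetryStrategies;
import org.apache.twill.zookeeper.RetryStrategy;

import java.util.concurrent.TimeUnit;

public final class RetryStrategyConfig {

  /**
   * Exponential backoff starting at 100 ms, capped at 10 s per attempt,
   * retried at most 20 times and for at most 2 minutes overall.
   */
  public static RetryStrategy defaultStrategy() {
    return RetryStrategies.timeLimit(
      2, TimeUnit.MINUTES,
      RetryStrategies.limit(
        20,
        RetryStrategies.exponentialDelay(100, 10000, TimeUnit.MILLISECONDS)));
  }

  private RetryStrategyConfig() {
  }
}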

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/zookeeper/src/main/java/org/apache/twill/zookeeper/RetryStrategy.java
----------------------------------------------------------------------
diff --git a/zookeeper/src/main/java/org/apache/twill/zookeeper/RetryStrategy.java b/zookeeper/src/main/java/org/apache/twill/zookeeper/RetryStrategy.java
deleted file mode 100644
index 3301e8a..0000000
--- a/zookeeper/src/main/java/org/apache/twill/zookeeper/RetryStrategy.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.zookeeper;
-
-/**
- * Provides strategy to use for operation retries.
- */
-public interface RetryStrategy {
-
-  /**
-   * Defines the ZooKeeper operation types that can trigger a retry.
-   */
-  enum OperationType {
-    CREATE,
-    EXISTS,
-    GET_CHILDREN,
-    GET_DATA,
-    SET_DATA,
-    DELETE
-  }
-
-  /**
-   * Returns the number of milliseconds to wait before retrying the operation.
-   *
-   * @param failureCount Number of times that the request has failed.
-   * @param startTime Timestamp in milliseconds at which the request started.
-   * @param type Type of operation being performed.
-   * @param path The path that the operation is acting on.
-   * @return Number of milliseconds to wait before retrying the operation. Returning {@code 0} means
-   *         retry immediately, while a negative value means abort the operation.
-   */
-  long nextRetry(int failureCount, long startTime, OperationType type, String path);
-}
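
A custom strategy is just an implementation of this single method. For example, a strategy that only retries create operations, with a fixed half-second delay and an attempt limit, might look like the purely illustrative sketch below.

import org.apache.twill.zookeeper.RetryStrategy;

public final class CreateOnlyRetryStrategy implements RetryStrategy {

  @Override
  public long nextRetry(int failureCount, long startTime, OperationType type, String path) {
    // Retry create operations every 500 ms, up to 10 attempts; abort everything else.
    if (type == OperationType.CREATE && failureCount <= 10) {
      return 500L;
    }
    return -1L;
  }
}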

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/zookeeper/src/main/java/org/apache/twill/zookeeper/ZKClient.java
----------------------------------------------------------------------
diff --git a/zookeeper/src/main/java/org/apache/twill/zookeeper/ZKClient.java b/zookeeper/src/main/java/org/apache/twill/zookeeper/ZKClient.java
deleted file mode 100644
index d60182e..0000000
--- a/zookeeper/src/main/java/org/apache/twill/zookeeper/ZKClient.java
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.zookeeper;
-
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.Watcher;
-import org.apache.zookeeper.data.Stat;
-
-import javax.annotation.Nullable;
-
-/**
- * A ZooKeeper client that provides asynchronous zookeeper operations.
- */
-public interface ZKClient {
-
-  /**
-   * Returns the current Zookeeper session ID of this client.
-   * If this ZKClient is not connected, {@code null} is returned.
-   */
-  Long getSessionId();
-
-  /**
-   * Returns the connection string used for connecting to Zookeeper.
-   */
-  String getConnectString();
-
-  /**
-   * Adds a {@link Watcher} that will be called whenever the connection state changes.
-   * @param watcher The watcher to set.
-   */
-  void addConnectionWatcher(Watcher watcher);
-
-  /**
-   * Same as calling
-   * {@link #create(String, byte[], org.apache.zookeeper.CreateMode, boolean) create(path, data, createMode, true)}.
-   *
-   * @see #create(String, byte[], org.apache.zookeeper.CreateMode, boolean)
-   */
-  OperationFuture<String> create(String path, @Nullable byte[] data, CreateMode createMode);
-
-  /**
-   * Creates a path in zookeeper, with given data and create mode.
-   *
-   * @param path Path to be created
-   * @param data Data to be stored in the node, or {@code null} if no data to store.
-   * @param createMode The {@link org.apache.zookeeper.CreateMode} for the node.
-   * @param createParent If {@code true} and parent nodes are missing, it will create all parent nodes as normal
-   *                     persistent nodes before creating the requested node.
-   * @return An {@link OperationFuture} that will be completed when the
-   *         creation is done. If there is an error during creation, it will be reflected as an error in the future.
-   */
-  OperationFuture<String> create(String path, @Nullable byte[] data, CreateMode createMode, boolean createParent);
-
-  /**
-   * Checks if the path exists. Same as calling
-   * {@link #exists(String, org.apache.zookeeper.Watcher) exists(path, null)}.
-   *
-   * @see #exists(String, org.apache.zookeeper.Watcher)
-   */
-  OperationFuture<Stat> exists(String path);
-
-  /**
-   * Checks if the given path exists and leaves a watcher on the node for watching creation/deletion/data changes
-   * on the node.
-   *
-   * @param path The path to check for existence.
-   * @param watcher Watcher for watching changes, or {@code null} if no watcher to set.
-   * @return An {@link OperationFuture} that will be completed when the exists check is done. If the path
-   *         does exist, the node {@link Stat} is set into the future. If the path doesn't exist,
-   *         a {@code null} value is set into the future.
-   */
-  OperationFuture<Stat> exists(String path, @Nullable Watcher watcher);
-
-  /**
-   * Gets the list of children nodes under the given path. Same as calling
-   * {@link #getChildren(String, org.apache.zookeeper.Watcher) getChildren(path, null)}.
-   *
-   * @see #getChildren(String, org.apache.zookeeper.Watcher)
-   */
-  OperationFuture<NodeChildren> getChildren(String path);
-
-  /**
-   * Gets the list of children nodes under the given path and leaves a watcher on the node for watching node
-   * deletion and children node creation/deletion.
-   *
-   * @param path The path to fetch for children nodes
-   * @param watcher Watcher for watching changes, or {@code null} if no watcher to set.
-   * @return An {@link OperationFuture} that will be completed when the getChildren call is done, with the result
-   *         given as {@link NodeChildren}. If there is an error, it will be reflected as an error in the future.
-   */
-  OperationFuture<NodeChildren> getChildren(String path, @Nullable Watcher watcher);
-
-  /**
-   * Gets the data stored in the given path. Same as calling
-   * {@link #getData(String, org.apache.zookeeper.Watcher) getData(path, null)}.
-   */
-  OperationFuture<NodeData> getData(String path);
-
-  /**
-   * Gets the data stored in the given path and leaves a watcher on the node for watching deletion/data changes on
-   * the node.
-   *
-   * @param path The path to get data from.
-   * @param watcher Watcher for watching changes, or {@code null} if no watcher to set.
-   * @return An {@link OperationFuture} that will be completed when the getData call is done, with the result
-   *         given as {@link NodeData}. If there is an error, it will be reflected as an error in the future.
-   */
-  OperationFuture<NodeData> getData(String path, @Nullable Watcher watcher);
-
-  /**
-   * Sets the data for the given path without matching version. Same as calling
-   * {@link #setData(String, byte[], int) setData(path, data, -1)}.
-   */
-  OperationFuture<Stat> setData(String path, byte[] data);
-
-  /**
-   * Sets the data for the given path that matches the given version. If the version given is {@code -1}, it matches
-   * any version.
-   *
-   * @param dataPath The path to set data to.
-   * @param data Data to be set.
-   * @param version Matching version.
-   * @return An {@link OperationFuture} that will be completed when the setData call is done, with the node {@link Stat}
-   *         given as the future result. If there is an error, it will be reflected as an error in the future.
-   */
-  OperationFuture<Stat> setData(String dataPath, byte[] data, int version);
-
-  /**
-   * Deletes the node of the given path without matching version. Same as calling
-   * {@link #delete(String, int) delete(path, -1)}.
-   *
-   * @see #delete(String, int)
-   */
-  OperationFuture<String> delete(String path);
-
-  /**
-   * Deletes the node of the given path that matches the given version. If the version given is {@code -1}, it matches
-   * any version.
-   *
-   * @param deletePath The path of the node to delete.
-   * @param version Matching version.
-   * @return An {@link OperationFuture} that will be completed when the delete call is done, with the node path
-   *         given as the future result. If there is an error, it will be reflected as an error in the future.
-   */
-  OperationFuture<String> delete(String deletePath, int version);
-}
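
As an illustration of the asynchronous style of the interface above, here is a minimal sketch (not from the original source; the path and payload are made up) that chains a create with a getData through Guava futures:

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import org.apache.twill.zookeeper.NodeData;
import org.apache.twill.zookeeper.ZKClient;
import org.apache.zookeeper.CreateMode;

public final class ZKClientExample {

  // Creates "/example/config" (including missing parents), then reads it back asynchronously.
  static void createAndRead(final ZKClient client) {
    Futures.addCallback(
        client.create("/example/config", "hello".getBytes(), CreateMode.PERSISTENT, true),
        new FutureCallback<String>() {
          @Override
          public void onSuccess(String path) {
            Futures.addCallback(client.getData(path), new FutureCallback<NodeData>() {
              @Override
              public void onSuccess(NodeData nodeData) {
                System.out.println("Read " + nodeData.getData().length + " bytes");
              }

              @Override
              public void onFailure(Throwable t) {
                t.printStackTrace();
              }
            });
          }

          @Override
          public void onFailure(Throwable t) {
            t.printStackTrace();
          }
        });
  }
}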

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/zookeeper/src/main/java/org/apache/twill/zookeeper/ZKClientService.java
----------------------------------------------------------------------
diff --git a/zookeeper/src/main/java/org/apache/twill/zookeeper/ZKClientService.java b/zookeeper/src/main/java/org/apache/twill/zookeeper/ZKClientService.java
deleted file mode 100644
index 63f27fb..0000000
--- a/zookeeper/src/main/java/org/apache/twill/zookeeper/ZKClientService.java
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.zookeeper;
-
-import org.apache.twill.internal.zookeeper.DefaultZKClientService;
-import com.google.common.base.Supplier;
-import com.google.common.collect.HashMultimap;
-import com.google.common.collect.Multimap;
-import com.google.common.util.concurrent.Service;
-import org.apache.zookeeper.Watcher;
-import org.apache.zookeeper.ZooKeeper;
-import org.apache.zookeeper.data.ACL;
-
-/**
- * A {@link ZKClient} that extends from {@link Service} to provide lifecycle management functions.
- * The {@link #start()} method needs to be called before calling any other method on this interface.
- * When the client is no longer needed, call {@link #stop()} to release any resources that it holds.
- */
-public interface ZKClientService extends ZKClient, Service {
-
-  /**
-   * Returns a {@link Supplier} of {@link ZooKeeper} that gives the current {@link ZooKeeper} in use at the moment
-   * when {@link com.google.common.base.Supplier#get()} is called.
-   *
-   * @return A {@link Supplier Supplier&lt;ZooKeeper&gt;}
-   */
-  Supplier<ZooKeeper> getZooKeeperSupplier();
-
-  /**
-   * Builder for creating an implementation of {@link ZKClientService}.
-   * The default client timeout is 10000ms.
-   */
-  public static final class Builder {
-
-    private final String connectStr;
-    private int timeout = 10000;
-    private Watcher connectionWatcher;
-    private Multimap<String, ACL> acls = HashMultimap.create();
-
-    /**
-     * Creates a {@link Builder} with the given ZooKeeper connection string.
-     * @param connectStr The connection string.
-     * @return A new instance of Builder.
-     */
-    public static Builder of(String connectStr) {
-      return new Builder(connectStr);
-    }
-
-    /**
-     * Sets the client session timeout to the given number of milliseconds.
-     * @param timeout timeout in milliseconds.
-     * @return This builder
-     */
-    public Builder setSessionTimeout(int timeout) {
-      this.timeout = timeout;
-      return this;
-    }
-
-    /**
-     * Sets a {@link Watcher} that will be called whenever the connection state changes.
-     * @param watcher The watcher to set.
-     * @return This builder.
-     */
-    public Builder setConnectionWatcher(Watcher watcher) {
-      this.connectionWatcher = watcher;
-      return this;
-    }
-
-    /**
-     * Creates an instance of {@link ZKClientService} with the settings of this builder.
-     * @return A new instance of {@link ZKClientService}.
-     */
-    public ZKClientService build() {
-      return new DefaultZKClientService(connectStr, timeout, connectionWatcher);
-    }
-
-    private Builder(String connectStr) {
-      this.connectStr = connectStr;
-    }
-  }
-}
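
A minimal usage sketch of the builder above (the connect string and timeout are illustrative; the lifecycle methods come from the Guava Service interface):

import org.apache.twill.zookeeper.ZKClientService;

public final class ZKClientServiceExample {

  public static void main(String[] args) {
    // Build a client with a 5-second session timeout.
    ZKClientService zkClientService = ZKClientService.Builder.of("localhost:2181")
                                                             .setSessionTimeout(5000)
                                                             .build();
    // start() must be called before any other method is used.
    zkClientService.startAndWait();
    try {
      // ... perform ZooKeeper operations through the ZKClient methods ...
    } finally {
      // Release resources when the client is no longer needed.
      zkClientService.stopAndWait();
    }
  }
}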

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/zookeeper/src/main/java/org/apache/twill/zookeeper/ZKClientServices.java
----------------------------------------------------------------------
diff --git a/zookeeper/src/main/java/org/apache/twill/zookeeper/ZKClientServices.java b/zookeeper/src/main/java/org/apache/twill/zookeeper/ZKClientServices.java
deleted file mode 100644
index cc38c76..0000000
--- a/zookeeper/src/main/java/org/apache/twill/zookeeper/ZKClientServices.java
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.zookeeper;
-
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.Watcher;
-import org.apache.zookeeper.data.Stat;
-
-import javax.annotation.Nullable;
-
-/**
- * Provides static factory method to create {@link ZKClientService} with modified behaviors.
- */
-public final class ZKClientServices {
-
-  /**
-   * Creates a {@link ZKClientService} from the given {@link ZKClient} if the given {@link ZKClient} is an instance of
- * {@link ZKClientService} or is a {@link ForwardingZKClient} that eventually traces back to a delegate of type
-   * {@link ZKClientService}. If such a {@link ZKClientService} instance is found, this method returns
-   * an instance by invoking {@link #delegate(ZKClient, ZKClientService)} with the given {@link ZKClient} and
-   * the {@link ZKClientService} found respectively.
-   *
-   * @param client The {@link ZKClient}.
-   * @return A {@link ZKClientService}.
-   * @throws IllegalArgumentException If no {@link ZKClientService} is found.
-   */
-  public static ZKClientService delegate(ZKClient client) {
-    ZKClient zkClient = client;
-    while (!(zkClient instanceof ZKClientService) && zkClient instanceof ForwardingZKClient) {
-      zkClient = ((ForwardingZKClient) zkClient).getDelegate();
-    }
-    if (zkClient instanceof ZKClientService) {
-      return delegate(client, (ZKClientService) zkClient);
-    }
-    throw new IllegalArgumentException("No ZKClientService found from the delegation hierarchy");
-  }
-
-  /**
-   * Creates a {@link ZKClientService} whose {@link ZKClient} methods are delegated to the given
-   * {@link ZKClient}, while {@link ZKClientService} methods are delegated to the given {@link ZKClientService};
-   * the given {@link ZKClient} and {@link ZKClientService} can be different instances.
-   *
-   * @param client The {@link ZKClient} for delegation
-   * @param clientService The {@link ZKClientService} for delegation.
-   * @return A {@link ZKClientService}.
-   */
-  public static ZKClientService delegate(final ZKClient client, ZKClientService clientService) {
-    return new ForwardingZKClientService(clientService) {
-
-      @Override
-      public Long getSessionId() {
-        return client.getSessionId();
-      }
-
-      @Override
-      public String getConnectString() {
-        return client.getConnectString();
-      }
-
-      @Override
-      public void addConnectionWatcher(Watcher watcher) {
-        client.addConnectionWatcher(watcher);
-      }
-
-      @Override
-      public OperationFuture<String> create(String path, @Nullable byte[] data, CreateMode createMode) {
-        return client.create(path, data, createMode);
-      }
-
-      @Override
-      public OperationFuture<String> create(String path, @Nullable byte[] data, CreateMode createMode,
-                                            boolean createParent) {
-        return client.create(path, data, createMode, createParent);
-      }
-
-      @Override
-      public OperationFuture<Stat> exists(String path) {
-        return client.exists(path);
-      }
-
-      @Override
-      public OperationFuture<Stat> exists(String path, @Nullable Watcher watcher) {
-        return client.exists(path, watcher);
-      }
-
-      @Override
-      public OperationFuture<NodeChildren> getChildren(String path) {
-        return client.getChildren(path);
-      }
-
-      @Override
-      public OperationFuture<NodeChildren> getChildren(String path, @Nullable Watcher watcher) {
-        return client.getChildren(path, watcher);
-      }
-
-      @Override
-      public OperationFuture<NodeData> getData(String path) {
-        return client.getData(path);
-      }
-
-      @Override
-      public OperationFuture<NodeData> getData(String path, @Nullable Watcher watcher) {
-        return client.getData(path, watcher);
-      }
-
-      @Override
-      public OperationFuture<Stat> setData(String path, byte[] data) {
-        return client.setData(path, data);
-      }
-
-      @Override
-      public OperationFuture<Stat> setData(String dataPath, byte[] data, int version) {
-        return client.setData(dataPath, data, version);
-      }
-
-      @Override
-      public OperationFuture<String> delete(String path) {
-        return client.delete(path);
-      }
-
-      @Override
-      public OperationFuture<String> delete(String deletePath, int version) {
-        return client.delete(deletePath, version);
-      }
-    };
-  }
-
-  private ZKClientServices() {
-  }
-}
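
A sketch of how the delegation helper above is typically used, mirroring the pattern in the tests later in this change (the connect string and retry delay are illustrative): wrap a ZKClientService with a retrying ZKClient and recover lifecycle control through delegate().

import java.util.concurrent.TimeUnit;
import org.apache.twill.zookeeper.RetryStrategies;
import org.apache.twill.zookeeper.ZKClientService;
import org.apache.twill.zookeeper.ZKClientServices;
import org.apache.twill.zookeeper.ZKClients;

public final class DelegateExample {

  public static void main(String[] args) {
    // The retry wrapper returns a plain ZKClient; delegate() exposes it again as a
    // ZKClientService so that start()/stop() remain available.
    ZKClientService client = ZKClientServices.delegate(
      ZKClients.retryOnFailure(
        ZKClientService.Builder.of("localhost:2181").build(),
        RetryStrategies.fixDelay(1, TimeUnit.SECONDS)));
    client.startAndWait();
    // ... use the client ...
    client.stopAndWait();
  }
}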

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/zookeeper/src/main/java/org/apache/twill/zookeeper/ZKClients.java
----------------------------------------------------------------------
diff --git a/zookeeper/src/main/java/org/apache/twill/zookeeper/ZKClients.java b/zookeeper/src/main/java/org/apache/twill/zookeeper/ZKClients.java
deleted file mode 100644
index f67c1bd..0000000
--- a/zookeeper/src/main/java/org/apache/twill/zookeeper/ZKClients.java
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.zookeeper;
-
-import org.apache.twill.internal.zookeeper.FailureRetryZKClient;
-import org.apache.twill.internal.zookeeper.NamespaceZKClient;
-import org.apache.twill.internal.zookeeper.RewatchOnExpireZKClient;
-
-/**
- * Provides static factory methods for creating {@link ZKClient} instances with additional behaviors.
- */
-public final class ZKClients {
-
-  /**
-   * Creates a {@link ZKClient} that will perform auto re-watch on all existing watches
-   * when reconnection happens after session expiration. All {@link org.apache.zookeeper.Watcher Watchers}
-   * set through the returned {@link ZKClient} would not receive any connection events.
-   *
-   * @param client The {@link ZKClient} for operations delegation.
-   * @return A {@link ZKClient} that will do auto re-watch on all methods that accept a
-   *        {@link org.apache.zookeeper.Watcher} upon session expiration.
-   */
-  public static ZKClient reWatchOnExpire(ZKClient client) {
-    return new RewatchOnExpireZKClient(client);
-  }
-
-  /**
-   * Creates a {@link ZKClient} that will retry interim failures (e.g. connection loss, session expiration)
-   * based on the given {@link RetryStrategy}.
-   *
-   * @param client The {@link ZKClient} for operations delegation.
-   * @param retryStrategy The {@link RetryStrategy} to be invoked when there is an operation failure.
-   * @return A {@link ZKClient}.
-   */
-  public static ZKClient retryOnFailure(ZKClient client, RetryStrategy retryStrategy) {
-    return new FailureRetryZKClient(client, retryStrategy);
-  }
-
-
-  public static ZKClient namespace(ZKClient zkClient, String namespace) {
-    return new NamespaceZKClient(zkClient, namespace);
-  }
-
-  private ZKClients() {
-  }
-}
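
As a sketch of how these wrappers compose (the namespace and backoff values are illustrative): start from a ZKClient, add automatic re-watching on session expiration, add retries on transient failures, and finally scope all paths under a namespace.

import java.util.concurrent.TimeUnit;
import org.apache.twill.zookeeper.RetryStrategies;
import org.apache.twill.zookeeper.ZKClient;
import org.apache.twill.zookeeper.ZKClientService;
import org.apache.twill.zookeeper.ZKClients;

public final class ZKClientsExample {

  // Layers the decorators: re-watch on expire, then retry, then prefix all paths with "/myapp".
  static ZKClient wrap(ZKClientService base) {
    return ZKClients.namespace(
      ZKClients.retryOnFailure(
        ZKClients.reWatchOnExpire(base),
        RetryStrategies.exponentialDelay(100, 5000, TimeUnit.MILLISECONDS)),
      "/myapp");
  }
}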

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/zookeeper/src/main/java/org/apache/twill/zookeeper/ZKOperations.java
----------------------------------------------------------------------
diff --git a/zookeeper/src/main/java/org/apache/twill/zookeeper/ZKOperations.java b/zookeeper/src/main/java/org/apache/twill/zookeeper/ZKOperations.java
deleted file mode 100644
index 6dcd1a7..0000000
--- a/zookeeper/src/main/java/org/apache/twill/zookeeper/ZKOperations.java
+++ /dev/null
@@ -1,355 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.zookeeper;
-
-import org.apache.twill.common.Cancellable;
-import org.apache.twill.common.Threads;
-import org.apache.twill.internal.zookeeper.SettableOperationFuture;
-import com.google.common.collect.Lists;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.SettableFuture;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.WatchedEvent;
-import org.apache.zookeeper.Watcher;
-import org.apache.zookeeper.data.Stat;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.List;
-import java.util.concurrent.CancellationException;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-/**
- * Collection of helper methods for common operations that are usually needed when interacting with ZooKeeper.
- */
-public final class ZKOperations {
-
-  private static final Logger LOG = LoggerFactory.getLogger(ZKOperations.class);
-
-  /**
-   * Represents a callback for ZK operation updates.
-   * @param <T> Type of updated data.
-   */
-  public interface Callback<T> {
-    void updated(T data);
-  }
-
-  /**
-   * Interface for defining callback method to receive node data updates.
-   */
-  public interface DataCallback extends Callback<NodeData> {
-    /**
-     * Invoked when data of the node changed.
-     * @param nodeData New data of the node, or {@code null} if the node has been deleted.
-     */
-    @Override
-    void updated(NodeData nodeData);
-  }
-
-  /**
-   * Interface for defining callback method to receive children nodes updates.
-   */
-  public interface ChildrenCallback extends Callback<NodeChildren> {
-    @Override
-    void updated(NodeChildren nodeChildren);
-  }
-
-  private interface Operation<T> {
-    ZKClient getZKClient();
-
-    OperationFuture<T> exec(String path, Watcher watcher);
-  }
-
-  /**
-   * Watches for data changes of the given path. The callback will be triggered whenever a change has been
-   * detected. Note that the callback won't see every single change, as ZooKeeper does not guarantee that.
-   * If the node doesn't exist, it will watch for its creation and then start watching for data changes.
-   * If the node is deleted afterwards, it will watch for the node to be recreated and then resume watching.
-   *
-   * @param zkClient The {@link ZKClient} for the operation
-   * @param path Path to watch
-   * @param callback Callback to be invoked when a data change is detected.
-   * @return A {@link Cancellable} to cancel the watch.
-   */
-  public static Cancellable watchData(final ZKClient zkClient, final String path, final DataCallback callback) {
-    final AtomicBoolean cancelled = new AtomicBoolean(false);
-    watchChanges(new Operation<NodeData>() {
-
-      @Override
-      public ZKClient getZKClient() {
-        return zkClient;
-      }
-
-      @Override
-      public OperationFuture<NodeData> exec(String path, Watcher watcher) {
-        return zkClient.getData(path, watcher);
-      }
-    }, path, callback, cancelled);
-
-    return new Cancellable() {
-      @Override
-      public void cancel() {
-        cancelled.set(true);
-      }
-    };
-  }
-
-  public static ListenableFuture<String> watchDeleted(final ZKClient zkClient, final String path) {
-    SettableFuture<String> completion = SettableFuture.create();
-    watchDeleted(zkClient, path, completion);
-    return completion;
-  }
-
-  public static void watchDeleted(final ZKClient zkClient, final String path,
-                                  final SettableFuture<String> completion) {
-
-    Futures.addCallback(zkClient.exists(path, new Watcher() {
-      @Override
-      public void process(WatchedEvent event) {
-        if (!completion.isDone()) {
-          if (event.getType() == Event.EventType.NodeDeleted) {
-            completion.set(path);
-          } else {
-            watchDeleted(zkClient, path, completion);
-          }
-        }
-      }
-    }), new FutureCallback<Stat>() {
-      @Override
-      public void onSuccess(Stat result) {
-        if (result == null) {
-          completion.set(path);
-        }
-      }
-
-      @Override
-      public void onFailure(Throwable t) {
-        completion.setException(t);
-      }
-    });
-  }
-
-  public static Cancellable watchChildren(final ZKClient zkClient, String path, ChildrenCallback callback) {
-    final AtomicBoolean cancelled = new AtomicBoolean(false);
-    watchChanges(new Operation<NodeChildren>() {
-
-      @Override
-      public ZKClient getZKClient() {
-        return zkClient;
-      }
-
-      @Override
-      public OperationFuture<NodeChildren> exec(String path, Watcher watcher) {
-        return zkClient.getChildren(path, watcher);
-      }
-    }, path, callback, cancelled);
-
-    return new Cancellable() {
-      @Override
-      public void cancel() {
-        cancelled.set(true);
-      }
-    };
-  }
-
-  /**
-   * Returns a new {@link OperationFuture} whose result will be the same as that of the given future, except that
-   * when the source future fails with an exception matching the given exception type, the errorResult will be set
-   * into the returned {@link OperationFuture}.
-   * @param future The source future.
-   * @param exceptionType Type of {@link KeeperException} to be ignored.
-   * @param errorResult Object to be set into the resulting future on a matching exception.
-   * @param <V> Type of the result.
-   * @return A new {@link OperationFuture}.
-   */
-  public static <V> OperationFuture<V> ignoreError(OperationFuture<V> future,
-                                                   final Class<? extends KeeperException> exceptionType,
-                                                   final V errorResult) {
-    final SettableOperationFuture<V> resultFuture = SettableOperationFuture.create(future.getRequestPath(),
-                                                                                   Threads.SAME_THREAD_EXECUTOR);
-
-    Futures.addCallback(future, new FutureCallback<V>() {
-      @Override
-      public void onSuccess(V result) {
-        resultFuture.set(result);
-      }
-
-      @Override
-      public void onFailure(Throwable t) {
-        if (exceptionType.isAssignableFrom(t.getClass())) {
-          resultFuture.set(errorResult);
-        } else if (t instanceof CancellationException) {
-          resultFuture.cancel(true);
-        } else {
-          resultFuture.setException(t);
-        }
-      }
-    }, Threads.SAME_THREAD_EXECUTOR);
-
-    return resultFuture;
-  }
-
-  /**
-   * Deletes the given path recursively. The delete method will keep running until the given path is successfully
-   * removed, which means if new nodes are created under the given path while deleting, they'll get deleted
-   * as well.  If there is a {@link KeeperException} during the deletion other than
-   * {@link KeeperException.NotEmptyException} or {@link KeeperException.NoNodeException},
-   * the exception will be reflected in the result future and the deletion process will stop,
-   * leaving the given path in an intermediate state.
-   *
-   * @param path The path to delete.
-   * @return An {@link OperationFuture} that will be completed when the given path is deleted or the deletion is
-   *         aborted due to an exception.
-   */
-  public static OperationFuture<String> recursiveDelete(final ZKClient zkClient, final String path) {
-    final SettableOperationFuture<String> resultFuture =
-      SettableOperationFuture.create(path, Threads.SAME_THREAD_EXECUTOR);
-
-    // Try to delete the given path.
-    Futures.addCallback(zkClient.delete(path), new FutureCallback<String>() {
-      private final FutureCallback<String> deleteCallback = this;
-
-      @Override
-      public void onSuccess(String result) {
-        // Path deleted successfully. Operation done.
-        resultFuture.set(result);
-      }
-
-      @Override
-      public void onFailure(Throwable t) {
-        // Failed to delete the given path
-        if (!(t instanceof KeeperException.NotEmptyException || t instanceof KeeperException.NoNodeException)) {
-          // For errors other than NotEmptyException, treat the operation as failed.
-          resultFuture.setException(t);
-          return;
-        }
-
-        // If failed because of NotEmptyException, get the list of children under the given path
-        Futures.addCallback(zkClient.getChildren(path), new FutureCallback<NodeChildren>() {
-
-          @Override
-          public void onSuccess(NodeChildren result) {
-            // Delete all children nodes recursively.
-            final List<OperationFuture<String>> deleteFutures = Lists.newLinkedList();
-            for (String child :result.getChildren()) {
-              deleteFutures.add(recursiveDelete(zkClient, path + "/" + child));
-            }
-
-            // When deletion of all children succeeded, delete the given path again.
-            Futures.successfulAsList(deleteFutures).addListener(new Runnable() {
-              @Override
-              public void run() {
-                for (OperationFuture<String> deleteFuture : deleteFutures) {
-                  try {
-                    // If any exception when deleting children, treat the operation as failed.
-                    deleteFuture.get();
-                  } catch (Exception e) {
-                    resultFuture.setException(e.getCause());
-                  }
-                }
-                Futures.addCallback(zkClient.delete(path), deleteCallback, Threads.SAME_THREAD_EXECUTOR);
-              }
-            }, Threads.SAME_THREAD_EXECUTOR);
-          }
-
-          @Override
-          public void onFailure(Throwable t) {
-            // If failed to get list of children, treat the operation as failed.
-            resultFuture.setException(t);
-          }
-        }, Threads.SAME_THREAD_EXECUTOR);
-      }
-    }, Threads.SAME_THREAD_EXECUTOR);
-
-    return resultFuture;
-  }
-
-  /**
-   * Watches the given path until it exists.
-   * @param zkClient The {@link ZKClient} to use.
-   * @param path A ZooKeeper path to watch for existence.
-   */
-  private static void watchExists(final ZKClient zkClient, final String path, final SettableFuture<String> completion) {
-    Futures.addCallback(zkClient.exists(path, new Watcher() {
-      @Override
-      public void process(WatchedEvent event) {
-        if (!completion.isDone()) {
-          watchExists(zkClient, path, completion);
-        }
-      }
-    }), new FutureCallback<Stat>() {
-      @Override
-      public void onSuccess(Stat result) {
-        if (result != null) {
-          completion.set(path);
-        }
-      }
-
-      @Override
-      public void onFailure(Throwable t) {
-        completion.setException(t);
-      }
-    });
-  }
-
-  private static <T> void watchChanges(final Operation<T> operation, final String path,
-                                       final Callback<T> callback, final AtomicBoolean cancelled) {
-    Futures.addCallback(operation.exec(path, new Watcher() {
-      @Override
-      public void process(WatchedEvent event) {
-        if (!cancelled.get()) {
-          watchChanges(operation, path, callback, cancelled);
-        }
-      }
-    }), new FutureCallback<T>() {
-      @Override
-      public void onSuccess(T result) {
-        if (!cancelled.get()) {
-          callback.updated(result);
-        }
-      }
-
-      @Override
-      public void onFailure(Throwable t) {
-        if (t instanceof KeeperException && ((KeeperException) t).code() == KeeperException.Code.NONODE) {
-          final SettableFuture<String> existCompletion = SettableFuture.create();
-          existCompletion.addListener(new Runnable() {
-            @Override
-            public void run() {
-              try {
-                if (!cancelled.get()) {
-                  watchChanges(operation, existCompletion.get(), callback, cancelled);
-                }
-              } catch (Exception e) {
-                LOG.error("Failed to watch children for path " + path, e);
-              }
-            }
-          }, Threads.SAME_THREAD_EXECUTOR);
-          watchExists(operation.getZKClient(), path, existCompletion);
-          return;
-        }
-        LOG.error("Failed to watch data for path " + path + " " + t, t);
-      }
-    });
-  }
-
-  private ZKOperations() {
-  }
-}
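
A minimal sketch of using the helper above to watch a node for data changes (the path is illustrative; the returned Cancellable stops the watch):

import org.apache.twill.common.Cancellable;
import org.apache.twill.zookeeper.NodeData;
import org.apache.twill.zookeeper.ZKClient;
import org.apache.twill.zookeeper.ZKOperations;

public final class ZKOperationsExample {

  static Cancellable watchConfig(ZKClient zkClient) {
    return ZKOperations.watchData(zkClient, "/example/config", new ZKOperations.DataCallback() {
      @Override
      public void updated(NodeData nodeData) {
        // nodeData may be null if the node has been deleted.
        if (nodeData == null || nodeData.getData() == null) {
          System.out.println("Node deleted or has no data");
        } else {
          System.out.println("New data size: " + nodeData.getData().length);
        }
      }
    });
  }
}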

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/zookeeper/src/main/java/org/apache/twill/zookeeper/package-info.java
----------------------------------------------------------------------
diff --git a/zookeeper/src/main/java/org/apache/twill/zookeeper/package-info.java b/zookeeper/src/main/java/org/apache/twill/zookeeper/package-info.java
deleted file mode 100644
index e5bd237..0000000
--- a/zookeeper/src/main/java/org/apache/twill/zookeeper/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * This package provides functionality for ZooKeeper interactions.
- */
-package org.apache.twill.zookeeper;

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/zookeeper/src/test/java/org/apache/twill/zookeeper/RetryStrategyTest.java
----------------------------------------------------------------------
diff --git a/zookeeper/src/test/java/org/apache/twill/zookeeper/RetryStrategyTest.java b/zookeeper/src/test/java/org/apache/twill/zookeeper/RetryStrategyTest.java
deleted file mode 100644
index 601f0bd..0000000
--- a/zookeeper/src/test/java/org/apache/twill/zookeeper/RetryStrategyTest.java
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.zookeeper;
-
-import org.junit.Assert;
-import org.junit.Test;
-
-import java.util.concurrent.TimeUnit;
-
-/**
- *
- */
-public class RetryStrategyTest {
-
-  @Test
-  public void testNoRetry() {
-    RetryStrategy strategy = RetryStrategies.noRetry();
-    long startTime = System.currentTimeMillis();
-    for (int i = 1; i <= 10; i++) {
-      Assert.assertEquals(-1L, strategy.nextRetry(i, startTime, RetryStrategy.OperationType.CREATE, "/"));
-    }
-  }
-
-  @Test
-  public void testLimit() {
-    RetryStrategy strategy = RetryStrategies.limit(10, RetryStrategies.fixDelay(1, TimeUnit.MILLISECONDS));
-    long startTime = System.currentTimeMillis();
-    for (int i = 1; i <= 10; i++) {
-      Assert.assertEquals(1L, strategy.nextRetry(i, startTime, RetryStrategy.OperationType.CREATE, "/"));
-    }
-    Assert.assertEquals(-1L, strategy.nextRetry(11, startTime, RetryStrategy.OperationType.CREATE, "/"));
-  }
-
-  @Test
-  public void testUnlimited() {
-    RetryStrategy strategy = RetryStrategies.fixDelay(1, TimeUnit.MILLISECONDS);
-    long startTime = System.currentTimeMillis();
-    for (int i = 1; i <= 10; i++) {
-      Assert.assertEquals(1L, strategy.nextRetry(i, startTime, RetryStrategy.OperationType.CREATE, "/"));
-    }
-    Assert.assertEquals(1L, strategy.nextRetry(100000, startTime, RetryStrategy.OperationType.CREATE, "/"));
-  }
-
-  @Test
-  public void testExponential() {
-    RetryStrategy strategy = RetryStrategies.exponentialDelay(1, 60000, TimeUnit.MILLISECONDS);
-    long startTime = System.currentTimeMillis();
-    for (int i = 1; i <= 16; i++) {
-      Assert.assertEquals(1L << (i - 1), strategy.nextRetry(i, startTime, RetryStrategy.OperationType.CREATE, "/"));
-    }
-    for (int i = 60; i <= 80; i++) {
-      Assert.assertEquals(60000, strategy.nextRetry(i, startTime, RetryStrategy.OperationType.CREATE, "/"));
-    }
-  }
-
-  @Test
-  public void testExponentialLimit() {
-    RetryStrategy strategy = RetryStrategies.limit(99,
-                                                   RetryStrategies.exponentialDelay(1, 60000, TimeUnit.MILLISECONDS));
-    long startTime = System.currentTimeMillis();
-    for (int i = 1; i <= 16; i++) {
-      Assert.assertEquals(1L << (i - 1), strategy.nextRetry(i, startTime, RetryStrategy.OperationType.CREATE, "/"));
-    }
-    for (int i = 60; i <= 80; i++) {
-      Assert.assertEquals(60000, strategy.nextRetry(i, startTime, RetryStrategy.OperationType.CREATE, "/"));
-    }
-    Assert.assertEquals(-1L, strategy.nextRetry(100, startTime, RetryStrategy.OperationType.CREATE, "/"));
-  }
-
-  @Test
-  public void testTimeLimit() throws InterruptedException {
-    RetryStrategy strategy = RetryStrategies.timeLimit(1, TimeUnit.SECONDS,
-                                                       RetryStrategies.fixDelay(1, TimeUnit.MILLISECONDS));
-    long startTime = System.currentTimeMillis();
-    Assert.assertEquals(1L, strategy.nextRetry(1, startTime, RetryStrategy.OperationType.CREATE, "/"));
-    TimeUnit.MILLISECONDS.sleep(1100);
-    Assert.assertEquals(-1L, strategy.nextRetry(2, startTime, RetryStrategy.OperationType.CREATE, "/"));
-  }
-}
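
For reference, the factory methods exercised above can be composed into a single strategy; a small sketch (the bounds are illustrative, not prescribed by the tests):

import java.util.concurrent.TimeUnit;
import org.apache.twill.zookeeper.RetryStrategies;
import org.apache.twill.zookeeper.RetryStrategy;

public final class ComposedRetryExample {

  // Exponential backoff from 10 ms up to 10 seconds, capped at 20 attempts.
  static RetryStrategy createStrategy() {
    return RetryStrategies.limit(20, RetryStrategies.exponentialDelay(10, 10000, TimeUnit.MILLISECONDS));
  }
}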

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/zookeeper/src/test/java/org/apache/twill/zookeeper/ZKClientTest.java
----------------------------------------------------------------------
diff --git a/zookeeper/src/test/java/org/apache/twill/zookeeper/ZKClientTest.java b/zookeeper/src/test/java/org/apache/twill/zookeeper/ZKClientTest.java
deleted file mode 100644
index f1db74a..0000000
--- a/zookeeper/src/test/java/org/apache/twill/zookeeper/ZKClientTest.java
+++ /dev/null
@@ -1,254 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.zookeeper;
-
-import org.apache.twill.internal.zookeeper.InMemoryZKServer;
-import org.apache.twill.internal.zookeeper.KillZKSession;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.Lists;
-import com.google.common.io.Files;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.WatchedEvent;
-import org.apache.zookeeper.Watcher;
-import org.junit.Assert;
-import org.junit.Test;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.List;
-import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-/**
- *
- */
-public class ZKClientTest {
-
-  @Test
-  public void testChroot() throws Exception {
-    InMemoryZKServer zkServer = InMemoryZKServer.builder().setTickTime(1000).build();
-    zkServer.startAndWait();
-
-    try {
-      ZKClientService client = ZKClientService.Builder.of(zkServer.getConnectionStr() + "/chroot").build();
-      client.startAndWait();
-      try {
-        List<OperationFuture<String>> futures = Lists.newArrayList();
-        futures.add(client.create("/test1/test2", null, CreateMode.PERSISTENT));
-        futures.add(client.create("/test1/test3", null, CreateMode.PERSISTENT));
-        Futures.successfulAsList(futures).get();
-
-        Assert.assertNotNull(client.exists("/test1/test2").get());
-        Assert.assertNotNull(client.exists("/test1/test3").get());
-
-      } finally {
-        client.stopAndWait();
-      }
-    } finally {
-      zkServer.stopAndWait();
-    }
-  }
-
-  @Test
-  public void testCreateParent() throws ExecutionException, InterruptedException {
-    InMemoryZKServer zkServer = InMemoryZKServer.builder().setTickTime(1000).build();
-    zkServer.startAndWait();
-
-    try {
-      ZKClientService client = ZKClientService.Builder.of(zkServer.getConnectionStr()).build();
-      client.startAndWait();
-
-      try {
-        String path = client.create("/test1/test2/test3/test4/test5",
-                                    "testing".getBytes(), CreateMode.PERSISTENT_SEQUENTIAL).get();
-        Assert.assertTrue(path.startsWith("/test1/test2/test3/test4/test5"));
-
-        String dataPath = "";
-        for (int i = 1; i <= 4; i++) {
-          dataPath = dataPath + "/test" + i;
-          Assert.assertNull(client.getData(dataPath).get().getData());
-        }
-        Assert.assertTrue(Arrays.equals("testing".getBytes(), client.getData(path).get().getData()));
-      } finally {
-        client.stopAndWait();
-      }
-    } finally {
-      zkServer.stopAndWait();
-    }
-  }
-
-  @Test
-  public void testGetChildren() throws ExecutionException, InterruptedException {
-    InMemoryZKServer zkServer = InMemoryZKServer.builder().setTickTime(1000).build();
-    zkServer.startAndWait();
-
-    try {
-      ZKClientService client = ZKClientService.Builder.of(zkServer.getConnectionStr()).build();
-      client.startAndWait();
-
-      try {
-        client.create("/test", null, CreateMode.PERSISTENT).get();
-        Assert.assertTrue(client.getChildren("/test").get().getChildren().isEmpty());
-
-        Futures.allAsList(ImmutableList.of(client.create("/test/c1", null, CreateMode.EPHEMERAL),
-                                           client.create("/test/c2", null, CreateMode.EPHEMERAL))).get();
-
-        NodeChildren nodeChildren = client.getChildren("/test").get();
-        Assert.assertEquals(2, nodeChildren.getChildren().size());
-
-        Assert.assertEquals(ImmutableSet.of("c1", "c2"), ImmutableSet.copyOf(nodeChildren.getChildren()));
-
-      } finally {
-        client.stopAndWait();
-      }
-    } finally {
-      zkServer.stopAndWait();
-    }
-  }
-
-  @Test
-  public void testSetData() throws ExecutionException, InterruptedException {
-    InMemoryZKServer zkServer = InMemoryZKServer.builder().setTickTime(1000).build();
-    zkServer.startAndWait();
-
-    try {
-      ZKClientService client = ZKClientService.Builder.of(zkServer.getConnectionStr()).build();
-      client.startAndWait();
-
-      client.create("/test", null, CreateMode.PERSISTENT).get();
-      Assert.assertNull(client.getData("/test").get().getData());
-
-      client.setData("/test", "testing".getBytes()).get();
-      Assert.assertTrue(Arrays.equals("testing".getBytes(), client.getData("/test").get().getData()));
-
-    } finally {
-      zkServer.stopAndWait();
-    }
-  }
-
-  @Test
-  public void testExpireRewatch() throws InterruptedException, IOException, ExecutionException {
-    InMemoryZKServer zkServer = InMemoryZKServer.builder().setTickTime(1000).build();
-    zkServer.startAndWait();
-
-    try {
-      final CountDownLatch expireReconnectLatch = new CountDownLatch(1);
-      final AtomicBoolean expired = new AtomicBoolean(false);
-      final ZKClientService client = ZKClientServices.delegate(ZKClients.reWatchOnExpire(
-                                        ZKClientService.Builder.of(zkServer.getConnectionStr())
-                                                       .setSessionTimeout(2000)
-                                                       .setConnectionWatcher(new Watcher() {
-            @Override
-            public void process(WatchedEvent event) {
-              if (event.getState() == Event.KeeperState.Expired) {
-                expired.set(true);
-              } else if (event.getState() == Event.KeeperState.SyncConnected && expired.compareAndSet(true, true)) {
-                expireReconnectLatch.countDown();
-              }
-            }
-          }).build()));
-      client.startAndWait();
-
-      try {
-        final BlockingQueue<Watcher.Event.EventType> events = new LinkedBlockingQueue<Watcher.Event.EventType>();
-        client.exists("/expireRewatch", new Watcher() {
-          @Override
-          public void process(WatchedEvent event) {
-            client.exists("/expireRewatch", this);
-            events.add(event.getType());
-          }
-        });
-
-        client.create("/expireRewatch", null, CreateMode.PERSISTENT);
-        Assert.assertEquals(Watcher.Event.EventType.NodeCreated, events.poll(2, TimeUnit.SECONDS));
-
-        KillZKSession.kill(client.getZooKeeperSupplier().get(), zkServer.getConnectionStr(), 1000);
-
-        Assert.assertTrue(expireReconnectLatch.await(5, TimeUnit.SECONDS));
-
-        client.delete("/expireRewatch");
-        Assert.assertEquals(Watcher.Event.EventType.NodeDeleted, events.poll(4, TimeUnit.SECONDS));
-      } finally {
-        client.stopAndWait();
-      }
-    } finally {
-      zkServer.stopAndWait();
-    }
-  }
-
-  @Test
-  public void testRetry() throws ExecutionException, InterruptedException, TimeoutException {
-    File dataDir = Files.createTempDir();
-    InMemoryZKServer zkServer = InMemoryZKServer.builder().setDataDir(dataDir).setTickTime(1000).build();
-    zkServer.startAndWait();
-    int port = zkServer.getLocalAddress().getPort();
-
-    final CountDownLatch disconnectLatch = new CountDownLatch(1);
-    ZKClientService client = ZKClientServices.delegate(ZKClients.retryOnFailure(
-      ZKClientService.Builder.of(zkServer.getConnectionStr()).setConnectionWatcher(new Watcher() {
-      @Override
-      public void process(WatchedEvent event) {
-        if (event.getState() == Event.KeeperState.Disconnected) {
-          disconnectLatch.countDown();
-        }
-      }
-    }).build(), RetryStrategies.fixDelay(0, TimeUnit.SECONDS)));
-    client.startAndWait();
-
-    zkServer.stopAndWait();
-
-    Assert.assertTrue(disconnectLatch.await(1, TimeUnit.SECONDS));
-
-    final CountDownLatch createLatch = new CountDownLatch(1);
-    Futures.addCallback(client.create("/testretry/test", null, CreateMode.PERSISTENT), new FutureCallback<String>() {
-      @Override
-      public void onSuccess(String result) {
-        createLatch.countDown();
-      }
-
-      @Override
-      public void onFailure(Throwable t) {
-        t.printStackTrace(System.out);
-      }
-    });
-
-    TimeUnit.SECONDS.sleep(2);
-    zkServer = InMemoryZKServer.builder()
-                               .setDataDir(dataDir)
-                               .setAutoCleanDataDir(true)
-                               .setPort(port)
-                               .setTickTime(1000)
-                               .build();
-    zkServer.startAndWait();
-
-    try {
-      Assert.assertTrue(createLatch.await(5, TimeUnit.SECONDS));
-    } finally {
-      zkServer.stopAndWait();
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/zookeeper/src/test/java/org/apache/twill/zookeeper/ZKOperationsTest.java
----------------------------------------------------------------------
diff --git a/zookeeper/src/test/java/org/apache/twill/zookeeper/ZKOperationsTest.java b/zookeeper/src/test/java/org/apache/twill/zookeeper/ZKOperationsTest.java
deleted file mode 100644
index 9518d6e..0000000
--- a/zookeeper/src/test/java/org/apache/twill/zookeeper/ZKOperationsTest.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.zookeeper;
-
-import org.apache.twill.internal.zookeeper.InMemoryZKServer;
-import org.apache.zookeeper.CreateMode;
-import org.junit.Assert;
-import org.junit.Test;
-
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-
-/**
- *
- */
-public class ZKOperationsTest {
-
-  @Test
-  public void recursiveDelete() throws ExecutionException, InterruptedException, TimeoutException {
-    InMemoryZKServer zkServer = InMemoryZKServer.builder().setTickTime(1000).build();
-    zkServer.startAndWait();
-
-    try {
-      ZKClientService client = ZKClientService.Builder.of(zkServer.getConnectionStr()).build();
-      client.startAndWait();
-
-      try {
-        client.create("/test1/test10/test101", null, CreateMode.PERSISTENT).get();
-        client.create("/test1/test10/test102", null, CreateMode.PERSISTENT).get();
-        client.create("/test1/test10/test103", null, CreateMode.PERSISTENT).get();
-
-        client.create("/test1/test11/test111", null, CreateMode.PERSISTENT).get();
-        client.create("/test1/test11/test112", null, CreateMode.PERSISTENT).get();
-        client.create("/test1/test11/test113", null, CreateMode.PERSISTENT).get();
-
-        ZKOperations.recursiveDelete(client, "/test1").get(2, TimeUnit.SECONDS);
-
-        Assert.assertNull(client.exists("/test1").get(2, TimeUnit.SECONDS));
-
-      } finally {
-        client.stopAndWait();
-      }
-    } finally {
-      zkServer.stopAndWait();
-    }
-  }
-}


[09/28] Making maven site works.

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-zookeeper/src/main/java/org/apache/twill/internal/zookeeper/FailureRetryZKClient.java
----------------------------------------------------------------------
diff --git a/twill-zookeeper/src/main/java/org/apache/twill/internal/zookeeper/FailureRetryZKClient.java b/twill-zookeeper/src/main/java/org/apache/twill/internal/zookeeper/FailureRetryZKClient.java
new file mode 100644
index 0000000..65ceadb
--- /dev/null
+++ b/twill-zookeeper/src/main/java/org/apache/twill/internal/zookeeper/FailureRetryZKClient.java
@@ -0,0 +1,240 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.zookeeper;
+
+import org.apache.twill.common.Threads;
+import org.apache.twill.zookeeper.ForwardingZKClient;
+import org.apache.twill.zookeeper.NodeChildren;
+import org.apache.twill.zookeeper.NodeData;
+import org.apache.twill.zookeeper.OperationFuture;
+import org.apache.twill.zookeeper.RetryStrategy;
+import org.apache.twill.zookeeper.RetryStrategy.OperationType;
+import org.apache.twill.zookeeper.ZKClient;
+import com.google.common.base.Supplier;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import org.apache.zookeeper.CreateMode;
+import org.apache.zookeeper.Watcher;
+import org.apache.zookeeper.data.Stat;
+
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+
+/**
+ * A {@link ZKClient} that will invoke {@link RetryStrategy} on operation failure.
+ * This {@link ZKClient} works by delegating calls to another {@link ZKClient}
+ * and listening for the result. If the result is a failure, and is
+ * {@link RetryUtils#canRetry(org.apache.zookeeper.KeeperException.Code) retryable}, the given {@link RetryStrategy}
+ * will be called to determine the next retry time, or give up, depending on the value returned by the strategy.
+ */
+public final class FailureRetryZKClient extends ForwardingZKClient {
+
+  private static final ScheduledExecutorService SCHEDULER = Executors.newSingleThreadScheduledExecutor(
+                                                                Threads.createDaemonThreadFactory("retry-zkclient"));
+  private final RetryStrategy retryStrategy;
+
+  public FailureRetryZKClient(ZKClient delegate, RetryStrategy retryStrategy) {
+    super(delegate);
+    this.retryStrategy = retryStrategy;
+  }
+
+  @Override
+  public OperationFuture<String> create(String path, byte[] data, CreateMode createMode) {
+    return create(path, data, createMode, true);
+  }
+
+  @Override
+  public OperationFuture<String> create(final String path, final byte[] data,
+                                        final CreateMode createMode, final boolean createParent) {
+
+    // No retry for any SEQUENTIAL node, as some algorithms depend on only one sequential node being created.
+    if (createMode == CreateMode.PERSISTENT_SEQUENTIAL || createMode == CreateMode.EPHEMERAL_SEQUENTIAL) {
+      return super.create(path, data, createMode, createParent);
+    }
+
+    final SettableOperationFuture<String> result = SettableOperationFuture.create(path, Threads.SAME_THREAD_EXECUTOR);
+    Futures.addCallback(super.create(path, data, createMode, createParent),
+                        new OperationFutureCallback<String>(OperationType.CREATE, System.currentTimeMillis(),
+                                                            path, result, new Supplier<OperationFuture<String>>() {
+                          @Override
+                          public OperationFuture<String> get() {
+                            return FailureRetryZKClient.super.create(path, data, createMode, createParent);
+                          }
+                        }));
+    return result;
+  }
+
+  @Override
+  public OperationFuture<Stat> exists(String path) {
+    return exists(path, null);
+  }
+
+  @Override
+  public OperationFuture<Stat> exists(final String path, final Watcher watcher) {
+    final SettableOperationFuture<Stat> result = SettableOperationFuture.create(path, Threads.SAME_THREAD_EXECUTOR);
+    Futures.addCallback(super.exists(path, watcher),
+                        new OperationFutureCallback<Stat>(OperationType.EXISTS, System.currentTimeMillis(),
+                                                          path, result, new Supplier<OperationFuture<Stat>>() {
+                          @Override
+                          public OperationFuture<Stat> get() {
+                            return FailureRetryZKClient.super.exists(path, watcher);
+                          }
+                        }));
+    return result;
+  }
+
+  @Override
+  public OperationFuture<NodeChildren> getChildren(String path) {
+    return getChildren(path, null);
+  }
+
+  @Override
+  public OperationFuture<NodeChildren> getChildren(final String path, final Watcher watcher) {
+    final SettableOperationFuture<NodeChildren> result = SettableOperationFuture.create(path,
+                                                                                        Threads.SAME_THREAD_EXECUTOR);
+    Futures.addCallback(super.getChildren(path, watcher),
+                        new OperationFutureCallback<NodeChildren>(OperationType.GET_CHILDREN,
+                                                                  System.currentTimeMillis(), path, result,
+                                                                  new Supplier<OperationFuture<NodeChildren>>() {
+                          @Override
+                          public OperationFuture<NodeChildren> get() {
+                            return FailureRetryZKClient.super.getChildren(path, watcher);
+                          }
+                        }));
+    return result;
+  }
+
+  @Override
+  public OperationFuture<NodeData> getData(String path) {
+    return getData(path, null);
+  }
+
+  @Override
+  public OperationFuture<NodeData> getData(final String path, final Watcher watcher) {
+    final SettableOperationFuture<NodeData> result = SettableOperationFuture.create(path, Threads.SAME_THREAD_EXECUTOR);
+    Futures.addCallback(super.getData(path, watcher),
+                        new OperationFutureCallback<NodeData>(OperationType.GET_DATA, System.currentTimeMillis(),
+                                                              path, result, new Supplier<OperationFuture<NodeData>>() {
+                          @Override
+                          public OperationFuture<NodeData> get() {
+                            return FailureRetryZKClient.super.getData(path, watcher);
+                          }
+                        }));
+    return result;
+  }
+
+  @Override
+  public OperationFuture<Stat> setData(String path, byte[] data) {
+    return setData(path, data, -1);
+  }
+
+  @Override
+  public OperationFuture<Stat> setData(final String dataPath, final byte[] data, final int version) {
+    final SettableOperationFuture<Stat> result = SettableOperationFuture.create(dataPath, Threads.SAME_THREAD_EXECUTOR);
+    Futures.addCallback(super.setData(dataPath, data, version),
+                        new OperationFutureCallback<Stat>(OperationType.SET_DATA, System.currentTimeMillis(),
+                                                          dataPath, result, new Supplier<OperationFuture<Stat>>() {
+                          @Override
+                          public OperationFuture<Stat> get() {
+                            return FailureRetryZKClient.super.setData(dataPath, data, version);
+                          }
+                        }));
+    return result;
+  }
+
+  @Override
+  public OperationFuture<String> delete(String path) {
+    return delete(path, -1);
+  }
+
+  @Override
+  public OperationFuture<String> delete(final String deletePath, final int version) {
+    final SettableOperationFuture<String> result = SettableOperationFuture.create(deletePath,
+                                                                                  Threads.SAME_THREAD_EXECUTOR);
+    Futures.addCallback(super.delete(deletePath, version),
+                        new OperationFutureCallback<String>(OperationType.DELETE, System.currentTimeMillis(),
+                                                            deletePath, result,
+                                                            new Supplier<OperationFuture<String>>() {
+                          @Override
+                          public OperationFuture<String> get() {
+                            return FailureRetryZKClient.super.delete(deletePath, version);
+                          }
+                        }));
+    return result;
+  }
+
+  /**
+   * Callback to watch for operation result and trigger retry if necessary.
+   * @param <V> Type of operation result.
+   */
+  private final class OperationFutureCallback<V> implements FutureCallback<V> {
+
+    private final OperationType type;
+    private final long startTime;
+    private final String path;
+    private final SettableOperationFuture<V> result;
+    private final Supplier<OperationFuture<V>> retryAction;
+    private final AtomicInteger failureCount;
+
+    private OperationFutureCallback(OperationType type, long startTime, String path,
+                                    SettableOperationFuture<V> result, Supplier<OperationFuture<V>> retryAction) {
+      this.type = type;
+      this.startTime = startTime;
+      this.path = path;
+      this.result = result;
+      this.retryAction = retryAction;
+      this.failureCount = new AtomicInteger(0);
+    }
+
+    @Override
+    public void onSuccess(V result) {
+      this.result.set(result);
+    }
+
+    @Override
+    public void onFailure(Throwable t) {
+      if (!doRetry(t)) {
+        result.setException(t);
+      }
+    }
+
+    private boolean doRetry(Throwable t) {
+      if (!RetryUtils.canRetry(t)) {
+        return false;
+      }
+
+      // Determine the retry delay
+      long nextRetry = retryStrategy.nextRetry(failureCount.incrementAndGet(), startTime, type, path);
+      if (nextRetry < 0) {
+        return false;
+      }
+
+      // Schedule the retry.
+      SCHEDULER.schedule(new Runnable() {
+        @Override
+        public void run() {
+          Futures.addCallback(retryAction.get(), OperationFutureCallback.this);
+        }
+      }, nextRetry, TimeUnit.MILLISECONDS);
+
+      return true;
+    }
+  }
+}
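
For illustration, a minimal sketch (not part of this commit) of how the retrying wrapper might be used: it decorates an existing, already-started ZKClient so that retryable failures are retried with a bounded exponential backoff. The helper class and method names below are hypothetical.

import java.util.concurrent.TimeUnit;
import org.apache.twill.internal.zookeeper.FailureRetryZKClient;
import org.apache.twill.zookeeper.RetryStrategies;
import org.apache.twill.zookeeper.RetryStrategy;
import org.apache.twill.zookeeper.ZKClient;

public final class RetryWrapperSketch {
  // Wraps the given client so that retryable failures are retried with exponential backoff
  // (100 ms base, capped at 2 s), giving up after 10 failed attempts.
  public static ZKClient withRetries(ZKClient client) {
    RetryStrategy strategy = RetryStrategies.limit(
        10, RetryStrategies.exponentialDelay(100, 2000, TimeUnit.MILLISECONDS));
    return new FailureRetryZKClient(client, strategy);
  }

  private RetryWrapperSketch() {
  }
}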

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-zookeeper/src/main/java/org/apache/twill/internal/zookeeper/InMemoryZKServer.java
----------------------------------------------------------------------
diff --git a/twill-zookeeper/src/main/java/org/apache/twill/internal/zookeeper/InMemoryZKServer.java b/twill-zookeeper/src/main/java/org/apache/twill/internal/zookeeper/InMemoryZKServer.java
new file mode 100644
index 0000000..c4eed59
--- /dev/null
+++ b/twill-zookeeper/src/main/java/org/apache/twill/internal/zookeeper/InMemoryZKServer.java
@@ -0,0 +1,198 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.zookeeper;
+
+import com.google.common.base.Preconditions;
+import com.google.common.base.Throwables;
+import com.google.common.io.Files;
+import com.google.common.util.concurrent.AbstractIdleService;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.Service;
+import org.apache.zookeeper.server.ServerCnxnFactory;
+import org.apache.zookeeper.server.ZooKeeperServer;
+import org.apache.zookeeper.server.persistence.FileTxnSnapLog;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.net.UnknownHostException;
+import java.util.concurrent.Executor;
+
+/**
+ * An in-memory ZooKeeper server, mainly for use in unit tests.
+ */
+public final class InMemoryZKServer implements Service {
+
+  private static final Logger LOG = LoggerFactory.getLogger(InMemoryZKServer.class);
+
+  private final File dataDir;
+  private final int tickTime;
+  private final boolean autoClean;
+  private final int port;
+  private final Service delegateService = new AbstractIdleService() {
+    @Override
+    protected void startUp() throws Exception {
+      ZooKeeperServer zkServer = new ZooKeeperServer();
+      FileTxnSnapLog ftxn = new FileTxnSnapLog(dataDir, dataDir);
+      zkServer.setTxnLogFactory(ftxn);
+      zkServer.setTickTime(tickTime);
+
+      factory = ServerCnxnFactory.createFactory();
+      factory.configure(getAddress(port), -1);
+      factory.startup(zkServer);
+
+      LOG.info("In memory ZK started: " + getConnectionStr());
+    }
+
+    @Override
+    protected void shutDown() throws Exception {
+      try {
+        factory.shutdown();
+      } finally {
+        if (autoClean) {
+          cleanDir(dataDir);
+        }
+      }
+    }
+  };
+
+  private ServerCnxnFactory factory;
+
+  public static Builder builder() {
+    return new Builder();
+  }
+
+  private InMemoryZKServer(File dataDir, int tickTime, boolean autoClean, int port) {
+    if (dataDir == null) {
+      dataDir = Files.createTempDir();
+      autoClean = true;
+    } else {
+      Preconditions.checkArgument(dataDir.isDirectory() || dataDir.mkdirs() || dataDir.isDirectory());
+    }
+
+    this.dataDir = dataDir;
+    this.tickTime = tickTime;
+    this.autoClean = autoClean;
+    this.port = port;
+  }
+
+  public String getConnectionStr() {
+    InetSocketAddress addr = factory.getLocalAddress();
+    return String.format("%s:%d", addr.getHostName(), addr.getPort());
+  }
+
+  public InetSocketAddress getLocalAddress() {
+    return factory.getLocalAddress();
+  }
+
+  private InetSocketAddress getAddress(int port) {
+    try {
+//      return new InetSocketAddress(InetAddress.getByAddress(new byte[] {127, 0, 0, 1}), port < 0 ? 0 : port);
+      return new InetSocketAddress(InetAddress.getLocalHost(), port < 0 ? 0 : port);
+    } catch (UnknownHostException e) {
+      throw Throwables.propagate(e);
+    }
+  }
+
+  private void cleanDir(File dir) {
+    File[] files = dir.listFiles();
+    if (files == null) {
+      return;
+    }
+    for (File file : files) {
+      if (file.isDirectory()) {
+        cleanDir(file);
+      }
+      file.delete();
+    }
+  }
+
+  @Override
+  public ListenableFuture<State> start() {
+    return delegateService.start();
+  }
+
+  @Override
+  public State startAndWait() {
+    return delegateService.startAndWait();
+  }
+
+  @Override
+  public boolean isRunning() {
+    return delegateService.isRunning();
+  }
+
+  @Override
+  public State state() {
+    return delegateService.state();
+  }
+
+  @Override
+  public ListenableFuture<State> stop() {
+    return delegateService.stop();
+  }
+
+  @Override
+  public State stopAndWait() {
+    return delegateService.stopAndWait();
+  }
+
+  @Override
+  public void addListener(Listener listener, Executor executor) {
+    delegateService.addListener(listener, executor);
+  }
+
+  /**
+   * Builder for creating instance of {@link InMemoryZKServer}.
+   */
+  public static final class Builder {
+    private File dataDir;
+    private boolean autoCleanDataDir = false;
+    private int tickTime = ZooKeeperServer.DEFAULT_TICK_TIME;
+    private int port = -1;
+
+    public Builder setDataDir(File dataDir) {
+      this.dataDir = dataDir;
+      return this;
+    }
+
+    public Builder setAutoCleanDataDir(boolean auto) {
+      this.autoCleanDataDir = auto;
+      return this;
+    }
+
+    public Builder setTickTime(int tickTime) {
+      this.tickTime = tickTime;
+      return this;
+    }
+
+    public Builder setPort(int port) {
+      this.port = port;
+      return this;
+    }
+
+    public InMemoryZKServer build() {
+      return new InMemoryZKServer(dataDir, tickTime, autoCleanDataDir, port);
+    }
+
+    private Builder() {
+    }
+  }
+}
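
For illustration, a minimal sketch (not part of this commit) of running the in-memory server in a test; the class name is hypothetical. With no data directory configured, a temporary directory is used and cleaned up on shutdown.

import org.apache.twill.internal.zookeeper.InMemoryZKServer;

public final class InMemoryZKSketch {
  public static void main(String[] args) {
    // Start an in-memory ZooKeeper on an ephemeral port.
    InMemoryZKServer zkServer = InMemoryZKServer.builder().setTickTime(1000).build();
    zkServer.startAndWait();
    try {
      System.out.println("ZooKeeper running at " + zkServer.getConnectionStr());
      // ... run test code against the connection string ...
    } finally {
      zkServer.stopAndWait();
    }
  }
}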

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-zookeeper/src/main/java/org/apache/twill/internal/zookeeper/KillZKSession.java
----------------------------------------------------------------------
diff --git a/twill-zookeeper/src/main/java/org/apache/twill/internal/zookeeper/KillZKSession.java b/twill-zookeeper/src/main/java/org/apache/twill/internal/zookeeper/KillZKSession.java
new file mode 100644
index 0000000..bc01f08
--- /dev/null
+++ b/twill-zookeeper/src/main/java/org/apache/twill/internal/zookeeper/KillZKSession.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.zookeeper;
+
+import com.google.common.base.Preconditions;
+import org.apache.zookeeper.WatchedEvent;
+import org.apache.zookeeper.Watcher;
+import org.apache.zookeeper.ZooKeeper;
+
+import java.io.IOException;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Utility class for killing ZK client to simulate failures during testing.
+ */
+public final class KillZKSession {
+
+  /**
+   * Utility classes should not have a public or default constructor,
+   * hence the constructor is made private.
+   */
+  private KillZKSession() {}
+
+  /**
+   * Kills a ZooKeeper client to simulate failure scenarios during testing.
+   * The caller provides the maximum amount of time to wait before the attempt
+   * to kill the client is considered a failure.
+   *
+   * @param client that needs to be killed.
+   * @param connectionString of Quorum
+   * @param maxMs time in milliseconds specifying the maximum time to wait for the client to be killed.
+   * @throws IOException When there is an IO error.
+   * @throws InterruptedException When call has been interrupted.
+   */
+  public static void kill(ZooKeeper client, String connectionString,
+                          int maxMs) throws IOException, InterruptedException {
+    final CountDownLatch latch = new CountDownLatch(1);
+    ZooKeeper zk = new ZooKeeper(connectionString, maxMs, new Watcher() {
+      @Override
+      public void process(WatchedEvent event) {
+        if (event.getState() == Event.KeeperState.SyncConnected) {
+          latch.countDown();
+        }
+      }
+    }, client.getSessionId(), client.getSessionPasswd());
+
+    try {
+      Preconditions.checkState(latch.await(maxMs, TimeUnit.MILLISECONDS), "Fail to kill ZK connection.");
+    } finally {
+      zk.close();
+    }
+  }
+}
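
For illustration, a minimal sketch (not part of this commit) of how a test might force a session expiry for a started ZKClientService; the helper class and method names are hypothetical.

import org.apache.twill.internal.zookeeper.KillZKSession;
import org.apache.twill.zookeeper.ZKClientService;

public final class SessionExpirySketch {
  // Expires the session of the given, already-started client, waiting at most 10 seconds.
  public static void expireSession(ZKClientService zkClient) throws Exception {
    KillZKSession.kill(zkClient.getZooKeeperSupplier().get(), zkClient.getConnectString(), 10000);
  }

  private SessionExpirySketch() {
  }
}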

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-zookeeper/src/main/java/org/apache/twill/internal/zookeeper/NamespaceZKClient.java
----------------------------------------------------------------------
diff --git a/twill-zookeeper/src/main/java/org/apache/twill/internal/zookeeper/NamespaceZKClient.java b/twill-zookeeper/src/main/java/org/apache/twill/internal/zookeeper/NamespaceZKClient.java
new file mode 100644
index 0000000..1a82e4b
--- /dev/null
+++ b/twill-zookeeper/src/main/java/org/apache/twill/internal/zookeeper/NamespaceZKClient.java
@@ -0,0 +1,163 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.zookeeper;
+
+import org.apache.twill.common.Threads;
+import org.apache.twill.zookeeper.ForwardingZKClient;
+import org.apache.twill.zookeeper.NodeChildren;
+import org.apache.twill.zookeeper.NodeData;
+import org.apache.twill.zookeeper.OperationFuture;
+import org.apache.twill.zookeeper.ZKClient;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import org.apache.zookeeper.CreateMode;
+import org.apache.zookeeper.Watcher;
+import org.apache.zookeeper.data.Stat;
+
+import javax.annotation.Nullable;
+
+/**
+ * A {@link ZKClient} that prefixes every path with a namespace.
+ */
+public final class NamespaceZKClient extends ForwardingZKClient {
+  // This class extends ForwardingZKClient but overrides every method so that the
+  // ZKClientServices delegation logic works.
+
+  private final String namespace;
+  private final ZKClient delegate;
+  private final String connectString;
+
+  public NamespaceZKClient(ZKClient delegate, String namespace) {
+    super(delegate);
+    this.namespace = namespace;
+    this.delegate = delegate;
+    this.connectString = delegate.getConnectString() + namespace;
+  }
+
+  @Override
+  public Long getSessionId() {
+    return delegate.getSessionId();
+  }
+
+  @Override
+  public String getConnectString() {
+    return connectString;
+  }
+
+  @Override
+  public void addConnectionWatcher(Watcher watcher) {
+    delegate.addConnectionWatcher(watcher);
+  }
+
+  @Override
+  public OperationFuture<String> create(String path, @Nullable byte[] data, CreateMode createMode) {
+    return relayPath(delegate.create(namespace + path, data, createMode), this.<String>createFuture(path));
+  }
+
+  @Override
+  public OperationFuture<String> create(String path, @Nullable byte[] data, CreateMode createMode,
+                                        boolean createParent) {
+    return relayPath(delegate.create(namespace + path, data, createMode, createParent),
+                     this.<String>createFuture(path));
+  }
+
+  @Override
+  public OperationFuture<Stat> exists(String path) {
+    return relayFuture(delegate.exists(namespace + path), this.<Stat>createFuture(path));
+  }
+
+  @Override
+  public OperationFuture<Stat> exists(String path, @Nullable Watcher watcher) {
+    return relayFuture(delegate.exists(namespace + path, watcher), this.<Stat>createFuture(path));
+  }
+
+  @Override
+  public OperationFuture<NodeChildren> getChildren(String path) {
+    return relayFuture(delegate.getChildren(namespace + path), this.<NodeChildren>createFuture(path));
+  }
+
+  @Override
+  public OperationFuture<NodeChildren> getChildren(String path, @Nullable Watcher watcher) {
+    return relayFuture(delegate.getChildren(namespace + path, watcher), this.<NodeChildren>createFuture(path));
+  }
+
+  @Override
+  public OperationFuture<NodeData> getData(String path) {
+    return relayFuture(delegate.getData(namespace + path), this.<NodeData>createFuture(path));
+  }
+
+  @Override
+  public OperationFuture<NodeData> getData(String path, @Nullable Watcher watcher) {
+    return relayFuture(delegate.getData(namespace + path, watcher), this.<NodeData>createFuture(path));
+  }
+
+  @Override
+  public OperationFuture<Stat> setData(String path, byte[] data) {
+    return relayFuture(delegate.setData(namespace + path, data), this.<Stat>createFuture(path));
+  }
+
+  @Override
+  public OperationFuture<Stat> setData(String dataPath, byte[] data, int version) {
+    return relayFuture(delegate.setData(namespace + dataPath, data, version), this.<Stat>createFuture(dataPath));
+  }
+
+  @Override
+  public OperationFuture<String> delete(String path) {
+    return relayPath(delegate.delete(namespace + path), this.<String>createFuture(path));
+  }
+
+  @Override
+  public OperationFuture<String> delete(String deletePath, int version) {
+    return relayPath(delegate.delete(namespace + deletePath, version), this.<String>createFuture(deletePath));
+  }
+
+  private <V> SettableOperationFuture<V> createFuture(String path) {
+    return SettableOperationFuture.create(namespace + path, Threads.SAME_THREAD_EXECUTOR);
+  }
+
+  private <V> OperationFuture<V> relayFuture(final OperationFuture<V> from, final SettableOperationFuture<V> to) {
+    Futures.addCallback(from, new FutureCallback<V>() {
+      @Override
+      public void onSuccess(V result) {
+        to.set(result);
+      }
+
+      @Override
+      public void onFailure(Throwable t) {
+        to.setException(t);
+      }
+    });
+    return to;
+  }
+
+  private OperationFuture<String> relayPath(final OperationFuture<String> from,
+                                            final SettableOperationFuture<String> to) {
+    from.addListener(new Runnable() {
+      @Override
+      public void run() {
+        try {
+          String path = from.get();
+          to.set(path.substring(namespace.length()));
+        } catch (Exception e) {
+          to.setException(e.getCause());
+        }
+      }
+    }, Threads.SAME_THREAD_EXECUTOR);
+    return to;
+  }
+}
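
For illustration, a minimal sketch (not part of this commit) of the namespacing behaviour; the class, method, and paths below are hypothetical. Every path passed to the wrapped client is prefixed with the namespace before being sent to the ensemble.

import org.apache.twill.internal.zookeeper.NamespaceZKClient;
import org.apache.twill.zookeeper.ZKClient;
import org.apache.zookeeper.CreateMode;

public final class NamespaceSketch {
  // The create call below asynchronously creates the node "/myapp/state" on the ensemble,
  // while callers keep working with the shorter relative path "/state".
  public static void createState(ZKClient root) {
    ZKClient scoped = new NamespaceZKClient(root, "/myapp");
    scoped.create("/state", null, CreateMode.PERSISTENT);
  }

  private NamespaceSketch() {
  }
}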

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-zookeeper/src/main/java/org/apache/twill/internal/zookeeper/RetryUtils.java
----------------------------------------------------------------------
diff --git a/twill-zookeeper/src/main/java/org/apache/twill/internal/zookeeper/RetryUtils.java b/twill-zookeeper/src/main/java/org/apache/twill/internal/zookeeper/RetryUtils.java
new file mode 100644
index 0000000..fb42491
--- /dev/null
+++ b/twill-zookeeper/src/main/java/org/apache/twill/internal/zookeeper/RetryUtils.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.zookeeper;
+
+import org.apache.zookeeper.KeeperException;
+
+/**
+ * Utility class that helps determine whether an operation can be retried.
+ */
+final class RetryUtils {
+
+  /**
+   * Tells if a given operation error code can be retried or not.
+   * @param code The error code of the operation.
+   * @return {@code true} if the operation can be retried.
+   */
+  public static boolean canRetry(KeeperException.Code code) {
+    return (code == KeeperException.Code.CONNECTIONLOSS
+          || code == KeeperException.Code.OPERATIONTIMEOUT
+          || code == KeeperException.Code.SESSIONEXPIRED
+          || code == KeeperException.Code.SESSIONMOVED);
+  }
+
+  /**
+   * Tells if a given operation exception can be retried or not.
+   * @param t The exception raised by an operation.
+   * @return {@code true} if the operation can be retried.
+   */
+  public static boolean canRetry(Throwable t) {
+    return t instanceof KeeperException && canRetry(((KeeperException) t).code());
+  }
+
+  private RetryUtils() {
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-zookeeper/src/main/java/org/apache/twill/internal/zookeeper/RewatchOnExpireWatcher.java
----------------------------------------------------------------------
diff --git a/twill-zookeeper/src/main/java/org/apache/twill/internal/zookeeper/RewatchOnExpireWatcher.java b/twill-zookeeper/src/main/java/org/apache/twill/internal/zookeeper/RewatchOnExpireWatcher.java
new file mode 100644
index 0000000..181ca2b
--- /dev/null
+++ b/twill-zookeeper/src/main/java/org/apache/twill/internal/zookeeper/RewatchOnExpireWatcher.java
@@ -0,0 +1,207 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.zookeeper;
+
+import org.apache.twill.zookeeper.NodeChildren;
+import org.apache.twill.zookeeper.NodeData;
+import org.apache.twill.zookeeper.ZKClient;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import org.apache.zookeeper.KeeperException;
+import org.apache.zookeeper.WatchedEvent;
+import org.apache.zookeeper.Watcher;
+import org.apache.zookeeper.data.Stat;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.concurrent.atomic.AtomicMarkableReference;
+
+/**
+ * A wrapper for {@link Watcher} that automatically re-sets the watch after session expiry, retrying until it succeeds.
+ */
+final class RewatchOnExpireWatcher implements Watcher {
+
+  private static final Logger LOG = LoggerFactory.getLogger(RewatchOnExpireWatcher.class);
+
+  enum ActionType {
+    EXISTS,
+    CHILDREN,
+    DATA
+  }
+
+  private final ZKClient client;
+  private final ActionType actionType;
+  private final String path;
+  private final Watcher delegate;
+  private final AtomicMarkableReference<Object> lastResult;
+
+  RewatchOnExpireWatcher(ZKClient client, ActionType actionType, String path, Watcher delegate) {
+    this.client = client;
+    this.actionType = actionType;
+    this.path = path;
+    this.delegate = delegate;
+    this.lastResult = new AtomicMarkableReference<Object>(null, false);
+  }
+
+  /**
+   * Sets the result from the operation that causes this watcher to be set.
+   */
+  void setLastResult(Object result) {
+    lastResult.compareAndSet(null, result, false, true);
+  }
+
+  @Override
+  public void process(WatchedEvent event) {
+    if (delegate != null && event.getType() != Event.EventType.None) {
+      try {
+        delegate.process(event);
+      } catch (Throwable t) {
+        LOG.error("Watcher throws exception.", t);
+      }
+    }
+
+    if (event.getState() != Event.KeeperState.Expired) {
+      return;
+    }
+    switch (actionType) {
+      case EXISTS:
+        exists();
+        break;
+      case CHILDREN:
+        children();
+        break;
+      case DATA:
+        data();
+        break;
+    }
+  }
+
+  private void exists() {
+    Futures.addCallback(client.exists(path, this), new FutureCallback<Stat>() {
+      @Override
+      public void onSuccess(Stat stat) {
+        // Since we know all callbacks and watchers are triggered from a single event thread, there is no race condition.
+        Object oldResult = lastResult.getReference();
+        lastResult.compareAndSet(oldResult, null, true, false);
+
+        if (stat != oldResult && (stat == null || !stat.equals(oldResult))) {
+          if (stat == null) {
+            // previous stat is not null, means node deleted
+            process(new WatchedEvent(Event.EventType.NodeDeleted, Event.KeeperState.SyncConnected, path));
+          } else if (oldResult == null) {
+            // previous stat is null, means node created
+            process(new WatchedEvent(Event.EventType.NodeCreated, Event.KeeperState.SyncConnected, path));
+          } else {
+            // Otherwise, something changed on the node
+            process(new WatchedEvent(Event.EventType.NodeDataChanged, Event.KeeperState.SyncConnected, path));
+          }
+        }
+      }
+
+      @Override
+      public void onFailure(Throwable t) {
+        if (RetryUtils.canRetry(t)) {
+          exists();
+        } else {
+          lastResult.set(null, false);
+          LOG.error("Fail to re-set watch on exists for path " + path, t);
+        }
+      }
+    });
+  }
+
+  private void children() {
+    Futures.addCallback(client.getChildren(path, this), new FutureCallback<NodeChildren>() {
+      @Override
+      public void onSuccess(NodeChildren result) {
+        Object oldResult = lastResult.getReference();
+        lastResult.compareAndSet(oldResult, null, true, false);
+
+        if (result.equals(oldResult)) {
+          return;
+        }
+
+        if (!(oldResult instanceof NodeChildren)) {
+          // Something very wrong
+          LOG.error("The same watcher has been used for different event type.");
+          return;
+        }
+
+        NodeChildren oldNodeChildren = (NodeChildren) oldResult;
+        if (!result.getChildren().equals(oldNodeChildren.getChildren())) {
+          process(new WatchedEvent(Event.EventType.NodeChildrenChanged, Event.KeeperState.SyncConnected, path));
+        } else {
+          process(new WatchedEvent(Event.EventType.NodeDataChanged, Event.KeeperState.SyncConnected, path));
+        }
+      }
+
+      @Override
+      public void onFailure(Throwable t) {
+        if (RetryUtils.canRetry(t)) {
+          children();
+          return;
+        }
+
+        lastResult.set(null, false);
+        if (t instanceof KeeperException) {
+          KeeperException.Code code = ((KeeperException) t).code();
+          if (code == KeeperException.Code.NONODE) {
+            // Node deleted
+            process(new WatchedEvent(Event.EventType.NodeDeleted, Event.KeeperState.SyncConnected, path));
+            return;
+          }
+        }
+        LOG.error("Fail to re-set watch on getChildren for path " + path, t);
+      }
+    });
+  }
+
+  private void data() {
+    Futures.addCallback(client.getData(path, this), new FutureCallback<NodeData>() {
+      @Override
+      public void onSuccess(NodeData result) {
+        Object oldResult = lastResult.getReference();
+        lastResult.compareAndSet(oldResult, null, true, false);
+
+        if (!result.equals(oldResult)) {
+          // Whenever something changed, treat it as a data change.
+          process(new WatchedEvent(Event.EventType.NodeDataChanged, Event.KeeperState.SyncConnected, path));
+        }
+      }
+
+      @Override
+      public void onFailure(Throwable t) {
+        if (RetryUtils.canRetry(t)) {
+          data();
+          return;
+        }
+
+        lastResult.set(null, false);
+        if (t instanceof KeeperException) {
+          KeeperException.Code code = ((KeeperException) t).code();
+          if (code == KeeperException.Code.NONODE) {
+            // Node deleted
+            process(new WatchedEvent(Event.EventType.NodeDeleted, Event.KeeperState.SyncConnected, path));
+            return;
+          }
+        }
+        LOG.error("Fail to re-set watch on getData for path " + path, t);
+      }
+    });
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-zookeeper/src/main/java/org/apache/twill/internal/zookeeper/RewatchOnExpireZKClient.java
----------------------------------------------------------------------
diff --git a/twill-zookeeper/src/main/java/org/apache/twill/internal/zookeeper/RewatchOnExpireZKClient.java b/twill-zookeeper/src/main/java/org/apache/twill/internal/zookeeper/RewatchOnExpireZKClient.java
new file mode 100644
index 0000000..402c153
--- /dev/null
+++ b/twill-zookeeper/src/main/java/org/apache/twill/internal/zookeeper/RewatchOnExpireZKClient.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.zookeeper;
+
+import org.apache.twill.internal.zookeeper.RewatchOnExpireWatcher.ActionType;
+import org.apache.twill.zookeeper.ForwardingZKClient;
+import org.apache.twill.zookeeper.NodeChildren;
+import org.apache.twill.zookeeper.NodeData;
+import org.apache.twill.zookeeper.OperationFuture;
+import org.apache.twill.zookeeper.ZKClient;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import org.apache.zookeeper.Watcher;
+import org.apache.zookeeper.data.Stat;
+
+/**
+ * A {@link ZKClient} that automatically re-sets watches when the session expires and the client reconnects.
+ * The rewatch logic is mainly done in {@link RewatchOnExpireWatcher}.
+ */
+public final class RewatchOnExpireZKClient extends ForwardingZKClient {
+
+  public RewatchOnExpireZKClient(ZKClient delegate) {
+    super(delegate);
+  }
+
+  @Override
+  public OperationFuture<Stat> exists(String path, Watcher watcher) {
+    final RewatchOnExpireWatcher wrappedWatcher = new RewatchOnExpireWatcher(this, ActionType.EXISTS, path, watcher);
+    OperationFuture<Stat> result = super.exists(path, wrappedWatcher);
+    Futures.addCallback(result, new FutureCallback<Stat>() {
+      @Override
+      public void onSuccess(Stat result) {
+        wrappedWatcher.setLastResult(result);
+      }
+
+      @Override
+      public void onFailure(Throwable t) {
+        // No-op
+      }
+    });
+    return result;
+  }
+
+  @Override
+  public OperationFuture<NodeChildren> getChildren(String path, Watcher watcher) {
+    final RewatchOnExpireWatcher wrappedWatcher = new RewatchOnExpireWatcher(this, ActionType.CHILDREN, path, watcher);
+    OperationFuture<NodeChildren> result = super.getChildren(path, wrappedWatcher);
+    Futures.addCallback(result, new FutureCallback<NodeChildren>() {
+      @Override
+      public void onSuccess(NodeChildren result) {
+        wrappedWatcher.setLastResult(result);
+      }
+
+      @Override
+      public void onFailure(Throwable t) {
+        // No-op
+      }
+    });
+    return result;
+  }
+
+  @Override
+  public OperationFuture<NodeData> getData(String path, Watcher watcher) {
+    final RewatchOnExpireWatcher wrappedWatcher = new RewatchOnExpireWatcher(this, ActionType.DATA, path, watcher);
+    OperationFuture<NodeData> result = super.getData(path, wrappedWatcher);
+    Futures.addCallback(result, new FutureCallback<NodeData>() {
+      @Override
+      public void onSuccess(NodeData result) {
+        wrappedWatcher.setLastResult(result);
+      }
+
+      @Override
+      public void onFailure(Throwable t) {
+        // No-op
+      }
+    });
+    return result;
+
+  }
+}
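
For illustration, a minimal sketch (not part of this commit) of watching data through the rewatching wrapper; the class and method names are hypothetical. The watch below survives session expiry because the wrapper re-establishes it after reconnection.

import org.apache.twill.internal.zookeeper.RewatchOnExpireZKClient;
import org.apache.twill.zookeeper.ZKClient;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;

public final class RewatchSketch {
  public static void watchData(ZKClient client, final String path) {
    ZKClient rewatching = new RewatchOnExpireZKClient(client);
    // The watcher keeps receiving node events even across session expiry and reconnect.
    rewatching.getData(path, new Watcher() {
      @Override
      public void process(WatchedEvent event) {
        System.out.println("Event on " + path + ": " + event.getType());
      }
    });
  }

  private RewatchSketch() {
  }
}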

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-zookeeper/src/main/java/org/apache/twill/internal/zookeeper/SettableOperationFuture.java
----------------------------------------------------------------------
diff --git a/twill-zookeeper/src/main/java/org/apache/twill/internal/zookeeper/SettableOperationFuture.java b/twill-zookeeper/src/main/java/org/apache/twill/internal/zookeeper/SettableOperationFuture.java
new file mode 100644
index 0000000..7544e56
--- /dev/null
+++ b/twill-zookeeper/src/main/java/org/apache/twill/internal/zookeeper/SettableOperationFuture.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.zookeeper;
+
+import org.apache.twill.zookeeper.OperationFuture;
+import com.google.common.util.concurrent.AbstractFuture;
+
+import javax.annotation.Nullable;
+import java.util.concurrent.Executor;
+
+/**
+ * An implementation of {@link OperationFuture} that allows setting the result directly.
+ * Also, all listener callbacks will be fired from the given executor.
+ */
+public final class SettableOperationFuture<V> extends AbstractFuture<V> implements OperationFuture<V> {
+
+  private final String requestPath;
+  private final Executor executor;
+
+  public static <V> SettableOperationFuture<V> create(String path, Executor executor) {
+    return new SettableOperationFuture<V>(path, executor);
+  }
+
+  private SettableOperationFuture(String requestPath, Executor executor) {
+    this.requestPath = requestPath;
+    this.executor = executor;
+  }
+
+  @Override
+  public String getRequestPath() {
+    return requestPath;
+  }
+
+  @Override
+  public void addListener(final Runnable listener, final Executor exec) {
+    super.addListener(new Runnable() {
+      @Override
+      public void run() {
+        exec.execute(listener);
+      }
+    }, executor);
+  }
+
+  @Override
+  public boolean setException(Throwable throwable) {
+    return super.setException(throwable);
+  }
+
+  @Override
+  public boolean set(@Nullable V value) {
+    return super.set(value);
+  }
+}
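
For illustration, a minimal sketch (not part of this commit) showing how a result can be set on the future directly; the class and method names, and the "/foo" path, are hypothetical.

import org.apache.twill.common.Threads;
import org.apache.twill.internal.zookeeper.SettableOperationFuture;
import org.apache.twill.zookeeper.OperationFuture;

public final class SettableFutureSketch {
  // Creates a future for the request path "/foo" whose listeners run on the thread that
  // completes the future, then completes it immediately with the given value.
  public static OperationFuture<String> immediate(String value) {
    SettableOperationFuture<String> future =
      SettableOperationFuture.create("/foo", Threads.SAME_THREAD_EXECUTOR);
    future.set(value);
    return future;
  }

  private SettableFutureSketch() {
  }
}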

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-zookeeper/src/main/java/org/apache/twill/internal/zookeeper/package-info.java
----------------------------------------------------------------------
diff --git a/twill-zookeeper/src/main/java/org/apache/twill/internal/zookeeper/package-info.java b/twill-zookeeper/src/main/java/org/apache/twill/internal/zookeeper/package-info.java
new file mode 100644
index 0000000..d2afa11
--- /dev/null
+++ b/twill-zookeeper/src/main/java/org/apache/twill/internal/zookeeper/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Internal classes for zookeeper.
+ */
+package org.apache.twill.internal.zookeeper;

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-zookeeper/src/main/java/org/apache/twill/zookeeper/ForwardingZKClient.java
----------------------------------------------------------------------
diff --git a/twill-zookeeper/src/main/java/org/apache/twill/zookeeper/ForwardingZKClient.java b/twill-zookeeper/src/main/java/org/apache/twill/zookeeper/ForwardingZKClient.java
new file mode 100644
index 0000000..3f3003d
--- /dev/null
+++ b/twill-zookeeper/src/main/java/org/apache/twill/zookeeper/ForwardingZKClient.java
@@ -0,0 +1,116 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.zookeeper;
+
+import org.apache.zookeeper.CreateMode;
+import org.apache.zookeeper.Watcher;
+import org.apache.zookeeper.data.Stat;
+
+import javax.annotation.Nullable;
+
+/**
+ * A {@link ZKClient} that forwards all calls to a delegate {@link ZKClient}.
+ */
+public abstract class ForwardingZKClient implements ZKClient {
+
+  private final ZKClient delegate;
+
+  protected ForwardingZKClient(ZKClient delegate) {
+    this.delegate = delegate;
+  }
+
+  public final ZKClient getDelegate() {
+    return delegate;
+  }
+
+  @Override
+  public Long getSessionId() {
+    return delegate.getSessionId();
+  }
+
+  @Override
+  public String getConnectString() {
+    return delegate.getConnectString();
+  }
+
+  @Override
+  public void addConnectionWatcher(Watcher watcher) {
+    delegate.addConnectionWatcher(watcher);
+  }
+
+  @Override
+  public OperationFuture<String> create(String path, @Nullable byte[] data, CreateMode createMode) {
+    return create(path, data, createMode, true);
+  }
+
+  @Override
+  public OperationFuture<String> create(String path, @Nullable byte[] data, CreateMode createMode,
+                                        boolean createParent) {
+    return delegate.create(path, data, createMode, createParent);
+  }
+
+  @Override
+  public OperationFuture<Stat> exists(String path) {
+    return exists(path, null);
+  }
+
+  @Override
+  public OperationFuture<Stat> exists(String path, @Nullable Watcher watcher) {
+    return delegate.exists(path, watcher);
+  }
+
+  @Override
+  public OperationFuture<NodeChildren> getChildren(String path) {
+    return getChildren(path, null);
+  }
+
+  @Override
+  public OperationFuture<NodeChildren> getChildren(String path, @Nullable Watcher watcher) {
+    return delegate.getChildren(path, watcher);
+  }
+
+  @Override
+  public OperationFuture<NodeData> getData(String path) {
+    return getData(path, null);
+  }
+
+  @Override
+  public OperationFuture<NodeData> getData(String path, @Nullable Watcher watcher) {
+    return delegate.getData(path, watcher);
+  }
+
+  @Override
+  public OperationFuture<Stat> setData(String path, byte[] data) {
+    return setData(path, data, -1);
+  }
+
+  @Override
+  public OperationFuture<Stat> setData(String dataPath, byte[] data, int version) {
+    return delegate.setData(dataPath, data, version);
+  }
+
+  @Override
+  public OperationFuture<String> delete(String path) {
+    return delete(path, -1);
+  }
+
+  @Override
+  public OperationFuture<String> delete(String deletePath, int version) {
+    return delegate.delete(deletePath, version);
+  }
+}
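
For illustration, a minimal sketch (not part of this commit) of a decorator built on ForwardingZKClient; the class name is hypothetical. Only the four-argument create is overridden, since the shorter overload funnels into it.

import javax.annotation.Nullable;
import org.apache.twill.zookeeper.ForwardingZKClient;
import org.apache.twill.zookeeper.OperationFuture;
import org.apache.twill.zookeeper.ZKClient;
import org.apache.zookeeper.CreateMode;

public final class LoggingZKClient extends ForwardingZKClient {

  public LoggingZKClient(ZKClient delegate) {
    super(delegate);
  }

  @Override
  public OperationFuture<String> create(String path, @Nullable byte[] data,
                                        CreateMode createMode, boolean createParent) {
    // Log the path, then forward to the delegate; all other operations keep the default forwarding.
    System.out.println("create " + path);
    return super.create(path, data, createMode, createParent);
  }
}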

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-zookeeper/src/main/java/org/apache/twill/zookeeper/ForwardingZKClientService.java
----------------------------------------------------------------------
diff --git a/twill-zookeeper/src/main/java/org/apache/twill/zookeeper/ForwardingZKClientService.java b/twill-zookeeper/src/main/java/org/apache/twill/zookeeper/ForwardingZKClientService.java
new file mode 100644
index 0000000..10391b2
--- /dev/null
+++ b/twill-zookeeper/src/main/java/org/apache/twill/zookeeper/ForwardingZKClientService.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.zookeeper;
+
+import com.google.common.base.Supplier;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.zookeeper.ZooKeeper;
+
+import java.util.concurrent.Executor;
+
+/**
+ * A {@link ZKClientService} that forwards all calls to a delegate {@link ZKClientService}.
+ */
+public abstract class ForwardingZKClientService extends ForwardingZKClient implements ZKClientService {
+
+  private final ZKClientService delegate;
+
+  protected ForwardingZKClientService(ZKClientService delegate) {
+    super(delegate);
+    this.delegate = delegate;
+  }
+
+  @Override
+  public Supplier<ZooKeeper> getZooKeeperSupplier() {
+    return delegate.getZooKeeperSupplier();
+  }
+
+  @Override
+  public ListenableFuture<State> start() {
+    return delegate.start();
+  }
+
+  @Override
+  public State startAndWait() {
+    return Futures.getUnchecked(start());
+  }
+
+  @Override
+  public boolean isRunning() {
+    return delegate.isRunning();
+  }
+
+  @Override
+  public State state() {
+    return delegate.state();
+  }
+
+  @Override
+  public ListenableFuture<State> stop() {
+    return delegate.stop();
+  }
+
+  @Override
+  public State stopAndWait() {
+    return Futures.getUnchecked(stop());
+  }
+
+  @Override
+  public void addListener(Listener listener, Executor executor) {
+    delegate.addListener(listener, executor);
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-zookeeper/src/main/java/org/apache/twill/zookeeper/NodeChildren.java
----------------------------------------------------------------------
diff --git a/twill-zookeeper/src/main/java/org/apache/twill/zookeeper/NodeChildren.java b/twill-zookeeper/src/main/java/org/apache/twill/zookeeper/NodeChildren.java
new file mode 100644
index 0000000..b432c01
--- /dev/null
+++ b/twill-zookeeper/src/main/java/org/apache/twill/zookeeper/NodeChildren.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.zookeeper;
+
+import org.apache.zookeeper.data.Stat;
+
+import java.util.List;
+
+/**
+ * Represents the result of a call to the {@link ZKClientService#getChildren(String, org.apache.zookeeper.Watcher)} method.
+ */
+public interface NodeChildren {
+
+  /**
+   * @return The {@link Stat} of the node.
+   */
+  Stat getStat();
+
+  /**
+   * @return List of children node names.
+   */
+  List<String> getChildren();
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-zookeeper/src/main/java/org/apache/twill/zookeeper/NodeData.java
----------------------------------------------------------------------
diff --git a/twill-zookeeper/src/main/java/org/apache/twill/zookeeper/NodeData.java b/twill-zookeeper/src/main/java/org/apache/twill/zookeeper/NodeData.java
new file mode 100644
index 0000000..ac15957
--- /dev/null
+++ b/twill-zookeeper/src/main/java/org/apache/twill/zookeeper/NodeData.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.zookeeper;
+
+import org.apache.zookeeper.data.Stat;
+
+import javax.annotation.Nullable;
+
+/**
+ * Represents the result of a call to {@link ZKClientService#getData(String, org.apache.zookeeper.Watcher)}.
+ */
+public interface NodeData {
+
+  /**
+   * @return The {@link Stat} of the node.
+   */
+  Stat getStat();
+
+  /**
+   * @return Data stored in the node, or {@code null} if there is no data.
+   */
+  @Nullable
+  byte[] getData();
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-zookeeper/src/main/java/org/apache/twill/zookeeper/OperationFuture.java
----------------------------------------------------------------------
diff --git a/twill-zookeeper/src/main/java/org/apache/twill/zookeeper/OperationFuture.java b/twill-zookeeper/src/main/java/org/apache/twill/zookeeper/OperationFuture.java
new file mode 100644
index 0000000..fafaa7a
--- /dev/null
+++ b/twill-zookeeper/src/main/java/org/apache/twill/zookeeper/OperationFuture.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.zookeeper;
+
+import com.google.common.util.concurrent.ListenableFuture;
+
+/**
+ * A {@link ListenableFuture} that also provides the requested path for an operation.
+ *
+ * @param <V> The result type returned by this Future's {@link #get()} method.
+ */
+public interface OperationFuture<V> extends ListenableFuture<V> {
+
+  /**
+   * @return The path being requested for the ZooKeeper operation.
+   */
+  String getRequestPath();
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-zookeeper/src/main/java/org/apache/twill/zookeeper/RetryStrategies.java
----------------------------------------------------------------------
diff --git a/twill-zookeeper/src/main/java/org/apache/twill/zookeeper/RetryStrategies.java b/twill-zookeeper/src/main/java/org/apache/twill/zookeeper/RetryStrategies.java
new file mode 100644
index 0000000..56474b7
--- /dev/null
+++ b/twill-zookeeper/src/main/java/org/apache/twill/zookeeper/RetryStrategies.java
@@ -0,0 +1,117 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.zookeeper;
+
+import com.google.common.base.Preconditions;
+
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Factory for creating common {@link RetryStrategy} implementations.
+ */
+public final class RetryStrategies {
+
+  /**
+   * @return A {@link RetryStrategy} that doesn't do any retry.
+   */
+  public static RetryStrategy noRetry() {
+    return new RetryStrategy() {
+      @Override
+      public long nextRetry(int failureCount, long startTime, OperationType type, String path) {
+        return -1;
+      }
+    };
+  }
+
+  /**
+   * Creates a {@link RetryStrategy} that retries maximum given number of times, with the actual
+   * delay behavior delegated to another {@link RetryStrategy}.
+   * @param limit Maximum number of retries allowed.
+   * @param strategy When failure count is less than or equal to the limit, this strategy will be called.
+   * @return A {@link RetryStrategy}.
+   */
+  public static RetryStrategy limit(final int limit, final RetryStrategy strategy) {
+    Preconditions.checkArgument(limit >= 0, "limit must be >= 0");
+    return new RetryStrategy() {
+      @Override
+      public long nextRetry(int failureCount, long startTime, OperationType type, String path) {
+        return (failureCount <= limit) ? strategy.nextRetry(failureCount, startTime, type, path) : -1L;
+      }
+    };
+  }
+
+  /**
+   * Creates a {@link RetryStrategy} that imposes a fixed delay between retries.
+   * @param delay delay time
+   * @param delayUnit {@link TimeUnit} for the delay.
+   * @return A {@link RetryStrategy}.
+   */
+  public static RetryStrategy fixDelay(final long delay, final TimeUnit delayUnit) {
+    Preconditions.checkArgument(delay >= 0, "delay must be >= 0");
+    return new RetryStrategy() {
+      @Override
+      public long nextRetry(int failureCount, long startTime, OperationType type, String path) {
+        return TimeUnit.MILLISECONDS.convert(delay, delayUnit);
+      }
+    };
+  }
+
+  /**
+   * Creates a {@link RetryStrategy} that increases the delay exponentially between retries.
+   * @param baseDelay delay to start with.
+   * @param maxDelay cap of the delay.
+   * @param delayUnit {@link TimeUnit} for the delays.
+   * @return A {@link RetryStrategy}.
+   */
+  public static RetryStrategy exponentialDelay(final long baseDelay, final long maxDelay, final TimeUnit delayUnit) {
+    Preconditions.checkArgument(baseDelay >= 0, "base delay must be >= 0");
+    Preconditions.checkArgument(maxDelay >= 0, "max delay must be >= 0");
+    return new RetryStrategy() {
+      @Override
+      public long nextRetry(int failureCount, long startTime, OperationType type, String path) {
+        long power = failureCount > Long.SIZE ? Long.MAX_VALUE : (1L << (failureCount - 1));
+        long delay = Math.min(baseDelay * power, maxDelay);
+        delay = delay < 0 ? maxDelay : delay;
+        return TimeUnit.MILLISECONDS.convert(delay, delayUnit);
+      }
+    };
+  }
+
+  /**
+   * Creates a {@link RetryStrategy} that retries until a maximum amount of time has passed since the request,
+   * with the actual delay behavior delegated to another {@link RetryStrategy}.
+   * @param maxElapseTime Maximum amount of time until giving up retry.
+   * @param timeUnit {@link TimeUnit} for the max elapse time.
+   * @param strategy When time elapsed is less than or equal to the limit, this strategy will be called.
+   * @return A {@link RetryStrategy}.
+   */
+  public static RetryStrategy timeLimit(long maxElapseTime, TimeUnit timeUnit, final RetryStrategy strategy) {
+    Preconditions.checkArgument(maxElapseTime >= 0, "max elapse time must be >= 0");
+    final long maxElapseMs = TimeUnit.MILLISECONDS.convert(maxElapseTime, timeUnit);
+    return new RetryStrategy() {
+      @Override
+      public long nextRetry(int failureCount, long startTime, OperationType type, String path) {
+        long elapseTime = System.currentTimeMillis() - startTime;
+        return elapseTime <= maxElapseMs ? strategy.nextRetry(failureCount, startTime, type, path) : -1L;
+      }
+    };
+  }
+
+  private RetryStrategies() {
+  }
+}
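
A minimal sketch of how these factory methods compose; the delay values, attempt counts and path below are illustrative assumptions, not taken from this commit:

    import java.util.concurrent.TimeUnit;

    import org.apache.twill.zookeeper.RetryStrategies;
    import org.apache.twill.zookeeper.RetryStrategy;

    public final class RetryStrategyExample {
      public static void main(String[] args) {
        // Exponential backoff starting at 100 ms, capped at 5 seconds, limited to 10 attempts.
        RetryStrategy strategy = RetryStrategies.limit(
            10, RetryStrategies.exponentialDelay(100, 5000, TimeUnit.MILLISECONDS));

        // nextRetry returns the delay in milliseconds, or a negative value to abort.
        long delay = strategy.nextRetry(3, System.currentTimeMillis(),
                                        RetryStrategy.OperationType.CREATE, "/example");
        System.out.println("Delay before retry #3: " + delay + " ms");   // prints 400 ms
      }
    }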

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-zookeeper/src/main/java/org/apache/twill/zookeeper/RetryStrategy.java
----------------------------------------------------------------------
diff --git a/twill-zookeeper/src/main/java/org/apache/twill/zookeeper/RetryStrategy.java b/twill-zookeeper/src/main/java/org/apache/twill/zookeeper/RetryStrategy.java
new file mode 100644
index 0000000..3301e8a
--- /dev/null
+++ b/twill-zookeeper/src/main/java/org/apache/twill/zookeeper/RetryStrategy.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.zookeeper;
+
+/**
+ * Provides strategy to use for operation retries.
+ */
+public interface RetryStrategy {
+
+  /**
+   * Defines ZooKeeper operation type that triggers retry.
+   */
+  enum OperationType {
+    CREATE,
+    EXISTS,
+    GET_CHILDREN,
+    GET_DATA,
+    SET_DATA,
+    DELETE
+  }
+
+  /**
+   * Returns the number of milliseconds to wait before retrying the operation.
+   *
+   * @param failureCount Number of times that the request has failed.
+   * @param startTime Timestamp in milliseconds at which the request started.
+   * @param type Type of operation being performed.
+   * @param path The path that the operation is acting on.
+   * @return Number of milliseconds to wait before retrying the operation. Returning {@code 0} means
+   *         retry immediately, while a negative value means abort the operation.
+   */
+  long nextRetry(int failureCount, long startTime, OperationType type, String path);
+}
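
A custom strategy can also be written directly against this interface. The following is a purely illustrative sketch that retries only CREATE operations with a fixed 500 ms pause and aborts everything else:

    import org.apache.twill.zookeeper.RetryStrategy;

    public final class CreateOnlyRetryStrategy implements RetryStrategy {
      @Override
      public long nextRetry(int failureCount, long startTime, OperationType type, String path) {
        // Retry CREATE every 500 ms; a negative return value aborts the operation.
        return type == OperationType.CREATE ? 500L : -1L;
      }
    }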

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-zookeeper/src/main/java/org/apache/twill/zookeeper/ZKClient.java
----------------------------------------------------------------------
diff --git a/twill-zookeeper/src/main/java/org/apache/twill/zookeeper/ZKClient.java b/twill-zookeeper/src/main/java/org/apache/twill/zookeeper/ZKClient.java
new file mode 100644
index 0000000..d60182e
--- /dev/null
+++ b/twill-zookeeper/src/main/java/org/apache/twill/zookeeper/ZKClient.java
@@ -0,0 +1,161 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.zookeeper;
+
+import org.apache.zookeeper.CreateMode;
+import org.apache.zookeeper.Watcher;
+import org.apache.zookeeper.data.Stat;
+
+import javax.annotation.Nullable;
+
+/**
+ * A ZooKeeper client that provides asynchronous ZooKeeper operations.
+ */
+public interface ZKClient {
+
+  /**
+   * Returns the current Zookeeper session ID of this client.
+   * If this ZKClient is not connected, {@code null} is returned.
+   */
+  Long getSessionId();
+
+  /**
+   * Returns the connection string used for connecting to Zookeeper.
+   */
+  String getConnectString();
+
+  /**
+   * Adds a {@link Watcher} that will be called whenever the connection state changes.
+   * @param watcher The watcher to set.
+   */
+  void addConnectionWatcher(Watcher watcher);
+
+  /**
+   * Same as calling
+   * {@link #create(String, byte[], org.apache.zookeeper.CreateMode, boolean) create(path, data, createMode, true)}.
+   *
+   * @see #create(String, byte[], org.apache.zookeeper.CreateMode, boolean)
+   */
+  OperationFuture<String> create(String path, @Nullable byte[] data, CreateMode createMode);
+
+  /**
+   * Creates a path in zookeeper, with given data and create mode.
+   *
+   * @param path Path to be created
+   * @param data Data to be stored in the node, or {@code null} if no data to store.
+   * @param createMode The {@link org.apache.zookeeper.CreateMode} for the node.
+   * @param createParent If {@code true} and parent nodes are missing, it will create all parent nodes as normal
+   *                     persistent nodes before creating the requested node.
+   * @return A {@link OperationFuture} that will be completed when the
+   *         creation is done. If there is error during creation, it will be reflected as error in the future.
+   */
+  OperationFuture<String> create(String path, @Nullable byte[] data, CreateMode createMode, boolean createParent);
+
+  /**
+   * Checks if the path exists. Same as calling
+   * {@link #exists(String, org.apache.zookeeper.Watcher) exists(path, null)}.
+   *
+   * @see #exists(String, org.apache.zookeeper.Watcher)
+   */
+  OperationFuture<Stat> exists(String path);
+
+  /**
+   * Checks if the given path exists and leaves a watcher on the node for watching creation/deletion/data changes
+   * on the node.
+   *
+   * @param path The path to check for existence.
+   * @param watcher Watcher for watching changes, or {@code null} if no watcher to set.
+   * @return A {@link OperationFuture} that will be completed when the exists check is done. If the path
+   *         exists, the node {@link Stat} is set into the future. If the path doesn't exist,
+   *         a {@code null} value is set into the future.
+   */
+  OperationFuture<Stat> exists(String path, @Nullable Watcher watcher);
+
+  /**
+   * Gets the list of children nodes under the given path. Same as calling
+   * {@link #getChildren(String, org.apache.zookeeper.Watcher) getChildren(path, null)}.
+   *
+   * @see #getChildren(String, org.apache.zookeeper.Watcher)
+   */
+  OperationFuture<NodeChildren> getChildren(String path);
+
+  /**
+   * Gets the list of children nodes under the given path and leaves a watcher on the node for watching node
+   * deletion and children nodes creation/deletion.
+   *
+   * @param path The path to fetch for children nodes
+   * @param watcher Watcher for watching changes, or {@code null} if no watcher to set.
+   * @return A {@link OperationFuture} that will be completed when the getChildren call is done, with the result
+   *         given as {@link NodeChildren}. If there is error, it will be reflected as error in the future.
+   */
+  OperationFuture<NodeChildren> getChildren(String path, @Nullable Watcher watcher);
+
+  /**
+   * Gets the data stored in the given path. Same as calling
+   * {@link #getData(String, org.apache.zookeeper.Watcher) getData(path, null)}.
+   */
+  OperationFuture<NodeData> getData(String path);
+
+  /**
+   * Gets the data stored in the given path and leaves a watcher on the node for watching deletion/data changes on
+   * the node.
+   *
+   * @param path The path to get data from.
+   * @param watcher Watcher for watching changes, or {@code null} if no watcher to set.
+   * @return A {@link OperationFuture} that will be completed when the getData call is done, with the result
+   *         given as {@link NodeData}. If there is error, it will be reflected as error in the future.
+   */
+  OperationFuture<NodeData> getData(String path, @Nullable Watcher watcher);
+
+  /**
+   * Sets the data for the given path without matching version. Same as calling
+   * {@link #setData(String, byte[], int) setData(path, data, -1)}.
+   */
+  OperationFuture<Stat> setData(String path, byte[] data);
+
+  /**
+   * Sets the data for the given path that matches the given version. If the version given is {@code -1}, it matches
+   * any version.
+   *
+   * @param dataPath The path to set data to.
+   * @param data Data to be set.
+   * @param version Matching version.
+   * @return A {@link OperationFuture} that will be completed when the setData call is done, with node {@link Stat}
+   *         given as the future result. If there is error, it will be reflected as error in the future.
+   */
+  OperationFuture<Stat> setData(String dataPath, byte[] data, int version);
+
+  /**
+   * Deletes the node of the given path without matching version. Same as calling
+   * {@link #delete(String, int) delete(path, -1)}.
+   *
+   * @see #delete(String, int)
+   */
+  OperationFuture<String> delete(String path);
+
+  /**
+   * Deletes the node of the given path that matches the given version. If the version given is {@code -1}, it matches
+   * any version.
+   *
+   * @param deletePath The path of the node to delete.
+   * @param version Matching version.
+   * @return A {@link OperationFuture} that will be completed when the delete call is done, with the node path
+   *         given as the future result. If there is error, it will be reflected as error in the future.
+   */
+  OperationFuture<String> delete(String deletePath, int version);
+}
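
Since every operation returns an OperationFuture, callers typically attach callbacks rather than block. A hedged sketch, assuming OperationFuture extends Guava's ListenableFuture (the path and payload are illustrative):

    import com.google.common.base.Charsets;
    import com.google.common.util.concurrent.FutureCallback;
    import com.google.common.util.concurrent.Futures;
    import org.apache.twill.zookeeper.ZKClient;
    import org.apache.zookeeper.CreateMode;

    public final class ZKClientExample {

      // Creates a persistent node, creating missing parents, and reacts asynchronously.
      static void createNode(ZKClient client) {
        Futures.addCallback(
            client.create("/twill/example", "hello".getBytes(Charsets.UTF_8), CreateMode.PERSISTENT, true),
            new FutureCallback<String>() {
              @Override
              public void onSuccess(String path) {
                System.out.println("Created " + path);
              }

              @Override
              public void onFailure(Throwable t) {
                t.printStackTrace();
              }
            });
      }
    }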

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-zookeeper/src/main/java/org/apache/twill/zookeeper/ZKClientService.java
----------------------------------------------------------------------
diff --git a/twill-zookeeper/src/main/java/org/apache/twill/zookeeper/ZKClientService.java b/twill-zookeeper/src/main/java/org/apache/twill/zookeeper/ZKClientService.java
new file mode 100644
index 0000000..63f27fb
--- /dev/null
+++ b/twill-zookeeper/src/main/java/org/apache/twill/zookeeper/ZKClientService.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.zookeeper;
+
+import org.apache.twill.internal.zookeeper.DefaultZKClientService;
+import com.google.common.base.Supplier;
+import com.google.common.collect.HashMultimap;
+import com.google.common.collect.Multimap;
+import com.google.common.util.concurrent.Service;
+import org.apache.zookeeper.Watcher;
+import org.apache.zookeeper.ZooKeeper;
+import org.apache.zookeeper.data.ACL;
+
+/**
+ * A {@link ZKClient} that extends from {@link Service} to provide lifecycle management functions.
+ * The {@link #start()} method needs to be called before calling any other method on this interface.
+ * When the client is no longer needed, call {@link #stop()} to release any resources that it holds.
+ */
+public interface ZKClientService extends ZKClient, Service {
+
+  /**
+   * Returns a {@link Supplier} of {@link ZooKeeper} that gives the current {@link ZooKeeper} in use at the moment
+   * when {@link com.google.common.base.Supplier#get()} gets called.
+   *
+   * @return A {@link Supplier Supplier&lt;ZooKeeper&gt;}
+   */
+  Supplier<ZooKeeper> getZooKeeperSupplier();
+
+  /**
+   * Builder for creating an implementation of {@link ZKClientService}.
+   * The default client timeout is 10000ms.
+   */
+  public static final class Builder {
+
+    private final String connectStr;
+    private int timeout = 10000;
+    private Watcher connectionWatcher;
+    private Multimap<String, ACL> acls = HashMultimap.create();
+
+    /**
+     * Creates a {@link Builder} with the given ZooKeeper connection string.
+     * @param connectStr The connection string.
+     * @return A new instance of Builder.
+     */
+    public static Builder of(String connectStr) {
+      return new Builder(connectStr);
+    }
+
+    /**
+     * Sets the client session timeout to the given number of milliseconds.
+     * @param timeout timeout in milliseconds.
+     * @return This builder
+     */
+    public Builder setSessionTimeout(int timeout) {
+      this.timeout = timeout;
+      return this;
+    }
+
+    /**
+     * Sets a {@link Watcher} that will be called whenever the connection state changes.
+     * @param watcher The watcher to set.
+     * @return This builder.
+     */
+    public Builder setConnectionWatcher(Watcher watcher) {
+      this.connectionWatcher = watcher;
+      return this;
+    }
+
+    /**
+     * Creates an instance of {@link ZKClientService} with the settings of this builder.
+     * @return A new instance of {@link ZKClientService}.
+     */
+    public ZKClientService build() {
+      return new DefaultZKClientService(connectStr, timeout, connectionWatcher);
+    }
+
+    private Builder(String connectStr) {
+      this.connectStr = connectStr;
+    }
+  }
+}
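
A minimal lifecycle sketch for the builder above; the connection string is an illustrative assumption:

    import org.apache.twill.zookeeper.ZKClientService;

    public final class ZKClientServiceExample {
      public static void main(String[] args) {
        ZKClientService zkClient = ZKClientService.Builder.of("localhost:2181")
                                                          .setSessionTimeout(10000)
                                                          .build();
        // start() must be called before any ZooKeeper operation; startAndWait() blocks until started.
        zkClient.startAndWait();
        try {
          System.out.println("Session id: " + zkClient.getSessionId());
        } finally {
          zkClient.stopAndWait();
        }
      }
    }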

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-zookeeper/src/main/java/org/apache/twill/zookeeper/ZKClientServices.java
----------------------------------------------------------------------
diff --git a/twill-zookeeper/src/main/java/org/apache/twill/zookeeper/ZKClientServices.java b/twill-zookeeper/src/main/java/org/apache/twill/zookeeper/ZKClientServices.java
new file mode 100644
index 0000000..cc38c76
--- /dev/null
+++ b/twill-zookeeper/src/main/java/org/apache/twill/zookeeper/ZKClientServices.java
@@ -0,0 +1,145 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.zookeeper;
+
+import org.apache.zookeeper.CreateMode;
+import org.apache.zookeeper.Watcher;
+import org.apache.zookeeper.data.Stat;
+
+import javax.annotation.Nullable;
+
+/**
+ * Provides static factory method to create {@link ZKClientService} with modified behaviors.
+ */
+public final class ZKClientServices {
+
+  /**
+   * Creates a {@link ZKClientService} from the given {@link ZKClient} if the given {@link ZKClient} is an instance of
+   * {@link ZKClientService} or is a {@link ForwardingZKClient} that eventually traces back to a delegate of type
+   * {@link ZKClientService}. If such a {@link ZKClientService} instance is found, this method returns
+   * an instance by invoking {@link #delegate(ZKClient, ZKClientService)} with the given {@link ZKClient} and
+   * the {@link ZKClientService} found respectively.
+   *
+   * @param client The {@link ZKClient}.
+   * @return A {@link ZKClientService}.
+   * @throws IllegalArgumentException If no {@link ZKClientService} is found.
+   */
+  public static ZKClientService delegate(ZKClient client) {
+    ZKClient zkClient = client;
+    while (!(zkClient instanceof ZKClientService) && zkClient instanceof ForwardingZKClient) {
+      zkClient = ((ForwardingZKClient) zkClient).getDelegate();
+    }
+    if (zkClient instanceof ZKClientService) {
+      return delegate(client, (ZKClientService) zkClient);
+    }
+    throw new IllegalArgumentException("No ZKClientService found from the delegation hierarchy");
+  }
+
+  /**
+   * Creates a {@link ZKClientService} whose {@link ZKClient} methods are delegated to the given
+   * {@link ZKClient}, while the {@link ZKClientService} specific methods are delegated to the given
+   * {@link ZKClientService}; the {@link ZKClient} and {@link ZKClientService} may be different instances.
+   *
+   * @param client The {@link ZKClient} for delegation
+   * @param clientService The {@link ZKClientService} for delegation.
+   * @return A {@link ZKClientService}.
+   */
+  public static ZKClientService delegate(final ZKClient client, ZKClientService clientService) {
+    return new ForwardingZKClientService(clientService) {
+
+      @Override
+      public Long getSessionId() {
+        return client.getSessionId();
+      }
+
+      @Override
+      public String getConnectString() {
+        return client.getConnectString();
+      }
+
+      @Override
+      public void addConnectionWatcher(Watcher watcher) {
+        client.addConnectionWatcher(watcher);
+      }
+
+      @Override
+      public OperationFuture<String> create(String path, @Nullable byte[] data, CreateMode createMode) {
+        return client.create(path, data, createMode);
+      }
+
+      @Override
+      public OperationFuture<String> create(String path, @Nullable byte[] data, CreateMode createMode,
+                                            boolean createParent) {
+        return client.create(path, data, createMode, createParent);
+      }
+
+      @Override
+      public OperationFuture<Stat> exists(String path) {
+        return client.exists(path);
+      }
+
+      @Override
+      public OperationFuture<Stat> exists(String path, @Nullable Watcher watcher) {
+        return client.exists(path, watcher);
+      }
+
+      @Override
+      public OperationFuture<NodeChildren> getChildren(String path) {
+        return client.getChildren(path);
+      }
+
+      @Override
+      public OperationFuture<NodeChildren> getChildren(String path, @Nullable Watcher watcher) {
+        return client.getChildren(path, watcher);
+      }
+
+      @Override
+      public OperationFuture<NodeData> getData(String path) {
+        return client.getData(path);
+      }
+
+      @Override
+      public OperationFuture<NodeData> getData(String path, @Nullable Watcher watcher) {
+        return client.getData(path, watcher);
+      }
+
+      @Override
+      public OperationFuture<Stat> setData(String path, byte[] data) {
+        return client.setData(path, data);
+      }
+
+      @Override
+      public OperationFuture<Stat> setData(String dataPath, byte[] data, int version) {
+        return client.setData(dataPath, data, version);
+      }
+
+      @Override
+      public OperationFuture<String> delete(String path) {
+        return client.delete(path);
+      }
+
+      @Override
+      public OperationFuture<String> delete(String deletePath, int version) {
+        return client.delete(deletePath, version);
+      }
+    };
+  }
+
+  private ZKClientServices() {
+  }
+}
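
The delegate(ZKClient) form is typically used to keep lifecycle control after wrapping a ZKClientService with decorators; a sketch with an illustrative connection string and retry interval:

    import java.util.concurrent.TimeUnit;

    import org.apache.twill.zookeeper.RetryStrategies;
    import org.apache.twill.zookeeper.ZKClientService;
    import org.apache.twill.zookeeper.ZKClientServices;
    import org.apache.twill.zookeeper.ZKClients;

    public final class DelegateExample {
      public static void main(String[] args) {
        // Decorate the client with retry-on-failure, then expose it as a ZKClientService
        // again so that start()/stop() still reach the underlying service.
        ZKClientService zkClient = ZKClientServices.delegate(
            ZKClients.retryOnFailure(
                ZKClientService.Builder.of("localhost:2181").build(),
                RetryStrategies.fixDelay(1, TimeUnit.SECONDS)));

        zkClient.startAndWait();
        // ... perform ZooKeeper operations ...
        zkClient.stopAndWait();
      }
    }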

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-zookeeper/src/main/java/org/apache/twill/zookeeper/ZKClients.java
----------------------------------------------------------------------
diff --git a/twill-zookeeper/src/main/java/org/apache/twill/zookeeper/ZKClients.java b/twill-zookeeper/src/main/java/org/apache/twill/zookeeper/ZKClients.java
new file mode 100644
index 0000000..f67c1bd
--- /dev/null
+++ b/twill-zookeeper/src/main/java/org/apache/twill/zookeeper/ZKClients.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.zookeeper;
+
+import org.apache.twill.internal.zookeeper.FailureRetryZKClient;
+import org.apache.twill.internal.zookeeper.NamespaceZKClient;
+import org.apache.twill.internal.zookeeper.RewatchOnExpireZKClient;
+
+/**
+ * Provides static factory methods for creating {@link ZKClient} instances with added behaviors.
+ */
+public final class ZKClients {
+
+  /**
+   * Creates a {@link ZKClient} that will perform auto re-watch on all existing watches
+   * when reconnection happens after session expiration. All {@link org.apache.zookeeper.Watcher Watchers}
+   * set through the returned {@link ZKClient} would not receive any connection events.
+   *
+   * @param client The {@link ZKClient} for operations delegation.
+   * @return A {@link ZKClient} that will do auto re-watch on all methods that accept a
+   *        {@link org.apache.zookeeper.Watcher} upon session expiration.
+   */
+  public static ZKClient reWatchOnExpire(ZKClient client) {
+    return new RewatchOnExpireZKClient(client);
+  }
+
+  /**
+   * Creates a {@link ZKClient} that will retry interim failure (e.g. connection loss, session expiration)
+   * based on the given {@link RetryStrategy}.
+   *
+   * @param client The {@link ZKClient} for operations delegation.
+   * @param retryStrategy The {@link RetryStrategy} to be invoked when there is an operation failure.
+   * @return A {@link ZKClient}.
+   */
+  public static ZKClient retryOnFailure(ZKClient client, RetryStrategy retryStrategy) {
+    return new FailureRetryZKClient(client, retryStrategy);
+  }
+
+  /** Creates a {@link ZKClient} that prefixes all paths with the given namespace. */
+  public static ZKClient namespace(ZKClient zkClient, String namespace) {
+    return new NamespaceZKClient(zkClient, namespace);
+  }
+
+  private ZKClients() {
+  }
+}
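
These wrappers are designed to be layered; a hedged sketch in which every path issued through the returned client is prefixed with the given namespace, and watches are re-registered after session expiration (the "/myapp" namespace is illustrative):

    import org.apache.twill.zookeeper.ZKClient;
    import org.apache.twill.zookeeper.ZKClients;

    public final class NamespacedClientExample {

      // rawClient is any already-built ZKClient.
      static ZKClient scopedClient(ZKClient rawClient) {
        return ZKClients.namespace(ZKClients.reWatchOnExpire(rawClient), "/myapp");
      }
    }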


[23/28] Making maven site works.

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/logging/KafkaAppender.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/logging/KafkaAppender.java b/core/src/main/java/org/apache/twill/internal/logging/KafkaAppender.java
deleted file mode 100644
index 12818ef..0000000
--- a/core/src/main/java/org/apache/twill/internal/logging/KafkaAppender.java
+++ /dev/null
@@ -1,303 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.logging;
-
-import org.apache.twill.common.Services;
-import org.apache.twill.common.Threads;
-import org.apache.twill.internal.kafka.client.Compression;
-import org.apache.twill.internal.kafka.client.SimpleKafkaClient;
-import org.apache.twill.kafka.client.KafkaClient;
-import org.apache.twill.kafka.client.PreparePublish;
-import org.apache.twill.zookeeper.RetryStrategies;
-import org.apache.twill.zookeeper.ZKClientService;
-import org.apache.twill.zookeeper.ZKClientServices;
-import org.apache.twill.zookeeper.ZKClients;
-import ch.qos.logback.classic.pattern.ClassOfCallerConverter;
-import ch.qos.logback.classic.pattern.FileOfCallerConverter;
-import ch.qos.logback.classic.pattern.LineOfCallerConverter;
-import ch.qos.logback.classic.pattern.MethodOfCallerConverter;
-import ch.qos.logback.classic.spi.ILoggingEvent;
-import ch.qos.logback.classic.spi.IThrowableProxy;
-import ch.qos.logback.classic.spi.StackTraceElementProxy;
-import ch.qos.logback.core.AppenderBase;
-import com.google.common.base.Charsets;
-import com.google.common.base.Function;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Throwables;
-import com.google.common.collect.Iterables;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.gson.stream.JsonWriter;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.io.StringWriter;
-import java.util.Queue;
-import java.util.concurrent.ConcurrentLinkedQueue;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicReference;
-
-/**
- *
- */
-public final class KafkaAppender extends AppenderBase<ILoggingEvent> {
-
-  private static final Logger LOG = LoggerFactory.getLogger(KafkaAppender.class);
-
-  private final LogEventConverter eventConverter;
-  private final AtomicReference<PreparePublish> publisher;
-  private final Runnable flushTask;
-  /**
-   * Rough count of how many entries are being buffered. It's just approximate, not exact.
-   */
-  private final AtomicInteger bufferedSize;
-
-  private ZKClientService zkClientService;
-  private KafkaClient kafkaClient;
-  private String zkConnectStr;
-  private String hostname;
-  private String topic;
-  private Queue<String> buffer;
-  private int flushLimit = 20;
-  private int flushPeriod = 100;
-  private ScheduledExecutorService scheduler;
-
-  public KafkaAppender() {
-    eventConverter = new LogEventConverter();
-    publisher = new AtomicReference<PreparePublish>();
-    flushTask = createFlushTask();
-    bufferedSize = new AtomicInteger();
-    buffer = new ConcurrentLinkedQueue<String>();
-  }
-
-  /**
-   * Sets the zookeeper connection string. Called by slf4j.
-   */
-  @SuppressWarnings("unused")
-  public void setZookeeper(String zkConnectStr) {
-    this.zkConnectStr = zkConnectStr;
-  }
-
-  /**
-   * Sets the hostname. Called by slf4j.
-   */
-  @SuppressWarnings("unused")
-  public void setHostname(String hostname) {
-    this.hostname = hostname;
-  }
-
-  /**
-   * Sets the topic name for publishing logs. Called by slf4j.
-   */
-  @SuppressWarnings("unused")
-  public void setTopic(String topic) {
-    this.topic = topic;
-  }
-
-  /**
-   * Sets the maximum number of cached log entries before performing a force flush. Called by slf4j.
-   */
-  @SuppressWarnings("unused")
-  public void setFlushLimit(int flushLimit) {
-    this.flushLimit = flushLimit;
-  }
-
-  /**
-   * Sets the periodic flush time in milliseconds. Called by slf4j.
-   */
-  @SuppressWarnings("unused")
-  public void setFlushPeriod(int flushPeriod) {
-    this.flushPeriod = flushPeriod;
-  }
-
-  @Override
-  public void start() {
-    Preconditions.checkNotNull(zkConnectStr);
-
-    scheduler = Executors.newSingleThreadScheduledExecutor(Threads.createDaemonThreadFactory("kafka-logger"));
-
-    zkClientService = ZKClientServices.delegate(
-      ZKClients.reWatchOnExpire(
-        ZKClients.retryOnFailure(ZKClientService.Builder.of(zkConnectStr).build(),
-                                 RetryStrategies.fixDelay(1, TimeUnit.SECONDS))));
-
-    kafkaClient = new SimpleKafkaClient(zkClientService);
-    Futures.addCallback(Services.chainStart(zkClientService, kafkaClient), new FutureCallback<Object>() {
-      @Override
-      public void onSuccess(Object result) {
-        LOG.info("Kafka client started: " + zkConnectStr);
-        publisher.set(kafkaClient.preparePublish(topic, Compression.SNAPPY));
-        scheduler.scheduleWithFixedDelay(flushTask, 0, flushPeriod, TimeUnit.MILLISECONDS);
-      }
-
-      @Override
-      public void onFailure(Throwable t) {
-        // Fail to talk to kafka. Other than logging, what can be done?
-        LOG.error("Failed to start kafka client.", t);
-      }
-    });
-
-    super.start();
-  }
-
-  @Override
-  public void stop() {
-    super.stop();
-    scheduler.shutdownNow();
-    Futures.getUnchecked(Services.chainStop(kafkaClient, zkClientService));
-  }
-
-  public void forceFlush() {
-    try {
-      publishLogs().get(2, TimeUnit.SECONDS);
-    } catch (Exception e) {
-      LOG.error("Failed to publish last batch of log.", e);
-    }
-  }
-
-  @Override
-  protected void append(ILoggingEvent eventObject) {
-    buffer.offer(eventConverter.convert(eventObject));
-    if (bufferedSize.incrementAndGet() >= flushLimit && publisher.get() != null) {
-      // Try to do an extra flush
-      scheduler.submit(flushTask);
-    }
-  }
-
-  private ListenableFuture<Integer> publishLogs() {
-    // If the publisher is not available, simply returns a completed future.
-    PreparePublish publisher = KafkaAppender.this.publisher.get();
-    if (publisher == null) {
-      return Futures.immediateFuture(0);
-    }
-
-    int count = 0;
-    for (String json : Iterables.consumingIterable(buffer)) {
-      publisher.add(Charsets.UTF_8.encode(json), 0);
-      count++;
-    }
-    // Nothing to publish, simply returns a completed future.
-    if (count == 0) {
-      return Futures.immediateFuture(0);
-    }
-
-    bufferedSize.set(0);
-    final int finalCount = count;
-    return Futures.transform(publisher.publish(), new Function<Object, Integer>() {
-      @Override
-      public Integer apply(Object input) {
-        return finalCount;
-      }
-    });
-  }
-
-  /**
-   * Creates a {@link Runnable} that writes all logs in the buffer into kafka.
-   * @return The Runnable task
-   */
-  private Runnable createFlushTask() {
-    return new Runnable() {
-      @Override
-      public void run() {
-        Futures.addCallback(publishLogs(), new FutureCallback<Integer>() {
-          @Override
-          public void onSuccess(Integer result) {
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("Log entries published, size=" + result);
-            }
-          }
-
-          @Override
-          public void onFailure(Throwable t) {
-            LOG.error("Failed to push logs to kafka. Log entries dropped.", t);
-          }
-        });
-      }
-    };
-  }
-
-  /**
-   * Helper class to convert {@link ILoggingEvent} into json string.
-   */
-  private final class LogEventConverter {
-
-    private final ClassOfCallerConverter classNameConverter = new ClassOfCallerConverter();
-    private final MethodOfCallerConverter methodConverter = new MethodOfCallerConverter();
-    private final FileOfCallerConverter fileConverter = new FileOfCallerConverter();
-    private final LineOfCallerConverter lineConverter = new LineOfCallerConverter();
-
-    private String convert(ILoggingEvent event) {
-      StringWriter result = new StringWriter();
-      JsonWriter writer = new JsonWriter(result);
-
-      try {
-        try {
-          writer.beginObject();
-          writer.name("name").value(event.getLoggerName());
-          writer.name("host").value(hostname);
-          writer.name("timestamp").value(Long.toString(event.getTimeStamp()));
-          writer.name("level").value(event.getLevel().toString());
-          writer.name("className").value(classNameConverter.convert(event));
-          writer.name("method").value(methodConverter.convert(event));
-          writer.name("file").value(fileConverter.convert(event));
-          writer.name("line").value(lineConverter.convert(event));
-          writer.name("thread").value(event.getThreadName());
-          writer.name("message").value(event.getFormattedMessage());
-          writer.name("stackTraces");
-          encodeStackTraces(event.getThrowableProxy(), writer);
-
-          writer.endObject();
-        } finally {
-          writer.close();
-        }
-      } catch (IOException e) {
-        throw Throwables.propagate(e);
-      }
-
-      return result.toString();
-    }
-
-    private void encodeStackTraces(IThrowableProxy throwable, JsonWriter writer) throws IOException {
-      writer.beginArray();
-      try {
-        if (throwable == null) {
-          return;
-        }
-
-        for (StackTraceElementProxy stackTrace : throwable.getStackTraceElementProxyArray()) {
-          writer.beginObject();
-
-          StackTraceElement element = stackTrace.getStackTraceElement();
-          writer.name("className").value(element.getClassName());
-          writer.name("method").value(element.getMethodName());
-          writer.name("file").value(element.getFileName());
-          writer.name("line").value(element.getLineNumber());
-
-          writer.endObject();
-        }
-      } finally {
-        writer.endArray();
-      }
-    }
-  }
-}
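
The appender is normally wired up through a logback configuration file, which is what invokes the setters above. A hedged programmatic sketch of the equivalent attachment; the ZooKeeper quorum, hostname and topic are illustrative values:

    import ch.qos.logback.classic.Logger;
    import ch.qos.logback.classic.LoggerContext;
    import org.apache.twill.internal.logging.KafkaAppender;
    import org.slf4j.LoggerFactory;

    public final class KafkaAppenderSetup {

      static void attach() {
        LoggerContext context = (LoggerContext) LoggerFactory.getILoggerFactory();

        KafkaAppender appender = new KafkaAppender();
        appender.setContext(context);
        appender.setName("KAFKA");               // Loggings.forceFlush() looks up this appender name
        appender.setZookeeper("localhost:2181");
        appender.setHostname("example-host");
        appender.setTopic("logs");
        appender.start();

        context.getLogger(Logger.ROOT_LOGGER_NAME).addAppender(appender);
      }
    }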

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/logging/KafkaTwillRunnable.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/logging/KafkaTwillRunnable.java b/core/src/main/java/org/apache/twill/internal/logging/KafkaTwillRunnable.java
deleted file mode 100644
index c1695de..0000000
--- a/core/src/main/java/org/apache/twill/internal/logging/KafkaTwillRunnable.java
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.logging;
-
-import org.apache.twill.api.Command;
-import org.apache.twill.api.TwillContext;
-import org.apache.twill.api.TwillRunnable;
-import org.apache.twill.api.TwillRunnableSpecification;
-import org.apache.twill.internal.EnvKeys;
-import org.apache.twill.internal.kafka.EmbeddedKafkaServer;
-import org.apache.twill.internal.utils.Networks;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Throwables;
-import com.google.common.collect.ImmutableMap;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.util.Map;
-import java.util.Properties;
-import java.util.concurrent.CountDownLatch;
-
-/**
- * A {@link org.apache.twill.api.TwillRunnable} for managing Kafka server.
- */
-public final class KafkaTwillRunnable implements TwillRunnable {
-
-  private static final Logger LOG = LoggerFactory.getLogger(KafkaTwillRunnable.class);
-
-  private final String kafkaDir;
-  private EmbeddedKafkaServer server;
-  private CountDownLatch stopLatch;
-
-  public KafkaTwillRunnable(String kafkaDir) {
-    this.kafkaDir = kafkaDir;
-  }
-
-  @Override
-  public TwillRunnableSpecification configure() {
-    return TwillRunnableSpecification.Builder.with()
-      .setName("kafka")
-      .withConfigs(ImmutableMap.of("kafkaDir", kafkaDir))
-      .build();
-  }
-
-  @Override
-  public void initialize(TwillContext context) {
-    Map<String, String> args = context.getSpecification().getConfigs();
-    String zkConnectStr = System.getenv(EnvKeys.TWILL_LOG_KAFKA_ZK);
-    stopLatch = new CountDownLatch(1);
-
-    try {
-      server = new EmbeddedKafkaServer(new File(args.get("kafkaDir")), generateKafkaConfig(zkConnectStr));
-      server.startAndWait();
-    } catch (Exception e) {
-      throw Throwables.propagate(e);
-    }
-  }
-
-  @Override
-  public void handleCommand(Command command) throws Exception {
-  }
-
-  @Override
-  public void stop() {
-    stopLatch.countDown();
-  }
-
-  @Override
-  public void destroy() {
-    server.stopAndWait();
-  }
-
-  @Override
-  public void run() {
-    try {
-      stopLatch.await();
-    } catch (InterruptedException e) {
-      LOG.info("Running thread interrupted, shutting down kafka server.", e);
-    }
-  }
-
-  private Properties generateKafkaConfig(String zkConnectStr) {
-    int port = Networks.getRandomPort();
-    Preconditions.checkState(port > 0, "Failed to get random port.");
-
-    Properties prop = new Properties();
-    prop.setProperty("log.dir", new File("kafka-logs").getAbsolutePath());
-    prop.setProperty("zk.connect", zkConnectStr);
-    prop.setProperty("num.threads", "8");
-    prop.setProperty("port", Integer.toString(port));
-    prop.setProperty("log.flush.interval", "10000");
-    prop.setProperty("max.socket.request.bytes", "104857600");
-    prop.setProperty("log.cleanup.interval.mins", "1");
-    prop.setProperty("log.default.flush.scheduler.interval.ms", "1000");
-    prop.setProperty("zk.connectiontimeout.ms", "1000000");
-    prop.setProperty("socket.receive.buffer", "1048576");
-    prop.setProperty("enable.zookeeper", "true");
-    prop.setProperty("log.retention.hours", "168");
-    prop.setProperty("brokerid", "0");
-    prop.setProperty("socket.send.buffer", "1048576");
-    prop.setProperty("num.partitions", "1");
-    prop.setProperty("log.file.size", "536870912");
-    prop.setProperty("log.default.flush.interval.ms", "1000");
-    return prop;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/logging/LogEntryDecoder.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/logging/LogEntryDecoder.java b/core/src/main/java/org/apache/twill/internal/logging/LogEntryDecoder.java
deleted file mode 100644
index dc11666..0000000
--- a/core/src/main/java/org/apache/twill/internal/logging/LogEntryDecoder.java
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.logging;
-
-import org.apache.twill.api.logging.LogEntry;
-import org.apache.twill.internal.json.JsonUtils;
-import com.google.gson.JsonDeserializationContext;
-import com.google.gson.JsonDeserializer;
-import com.google.gson.JsonElement;
-import com.google.gson.JsonObject;
-import com.google.gson.JsonParseException;
-
-import java.lang.reflect.Type;
-
-/**
- * A {@link com.google.gson.Gson} decoder for {@link LogEntry}.
- */
-public final class LogEntryDecoder implements JsonDeserializer<LogEntry> {
-
-  @Override
-  public LogEntry deserialize(JsonElement json, Type typeOfT,
-                              JsonDeserializationContext context) throws JsonParseException {
-    if (!json.isJsonObject()) {
-      return null;
-    }
-    JsonObject jsonObj = json.getAsJsonObject();
-
-    final String name = JsonUtils.getAsString(jsonObj, "name");
-    final String host = JsonUtils.getAsString(jsonObj, "host");
-    final long timestamp = JsonUtils.getAsLong(jsonObj, "timestamp", 0);
-    LogEntry.Level l;
-    try {
-      l = LogEntry.Level.valueOf(JsonUtils.getAsString(jsonObj, "level"));
-    } catch (Exception e) {
-      l = LogEntry.Level.FATAL;
-    }
-    final LogEntry.Level logLevel = l;
-    final String className = JsonUtils.getAsString(jsonObj, "className");
-    final String method = JsonUtils.getAsString(jsonObj, "method");
-    final String file = JsonUtils.getAsString(jsonObj, "file");
-    final String line = JsonUtils.getAsString(jsonObj, "line");
-    final String thread = JsonUtils.getAsString(jsonObj, "thread");
-    final String message = JsonUtils.getAsString(jsonObj, "message");
-
-    final StackTraceElement[] stackTraces = context.deserialize(jsonObj.get("stackTraces").getAsJsonArray(),
-                                                                StackTraceElement[].class);
-
-    return new LogEntry() {
-      @Override
-      public String getLoggerName() {
-        return name;
-      }
-
-      @Override
-      public String getHost() {
-        return host;
-      }
-
-      @Override
-      public long getTimestamp() {
-        return timestamp;
-      }
-
-      @Override
-      public Level getLogLevel() {
-        return logLevel;
-      }
-
-      @Override
-      public String getSourceClassName() {
-        return className;
-      }
-
-      @Override
-      public String getSourceMethodName() {
-        return method;
-      }
-
-      @Override
-      public String getFileName() {
-        return file;
-      }
-
-      @Override
-      public int getLineNumber() {
-        if (line.equals("?")) {
-          return -1;
-        } else {
-          return Integer.parseInt(line);
-        }
-      }
-
-      @Override
-      public String getThreadName() {
-        return thread;
-      }
-
-      @Override
-      public String getMessage() {
-        return message;
-      }
-
-      @Override
-      public StackTraceElement[] getStackTraces() {
-        return stackTraces;
-      }
-    };
-  }
-}
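
A hedged sketch of decoding one event with this adapter; the JSON document mirrors the fields written by KafkaAppender's LogEventConverter, and the values are illustrative:

    import com.google.gson.Gson;
    import com.google.gson.GsonBuilder;
    import org.apache.twill.api.logging.LogEntry;
    import org.apache.twill.internal.logging.LogEntryDecoder;

    public final class LogEntryDecoderExample {
      public static void main(String[] args) {
        Gson gson = new GsonBuilder()
            .registerTypeAdapter(LogEntry.class, new LogEntryDecoder())
            .create();

        String json = "{\"name\":\"my.app.Worker\",\"host\":\"example-host\","
            + "\"timestamp\":\"1386834000000\",\"level\":\"INFO\",\"className\":\"my.app.Worker\","
            + "\"method\":\"run\",\"file\":\"Worker.java\",\"line\":\"42\",\"thread\":\"main\","
            + "\"message\":\"hello\",\"stackTraces\":[]}";

        LogEntry entry = gson.fromJson(json, LogEntry.class);
        System.out.println(entry.getLogLevel() + " " + entry.getMessage());   // INFO hello
      }
    }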

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/logging/Loggings.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/logging/Loggings.java b/core/src/main/java/org/apache/twill/internal/logging/Loggings.java
deleted file mode 100644
index 9baed63..0000000
--- a/core/src/main/java/org/apache/twill/internal/logging/Loggings.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.logging;
-
-import ch.qos.logback.classic.Logger;
-import ch.qos.logback.classic.LoggerContext;
-import ch.qos.logback.classic.spi.ILoggingEvent;
-import ch.qos.logback.core.Appender;
-import org.slf4j.ILoggerFactory;
-import org.slf4j.LoggerFactory;
-
-/**
- *
- */
-public final class Loggings {
-
-  public static void forceFlush() {
-    ILoggerFactory loggerFactory = LoggerFactory.getILoggerFactory();
-
-    if (loggerFactory instanceof LoggerContext) {
-      Appender<ILoggingEvent> appender = ((LoggerContext) loggerFactory).getLogger(Logger.ROOT_LOGGER_NAME)
-                                                                        .getAppender("KAFKA");
-      if (appender != null && appender instanceof KafkaAppender) {
-        ((KafkaAppender) appender).forceFlush();
-      }
-    }
-  }
-
-  private Loggings() {
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/package-info.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/package-info.java b/core/src/main/java/org/apache/twill/internal/package-info.java
deleted file mode 100644
index a8459e0..0000000
--- a/core/src/main/java/org/apache/twill/internal/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * This package provides internal classes for Twill.
- */
-package org.apache.twill.internal;

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/state/Message.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/state/Message.java b/core/src/main/java/org/apache/twill/internal/state/Message.java
deleted file mode 100644
index 6c3e719..0000000
--- a/core/src/main/java/org/apache/twill/internal/state/Message.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.state;
-
-import org.apache.twill.api.Command;
-
-/**
- *
- */
-public interface Message {
-
-  /**
-   * Type of message.
-   */
-  enum Type {
-    SYSTEM,
-    USER
-  }
-
-  /**
-   * Scope of the message.
-   */
-  enum Scope {
-    APPLICATION,
-    ALL_RUNNABLE,
-    RUNNABLE
-  }
-
-  Type getType();
-
-  Scope getScope();
-
-  /**
-   * @return the name of the target runnable if scope is {@link Scope#RUNNABLE} or {@code null} otherwise.
-   */
-  String getRunnableName();
-
-  Command getCommand();
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/state/MessageCallback.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/state/MessageCallback.java b/core/src/main/java/org/apache/twill/internal/state/MessageCallback.java
deleted file mode 100644
index f94eaa3..0000000
--- a/core/src/main/java/org/apache/twill/internal/state/MessageCallback.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.state;
-
-import com.google.common.util.concurrent.ListenableFuture;
-
-/**
- *
- */
-public interface MessageCallback {
-
-  /**
-   * Called when a message is received.
-   * @param message Message being received.
-   * @return A {@link ListenableFuture} that would be completed when message processing is completed or failed.
-   *         The result of the future should be the input message Id if succeeded.
-   */
-  ListenableFuture<String> onReceived(String messageId, Message message);
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/state/MessageCodec.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/state/MessageCodec.java b/core/src/main/java/org/apache/twill/internal/state/MessageCodec.java
deleted file mode 100644
index 176f620..0000000
--- a/core/src/main/java/org/apache/twill/internal/state/MessageCodec.java
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.state;
-
-import org.apache.twill.api.Command;
-import com.google.common.base.Charsets;
-import com.google.common.reflect.TypeToken;
-import com.google.gson.Gson;
-import com.google.gson.GsonBuilder;
-import com.google.gson.JsonDeserializationContext;
-import com.google.gson.JsonDeserializer;
-import com.google.gson.JsonElement;
-import com.google.gson.JsonObject;
-import com.google.gson.JsonParseException;
-import com.google.gson.JsonSerializationContext;
-import com.google.gson.JsonSerializer;
-
-import java.lang.reflect.Type;
-import java.util.Map;
-
-/**
- *
- */
-public final class MessageCodec {
-
-  private static final Type OPTIONS_TYPE = new TypeToken<Map<String, String>>() {}.getType();
-  private static final Gson GSON = new GsonBuilder()
-                                        .registerTypeAdapter(Message.class, new MessageAdapter())
-                                        .registerTypeAdapter(Command.class, new CommandAdapter())
-                                        .create();
-
-  /**
-   * Decodes a {@link Message} from the given byte array.
-   * @param bytes byte array to be decoded
-   * @return Message decoded or {@code null} if it fails to decode.
-   */
-  public static Message decode(byte[] bytes) {
-    if (bytes == null) {
-      return null;
-    }
-    String content = new String(bytes, Charsets.UTF_8);
-    return GSON.fromJson(content, Message.class);
-  }
-
-  /**
-   * Encodes a {@link Message} into a byte array. Reverse of the {@link #decode(byte[])} method.
-   * @param message Message to be encoded
-   * @return byte array representing the encoded message.
-   */
-  public static byte[] encode(Message message) {
-    return GSON.toJson(message, Message.class).getBytes(Charsets.UTF_8);
-  }
-
-  /**
-   * Gson codec for {@link Message} object.
-   */
-  private static final class MessageAdapter implements JsonSerializer<Message>, JsonDeserializer<Message> {
-
-    @Override
-    public Message deserialize(JsonElement json, Type typeOfT,
-                               JsonDeserializationContext context) throws JsonParseException {
-      JsonObject jsonObj = json.getAsJsonObject();
-
-      Message.Type type = Message.Type.valueOf(jsonObj.get("type").getAsString());
-      Message.Scope scope = Message.Scope.valueOf(jsonObj.get("scope").getAsString());
-      JsonElement name = jsonObj.get("runnableName");
-      String runnableName = (name == null || name.isJsonNull()) ? null : name.getAsString();
-      Command command = context.deserialize(jsonObj.get("command"), Command.class);
-
-      return new SimpleMessage(type, scope, runnableName, command);
-    }
-
-    @Override
-    public JsonElement serialize(Message message, Type typeOfSrc, JsonSerializationContext context) {
-      JsonObject jsonObj = new JsonObject();
-      jsonObj.addProperty("type", message.getType().name());
-      jsonObj.addProperty("scope", message.getScope().name());
-      jsonObj.addProperty("runnableName", message.getRunnableName());
-      jsonObj.add("command", context.serialize(message.getCommand(), Command.class));
-
-      return jsonObj;
-    }
-  }
-
-  /**
-   * Gson codec for {@link Command} object.
-   */
-  private static final class CommandAdapter implements JsonSerializer<Command>, JsonDeserializer<Command> {
-
-    @Override
-    public Command deserialize(JsonElement json, Type typeOfT,
-                               JsonDeserializationContext context) throws JsonParseException {
-      JsonObject jsonObj = json.getAsJsonObject();
-      return Command.Builder.of(jsonObj.get("command").getAsString())
-                            .addOptions(context.<Map<String, String>>deserialize(jsonObj.get("options"), OPTIONS_TYPE))
-                            .build();
-    }
-
-    @Override
-    public JsonElement serialize(Command command, Type typeOfSrc, JsonSerializationContext context) {
-      JsonObject jsonObj = new JsonObject();
-      jsonObj.addProperty("command", command.getCommand());
-      jsonObj.add("options", context.serialize(command.getOptions(), OPTIONS_TYPE));
-      return jsonObj;
-    }
-  }
-
-  private MessageCodec() {
-  }
-}
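
A small round-trip sketch for this codec; the runnable name, command and option are illustrative, and Messages and Command.Builder come from the same API shown elsewhere in this commit:

    import com.google.common.collect.ImmutableMap;
    import org.apache.twill.api.Command;
    import org.apache.twill.internal.state.Message;
    import org.apache.twill.internal.state.MessageCodec;
    import org.apache.twill.internal.state.Messages;

    public final class MessageCodecExample {
      public static void main(String[] args) {
        // Build a USER message targeted at a single runnable, then round-trip it through the codec.
        Message original = Messages.createForRunnable(
            "worker", Command.Builder.of("restart")
                                     .addOptions(ImmutableMap.of("delay", "5"))
                                     .build());

        byte[] encoded = MessageCodec.encode(original);
        Message decoded = MessageCodec.decode(encoded);

        // Prints: RUNNABLE worker restart
        System.out.println(decoded.getScope() + " " + decoded.getRunnableName()
                           + " " + decoded.getCommand().getCommand());
      }
    }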

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/state/Messages.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/state/Messages.java b/core/src/main/java/org/apache/twill/internal/state/Messages.java
deleted file mode 100644
index 9783d62..0000000
--- a/core/src/main/java/org/apache/twill/internal/state/Messages.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.state;
-
-import org.apache.twill.api.Command;
-
-/**
- * Factory class for creating instances of {@link Message}.
- */
-public final class Messages {
-
-  /**
-   * Creates a {@link Message.Type#USER} type {@link Message} that sends the given {@link Command} to a
-   * particular runnable.
-   *
-   * @param runnableName Name of the runnable.
-   * @param command The user command to send.
-   * @return A new instance of {@link Message}.
-   */
-  public static Message createForRunnable(String runnableName, Command command) {
-    return new SimpleMessage(Message.Type.USER, Message.Scope.RUNNABLE, runnableName, command);
-  }
-
-  /**
-   * Creates a {@link Message.Type#USER} type {@link Message} that sends the given {@link Command} to all
-   * runnables.
-   *
-   * @param command The user command to send.
-   * @return A new instance of {@link Message}.
-   */
-  public static Message createForAll(Command command) {
-    return new SimpleMessage(Message.Type.USER, Message.Scope.ALL_RUNNABLE, null, command);
-  }
-
-  private Messages() {
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/state/SimpleMessage.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/state/SimpleMessage.java b/core/src/main/java/org/apache/twill/internal/state/SimpleMessage.java
deleted file mode 100644
index e146e56..0000000
--- a/core/src/main/java/org/apache/twill/internal/state/SimpleMessage.java
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.state;
-
-import org.apache.twill.api.Command;
-import com.google.common.base.Objects;
-
-/**
- * An immutable implementation of {@link Message}.
- */
-final class SimpleMessage implements Message {
-
-  private final Type type;
-  private final Scope scope;
-  private final String runnableName;
-  private final Command command;
-
-  SimpleMessage(Type type, Scope scope, String runnableName, Command command) {
-    this.type = type;
-    this.scope = scope;
-    this.runnableName = runnableName;
-    this.command = command;
-  }
-
-  @Override
-  public Type getType() {
-    return type;
-  }
-
-  @Override
-  public Scope getScope() {
-    return scope;
-  }
-
-  @Override
-  public String getRunnableName() {
-    return runnableName;
-  }
-
-  @Override
-  public Command getCommand() {
-    return command;
-  }
-
-  @Override
-  public String toString() {
-    return Objects.toStringHelper(Message.class)
-      .add("type", type)
-      .add("scope", scope)
-      .add("runnable", runnableName)
-      .add("command", command)
-      .toString();
-  }
-
-  @Override
-  public int hashCode() {
-    return Objects.hashCode(type, scope, runnableName, command);
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    if (obj == this) {
-      return true;
-    }
-    if (!(obj instanceof Message)) {
-      return false;
-    }
-    Message other = (Message) obj;
-    return type == other.getType()
-      && scope == other.getScope()
-      && Objects.equal(runnableName, other.getRunnableName())
-      && Objects.equal(command, other.getCommand());
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/state/StateNode.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/state/StateNode.java b/core/src/main/java/org/apache/twill/internal/state/StateNode.java
deleted file mode 100644
index d66f8a2..0000000
--- a/core/src/main/java/org/apache/twill/internal/state/StateNode.java
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.state;
-
-import org.apache.twill.api.ServiceController;
-import com.google.common.util.concurrent.Service;
-
-/**
- * Carries a {@link ServiceController.State} together with an optional error message and stack trace.
- */
-public final class StateNode {
-
-  private final ServiceController.State state;
-  private final String errorMessage;
-  private final StackTraceElement[] stackTraces;
-
-  /**
-   * Constructs a StateNode with the given state.
-   */
-  public StateNode(ServiceController.State state) {
-    this(state, null, null);
-  }
-
-  /**
-   * Constructs a StateNode with {@link ServiceController.State#FAILED} caused by the given error.
-   */
-  public StateNode(Throwable error) {
-    this(Service.State.FAILED, error.getMessage(), error.getStackTrace());
-  }
-
-  /**
-   * Constructs a StateNode with the given state, error and stacktraces.
-   * This constructor should only be used by the StateNodeCodec.
-   */
-  public StateNode(ServiceController.State state, String errorMessage, StackTraceElement[] stackTraces) {
-    this.state = state;
-    this.errorMessage = errorMessage;
-    this.stackTraces = stackTraces;
-  }
-
-  public ServiceController.State getState() {
-    return state;
-  }
-
-  public String getErrorMessage() {
-    return errorMessage;
-  }
-
-  public StackTraceElement[] getStackTraces() {
-    return stackTraces;
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder builder = new StringBuilder("state=").append(state);
-
-    if (errorMessage != null) {
-      builder.append("\n").append("error=").append(errorMessage);
-    }
-    if (stackTraces != null) {
-      builder.append("\n");
-      for (StackTraceElement stackTrace : stackTraces) {
-        builder.append("\tat ").append(stackTrace.toString()).append("\n");
-      }
-    }
-    return builder.toString();
-  }
-
-}
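
A rough usage sketch (the exception below is fabricated for illustration):

  StateNode failed = new StateNode(new IllegalStateException("container exited unexpectedly"));
  // failed.getState() is FAILED; toString() prints "state=FAILED", the error message and "\tat ..." frames.
  StateNode running = new StateNode(ServiceController.State.RUNNING);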

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/state/SystemMessages.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/state/SystemMessages.java b/core/src/main/java/org/apache/twill/internal/state/SystemMessages.java
deleted file mode 100644
index 9877121..0000000
--- a/core/src/main/java/org/apache/twill/internal/state/SystemMessages.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.state;
-
-import org.apache.twill.api.Command;
-import com.google.common.base.Preconditions;
-
-/**
- * Collection of predefined system messages.
- */
-public final class SystemMessages {
-
-  public static final Command STOP_COMMAND = Command.Builder.of("stop").build();
-  public static final Message SECURE_STORE_UPDATED = new SimpleMessage(
-    Message.Type.SYSTEM, Message.Scope.APPLICATION, null, Command.Builder.of("secureStoreUpdated").build());
-
-  public static Message stopApplication() {
-    return new SimpleMessage(Message.Type.SYSTEM, Message.Scope.APPLICATION, null, STOP_COMMAND);
-  }
-
-  public static Message stopRunnable(String runnableName) {
-    return new SimpleMessage(Message.Type.SYSTEM, Message.Scope.RUNNABLE, runnableName, STOP_COMMAND);
-  }
-
-  public static Message setInstances(String runnableName, int instances) {
-    Preconditions.checkArgument(instances > 0, "Instances should be > 0.");
-    return new SimpleMessage(Message.Type.SYSTEM, Message.Scope.RUNNABLE, runnableName,
-                             Command.Builder.of("instances").addOption("count", Integer.toString(instances)).build());
-  }
-
-  private SystemMessages() {
-  }
-}
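
For illustration, scaling a runnable named "worker" (a made-up name) to five instances and serializing the
resulting system message with the MessageCodec shown earlier:

  Message scale = SystemMessages.setInstances("worker", 5);
  // A SYSTEM message scoped to the "worker" runnable, carrying command "instances" with option count=5.
  byte[] wire = MessageCodec.encode(scale);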

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/utils/Dependencies.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/utils/Dependencies.java b/core/src/main/java/org/apache/twill/internal/utils/Dependencies.java
deleted file mode 100644
index 015b9f5..0000000
--- a/core/src/main/java/org/apache/twill/internal/utils/Dependencies.java
+++ /dev/null
@@ -1,323 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.utils;
-
-import com.google.common.base.Throwables;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Sets;
-import com.google.common.io.ByteStreams;
-import org.objectweb.asm.AnnotationVisitor;
-import org.objectweb.asm.ClassReader;
-import org.objectweb.asm.ClassVisitor;
-import org.objectweb.asm.FieldVisitor;
-import org.objectweb.asm.Label;
-import org.objectweb.asm.MethodVisitor;
-import org.objectweb.asm.Opcodes;
-import org.objectweb.asm.Type;
-import org.objectweb.asm.signature.SignatureReader;
-import org.objectweb.asm.signature.SignatureVisitor;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.net.MalformedURLException;
-import java.net.URI;
-import java.net.URL;
-import java.util.Queue;
-import java.util.Set;
-
-/**
- * Utility class to help find out class dependencies.
- */
-public final class Dependencies {
-
-  /**
-   * Represents a callback for accepting a class during dependency traversal.
-   */
-  public interface ClassAcceptor {
-    /**
-     * Invoked when a class is found as a dependency.
-     *
-     * @param className Name of the class.
-     * @param classUrl URL for the class resource.
-     * @param classPathUrl URL for the class path resource that contains the class resource.
-     *                     If the URL protocol is {@code file}, it would be the path to root package.
-     *                     If the URL protocol is {@code jar}, it would be the jar file.
-     * @return true to keep finding dependencies of the given class.
-     */
-    boolean accept(String className, URL classUrl, URL classPathUrl);
-  }
-
-  public static void findClassDependencies(ClassLoader classLoader,
-                                           ClassAcceptor acceptor,
-                                           String...classesToResolve) throws IOException {
-    findClassDependencies(classLoader, acceptor, ImmutableList.copyOf(classesToResolve));
-  }
-
-  /**
-   * Finds the class dependencies of the given class.
-   * @param classLoader ClassLoader for finding class bytecode.
-   * @param acceptor Predicate to accept a found class and its bytecode.
-   * @param classesToResolve Classes to find dependencies for.
-   * @throws IOException Thrown when there is an error loading class bytecode.
-   */
-  public static void findClassDependencies(ClassLoader classLoader,
-                                           ClassAcceptor acceptor,
-                                           Iterable<String> classesToResolve) throws IOException {
-
-    final Set<String> seenClasses = Sets.newHashSet(classesToResolve);
-    final Queue<String> classes = Lists.newLinkedList(classesToResolve);
-
-    // Breadth-first-search classes dependencies.
-    while (!classes.isEmpty()) {
-      String className = classes.remove();
-      URL classUrl = getClassURL(className, classLoader);
-      if (classUrl == null) {
-        continue;
-      }
-
-      // Call the acceptor to see if it accepts the current class.
-      if (!acceptor.accept(className, classUrl, getClassPathURL(className, classUrl))) {
-        continue;
-      }
-
-      InputStream is = classUrl.openStream();
-      try {
-        // Visit the bytecode to look up the classes that the visited class depends on.
-        new ClassReader(ByteStreams.toByteArray(is)).accept(new DependencyClassVisitor(new DependencyAcceptor() {
-          @Override
-          public void accept(String className) {
-            // Queue the class for traversal if it has not been seen before.
-            if (seenClasses.add(className)) {
-              classes.add(className);
-            }
-          }
-        }), ClassReader.SKIP_DEBUG + ClassReader.SKIP_FRAMES);
-      } finally {
-        is.close();
-      }
-    }
-  }
-
-  /**
-   * Returns the URL for loading the class bytecode of the given class, or null if it is not found or if it is
-   * a system class.
-   */
-  private static URL getClassURL(String className, ClassLoader classLoader) {
-    String resourceName = className.replace('.', '/') + ".class";
-    return classLoader.getResource(resourceName);
-  }
-
-  private static URL getClassPathURL(String className, URL classUrl) {
-    try {
-      if ("file".equals(classUrl.getProtocol())) {
-        String path = classUrl.getFile();
-        // Compute the directory containing the class.
-        int endIdx = path.length() - className.length() - ".class".length();
-        if (endIdx > 1) {
-          // If it is not the root directory, return the end index to remove the trailing '/'.
-          endIdx--;
-        }
-        return new URL("file", "", -1, path.substring(0, endIdx));
-      }
-      if ("jar".equals(classUrl.getProtocol())) {
-        String path = classUrl.getFile();
-        return URI.create(path.substring(0, path.indexOf("!/"))).toURL();
-      }
-    } catch (MalformedURLException e) {
-      throw Throwables.propagate(e);
-    }
-    throw new IllegalStateException("Unsupported class URL: " + classUrl);
-  }
-
-  /**
-   * A private interface for accepting a dependent class that is found during bytecode inspection.
-   */
-  private interface DependencyAcceptor {
-    void accept(String className);
-  }
-
-  /**
-   * ASM ClassVisitor for extracting classes dependencies.
-   */
-  private static final class DependencyClassVisitor extends ClassVisitor {
-
-    private final SignatureVisitor signatureVisitor;
-    private final DependencyAcceptor acceptor;
-
-    public DependencyClassVisitor(DependencyAcceptor acceptor) {
-      super(Opcodes.ASM4);
-      this.acceptor = acceptor;
-      this.signatureVisitor = new SignatureVisitor(Opcodes.ASM4) {
-        private String currentClass;
-
-        @Override
-        public void visitClassType(String name) {
-          currentClass = name;
-          addClass(name);
-        }
-
-        @Override
-        public void visitInnerClassType(String name) {
-          addClass(currentClass + "$" + name);
-        }
-      };
-    }
-
-    @Override
-    public void visit(int version, int access, String name, String signature, String superName, String[] interfaces) {
-      addClass(name);
-
-      if (signature != null) {
-        new SignatureReader(signature).accept(signatureVisitor);
-      } else {
-        addClass(superName);
-        addClasses(interfaces);
-      }
-    }
-
-    @Override
-    public void visitOuterClass(String owner, String name, String desc) {
-      addClass(owner);
-    }
-
-    @Override
-    public AnnotationVisitor visitAnnotation(String desc, boolean visible) {
-      addType(Type.getType(desc));
-      return null;
-    }
-
-    @Override
-    public void visitInnerClass(String name, String outerName, String innerName, int access) {
-      addClass(name);
-    }
-
-    @Override
-    public FieldVisitor visitField(int access, String name, String desc, String signature, Object value) {
-      if (signature != null) {
-        new SignatureReader(signature).acceptType(signatureVisitor);
-      } else {
-        addType(Type.getType(desc));
-      }
-
-      return new FieldVisitor(Opcodes.ASM4) {
-        @Override
-        public AnnotationVisitor visitAnnotation(String desc, boolean visible) {
-          addType(Type.getType(desc));
-          return null;
-        }
-      };
-    }
-
-    @Override
-    public MethodVisitor visitMethod(int access, String name, String desc, String signature, String[] exceptions) {
-      if (signature != null) {
-        new SignatureReader(signature).accept(signatureVisitor);
-      } else {
-        addMethod(desc);
-      }
-      addClasses(exceptions);
-
-      return new MethodVisitor(Opcodes.ASM4) {
-        @Override
-        public AnnotationVisitor visitAnnotation(String desc, boolean visible) {
-          addType(Type.getType(desc));
-          return null;
-        }
-
-        @Override
-        public AnnotationVisitor visitParameterAnnotation(int parameter, String desc, boolean visible) {
-          addType(Type.getType(desc));
-          return null;
-        }
-
-        @Override
-        public void visitTypeInsn(int opcode, String type) {
-          addType(Type.getObjectType(type));
-        }
-
-        @Override
-        public void visitFieldInsn(int opcode, String owner, String name, String desc) {
-          addType(Type.getObjectType(owner));
-          addType(Type.getType(desc));
-        }
-
-        @Override
-        public void visitMethodInsn(int opcode, String owner, String name, String desc) {
-          addType(Type.getObjectType(owner));
-          addMethod(desc);
-        }
-
-        @Override
-        public void visitLdcInsn(Object cst) {
-          if (cst instanceof Type) {
-            addType((Type) cst);
-          }
-        }
-
-        @Override
-        public void visitMultiANewArrayInsn(String desc, int dims) {
-          addType(Type.getType(desc));
-        }
-
-        @Override
-        public void visitLocalVariable(String name, String desc, String signature, Label start, Label end, int index) {
-          if (signature != null) {
-            new SignatureReader(signature).acceptType(signatureVisitor);
-          } else {
-            addType(Type.getType(desc));
-          }
-        }
-      };
-    }
-
-    private void addClass(String internalName) {
-      if (internalName == null || internalName.startsWith("java/")) {
-        return;
-      }
-      acceptor.accept(Type.getObjectType(internalName).getClassName());
-    }
-
-    private void addClasses(String[] classes) {
-      if (classes != null) {
-        for (String clz : classes) {
-          addClass(clz);
-        }
-      }
-    }
-
-    private void addType(Type type) {
-      if (type.getSort() == Type.ARRAY) {
-        type = type.getElementType();
-      }
-      if (type.getSort() == Type.OBJECT) {
-        addClass(type.getInternalName());
-      }
-    }
-
-    private void addMethod(String desc) {
-      addType(Type.getReturnType(desc));
-      for (Type type : Type.getArgumentTypes(desc)) {
-        addType(type);
-      }
-    }
-  }
-
-  private Dependencies() {
-  }
-}
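
A hedged sketch of a typical traversal; MyRunnable is a placeholder class, and java.util.Set,
java.util.HashSet and java.net.URL are assumed to be imported:

  final Set<String> dependencies = new HashSet<String>();
  Dependencies.findClassDependencies(
    MyRunnable.class.getClassLoader(),
    new Dependencies.ClassAcceptor() {
      @Override
      public boolean accept(String className, URL classUrl, URL classPathUrl) {
        dependencies.add(className);
        // Returning false stops the breadth-first search from descending into this class's dependencies.
        return !className.startsWith("org.apache.hadoop.");
      }
    },
    MyRunnable.class.getName());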

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/utils/Instances.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/utils/Instances.java b/core/src/main/java/org/apache/twill/internal/utils/Instances.java
deleted file mode 100644
index 28bfce9..0000000
--- a/core/src/main/java/org/apache/twill/internal/utils/Instances.java
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.utils;
-
-import com.google.common.base.Defaults;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Throwables;
-import com.google.common.reflect.TypeToken;
-
-import java.lang.reflect.Constructor;
-import java.lang.reflect.Field;
-import java.lang.reflect.InvocationTargetException;
-import java.lang.reflect.Method;
-import java.lang.reflect.Modifier;
-
-/**
- * Utility class to help instantiate object instances from a class.
- */
-public final class Instances {
-
-  private static final Object UNSAFE;
-  private static final Method UNSAFE_NEW_INSTANCE;
-
-  static {
-    Object unsafe;
-    Method newInstance;
-    try {
-      Class<?> clz = Class.forName("sun.misc.Unsafe");
-      Field f = clz.getDeclaredField("theUnsafe");
-      f.setAccessible(true);
-      unsafe = f.get(null);
-
-      newInstance = clz.getMethod("allocateInstance", Class.class);
-    } catch (Exception e) {
-      unsafe = null;
-      newInstance = null;
-    }
-    UNSAFE = unsafe;
-    UNSAFE_NEW_INSTANCE = newInstance;
-  }
-
-  /**
-   * Creates a new instance of the given class. It will use the default constructor if it is present.
-   * Otherwise it will try to use {@link sun.misc.Unsafe#allocateInstance(Class)} to create the instance.
-   * @param clz Class of object to be instantiated.
-   * @param <T> Type of the class
-   * @return An instance of type {@code <T>}
-   */
-  @SuppressWarnings("unchecked")
-  public static <T> T newInstance(Class<T> clz) {
-    try {
-      try {
-        Constructor<T> cons = clz.getDeclaredConstructor();
-        if (!cons.isAccessible()) {
-          cons.setAccessible(true);
-        }
-        return cons.newInstance();
-      } catch (Exception e) {
-        // Try to use Unsafe
-        Preconditions.checkState(UNSAFE != null, "Fail to instantiate with Unsafe.");
-        return unsafeCreate(clz);
-      }
-    } catch (Exception e) {
-      throw Throwables.propagate(e);
-    }
-  }
-
-
-  /**
-   * Creates an instance of the given class using Unsafe. It also initializes all fields to their default values.
-   */
-  private static <T> T unsafeCreate(Class<T> clz) throws InvocationTargetException, IllegalAccessException {
-    T instance = (T) UNSAFE_NEW_INSTANCE.invoke(UNSAFE, clz);
-
-    for (TypeToken<?> type : TypeToken.of(clz).getTypes().classes()) {
-      if (Object.class.equals(type.getRawType())) {
-        break;
-      }
-      for (Field field : type.getRawType().getDeclaredFields()) {
-        if (Modifier.isStatic(field.getModifiers())) {
-          continue;
-        }
-        if (!field.isAccessible()) {
-          field.setAccessible(true);
-        }
-        field.set(instance, Defaults.defaultValue(field.getType()));
-      }
-    }
-
-    return instance;
-  }
-
-
-  private Instances() {
-    // Protect instantiation of this class
-  }
-}
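
A small sketch, assuming Config is a placeholder class without an accessible no-arg constructor:

  Config config = Instances.newInstance(Config.class);
  // When the reflective constructor call fails, allocation falls back to Unsafe and every instance
  // field is reset to its Java default value (0, false, null).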

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/utils/Networks.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/utils/Networks.java b/core/src/main/java/org/apache/twill/internal/utils/Networks.java
deleted file mode 100644
index 8e7d736..0000000
--- a/core/src/main/java/org/apache/twill/internal/utils/Networks.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.utils;
-
-import java.io.IOException;
-import java.net.ServerSocket;
-
-/**
- * Utility class for network-related operations.
- */
-public final class Networks {
-
-  /**
-   * Finds a random free port on localhost for binding.
-   * @return A port number or -1 for failure.
-   */
-  public static int getRandomPort() {
-    try {
-      ServerSocket socket = new ServerSocket(0);
-      try {
-        return socket.getLocalPort();
-      } finally {
-        socket.close();
-      }
-    } catch (IOException e) {
-      return -1;
-    }
-  }
-
-  private Networks() {
-  }
-}
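
Typical use, keeping in mind that the probed port is released again before the caller binds to it:

  int port = Networks.getRandomPort();
  if (port < 0) {
    throw new IllegalStateException("Could not find a free port");
  }
  // Another process may still grab the port between this probe and the eventual bind.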

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/utils/Paths.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/utils/Paths.java b/core/src/main/java/org/apache/twill/internal/utils/Paths.java
deleted file mode 100644
index aeee09f..0000000
--- a/core/src/main/java/org/apache/twill/internal/utils/Paths.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.utils;
-
-import com.google.common.io.Files;
-
-/**
- * Utility class for manipulating file name extensions.
- */
-public final class Paths {
-
-
-  public static String appendSuffix(String extractFrom, String appendTo) {
-    String suffix = getExtension(extractFrom);
-    if (!suffix.isEmpty()) {
-      return appendTo + '.' + suffix;
-    }
-    return appendTo;
-  }
-
-  public static String getExtension(String path) {
-    if (path.endsWith(".tar.gz")) {
-      return "tar.gz";
-    }
-
-    return Files.getFileExtension(path);
-  }
-
-  private Paths() {
-  }
-}
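
The behaviour in a nutshell (file names are illustrative):

  Paths.getExtension("bundle.tar.gz");              // "tar.gz", the special-cased double extension
  Paths.getExtension("app.jar");                    // "jar", via Guava's Files.getFileExtension
  Paths.appendSuffix("bundle.tar.gz", "container"); // "container.tar.gz"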

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/kafka/client/FetchException.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/kafka/client/FetchException.java b/core/src/main/java/org/apache/twill/kafka/client/FetchException.java
deleted file mode 100644
index acccf04..0000000
--- a/core/src/main/java/org/apache/twill/kafka/client/FetchException.java
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.kafka.client;
-
-/**
- * Exception thrown when fetching messages from a Kafka broker fails, carrying the broker error code.
- */
-public final class FetchException extends RuntimeException {
-
-  private final ErrorCode errorCode;
-
-  public FetchException(String message, ErrorCode errorCode) {
-    super(message);
-    this.errorCode = errorCode;
-  }
-
-  public ErrorCode getErrorCode() {
-    return errorCode;
-  }
-
-  @Override
-  public String toString() {
-    return String.format("%s. Error code: %s", super.toString(), errorCode);
-  }
-
-  public enum ErrorCode {
-    UNKNOWN(-1),
-    OK(0),
-    OFFSET_OUT_OF_RANGE(1),
-    INVALID_MESSAGE(2),
-    WRONG_PARTITION(3),
-    INVALID_FETCH_SIZE(4);
-
-    private final int code;
-
-    ErrorCode(int code) {
-      this.code = code;
-    }
-
-    public int getCode() {
-      return code;
-    }
-
-    public static ErrorCode fromCode(int code) {
-      switch (code) {
-        case -1:
-          return UNKNOWN;
-        case 0:
-          return OK;
-        case 1:
-          return OFFSET_OUT_OF_RANGE;
-        case 2:
-          return INVALID_MESSAGE;
-        case 3:
-          return WRONG_PARTITION;
-        case 4:
-          return INVALID_FETCH_SIZE;
-      }
-      throw new IllegalArgumentException("Unknown error code");
-    }
-  }
-}
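
A short sketch of how the error code mapping is meant to be used; the topic name is made up:

  FetchException.ErrorCode code = FetchException.ErrorCode.fromCode(1);   // OFFSET_OUT_OF_RANGE
  throw new FetchException("Fetch failed for topic 'logs', partition 0", code);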

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/kafka/client/FetchedMessage.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/kafka/client/FetchedMessage.java b/core/src/main/java/org/apache/twill/kafka/client/FetchedMessage.java
deleted file mode 100644
index 65e140f..0000000
--- a/core/src/main/java/org/apache/twill/kafka/client/FetchedMessage.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.kafka.client;
-
-import java.nio.ByteBuffer;
-
-/**
- * Represents a message fetched from a Kafka broker.
- */
-public interface FetchedMessage {
-
-  /**
-   * Returns the message offset.
-   */
-  long getOffset();
-
-  /**
-   * Returns the message payload.
-   */
-  ByteBuffer getBuffer();
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/kafka/client/KafkaClient.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/kafka/client/KafkaClient.java b/core/src/main/java/org/apache/twill/kafka/client/KafkaClient.java
deleted file mode 100644
index 496195b..0000000
--- a/core/src/main/java/org/apache/twill/kafka/client/KafkaClient.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.kafka.client;
-
-import org.apache.twill.internal.kafka.client.Compression;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.Service;
-
-import java.util.Iterator;
-
-/**
- * This interface provides methods for interacting with a Kafka broker. It also
- * extends {@link Service} for lifecycle management. The {@link #start()} method
- * must be called prior to calling other methods of this interface. When an instance
- * is no longer needed, call {@link #stop()} to release any resources that it holds.
- */
-public interface KafkaClient extends Service {
-
-  PreparePublish preparePublish(String topic, Compression compression);
-
-  Iterator<FetchedMessage> consume(String topic, int partition, long offset, int maxSize);
-
-  /**
-   * Fetches offsets from the given topic and partition.
-   * @param topic Topic to fetch from.
-   * @param partition Partition to fetch from.
-   * @param time Used to fetch the first offset of every segment file for the given partition with a modified
-   *             time less than this value. {@code -1} for the latest offset, {@code -2} for the earliest offset.
-   * @param maxOffsets Maximum number of offsets to fetch.
-   * @return A Future that carries the result as an array of offsets in descending order.
-   *         The size of the result array will not be larger than maxOffsets. If there is any error during the fetch,
-   *         it will be carried as the failure cause of the Future.
-   */
-  ListenableFuture<long[]> getOffset(String topic, int partition, long time, int maxOffsets);
-}
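
A hedged consumption sketch; 'client' stands for an already started KafkaClient implementation, the "logs"
topic is made up, Charsets is Guava's, and exception handling is omitted for brevity:

  long[] offsets = client.getOffset("logs", 0, -2, 1).get();             // -2 requests the earliest offset
  Iterator<FetchedMessage> messages = client.consume("logs", 0, offsets[0], 1024 * 1024);
  while (messages.hasNext()) {
    FetchedMessage message = messages.next();
    System.out.println(message.getOffset() + ": " + Charsets.UTF_8.decode(message.getBuffer()));
  }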

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/kafka/client/PreparePublish.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/kafka/client/PreparePublish.java b/core/src/main/java/org/apache/twill/kafka/client/PreparePublish.java
deleted file mode 100644
index 5db4abb..0000000
--- a/core/src/main/java/org/apache/twill/kafka/client/PreparePublish.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.kafka.client;
-
-import com.google.common.util.concurrent.ListenableFuture;
-
-import java.nio.ByteBuffer;
-
-/**
- * This interface is for preparing to publish a set of messages to Kafka.
- */
-public interface PreparePublish {
-
-  PreparePublish add(byte[] payload, Object partitionKey);
-
-  PreparePublish add(ByteBuffer payload, Object partitionKey);
-
-  ListenableFuture<?> publish();
-}
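
Continuing the sketch above, and assuming the Compression enum offers a NONE value:

  client.preparePublish("logs", Compression.NONE)
        .add("first line".getBytes(Charsets.UTF_8), "key")
        .add("second line".getBytes(Charsets.UTF_8), "key")
        .publish();   // the returned ListenableFuture completes once the batch has been handed off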

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/kafka/client/package-info.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/kafka/client/package-info.java b/core/src/main/java/org/apache/twill/kafka/client/package-info.java
deleted file mode 100644
index ea3bf20..0000000
--- a/core/src/main/java/org/apache/twill/kafka/client/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * This package provides a pure java Kafka client interface.
- */
-package org.apache.twill.kafka.client;

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/launcher/TwillLauncher.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/launcher/TwillLauncher.java b/core/src/main/java/org/apache/twill/launcher/TwillLauncher.java
deleted file mode 100644
index 2c8c1ef..0000000
--- a/core/src/main/java/org/apache/twill/launcher/TwillLauncher.java
+++ /dev/null
@@ -1,236 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.launcher;
-
-import java.io.BufferedOutputStream;
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.InputStreamReader;
-import java.io.OutputStream;
-import java.lang.reflect.Method;
-import java.net.MalformedURLException;
-import java.net.URL;
-import java.net.URLClassLoader;
-import java.nio.charset.Charset;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.List;
-import java.util.Map;
-import java.util.jar.JarEntry;
-import java.util.jar.JarInputStream;
-
-/**
- * A launcher for an application packaged in an archive jar.
- * This class should have no dependencies on any library except the J2SE ones.
- * This class should not import anything except java.*
- */
-public final class TwillLauncher {
-
-  private static final int TEMP_DIR_ATTEMPTS = 20;
-
-  /**
-   * Main method to unpack a jar and run the mainClass.main() method.
-   * @param args args[0] is the path to the jar file, args[1] is the class name of the mainClass,
-   *             args[2] indicates whether to append entries from the "classpath" system resource.
-   *             The rest of args will be passed to the mainClass unmodified.
-   */
-  public static void main(String[] args) throws Exception {
-    if (args.length < 3) {
-      System.out.println("Usage: java " + TwillLauncher.class.getName() + " [jarFile] [mainClass] [use_classpath]");
-      return;
-    }
-
-    File file = new File(args[0]);
-    final File targetDir = createTempDir("twill.launcher");
-
-    Runtime.getRuntime().addShutdownHook(new Thread() {
-      @Override
-      public void run() {
-        System.out.println("Cleanup directory " + targetDir);
-        deleteDir(targetDir);
-      }
-    });
-
-    System.out.println("UnJar " + file + " to " + targetDir);
-    unJar(file, targetDir);
-
-    // Create ClassLoader
-    URLClassLoader classLoader = createClassLoader(targetDir, Boolean.parseBoolean(args[2]));
-    Thread.currentThread().setContextClassLoader(classLoader);
-
-    System.out.println("Launch class with classpath: " + Arrays.toString(classLoader.getURLs()));
-
-    Class<?> mainClass = classLoader.loadClass(args[1]);
-    Method mainMethod = mainClass.getMethod("main", String[].class);
-    String[] arguments = Arrays.copyOfRange(args, 3, args.length);
-    System.out.println("Launching main: " + mainMethod + " " + Arrays.toString(arguments));
-    mainMethod.invoke(mainClass, new Object[]{arguments});
-    System.out.println("Main class completed.");
-
-    System.out.println("Launcher completed");
-  }
-
-  /**
-   * This method is copied from Guava Files.createTempDir().
-   */
-  private static File createTempDir(String prefix) throws IOException {
-    File baseDir = new File(System.getProperty("java.io.tmpdir"));
-    if (!baseDir.isDirectory() && !baseDir.mkdirs()) {
-      throw new IOException("Tmp directory not exists: " + baseDir.getAbsolutePath());
-    }
-
-    String baseName = prefix + "-" + System.currentTimeMillis() + "-";
-
-    for (int counter = 0; counter < TEMP_DIR_ATTEMPTS; counter++) {
-      File tempDir = new File(baseDir, baseName + counter);
-      if (tempDir.mkdir()) {
-        return tempDir;
-      }
-    }
-    throw new IOException("Failed to create directory within "
-                            + TEMP_DIR_ATTEMPTS + " attempts (tried "
-                            + baseName + "0 to " + baseName + (TEMP_DIR_ATTEMPTS - 1) + ')');
-  }
-
-  private static void unJar(File jarFile, File targetDir) throws IOException {
-    JarInputStream jarInput = new JarInputStream(new FileInputStream(jarFile));
-    try {
-      JarEntry jarEntry = jarInput.getNextJarEntry();
-      while (jarEntry != null) {
-        File target = new File(targetDir, jarEntry.getName());
-        if (jarEntry.isDirectory()) {
-          target.mkdirs();
-        } else {
-          target.getParentFile().mkdirs();
-          copy(jarInput, target);
-        }
-        jarEntry = jarInput.getNextJarEntry();
-      }
-    } finally {
-      jarInput.close();
-    }
-  }
-
-  private static void copy(InputStream is, File file) throws IOException {
-    byte[] buf = new byte[8192];
-    OutputStream os = new BufferedOutputStream(new FileOutputStream(file));
-    try {
-      int len = is.read(buf);
-      while (len != -1) {
-        os.write(buf, 0, len);
-        len = is.read(buf);
-      }
-    } finally {
-      os.close();
-    }
-  }
-
-  private static URLClassLoader createClassLoader(File dir, boolean useClassPath) {
-    try {
-      List<URL> urls = new ArrayList<URL>();
-      urls.add(dir.toURI().toURL());
-      urls.add(new File(dir, "classes").toURI().toURL());
-      urls.add(new File(dir, "resources").toURI().toURL());
-
-      File libDir = new File(dir, "lib");
-      File[] files = libDir.listFiles();
-      if (files != null) {
-        for (File file : files) {
-          if (file.getName().endsWith(".jar")) {
-            urls.add(file.toURI().toURL());
-          }
-        }
-      }
-
-      if (useClassPath) {
-        InputStream is = ClassLoader.getSystemResourceAsStream("classpath");
-        if (is != null) {
-          try {
-            BufferedReader reader = new BufferedReader(new InputStreamReader(is, Charset.forName("UTF-8")));
-            String line = reader.readLine();
-            if (line != null) {
-              for (String path : line.split(":")) {
-                urls.addAll(getClassPaths(path));
-              }
-            }
-          } finally {
-            is.close();
-          }
-        }
-      }
-
-      return new URLClassLoader(urls.toArray(new URL[0]));
-
-    } catch (Exception e) {
-      throw new IllegalStateException(e);
-    }
-  }
-
-  private static Collection<URL> getClassPaths(String path) throws MalformedURLException {
-    String classpath = expand(path);
-    if (classpath.endsWith("/*")) {
-      // Grab all .jar files
-      File dir = new File(classpath.substring(0, classpath.length() - 2));
-      File[] files = dir.listFiles();
-      if (files == null || files.length == 0) {
-        return singleItem(dir.toURI().toURL());
-      }
-
-      List<URL> result = new ArrayList<URL>(files.length);
-      for (File file : files) {
-        if (file.getName().endsWith(".jar")) {
-          result.add(file.toURI().toURL());
-        }
-      }
-      return result;
-    } else {
-      return singleItem(new File(classpath).toURI().toURL());
-    }
-  }
-
-  private static Collection<URL> singleItem(URL url) {
-    List<URL> result = new ArrayList<URL>(1);
-    result.add(url);
-    return result;
-  }
-
-  private static String expand(String value) {
-    String result = value;
-    for (Map.Entry<String, String> entry : System.getenv().entrySet()) {
-      result = result.replace("$" + entry.getKey(), entry.getValue());
-      result = result.replace("${" + entry.getKey() + "}", entry.getValue());
-    }
-    return result;
-  }
-
-  private static void deleteDir(File dir) {
-    File[] files = dir.listFiles();
-    if (files == null || files.length == 0) {
-      dir.delete();
-      return;
-    }
-    for (File file : files) {
-      deleteDir(file);
-    }
-    dir.delete();
-  }
-}
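
Programmatically, the launcher's contract boils down to the call below; the jar name, main class and
trailing arguments are placeholders:

  TwillLauncher.main(new String[] {
    "container.jar",          // args[0]: archive to expand into a temporary directory
    "com.example.MainClass",  // args[1]: main class to load from the expanded archive
    "false",                  // args[2]: whether to append entries from the "classpath" system resource
    "--runnable", "worker"    // remaining args are passed to the main class unmodified
  });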

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/resources/kafka-0.7.2.tgz
----------------------------------------------------------------------
diff --git a/core/src/main/resources/kafka-0.7.2.tgz b/core/src/main/resources/kafka-0.7.2.tgz
deleted file mode 100644
index 24178d9..0000000
Binary files a/core/src/main/resources/kafka-0.7.2.tgz and /dev/null differ


[25/28] Making maven site works.

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/ProcessLauncher.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/ProcessLauncher.java b/core/src/main/java/org/apache/twill/internal/ProcessLauncher.java
deleted file mode 100644
index e48a226..0000000
--- a/core/src/main/java/org/apache/twill/internal/ProcessLauncher.java
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal;
-
-import org.apache.twill.api.LocalFile;
-
-import java.util.Map;
-
-/**
- * Interface for launching a container process.
- *
- * @param <T> Type of the object that contains information about the container that the process is going to be launched in.
- */
-public interface ProcessLauncher<T> {
-
-  /**
-   * Returns information about the container in which this launcher would launch the process.
-   */
-  T getContainerInfo();
-
-  /**
-   * Returns a preparer initialized with the given default set of environment variables, resources and credentials.
-   */
-  <C> PrepareLaunchContext prepareLaunch(Map<String, String> environments,
-                                         Iterable<LocalFile> resources, C credentials);
-
-  /**
-   * For setting up the launcher.
-   */
-  interface PrepareLaunchContext {
-
-    ResourcesAdder withResources();
-
-    AfterResources noResources();
-
-    interface ResourcesAdder {
-      MoreResources add(LocalFile localFile);
-    }
-
-    interface AfterResources {
-      EnvironmentAdder withEnvironment();
-
-      AfterEnvironment noEnvironment();
-    }
-
-    interface EnvironmentAdder {
-      <V> MoreEnvironment add(String key, V value);
-    }
-
-    interface MoreEnvironment extends EnvironmentAdder, AfterEnvironment {
-    }
-
-    interface AfterEnvironment {
-      CommandAdder withCommands();
-    }
-
-    interface MoreResources extends ResourcesAdder, AfterResources { }
-
-    interface CommandAdder {
-      StdOutSetter add(String cmd, String...args);
-    }
-
-    interface StdOutSetter {
-      StdErrSetter redirectOutput(String stdout);
-
-      StdErrSetter noOutput();
-    }
-
-    interface StdErrSetter {
-      MoreCommand redirectError(String stderr);
-
-      MoreCommand noError();
-    }
-
-    interface MoreCommand extends CommandAdder {
-      <R> ProcessController<R> launch();
-    }
-  }
-}
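
A sketch of the fluent preparation chain; 'launcher', 'env', 'localFiles' and 'credentials' are placeholders
assumed to be prepared elsewhere:

  ProcessController<Void> controller = launcher.prepareLaunch(env, localFiles, credentials)
    .noResources()
    .withEnvironment()
      .add("TWILL_APP_NAME", "myApp")
    .withCommands()
      .add("java", "-Xmx512m", "com.example.Main")
      .redirectOutput("stdout.log")
      .redirectError("stderr.log")
    .launch();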

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/SingleRunnableApplication.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/SingleRunnableApplication.java b/core/src/main/java/org/apache/twill/internal/SingleRunnableApplication.java
deleted file mode 100644
index a52afe1..0000000
--- a/core/src/main/java/org/apache/twill/internal/SingleRunnableApplication.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal;
-
-import org.apache.twill.api.ResourceSpecification;
-import org.apache.twill.api.TwillApplication;
-import org.apache.twill.api.TwillRunnable;
-import org.apache.twill.api.TwillRunnableSpecification;
-import org.apache.twill.api.TwillSpecification;
-
-/**
- * A simple {@link org.apache.twill.api.TwillApplication} that contains only one {@link org.apache.twill.api.TwillRunnable}.
- */
-public class SingleRunnableApplication implements TwillApplication {
-
-  private final TwillRunnable runnable;
-  private final ResourceSpecification resourceSpec;
-
-  public SingleRunnableApplication(TwillRunnable runnable, ResourceSpecification resourceSpec) {
-    this.runnable = runnable;
-    this.resourceSpec = resourceSpec;
-  }
-
-  @Override
-  public TwillSpecification configure() {
-    TwillRunnableSpecification runnableSpec = runnable.configure();
-    return TwillSpecification.Builder.with()
-      .setName(runnableSpec.getName())
-      .withRunnable().add(runnableSpec.getName(), runnable, resourceSpec)
-      .noLocalFiles()
-      .anyOrder()
-      .build();
-  }
-}
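
For illustration, wrapping a hypothetical EchoServer runnable with a ResourceSpecification built elsewhere:

  TwillApplication app = new SingleRunnableApplication(new EchoServer(), resourceSpec);
  TwillSpecification spec = app.configure();
  // The specification carries a single runnable named after the runnable's own specification,
  // with no local files and no start-up ordering constraints.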

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/TwillContainerController.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/TwillContainerController.java b/core/src/main/java/org/apache/twill/internal/TwillContainerController.java
deleted file mode 100644
index 8b090bd..0000000
--- a/core/src/main/java/org/apache/twill/internal/TwillContainerController.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal;
-
-import org.apache.twill.api.ServiceController;
-import org.apache.twill.internal.state.Message;
-import com.google.common.util.concurrent.ListenableFuture;
-
-/**
- * A {@link ServiceController} that allows sending a message directly. Internal use only.
- */
-public interface TwillContainerController extends ServiceController {
-
-  ListenableFuture<Message> sendMessage(Message message);
-
-  /**
-   * Called to indicate that the container this controller is associated with has completed.
-   * Any resources it holds will be released and all pending futures will be cancelled.
-   */
-  void completed(int exitStatus);
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/TwillContainerLauncher.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/TwillContainerLauncher.java b/core/src/main/java/org/apache/twill/internal/TwillContainerLauncher.java
deleted file mode 100644
index 63f8732..0000000
--- a/core/src/main/java/org/apache/twill/internal/TwillContainerLauncher.java
+++ /dev/null
@@ -1,181 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal;
-
-import org.apache.twill.api.LocalFile;
-import org.apache.twill.api.RunId;
-import org.apache.twill.api.RuntimeSpecification;
-import org.apache.twill.filesystem.Location;
-import org.apache.twill.internal.state.Message;
-import org.apache.twill.internal.state.StateNode;
-import org.apache.twill.launcher.TwillLauncher;
-import org.apache.twill.zookeeper.NodeData;
-import org.apache.twill.zookeeper.ZKClient;
-import com.google.common.util.concurrent.ListenableFuture;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-
-/**
- * This class helps launch a container.
- */
-public final class TwillContainerLauncher {
-
-  private static final Logger LOG = LoggerFactory.getLogger(TwillContainerLauncher.class);
-
-  private static final double HEAP_MIN_RATIO = 0.7d;
-
-  private final RuntimeSpecification runtimeSpec;
-  private final ProcessLauncher.PrepareLaunchContext launchContext;
-  private final ZKClient zkClient;
-  private final int instanceCount;
-  private final String jvmOpts;
-  private final int reservedMemory;
-  private final Location secureStoreLocation;
-
-  public TwillContainerLauncher(RuntimeSpecification runtimeSpec, ProcessLauncher.PrepareLaunchContext launchContext,
-                                ZKClient zkClient, int instanceCount, String jvmOpts, int reservedMemory,
-                                Location secureStoreLocation) {
-    this.runtimeSpec = runtimeSpec;
-    this.launchContext = launchContext;
-    this.zkClient = zkClient;
-    this.instanceCount = instanceCount;
-    this.jvmOpts = jvmOpts;
-    this.reservedMemory = reservedMemory;
-    this.secureStoreLocation = secureStoreLocation;
-  }
-
-  public TwillContainerController start(RunId runId, int instanceId, Class<?> mainClass, String classPath) {
-    ProcessLauncher.PrepareLaunchContext.AfterResources afterResources = null;
-    ProcessLauncher.PrepareLaunchContext.ResourcesAdder resourcesAdder = null;
-
-    // Adds all files to be localized to the container
-    if (!runtimeSpec.getLocalFiles().isEmpty()) {
-      resourcesAdder = launchContext.withResources();
-
-      for (LocalFile localFile : runtimeSpec.getLocalFiles()) {
-        afterResources = resourcesAdder.add(localFile);
-      }
-    }
-
-    // Optionally localize secure store.
-    try {
-      if (secureStoreLocation != null && secureStoreLocation.exists()) {
-        if (resourcesAdder == null) {
-          resourcesAdder = launchContext.withResources();
-        }
-        afterResources = resourcesAdder.add(new DefaultLocalFile(Constants.Files.CREDENTIALS,
-                                                                 secureStoreLocation.toURI(),
-                                                                 secureStoreLocation.lastModified(),
-                                                                 secureStoreLocation.length(), false, null));
-      }
-    } catch (IOException e) {
-      LOG.warn("Failed to launch container with secure store {}.", secureStoreLocation.toURI());
-    }
-
-    if (afterResources == null) {
-      afterResources = launchContext.noResources();
-    }
-
-    int memory = runtimeSpec.getResourceSpecification().getMemorySize();
-    if (((double) (memory - reservedMemory) / memory) >= HEAP_MIN_RATIO) {
-      // Reduce -Xmx by the reserved memory size.
-      memory = runtimeSpec.getResourceSpecification().getMemorySize() - reservedMemory;
-    } else {
-      // If it is a small VM, just discount it by the min ratio.
-      memory = (int) Math.ceil(memory * HEAP_MIN_RATIO);
-    }
-
-    // Currently no reporting is supported for runnable containers
-    ProcessController<Void> processController = afterResources
-      .withEnvironment()
-      .add(EnvKeys.TWILL_RUN_ID, runId.getId())
-      .add(EnvKeys.TWILL_RUNNABLE_NAME, runtimeSpec.getName())
-      .add(EnvKeys.TWILL_INSTANCE_ID, Integer.toString(instanceId))
-      .add(EnvKeys.TWILL_INSTANCE_COUNT, Integer.toString(instanceCount))
-      .withCommands()
-      .add("java",
-           "-Djava.io.tmpdir=tmp",
-           "-Dyarn.container=$" + EnvKeys.YARN_CONTAINER_ID,
-           "-Dtwill.runnable=$" + EnvKeys.TWILL_APP_NAME + ".$" + EnvKeys.TWILL_RUNNABLE_NAME,
-           "-cp", Constants.Files.LAUNCHER_JAR + ":" + classPath,
-           "-Xmx" + memory + "m",
-           jvmOpts,
-           TwillLauncher.class.getName(),
-           Constants.Files.CONTAINER_JAR,
-           mainClass.getName(),
-           Boolean.TRUE.toString())
-      .redirectOutput(Constants.STDOUT).redirectError(Constants.STDERR)
-      .launch();
-
-    TwillContainerControllerImpl controller = new TwillContainerControllerImpl(zkClient, runId, processController);
-    controller.start();
-    return controller;
-  }
-
-  private static final class TwillContainerControllerImpl extends AbstractZKServiceController
-                                                          implements TwillContainerController {
-
-    private final ProcessController<Void> processController;
-
-    protected TwillContainerControllerImpl(ZKClient zkClient, RunId runId,
-                                           ProcessController<Void> processController) {
-      super(runId, zkClient);
-      this.processController = processController;
-    }
-
-    @Override
-    protected void doStartUp() {
-      // No-op
-    }
-
-    @Override
-    protected void doShutDown() {
-      // No-op
-    }
-
-    @Override
-    protected void instanceNodeUpdated(NodeData nodeData) {
-      // No-op
-    }
-
-    @Override
-    protected void stateNodeUpdated(StateNode stateNode) {
-      // No-op
-    }
-
-    @Override
-    public ListenableFuture<Message> sendMessage(Message message) {
-      return sendMessage(message, message);
-    }
-
-    @Override
-    public synchronized void completed(int exitStatus) {
-      if (exitStatus != 0) {  // If a container terminated with exit code != 0, treat it as error
-//        fireStateChange(new StateNode(State.FAILED, new StackTraceElement[0]));
-      }
-      forceShutDown();
-    }
-
-    @Override
-    public void kill() {
-      processController.cancel();
-    }
-  }
-}

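The -Xmx calculation in start() above is easy to misread, so here it is restated as a standalone method with the same constant and two worked values; the method name is mine, the arithmetic is the launcher's.

public final class HeapSizeExample {

  private static final double HEAP_MIN_RATIO = 0.7d;

  // Same arithmetic as TwillContainerLauncher.start(): subtract the reserved memory
  // when the remainder is still at least 70% of the container, otherwise keep 70%.
  static int heapSizeMB(int containerMB, int reservedMB) {
    if (((double) (containerMB - reservedMB) / containerMB) >= HEAP_MIN_RATIO) {
      return containerMB - reservedMB;
    }
    return (int) Math.ceil(containerMB * HEAP_MIN_RATIO);
  }

  public static void main(String[] args) {
    System.out.println(heapSizeMB(1024, 200));  // 824: large enough to just subtract
    System.out.println(heapSizeMB(256, 200));   // 180: small VM, discounted by the ratio
  }
}
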
http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/ZKMessages.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/ZKMessages.java b/core/src/main/java/org/apache/twill/internal/ZKMessages.java
deleted file mode 100644
index 03575dd..0000000
--- a/core/src/main/java/org/apache/twill/internal/ZKMessages.java
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal;
-
-import org.apache.twill.internal.state.Message;
-import org.apache.twill.internal.state.MessageCodec;
-import org.apache.twill.zookeeper.ZKClient;
-import org.apache.twill.zookeeper.ZKOperations;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.SettableFuture;
-import org.apache.zookeeper.CreateMode;
-
-/**
- *
- */
-public final class ZKMessages {
-
-  /**
-   * Creates a message node in zookeeper. The message node created is a PERSISTENT_SEQUENTIAL node.
-   *
-   * @param zkClient The ZooKeeper client for interacting with ZooKeeper.
-   * @param messagePathPrefix ZooKeeper path prefix for the message node.
-   * @param message The {@link Message} object for the content of the message node.
-   * @param completionResult Object to set to the result future when the message is processed.
-   * @param <V> Type of the completion result.
-   * @return A {@link ListenableFuture} that will be completed when the message is consumed, which is indicated
-   *         by deletion of the node. If an exception occurs during processing, it will be reflected
-   *         in the returned future.
-   */
-  public static <V> ListenableFuture<V> sendMessage(final ZKClient zkClient, String messagePathPrefix,
-                                                    Message message, final V completionResult) {
-    SettableFuture<V> result = SettableFuture.create();
-    sendMessage(zkClient, messagePathPrefix, message, result, completionResult);
-    return result;
-  }
-
-  /**
-   * Creates a message node in zookeeper. The message node created is a PERSISTENT_SEQUENTIAL node.
-   *
-   * @param zkClient The ZooKeeper client for interacting with ZooKeeper.
-   * @param messagePathPrefix ZooKeeper path prefix for the message node.
-   * @param message The {@link Message} object for the content of the message node.
-   * @param completion A {@link SettableFuture} to reflect the result of message process completion.
-   * @param completionResult Object to set to the result future when the message is processed.
-   * @param <V> Type of the completion result.
-   */
-  public static <V> void sendMessage(final ZKClient zkClient, String messagePathPrefix, Message message,
-                                     final SettableFuture<V> completion, final V completionResult) {
-
-    // Creates a message and watch for its deletion for completion.
-    Futures.addCallback(zkClient.create(messagePathPrefix, MessageCodec.encode(message),
-                                        CreateMode.PERSISTENT_SEQUENTIAL), new FutureCallback<String>() {
-      @Override
-      public void onSuccess(String path) {
-        Futures.addCallback(ZKOperations.watchDeleted(zkClient, path), new FutureCallback<String>() {
-          @Override
-          public void onSuccess(String result) {
-            completion.set(completionResult);
-          }
-
-          @Override
-          public void onFailure(Throwable t) {
-            completion.setException(t);
-          }
-        });
-      }
-
-      @Override
-      public void onFailure(Throwable t) {
-        completion.setException(t);
-      }
-    });
-  }
-
-  private ZKMessages() {
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/ZKServiceDecorator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/ZKServiceDecorator.java b/core/src/main/java/org/apache/twill/internal/ZKServiceDecorator.java
deleted file mode 100644
index 7313d33..0000000
--- a/core/src/main/java/org/apache/twill/internal/ZKServiceDecorator.java
+++ /dev/null
@@ -1,482 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal;
-
-import org.apache.twill.api.RunId;
-import org.apache.twill.api.ServiceController;
-import org.apache.twill.common.ServiceListenerAdapter;
-import org.apache.twill.common.Threads;
-import org.apache.twill.internal.json.StackTraceElementCodec;
-import org.apache.twill.internal.json.StateNodeCodec;
-import org.apache.twill.internal.state.Message;
-import org.apache.twill.internal.state.MessageCallback;
-import org.apache.twill.internal.state.MessageCodec;
-import org.apache.twill.internal.state.StateNode;
-import org.apache.twill.internal.state.SystemMessages;
-import org.apache.twill.zookeeper.NodeChildren;
-import org.apache.twill.zookeeper.NodeData;
-import org.apache.twill.zookeeper.OperationFuture;
-import org.apache.twill.zookeeper.ZKClient;
-import org.apache.twill.zookeeper.ZKOperations;
-import com.google.common.base.Charsets;
-import com.google.common.base.Supplier;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Lists;
-import com.google.common.util.concurrent.AbstractService;
-import com.google.common.util.concurrent.AsyncFunction;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.MoreExecutors;
-import com.google.common.util.concurrent.Service;
-import com.google.gson.Gson;
-import com.google.gson.GsonBuilder;
-import com.google.gson.JsonElement;
-import com.google.gson.JsonObject;
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.WatchedEvent;
-import org.apache.zookeeper.Watcher;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.annotation.Nullable;
-import java.util.Collections;
-import java.util.List;
-import java.util.concurrent.Executor;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-
-/**
- * A {@link Service} decorator that wraps another {@link Service} with the service states reflected
- * to ZooKeeper.
- */
-public final class ZKServiceDecorator extends AbstractService {
-
-  private static final Logger LOG = LoggerFactory.getLogger(ZKServiceDecorator.class);
-
-  private final ZKClient zkClient;
-  private final RunId id;
-  private final Supplier<? extends JsonElement> liveNodeData;
-  private final Service decoratedService;
-  private final MessageCallbackCaller messageCallback;
-  private ExecutorService callbackExecutor;
-
-
-  public ZKServiceDecorator(ZKClient zkClient, RunId id, Supplier<? extends JsonElement> liveNodeData,
-                            Service decoratedService) {
-    this(zkClient, id, liveNodeData, decoratedService, null);
-  }
-
-  /**
-   * Creates a ZKServiceDecorator.
-   * @param zkClient ZooKeeper client
-   * @param id The run id of the service
-   * @param liveNodeData A supplier that provides the information written to the live node.
-   * @param decoratedService The Service to monitor for state changes.
-   * @param finalizer An optional Runnable to run when this decorator terminates.
-   */
-  public ZKServiceDecorator(ZKClient zkClient, RunId id, Supplier <? extends JsonElement> liveNodeData,
-                            Service decoratedService, @Nullable Runnable finalizer) {
-    this.zkClient = zkClient;
-    this.id = id;
-    this.liveNodeData = liveNodeData;
-    this.decoratedService = decoratedService;
-    if (decoratedService instanceof MessageCallback) {
-      this.messageCallback = new MessageCallbackCaller((MessageCallback) decoratedService, zkClient);
-    } else {
-      this.messageCallback = new MessageCallbackCaller(zkClient);
-    }
-    if (finalizer != null) {
-      addFinalizer(finalizer);
-    }
-  }
-
-  /**
-   * Deletes the given ZK path recursively and create the path again.
-   */
-  private ListenableFuture<String> deleteAndCreate(final String path, final byte[] data, final CreateMode mode) {
-    return Futures.transform(ZKOperations.ignoreError(ZKOperations.recursiveDelete(zkClient, path),
-                                                      KeeperException.NoNodeException.class, null),
-                             new AsyncFunction<String, String>() {
-      @Override
-      public ListenableFuture<String> apply(String input) throws Exception {
-        return zkClient.create(path, data, mode);
-      }
-    }, Threads.SAME_THREAD_EXECUTOR);
-  }
-
-  @Override
-  protected void doStart() {
-    callbackExecutor = Executors.newSingleThreadExecutor(Threads.createDaemonThreadFactory("message-callback"));
-    Futures.addCallback(createLiveNode(), new FutureCallback<String>() {
-      @Override
-      public void onSuccess(String result) {
-        // Create nodes for states and messaging
-        StateNode stateNode = new StateNode(ServiceController.State.STARTING);
-
-        final ListenableFuture<List<String>> createFuture = Futures.allAsList(
-          deleteAndCreate(getZKPath("messages"), null, CreateMode.PERSISTENT),
-          deleteAndCreate(getZKPath("state"), encodeStateNode(stateNode), CreateMode.PERSISTENT)
-        );
-
-        createFuture.addListener(new Runnable() {
-          @Override
-          public void run() {
-            try {
-              createFuture.get();
-              // Starts the decorated service
-              decoratedService.addListener(createListener(), Threads.SAME_THREAD_EXECUTOR);
-              decoratedService.start();
-            } catch (Exception e) {
-              notifyFailed(e);
-            }
-          }
-        }, Threads.SAME_THREAD_EXECUTOR);
-      }
-
-      @Override
-      public void onFailure(Throwable t) {
-        notifyFailed(t);
-      }
-    });
-  }
-
-  @Override
-  protected void doStop() {
-    // Stops the decorated service
-    decoratedService.stop();
-    callbackExecutor.shutdownNow();
-  }
-
-  private void addFinalizer(final Runnable finalizer) {
-    addListener(new ServiceListenerAdapter() {
-      @Override
-      public void terminated(State from) {
-        try {
-          finalizer.run();
-        } catch (Throwable t) {
-          LOG.warn("Exception when running finalizer.", t);
-        }
-      }
-
-      @Override
-      public void failed(State from, Throwable failure) {
-        try {
-          finalizer.run();
-        } catch (Throwable t) {
-          LOG.warn("Exception when running finalizer.", t);
-        }
-      }
-    }, Threads.SAME_THREAD_EXECUTOR);
-  }
-
-  private OperationFuture<String> createLiveNode() {
-    String liveNode = getLiveNodePath();
-    LOG.info("Create live node {}{}", zkClient.getConnectString(), liveNode);
-
-    JsonObject content = new JsonObject();
-    content.add("data", liveNodeData.get());
-    return ZKOperations.ignoreError(zkClient.create(liveNode, encodeJson(content), CreateMode.EPHEMERAL),
-                                    KeeperException.NodeExistsException.class, liveNode);
-  }
-
-  private OperationFuture<String> removeLiveNode() {
-    String liveNode = getLiveNodePath();
-    LOG.info("Remove live node {}{}", zkClient.getConnectString(), liveNode);
-    return ZKOperations.ignoreError(zkClient.delete(liveNode), KeeperException.NoNodeException.class, liveNode);
-  }
-
-  private OperationFuture<String> removeServiceNode() {
-    String serviceNode = String.format("/%s", id.getId());
-    LOG.info("Remove service node {}{}", zkClient.getConnectString(), serviceNode);
-    return ZKOperations.recursiveDelete(zkClient, serviceNode);
-  }
-
-  private void watchMessages() {
-    final String messagesPath = getZKPath("messages");
-    Futures.addCallback(zkClient.getChildren(messagesPath, new Watcher() {
-      @Override
-      public void process(WatchedEvent event) {
-        // TODO: Do we need to deal with other type of events?
-        if (event.getType() == Event.EventType.NodeChildrenChanged && decoratedService.isRunning()) {
-          watchMessages();
-        }
-      }
-    }), new FutureCallback<NodeChildren>() {
-      @Override
-      public void onSuccess(NodeChildren result) {
-        // Sort by the name, which is the messageId. Assumption is that message ids are ordered by time.
-        List<String> messages = Lists.newArrayList(result.getChildren());
-        Collections.sort(messages);
-        for (String messageId : messages) {
-          processMessage(messagesPath + "/" + messageId, messageId);
-        }
-      }
-
-      @Override
-      public void onFailure(Throwable t) {
-        // TODO: what could be done besides just logging?
-        LOG.error("Failed to watch messages.", t);
-      }
-    });
-  }
-
-  private void processMessage(final String path, final String messageId) {
-    Futures.addCallback(zkClient.getData(path), new FutureCallback<NodeData>() {
-      @Override
-      public void onSuccess(NodeData result) {
-        Message message = MessageCodec.decode(result.getData());
-        if (message == null) {
-          LOG.error("Failed to decode message for " + messageId + " in " + path);
-          listenFailure(zkClient.delete(path, result.getStat().getVersion()));
-          return;
-        }
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Message received from " + path + ": " + new String(MessageCodec.encode(message), Charsets.UTF_8));
-        }
-        if (handleStopMessage(message, getDeleteSupplier(path, result.getStat().getVersion()))) {
-          return;
-        }
-        messageCallback.onReceived(callbackExecutor, path, result.getStat().getVersion(), messageId, message);
-      }
-
-      @Override
-      public void onFailure(Throwable t) {
-        LOG.error("Failed to fetch message content.", t);
-      }
-    });
-  }
-
-  private <V> boolean handleStopMessage(Message message, final Supplier<OperationFuture<V>> postHandleSupplier) {
-    if (message.getType() == Message.Type.SYSTEM && SystemMessages.STOP_COMMAND.equals(message.getCommand())) {
-      callbackExecutor.execute(new Runnable() {
-        @Override
-        public void run() {
-          decoratedService.stop().addListener(new Runnable() {
-
-            @Override
-            public void run() {
-              stopServiceOnComplete(postHandleSupplier.get(), ZKServiceDecorator.this);
-            }
-          }, MoreExecutors.sameThreadExecutor());
-        }
-      });
-      return true;
-    }
-    return false;
-  }
-
-
-  private Supplier<OperationFuture<String>> getDeleteSupplier(final String path, final int version) {
-    return new Supplier<OperationFuture<String>>() {
-      @Override
-      public OperationFuture<String> get() {
-        return zkClient.delete(path, version);
-      }
-    };
-  }
-
-  private Listener createListener() {
-    return new DecoratedServiceListener();
-  }
-
-  private <V> byte[] encode(V data, Class<? extends V> clz) {
-    return new GsonBuilder().registerTypeAdapter(StateNode.class, new StateNodeCodec())
-                            .registerTypeAdapter(StackTraceElement.class, new StackTraceElementCodec())
-                            .create()
-      .toJson(data, clz).getBytes(Charsets.UTF_8);
-  }
-
-  private byte[] encodeStateNode(StateNode stateNode) {
-    return encode(stateNode, StateNode.class);
-  }
-
-  private <V extends JsonElement> byte[] encodeJson(V json) {
-    return new Gson().toJson(json).getBytes(Charsets.UTF_8);
-  }
-
-  private String getZKPath(String path) {
-    return String.format("/%s/%s", id, path);
-  }
-
-  private String getLiveNodePath() {
-    return "/instances/" + id;
-  }
-
-  private static <V> OperationFuture<V> listenFailure(final OperationFuture<V> operationFuture) {
-    operationFuture.addListener(new Runnable() {
-
-      @Override
-      public void run() {
-        try {
-          if (!operationFuture.isCancelled()) {
-            operationFuture.get();
-          }
-        } catch (Exception e) {
-          // TODO: what could be done besides just logging?
-          LOG.error("Operation execution failed for " + operationFuture.getRequestPath(), e);
-        }
-      }
-    }, Threads.SAME_THREAD_EXECUTOR);
-    return operationFuture;
-  }
-
-  private static final class MessageCallbackCaller {
-    private final MessageCallback callback;
-    private final ZKClient zkClient;
-
-    private MessageCallbackCaller(ZKClient zkClient) {
-      this(null, zkClient);
-    }
-
-    private MessageCallbackCaller(MessageCallback callback, ZKClient zkClient) {
-      this.callback = callback;
-      this.zkClient = zkClient;
-    }
-
-    public void onReceived(Executor executor, final String path,
-                           final int version, final String id, final Message message) {
-      if (callback == null) {
-        // Simply delete the message
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Ignoring incoming message from " + path + ": " + message);
-        }
-        listenFailure(zkClient.delete(path, version));
-        return;
-      }
-
-      executor.execute(new Runnable() {
-        @Override
-        public void run() {
-          try {
-            // Message processing is synchronous for now. Making it async needs more thought about race conditions.
-            // The executor is the callbackExecutor which is a single thread executor.
-            callback.onReceived(id, message).get();
-          } catch (Throwable t) {
-            LOG.error("Exception when processing message: {}, {}, {}", id, message, path, t);
-          } finally {
-            listenFailure(zkClient.delete(path, version));
-          }
-        }
-      });
-    }
-  }
-
-  private final class DecoratedServiceListener implements Listener {
-    private volatile boolean zkFailure = false;
-
-    @Override
-    public void starting() {
-      LOG.info("Starting: " + id);
-      saveState(ServiceController.State.STARTING);
-    }
-
-    @Override
-    public void running() {
-      LOG.info("Running: " + id);
-      notifyStarted();
-      watchMessages();
-      saveState(ServiceController.State.RUNNING);
-    }
-
-    @Override
-    public void stopping(State from) {
-      LOG.info("Stopping: " + id);
-      saveState(ServiceController.State.STOPPING);
-    }
-
-    @Override
-    public void terminated(State from) {
-      LOG.info("Terminated: " + from + " " + id);
-      if (zkFailure) {
-        return;
-      }
-
-      ImmutableList<OperationFuture<String>> futures = ImmutableList.of(removeLiveNode(), removeServiceNode());
-      final ListenableFuture<List<String>> future = Futures.allAsList(futures);
-      Futures.successfulAsList(futures).addListener(new Runnable() {
-        @Override
-        public void run() {
-          try {
-            future.get();
-            LOG.info("Service and state node removed");
-            notifyStopped();
-          } catch (Exception e) {
-            LOG.warn("Failed to remove ZK nodes.", e);
-            notifyFailed(e);
-          }
-        }
-      }, Threads.SAME_THREAD_EXECUTOR);
-    }
-
-    @Override
-    public void failed(State from, final Throwable failure) {
-      LOG.info("Failed: {} {}.", from, id, failure);
-      if (zkFailure) {
-        return;
-      }
-
-      ImmutableList<OperationFuture<String>> futures = ImmutableList.of(removeLiveNode(), removeServiceNode());
-      Futures.successfulAsList(futures).addListener(new Runnable() {
-        @Override
-        public void run() {
-          LOG.info("Service and state node removed");
-          notifyFailed(failure);
-        }
-      }, Threads.SAME_THREAD_EXECUTOR);
-    }
-
-    private void saveState(ServiceController.State state) {
-      if (zkFailure) {
-        return;
-      }
-      StateNode stateNode = new StateNode(state);
-      stopOnFailure(zkClient.setData(getZKPath("state"), encodeStateNode(stateNode)));
-    }
-
-    private <V> void stopOnFailure(final OperationFuture<V> future) {
-      future.addListener(new Runnable() {
-        @Override
-        public void run() {
-          try {
-            future.get();
-          } catch (final Exception e) {
-            LOG.error("ZK operation failed", e);
-            zkFailure = true;
-            decoratedService.stop().addListener(new Runnable() {
-              @Override
-              public void run() {
-                notifyFailed(e);
-              }
-            }, Threads.SAME_THREAD_EXECUTOR);
-          }
-        }
-      }, Threads.SAME_THREAD_EXECUTOR);
-    }
-  }
-
-  private <V> ListenableFuture<State> stopServiceOnComplete(ListenableFuture <V> future, final Service service) {
-    return Futures.transform(future, new AsyncFunction<V, State>() {
-      @Override
-      public ListenableFuture<State> apply(V input) throws Exception {
-        return service.stop();
-      }
-    });
-  }
-}

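A sketch of how the removed decorator is typically wired: wrap any Guava Service so its lifecycle, state node and messages are mirrored to ZooKeeper under a RunId. The connect string and the idle-service body are placeholders, and ZKClientService.Builder and RunIds.generate() are assumed to behave as they do elsewhere in the code base.

import com.google.common.base.Suppliers;
import com.google.common.util.concurrent.AbstractIdleService;
import com.google.common.util.concurrent.Service;
import com.google.gson.JsonObject;
import org.apache.twill.api.RunId;
import org.apache.twill.internal.RunIds;
import org.apache.twill.internal.ZKServiceDecorator;
import org.apache.twill.zookeeper.ZKClientService;

public final class DecoratorExample {

  public static void main(String[] args) {
    ZKClientService zkClient = ZKClientService.Builder.of("localhost:2181").build();
    zkClient.startAndWait();

    // The service being decorated; its state is written to /<runId>/state, messages are
    // consumed from /<runId>/messages, and a live node is kept under /instances.
    Service worker = new AbstractIdleService() {
      @Override
      protected void startUp() throws Exception { }
      @Override
      protected void shutDown() throws Exception { }
    };

    RunId runId = RunIds.generate();
    ZKServiceDecorator decorated =
      new ZKServiceDecorator(zkClient, runId, Suppliers.ofInstance(new JsonObject()), worker);

    decorated.startAndWait();
    decorated.stopAndWait();
    zkClient.stopAndWait();
  }
}
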
http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/json/ArgumentsCodec.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/json/ArgumentsCodec.java b/core/src/main/java/org/apache/twill/internal/json/ArgumentsCodec.java
deleted file mode 100644
index 07d4c1d..0000000
--- a/core/src/main/java/org/apache/twill/internal/json/ArgumentsCodec.java
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.json;
-
-import org.apache.twill.internal.Arguments;
-import com.google.common.collect.ImmutableMultimap;
-import com.google.common.io.InputSupplier;
-import com.google.common.io.OutputSupplier;
-import com.google.common.reflect.TypeToken;
-import com.google.gson.Gson;
-import com.google.gson.GsonBuilder;
-import com.google.gson.JsonDeserializationContext;
-import com.google.gson.JsonDeserializer;
-import com.google.gson.JsonElement;
-import com.google.gson.JsonObject;
-import com.google.gson.JsonParseException;
-import com.google.gson.JsonSerializationContext;
-import com.google.gson.JsonSerializer;
-
-import java.io.IOException;
-import java.io.Reader;
-import java.io.Writer;
-import java.lang.reflect.Type;
-import java.util.Collection;
-import java.util.List;
-import java.util.Map;
-
-/**
- *
- */
-public final class ArgumentsCodec implements JsonSerializer<Arguments>, JsonDeserializer<Arguments> {
-
-  private static final Gson GSON = new GsonBuilder().registerTypeAdapter(Arguments.class, new ArgumentsCodec())
-                                                    .create();
-
-  public static void encode(Arguments arguments, OutputSupplier<? extends Writer> writerSupplier) throws IOException {
-    Writer writer = writerSupplier.getOutput();
-    try {
-      GSON.toJson(arguments, writer);
-    } finally {
-      writer.close();
-    }
-  }
-
-
-  public static Arguments decode(InputSupplier<? extends Reader> readerSupplier) throws IOException {
-    Reader reader = readerSupplier.getInput();
-    try {
-      return GSON.fromJson(reader, Arguments.class);
-    } finally {
-      reader.close();
-    }
-  }
-
-  @Override
-  public JsonElement serialize(Arguments src, Type typeOfSrc,
-                               JsonSerializationContext context) {
-    JsonObject json = new JsonObject();
-    json.add("arguments", context.serialize(src.getArguments()));
-    json.add("runnableArguments", context.serialize(src.getRunnableArguments().asMap()));
-
-    return json;
-  }
-
-  @Override
-  public Arguments deserialize(JsonElement json, Type typeOfT,
-                              JsonDeserializationContext context) throws JsonParseException {
-    JsonObject jsonObj = json.getAsJsonObject();
-    List<String> arguments = context.deserialize(jsonObj.get("arguments"), new TypeToken<List<String>>() {}.getType());
-    Map<String, Collection<String>> args = context.deserialize(jsonObj.get("runnableArguments"),
-                                                               new TypeToken<Map<String, Collection<String>>>(){
-                                                               }.getType());
-
-    ImmutableMultimap.Builder<String, String> builder = ImmutableMultimap.builder();
-    for (Map.Entry<String, Collection<String>> entry : args.entrySet()) {
-      builder.putAll(entry.getKey(), entry.getValue());
-    }
-    return new Arguments(arguments, builder.build());
-  }
-}
\ No newline at end of file

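The two static helpers above are the whole public surface of ArgumentsCodec. A round trip through an in-memory buffer might look like this; the Arguments constructor arguments mirror what deserialize() rebuilds and are an assumption about the Arguments class, which is not shown in this diff.

import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMultimap;
import com.google.common.io.InputSupplier;
import com.google.common.io.OutputSupplier;
import org.apache.twill.internal.Arguments;
import org.apache.twill.internal.json.ArgumentsCodec;

import java.io.IOException;
import java.io.Reader;
import java.io.StringReader;
import java.io.StringWriter;
import java.io.Writer;

public final class ArgumentsCodecExample {

  public static void main(String[] args) throws IOException {
    final StringWriter buffer = new StringWriter();
    Arguments original = new Arguments(ImmutableList.of("--verbose"),
                                       ImmutableMultimap.of("worker", "--threads=4"));

    // encode() closes the writer it obtains from the supplier.
    ArgumentsCodec.encode(original, new OutputSupplier<Writer>() {
      @Override
      public Writer getOutput() {
        return buffer;
      }
    });

    Arguments decoded = ArgumentsCodec.decode(new InputSupplier<Reader>() {
      @Override
      public Reader getInput() {
        return new StringReader(buffer.toString());
      }
    });
    System.out.println(decoded.getArguments());
  }
}
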
http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/json/JsonUtils.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/json/JsonUtils.java b/core/src/main/java/org/apache/twill/internal/json/JsonUtils.java
deleted file mode 100644
index 9556ad8..0000000
--- a/core/src/main/java/org/apache/twill/internal/json/JsonUtils.java
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.json;
-
-import com.google.gson.JsonElement;
-import com.google.gson.JsonObject;
-
-/**
- * Collection of helper functions for JSON codecs.
- */
-public final class JsonUtils {
-
-  private JsonUtils() {
-  }
-
-  /**
-   * Returns a String representation of the given property.
-   */
-  public static String getAsString(JsonObject json, String property) {
-    JsonElement jsonElement = json.get(property);
-    if (jsonElement.isJsonNull()) {
-      return null;
-    }
-    if (jsonElement.isJsonPrimitive()) {
-      return jsonElement.getAsString();
-    }
-    return jsonElement.toString();
-  }
-
-  /**
-   * Returns a long representation of the given property.
-   */
-  public static long getAsLong(JsonObject json, String property, long defaultValue) {
-    try {
-      return json.get(property).getAsLong();
-    } catch (Exception e) {
-      return defaultValue;
-    }
-  }
-
-  /**
-   * Returns an int representation of the given property.
-   */
-  public static int getAsInt(JsonObject json, String property, int defaultValue) {
-    try {
-      return json.get(property).getAsInt();
-    } catch (Exception e) {
-      return defaultValue;
-    }
-  }
-}

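The defaulting behaviour above is the whole point of these helpers; a quick illustration, assuming only Gson on the classpath:

import com.google.gson.JsonObject;
import org.apache.twill.internal.json.JsonUtils;

public final class JsonUtilsExample {

  public static void main(String[] args) {
    JsonObject json = new JsonObject();
    json.addProperty("name", "worker");
    json.addProperty("memory", 512);

    System.out.println(JsonUtils.getAsString(json, "name"));      // worker
    System.out.println(JsonUtils.getAsLong(json, "memory", -1));  // 512
    System.out.println(JsonUtils.getAsInt(json, "instances", 1)); // missing property -> default 1
  }
}
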
http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/json/LocalFileCodec.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/json/LocalFileCodec.java b/core/src/main/java/org/apache/twill/internal/json/LocalFileCodec.java
deleted file mode 100644
index 680a36c..0000000
--- a/core/src/main/java/org/apache/twill/internal/json/LocalFileCodec.java
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.json;
-
-import org.apache.twill.api.LocalFile;
-import org.apache.twill.internal.DefaultLocalFile;
-import com.google.gson.JsonDeserializationContext;
-import com.google.gson.JsonDeserializer;
-import com.google.gson.JsonElement;
-import com.google.gson.JsonObject;
-import com.google.gson.JsonParseException;
-import com.google.gson.JsonSerializationContext;
-import com.google.gson.JsonSerializer;
-
-import java.lang.reflect.Type;
-import java.net.URI;
-
-/**
- *
- */
-public final class LocalFileCodec implements JsonSerializer<LocalFile>, JsonDeserializer<LocalFile> {
-
-  @Override
-  public JsonElement serialize(LocalFile src, Type typeOfSrc, JsonSerializationContext context) {
-    JsonObject json = new JsonObject();
-
-    json.addProperty("name", src.getName());
-    json.addProperty("uri", src.getURI().toASCIIString());
-    json.addProperty("lastModified", src.getLastModified());
-    json.addProperty("size", src.getSize());
-    json.addProperty("archive", src.isArchive());
-    json.addProperty("pattern", src.getPattern());
-
-    return json;
-  }
-
-  @Override
-  public LocalFile deserialize(JsonElement json, Type typeOfT,
-                               JsonDeserializationContext context) throws JsonParseException {
-    JsonObject jsonObj = json.getAsJsonObject();
-
-    String name = jsonObj.get("name").getAsString();
-    URI uri = URI.create(jsonObj.get("uri").getAsString());
-    long lastModified = jsonObj.get("lastModified").getAsLong();
-    long size = jsonObj.get("size").getAsLong();
-    boolean archive = jsonObj.get("archive").getAsBoolean();
-    JsonElement pattern = jsonObj.get("pattern");
-
-    return new DefaultLocalFile(name, uri, lastModified, size,
-                                archive, (pattern == null || pattern.isJsonNull()) ? null : pattern.getAsString());
-  }
-}

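Like the other codecs in this package, LocalFileCodec is meant to be registered as a Gson type adapter. A round-trip sketch using the DefaultLocalFile constructor seen earlier in this diff; the URI and sizes are made up.

import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
import org.apache.twill.api.LocalFile;
import org.apache.twill.internal.DefaultLocalFile;
import org.apache.twill.internal.json.LocalFileCodec;

import java.net.URI;

public final class LocalFileCodecExample {

  public static void main(String[] args) {
    Gson gson = new GsonBuilder()
      .registerTypeAdapter(LocalFile.class, new LocalFileCodec())
      .create();

    LocalFile file = new DefaultLocalFile("app.jar", URI.create("hdfs:///tmp/app.jar"),
                                          System.currentTimeMillis(), 1024L, false, null);

    String json = gson.toJson(file, LocalFile.class);
    LocalFile back = gson.fromJson(json, LocalFile.class);
    System.out.println(back.getURI());
  }
}
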
http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/json/ResourceReportAdapter.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/json/ResourceReportAdapter.java b/core/src/main/java/org/apache/twill/internal/json/ResourceReportAdapter.java
deleted file mode 100644
index e473fe7..0000000
--- a/core/src/main/java/org/apache/twill/internal/json/ResourceReportAdapter.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.json;
-
-import org.apache.twill.api.ResourceReport;
-import org.apache.twill.api.TwillRunResources;
-import com.google.gson.Gson;
-import com.google.gson.GsonBuilder;
-
-import java.io.Reader;
-import java.io.Writer;
-
-/**
- * This class provides utilities to help encode/decode {@link ResourceReport} to/from JSON.
- */
-public final class ResourceReportAdapter {
-
-  private final Gson gson;
-
-  public static ResourceReportAdapter create() {
-    return new ResourceReportAdapter();
-  }
-
-  private ResourceReportAdapter() {
-    gson = new GsonBuilder()
-              .serializeNulls()
-              .registerTypeAdapter(TwillRunResources.class, new TwillRunResourcesCodec())
-              .registerTypeAdapter(ResourceReport.class, new ResourceReportCodec())
-              .create();
-  }
-
-  public String toJson(ResourceReport report) {
-    return gson.toJson(report, ResourceReport.class);
-  }
-
-  public void toJson(ResourceReport report, Writer writer) {
-    gson.toJson(report, ResourceReport.class, writer);
-  }
-
-  public ResourceReport fromJson(String json) {
-    return gson.fromJson(json, ResourceReport.class);
-  }
-
-  public ResourceReport fromJson(Reader reader) {
-    return gson.fromJson(reader, ResourceReport.class);
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/json/ResourceReportCodec.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/json/ResourceReportCodec.java b/core/src/main/java/org/apache/twill/internal/json/ResourceReportCodec.java
deleted file mode 100644
index 884d889..0000000
--- a/core/src/main/java/org/apache/twill/internal/json/ResourceReportCodec.java
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.json;
-
-import org.apache.twill.api.ResourceReport;
-import org.apache.twill.api.TwillRunResources;
-import org.apache.twill.internal.DefaultResourceReport;
-import com.google.gson.JsonDeserializationContext;
-import com.google.gson.JsonDeserializer;
-import com.google.gson.JsonElement;
-import com.google.gson.JsonObject;
-import com.google.gson.JsonParseException;
-import com.google.gson.JsonSerializationContext;
-import com.google.gson.JsonSerializer;
-import com.google.gson.reflect.TypeToken;
-
-import java.lang.reflect.Type;
-import java.util.Collection;
-import java.util.Map;
-
-/**
- * Codec for serializing and deserializing a {@link ResourceReport} object using JSON.
- */
-public final class ResourceReportCodec implements JsonSerializer<ResourceReport>,
-                                           JsonDeserializer<ResourceReport> {
-
-  @Override
-  public JsonElement serialize(ResourceReport src, Type typeOfSrc, JsonSerializationContext context) {
-    JsonObject json = new JsonObject();
-
-    json.addProperty("appMasterId", src.getApplicationId());
-    json.add("appMasterResources", context.serialize(
-      src.getAppMasterResources(), new TypeToken<TwillRunResources>(){}.getType()));
-    json.add("runnableResources", context.serialize(
-      src.getResources(), new TypeToken<Map<String, Collection<TwillRunResources>>>(){}.getType()));
-
-    return json;
-  }
-
-  @Override
-  public ResourceReport deserialize(JsonElement json, Type typeOfT,
-                                           JsonDeserializationContext context) throws JsonParseException {
-    JsonObject jsonObj = json.getAsJsonObject();
-    String appMasterId = jsonObj.get("appMasterId").getAsString();
-    TwillRunResources masterResources = context.deserialize(
-      jsonObj.get("appMasterResources"), TwillRunResources.class);
-    Map<String, Collection<TwillRunResources>> resources = context.deserialize(
-      jsonObj.get("runnableResources"), new TypeToken<Map<String, Collection<TwillRunResources>>>(){}.getType());
-
-    return new DefaultResourceReport(appMasterId, masterResources, resources);
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/json/ResourceSpecificationCodec.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/json/ResourceSpecificationCodec.java b/core/src/main/java/org/apache/twill/internal/json/ResourceSpecificationCodec.java
deleted file mode 100644
index bea73c4..0000000
--- a/core/src/main/java/org/apache/twill/internal/json/ResourceSpecificationCodec.java
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.json;
-
-import org.apache.twill.api.ResourceSpecification;
-import org.apache.twill.internal.DefaultResourceSpecification;
-import com.google.gson.JsonDeserializationContext;
-import com.google.gson.JsonDeserializer;
-import com.google.gson.JsonElement;
-import com.google.gson.JsonObject;
-import com.google.gson.JsonParseException;
-import com.google.gson.JsonSerializationContext;
-import com.google.gson.JsonSerializer;
-
-import java.lang.reflect.Type;
-
-/**
- *
- */
-final class ResourceSpecificationCodec implements JsonSerializer<ResourceSpecification>,
-                                                  JsonDeserializer<ResourceSpecification> {
-
-  @Override
-  public JsonElement serialize(ResourceSpecification src, Type typeOfSrc, JsonSerializationContext context) {
-    JsonObject json = new JsonObject();
-
-    json.addProperty("cores", src.getVirtualCores());
-    json.addProperty("memorySize", src.getMemorySize());
-    json.addProperty("instances", src.getInstances());
-    json.addProperty("uplink", src.getUplink());
-    json.addProperty("downlink", src.getDownlink());
-
-    return json;
-  }
-
-  @Override
-  public ResourceSpecification deserialize(JsonElement json, Type typeOfT,
-                                           JsonDeserializationContext context) throws JsonParseException {
-    JsonObject jsonObj = json.getAsJsonObject();
-    return new DefaultResourceSpecification(jsonObj.get("cores").getAsInt(),
-                                            jsonObj.get("memorySize").getAsInt(),
-                                            jsonObj.get("instances").getAsInt(),
-                                            jsonObj.get("uplink").getAsInt(),
-                                            jsonObj.get("downlink").getAsInt());
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/json/RuntimeSpecificationCodec.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/json/RuntimeSpecificationCodec.java b/core/src/main/java/org/apache/twill/internal/json/RuntimeSpecificationCodec.java
deleted file mode 100644
index 867f4a8..0000000
--- a/core/src/main/java/org/apache/twill/internal/json/RuntimeSpecificationCodec.java
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.json;
-
-import org.apache.twill.api.LocalFile;
-import org.apache.twill.api.ResourceSpecification;
-import org.apache.twill.api.RuntimeSpecification;
-import org.apache.twill.api.TwillRunnableSpecification;
-import org.apache.twill.internal.DefaultRuntimeSpecification;
-import com.google.common.reflect.TypeToken;
-import com.google.gson.JsonDeserializationContext;
-import com.google.gson.JsonDeserializer;
-import com.google.gson.JsonElement;
-import com.google.gson.JsonObject;
-import com.google.gson.JsonParseException;
-import com.google.gson.JsonSerializationContext;
-import com.google.gson.JsonSerializer;
-
-import java.lang.reflect.Type;
-import java.util.Collection;
-
-/**
- *
- */
-final class RuntimeSpecificationCodec implements JsonSerializer<RuntimeSpecification>,
-                                                 JsonDeserializer<RuntimeSpecification> {
-
-  @Override
-  public JsonElement serialize(RuntimeSpecification src, Type typeOfSrc, JsonSerializationContext context) {
-    JsonObject json = new JsonObject();
-    json.addProperty("name", src.getName());
-    json.add("runnable", context.serialize(src.getRunnableSpecification(), TwillRunnableSpecification.class));
-    json.add("resources", context.serialize(src.getResourceSpecification(), ResourceSpecification.class));
-    json.add("files", context.serialize(src.getLocalFiles(), new TypeToken<Collection<LocalFile>>(){}.getType()));
-
-    return json;
-  }
-
-  @Override
-  public RuntimeSpecification deserialize(JsonElement json, Type typeOfT,
-                                          JsonDeserializationContext context) throws JsonParseException {
-    JsonObject jsonObj = json.getAsJsonObject();
-
-    String name = jsonObj.get("name").getAsString();
-    TwillRunnableSpecification runnable = context.deserialize(jsonObj.get("runnable"),
-                                                               TwillRunnableSpecification.class);
-    ResourceSpecification resources = context.deserialize(jsonObj.get("resources"),
-                                                          ResourceSpecification.class);
-    Collection<LocalFile> files = context.deserialize(jsonObj.get("files"),
-                                                      new TypeToken<Collection<LocalFile>>(){}.getType());
-
-    return new DefaultRuntimeSpecification(name, runnable, resources, files);
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/json/StackTraceElementCodec.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/json/StackTraceElementCodec.java b/core/src/main/java/org/apache/twill/internal/json/StackTraceElementCodec.java
deleted file mode 100644
index 9a57b46..0000000
--- a/core/src/main/java/org/apache/twill/internal/json/StackTraceElementCodec.java
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.json;
-
-import com.google.gson.JsonDeserializationContext;
-import com.google.gson.JsonDeserializer;
-import com.google.gson.JsonElement;
-import com.google.gson.JsonObject;
-import com.google.gson.JsonParseException;
-import com.google.gson.JsonSerializationContext;
-import com.google.gson.JsonSerializer;
-
-import java.lang.reflect.Type;
-
-/**
- *
- */
-public final class StackTraceElementCodec implements JsonSerializer<StackTraceElement>,
-                                                     JsonDeserializer<StackTraceElement> {
-
-  @Override
-  public StackTraceElement deserialize(JsonElement json, Type typeOfT,
-                                       JsonDeserializationContext context) throws JsonParseException {
-    JsonObject jsonObj = json.getAsJsonObject();
-    return new StackTraceElement(JsonUtils.getAsString(jsonObj, "className"),
-                                 JsonUtils.getAsString(jsonObj, "method"),
-                                 JsonUtils.getAsString(jsonObj, "file"),
-                                 JsonUtils.getAsInt(jsonObj, "line", -1));
-  }
-
-  @Override
-  public JsonElement serialize(StackTraceElement src, Type typeOfSrc, JsonSerializationContext context) {
-    JsonObject jsonObj = new JsonObject();
-    jsonObj.addProperty("className", src.getClassName());
-    jsonObj.addProperty("method", src.getMethodName());
-    jsonObj.addProperty("file", src.getFileName());
-    jsonObj.addProperty("line", src.getLineNumber());
-
-    return jsonObj;
-  }
-}
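
A minimal usage sketch for the codec above (not part of this commit; it assumes only the standard Gson builder API): register it on a GsonBuilder and StackTraceElement values round-trip through the className/method/file/line fields used in serialize and deserialize.

    Gson gson = new GsonBuilder()
        .registerTypeAdapter(StackTraceElement.class, new StackTraceElementCodec())
        .create();

    StackTraceElement element = new Throwable().getStackTrace()[0];
    String json = gson.toJson(element, StackTraceElement.class);           // goes through serialize(...) above
    StackTraceElement copy = gson.fromJson(json, StackTraceElement.class); // goes through deserialize(...) above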

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/json/StateNodeCodec.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/json/StateNodeCodec.java b/core/src/main/java/org/apache/twill/internal/json/StateNodeCodec.java
deleted file mode 100644
index c1e9d1c..0000000
--- a/core/src/main/java/org/apache/twill/internal/json/StateNodeCodec.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.json;
-
-import org.apache.twill.api.ServiceController;
-import org.apache.twill.internal.state.StateNode;
-import com.google.gson.JsonDeserializationContext;
-import com.google.gson.JsonDeserializer;
-import com.google.gson.JsonElement;
-import com.google.gson.JsonObject;
-import com.google.gson.JsonParseException;
-import com.google.gson.JsonSerializationContext;
-import com.google.gson.JsonSerializer;
-
-import java.lang.reflect.Type;
-
-/**
- *
- */
-public final class StateNodeCodec implements JsonSerializer<StateNode>, JsonDeserializer<StateNode> {
-
-  @Override
-  public StateNode deserialize(JsonElement json, Type typeOfT,
-                               JsonDeserializationContext context) throws JsonParseException {
-    JsonObject jsonObj = json.getAsJsonObject();
-    ServiceController.State state = ServiceController.State.valueOf(jsonObj.get("state").getAsString());
-    String errorMessage = jsonObj.has("errorMessage") ? jsonObj.get("errorMessage").getAsString() : null;
-
-    return new StateNode(state, errorMessage,
-                         context.<StackTraceElement[]>deserialize(jsonObj.get("stackTraces"), StackTraceElement[].class));
-  }
-
-  @Override
-  public JsonElement serialize(StateNode src, Type typeOfSrc, JsonSerializationContext context) {
-    JsonObject jsonObj = new JsonObject();
-    jsonObj.addProperty("state", src.getState().name());
-    if (src.getErrorMessage() != null) {
-      jsonObj.addProperty("errorMessage", src.getErrorMessage());
-    }
-    if (src.getStackTraces() != null) {
-      jsonObj.add("stackTraces", context.serialize(src.getStackTraces(), StackTraceElement[].class));
-    }
-    return jsonObj;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/json/TwillRunResourcesCodec.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/json/TwillRunResourcesCodec.java b/core/src/main/java/org/apache/twill/internal/json/TwillRunResourcesCodec.java
deleted file mode 100644
index 8951173..0000000
--- a/core/src/main/java/org/apache/twill/internal/json/TwillRunResourcesCodec.java
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.json;
-
-import org.apache.twill.api.TwillRunResources;
-import org.apache.twill.internal.DefaultTwillRunResources;
-import com.google.gson.JsonDeserializationContext;
-import com.google.gson.JsonDeserializer;
-import com.google.gson.JsonElement;
-import com.google.gson.JsonObject;
-import com.google.gson.JsonParseException;
-import com.google.gson.JsonSerializationContext;
-import com.google.gson.JsonSerializer;
-
-import java.lang.reflect.Type;
-
-/**
- * Codec for serializing and deserializing a {@link org.apache.twill.api.TwillRunResources} object using json.
- */
-public final class TwillRunResourcesCodec implements JsonSerializer<TwillRunResources>,
-                                              JsonDeserializer<TwillRunResources> {
-
-  @Override
-  public JsonElement serialize(TwillRunResources src, Type typeOfSrc, JsonSerializationContext context) {
-    JsonObject json = new JsonObject();
-
-    json.addProperty("containerId", src.getContainerId());
-    json.addProperty("instanceId", src.getInstanceId());
-    json.addProperty("host", src.getHost());
-    json.addProperty("memoryMB", src.getMemoryMB());
-    json.addProperty("virtualCores", src.getVirtualCores());
-
-    return json;
-  }
-
-  @Override
-  public TwillRunResources deserialize(JsonElement json, Type typeOfT,
-                                           JsonDeserializationContext context) throws JsonParseException {
-    JsonObject jsonObj = json.getAsJsonObject();
-    return new DefaultTwillRunResources(jsonObj.get("instanceId").getAsInt(),
-                                        jsonObj.get("containerId").getAsString(),
-                                        jsonObj.get("virtualCores").getAsInt(),
-                                        jsonObj.get("memoryMB").getAsInt(),
-                                        jsonObj.get("host").getAsString());
-  }
-}
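
For reference, a hedged sketch of exercising this codec on its own; the DefaultTwillRunResources argument order is taken from the deserialize method above (instanceId, containerId, virtualCores, memoryMB, host) and the sample values are arbitrary.

    Gson gson = new GsonBuilder()
        .registerTypeAdapter(TwillRunResources.class, new TwillRunResourcesCodec())
        .create();

    // Arbitrary sample values; argument order follows deserialize(...) above.
    TwillRunResources resources = new DefaultTwillRunResources(0, "container_1386000000000_0001_01_000002",
                                                               1, 512, "worker1.example.com");
    String json = gson.toJson(resources, TwillRunResources.class);
    TwillRunResources copy = gson.fromJson(json, TwillRunResources.class);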

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/json/TwillRunnableSpecificationCodec.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/json/TwillRunnableSpecificationCodec.java b/core/src/main/java/org/apache/twill/internal/json/TwillRunnableSpecificationCodec.java
deleted file mode 100644
index f37c1e8..0000000
--- a/core/src/main/java/org/apache/twill/internal/json/TwillRunnableSpecificationCodec.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.json;
-
-import org.apache.twill.api.TwillRunnableSpecification;
-import org.apache.twill.internal.DefaultTwillRunnableSpecification;
-import com.google.common.reflect.TypeToken;
-import com.google.gson.JsonDeserializationContext;
-import com.google.gson.JsonDeserializer;
-import com.google.gson.JsonElement;
-import com.google.gson.JsonObject;
-import com.google.gson.JsonParseException;
-import com.google.gson.JsonSerializationContext;
-import com.google.gson.JsonSerializer;
-
-import java.lang.reflect.Type;
-import java.util.Map;
-
-/**
- *
- */
-final class TwillRunnableSpecificationCodec implements JsonSerializer<TwillRunnableSpecification>,
-                                                       JsonDeserializer<TwillRunnableSpecification> {
-
-  @Override
-  public JsonElement serialize(TwillRunnableSpecification src, Type typeOfSrc, JsonSerializationContext context) {
-    JsonObject json = new JsonObject();
-
-    json.addProperty("classname", src.getClassName());
-    json.addProperty("name", src.getName());
-    json.add("arguments", context.serialize(src.getConfigs(), new TypeToken<Map<String, String>>(){}.getType()));
-
-    return json;
-  }
-
-  @Override
-  public TwillRunnableSpecification deserialize(JsonElement json, Type typeOfT,
-                                                JsonDeserializationContext context) throws JsonParseException {
-    JsonObject jsonObj = json.getAsJsonObject();
-
-    String className = jsonObj.get("classname").getAsString();
-    String name = jsonObj.get("name").getAsString();
-    Map<String, String> arguments = context.deserialize(jsonObj.get("arguments"),
-                                                        new TypeToken<Map<String, String>>(){}.getType());
-
-    return new DefaultTwillRunnableSpecification(className, name, arguments);
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/json/TwillSpecificationAdapter.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/json/TwillSpecificationAdapter.java b/core/src/main/java/org/apache/twill/internal/json/TwillSpecificationAdapter.java
deleted file mode 100644
index 67c15a2..0000000
--- a/core/src/main/java/org/apache/twill/internal/json/TwillSpecificationAdapter.java
+++ /dev/null
@@ -1,163 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.json;
-
-import com.google.common.base.Charsets;
-import com.google.common.collect.Maps;
-import com.google.common.io.Files;
-import com.google.gson.Gson;
-import com.google.gson.GsonBuilder;
-import com.google.gson.TypeAdapter;
-import com.google.gson.TypeAdapterFactory;
-import com.google.gson.reflect.TypeToken;
-import com.google.gson.stream.JsonReader;
-import com.google.gson.stream.JsonToken;
-import com.google.gson.stream.JsonWriter;
-import org.apache.twill.api.EventHandlerSpecification;
-import org.apache.twill.api.LocalFile;
-import org.apache.twill.api.ResourceSpecification;
-import org.apache.twill.api.RuntimeSpecification;
-import org.apache.twill.api.TwillRunnableSpecification;
-import org.apache.twill.api.TwillSpecification;
-import org.apache.twill.internal.json.TwillSpecificationCodec.EventHandlerSpecificationCoder;
-import org.apache.twill.internal.json.TwillSpecificationCodec.TwillSpecificationOrderCoder;
-
-import java.io.File;
-import java.io.IOException;
-import java.io.Reader;
-import java.io.Writer;
-import java.lang.reflect.ParameterizedType;
-import java.lang.reflect.Type;
-import java.util.Map;
-
-/**
- *
- */
-public final class TwillSpecificationAdapter {
-
-  private final Gson gson;
-
-  public static TwillSpecificationAdapter create() {
-    return new TwillSpecificationAdapter();
-  }
-
-  private TwillSpecificationAdapter() {
-    gson = new GsonBuilder()
-              .serializeNulls()
-              .registerTypeAdapter(TwillSpecification.class, new TwillSpecificationCodec())
-              .registerTypeAdapter(TwillSpecification.Order.class, new TwillSpecificationOrderCoder())
-              .registerTypeAdapter(EventHandlerSpecification.class, new EventHandlerSpecificationCoder())
-              .registerTypeAdapter(RuntimeSpecification.class, new RuntimeSpecificationCodec())
-              .registerTypeAdapter(TwillRunnableSpecification.class, new TwillRunnableSpecificationCodec())
-              .registerTypeAdapter(ResourceSpecification.class, new ResourceSpecificationCodec())
-              .registerTypeAdapter(LocalFile.class, new LocalFileCodec())
-              .registerTypeAdapterFactory(new TwillSpecificationTypeAdapterFactory())
-              .create();
-  }
-
-  public String toJson(TwillSpecification spec) {
-    return gson.toJson(spec, TwillSpecification.class);
-  }
-
-  public void toJson(TwillSpecification spec, Writer writer) {
-    gson.toJson(spec, TwillSpecification.class, writer);
-  }
-
-  public void toJson(TwillSpecification spec, File file) throws IOException {
-    Writer writer = Files.newWriter(file, Charsets.UTF_8);
-    try {
-      toJson(spec, writer);
-    } finally {
-      writer.close();
-    }
-  }
-
-  public TwillSpecification fromJson(String json) {
-    return gson.fromJson(json, TwillSpecification.class);
-  }
-
-  public TwillSpecification fromJson(Reader reader) {
-    return gson.fromJson(reader, TwillSpecification.class);
-  }
-
-  public TwillSpecification fromJson(File file) throws IOException {
-    Reader reader = Files.newReader(file, Charsets.UTF_8);
-    try {
-      return fromJson(reader);
-    } finally {
-      reader.close();
-    }
-  }
-
-  // This is to get around gson ignoring of inner class
-  private static final class TwillSpecificationTypeAdapterFactory implements TypeAdapterFactory {
-
-    @Override
-    public <T> TypeAdapter<T> create(Gson gson, TypeToken<T> type) {
-      Class<?> rawType = type.getRawType();
-      if (!Map.class.isAssignableFrom(rawType)) {
-        return null;
-      }
-      Type[] typeArgs = ((ParameterizedType) type.getType()).getActualTypeArguments();
-      TypeToken<?> keyType = TypeToken.get(typeArgs[0]);
-      TypeToken<?> valueType = TypeToken.get(typeArgs[1]);
-      if (keyType.getRawType() != String.class) {
-        return null;
-      }
-      return (TypeAdapter<T>) mapAdapter(gson, valueType);
-    }
-
-    private <V> TypeAdapter<Map<String, V>> mapAdapter(Gson gson, TypeToken<V> valueType) {
-      final TypeAdapter<V> valueAdapter = gson.getAdapter(valueType);
-
-      return new TypeAdapter<Map<String, V>>() {
-        @Override
-        public void write(JsonWriter writer, Map<String, V> map) throws IOException {
-          if (map == null) {
-            writer.nullValue();
-            return;
-          }
-          writer.beginObject();
-          for (Map.Entry<String, V> entry : map.entrySet()) {
-            writer.name(entry.getKey());
-            valueAdapter.write(writer, entry.getValue());
-          }
-          writer.endObject();
-        }
-
-        @Override
-        public Map<String, V> read(JsonReader reader) throws IOException {
-          if (reader.peek() == JsonToken.NULL) {
-            reader.nextNull();
-            return null;
-          }
-          if (reader.peek() != JsonToken.BEGIN_OBJECT) {
-            return null;
-          }
-          Map<String, V> map = Maps.newHashMap();
-          reader.beginObject();
-          while (reader.peek() != JsonToken.END_OBJECT) {
-            map.put(reader.nextName(), valueAdapter.read(reader));
-          }
-          reader.endObject();
-          return map;
-        }
-      };
-    }
-  }
-}
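
The adapter is the intended entry point for persisting a TwillSpecification. A short sketch based only on the overloads declared above; the file name is hypothetical and twillSpec is assumed to come from an application's configure() call.

    TwillSpecificationAdapter adapter = TwillSpecificationAdapter.create();

    File specFile = new File("twillSpec.json");      // hypothetical location
    adapter.toJson(twillSpec, specFile);             // serialized with all the codecs registered above
    TwillSpecification restored = adapter.fromJson(specFile);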

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/core/src/main/java/org/apache/twill/internal/json/TwillSpecificationCodec.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/twill/internal/json/TwillSpecificationCodec.java b/core/src/main/java/org/apache/twill/internal/json/TwillSpecificationCodec.java
deleted file mode 100644
index 5d88350..0000000
--- a/core/src/main/java/org/apache/twill/internal/json/TwillSpecificationCodec.java
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.json;
-
-import org.apache.twill.api.EventHandlerSpecification;
-import org.apache.twill.api.RuntimeSpecification;
-import org.apache.twill.api.TwillSpecification;
-import org.apache.twill.internal.DefaultEventHandlerSpecification;
-import org.apache.twill.internal.DefaultTwillSpecification;
-import com.google.common.reflect.TypeToken;
-import com.google.gson.JsonDeserializationContext;
-import com.google.gson.JsonDeserializer;
-import com.google.gson.JsonElement;
-import com.google.gson.JsonObject;
-import com.google.gson.JsonParseException;
-import com.google.gson.JsonSerializationContext;
-import com.google.gson.JsonSerializer;
-
-import java.lang.reflect.Type;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-/**
- * An implementation of gson serializer/deserializer {@link org.apache.twill.api.TwillSpecification}.
- */
-final class TwillSpecificationCodec implements JsonSerializer<TwillSpecification>,
-                                               JsonDeserializer<TwillSpecification> {
-
-  @Override
-  public JsonElement serialize(TwillSpecification src, Type typeOfSrc, JsonSerializationContext context) {
-    JsonObject json = new JsonObject();
-    json.addProperty("name", src.getName());
-    json.add("runnables", context.serialize(src.getRunnables(),
-                                            new TypeToken<Map<String, RuntimeSpecification>>(){}.getType()));
-    json.add("orders", context.serialize(src.getOrders(),
-                                         new TypeToken<List<TwillSpecification.Order>>(){}.getType()));
-    EventHandlerSpecification eventHandler = src.getEventHandler();
-    if (eventHandler != null) {
-      json.add("handler", context.serialize(eventHandler, EventHandlerSpecification.class));
-    }
-
-    return json;
-  }
-
-  @Override
-  public TwillSpecification deserialize(JsonElement json, Type typeOfT,
-                                        JsonDeserializationContext context) throws JsonParseException {
-    JsonObject jsonObj = json.getAsJsonObject();
-
-    String name = jsonObj.get("name").getAsString();
-    Map<String, RuntimeSpecification> runnables = context.deserialize(
-      jsonObj.get("runnables"), new TypeToken<Map<String, RuntimeSpecification>>(){}.getType());
-    List<TwillSpecification.Order> orders = context.deserialize(
-      jsonObj.get("orders"), new TypeToken<List<TwillSpecification.Order>>(){}.getType());
-
-    JsonElement handler = jsonObj.get("handler");
-    EventHandlerSpecification eventHandler = null;
-    if (handler != null && !handler.isJsonNull()) {
-      eventHandler = context.deserialize(handler, EventHandlerSpecification.class);
-    }
-
-    return new DefaultTwillSpecification(name, runnables, orders, eventHandler);
-  }
-
-  static final class TwillSpecificationOrderCoder implements JsonSerializer<TwillSpecification.Order>,
-                                                             JsonDeserializer<TwillSpecification.Order> {
-
-    @Override
-    public JsonElement serialize(TwillSpecification.Order src, Type typeOfSrc, JsonSerializationContext context) {
-      JsonObject json = new JsonObject();
-      json.add("names", context.serialize(src.getNames(), new TypeToken<Set<String>>(){}.getType()));
-      json.addProperty("type", src.getType().name());
-      return json;
-    }
-
-    @Override
-    public TwillSpecification.Order deserialize(JsonElement json, Type typeOfT,
-                                                JsonDeserializationContext context) throws JsonParseException {
-      JsonObject jsonObj = json.getAsJsonObject();
-
-      Set<String> names = context.deserialize(jsonObj.get("names"), new TypeToken<Set<String>>(){}.getType());
-      TwillSpecification.Order.Type type = TwillSpecification.Order.Type.valueOf(jsonObj.get("type").getAsString());
-
-      return new DefaultTwillSpecification.DefaultOrder(names, type);
-    }
-  }
-
-  static final class EventHandlerSpecificationCoder implements JsonSerializer<EventHandlerSpecification>,
-                                                               JsonDeserializer<EventHandlerSpecification> {
-
-    @Override
-    public JsonElement serialize(EventHandlerSpecification src, Type typeOfSrc, JsonSerializationContext context) {
-      JsonObject json = new JsonObject();
-      json.addProperty("classname", src.getClassName());
-      json.add("configs", context.serialize(src.getConfigs(), new TypeToken<Map<String, String>>(){}.getType()));
-      return json;
-    }
-
-    @Override
-    public EventHandlerSpecification deserialize(JsonElement json, Type typeOfT,
-                                                 JsonDeserializationContext context) throws JsonParseException {
-      JsonObject jsonObj = json.getAsJsonObject();
-      String className = jsonObj.get("classname").getAsString();
-      Map<String, String> configs = context.deserialize(jsonObj.get("configs"),
-                                                        new TypeToken<Map<String, String>>() {
-                                                        }.getType());
-
-      return new DefaultEventHandlerSpecification(className, configs);
-    }
-  }
-}


[11/28] Making maven site works.

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/main/java/org/apache/twill/internal/yarn/YarnUtils.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/main/java/org/apache/twill/internal/yarn/YarnUtils.java b/twill-yarn/src/main/java/org/apache/twill/internal/yarn/YarnUtils.java
new file mode 100644
index 0000000..4f7597b
--- /dev/null
+++ b/twill-yarn/src/main/java/org/apache/twill/internal/yarn/YarnUtils.java
@@ -0,0 +1,279 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.yarn;
+
+import org.apache.twill.api.LocalFile;
+import org.apache.twill.filesystem.ForwardingLocationFactory;
+import org.apache.twill.filesystem.HDFSLocationFactory;
+import org.apache.twill.filesystem.LocationFactory;
+import com.google.common.base.Function;
+import com.google.common.base.Preconditions;
+import com.google.common.base.Throwables;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Maps;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.io.DataInputByteBuffer;
+import org.apache.hadoop.io.DataOutputBuffer;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.LocalResourceType;
+import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.util.ConverterUtils;
+import org.apache.hadoop.yarn.util.Records;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.lang.reflect.Method;
+import java.net.InetSocketAddress;
+import java.nio.ByteBuffer;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicReference;
+
+/**
+ * Collection of helper methods to simplify YARN calls.
+ */
+public class YarnUtils {
+
+  private static final Logger LOG = LoggerFactory.getLogger(YarnUtils.class);
+  private static final AtomicReference<Boolean> HADOOP_20 = new AtomicReference<Boolean>();
+
+  public static YarnLocalResource createLocalResource(LocalFile localFile) {
+    Preconditions.checkArgument(localFile.getLastModified() >= 0, "Last modified time should be >= 0.");
+    Preconditions.checkArgument(localFile.getSize() >= 0, "File size should be >= 0.");
+
+    YarnLocalResource resource = createAdapter(YarnLocalResource.class);
+    resource.setVisibility(LocalResourceVisibility.APPLICATION);
+    resource.setResource(ConverterUtils.getYarnUrlFromURI(localFile.getURI()));
+    resource.setTimestamp(localFile.getLastModified());
+    resource.setSize(localFile.getSize());
+    return setLocalResourceType(resource, localFile);
+  }
+
+  public static YarnLaunchContext createLaunchContext() {
+    return createAdapter(YarnLaunchContext.class);
+  }
+
+  // Temporary workaround since older versions of Hadoop don't have the getVirtualCores method.
+  public static int getVirtualCores(Resource resource) {
+    try {
+      Method getVirtualCores = Resource.class.getMethod("getVirtualCores");
+      return (Integer) getVirtualCores.invoke(resource);
+    } catch (Exception e) {
+      return 0;
+    }
+  }
+
+  /**
+   * Temporary workaround since older versions of Hadoop don't have the setVirtualCores method.
+   *
+   * @param resource the resource record to update
+   * @param cores the number of virtual cores to set
+   * @return true if virtual cores were set, false if not.
+   */
+  public static boolean setVirtualCores(Resource resource, int cores) {
+    try {
+      Method setVirtualCores = Resource.class.getMethod("setVirtualCores", int.class);
+      setVirtualCores.invoke(resource, cores);
+    } catch (Exception e) {
+      // It's ok to ignore this exception, as it's using older version of API.
+      return false;
+    }
+    return true;
+  }
+
+  /**
+   * Creates {@link ApplicationId} from the given cluster timestamp and id.
+   */
+  public static ApplicationId createApplicationId(long timestamp, int id) {
+    try {
+      try {
+        // For Hadoop-2.1
+        Method method = ApplicationId.class.getMethod("newInstance", long.class, int.class);
+        return (ApplicationId) method.invoke(null, timestamp, id);
+      } catch (NoSuchMethodException e) {
+        // Try with Hadoop-2.0 way
+        ApplicationId appId = Records.newRecord(ApplicationId.class);
+
+        Method setClusterTimestamp = ApplicationId.class.getMethod("setClusterTimestamp", long.class);
+        Method setId = ApplicationId.class.getMethod("setId", int.class);
+
+        setClusterTimestamp.invoke(appId, timestamp);
+        setId.invoke(appId, id);
+
+        return appId;
+      }
+    } catch (Exception e) {
+      throw Throwables.propagate(e);
+    }
+  }
+
+  /**
+   * Helper method to get delegation tokens for the given LocationFactory.
+   * @param config The hadoop configuration.
+   * @param locationFactory The LocationFactory for generating tokens.
+   * @param credentials Credentials for storing tokens acquired.
+   * @return List of delegation Tokens acquired.
+   */
+  public static List<Token<?>> addDelegationTokens(Configuration config,
+                                                   LocationFactory locationFactory,
+                                                   Credentials credentials) throws IOException {
+    if (!UserGroupInformation.isSecurityEnabled()) {
+      LOG.debug("Security is not enabled");
+      return ImmutableList.of();
+    }
+
+    FileSystem fileSystem = getFileSystem(locationFactory);
+
+    if (fileSystem == null) {
+      LOG.debug("LocationFactory is not HDFS");
+      return ImmutableList.of();
+    }
+
+    String renewer = getYarnTokenRenewer(config);
+
+    Token<?>[] tokens = fileSystem.addDelegationTokens(renewer, credentials);
+    return tokens == null ? ImmutableList.<Token<?>>of() : ImmutableList.copyOf(tokens);
+  }
+
+  public static ByteBuffer encodeCredentials(Credentials credentials) {
+    try {
+      DataOutputBuffer out = new DataOutputBuffer();
+      credentials.writeTokenStorageToStream(out);
+      return ByteBuffer.wrap(out.getData(), 0, out.getLength());
+    } catch (IOException e) {
+      // Shouldn't throw
+      LOG.error("Failed to encode Credentials.", e);
+      throw Throwables.propagate(e);
+    }
+  }
+
+  /**
+   * Decodes {@link Credentials} from the given buffer.
+   * If the buffer is null or empty, it returns an empty Credentials.
+   */
+  public static Credentials decodeCredentials(ByteBuffer buffer) throws IOException {
+    Credentials credentials = new Credentials();
+    if (buffer != null && buffer.hasRemaining()) {
+      DataInputByteBuffer in = new DataInputByteBuffer();
+      in.reset(buffer);
+      credentials.readTokenStorageStream(in);
+    }
+    return credentials;
+  }
+
+  public static String getYarnTokenRenewer(Configuration config) throws IOException {
+    String rmHost = getRMAddress(config).getHostName();
+    String renewer = SecurityUtil.getServerPrincipal(config.get(YarnConfiguration.RM_PRINCIPAL), rmHost);
+
+    if (renewer == null || renewer.length() == 0) {
+      throw new IOException("No Kerberos principal for Yarn RM to use as renewer");
+    }
+
+    return renewer;
+  }
+
+  public static InetSocketAddress getRMAddress(Configuration config) {
+    return config.getSocketAddr(YarnConfiguration.RM_ADDRESS,
+                                YarnConfiguration.DEFAULT_RM_ADDRESS,
+                                YarnConfiguration.DEFAULT_RM_PORT);
+  }
+
+  /**
+   * Returns true if Hadoop-2.0 classes are in the classpath.
+   */
+  public static boolean isHadoop20() {
+    Boolean hadoop20 = HADOOP_20.get();
+    if (hadoop20 != null) {
+      return hadoop20;
+    }
+    try {
+      Class.forName("org.apache.hadoop.yarn.client.api.NMClient");
+      HADOOP_20.set(false);
+      return false;
+    } catch (ClassNotFoundException e) {
+      HADOOP_20.set(true);
+      return true;
+    }
+  }
+
+  /**
+   * Helper method to create an adapter class for bridging between Hadoop 2.0 and 2.1.
+   */
+  private static <T> T createAdapter(Class<T> clz) {
+    String className = clz.getPackage().getName();
+
+    if (isHadoop20()) {
+      className += ".Hadoop20" + clz.getSimpleName();
+    } else {
+      className += ".Hadoop21" + clz.getSimpleName();
+    }
+
+    try {
+      return (T) Class.forName(className).newInstance();
+    } catch (Exception e) {
+      throw Throwables.propagate(e);
+    }
+  }
+
+  private static YarnLocalResource setLocalResourceType(YarnLocalResource localResource, LocalFile localFile) {
+    if (localFile.isArchive()) {
+      if (localFile.getPattern() == null) {
+        localResource.setType(LocalResourceType.ARCHIVE);
+      } else {
+        localResource.setType(LocalResourceType.PATTERN);
+        localResource.setPattern(localFile.getPattern());
+      }
+    } else {
+      localResource.setType(LocalResourceType.FILE);
+    }
+    return localResource;
+  }
+
+  private static <T> Map<String, T> transformResource(Map<String, YarnLocalResource> from) {
+    return Maps.transformValues(from, new Function<YarnLocalResource, T>() {
+      @Override
+      public T apply(YarnLocalResource resource) {
+        return resource.getLocalResource();
+      }
+    });
+  }
+
+  /**
+   * Gets the Hadoop FileSystem from LocationFactory.
+   */
+  private static FileSystem getFileSystem(LocationFactory locationFactory) {
+    if (locationFactory instanceof HDFSLocationFactory) {
+      return ((HDFSLocationFactory) locationFactory).getFileSystem();
+    }
+    if (locationFactory instanceof ForwardingLocationFactory) {
+      return getFileSystem(((ForwardingLocationFactory) locationFactory).getDelegate());
+    }
+    return null;
+  }
+
+  private YarnUtils() {
+  }
+}
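
The virtual-core helpers above degrade gracefully on older Hadoop releases (getVirtualCores returns 0, setVirtualCores returns false), so callers can use them unconditionally. A minimal sketch with arbitrary values:

    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(512);
    if (!YarnUtils.setVirtualCores(capability, 2)) {
      // Older YARN API without virtual cores; the request falls back to memory only.
    }
    int cores = YarnUtils.getVirtualCores(capability);   // 2, or 0 on older Hadoop

    // Works with both the Hadoop 2.0 and 2.1 record APIs via reflection.
    ApplicationId appId = YarnUtils.createApplicationId(System.currentTimeMillis(), 1);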

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/main/java/org/apache/twill/internal/yarn/package-info.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/main/java/org/apache/twill/internal/yarn/package-info.java b/twill-yarn/src/main/java/org/apache/twill/internal/yarn/package-info.java
new file mode 100644
index 0000000..d6ec9f7
--- /dev/null
+++ b/twill-yarn/src/main/java/org/apache/twill/internal/yarn/package-info.java
@@ -0,0 +1,21 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * This package contains classes for interacting with YARN.
+ */
+package org.apache.twill.internal.yarn;

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/main/java/org/apache/twill/yarn/LocationSecureStoreUpdater.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/main/java/org/apache/twill/yarn/LocationSecureStoreUpdater.java b/twill-yarn/src/main/java/org/apache/twill/yarn/LocationSecureStoreUpdater.java
new file mode 100644
index 0000000..4d20c9c
--- /dev/null
+++ b/twill-yarn/src/main/java/org/apache/twill/yarn/LocationSecureStoreUpdater.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.yarn;
+
+import org.apache.twill.api.RunId;
+import org.apache.twill.api.SecureStore;
+import org.apache.twill.api.SecureStoreUpdater;
+import org.apache.twill.filesystem.LocationFactory;
+import org.apache.twill.internal.yarn.YarnUtils;
+import com.google.common.base.Throwables;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.Credentials;
+
+import java.io.IOException;
+
+/**
+ * Package private class for updating location related secure store.
+ */
+final class LocationSecureStoreUpdater implements SecureStoreUpdater {
+
+  private final Configuration configuration;
+  private final LocationFactory locationFactory;
+
+  LocationSecureStoreUpdater(Configuration configuration, LocationFactory locationFactory) {
+    this.configuration = configuration;
+    this.locationFactory = locationFactory;
+  }
+
+  @Override
+  public SecureStore update(String application, RunId runId) {
+    try {
+      Credentials credentials = new Credentials();
+      YarnUtils.addDelegationTokens(configuration, locationFactory, credentials);
+      return YarnSecureStore.create(credentials);
+    } catch (IOException e) {
+      throw Throwables.propagate(e);
+    }
+  }
+}
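
Each update call re-acquires HDFS delegation tokens and wraps them in a YarnSecureStore; the runner normally invokes this on a schedule. A hedged sketch of a direct call, only valid from within the org.apache.twill.yarn package since the class is package-private; conf and locationFactory are assumed to come from the running environment.

    SecureStoreUpdater updater = new LocationSecureStoreUpdater(conf, locationFactory);
    SecureStore store = updater.update("myApp", RunIds.generate());   // application name is hypothetical
    Credentials refreshed = (Credentials) store.getStore();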

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/main/java/org/apache/twill/yarn/ResourceReportClient.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/main/java/org/apache/twill/yarn/ResourceReportClient.java b/twill-yarn/src/main/java/org/apache/twill/yarn/ResourceReportClient.java
new file mode 100644
index 0000000..2974c3f
--- /dev/null
+++ b/twill-yarn/src/main/java/org/apache/twill/yarn/ResourceReportClient.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.yarn;
+
+import org.apache.twill.api.ResourceReport;
+import org.apache.twill.internal.json.ResourceReportAdapter;
+import com.google.common.base.Charsets;
+import com.google.common.io.Closeables;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.BufferedReader;
+import java.io.InputStreamReader;
+import java.io.Reader;
+import java.net.URL;
+
+/**
+ * Package private class to get {@link ResourceReport} from the application master.
+ */
+final class ResourceReportClient {
+  private static final Logger LOG = LoggerFactory.getLogger(ResourceReportClient.class);
+
+  private final ResourceReportAdapter reportAdapter;
+  private final URL resourceUrl;
+
+  ResourceReportClient(URL resourceUrl) {
+    this.resourceUrl = resourceUrl;
+    this.reportAdapter = ResourceReportAdapter.create();
+  }
+
+  /**
+   * Returns the resource usage of the application fetched from the resource endpoint URL.
+   * @return A {@link ResourceReport} or {@code null} if failed to fetch the report.
+   */
+  public ResourceReport get() {
+    try {
+      Reader reader = new BufferedReader(new InputStreamReader(resourceUrl.openStream(), Charsets.UTF_8));
+      try {
+        return reportAdapter.fromJson(reader);
+      } finally {
+        Closeables.closeQuietly(reader);
+      }
+    } catch (Exception e) {
+      LOG.error("Exception getting resource report from {}.", resourceUrl, e);
+      return null;
+    }
+  }
+}
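
Usage is a single call; get() returns null instead of throwing when the tracker endpoint cannot be reached, so callers must handle that case. A sketch with a hypothetical tracker address (the class is package-private, and checked-exception handling around the URL is elided):

    URL trackerUrl = new URL("http://appmaster.example.com:20010" + TrackerService.PATH);
    ResourceReportClient client = new ResourceReportClient(trackerUrl);

    ResourceReport report = client.get();
    if (report == null) {
      // Application master not reachable yet; retry later.
    }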

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/main/java/org/apache/twill/yarn/YarnSecureStore.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/main/java/org/apache/twill/yarn/YarnSecureStore.java b/twill-yarn/src/main/java/org/apache/twill/yarn/YarnSecureStore.java
new file mode 100644
index 0000000..e6f461a
--- /dev/null
+++ b/twill-yarn/src/main/java/org/apache/twill/yarn/YarnSecureStore.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.yarn;
+
+import org.apache.twill.api.SecureStore;
+import org.apache.hadoop.security.Credentials;
+
+/**
+ * A {@link SecureStore} for hadoop credentials.
+ */
+public final class YarnSecureStore implements SecureStore {
+
+  private final Credentials credentials;
+
+  public static SecureStore create(Credentials credentials) {
+    return new YarnSecureStore(credentials);
+  }
+
+  private YarnSecureStore(Credentials credentials) {
+    this.credentials = credentials;
+  }
+
+  @Override
+  public Credentials getStore() {
+    return credentials;
+  }
+}
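
YarnSecureStore is just a thin holder around Credentials. Sketched below, under the assumption of a secured cluster with config and locationFactory available from the environment, is the typical flow of collecting delegation tokens (via the YarnUtils helper shown earlier) and wrapping them for submission:

    Credentials credentials = new Credentials();
    // No-op when security is disabled; otherwise adds HDFS delegation tokens.
    YarnUtils.addDelegationTokens(config, locationFactory, credentials);
    SecureStore secureStore = YarnSecureStore.create(credentials);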

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/main/java/org/apache/twill/yarn/YarnTwillController.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/main/java/org/apache/twill/yarn/YarnTwillController.java b/twill-yarn/src/main/java/org/apache/twill/yarn/YarnTwillController.java
new file mode 100644
index 0000000..4c240fb
--- /dev/null
+++ b/twill-yarn/src/main/java/org/apache/twill/yarn/YarnTwillController.java
@@ -0,0 +1,208 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.yarn;
+
+import org.apache.twill.api.ResourceReport;
+import org.apache.twill.api.RunId;
+import org.apache.twill.api.TwillController;
+import org.apache.twill.api.logging.LogHandler;
+import org.apache.twill.internal.AbstractTwillController;
+import org.apache.twill.internal.Constants;
+import org.apache.twill.internal.ProcessController;
+import org.apache.twill.internal.appmaster.TrackerService;
+import org.apache.twill.internal.state.StateNode;
+import org.apache.twill.internal.state.SystemMessages;
+import org.apache.twill.internal.yarn.YarnApplicationReport;
+import org.apache.twill.zookeeper.NodeData;
+import org.apache.twill.zookeeper.ZKClient;
+import com.google.common.base.Throwables;
+import com.google.common.collect.ImmutableList;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.Uninterruptibles;
+import org.apache.commons.lang.time.StopWatch;
+import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
+import org.apache.hadoop.yarn.api.records.YarnApplicationState;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.net.URI;
+import java.net.URL;
+import java.util.concurrent.Callable;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * A {@link org.apache.twill.api.TwillController} that controls applications running on Hadoop YARN.
+ */
+final class YarnTwillController extends AbstractTwillController implements TwillController {
+
+  private static final Logger LOG = LoggerFactory.getLogger(YarnTwillController.class);
+
+  private final Callable<ProcessController<YarnApplicationReport>> startUp;
+  private ProcessController<YarnApplicationReport> processController;
+  private ResourceReportClient resourcesClient;
+
+  /**
+   * Creates an instance without any {@link LogHandler}.
+   */
+  YarnTwillController(RunId runId, ZKClient zkClient, Callable<ProcessController<YarnApplicationReport>> startUp) {
+    this(runId, zkClient, ImmutableList.<LogHandler>of(), startUp);
+  }
+
+  YarnTwillController(RunId runId, ZKClient zkClient, Iterable<LogHandler> logHandlers,
+                      Callable<ProcessController<YarnApplicationReport>> startUp) {
+    super(runId, zkClient, logHandlers);
+    this.startUp = startUp;
+  }
+
+
+  /**
+   * Sends a message to the application to notify that the secure store has been updated.
+   */
+  ListenableFuture<Void> secureStoreUpdated() {
+    return sendMessage(SystemMessages.SECURE_STORE_UPDATED, null);
+  }
+
+  @Override
+  protected void doStartUp() {
+    super.doStartUp();
+
+    // Submit and poll the status of the yarn application
+    try {
+      processController = startUp.call();
+
+      YarnApplicationReport report = processController.getReport();
+      LOG.debug("Application {} submit", report.getApplicationId());
+
+      YarnApplicationState state = report.getYarnApplicationState();
+      StopWatch stopWatch = new StopWatch();
+      stopWatch.start();
+      stopWatch.split();
+      long maxTime = TimeUnit.MILLISECONDS.convert(Constants.APPLICATION_MAX_START_SECONDS, TimeUnit.SECONDS);
+
+      LOG.info("Checking yarn application status");
+      while (!hasRun(state) && stopWatch.getSplitTime() < maxTime) {
+        report = processController.getReport();
+        state = report.getYarnApplicationState();
+        LOG.debug("Yarn application status: {}", state);
+        TimeUnit.SECONDS.sleep(1);
+        stopWatch.split();
+      }
+      LOG.info("Yarn application is in state {}", state);
+      if (state != YarnApplicationState.RUNNING) {
+        LOG.info("Yarn application is not in running state. Shutting down controller.",
+                 Constants.APPLICATION_MAX_START_SECONDS);
+        forceShutDown();
+      } else {
+        try {
+          URL resourceUrl = URI.create(String.format("http://%s:%d", report.getHost(), report.getRpcPort()))
+                               .resolve(TrackerService.PATH).toURL();
+          resourcesClient = new ResourceReportClient(resourceUrl);
+        } catch (IOException e) {
+          resourcesClient = null;
+        }
+      }
+    } catch (Exception e) {
+      throw Throwables.propagate(e);
+    }
+  }
+
+  @Override
+  protected void doShutDown() {
+    if (processController == null) {
+      LOG.warn("No process controller for application that is not submitted.");
+      return;
+    }
+
+    // Wait for the stop message being processed
+    try {
+      Uninterruptibles.getUninterruptibly(getStopMessageFuture(),
+                                          Constants.APPLICATION_MAX_STOP_SECONDS, TimeUnit.SECONDS);
+    } catch (Exception e) {
+      LOG.error("Failed to wait for stop message being processed.", e);
+      // Kill the application through yarn
+      kill();
+    }
+
+    // Poll application status from yarn
+    try {
+      StopWatch stopWatch = new StopWatch();
+      stopWatch.start();
+      stopWatch.split();
+      long maxTime = TimeUnit.MILLISECONDS.convert(Constants.APPLICATION_MAX_STOP_SECONDS, TimeUnit.SECONDS);
+
+      YarnApplicationReport report = processController.getReport();
+      FinalApplicationStatus finalStatus = report.getFinalApplicationStatus();
+      while (finalStatus == FinalApplicationStatus.UNDEFINED && stopWatch.getSplitTime() < maxTime) {
+        LOG.debug("Yarn application final status for {} {}", report.getApplicationId(), finalStatus);
+        TimeUnit.SECONDS.sleep(1);
+        stopWatch.split();
+        finalStatus = processController.getReport().getFinalApplicationStatus();
+      }
+      LOG.debug("Yarn application final status is {}", finalStatus);
+
+      // Application not finished after max stop time, kill the application
+      if (finalStatus == FinalApplicationStatus.UNDEFINED) {
+        kill();
+      }
+    } catch (Exception e) {
+      LOG.warn("Exception while waiting for application report: {}", e.getMessage(), e);
+      kill();
+    }
+
+    super.doShutDown();
+  }
+
+  @Override
+  public void kill() {
+    if (processController != null) {
+      YarnApplicationReport report = processController.getReport();
+      LOG.info("Killing application {}", report.getApplicationId());
+      processController.cancel();
+    } else {
+      LOG.warn("No process controller for application that is not submitted.");
+    }
+  }
+
+  @Override
+  protected void instanceNodeUpdated(NodeData nodeData) {
+
+  }
+
+  @Override
+  protected void stateNodeUpdated(StateNode stateNode) {
+
+  }
+
+  private boolean hasRun(YarnApplicationState state) {
+    switch (state) {
+      case RUNNING:
+      case FINISHED:
+      case FAILED:
+      case KILLED:
+        return true;
+    }
+    return false;
+  }
+
+  @Override
+  public ResourceReport getResourceReport() {
+    // in case the user calls this before starting, return null
+    return (resourcesClient == null) ? null : resourcesClient.get();
+  }
+}
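
Both doStartUp and doShutDown follow the same bounded-polling pattern around commons-lang's StopWatch. Isolated as a sketch (the timeout value is a placeholder, desiredStateReached() stands in for re-reading the YARN application report, and InterruptedException handling is elided):

    StopWatch stopWatch = new StopWatch();
    stopWatch.start();
    stopWatch.split();
    long maxTime = TimeUnit.MILLISECONDS.convert(60, TimeUnit.SECONDS);   // placeholder bound

    while (!desiredStateReached() && stopWatch.getSplitTime() < maxTime) {
      TimeUnit.SECONDS.sleep(1);   // poll once per second
      stopWatch.split();           // refresh the elapsed-time snapshot
    }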

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/main/java/org/apache/twill/yarn/YarnTwillControllerFactory.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/main/java/org/apache/twill/yarn/YarnTwillControllerFactory.java b/twill-yarn/src/main/java/org/apache/twill/yarn/YarnTwillControllerFactory.java
new file mode 100644
index 0000000..11c2ae6
--- /dev/null
+++ b/twill-yarn/src/main/java/org/apache/twill/yarn/YarnTwillControllerFactory.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.yarn;
+
+import org.apache.twill.api.RunId;
+import org.apache.twill.api.logging.LogHandler;
+import org.apache.twill.internal.ProcessController;
+import org.apache.twill.internal.yarn.YarnApplicationReport;
+
+import java.util.concurrent.Callable;
+
+/**
+ * Factory for creating {@link YarnTwillController}.
+ */
+interface YarnTwillControllerFactory {
+
+  YarnTwillController create(RunId runId, Iterable<LogHandler> logHandlers,
+                             Callable<ProcessController<YarnApplicationReport>> startUp);
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/main/java/org/apache/twill/yarn/YarnTwillPreparer.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/main/java/org/apache/twill/yarn/YarnTwillPreparer.java b/twill-yarn/src/main/java/org/apache/twill/yarn/YarnTwillPreparer.java
new file mode 100644
index 0000000..17425d4
--- /dev/null
+++ b/twill-yarn/src/main/java/org/apache/twill/yarn/YarnTwillPreparer.java
@@ -0,0 +1,600 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.yarn;
+
+import com.google.common.base.Charsets;
+import com.google.common.base.Joiner;
+import com.google.common.base.Preconditions;
+import com.google.common.base.Predicates;
+import com.google.common.base.Supplier;
+import com.google.common.base.Throwables;
+import com.google.common.collect.ArrayListMultimap;
+import com.google.common.collect.HashMultimap;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.ListMultimap;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Multimap;
+import com.google.common.collect.Sets;
+import com.google.common.io.ByteStreams;
+import com.google.common.io.CharStreams;
+import com.google.common.io.OutputSupplier;
+import com.google.common.reflect.TypeToken;
+import com.google.gson.GsonBuilder;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.twill.api.EventHandlerSpecification;
+import org.apache.twill.api.LocalFile;
+import org.apache.twill.api.RunId;
+import org.apache.twill.api.RuntimeSpecification;
+import org.apache.twill.api.SecureStore;
+import org.apache.twill.api.TwillController;
+import org.apache.twill.api.TwillPreparer;
+import org.apache.twill.api.TwillSpecification;
+import org.apache.twill.api.logging.LogHandler;
+import org.apache.twill.filesystem.Location;
+import org.apache.twill.filesystem.LocationFactory;
+import org.apache.twill.internal.ApplicationBundler;
+import org.apache.twill.internal.Arguments;
+import org.apache.twill.internal.Configs;
+import org.apache.twill.internal.Constants;
+import org.apache.twill.internal.DefaultLocalFile;
+import org.apache.twill.internal.DefaultRuntimeSpecification;
+import org.apache.twill.internal.DefaultTwillSpecification;
+import org.apache.twill.internal.EnvKeys;
+import org.apache.twill.internal.LogOnlyEventHandler;
+import org.apache.twill.internal.ProcessController;
+import org.apache.twill.internal.ProcessLauncher;
+import org.apache.twill.internal.RunIds;
+import org.apache.twill.internal.appmaster.ApplicationMasterMain;
+import org.apache.twill.internal.container.TwillContainerMain;
+import org.apache.twill.internal.json.ArgumentsCodec;
+import org.apache.twill.internal.json.LocalFileCodec;
+import org.apache.twill.internal.json.TwillSpecificationAdapter;
+import org.apache.twill.internal.utils.Dependencies;
+import org.apache.twill.internal.utils.Paths;
+import org.apache.twill.internal.yarn.YarnAppClient;
+import org.apache.twill.internal.yarn.YarnApplicationReport;
+import org.apache.twill.internal.yarn.YarnUtils;
+import org.apache.twill.launcher.TwillLauncher;
+import org.apache.twill.zookeeper.ZKClient;
+import org.apache.twill.zookeeper.ZKClients;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.BufferedOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.io.OutputStreamWriter;
+import java.io.Writer;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.Callable;
+import java.util.jar.JarEntry;
+import java.util.jar.JarOutputStream;
+
+/**
+ * Implementation of {@link TwillPreparer} that prepares and launches distributed applications on Hadoop YARN.
+ */
+final class YarnTwillPreparer implements TwillPreparer {
+
+  private static final Logger LOG = LoggerFactory.getLogger(YarnTwillPreparer.class);
+  private static final String KAFKA_ARCHIVE = "kafka-0.7.2.tgz";
+
+  private final YarnConfiguration yarnConfig;
+  private final TwillSpecification twillSpec;
+  private final YarnAppClient yarnAppClient;
+  private final ZKClient zkClient;
+  private final LocationFactory locationFactory;
+  private final Supplier<String> jvmOpts;
+  private final YarnTwillControllerFactory controllerFactory;
+  private final RunId runId;
+
+  private final List<LogHandler> logHandlers = Lists.newArrayList();
+  private final List<String> arguments = Lists.newArrayList();
+  private final Set<Class<?>> dependencies = Sets.newIdentityHashSet();
+  private final List<URI> resources = Lists.newArrayList();
+  private final List<String> classPaths = Lists.newArrayList();
+  private final ListMultimap<String, String> runnableArgs = ArrayListMultimap.create();
+  private final Credentials credentials;
+  private final int reservedMemory;
+  private String user;
+
+  YarnTwillPreparer(YarnConfiguration yarnConfig, TwillSpecification twillSpec, YarnAppClient yarnAppClient,
+                    ZKClient zkClient, LocationFactory locationFactory, Supplier<String> jvmOpts,
+                    YarnTwillControllerFactory controllerFactory) {
+    this.yarnConfig = yarnConfig;
+    this.twillSpec = twillSpec;
+    this.yarnAppClient = yarnAppClient;
+    this.zkClient = ZKClients.namespace(zkClient, "/" + twillSpec.getName());
+    this.locationFactory = locationFactory;
+    this.jvmOpts = jvmOpts;
+    this.controllerFactory = controllerFactory;
+    this.runId = RunIds.generate();
+    this.credentials = createCredentials();
+    this.reservedMemory = yarnConfig.getInt(Configs.Keys.JAVA_RESERVED_MEMORY_MB,
+                                            Configs.Defaults.JAVA_RESERVED_MEMORY_MB);
+    this.user = System.getProperty("user.name");
+  }
+
+  @Override
+  public TwillPreparer addLogHandler(LogHandler handler) {
+    logHandlers.add(handler);
+    return this;
+  }
+
+  @Override
+  public TwillPreparer setUser(String user) {
+    this.user = user;
+    return this;
+  }
+
+  @Override
+  public TwillPreparer withApplicationArguments(String... args) {
+    return withApplicationArguments(ImmutableList.copyOf(args));
+  }
+
+  @Override
+  public TwillPreparer withApplicationArguments(Iterable<String> args) {
+    Iterables.addAll(arguments, args);
+    return this;
+  }
+
+  @Override
+  public TwillPreparer withArguments(String runnableName, String... args) {
+    return withArguments(runnableName, ImmutableList.copyOf(args));
+  }
+
+  @Override
+  public TwillPreparer withArguments(String runnableName, Iterable<String> args) {
+    runnableArgs.putAll(runnableName, args);
+    return this;
+  }
+
+  @Override
+  public TwillPreparer withDependencies(Class<?>... classes) {
+    return withDependencies(ImmutableList.copyOf(classes));
+  }
+
+  @Override
+  public TwillPreparer withDependencies(Iterable<Class<?>> classes) {
+    Iterables.addAll(dependencies, classes);
+    return this;
+  }
+
+  @Override
+  public TwillPreparer withResources(URI... resources) {
+    return withResources(ImmutableList.copyOf(resources));
+  }
+
+  @Override
+  public TwillPreparer withResources(Iterable<URI> resources) {
+    Iterables.addAll(this.resources, resources);
+    return this;
+  }
+
+  @Override
+  public TwillPreparer withClassPaths(String... classPaths) {
+    return withClassPaths(ImmutableList.copyOf(classPaths));
+  }
+
+  @Override
+  public TwillPreparer withClassPaths(Iterable<String> classPaths) {
+    Iterables.addAll(this.classPaths, classPaths);
+    return this;
+  }
+
+  @Override
+  public TwillPreparer addSecureStore(SecureStore secureStore) {
+    Object store = secureStore.getStore();
+    Preconditions.checkArgument(store instanceof Credentials, "Only Hadoop Credentials is supported.");
+    this.credentials.mergeAll((Credentials) store);
+    return this;
+  }
+
+  @Override
+  public TwillController start() {
+    try {
+      final ProcessLauncher<ApplicationId> launcher = yarnAppClient.createLauncher(user, twillSpec);
+      final ApplicationId appId = launcher.getContainerInfo();
+
+      Callable<ProcessController<YarnApplicationReport>> submitTask =
+        new Callable<ProcessController<YarnApplicationReport>>() {
+        @Override
+        public ProcessController<YarnApplicationReport> call() throws Exception {
+          String fsUser = locationFactory.getHomeLocation().getName();
+
+          // Local files needed by AM
+          Map<String, LocalFile> localFiles = Maps.newHashMap();
+          // Local files declared by runnables
+          Multimap<String, LocalFile> runnableLocalFiles = HashMultimap.create();
+
+          String vmOpts = jvmOpts.get();
+
+          createAppMasterJar(createBundler(), localFiles);
+          createContainerJar(createBundler(), localFiles);
+          populateRunnableLocalFiles(twillSpec, runnableLocalFiles);
+          saveSpecification(twillSpec, runnableLocalFiles, localFiles);
+          saveLogback(localFiles);
+          saveLauncher(localFiles);
+          saveKafka(localFiles);
+          saveVmOptions(vmOpts, localFiles);
+          saveArguments(new Arguments(arguments, runnableArgs), localFiles);
+          saveLocalFiles(localFiles, ImmutableSet.of(Constants.Files.TWILL_SPEC,
+                                                     Constants.Files.LOGBACK_TEMPLATE,
+                                                     Constants.Files.CONTAINER_JAR,
+                                                     Constants.Files.LAUNCHER_JAR,
+                                                     Constants.Files.ARGUMENTS));
+
+          LOG.debug("Submit AM container spec: {}", appId);
+          // java -Djava.io.tmpdir=tmp -cp launcher.jar:$HADOOP_CONF_DIR -XmxMemory
+          //     org.apache.twill.internal.TwillLauncher
+          //     appMaster.jar
+          //     org.apache.twill.internal.appmaster.ApplicationMasterMain
+          //     false
+          return launcher.prepareLaunch(
+            ImmutableMap.<String, String>builder()
+              .put(EnvKeys.TWILL_FS_USER, fsUser)
+              .put(EnvKeys.TWILL_APP_DIR, getAppLocation().toURI().toASCIIString())
+              .put(EnvKeys.TWILL_ZK_CONNECT, zkClient.getConnectString())
+              .put(EnvKeys.TWILL_RUN_ID, runId.getId())
+              .put(EnvKeys.TWILL_RESERVED_MEMORY_MB, Integer.toString(reservedMemory))
+              .put(EnvKeys.TWILL_APP_NAME, twillSpec.getName()).build(),
+            localFiles.values(), credentials)
+            .noResources()
+            .noEnvironment()
+            .withCommands().add(
+              "java",
+              "-Djava.io.tmpdir=tmp",
+              "-Dyarn.appId=$" + EnvKeys.YARN_APP_ID_STR,
+              "-Dtwill.app=$" + EnvKeys.TWILL_APP_NAME,
+              "-cp", Constants.Files.LAUNCHER_JAR + ":$HADOOP_CONF_DIR",
+              "-Xmx" + (Constants.APP_MASTER_MEMORY_MB - Constants.APP_MASTER_RESERVED_MEMORY_MB) + "m",
+              vmOpts,
+              TwillLauncher.class.getName(),
+              Constants.Files.APP_MASTER_JAR,
+              ApplicationMasterMain.class.getName(),
+              Boolean.FALSE.toString())
+            .redirectOutput(Constants.STDOUT)
+            .redirectError(Constants.STDERR)
+            .launch();
+        }
+      };
+
+      YarnTwillController controller = controllerFactory.create(runId, logHandlers, submitTask);
+      controller.start();
+      return controller;
+    } catch (Exception e) {
+      LOG.error("Failed to submit application {}", twillSpec.getName(), e);
+      throw Throwables.propagate(e);
+    }
+  }
+
+  private Credentials createCredentials() {
+    Credentials credentials = new Credentials();
+
+    try {
+      credentials.addAll(UserGroupInformation.getCurrentUser().getCredentials());
+
+      List<Token<?>> tokens = YarnUtils.addDelegationTokens(yarnConfig, locationFactory, credentials);
+      for (Token<?> token : tokens) {
+        LOG.debug("Delegation token acquired for {}, {}", locationFactory.getHomeLocation().toURI(), token);
+      }
+    } catch (IOException e) {
+      LOG.warn("Failed to check for secure login type. Not gathering any delegation token.", e);
+    }
+    return credentials;
+  }
+
+  private ApplicationBundler createBundler() {
+    return new ApplicationBundler(ImmutableList.<String>of());
+  }
+
+  private LocalFile createLocalFile(String name, Location location) throws IOException {
+    return createLocalFile(name, location, false);
+  }
+
+  private LocalFile createLocalFile(String name, Location location, boolean archive) throws IOException {
+    return new DefaultLocalFile(name, location.toURI(), location.lastModified(), location.length(), archive, null);
+  }
+
+  private void createAppMasterJar(ApplicationBundler bundler, Map<String, LocalFile> localFiles) throws IOException {
+    try {
+      LOG.debug("Create and copy {}", Constants.Files.APP_MASTER_JAR);
+      Location location = createTempLocation(Constants.Files.APP_MASTER_JAR);
+
+      List<Class<?>> classes = Lists.newArrayList();
+      classes.add(ApplicationMasterMain.class);
+
+      // Include the yarnAppClient class so that the bundler is able to pick up the right yarn-client version
+      classes.add(yarnAppClient.getClass());
+
+      // Add the TwillRunnableEventHandler class
+      if (twillSpec.getEventHandler() != null) {
+        classes.add(getClassLoader().loadClass(twillSpec.getEventHandler().getClassName()));
+      }
+
+      bundler.createBundle(location, classes);
+      LOG.debug("Done {}", Constants.Files.APP_MASTER_JAR);
+
+      localFiles.put(Constants.Files.APP_MASTER_JAR, createLocalFile(Constants.Files.APP_MASTER_JAR, location));
+    } catch (ClassNotFoundException e) {
+      throw Throwables.propagate(e);
+    }
+  }
+
+  private void createContainerJar(ApplicationBundler bundler, Map<String, LocalFile> localFiles) throws IOException {
+    try {
+      Set<Class<?>> classes = Sets.newIdentityHashSet();
+      classes.add(TwillContainerMain.class);
+      classes.addAll(dependencies);
+
+      ClassLoader classLoader = getClassLoader();
+      for (RuntimeSpecification spec : twillSpec.getRunnables().values()) {
+        classes.add(classLoader.loadClass(spec.getRunnableSpecification().getClassName()));
+      }
+
+      LOG.debug("Create and copy {}", Constants.Files.CONTAINER_JAR);
+      Location location = createTempLocation(Constants.Files.CONTAINER_JAR);
+      bundler.createBundle(location, classes, resources);
+      LOG.debug("Done {}", Constants.Files.CONTAINER_JAR);
+
+      localFiles.put(Constants.Files.CONTAINER_JAR, createLocalFile(Constants.Files.CONTAINER_JAR, location));
+
+    } catch (ClassNotFoundException e) {
+      throw Throwables.propagate(e);
+    }
+  }
+
+  /**
+   * Based on the given {@link TwillSpecification}, uploads LocalFiles to the YARN cluster.
+   * @param twillSpec The {@link TwillSpecification} from which resources are populated.
+   * @param localFiles A Multimap for storing the mapping from runnable name to transformed LocalFiles.
+   * @throws IOException
+   */
+  private void populateRunnableLocalFiles(TwillSpecification twillSpec,
+                                          Multimap<String, LocalFile> localFiles) throws IOException {
+
+    LOG.debug("Populating Runnable LocalFiles");
+    for (Map.Entry<String, RuntimeSpecification> entry: twillSpec.getRunnables().entrySet()) {
+      String runnableName = entry.getKey();
+      for (LocalFile localFile : entry.getValue().getLocalFiles()) {
+        Location location;
+
+        URI uri = localFile.getURI();
+        if ("hdfs".equals(uri.getScheme())) {
+          // Assuming the location factory is an HDFS one. If it is not, it will fail, which is the correct behavior.
+          location = locationFactory.create(uri);
+        } else {
+          URL url = uri.toURL();
+          LOG.debug("Create and copy {} : {}", runnableName, url);
+          // Preserves original suffix for expansion.
+          location = copyFromURL(url, createTempLocation(Paths.appendSuffix(url.getFile(), localFile.getName())));
+          LOG.debug("Done {} : {}", runnableName, url);
+        }
+
+        localFiles.put(runnableName,
+                       new DefaultLocalFile(localFile.getName(), location.toURI(), location.lastModified(),
+                                            location.length(), localFile.isArchive(), localFile.getPattern()));
+      }
+    }
+    LOG.debug("Done Runnable LocalFiles");
+  }
+
+  private void saveSpecification(TwillSpecification spec, final Multimap<String, LocalFile> runnableLocalFiles,
+                                 Map<String, LocalFile> localFiles) throws IOException {
+    // Rewrite LocalFiles inside twillSpec
+    Map<String, RuntimeSpecification> runtimeSpec = Maps.transformEntries(
+      spec.getRunnables(), new Maps.EntryTransformer<String, RuntimeSpecification, RuntimeSpecification>() {
+      @Override
+      public RuntimeSpecification transformEntry(String key, RuntimeSpecification value) {
+        return new DefaultRuntimeSpecification(value.getName(), value.getRunnableSpecification(),
+                                               value.getResourceSpecification(), runnableLocalFiles.get(key));
+      }
+    });
+
+    // Serialize into a local temp file.
+    LOG.debug("Create and copy {}", Constants.Files.TWILL_SPEC);
+    Location location = createTempLocation(Constants.Files.TWILL_SPEC);
+    Writer writer = new OutputStreamWriter(location.getOutputStream(), Charsets.UTF_8);
+    try {
+      EventHandlerSpecification eventHandler = spec.getEventHandler();
+      if (eventHandler == null) {
+        eventHandler = new LogOnlyEventHandler().configure();
+      }
+
+      TwillSpecificationAdapter.create().toJson(
+        new DefaultTwillSpecification(spec.getName(), runtimeSpec, spec.getOrders(), eventHandler),
+        writer);
+    } finally {
+      writer.close();
+    }
+    LOG.debug("Done {}", Constants.Files.TWILL_SPEC);
+
+    localFiles.put(Constants.Files.TWILL_SPEC, createLocalFile(Constants.Files.TWILL_SPEC, location));
+  }
+
+  private void saveLogback(Map<String, LocalFile> localFiles) throws IOException {
+    LOG.debug("Create and copy {}", Constants.Files.LOGBACK_TEMPLATE);
+    Location location = copyFromURL(getClass().getClassLoader().getResource(Constants.Files.LOGBACK_TEMPLATE),
+                                    createTempLocation(Constants.Files.LOGBACK_TEMPLATE));
+    LOG.debug("Done {}", Constants.Files.LOGBACK_TEMPLATE);
+
+    localFiles.put(Constants.Files.LOGBACK_TEMPLATE, createLocalFile(Constants.Files.LOGBACK_TEMPLATE, location));
+  }
+
+  /**
+   * Creates the launcher.jar used to launch the main application.
+   */
+  private void saveLauncher(Map<String, LocalFile> localFiles) throws URISyntaxException, IOException {
+
+    LOG.debug("Create and copy {}", Constants.Files.LAUNCHER_JAR);
+    Location location = createTempLocation(Constants.Files.LAUNCHER_JAR);
+
+    final String launcherName = TwillLauncher.class.getName();
+
+    // Create a jar file containing the TwillLauncher and, optionally, a plain-text "classpath" entry.
+    final JarOutputStream jarOut = new JarOutputStream(location.getOutputStream());
+    ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
+    if (classLoader == null) {
+      classLoader = getClass().getClassLoader();
+    }
+    Dependencies.findClassDependencies(classLoader, new Dependencies.ClassAcceptor() {
+      @Override
+      public boolean accept(String className, URL classUrl, URL classPathUrl) {
+        Preconditions.checkArgument(className.startsWith(launcherName),
+                                    "Launcher jar should not have dependencies: %s", className);
+        try {
+          jarOut.putNextEntry(new JarEntry(className.replace('.', '/') + ".class"));
+          InputStream is = classUrl.openStream();
+          try {
+            ByteStreams.copy(is, jarOut);
+          } finally {
+            is.close();
+          }
+        } catch (IOException e) {
+          throw Throwables.propagate(e);
+        }
+        return true;
+      }
+    }, TwillLauncher.class.getName());
+
+    try {
+      if (!classPaths.isEmpty()) {
+        jarOut.putNextEntry(new JarEntry("classpath"));
+        jarOut.write(Joiner.on(':').join(classPaths).getBytes(Charsets.UTF_8));
+      }
+    } finally {
+      jarOut.close();
+    }
+    LOG.debug("Done {}", Constants.Files.LAUNCHER_JAR);
+
+    localFiles.put(Constants.Files.LAUNCHER_JAR, createLocalFile(Constants.Files.LAUNCHER_JAR, location));
+  }
+
+  private void saveKafka(Map<String, LocalFile> localFiles) throws IOException {
+    LOG.debug("Copy {}", Constants.Files.KAFKA);
+    Location location = copyFromURL(getClass().getClassLoader().getResource(KAFKA_ARCHIVE),
+                                    createTempLocation(Constants.Files.KAFKA));
+    LOG.debug("Done {}", Constants.Files.KAFKA);
+
+    localFiles.put(Constants.Files.KAFKA, createLocalFile(Constants.Files.KAFKA, location, true));
+  }
+
+  private void saveVmOptions(String opts, Map<String, LocalFile> localFiles) throws IOException {
+    if (opts.isEmpty()) {
+      // If no vm options, no need to localize the file.
+      return;
+    }
+    LOG.debug("Copy {}", Constants.Files.JVM_OPTIONS);
+    final Location location = createTempLocation(Constants.Files.JVM_OPTIONS);
+    CharStreams.write(opts, new OutputSupplier<Writer>() {
+      @Override
+      public Writer getOutput() throws IOException {
+        return new OutputStreamWriter(location.getOutputStream(), Charsets.UTF_8);
+      }
+    });
+    LOG.debug("Done {}", Constants.Files.JVM_OPTIONS);
+
+    localFiles.put(Constants.Files.JVM_OPTIONS, createLocalFile(Constants.Files.JVM_OPTIONS, location));
+  }
+
+  private void saveArguments(Arguments arguments, Map<String, LocalFile> localFiles) throws IOException {
+    LOG.debug("Create and copy {}", Constants.Files.ARGUMENTS);
+    final Location location = createTempLocation(Constants.Files.ARGUMENTS);
+    ArgumentsCodec.encode(arguments, new OutputSupplier<Writer>() {
+      @Override
+      public Writer getOutput() throws IOException {
+        return new OutputStreamWriter(location.getOutputStream(), Charsets.UTF_8);
+      }
+    });
+    LOG.debug("Done {}", Constants.Files.ARGUMENTS);
+
+    localFiles.put(Constants.Files.ARGUMENTS, createLocalFile(Constants.Files.ARGUMENTS, location));
+  }
+
+  /**
+   * Serializes the list of files that need to be localized from the AM to the containers.
+   */
+  private void saveLocalFiles(Map<String, LocalFile> localFiles, Set<String> includes) throws IOException {
+    Map<String, LocalFile> localize = ImmutableMap.copyOf(Maps.filterKeys(localFiles, Predicates.in(includes)));
+    LOG.debug("Create and copy {}", Constants.Files.LOCALIZE_FILES);
+    Location location = createTempLocation(Constants.Files.LOCALIZE_FILES);
+    Writer writer = new OutputStreamWriter(location.getOutputStream(), Charsets.UTF_8);
+    try {
+      new GsonBuilder().registerTypeAdapter(LocalFile.class, new LocalFileCodec())
+        .create().toJson(localize.values(), new TypeToken<List<LocalFile>>() {
+      }.getType(), writer);
+    } finally {
+      writer.close();
+    }
+    LOG.debug("Done {}", Constants.Files.LOCALIZE_FILES);
+    localFiles.put(Constants.Files.LOCALIZE_FILES, createLocalFile(Constants.Files.LOCALIZE_FILES, location));
+  }
+
+  private Location copyFromURL(URL url, Location target) throws IOException {
+    InputStream is = url.openStream();
+    try {
+      OutputStream os = new BufferedOutputStream(target.getOutputStream());
+      try {
+        ByteStreams.copy(is, os);
+      } finally {
+        os.close();
+      }
+    } finally {
+      is.close();
+    }
+    return target;
+  }
+
+  private Location createTempLocation(String fileName) {
+    String name;
+    String suffix = Paths.getExtension(fileName);
+
+    name = fileName.substring(0, fileName.length() - suffix.length() - 1);
+
+    try {
+      return getAppLocation().append(name).getTempFile('.' + suffix);
+    } catch (IOException e) {
+      throw Throwables.propagate(e);
+    }
+  }
+
+  private Location getAppLocation() {
+    return locationFactory.create(String.format("/%s/%s", twillSpec.getName(), runId.getId()));
+  }
+
+  /**
+   * Returns the context ClassLoader if there is one; otherwise, returns the ClassLoader of this class.
+   */
+  private ClassLoader getClassLoader() {
+    ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
+    return classLoader == null ? getClass().getClassLoader() : classLoader;
+  }
+}

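For context, a minimal client-side sketch of how the fluent TwillPreparer API above is typically driven. This is not part of the patch: the ZooKeeper connect string and the arguments are illustrative assumptions, and the runnable used is the EchoServer test class added elsewhere in this change set.

    // Sketch only: assumes a running YARN cluster, ZooKeeper at "zkhost:2181", and the
    // EchoServer test runnable from this change set on the classpath.
    import org.apache.hadoop.yarn.conf.YarnConfiguration;
    import org.apache.twill.api.TwillController;
    import org.apache.twill.api.logging.PrinterLogHandler;
    import org.apache.twill.yarn.EchoServer;
    import org.apache.twill.yarn.YarnTwillRunnerService;

    import java.io.PrintWriter;

    public class EchoServerClient {
      public static void main(String[] args) {
        YarnTwillRunnerService runner = new YarnTwillRunnerService(new YarnConfiguration(), "zkhost:2181");
        runner.startAndWait();

        TwillController controller = runner.prepare(new EchoServer())
          .addLogHandler(new PrinterLogHandler(new PrintWriter(System.out)))  // stream container logs to stdout
          .withApplicationArguments("--demo")                                 // application-level arguments
          .withArguments("EchoServer", "--threads", "2")                      // arguments for the named runnable
          .start();                                                           // submits the AM as implemented above

        // ... interact with the running application through the controller ...
        controller.stopAndWait();
        runner.stopAndWait();
      }
    }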
http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/main/java/org/apache/twill/yarn/YarnTwillRunnerService.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/main/java/org/apache/twill/yarn/YarnTwillRunnerService.java b/twill-yarn/src/main/java/org/apache/twill/yarn/YarnTwillRunnerService.java
new file mode 100644
index 0000000..9335465
--- /dev/null
+++ b/twill-yarn/src/main/java/org/apache/twill/yarn/YarnTwillRunnerService.java
@@ -0,0 +1,583 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.yarn;
+
+import org.apache.twill.api.ResourceSpecification;
+import org.apache.twill.api.RunId;
+import org.apache.twill.api.SecureStore;
+import org.apache.twill.api.SecureStoreUpdater;
+import org.apache.twill.api.TwillApplication;
+import org.apache.twill.api.TwillController;
+import org.apache.twill.api.TwillPreparer;
+import org.apache.twill.api.TwillRunnable;
+import org.apache.twill.api.TwillRunnerService;
+import org.apache.twill.api.TwillSpecification;
+import org.apache.twill.api.logging.LogHandler;
+import org.apache.twill.common.Cancellable;
+import org.apache.twill.common.ServiceListenerAdapter;
+import org.apache.twill.common.Threads;
+import org.apache.twill.filesystem.HDFSLocationFactory;
+import org.apache.twill.filesystem.Location;
+import org.apache.twill.filesystem.LocationFactory;
+import org.apache.twill.internal.Constants;
+import org.apache.twill.internal.ProcessController;
+import org.apache.twill.internal.RunIds;
+import org.apache.twill.internal.SingleRunnableApplication;
+import org.apache.twill.internal.appmaster.ApplicationMasterLiveNodeData;
+import org.apache.twill.internal.yarn.VersionDetectYarnAppClientFactory;
+import org.apache.twill.internal.yarn.YarnAppClient;
+import org.apache.twill.internal.yarn.YarnApplicationReport;
+import org.apache.twill.internal.yarn.YarnUtils;
+import org.apache.twill.zookeeper.NodeChildren;
+import org.apache.twill.zookeeper.NodeData;
+import org.apache.twill.zookeeper.RetryStrategies;
+import org.apache.twill.zookeeper.ZKClient;
+import org.apache.twill.zookeeper.ZKClientService;
+import org.apache.twill.zookeeper.ZKClientServices;
+import org.apache.twill.zookeeper.ZKClients;
+import org.apache.twill.zookeeper.ZKOperations;
+import com.google.common.base.Charsets;
+import com.google.common.base.Function;
+import com.google.common.base.Preconditions;
+import com.google.common.base.Predicate;
+import com.google.common.base.Suppliers;
+import com.google.common.base.Throwables;
+import com.google.common.collect.HashBasedTable;
+import com.google.common.collect.HashMultimap;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.ImmutableTable;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Iterators;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Multimap;
+import com.google.common.collect.Sets;
+import com.google.common.collect.Table;
+import com.google.common.util.concurrent.AbstractIdleService;
+import com.google.common.util.concurrent.Callables;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import com.google.gson.Gson;
+import com.google.gson.JsonElement;
+import com.google.gson.JsonObject;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.zookeeper.CreateMode;
+import org.apache.zookeeper.KeeperException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.BufferedInputStream;
+import java.io.BufferedOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.Callable;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ScheduledFuture;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+/**
+ * An implementation of {@link org.apache.twill.api.TwillRunnerService} that runs applications on a YARN cluster.
+ */
+public final class YarnTwillRunnerService extends AbstractIdleService implements TwillRunnerService {
+
+  private static final Logger LOG = LoggerFactory.getLogger(YarnTwillRunnerService.class);
+
+  private static final int ZK_TIMEOUT = 10000;
+  private static final Function<String, RunId> STRING_TO_RUN_ID = new Function<String, RunId>() {
+    @Override
+    public RunId apply(String input) {
+      return RunIds.fromString(input);
+    }
+  };
+  private static final Function<YarnTwillController, TwillController> CAST_CONTROLLER =
+    new Function<YarnTwillController, TwillController>() {
+    @Override
+    public TwillController apply(YarnTwillController controller) {
+      return controller;
+    }
+  };
+
+  private final YarnConfiguration yarnConfig;
+  private final YarnAppClient yarnAppClient;
+  private final ZKClientService zkClientService;
+  private final LocationFactory locationFactory;
+  private final Table<String, RunId, YarnTwillController> controllers;
+  private ScheduledExecutorService secureStoreScheduler;
+
+  private Iterable<LiveInfo> liveInfos;
+  private Cancellable watchCancellable;
+  private volatile String jvmOptions = "";
+
+  public YarnTwillRunnerService(YarnConfiguration config, String zkConnect) {
+    this(config, zkConnect, new HDFSLocationFactory(getFileSystem(config), "/twill"));
+  }
+
+  public YarnTwillRunnerService(YarnConfiguration config, String zkConnect, LocationFactory locationFactory) {
+    this.yarnConfig = config;
+    this.yarnAppClient = new VersionDetectYarnAppClientFactory().create(config);
+    this.locationFactory = locationFactory;
+    this.zkClientService = getZKClientService(zkConnect);
+    this.controllers = HashBasedTable.create();
+  }
+
+  /**
+   * This method sets the extra JVM options that will be passed to the java command line for every application
+   * started through this {@link YarnTwillRunnerService} instance. It only affects applications that are started
+   * after the options are set.
+   *
+   * This is intended for advanced usage. All options are passed unchanged to the java command line. Invalid
+   * options could prevent applications from starting.
+   *
+   * @param options extra JVM options.
+   */
+  public void setJVMOptions(String options) {
+    Preconditions.checkArgument(options != null, "JVM options cannot be null.");
+    this.jvmOptions = options;
+  }
+
+  @Override
+  public Cancellable scheduleSecureStoreUpdate(final SecureStoreUpdater updater,
+                                               long initialDelay, long delay, TimeUnit unit) {
+    if (!UserGroupInformation.isSecurityEnabled()) {
+      return new Cancellable() {
+        @Override
+        public void cancel() {
+          // No-op
+        }
+      };
+    }
+
+    synchronized (this) {
+      if (secureStoreScheduler == null) {
+        secureStoreScheduler = Executors.newSingleThreadScheduledExecutor(
+          Threads.createDaemonThreadFactory("secure-store-updater"));
+      }
+    }
+
+    final ScheduledFuture<?> future = secureStoreScheduler.scheduleWithFixedDelay(new Runnable() {
+      @Override
+      public void run() {
+        // Collects all <application, runId> pairs first
+        Multimap<String, RunId> liveApps = HashMultimap.create();
+        synchronized (YarnTwillRunnerService.this) {
+          for (Table.Cell<String, RunId, YarnTwillController> cell : controllers.cellSet()) {
+            liveApps.put(cell.getRowKey(), cell.getColumnKey());
+          }
+        }
+
+        // Collect all secure stores that need to be updated.
+        Table<String, RunId, SecureStore> secureStores = HashBasedTable.create();
+        for (Map.Entry<String, RunId> entry : liveApps.entries()) {
+          try {
+            secureStores.put(entry.getKey(), entry.getValue(), updater.update(entry.getKey(), entry.getValue()));
+          } catch (Throwable t) {
+            LOG.warn("Exception thrown by SecureStoreUpdater {}", updater, t);
+          }
+        }
+
+        // Update secure stores.
+        updateSecureStores(secureStores);
+      }
+    }, initialDelay, delay, unit);
+
+    return new Cancellable() {
+      @Override
+      public void cancel() {
+        future.cancel(false);
+      }
+    };
+  }
+
+  @Override
+  public TwillPreparer prepare(TwillRunnable runnable) {
+    return prepare(runnable, ResourceSpecification.BASIC);
+  }
+
+  @Override
+  public TwillPreparer prepare(TwillRunnable runnable, ResourceSpecification resourceSpecification) {
+    return prepare(new SingleRunnableApplication(runnable, resourceSpecification));
+  }
+
+  @Override
+  public TwillPreparer prepare(TwillApplication application) {
+    Preconditions.checkState(isRunning(), "Service not started. Please call start() first.");
+    final TwillSpecification twillSpec = application.configure();
+    final String appName = twillSpec.getName();
+
+    return new YarnTwillPreparer(yarnConfig, twillSpec, yarnAppClient, zkClientService, locationFactory,
+                                 Suppliers.ofInstance(jvmOptions),
+                                 new YarnTwillControllerFactory() {
+      @Override
+      public YarnTwillController create(RunId runId, Iterable<LogHandler> logHandlers,
+                                        Callable<ProcessController<YarnApplicationReport>> startUp) {
+        ZKClient zkClient = ZKClients.namespace(zkClientService, "/" + appName);
+        YarnTwillController controller = listenController(new YarnTwillController(runId, zkClient,
+                                                                                  logHandlers, startUp));
+        synchronized (YarnTwillRunnerService.this) {
+          Preconditions.checkArgument(!controllers.contains(appName, runId),
+                                      "Application %s with runId %s is already running.", appName, runId);
+          controllers.put(appName, runId, controller);
+        }
+        return controller;
+      }
+    });
+  }
+
+  @Override
+  public synchronized TwillController lookup(String applicationName, final RunId runId) {
+    return controllers.get(applicationName, runId);
+  }
+
+  @Override
+  public Iterable<TwillController> lookup(final String applicationName) {
+    return new Iterable<TwillController>() {
+      @Override
+      public Iterator<TwillController> iterator() {
+        synchronized (YarnTwillRunnerService.this) {
+          return Iterators.transform(ImmutableList.copyOf(controllers.row(applicationName).values()).iterator(),
+                                     CAST_CONTROLLER);
+        }
+      }
+    };
+  }
+
+  @Override
+  public Iterable<LiveInfo> lookupLive() {
+    return liveInfos;
+  }
+
+  @Override
+  protected void startUp() throws Exception {
+    yarnAppClient.startAndWait();
+    zkClientService.startAndWait();
+
+    // Create the root node so that the namespace root gets created if it is missing.
+    // If the exception is caused by node exists, then it's ok. Otherwise propagate the exception.
+    ZKOperations.ignoreError(zkClientService.create("/", null, CreateMode.PERSISTENT),
+                             KeeperException.NodeExistsException.class, null).get();
+
+    watchCancellable = watchLiveApps();
+    liveInfos = createLiveInfos();
+
+    // Schedule an updater for updating HDFS delegation tokens
+    if (UserGroupInformation.isSecurityEnabled()) {
+      long delay = yarnConfig.getLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY,
+                                      DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_DEFAULT);
+      scheduleSecureStoreUpdate(new LocationSecureStoreUpdater(yarnConfig, locationFactory),
+                                delay, delay, TimeUnit.MILLISECONDS);
+    }
+  }
+
+  @Override
+  protected void shutDown() throws Exception {
+    // Shutdown shouldn't stop any controllers, as stopping this client service should leave the remote containers
+    // running. However, this assumes that this TwillRunnerService is a long-running service and you only stop it
+    // when the JVM process is about to exit. Hence it is important that threads created in the controllers are
+    // daemon threads.
+    synchronized (this) {
+      if (secureStoreScheduler != null) {
+        secureStoreScheduler.shutdownNow();
+      }
+    }
+    watchCancellable.cancel();
+    zkClientService.stopAndWait();
+    yarnAppClient.stopAndWait();
+  }
+
+  private Cancellable watchLiveApps() {
+    final Map<String, Cancellable> watched = Maps.newConcurrentMap();
+
+    final AtomicBoolean cancelled = new AtomicBoolean(false);
+    // Watch child changes in the root, which gives all application names.
+    final Cancellable cancellable = ZKOperations.watchChildren(zkClientService, "/",
+                                                               new ZKOperations.ChildrenCallback() {
+      @Override
+      public void updated(NodeChildren nodeChildren) {
+        if (cancelled.get()) {
+          return;
+        }
+
+        Set<String> apps = ImmutableSet.copyOf(nodeChildren.getChildren());
+
+        // For each of the application names, watch for ephemeral nodes under /instances.
+        for (final String appName : apps) {
+          if (watched.containsKey(appName)) {
+            continue;
+          }
+
+          final String instancePath = String.format("/%s/instances", appName);
+          watched.put(appName,
+                      ZKOperations.watchChildren(zkClientService, instancePath, new ZKOperations.ChildrenCallback() {
+            @Override
+            public void updated(NodeChildren nodeChildren) {
+              if (cancelled.get()) {
+                return;
+              }
+              if (nodeChildren.getChildren().isEmpty()) {     // No more children means no live instances
+                Cancellable removed = watched.remove(appName);
+                if (removed != null) {
+                  removed.cancel();
+                }
+                return;
+              }
+              synchronized (YarnTwillRunnerService.this) {
+                // For each of the children, whose node name is the runId,
+                // fetch the application Id and construct a TwillController.
+                for (final RunId runId : Iterables.transform(nodeChildren.getChildren(), STRING_TO_RUN_ID)) {
+                  if (controllers.contains(appName, runId)) {
+                    continue;
+                  }
+                  updateController(appName, runId, cancelled);
+                }
+              }
+            }
+          }));
+        }
+
+        // Remove app watches for apps that are gone. Removal of controller from controllers table is done
+        // in the state listener attached to the twill controller.
+        for (String removeApp : Sets.difference(watched.keySet(), apps)) {
+          watched.remove(removeApp).cancel();
+        }
+      }
+    });
+    return new Cancellable() {
+      @Override
+      public void cancel() {
+        cancelled.set(true);
+        cancellable.cancel();
+        for (Cancellable c : watched.values()) {
+          c.cancel();
+        }
+      }
+    };
+  }
+
+  private YarnTwillController listenController(final YarnTwillController controller) {
+    controller.addListener(new ServiceListenerAdapter() {
+      @Override
+      public void terminated(State from) {
+        removeController();
+      }
+
+      @Override
+      public void failed(State from, Throwable failure) {
+        removeController();
+      }
+
+      private void removeController() {
+        synchronized (YarnTwillRunnerService.this) {
+          Iterables.removeIf(controllers.values(),
+                             new Predicate<TwillController>() {
+             @Override
+             public boolean apply(TwillController input) {
+               return input == controller;
+             }
+           });
+        }
+      }
+    }, Threads.SAME_THREAD_EXECUTOR);
+    return controller;
+  }
+
+  private ZKClientService getZKClientService(String zkConnect) {
+    return ZKClientServices.delegate(
+      ZKClients.reWatchOnExpire(
+        ZKClients.retryOnFailure(ZKClientService.Builder.of(zkConnect)
+                                   .setSessionTimeout(ZK_TIMEOUT)
+                                   .build(), RetryStrategies.exponentialDelay(100, 2000, TimeUnit.MILLISECONDS))));
+  }
+
+  private Iterable<LiveInfo> createLiveInfos() {
+    return new Iterable<LiveInfo>() {
+
+      @Override
+      public Iterator<LiveInfo> iterator() {
+        Map<String, Map<RunId, YarnTwillController>> controllerMap = ImmutableTable.copyOf(controllers).rowMap();
+        return Iterators.transform(controllerMap.entrySet().iterator(),
+                                   new Function<Map.Entry<String, Map<RunId, YarnTwillController>>, LiveInfo>() {
+          @Override
+          public LiveInfo apply(final Map.Entry<String, Map<RunId, YarnTwillController>> entry) {
+            return new LiveInfo() {
+              @Override
+              public String getApplicationName() {
+                return entry.getKey();
+              }
+
+              @Override
+              public Iterable<TwillController> getControllers() {
+                return Iterables.transform(entry.getValue().values(), CAST_CONTROLLER);
+              }
+            };
+          }
+        });
+      }
+    };
+  }
+
+  private void updateController(final String appName, final RunId runId, final AtomicBoolean cancelled) {
+    String instancePath = String.format("/%s/instances/%s", appName, runId.getId());
+
+    // Fetch the content node.
+    Futures.addCallback(zkClientService.getData(instancePath), new FutureCallback<NodeData>() {
+      @Override
+      public void onSuccess(NodeData result) {
+        if (cancelled.get()) {
+          return;
+        }
+        ApplicationId appId = getApplicationId(result);
+        if (appId == null) {
+          return;
+        }
+
+        synchronized (YarnTwillRunnerService.this) {
+          if (!controllers.contains(appName, runId)) {
+            ZKClient zkClient = ZKClients.namespace(zkClientService, "/" + appName);
+            YarnTwillController controller = listenController(
+              new YarnTwillController(runId, zkClient,
+                                      Callables.returning(yarnAppClient.createProcessController(appId))));
+            controllers.put(appName, runId, controller);
+            controller.start();
+          }
+        }
+      }
+
+      @Override
+      public void onFailure(Throwable t) {
+        LOG.warn("Failed in fetching application instance node.", t);
+      }
+    }, Threads.SAME_THREAD_EXECUTOR);
+  }
+
+
+  /**
+   * Decodes the application ID stored inside the node data.
+   * @param nodeData The node data to decode from. If it is {@code null}, this method returns {@code null}.
+   * @return The ApplicationId, or {@code null} if decoding failed.
+   */
+  private ApplicationId getApplicationId(NodeData nodeData) {
+    byte[] data = nodeData == null ? null : nodeData.getData();
+    if (data == null) {
+      return null;
+    }
+
+    Gson gson = new Gson();
+    JsonElement json = gson.fromJson(new String(data, Charsets.UTF_8), JsonElement.class);
+    if (!json.isJsonObject()) {
+      LOG.warn("Unable to decode live data node.");
+      return null;
+    }
+
+    JsonObject jsonObj = json.getAsJsonObject();
+    json = jsonObj.get("data");
+    if (!json.isJsonObject()) {
+      LOG.warn("Property data not found in live data node.");
+      return null;
+    }
+
+    try {
+      ApplicationMasterLiveNodeData amLiveNode = gson.fromJson(json, ApplicationMasterLiveNodeData.class);
+      return YarnUtils.createApplicationId(amLiveNode.getAppIdClusterTime(), amLiveNode.getAppId());
+    } catch (Exception e) {
+      LOG.warn("Failed to decode application live node data.", e);
+      return null;
+    }
+  }
+
+  private void updateSecureStores(Table<String, RunId, SecureStore> secureStores) {
+    for (Table.Cell<String, RunId, SecureStore> cell : secureStores.cellSet()) {
+      Object store = cell.getValue().getStore();
+      if (!(store instanceof Credentials)) {
+        LOG.warn("Only Hadoop Credentials is supported. Ignore update for {}.", cell);
+        continue;
+      }
+
+      Credentials credentials = (Credentials) store;
+      if (credentials.getAllTokens().isEmpty()) {
+        // Nothing to update.
+        continue;
+      }
+
+      try {
+        updateCredentials(cell.getRowKey(), cell.getColumnKey(), credentials);
+        synchronized (YarnTwillRunnerService.this) {
+          // Notify the application of secure store updates if it is still running.
+          YarnTwillController controller = controllers.get(cell.getRowKey(), cell.getColumnKey());
+          if (controller != null) {
+            controller.secureStoreUpdated();
+          }
+        }
+      } catch (Throwable t) {
+        LOG.warn("Failed to update secure store for {}.", cell, t);
+      }
+    }
+  }
+
+  private void updateCredentials(String application, RunId runId, Credentials updates) throws IOException {
+    Location credentialsLocation = locationFactory.create(String.format("/%s/%s/%s", application, runId.getId(),
+                                                                        Constants.Files.CREDENTIALS));
+    // Try to read the old credentials.
+    Credentials credentials = new Credentials();
+    if (credentialsLocation.exists()) {
+      DataInputStream is = new DataInputStream(new BufferedInputStream(credentialsLocation.getInputStream()));
+      try {
+        credentials.readTokenStorageStream(is);
+      } finally {
+        is.close();
+      }
+    }
+
+    // Overwrite with the updates.
+    credentials.addAll(updates);
+
+    // Overwrite the credentials.
+    Location tmpLocation = credentialsLocation.getTempFile(Constants.Files.CREDENTIALS);
+
+    // Save the credentials store with user-only permission.
+    DataOutputStream os = new DataOutputStream(new BufferedOutputStream(tmpLocation.getOutputStream("600")));
+    try {
+      credentials.writeTokenStorageToStream(os);
+    } finally {
+      os.close();
+    }
+
+    // Rename the tmp file into the credentials location
+    tmpLocation.renameTo(credentialsLocation);
+
+    LOG.debug("Secure store for {} {} saved to {}.", application, runId, credentialsLocation.toURI());
+  }
+
+  private static FileSystem getFileSystem(YarnConfiguration configuration) {
+    try {
+      return FileSystem.get(configuration);
+    } catch (IOException e) {
+      throw Throwables.propagate(e);
+    }
+  }
+}

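A short sketch of the runner-service-specific calls described above (setJVMOptions, lookup, lookupLive). This is not part of the patch; the connect string, the GC flag, and the application name "EchoServer" (the default name for a single-runnable application) are illustrative assumptions.

    // Sketch only: "zkhost:2181", "-verbose:gc" and the application name are assumptions.
    import org.apache.hadoop.yarn.conf.YarnConfiguration;
    import org.apache.twill.api.TwillController;
    import org.apache.twill.api.TwillRunner;
    import org.apache.twill.yarn.YarnTwillRunnerService;

    public class RunnerServiceSketch {
      public static void main(String[] args) {
        YarnTwillRunnerService runner = new YarnTwillRunnerService(new YarnConfiguration(), "zkhost:2181");
        runner.setJVMOptions("-verbose:gc");   // only applications started after this call pick up the options
        runner.startAndWait();

        // Controllers for every live run of a named application.
        for (TwillController controller : runner.lookup("EchoServer")) {
          System.out.println("Live run: " + controller.getRunId());
        }

        // All live applications discovered through the ZooKeeper namespace.
        for (TwillRunner.LiveInfo info : runner.lookupLive()) {
          System.out.println("Application: " + info.getApplicationName());
        }

        runner.stopAndWait();
      }
    }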
http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/main/java/org/apache/twill/yarn/package-info.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/main/java/org/apache/twill/yarn/package-info.java b/twill-yarn/src/main/java/org/apache/twill/yarn/package-info.java
new file mode 100644
index 0000000..b3cbc5e
--- /dev/null
+++ b/twill-yarn/src/main/java/org/apache/twill/yarn/package-info.java
@@ -0,0 +1,21 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * Classes in this package implement the Twill API for Apache Hadoop YARN.
+ */
+package org.apache.twill.yarn;

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/main/resources/logback-template.xml
----------------------------------------------------------------------
diff --git a/twill-yarn/src/main/resources/logback-template.xml b/twill-yarn/src/main/resources/logback-template.xml
new file mode 100644
index 0000000..38cf6c8
--- /dev/null
+++ b/twill-yarn/src/main/resources/logback-template.xml
@@ -0,0 +1,11 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<!-- Default logback configuration for twill library -->
+<configuration>
+
+    <logger name="org.apache.hadoop" level="WARN" />
+    <logger name="org.apache.zookeeper" level="WARN" />
+
+    <root level="INFO" />
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/test/java/org/apache/twill/yarn/BuggyServer.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/test/java/org/apache/twill/yarn/BuggyServer.java b/twill-yarn/src/test/java/org/apache/twill/yarn/BuggyServer.java
new file mode 100644
index 0000000..bb1a583
--- /dev/null
+++ b/twill-yarn/src/test/java/org/apache/twill/yarn/BuggyServer.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.yarn;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.PrintWriter;
+
+/**
+ * Server for testing that will die if you give it a 0.
+ */
+public final class BuggyServer extends SocketServer {
+
+  private static final Logger LOG = LoggerFactory.getLogger(BuggyServer.class);
+
+  @Override
+  public void handleRequest(BufferedReader reader, PrintWriter writer) throws IOException {
+    String line = reader.readLine();
+    LOG.info("Received: " + line + " going to divide by it");
+    Integer toDivide = Integer.valueOf(line);
+    writer.println(Integer.toString(100 / toDivide));
+  }
+}

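For illustration, a hypothetical client interaction with this test server; the host and port are assumptions, since the actual tests resolve the server address through service discovery (see the Discoverable usage in the test runs below) rather than hard-coding it.

    // Sketch only: host and port are assumptions; the real address comes from service discovery.
    import com.google.common.base.Charsets;
    import java.io.BufferedReader;
    import java.io.IOException;
    import java.io.InputStreamReader;
    import java.io.PrintWriter;
    import java.net.Socket;

    public class BuggyServerClient {
      public static void main(String[] args) throws IOException {
        Socket socket = new Socket("localhost", 12345);
        try {
          PrintWriter writer = new PrintWriter(socket.getOutputStream(), true);
          BufferedReader reader = new BufferedReader(
              new InputStreamReader(socket.getInputStream(), Charsets.US_ASCII));
          writer.println("5");                    // server replies with 100 / 5 = "20"
          System.out.println(reader.readLine());
          // Sending "0" instead makes the server divide by zero and terminate, which is
          // the failure mode the surrounding tests exercise.
        } finally {
          socket.close();
        }
      }
    }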
http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/test/java/org/apache/twill/yarn/DistributeShellTestRun.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/test/java/org/apache/twill/yarn/DistributeShellTestRun.java b/twill-yarn/src/test/java/org/apache/twill/yarn/DistributeShellTestRun.java
new file mode 100644
index 0000000..1054ec9
--- /dev/null
+++ b/twill-yarn/src/test/java/org/apache/twill/yarn/DistributeShellTestRun.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.yarn;
+
+import org.apache.twill.api.TwillController;
+import org.apache.twill.api.TwillRunner;
+import org.apache.twill.api.logging.PrinterLogHandler;
+import org.apache.twill.common.ServiceListenerAdapter;
+import org.apache.twill.common.Threads;
+import com.google.common.util.concurrent.Service;
+import org.junit.Assert;
+import org.junit.Ignore;
+import org.junit.Test;
+
+import java.io.PrintWriter;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * This test is executed by {@link YarnTestSuite}.
+ */
+public class DistributeShellTestRun {
+
+  @Ignore
+  @Test
+  public void testDistributedShell() throws InterruptedException {
+    TwillRunner twillRunner = YarnTestSuite.getTwillRunner();
+
+    TwillController controller = twillRunner.prepare(new DistributedShell("pwd", "ls -al"))
+                                            .addLogHandler(new PrinterLogHandler(new PrintWriter(System.out)))
+                                            .start();
+
+    final CountDownLatch stopLatch = new CountDownLatch(1);
+    controller.addListener(new ServiceListenerAdapter() {
+
+      @Override
+      public void terminated(Service.State from) {
+        stopLatch.countDown();
+      }
+
+      @Override
+      public void failed(Service.State from, Throwable failure) {
+        stopLatch.countDown();
+      }
+    }, Threads.SAME_THREAD_EXECUTOR);
+
+    Assert.assertTrue(stopLatch.await(10, TimeUnit.SECONDS));
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/test/java/org/apache/twill/yarn/DistributedShell.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/test/java/org/apache/twill/yarn/DistributedShell.java b/twill-yarn/src/test/java/org/apache/twill/yarn/DistributedShell.java
new file mode 100644
index 0000000..c89371c
--- /dev/null
+++ b/twill-yarn/src/test/java/org/apache/twill/yarn/DistributedShell.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.yarn;
+
+import org.apache.twill.api.AbstractTwillRunnable;
+import com.google.common.base.Charsets;
+import com.google.common.base.Joiner;
+import com.google.common.base.Splitter;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+
+/**
+ * A {@link org.apache.twill.api.TwillRunnable} that executes a list of shell commands and logs their output.
+ */
+public final class DistributedShell extends AbstractTwillRunnable {
+
+  private static final Logger LOG = LoggerFactory.getLogger(DistributedShell.class);
+
+  public DistributedShell(String...commands) {
+    super(ImmutableMap.of("cmds", Joiner.on(';').join(commands)));
+  }
+
+  @Override
+  public void run() {
+    for (String cmd : Splitter.on(';').split(getArgument("cmds"))) {
+      try {
+        Process process = new ProcessBuilder(ImmutableList.copyOf(Splitter.on(' ').split(cmd)))
+                              .redirectErrorStream(true).start();
+        BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream(), Charsets.US_ASCII));
+        try {
+          String line = reader.readLine();
+          while (line != null) {
+            LOG.info(line);
+            line = reader.readLine();
+          }
+        } finally {
+          reader.close();
+        }
+      } catch (IOException e) {
+        LOG.error("Fail to execute command " + cmd, e);
+      }
+    }
+  }
+
+  @Override
+  public void stop() {
+    // No-op
+  }
+}


http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/test/java/org/apache/twill/yarn/EchoServer.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/test/java/org/apache/twill/yarn/EchoServer.java b/twill-yarn/src/test/java/org/apache/twill/yarn/EchoServer.java
new file mode 100644
index 0000000..6b77e66
--- /dev/null
+++ b/twill-yarn/src/test/java/org/apache/twill/yarn/EchoServer.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.yarn;
+
+import org.apache.twill.api.Command;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.PrintWriter;
+
+/**
+ * Test server that echoes back what it receives.
+ */
+public final class EchoServer extends SocketServer {
+
+  private static final Logger LOG = LoggerFactory.getLogger(EchoServer.class);
+
+  @Override
+  public void handleRequest(BufferedReader reader, PrintWriter writer) throws IOException {
+    String line = reader.readLine();
+    LOG.info("Received: " + line);
+    if (line != null) {
+      writer.println(line);
+    }
+  }
+
+  @Override
+  public void handleCommand(Command command) throws Exception {
+    LOG.info("Command received: " + command + " " + getContext().getInstanceCount());
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/test/java/org/apache/twill/yarn/EchoServerTestRun.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/test/java/org/apache/twill/yarn/EchoServerTestRun.java b/twill-yarn/src/test/java/org/apache/twill/yarn/EchoServerTestRun.java
new file mode 100644
index 0000000..d868eef
--- /dev/null
+++ b/twill-yarn/src/test/java/org/apache/twill/yarn/EchoServerTestRun.java
@@ -0,0 +1,138 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.yarn;
+
+import org.apache.twill.api.ResourceSpecification;
+import org.apache.twill.api.TwillController;
+import org.apache.twill.api.TwillRunner;
+import org.apache.twill.api.TwillRunnerService;
+import org.apache.twill.api.logging.PrinterLogHandler;
+import org.apache.twill.common.ServiceListenerAdapter;
+import org.apache.twill.common.Threads;
+import org.apache.twill.discovery.Discoverable;
+import com.google.common.base.Charsets;
+import com.google.common.io.LineReader;
+import org.junit.Assert;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.OutputStreamWriter;
+import java.io.PrintWriter;
+import java.net.Socket;
+import java.net.URISyntaxException;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+/**
+ * Uses an echo server to test various behaviors of YarnTwillService.
+ * This test is executed by {@link YarnTestSuite}.
+ */
+public class EchoServerTestRun {
+
+  private static final Logger LOG = LoggerFactory.getLogger(EchoServerTestRun.class);
+
+  @Test
+  public void testEchoServer() throws InterruptedException, ExecutionException, IOException,
+    URISyntaxException, TimeoutException {
+    TwillRunner runner = YarnTestSuite.getTwillRunner();
+
+    TwillController controller = runner.prepare(new EchoServer(),
+                                                ResourceSpecification.Builder.with()
+                                                         .setVirtualCores(1)
+                                                         .setMemory(1, ResourceSpecification.SizeUnit.GIGA)
+                                                         .setInstances(2)
+                                                         .build())
+                                        .addLogHandler(new PrinterLogHandler(new PrintWriter(System.out, true)))
+                                        .withApplicationArguments("echo")
+                                        .withArguments("EchoServer", "echo2")
+                                        .start();
+
+    final CountDownLatch running = new CountDownLatch(1);
+    controller.addListener(new ServiceListenerAdapter() {
+      @Override
+      public void running() {
+        running.countDown();
+      }
+    }, Threads.SAME_THREAD_EXECUTOR);
+
+    Assert.assertTrue(running.await(30, TimeUnit.SECONDS));
+
+    Iterable<Discoverable> echoServices = controller.discoverService("echo");
+    Assert.assertTrue(YarnTestSuite.waitForSize(echoServices, 2, 60));
+
+    for (Discoverable discoverable : echoServices) {
+      String msg = "Hello: " + discoverable.getSocketAddress();
+
+      Socket socket = new Socket(discoverable.getSocketAddress().getAddress(),
+                                 discoverable.getSocketAddress().getPort());
+      try {
+        PrintWriter writer = new PrintWriter(new OutputStreamWriter(socket.getOutputStream(), Charsets.UTF_8), true);
+        LineReader reader = new LineReader(new InputStreamReader(socket.getInputStream(), Charsets.UTF_8));
+
+        writer.println(msg);
+        Assert.assertEquals(msg, reader.readLine());
+      } finally {
+        socket.close();
+      }
+    }
+
+    // Increase number of instances
+    controller.changeInstances("EchoServer", 3);
+    Assert.assertTrue(YarnTestSuite.waitForSize(echoServices, 3, 60));
+
+    echoServices = controller.discoverService("echo2");
+
+    // Decrease number of instances
+    controller.changeInstances("EchoServer", 1);
+    Assert.assertTrue(YarnTestSuite.waitForSize(echoServices, 1, 60));
+
+    // Increase number of instances again
+    controller.changeInstances("EchoServer", 2);
+    Assert.assertTrue(YarnTestSuite.waitForSize(echoServices, 2, 60));
+
+    // Make sure still only one app is running
+    Iterable<TwillRunner.LiveInfo> apps = runner.lookupLive();
+    Assert.assertTrue(YarnTestSuite.waitForSize(apps, 1, 60));
+
+    // Creates a new runner service to check it can regain control over running app.
+    TwillRunnerService runnerService = YarnTestSuite.createTwillRunnerService();
+    runnerService.startAndWait();
+
+    try {
+      Iterable <TwillController> controllers = runnerService.lookup("EchoServer");
+      Assert.assertTrue(YarnTestSuite.waitForSize(controllers, 1, 60));
+
+      for (TwillController c : controllers) {
+        LOG.info("Stopping application: " + c.getRunId());
+        c.stop().get(30, TimeUnit.SECONDS);
+      }
+
+      Assert.assertTrue(YarnTestSuite.waitForSize(apps, 0, 60));
+    } finally {
+      runnerService.stopAndWait();
+    }
+
+    // Sleep a bit before exiting.
+    TimeUnit.SECONDS.sleep(2);
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/test/java/org/apache/twill/yarn/EnvironmentEchoServer.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/test/java/org/apache/twill/yarn/EnvironmentEchoServer.java b/twill-yarn/src/test/java/org/apache/twill/yarn/EnvironmentEchoServer.java
new file mode 100644
index 0000000..4be2472
--- /dev/null
+++ b/twill-yarn/src/test/java/org/apache/twill/yarn/EnvironmentEchoServer.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.yarn;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.PrintWriter;
+
+/**
+ * Test server that returns the value of the environment variable whose name is sent in.
+ * Used to check that the environment for runnables is correctly set.
+ */
+public class EnvironmentEchoServer extends SocketServer {
+
+  @Override
+  public void handleRequest(BufferedReader reader, PrintWriter writer) throws IOException {
+    String envKey = reader.readLine();
+    writer.println(System.getenv(envKey));
+  }
+}
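
A client talks to this server by writing an environment variable name and reading back a single line, which is how the resource-report test later in this commit checks the container environment. A minimal sketch, assuming the host and port came from service discovery; the class and parameter names are illustrative only:

  import com.google.common.base.Charsets;
  import com.google.common.io.LineReader;
  import java.io.IOException;
  import java.io.InputStreamReader;
  import java.io.OutputStreamWriter;
  import java.io.PrintWriter;
  import java.net.Socket;

  // Hypothetical client helper, not part of this commit.
  public final class EnvEchoClient {
    public static String queryEnv(String host, int port, String envKey) throws IOException {
      Socket socket = new Socket(host, port);
      try {
        PrintWriter writer = new PrintWriter(new OutputStreamWriter(socket.getOutputStream(), Charsets.UTF_8), true);
        LineReader reader = new LineReader(new InputStreamReader(socket.getInputStream(), Charsets.UTF_8));
        writer.println(envKey);    // send the environment variable name
        return reader.readLine();  // the server echoes back its value
      } finally {
        socket.close();
      }
    }
  }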

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/test/java/org/apache/twill/yarn/FailureRestartTestRun.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/test/java/org/apache/twill/yarn/FailureRestartTestRun.java b/twill-yarn/src/test/java/org/apache/twill/yarn/FailureRestartTestRun.java
new file mode 100644
index 0000000..b3d3933
--- /dev/null
+++ b/twill-yarn/src/test/java/org/apache/twill/yarn/FailureRestartTestRun.java
@@ -0,0 +1,133 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.yarn;
+
+import org.apache.twill.api.Command;
+import org.apache.twill.api.ResourceSpecification;
+import org.apache.twill.api.TwillController;
+import org.apache.twill.api.TwillRunner;
+import org.apache.twill.api.logging.PrinterLogHandler;
+import org.apache.twill.discovery.Discoverable;
+import com.google.common.base.Charsets;
+import com.google.common.collect.Sets;
+import com.google.common.io.LineReader;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.OutputStreamWriter;
+import java.io.PrintWriter;
+import java.net.InetSocketAddress;
+import java.net.Socket;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Tests that a failed container gets restarted with the same instance ID.
+ */
+public class FailureRestartTestRun {
+
+  @Test
+  public void testFailureRestart() throws Exception {
+    TwillRunner runner = YarnTestSuite.getTwillRunner();
+
+    ResourceSpecification resource = ResourceSpecification.Builder.with()
+      .setVirtualCores(1)
+      .setMemory(512, ResourceSpecification.SizeUnit.MEGA)
+      .setInstances(2)
+      .build();
+    TwillController controller = runner.prepare(new FailureRunnable(), resource)
+      .withApplicationArguments("failure")
+      .withArguments(FailureRunnable.class.getSimpleName(), "failure2")
+      .addLogHandler(new PrinterLogHandler(new PrintWriter(System.out, true)))
+      .start();
+
+    Iterable<Discoverable> discoverables = controller.discoverService("failure");
+    Assert.assertTrue(YarnTestSuite.waitForSize(discoverables, 2, 60));
+
+    // Make sure we see the right instance IDs
+    Assert.assertEquals(Sets.newHashSet(0, 1), getInstances(discoverables));
+
+    // Kill server with instanceId = 0
+    controller.sendCommand(FailureRunnable.class.getSimpleName(), Command.Builder.of("kill0").build());
+
+    // Do a short sleep to make sure the runnable gets killed.
+    TimeUnit.SECONDS.sleep(5);
+
+    Assert.assertTrue(YarnTestSuite.waitForSize(discoverables, 2, 60));
+    // Make sure we see the right instance IDs
+    Assert.assertEquals(Sets.newHashSet(0, 1), getInstances(discoverables));
+
+    controller.stopAndWait();
+  }
+
+  private Set<Integer> getInstances(Iterable<Discoverable> discoverables) throws IOException {
+    Set<Integer> instances = Sets.newHashSet();
+    for (Discoverable discoverable : discoverables) {
+      InetSocketAddress socketAddress = discoverable.getSocketAddress();
+      Socket socket = new Socket(socketAddress.getAddress(), socketAddress.getPort());
+      try {
+        PrintWriter writer = new PrintWriter(new OutputStreamWriter(socket.getOutputStream(), Charsets.UTF_8), true);
+        LineReader reader = new LineReader(new InputStreamReader(socket.getInputStream(), Charsets.UTF_8));
+
+        String msg = "Failure";
+        writer.println(msg);
+
+        String line = reader.readLine();
+        Assert.assertTrue(line.endsWith(msg));
+        instances.add(Integer.parseInt(line.substring(0, line.length() - msg.length())));
+      } finally {
+        socket.close();
+      }
+    }
+    return instances;
+  }
+
+
+  public static final class FailureRunnable extends SocketServer {
+
+    private volatile boolean killed;
+
+    @Override
+    public void run() {
+      killed = false;
+      super.run();
+      if (killed) {
+        throw new RuntimeException("Exception");
+      }
+    }
+
+    @Override
+    public void handleCommand(Command command) throws Exception {
+      if (command.getCommand().equals("kill" + getContext().getInstanceId())) {
+        killed = true;
+        running = false;
+        serverSocket.close();
+      }
+    }
+
+    @Override
+    public void handleRequest(BufferedReader reader, PrintWriter writer) throws IOException {
+      String line = reader.readLine();
+      writer.println(getContext().getInstanceId() + line);
+      writer.flush();
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/test/java/org/apache/twill/yarn/LocalFileTestRun.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/test/java/org/apache/twill/yarn/LocalFileTestRun.java b/twill-yarn/src/test/java/org/apache/twill/yarn/LocalFileTestRun.java
new file mode 100644
index 0000000..de2c74c
--- /dev/null
+++ b/twill-yarn/src/test/java/org/apache/twill/yarn/LocalFileTestRun.java
@@ -0,0 +1,148 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.yarn;
+
+import org.apache.twill.api.TwillApplication;
+import org.apache.twill.api.TwillController;
+import org.apache.twill.api.TwillRunner;
+import org.apache.twill.api.TwillSpecification;
+import org.apache.twill.api.logging.PrinterLogHandler;
+import org.apache.twill.discovery.Discoverable;
+import com.google.common.base.Charsets;
+import com.google.common.base.Preconditions;
+import com.google.common.io.ByteStreams;
+import com.google.common.io.Files;
+import com.google.common.io.LineReader;
+import org.junit.Assert;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.OutputStreamWriter;
+import java.io.PrintWriter;
+import java.net.InetSocketAddress;
+import java.net.Socket;
+import java.util.concurrent.TimeUnit;
+import java.util.jar.JarEntry;
+import java.util.jar.JarOutputStream;
+
+/**
+ * Test for local file transfer.
+ */
+public class LocalFileTestRun {
+
+  @ClassRule
+  public static TemporaryFolder tmpFolder = new TemporaryFolder();
+
+  @Test
+  public void testLocalFile() throws Exception {
+    String header = Files.readFirstLine(new File(getClass().getClassLoader().getResource("header.txt").toURI()),
+                                        Charsets.UTF_8);
+
+    TwillRunner runner = YarnTestSuite.getTwillRunner();
+    if (runner instanceof YarnTwillRunnerService) {
+      ((YarnTwillRunnerService) runner).setJVMOptions("-verbose:gc -Xloggc:gc.log -XX:+PrintGCDetails");
+    }
+
+    TwillController controller = runner.prepare(new LocalFileApplication())
+      .withApplicationArguments("local")
+      .withArguments("LocalFileSocketServer", "local2")
+      .addLogHandler(new PrinterLogHandler(new PrintWriter(System.out, true)))
+      .start();
+
+    if (runner instanceof YarnTwillRunnerService) {
+      ((YarnTwillRunnerService) runner).setJVMOptions("");
+    }
+
+    Iterable<Discoverable> discoverables = controller.discoverService("local");
+    Assert.assertTrue(YarnTestSuite.waitForSize(discoverables, 1, 60));
+
+    InetSocketAddress socketAddress = discoverables.iterator().next().getSocketAddress();
+    Socket socket = new Socket(socketAddress.getAddress(), socketAddress.getPort());
+    try {
+      PrintWriter writer = new PrintWriter(new OutputStreamWriter(socket.getOutputStream(), Charsets.UTF_8), true);
+      LineReader reader = new LineReader(new InputStreamReader(socket.getInputStream(), Charsets.UTF_8));
+
+      String msg = "Local file test";
+      writer.println(msg);
+      Assert.assertEquals(header, reader.readLine());
+      Assert.assertEquals(msg, reader.readLine());
+    } finally {
+      socket.close();
+    }
+
+    controller.stopAndWait();
+
+    Assert.assertTrue(YarnTestSuite.waitForSize(discoverables, 0, 60));
+
+    TimeUnit.SECONDS.sleep(2);
+  }
+
+  public static final class LocalFileApplication implements TwillApplication {
+
+    private final File headerFile;
+
+    public LocalFileApplication() throws Exception {
+      // Create a jar file that contains the header.txt file inside.
+      headerFile = tmpFolder.newFile("header.jar");
+      JarOutputStream os = new JarOutputStream(new FileOutputStream(headerFile));
+      try {
+        os.putNextEntry(new JarEntry("header.txt"));
+        ByteStreams.copy(getClass().getClassLoader().getResourceAsStream("header.txt"), os);
+      } finally {
+        os.close();
+      }
+    }
+
+    @Override
+    public TwillSpecification configure() {
+      return TwillSpecification.Builder.with()
+        .setName("LocalFileApp")
+        .withRunnable()
+          .add(new LocalFileSocketServer())
+            .withLocalFiles()
+              .add("header", headerFile, true).apply()
+        .anyOrder()
+        .build();
+    }
+  }
+
+  public static final class LocalFileSocketServer extends SocketServer {
+
+    private static final Logger LOG = LoggerFactory.getLogger(LocalFileSocketServer.class);
+
+    @Override
+    public void handleRequest(BufferedReader reader, PrintWriter writer) throws IOException {
+      // Verify there is a gc.log file locally
+      Preconditions.checkState(new File("gc.log").exists());
+
+      LOG.info("handleRequest");
+      String header = Files.toString(new File("header/header.txt"), Charsets.UTF_8);
+      writer.write(header);
+      writer.println(reader.readLine());
+      LOG.info("Flushed response");
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/test/java/org/apache/twill/yarn/ProvisionTimeoutTestRun.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/test/java/org/apache/twill/yarn/ProvisionTimeoutTestRun.java b/twill-yarn/src/test/java/org/apache/twill/yarn/ProvisionTimeoutTestRun.java
new file mode 100644
index 0000000..0598ef1
--- /dev/null
+++ b/twill-yarn/src/test/java/org/apache/twill/yarn/ProvisionTimeoutTestRun.java
@@ -0,0 +1,128 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.yarn;
+
+import org.apache.twill.api.AbstractTwillRunnable;
+import org.apache.twill.api.EventHandler;
+import org.apache.twill.api.EventHandlerContext;
+import org.apache.twill.api.ResourceSpecification;
+import org.apache.twill.api.TwillApplication;
+import org.apache.twill.api.TwillController;
+import org.apache.twill.api.TwillRunner;
+import org.apache.twill.api.TwillSpecification;
+import org.apache.twill.api.logging.PrinterLogHandler;
+import org.apache.twill.common.Services;
+import com.google.common.base.Throwables;
+import com.google.common.collect.ImmutableMap;
+import org.junit.Test;
+
+import java.io.PrintWriter;
+import java.util.Map;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+/**
+ * Tests that the application aborts when container provisioning times out.
+ */
+public class ProvisionTimeoutTestRun {
+
+  @Test
+  public void testProvisionTimeout() throws InterruptedException, ExecutionException, TimeoutException {
+    TwillRunner runner = YarnTestSuite.getTwillRunner();
+
+    TwillController controller = runner.prepare(new TimeoutApplication())
+                                       .addLogHandler(new PrinterLogHandler(new PrintWriter(System.out, true)))
+                                       .start();
+
+    // The provisioning should fail within 30 seconds after the AM starts, but the AM itself may take a while to start.
+    // Hence we allow at most 90 seconds here.
+    try {
+      Services.getCompletionFuture(controller).get(90, TimeUnit.SECONDS);
+    } finally {
+      // If it times out, kill the app as cleanup.
+      controller.kill();
+    }
+  }
+
+  public static final class Handler extends EventHandler {
+
+    private boolean abort;
+
+    @Override
+    protected Map<String, String> getConfigs() {
+      return ImmutableMap.of("abort", "true");
+    }
+
+    @Override
+    public void initialize(EventHandlerContext context) {
+      this.abort = Boolean.parseBoolean(context.getSpecification().getConfigs().get("abort"));
+    }
+
+    @Override
+    public TimeoutAction launchTimeout(Iterable<TimeoutEvent> timeoutEvents) {
+      if (abort) {
+        return TimeoutAction.abort();
+      } else {
+        return TimeoutAction.recheck(10, TimeUnit.SECONDS);
+      }
+    }
+  }
+
+  public static final class TimeoutApplication implements TwillApplication {
+
+    @Override
+    public TwillSpecification configure() {
+      return TwillSpecification.Builder.with()
+        .setName("TimeoutApplication")
+        .withRunnable()
+        .add(new TimeoutRunnable(),
+             ResourceSpecification.Builder.with()
+               .setVirtualCores(1)
+               .setMemory(8, ResourceSpecification.SizeUnit.GIGA).build())
+        .noLocalFiles()
+        .anyOrder()
+        .withEventHandler(new Handler())
+        .build();
+    }
+  }
+
+  /**
+   * A runnable that does nothing, as it is not expected to get provisioned.
+   */
+  public static final class TimeoutRunnable extends AbstractTwillRunnable {
+
+    private final CountDownLatch latch = new CountDownLatch(1);
+
+    @Override
+    public void stop() {
+      latch.countDown();
+    }
+
+    @Override
+    public void run() {
+      // Simply block here
+      try {
+        latch.await();
+      } catch (InterruptedException e) {
+        throw Throwables.propagate(e);
+      }
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/test/java/org/apache/twill/yarn/ResourceReportTestRun.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/test/java/org/apache/twill/yarn/ResourceReportTestRun.java b/twill-yarn/src/test/java/org/apache/twill/yarn/ResourceReportTestRun.java
new file mode 100644
index 0000000..131f90a
--- /dev/null
+++ b/twill-yarn/src/test/java/org/apache/twill/yarn/ResourceReportTestRun.java
@@ -0,0 +1,268 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.yarn;
+
+import org.apache.twill.api.ResourceReport;
+import org.apache.twill.api.ResourceSpecification;
+import org.apache.twill.api.TwillApplication;
+import org.apache.twill.api.TwillController;
+import org.apache.twill.api.TwillRunResources;
+import org.apache.twill.api.TwillRunner;
+import org.apache.twill.api.TwillSpecification;
+import org.apache.twill.api.logging.PrinterLogHandler;
+import org.apache.twill.common.ServiceListenerAdapter;
+import org.apache.twill.common.Threads;
+import org.apache.twill.discovery.Discoverable;
+import org.apache.twill.internal.EnvKeys;
+import com.google.common.base.Charsets;
+import com.google.common.collect.Maps;
+import com.google.common.io.LineReader;
+import org.junit.Assert;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.OutputStreamWriter;
+import java.io.PrintWriter;
+import java.net.Socket;
+import java.net.URISyntaxException;
+import java.util.Collection;
+import java.util.Map;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+/**
+ * Uses an echo server to test resource reports.
+ * This test is executed by {@link org.apache.twill.yarn.YarnTestSuite}.
+ */
+public class ResourceReportTestRun {
+
+  private static final Logger LOG = LoggerFactory.getLogger(ResourceReportTestRun.class);
+
+  private class ResourceApplication implements TwillApplication {
+    @Override
+    public TwillSpecification configure() {
+      return TwillSpecification.Builder.with()
+        .setName("ResourceApplication")
+        .withRunnable()
+          .add("echo1", new EchoServer(), ResourceSpecification.Builder.with()
+            .setVirtualCores(1)
+            .setMemory(128, ResourceSpecification.SizeUnit.MEGA)
+            .setInstances(2).build()).noLocalFiles()
+          .add("echo2", new EchoServer(), ResourceSpecification.Builder.with()
+            .setVirtualCores(2)
+            .setMemory(256, ResourceSpecification.SizeUnit.MEGA)
+            .setInstances(1).build()).noLocalFiles()
+        .anyOrder()
+        .build();
+    }
+  }
+
+  @Test
+  public void testRunnablesGetAllowedResourcesInEnv() throws InterruptedException, IOException,
+    TimeoutException, ExecutionException {
+    TwillRunner runner = YarnTestSuite.getTwillRunner();
+
+    ResourceSpecification resourceSpec = ResourceSpecification.Builder.with()
+      .setVirtualCores(1)
+      .setMemory(2048, ResourceSpecification.SizeUnit.MEGA)
+      .setInstances(1)
+      .build();
+    TwillController controller = runner.prepare(new EnvironmentEchoServer(), resourceSpec)
+      .addLogHandler(new PrinterLogHandler(new PrintWriter(System.out, true)))
+      .withApplicationArguments("envecho")
+      .withArguments("EnvironmentEchoServer", "echo2")
+      .start();
+
+    final CountDownLatch running = new CountDownLatch(1);
+    controller.addListener(new ServiceListenerAdapter() {
+      @Override
+      public void running() {
+        running.countDown();
+      }
+    }, Threads.SAME_THREAD_EXECUTOR);
+
+    Assert.assertTrue(running.await(30, TimeUnit.SECONDS));
+
+    Iterable<Discoverable> envEchoServices = controller.discoverService("envecho");
+    Assert.assertTrue(YarnTestSuite.waitForSize(envEchoServices, 1, 30));
+
+    // TODO: check virtual cores once yarn adds the ability
+    Map<String, String> expectedValues = Maps.newHashMap();
+    expectedValues.put(EnvKeys.YARN_CONTAINER_MEMORY_MB, "2048");
+    expectedValues.put(EnvKeys.TWILL_INSTANCE_COUNT, "1");
+
+    // check environment of the runnable.
+    Discoverable discoverable = envEchoServices.iterator().next();
+    for (Map.Entry<String, String> expected : expectedValues.entrySet()) {
+      Socket socket = new Socket(discoverable.getSocketAddress().getHostName(),
+                                 discoverable.getSocketAddress().getPort());
+      try {
+        PrintWriter writer = new PrintWriter(new OutputStreamWriter(socket.getOutputStream(), Charsets.UTF_8), true);
+        LineReader reader = new LineReader(new InputStreamReader(socket.getInputStream(), Charsets.UTF_8));
+        writer.println(expected.getKey());
+        Assert.assertEquals(expected.getValue(), reader.readLine());
+      } finally {
+        socket.close();
+      }
+    }
+
+    controller.stop().get(30, TimeUnit.SECONDS);
+    // Sleep a bit before exiting.
+    TimeUnit.SECONDS.sleep(2);
+  }
+
+  @Test
+  public void testResourceReportWithFailingContainers() throws InterruptedException, IOException,
+    TimeoutException, ExecutionException {
+    TwillRunner runner = YarnTestSuite.getTwillRunner();
+
+    ResourceSpecification resourceSpec = ResourceSpecification.Builder.with()
+      .setVirtualCores(1)
+      .setMemory(128, ResourceSpecification.SizeUnit.MEGA)
+      .setInstances(2)
+      .build();
+    TwillController controller = runner.prepare(new BuggyServer(), resourceSpec)
+      .addLogHandler(new PrinterLogHandler(new PrintWriter(System.out, true)))
+      .withApplicationArguments("echo")
+      .withArguments("BuggyServer", "echo2")
+      .start();
+
+    final CountDownLatch running = new CountDownLatch(1);
+    controller.addListener(new ServiceListenerAdapter() {
+      @Override
+      public void running() {
+        running.countDown();
+      }
+    }, Threads.SAME_THREAD_EXECUTOR);
+
+    Assert.assertTrue(running.await(30, TimeUnit.SECONDS));
+
+    Iterable<Discoverable> echoServices = controller.discoverService("echo");
+    Assert.assertTrue(YarnTestSuite.waitForSize(echoServices, 2, 60));
+    // check that we have 2 runnables.
+    ResourceReport report = controller.getResourceReport();
+    Assert.assertEquals(2, report.getRunnableResources("BuggyServer").size());
+
+    // cause a divide by 0 in one server
+    Discoverable discoverable = echoServices.iterator().next();
+    Socket socket = new Socket(discoverable.getSocketAddress().getAddress(),
+                               discoverable.getSocketAddress().getPort());
+    try {
+      PrintWriter writer = new PrintWriter(new OutputStreamWriter(socket.getOutputStream(), Charsets.UTF_8), true);
+      writer.println("0");
+    } finally {
+      socket.close();
+    }
+
+    // takes some time for app master to find out the container completed...
+    TimeUnit.SECONDS.sleep(5);
+    // check that we have 1 runnable, not 2.
+    report = controller.getResourceReport();
+    Assert.assertEquals(1, report.getRunnableResources("BuggyServer").size());
+
+    controller.stop().get(30, TimeUnit.SECONDS);
+    // Sleep a bit before exiting.
+    TimeUnit.SECONDS.sleep(2);
+  }
+
+  @Test
+  public void testResourceReport() throws InterruptedException, ExecutionException, IOException,
+    URISyntaxException, TimeoutException {
+    TwillRunner runner = YarnTestSuite.getTwillRunner();
+
+    TwillController controller = runner.prepare(new ResourceApplication())
+                                        .addLogHandler(new PrinterLogHandler(new PrintWriter(System.out, true)))
+                                        .withApplicationArguments("echo")
+                                        .withArguments("echo1", "echo1")
+                                        .withArguments("echo2", "echo2")
+                                        .start();
+
+    final CountDownLatch running = new CountDownLatch(1);
+    controller.addListener(new ServiceListenerAdapter() {
+      @Override
+      public void running() {
+        running.countDown();
+      }
+    }, Threads.SAME_THREAD_EXECUTOR);
+
+    Assert.assertTrue(running.await(30, TimeUnit.SECONDS));
+
+    // wait for 3 echo servers to come up
+    Iterable<Discoverable> echoServices = controller.discoverService("echo");
+    Assert.assertTrue(YarnTestSuite.waitForSize(echoServices, 3, 60));
+    ResourceReport report = controller.getResourceReport();
+    // make sure resources for echo1 and echo2 are there
+    Map<String, Collection<TwillRunResources>> usedResources = report.getResources();
+    Assert.assertEquals(2, usedResources.keySet().size());
+    Assert.assertTrue(usedResources.containsKey("echo1"));
+    Assert.assertTrue(usedResources.containsKey("echo2"));
+
+    Collection<TwillRunResources> echo1Resources = usedResources.get("echo1");
+    // 2 instances of echo1
+    Assert.assertEquals(2, echo1Resources.size());
+    // TODO: check cores after hadoop-2.1.0
+    for (TwillRunResources resources : echo1Resources) {
+      Assert.assertEquals(128, resources.getMemoryMB());
+    }
+
+    Collection<TwillRunResources> echo2Resources = usedResources.get("echo2");
+    // 1 instance of echo2
+    Assert.assertEquals(1, echo2Resources.size());
+    // TODO: check cores after hadoop-2.1.0
+    for (TwillRunResources resources : echo2Resources) {
+      Assert.assertEquals(256, resources.getMemoryMB());
+    }
+
+    // Decrease number of instances of echo1 from 2 to 1
+    controller.changeInstances("echo1", 1);
+    echoServices = controller.discoverService("echo1");
+    Assert.assertTrue(YarnTestSuite.waitForSize(echoServices, 1, 60));
+    report = controller.getResourceReport();
+
+    // make sure resources for echo1 and echo2 are there
+    usedResources = report.getResources();
+    Assert.assertEquals(2, usedResources.keySet().size());
+    Assert.assertTrue(usedResources.containsKey("echo1"));
+    Assert.assertTrue(usedResources.containsKey("echo2"));
+
+    echo1Resources = usedResources.get("echo1");
+    // 1 instance of echo1 now
+    Assert.assertEquals(1, echo1Resources.size());
+    // TODO: check cores after hadoop-2.1.0
+    for (TwillRunResources resources : echo1Resources) {
+      Assert.assertEquals(128, resources.getMemoryMB());
+    }
+
+    echo2Resources = usedResources.get("echo2");
+    // still 1 instance of echo2
+    Assert.assertEquals(1, echo2Resources.size());
+    // TODO: check cores after hadoop-2.1.0
+    for (TwillRunResources resources : echo2Resources) {
+      Assert.assertEquals(256, resources.getMemoryMB());
+    }
+
+    controller.stop().get(30, TimeUnit.SECONDS);
+    // Sleep a bit before exiting.
+    TimeUnit.SECONDS.sleep(2);
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/test/java/org/apache/twill/yarn/SocketServer.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/test/java/org/apache/twill/yarn/SocketServer.java b/twill-yarn/src/test/java/org/apache/twill/yarn/SocketServer.java
new file mode 100644
index 0000000..5148ed2
--- /dev/null
+++ b/twill-yarn/src/test/java/org/apache/twill/yarn/SocketServer.java
@@ -0,0 +1,133 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.yarn;
+
+import org.apache.twill.api.AbstractTwillRunnable;
+import org.apache.twill.api.TwillContext;
+import org.apache.twill.common.Cancellable;
+import com.google.common.base.Charsets;
+import com.google.common.base.Throwables;
+import com.google.common.collect.ImmutableList;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.OutputStreamWriter;
+import java.io.PrintWriter;
+import java.net.ServerSocket;
+import java.net.Socket;
+import java.net.SocketException;
+import java.util.List;
+
+/**
+ * Boilerplate for a server that announces itself and talks to clients through a socket.
+ */
+public abstract class SocketServer extends AbstractTwillRunnable {
+
+  private static final Logger LOG = LoggerFactory.getLogger(SocketServer.class);
+
+  protected volatile boolean running;
+  protected volatile Thread runThread;
+  protected ServerSocket serverSocket;
+  protected Cancellable canceller;
+
+  @Override
+  public void initialize(TwillContext context) {
+    super.initialize(context);
+    running = true;
+    try {
+      serverSocket = new ServerSocket(0);
+      LOG.info("Server started: " + serverSocket.getLocalSocketAddress() +
+               ", id: " + context.getInstanceId() +
+               ", count: " + context.getInstanceCount());
+
+      final List<Cancellable> cancellables = ImmutableList.of(
+        context.announce(context.getApplicationArguments()[0], serverSocket.getLocalPort()),
+        context.announce(context.getArguments()[0], serverSocket.getLocalPort())
+      );
+      canceller = new Cancellable() {
+        @Override
+        public void cancel() {
+          for (Cancellable c : cancellables) {
+            c.cancel();
+          }
+        }
+      };
+    } catch (IOException e) {
+      throw Throwables.propagate(e);
+    }
+  }
+
+  @Override
+  public void run() {
+    try {
+      runThread = Thread.currentThread();
+      while (running) {
+        try {
+          Socket socket = serverSocket.accept();
+          try {
+            BufferedReader reader = new BufferedReader(new InputStreamReader(socket.getInputStream(), Charsets.UTF_8));
+            PrintWriter writer = new PrintWriter(new OutputStreamWriter(socket.getOutputStream(), Charsets.UTF_8), true);
+            handleRequest(reader, writer);
+          } finally {
+            socket.close();
+          }
+        } catch (SocketException e) {
+          LOG.info("Socket exception: " + e);
+        }
+      }
+    } catch (Exception e) {
+      LOG.error(e.getMessage(), e);
+    }
+  }
+
+  @Override
+  public void stop() {
+    LOG.info("Stopping server");
+    canceller.cancel();
+    running = false;
+    Thread t = runThread;
+    if (t != null) {
+      t.interrupt();
+    }
+    try {
+      serverSocket.close();
+    } catch (IOException e) {
+      LOG.error("Exception while closing socket.", e);
+      throw Throwables.propagate(e);
+    }
+    serverSocket = null;
+  }
+
+  @Override
+  public void destroy() {
+    try {
+      if (serverSocket != null) {
+        serverSocket.close();
+      }
+    } catch (IOException e) {
+      LOG.error("Exception while closing socket.", e);
+      throw Throwables.propagate(e);
+    }
+  }
+
+  public abstract void handleRequest(BufferedReader reader, PrintWriter writer) throws IOException;
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/test/java/org/apache/twill/yarn/TaskCompletedTestRun.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/test/java/org/apache/twill/yarn/TaskCompletedTestRun.java b/twill-yarn/src/test/java/org/apache/twill/yarn/TaskCompletedTestRun.java
new file mode 100644
index 0000000..5a93271
--- /dev/null
+++ b/twill-yarn/src/test/java/org/apache/twill/yarn/TaskCompletedTestRun.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.yarn;
+
+import org.apache.twill.api.AbstractTwillRunnable;
+import org.apache.twill.api.ResourceSpecification;
+import org.apache.twill.api.TwillController;
+import org.apache.twill.api.TwillRunner;
+import org.apache.twill.api.logging.PrinterLogHandler;
+import org.apache.twill.common.ServiceListenerAdapter;
+import org.apache.twill.common.Threads;
+import com.google.common.base.Throwables;
+import com.google.common.util.concurrent.Service;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.PrintWriter;
+import java.util.Random;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Tests that the application master shuts itself down when all tasks are completed.
+ * This test is executed by {@link YarnTestSuite}.
+ */
+public class TaskCompletedTestRun {
+
+  public static final class SleepTask extends AbstractTwillRunnable {
+
+    @Override
+    public void run() {
+      // Randomly sleep for 3-5 seconds.
+      try {
+        TimeUnit.SECONDS.sleep(new Random().nextInt(3) + 3);
+      } catch (InterruptedException e) {
+        throw Throwables.propagate(e);
+      }
+    }
+
+    @Override
+    public void stop() {
+      // No-op
+    }
+  }
+
+  @Test
+  public void testTaskCompleted() throws InterruptedException {
+    TwillRunner twillRunner = YarnTestSuite.getTwillRunner();
+    TwillController controller = twillRunner.prepare(new SleepTask(),
+                                                ResourceSpecification.Builder.with()
+                                                  .setVirtualCores(1)
+                                                  .setMemory(512, ResourceSpecification.SizeUnit.MEGA)
+                                                  .setInstances(3).build())
+                                            .addLogHandler(new PrinterLogHandler(new PrintWriter(System.out, true)))
+                                            .start();
+
+    final CountDownLatch runLatch = new CountDownLatch(1);
+    final CountDownLatch stopLatch = new CountDownLatch(1);
+    controller.addListener(new ServiceListenerAdapter() {
+
+      @Override
+      public void running() {
+        runLatch.countDown();
+      }
+
+      @Override
+      public void terminated(Service.State from) {
+        stopLatch.countDown();
+      }
+    }, Threads.SAME_THREAD_EXECUTOR);
+
+    Assert.assertTrue(runLatch.await(1, TimeUnit.MINUTES));
+
+    Assert.assertTrue(stopLatch.await(1, TimeUnit.MINUTES));
+
+    TimeUnit.SECONDS.sleep(2);
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/test/java/org/apache/twill/yarn/TwillSpecificationTest.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/test/java/org/apache/twill/yarn/TwillSpecificationTest.java b/twill-yarn/src/test/java/org/apache/twill/yarn/TwillSpecificationTest.java
new file mode 100644
index 0000000..8be907b
--- /dev/null
+++ b/twill-yarn/src/test/java/org/apache/twill/yarn/TwillSpecificationTest.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.yarn;
+
+import org.apache.twill.api.AbstractTwillRunnable;
+import org.apache.twill.api.TwillSpecification;
+import com.google.common.collect.ImmutableSet;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.util.List;
+
+/**
+ * Tests for {@link TwillSpecification} runnable ordering.
+ */
+public class TwillSpecificationTest {
+
+  /**
+   * Dummy for test.
+   */
+  public static final class DummyRunnable extends AbstractTwillRunnable {
+
+    @Override
+    public void stop() {
+      // no-op
+    }
+
+    @Override
+    public void run() {
+      // no-op
+    }
+  }
+
+  @Test
+  public void testAnyOrder() {
+    TwillSpecification spec =
+      TwillSpecification.Builder.with()
+        .setName("Testing")
+        .withRunnable()
+        .add("r1", new DummyRunnable()).noLocalFiles()
+        .add("r2", new DummyRunnable()).noLocalFiles()
+        .add("r3", new DummyRunnable()).noLocalFiles()
+        .anyOrder()
+        .build();
+
+    Assert.assertEquals(3, spec.getRunnables().size());
+    List<TwillSpecification.Order> orders = spec.getOrders();
+    Assert.assertEquals(1, orders.size());
+    Assert.assertEquals(ImmutableSet.of("r1", "r2", "r3"), orders.get(0).getNames());
+  }
+
+  @Test
+  public void testOrder() {
+    TwillSpecification spec =
+      TwillSpecification.Builder.with()
+        .setName("Testing")
+        .withRunnable()
+        .add("r1", new DummyRunnable()).noLocalFiles()
+        .add("r2", new DummyRunnable()).noLocalFiles()
+        .add("r3", new DummyRunnable()).noLocalFiles()
+        .add("r4", new DummyRunnable()).noLocalFiles()
+        .withOrder().begin("r1", "r2").nextWhenStarted("r3")
+        .build();
+
+    Assert.assertEquals(4, spec.getRunnables().size());
+    List<TwillSpecification.Order> orders = spec.getOrders();
+    Assert.assertEquals(3, orders.size());
+    Assert.assertEquals(ImmutableSet.of("r1", "r2"), orders.get(0).getNames());
+    Assert.assertEquals(ImmutableSet.of("r3"), orders.get(1).getNames());
+    Assert.assertEquals(ImmutableSet.of("r4"), orders.get(2).getNames());
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/test/java/org/apache/twill/yarn/YarnTestSuite.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/test/java/org/apache/twill/yarn/YarnTestSuite.java b/twill-yarn/src/test/java/org/apache/twill/yarn/YarnTestSuite.java
new file mode 100644
index 0000000..b55d620
--- /dev/null
+++ b/twill-yarn/src/test/java/org/apache/twill/yarn/YarnTestSuite.java
@@ -0,0 +1,127 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.yarn;
+
+import org.apache.twill.api.TwillRunner;
+import org.apache.twill.api.TwillRunnerService;
+import org.apache.twill.filesystem.LocalLocationFactory;
+import org.apache.twill.internal.zookeeper.InMemoryZKServer;
+import org.apache.twill.internal.yarn.YarnUtils;
+import com.google.common.collect.Iterables;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.MiniYARNCluster;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.rules.TemporaryFolder;
+import org.junit.runner.RunWith;
+import org.junit.runners.Suite;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Test suite for all tests that run against a mini YARN cluster.
+ */
+@RunWith(Suite.class)
+@Suite.SuiteClasses({
+                      EchoServerTestRun.class,
+                      ResourceReportTestRun.class,
+                      TaskCompletedTestRun.class,
+                      DistributeShellTestRun.class,
+                      LocalFileTestRun.class,
+                      FailureRestartTestRun.class,
+                      ProvisionTimeoutTestRun.class
+                    })
+public class YarnTestSuite {
+  private static final Logger LOG = LoggerFactory.getLogger(YarnTestSuite.class);
+
+  @ClassRule
+  public static TemporaryFolder tmpFolder = new TemporaryFolder();
+
+  private static InMemoryZKServer zkServer;
+  private static MiniYARNCluster cluster;
+  private static TwillRunnerService runnerService;
+  private static YarnConfiguration config;
+
+  @BeforeClass
+  public static final void init() throws IOException {
+    // Starts Zookeeper
+    zkServer = InMemoryZKServer.builder().build();
+    zkServer.startAndWait();
+
+    // Start YARN mini cluster
+    config = new YarnConfiguration(new Configuration());
+
+    if (YarnUtils.isHadoop20()) {
+      config.set("yarn.resourcemanager.scheduler.class",
+                 "org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler");
+    } else {
+      config.set("yarn.resourcemanager.scheduler.class",
+                 "org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler");
+      config.set("yarn.scheduler.capacity.resource-calculator",
+                 "org.apache.hadoop.yarn.util.resource.DominantResourceCalculator");
+    }
+    config.set("yarn.minicluster.fixed.ports", "true");
+    config.set("yarn.nodemanager.vmem-pmem-ratio", "20.1");
+    config.set("yarn.nodemanager.vmem-check-enabled", "false");
+    config.set("yarn.scheduler.minimum-allocation-mb", "128");
+    config.set("yarn.nodemanager.delete.debug-delay-sec", "3600");
+
+    cluster = new MiniYARNCluster("test-cluster", 1, 1, 1);
+    cluster.init(config);
+    cluster.start();
+
+    runnerService = createTwillRunnerService();
+    runnerService.startAndWait();
+  }
+
+  @AfterClass
+  public static final void finish() {
+    runnerService.stopAndWait();
+    cluster.stop();
+    zkServer.stopAndWait();
+  }
+
+  public static final TwillRunner getTwillRunner() {
+    return runnerService;
+  }
+
+  /**
+   * Creates an unstarted instance of {@link org.apache.twill.api.TwillRunnerService}.
+   */
+  public static final TwillRunnerService createTwillRunnerService() throws IOException {
+    return new YarnTwillRunnerService(config, zkServer.getConnectionStr() + "/twill",
+                                      new LocalLocationFactory(tmpFolder.newFolder()));
+  }
+
+  public static final <T> boolean waitForSize(Iterable<T> iterable, int count, int limit) throws InterruptedException {
+    int trial = 0;
+    int size = Iterables.size(iterable);
+    while (size != count && trial < limit) {
+      LOG.info("Waiting for {} size {} == {}", iterable, size, count);
+      TimeUnit.SECONDS.sleep(1);
+      trial++;
+      size = Iterables.size(iterable);
+    }
+    return trial < limit;
+  }
+}
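
A new test-run class would follow the same pattern as the ones above: obtain the shared runner from this suite, start an application, and poll the discovery service with waitForSize; it would also need to be added to the @Suite.SuiteClasses list. A minimal, hypothetical sketch reusing the EchoServer runnable from this commit (class and argument names are illustrative only):

  import java.util.concurrent.TimeUnit;
  import org.apache.twill.api.TwillController;
  import org.apache.twill.api.TwillRunner;
  import org.junit.Assert;
  import org.junit.Test;

  // Hypothetical test class, not part of this commit.
  public class ExampleEchoTestRun {
    @Test
    public void testEcho() throws Exception {
      TwillRunner runner = YarnTestSuite.getTwillRunner();
      TwillController controller = runner.prepare(new EchoServer())
        .withApplicationArguments("example")
        .withArguments("EchoServer", "example2")
        .start();
      try {
        // Wait for the single instance to announce itself under the application argument name.
        Assert.assertTrue(YarnTestSuite.waitForSize(controller.discoverService("example"), 1, 60));
      } finally {
        controller.stop().get(30, TimeUnit.SECONDS);
      }
    }
  }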

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/test/resources/header.txt
----------------------------------------------------------------------
diff --git a/twill-yarn/src/test/resources/header.txt b/twill-yarn/src/test/resources/header.txt
new file mode 100644
index 0000000..b6e25e6
--- /dev/null
+++ b/twill-yarn/src/test/resources/header.txt
@@ -0,0 +1 @@
+Local file header

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/test/resources/logback-test.xml
----------------------------------------------------------------------
diff --git a/twill-yarn/src/test/resources/logback-test.xml b/twill-yarn/src/test/resources/logback-test.xml
new file mode 100644
index 0000000..2615cb4
--- /dev/null
+++ b/twill-yarn/src/test/resources/logback-test.xml
@@ -0,0 +1,17 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<!-- Default logback configuration for twill library -->
+<configuration>
+    <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
+        <encoder>
+            <pattern>%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n</pattern>
+        </encoder>
+    </appender>
+
+    <logger name="org.apache.twill" level="DEBUG" />
+
+    <root level="WARN">
+        <appender-ref ref="STDOUT"/>
+    </root>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-zookeeper/pom.xml
----------------------------------------------------------------------
diff --git a/twill-zookeeper/pom.xml b/twill-zookeeper/pom.xml
new file mode 100644
index 0000000..e76ee50
--- /dev/null
+++ b/twill-zookeeper/pom.xml
@@ -0,0 +1,67 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <parent>
+        <artifactId>twill-parent</artifactId>
+        <groupId>org.apache.twill</groupId>
+        <version>0.1.0-SNAPSHOT</version>
+    </parent>
+    <modelVersion>4.0.0</modelVersion>
+
+    <artifactId>twill-zookeeper</artifactId>
+    <name>Twill ZooKeeper client library</name>
+
+    <dependencies>
+        <dependency>
+            <groupId>${project.groupId}</groupId>
+            <artifactId>twill-common</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>com.google.guava</groupId>
+            <artifactId>guava</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>com.google.code.findbugs</groupId>
+            <artifactId>jsr305</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.zookeeper</groupId>
+            <artifactId>zookeeper</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.slf4j</groupId>
+            <artifactId>slf4j-api</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>ch.qos.logback</groupId>
+            <artifactId>logback-core</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>ch.qos.logback</groupId>
+            <artifactId>logback-classic</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>junit</groupId>
+            <artifactId>junit</artifactId>
+        </dependency>
+    </dependencies>
+</project>

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-zookeeper/src/main/java/org/apache/twill/internal/zookeeper/BasicNodeChildren.java
----------------------------------------------------------------------
diff --git a/twill-zookeeper/src/main/java/org/apache/twill/internal/zookeeper/BasicNodeChildren.java b/twill-zookeeper/src/main/java/org/apache/twill/internal/zookeeper/BasicNodeChildren.java
new file mode 100644
index 0000000..9e4f55f
--- /dev/null
+++ b/twill-zookeeper/src/main/java/org/apache/twill/internal/zookeeper/BasicNodeChildren.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.zookeeper;
+
+import org.apache.twill.zookeeper.NodeChildren;
+import com.google.common.base.Objects;
+import org.apache.zookeeper.data.Stat;
+
+import java.util.List;
+
+/**
+ * A straightforward implementation of {@link NodeChildren}.
+ */
+final class BasicNodeChildren implements NodeChildren {
+
+  private final Stat stat;
+  private final List<String> children;
+
+  BasicNodeChildren(List<String> children, Stat stat) {
+    this.stat = stat;
+    this.children = children;
+  }
+
+  @Override
+  public Stat getStat() {
+    return stat;
+  }
+
+  @Override
+  public List<String> getChildren() {
+    return children;
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || !(o instanceof NodeChildren)) {
+      return false;
+    }
+
+    NodeChildren that = (NodeChildren) o;
+    return stat.equals(that.getStat()) && children.equals(that.getChildren());
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hashCode(children, stat);
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-zookeeper/src/main/java/org/apache/twill/internal/zookeeper/BasicNodeData.java
----------------------------------------------------------------------
diff --git a/twill-zookeeper/src/main/java/org/apache/twill/internal/zookeeper/BasicNodeData.java b/twill-zookeeper/src/main/java/org/apache/twill/internal/zookeeper/BasicNodeData.java
new file mode 100644
index 0000000..98a3a66
--- /dev/null
+++ b/twill-zookeeper/src/main/java/org/apache/twill/internal/zookeeper/BasicNodeData.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.zookeeper;
+
+import org.apache.twill.zookeeper.NodeData;
+import com.google.common.base.Objects;
+import org.apache.zookeeper.data.Stat;
+
+import java.util.Arrays;
+
+/**
+ * A straightforward implementation of {@link NodeData}.
+ */
+final class BasicNodeData implements NodeData {
+
+  private final byte[] data;
+  private final Stat stat;
+
+  BasicNodeData(byte[] data, Stat stat) {
+    this.data = data;
+    this.stat = stat;
+  }
+
+  @Override
+  public Stat getStat() {
+    return stat;
+  }
+
+  @Override
+  public byte[] getData() {
+    return data;
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || !(o instanceof NodeData)) {
+      return false;
+    }
+
+    BasicNodeData that = (BasicNodeData) o;
+
+    return stat.equals(that.getStat()) && Arrays.equals(data, that.getData());
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hashCode(data, stat);
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-zookeeper/src/main/java/org/apache/twill/internal/zookeeper/DefaultZKClientService.java
----------------------------------------------------------------------
diff --git a/twill-zookeeper/src/main/java/org/apache/twill/internal/zookeeper/DefaultZKClientService.java b/twill-zookeeper/src/main/java/org/apache/twill/internal/zookeeper/DefaultZKClientService.java
new file mode 100644
index 0000000..c52fb08
--- /dev/null
+++ b/twill-zookeeper/src/main/java/org/apache/twill/internal/zookeeper/DefaultZKClientService.java
@@ -0,0 +1,525 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.zookeeper;
+
+import org.apache.twill.common.Threads;
+import org.apache.twill.zookeeper.NodeChildren;
+import org.apache.twill.zookeeper.NodeData;
+import org.apache.twill.zookeeper.OperationFuture;
+import org.apache.twill.zookeeper.ZKClientService;
+import com.google.common.base.Function;
+import com.google.common.base.Preconditions;
+import com.google.common.base.Supplier;
+import com.google.common.util.concurrent.AbstractService;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.Service;
+import org.apache.zookeeper.AsyncCallback;
+import org.apache.zookeeper.CreateMode;
+import org.apache.zookeeper.KeeperException;
+import org.apache.zookeeper.WatchedEvent;
+import org.apache.zookeeper.Watcher;
+import org.apache.zookeeper.ZooDefs;
+import org.apache.zookeeper.ZooKeeper;
+import org.apache.zookeeper.data.ACL;
+import org.apache.zookeeper.data.Stat;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.annotation.Nullable;
+import java.io.IOException;
+import java.util.List;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.Executor;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
+
+/**
+ * The base implementation of {@link ZKClientService}.
+ */
+public final class DefaultZKClientService implements ZKClientService {
+
+  private static final Logger LOG = LoggerFactory.getLogger(DefaultZKClientService.class);
+
+  private final String zkStr;
+  private final int sessionTimeout;
+  private final List<Watcher> connectionWatchers;
+  private final AtomicReference<ZooKeeper> zooKeeper;
+  private final Function<String, List<ACL>> aclMapper;
+  private final Service serviceDelegate;
+  private ExecutorService eventExecutor;
+
+  public DefaultZKClientService(String zkStr, int sessionTimeout, Watcher connectionWatcher) {
+    this.zkStr = zkStr;
+    this.sessionTimeout = sessionTimeout;
+    this.connectionWatchers = new CopyOnWriteArrayList<Watcher>();
+    addConnectionWatcher(connectionWatcher);
+
+    this.zooKeeper = new AtomicReference<ZooKeeper>();
+
+    // TODO (terence): Add ACL
+    aclMapper = new Function<String, List<ACL>>() {
+      @Override
+      public List<ACL> apply(String input) {
+        return ZooDefs.Ids.OPEN_ACL_UNSAFE;
+      }
+    };
+    serviceDelegate = new ServiceDelegate();
+  }
+
+  @Override
+  public Long getSessionId() {
+    ZooKeeper zk = zooKeeper.get();
+    return zk == null ? null : zk.getSessionId();
+  }
+
+  @Override
+  public String getConnectString() {
+    return zkStr;
+  }
+
+  @Override
+  public void addConnectionWatcher(Watcher watcher) {
+    if (watcher != null) {
+      connectionWatchers.add(wrapWatcher(watcher));
+    }
+  }
+
+  @Override
+  public OperationFuture<String> create(String path, byte[] data, CreateMode createMode) {
+    return create(path, data, createMode, true);
+  }
+
+  @Override
+  public OperationFuture<String> create(String path, @Nullable byte[] data,
+                                        CreateMode createMode, boolean createParent) {
+    return doCreate(path, data, createMode, createParent, false);
+  }
+
+  private OperationFuture<String> doCreate(final String path,
+                                        @Nullable final byte[] data,
+                                        final CreateMode createMode,
+                                        final boolean createParent,
+                                        final boolean ignoreNodeExists) {
+    final SettableOperationFuture<String> createFuture = SettableOperationFuture.create(path, eventExecutor);
+    getZooKeeper().create(path, data, aclMapper.apply(path), createMode, Callbacks.STRING, createFuture);
+    if (!createParent) {
+      return createFuture;
+    }
+
+    // If creating the parent is requested, return a different future
+    final SettableOperationFuture<String> result = SettableOperationFuture.create(path, eventExecutor);
+    // Watch for changes in the original future
+    Futures.addCallback(createFuture, new FutureCallback<String>() {
+      @Override
+      public void onSuccess(String path) {
+        // Propagate if creation was successful
+        result.set(path);
+      }
+
+      @Override
+      public void onFailure(Throwable t) {
+        // See if the failure can be handled
+        if (updateFailureResult(t, result, path, ignoreNodeExists)) {
+          return;
+        }
+        // Create the parent node
+        String parentPath = getParent(path);
+        if (parentPath.isEmpty()) {
+          result.setException(t);
+          return;
+        }
+        // Watch for parent creation complete
+        Futures.addCallback(
+          doCreate(parentPath, null, CreateMode.PERSISTENT, createParent, true), new FutureCallback<String>() {
+          @Override
+          public void onSuccess(String parentPath) {
+            // Create the requested path again
+            Futures.addCallback(
+              doCreate(path, data, createMode, false, ignoreNodeExists), new FutureCallback<String>() {
+              @Override
+              public void onSuccess(String pathResult) {
+                result.set(pathResult);
+              }
+
+              @Override
+              public void onFailure(Throwable t) {
+                // handle the failure
+                updateFailureResult(t, result, path, ignoreNodeExists);
+              }
+            });
+          }
+
+          @Override
+          public void onFailure(Throwable t) {
+            result.setException(t);
+          }
+        });
+      }
+
+      /**
+       * Updates the result future based on the given {@link Throwable}.
+       * @param t Cause of the failure
+       * @param result Future to be updated
+       * @param path Request path for the operation
+       * @return {@code true} if the failure was propagated to the result future, {@code false} otherwise.
+       */
+      private boolean updateFailureResult(Throwable t, SettableOperationFuture<String> result,
+                                          String path, boolean ignoreNodeExists) {
+        // Propagate any failure that is not a KeeperException
+        if (!(t instanceof KeeperException)) {
+          result.setException(t);
+          return true;
+        }
+        KeeperException.Code code = ((KeeperException) t).code();
+        // Node already exists; treat it as success when NODEEXISTS may be ignored (used for parent node creation).
+        if (ignoreNodeExists && code == KeeperException.Code.NODEEXISTS) {
+          // The requested path can be returned as-is since this only applies to non-sequential nodes
+          result.set(path);
+          return false;
+        }
+        if (code != KeeperException.Code.NONODE) {
+          result.setException(t);
+          return true;
+        }
+        return false;
+      }
+
+      /**
+       * Gets the parent of the given path.
+       * @param path Path for computing its parent
+       * @return Parent of the given path, or empty string if the given path is the root path already.
+       */
+      private String getParent(String path) {
+        String parentPath = path.substring(0, path.lastIndexOf('/'));
+        return (parentPath.isEmpty() && !"/".equals(path)) ? "/" : parentPath;
+      }
+    });
+
+    return result;
+  }
+
+  @Override
+  public OperationFuture<Stat> exists(String path) {
+    return exists(path, null);
+  }
+
+  @Override
+  public OperationFuture<Stat> exists(String path, Watcher watcher) {
+    SettableOperationFuture<Stat> result = SettableOperationFuture.create(path, eventExecutor);
+    getZooKeeper().exists(path, wrapWatcher(watcher), Callbacks.STAT_NONODE, result);
+    return result;
+  }
+
+  @Override
+  public OperationFuture<NodeChildren> getChildren(String path) {
+    return getChildren(path, null);
+  }
+
+  @Override
+  public OperationFuture<NodeChildren> getChildren(String path, Watcher watcher) {
+    SettableOperationFuture<NodeChildren> result = SettableOperationFuture.create(path, eventExecutor);
+    getZooKeeper().getChildren(path, wrapWatcher(watcher), Callbacks.CHILDREN, result);
+    return result;
+  }
+
+  @Override
+  public OperationFuture<NodeData> getData(String path) {
+    return getData(path, null);
+  }
+
+  @Override
+  public OperationFuture<NodeData> getData(String path, Watcher watcher) {
+    SettableOperationFuture<NodeData> result = SettableOperationFuture.create(path, eventExecutor);
+    getZooKeeper().getData(path, wrapWatcher(watcher), Callbacks.DATA, result);
+
+    return result;
+  }
+
+  @Override
+  public OperationFuture<Stat> setData(String path, byte[] data) {
+    return setData(path, data, -1);
+  }
+
+  @Override
+  public OperationFuture<Stat> setData(String dataPath, byte[] data, int version) {
+    SettableOperationFuture<Stat> result = SettableOperationFuture.create(dataPath, eventExecutor);
+    getZooKeeper().setData(dataPath, data, version, Callbacks.STAT, result);
+    return result;
+  }
+
+  @Override
+  public OperationFuture<String> delete(String path) {
+    return delete(path, -1);
+  }
+
+  @Override
+  public OperationFuture<String> delete(String deletePath, int version) {
+    SettableOperationFuture<String> result = SettableOperationFuture.create(deletePath, eventExecutor);
+    getZooKeeper().delete(deletePath, version, Callbacks.VOID, result);
+    return result;
+  }
+
+  @Override
+  public Supplier<ZooKeeper> getZooKeeperSupplier() {
+    return new Supplier<ZooKeeper>() {
+      @Override
+      public ZooKeeper get() {
+        return getZooKeeper();
+      }
+    };
+  }
+
+  @Override
+  public ListenableFuture<State> start() {
+    return serviceDelegate.start();
+  }
+
+  @Override
+  public State startAndWait() {
+    return serviceDelegate.startAndWait();
+  }
+
+  @Override
+  public boolean isRunning() {
+    return serviceDelegate.isRunning();
+  }
+
+  @Override
+  public State state() {
+    return serviceDelegate.state();
+  }
+
+  @Override
+  public ListenableFuture<State> stop() {
+    return serviceDelegate.stop();
+  }
+
+  @Override
+  public State stopAndWait() {
+    return serviceDelegate.stopAndWait();
+  }
+
+  @Override
+  public void addListener(Listener listener, Executor executor) {
+    serviceDelegate.addListener(listener, executor);
+  }
+
+  /**
+   * @return Current {@link ZooKeeper} client.
+   */
+  private ZooKeeper getZooKeeper() {
+    ZooKeeper zk = zooKeeper.get();
+    Preconditions.checkArgument(zk != null, "Not connected to zooKeeper.");
+    return zk;
+  }
+
+  /**
+   * Wraps the given watcher to be called from the event executor.
+   * @param watcher Watcher to be wrapped
+   * @return The wrapped Watcher
+   */
+  private Watcher wrapWatcher(final Watcher watcher) {
+    if (watcher == null) {
+      return null;
+    }
+    return new Watcher() {
+      @Override
+      public void process(final WatchedEvent event) {
+        eventExecutor.execute(new Runnable() {
+          @Override
+          public void run() {
+            try {
+              watcher.process(event);
+            } catch (Throwable t) {
+              LOG.error("Watcher throws exception.", t);
+            }
+          }
+        });
+      }
+    };
+  }
+
+  private final class ServiceDelegate extends AbstractService implements Watcher {
+
+    @Override
+    protected void doStart() {
+      // A single thread executor
+      eventExecutor = new ThreadPoolExecutor(1, 1, 0L, TimeUnit.MILLISECONDS, new LinkedBlockingQueue<Runnable>(),
+                                             Threads.createDaemonThreadFactory("zk-client-EventThread")) {
+        @Override
+        protected void terminated() {
+          super.terminated();
+          notifyStopped();
+        }
+      };
+
+      try {
+        zooKeeper.set(new ZooKeeper(zkStr, sessionTimeout, this));
+      } catch (IOException e) {
+        notifyFailed(e);
+      }
+    }
+
+    @Override
+    protected void doStop() {
+      ZooKeeper zk = zooKeeper.getAndSet(null);
+      if (zk != null) {
+        try {
+          zk.close();
+        } catch (InterruptedException e) {
+          notifyFailed(e);
+        } finally {
+          eventExecutor.shutdown();
+        }
+      }
+    }
+
+    @Override
+    public void process(WatchedEvent event) {
+      try {
+        if (event.getState() == Event.KeeperState.SyncConnected && state() == State.STARTING) {
+          LOG.info("Connected to ZooKeeper: " + zkStr);
+          notifyStarted();
+          return;
+        }
+        if (event.getState() == Event.KeeperState.Expired) {
+          LOG.info("ZooKeeper session expired: " + zkStr);
+
+          // When the session expires, simply reconnect
+          Thread t = new Thread(new Runnable() {
+            @Override
+            public void run() {
+              try {
+                zooKeeper.set(new ZooKeeper(zkStr, sessionTimeout, ServiceDelegate.this));
+              } catch (IOException e) {
+                zooKeeper.set(null);
+                notifyFailed(e);
+              }
+            }
+          }, "zk-reconnect");
+          t.setDaemon(true);
+          t.start();
+        }
+      } finally {
+        if (event.getType() == Event.EventType.None && !connectionWatchers.isEmpty()) {
+          for (Watcher connectionWatcher : connectionWatchers) {
+            connectionWatcher.process(event);
+          }
+        }
+      }
+    }
+  }
+
+  /**
+   * Collection of generic callbacks that simply reflect results into OperationFuture.
+   */
+  private static final class Callbacks {
+    static final AsyncCallback.StringCallback STRING = new AsyncCallback.StringCallback() {
+      @Override
+      @SuppressWarnings("unchecked")
+      public void processResult(int rc, String path, Object ctx, String name) {
+        SettableOperationFuture<String> result = (SettableOperationFuture<String>) ctx;
+        KeeperException.Code code = KeeperException.Code.get(rc);
+        if (code == KeeperException.Code.OK) {
+          result.set((name == null || name.isEmpty()) ? path : name);
+          return;
+        }
+        result.setException(KeeperException.create(code, result.getRequestPath()));
+      }
+    };
+
+    static final AsyncCallback.StatCallback STAT = new AsyncCallback.StatCallback() {
+      @Override
+      @SuppressWarnings("unchecked")
+      public void processResult(int rc, String path, Object ctx, Stat stat) {
+        SettableOperationFuture<Stat> result = (SettableOperationFuture<Stat>) ctx;
+        KeeperException.Code code = KeeperException.Code.get(rc);
+        if (code == KeeperException.Code.OK) {
+          result.set(stat);
+          return;
+        }
+        result.setException(KeeperException.create(code, result.getRequestPath()));
+      }
+    };
+
+    /**
+     * A stat callback that treats NONODE as success.
+     */
+    static final AsyncCallback.StatCallback STAT_NONODE = new AsyncCallback.StatCallback() {
+      @Override
+      @SuppressWarnings("unchecked")
+      public void processResult(int rc, String path, Object ctx, Stat stat) {
+        SettableOperationFuture<Stat> result = (SettableOperationFuture<Stat>) ctx;
+        KeeperException.Code code = KeeperException.Code.get(rc);
+        if (code == KeeperException.Code.OK || code == KeeperException.Code.NONODE) {
+          result.set(stat);
+          return;
+        }
+        result.setException(KeeperException.create(code, result.getRequestPath()));
+      }
+    };
+
+    static final AsyncCallback.Children2Callback CHILDREN = new AsyncCallback.Children2Callback() {
+      @Override
+      @SuppressWarnings("unchecked")
+      public void processResult(int rc, String path, Object ctx, List<String> children, Stat stat) {
+        SettableOperationFuture<NodeChildren> result = (SettableOperationFuture<NodeChildren>) ctx;
+        KeeperException.Code code = KeeperException.Code.get(rc);
+        if (code == KeeperException.Code.OK) {
+          result.set(new BasicNodeChildren(children, stat));
+          return;
+        }
+        result.setException(KeeperException.create(code, result.getRequestPath()));
+      }
+    };
+
+    static final AsyncCallback.DataCallback DATA = new AsyncCallback.DataCallback() {
+      @Override
+      @SuppressWarnings("unchecked")
+      public void processResult(int rc, String path, Object ctx, byte[] data, Stat stat) {
+        SettableOperationFuture<NodeData> result = (SettableOperationFuture<NodeData>) ctx;
+        KeeperException.Code code = KeeperException.Code.get(rc);
+        if (code == KeeperException.Code.OK) {
+          result.set(new BasicNodeData(data, stat));
+          return;
+        }
+        result.setException(KeeperException.create(code, result.getRequestPath()));
+      }
+    };
+
+    static final AsyncCallback.VoidCallback VOID = new AsyncCallback.VoidCallback() {
+      @Override
+      @SuppressWarnings("unchecked")
+      public void processResult(int rc, String path, Object ctx) {
+        SettableOperationFuture<String> result = (SettableOperationFuture<String>) ctx;
+        KeeperException.Code code = KeeperException.Code.get(rc);
+        if (code == KeeperException.Code.OK) {
+          result.set(result.getRequestPath());
+          return;
+        }
+        // Otherwise, it is an error
+        result.setException(KeeperException.create(code, result.getRequestPath()));
+      }
+    };
+  }
+}
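
As a rough usage sketch (not part of the patch): the asynchronous operations above all return OperationFuture, so a caller can either block on them or attach callbacks. The connect string localhost:2181 and the /demo/value path below are placeholders.

import org.apache.twill.internal.zookeeper.DefaultZKClientService;
import org.apache.twill.zookeeper.NodeData;
import org.apache.twill.zookeeper.ZKClientService;

import com.google.common.base.Charsets;
import org.apache.zookeeper.CreateMode;

public class ZKClientExample {
  public static void main(String[] args) throws Exception {
    ZKClientService client = new DefaultZKClientService("localhost:2181", 10000, null);
    client.startAndWait();
    try {
      // create() returns an OperationFuture; blocking on it yields the created path.
      String path = client.create("/demo/value", "hello".getBytes(Charsets.UTF_8),
                                  CreateMode.PERSISTENT).get();
      NodeData node = client.getData(path).get();
      System.out.println(new String(node.getData(), Charsets.UTF_8));
    } finally {
      client.stopAndWait();
    }
  }
}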


[16/28] Making maven site works.

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/state/MessageCallback.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/state/MessageCallback.java b/twill-core/src/main/java/org/apache/twill/internal/state/MessageCallback.java
new file mode 100644
index 0000000..f94eaa3
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/state/MessageCallback.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.state;
+
+import com.google.common.util.concurrent.ListenableFuture;
+
+/**
+ * Callback for handling received {@link Message}s.
+ */
+public interface MessageCallback {
+
+  /**
+   * Called when a message is received.
+   * @param message Message being received.
+   * @return A {@link ListenableFuture} that will be completed when message processing completes or fails.
+   *         The result of the future should be the input message id if processing succeeded.
+   */
+  ListenableFuture<String> onReceived(String messageId, Message message);
+}
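
A hedged sketch of what an implementation of this callback could look like; the class name and the logging behaviour are invented for illustration. Acknowledging a message is done here by completing the returned future with the message id.

import org.apache.twill.internal.state.Message;
import org.apache.twill.internal.state.MessageCallback;

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;

public class LoggingMessageCallback implements MessageCallback {

  @Override
  public ListenableFuture<String> onReceived(String messageId, Message message) {
    System.out.println("Received " + message.getType() + " message: " + message.getCommand());
    // Completing the future with the message id acknowledges the message.
    return Futures.immediateFuture(messageId);
  }
}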

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/state/MessageCodec.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/state/MessageCodec.java b/twill-core/src/main/java/org/apache/twill/internal/state/MessageCodec.java
new file mode 100644
index 0000000..176f620
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/state/MessageCodec.java
@@ -0,0 +1,125 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.state;
+
+import org.apache.twill.api.Command;
+import com.google.common.base.Charsets;
+import com.google.common.reflect.TypeToken;
+import com.google.gson.Gson;
+import com.google.gson.GsonBuilder;
+import com.google.gson.JsonDeserializationContext;
+import com.google.gson.JsonDeserializer;
+import com.google.gson.JsonElement;
+import com.google.gson.JsonObject;
+import com.google.gson.JsonParseException;
+import com.google.gson.JsonSerializationContext;
+import com.google.gson.JsonSerializer;
+
+import java.lang.reflect.Type;
+import java.util.Map;
+
+/**
+ * Codec for encoding {@link Message} objects to JSON bytes and decoding them back.
+ */
+public final class MessageCodec {
+
+  private static final Type OPTIONS_TYPE = new TypeToken<Map<String, String>>() {}.getType();
+  private static final Gson GSON = new GsonBuilder()
+                                        .registerTypeAdapter(Message.class, new MessageAdapter())
+                                        .registerTypeAdapter(Command.class, new CommandAdapter())
+                                        .create();
+
+  /**
+   * Decodes a {@link Message} from the given byte array.
+   * @param bytes byte array to be decoded
+   * @return The decoded Message, or {@code null} if the given byte array is {@code null}.
+   */
+  public static Message decode(byte[] bytes) {
+    if (bytes == null) {
+      return null;
+    }
+    String content = new String(bytes, Charsets.UTF_8);
+    return GSON.fromJson(content, Message.class);
+  }
+
+  /**
+   * Encodes a {@link Message} into a byte array. Reverse of the {@link #decode(byte[])} method.
+   * @param message Message to be encoded
+   * @return byte array representing the encoded message.
+   */
+  public static byte[] encode(Message message) {
+    return GSON.toJson(message, Message.class).getBytes(Charsets.UTF_8);
+  }
+
+  /**
+   * Gson codec for {@link Message} object.
+   */
+  private static final class MessageAdapter implements JsonSerializer<Message>, JsonDeserializer<Message> {
+
+    @Override
+    public Message deserialize(JsonElement json, Type typeOfT,
+                               JsonDeserializationContext context) throws JsonParseException {
+      JsonObject jsonObj = json.getAsJsonObject();
+
+      Message.Type type = Message.Type.valueOf(jsonObj.get("type").getAsString());
+      Message.Scope scope = Message.Scope.valueOf(jsonObj.get("scope").getAsString());
+      JsonElement name = jsonObj.get("runnableName");
+      String runnableName = (name == null || name.isJsonNull()) ? null : name.getAsString();
+      Command command = context.deserialize(jsonObj.get("command"), Command.class);
+
+      return new SimpleMessage(type, scope, runnableName, command);
+    }
+
+    @Override
+    public JsonElement serialize(Message message, Type typeOfSrc, JsonSerializationContext context) {
+      JsonObject jsonObj = new JsonObject();
+      jsonObj.addProperty("type", message.getType().name());
+      jsonObj.addProperty("scope", message.getScope().name());
+      jsonObj.addProperty("runnableName", message.getRunnableName());
+      jsonObj.add("command", context.serialize(message.getCommand(), Command.class));
+
+      return jsonObj;
+    }
+  }
+
+  /**
+   * Gson codec for {@link Command} object.
+   */
+  private static final class CommandAdapter implements JsonSerializer<Command>, JsonDeserializer<Command> {
+
+    @Override
+    public Command deserialize(JsonElement json, Type typeOfT,
+                               JsonDeserializationContext context) throws JsonParseException {
+      JsonObject jsonObj = json.getAsJsonObject();
+      return Command.Builder.of(jsonObj.get("command").getAsString())
+                            .addOptions(context.<Map<String, String>>deserialize(jsonObj.get("options"), OPTIONS_TYPE))
+                            .build();
+    }
+
+    @Override
+    public JsonElement serialize(Command command, Type typeOfSrc, JsonSerializationContext context) {
+      JsonObject jsonObj = new JsonObject();
+      jsonObj.addProperty("command", command.getCommand());
+      jsonObj.add("options", context.serialize(command.getOptions(), OPTIONS_TYPE));
+      return jsonObj;
+    }
+  }
+
+  private MessageCodec() {
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/state/Messages.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/state/Messages.java b/twill-core/src/main/java/org/apache/twill/internal/state/Messages.java
new file mode 100644
index 0000000..9783d62
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/state/Messages.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.state;
+
+import org.apache.twill.api.Command;
+
+/**
+ * Factory class for creating instances of {@link Message}.
+ */
+public final class Messages {
+
+  /**
+   * Creates a {@link Message.Type#USER} type {@link Message} that sends the given {@link Command} to a
+   * particular runnable.
+   *
+   * @param runnableName Name of the runnable.
+   * @param command The user command to send.
+   * @return A new instance of {@link Message}.
+   */
+  public static Message createForRunnable(String runnableName, Command command) {
+    return new SimpleMessage(Message.Type.USER, Message.Scope.RUNNABLE, runnableName, command);
+  }
+
+  /**
+   * Creates a {@link Message.Type#USER} type {@link Message} that sends the given {@link Command} to all
+   * runnables.
+   *
+   * @param command The user command to send.
+   * @return A new instance of {@link Message}.
+   */
+  public static Message createForAll(Command command) {
+    return new SimpleMessage(Message.Type.USER, Message.Scope.ALL_RUNNABLE, null, command);
+  }
+
+  private Messages() {
+  }
+}
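
A small round-trip sketch (illustrative only) tying these factory methods to the MessageCodec added earlier in this patch; the runnable name "worker" and the option values are made up.

import org.apache.twill.api.Command;
import org.apache.twill.internal.state.Message;
import org.apache.twill.internal.state.MessageCodec;
import org.apache.twill.internal.state.Messages;

public class MessageCodecExample {
  public static void main(String[] args) {
    Message original = Messages.createForRunnable(
      "worker", Command.Builder.of("instances").addOption("count", "3").build());

    // Encode to the JSON byte form and decode it back.
    byte[] encoded = MessageCodec.encode(original);
    Message decoded = MessageCodec.decode(encoded);

    // SimpleMessage implements equals(), so the round trip can be verified directly.
    System.out.println(original.equals(decoded));  // expected: true
  }
}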

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/state/SimpleMessage.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/state/SimpleMessage.java b/twill-core/src/main/java/org/apache/twill/internal/state/SimpleMessage.java
new file mode 100644
index 0000000..e146e56
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/state/SimpleMessage.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.state;
+
+import org.apache.twill.api.Command;
+import com.google.common.base.Objects;
+
+/**
+ * A simple implementation of {@link Message}.
+ */
+final class SimpleMessage implements Message {
+
+  private final Type type;
+  private final Scope scope;
+  private final String runnableName;
+  private final Command command;
+
+  SimpleMessage(Type type, Scope scope, String runnableName, Command command) {
+    this.type = type;
+    this.scope = scope;
+    this.runnableName = runnableName;
+    this.command = command;
+  }
+
+  @Override
+  public Type getType() {
+    return type;
+  }
+
+  @Override
+  public Scope getScope() {
+    return scope;
+  }
+
+  @Override
+  public String getRunnableName() {
+    return runnableName;
+  }
+
+  @Override
+  public Command getCommand() {
+    return command;
+  }
+
+  @Override
+  public String toString() {
+    return Objects.toStringHelper(Message.class)
+      .add("type", type)
+      .add("scope", scope)
+      .add("runnable", runnableName)
+      .add("command", command)
+      .toString();
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hashCode(type, scope, runnableName, command);
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (obj == this) {
+      return true;
+    }
+    if (!(obj instanceof Message)) {
+      return false;
+    }
+    Message other = (Message) obj;
+    return type == other.getType()
+      && scope == other.getScope()
+      && Objects.equal(runnableName, other.getRunnableName())
+      && Objects.equal(command, other.getCommand());
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/state/StateNode.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/state/StateNode.java b/twill-core/src/main/java/org/apache/twill/internal/state/StateNode.java
new file mode 100644
index 0000000..d66f8a2
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/state/StateNode.java
@@ -0,0 +1,84 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.state;
+
+import org.apache.twill.api.ServiceController;
+import com.google.common.util.concurrent.Service;
+
+/**
+ * Represents a service state, together with an optional error message and stack trace.
+ */
+public final class StateNode {
+
+  private final ServiceController.State state;
+  private final String errorMessage;
+  private final StackTraceElement[] stackTraces;
+
+  /**
+   * Constructs a StateNode with the given state.
+   */
+  public StateNode(ServiceController.State state) {
+    this(state, null, null);
+  }
+
+  /**
+   * Constructs a StateNode with {@link ServiceController.State#FAILED} caused by the given error.
+   */
+  public StateNode(Throwable error) {
+    this(Service.State.FAILED, error.getMessage(), error.getStackTrace());
+  }
+
+  /**
+   * Constructs a StateNode with the given state, error and stacktraces.
+   * This constructor should only be used by the StateNodeCodec.
+   */
+  public StateNode(ServiceController.State state, String errorMessage, StackTraceElement[] stackTraces) {
+    this.state = state;
+    this.errorMessage = errorMessage;
+    this.stackTraces = stackTraces;
+  }
+
+  public ServiceController.State getState() {
+    return state;
+  }
+
+  public String getErrorMessage() {
+    return errorMessage;
+  }
+
+  public StackTraceElement[] getStackTraces() {
+    return stackTraces;
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder builder = new StringBuilder("state=").append(state);
+
+    if (errorMessage != null) {
+      builder.append("\n").append("error=").append(errorMessage);
+    }
+    if (stackTraces != null) {
+      builder.append("\n");
+      for (StackTraceElement stackTrace : stackTraces) {
+        builder.append("\tat ").append(stackTrace.toString()).append("\n");
+      }
+    }
+    return builder.toString();
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/state/SystemMessages.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/state/SystemMessages.java b/twill-core/src/main/java/org/apache/twill/internal/state/SystemMessages.java
new file mode 100644
index 0000000..9877121
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/state/SystemMessages.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.state;
+
+import org.apache.twill.api.Command;
+import com.google.common.base.Preconditions;
+
+/**
+ * Collection of predefined system messages.
+ */
+public final class SystemMessages {
+
+  public static final Command STOP_COMMAND = Command.Builder.of("stop").build();
+  public static final Message SECURE_STORE_UPDATED = new SimpleMessage(
+    Message.Type.SYSTEM, Message.Scope.APPLICATION, null, Command.Builder.of("secureStoreUpdated").build());
+
+  public static Message stopApplication() {
+    return new SimpleMessage(Message.Type.SYSTEM, Message.Scope.APPLICATION, null, STOP_COMMAND);
+  }
+
+  public static Message stopRunnable(String runnableName) {
+    return new SimpleMessage(Message.Type.SYSTEM, Message.Scope.RUNNABLE, runnableName, STOP_COMMAND);
+  }
+
+  public static Message setInstances(String runnableName, int instances) {
+    Preconditions.checkArgument(instances > 0, "Instances should be > 0.");
+    return new SimpleMessage(Message.Type.SYSTEM, Message.Scope.RUNNABLE, runnableName,
+                             Command.Builder.of("instances").addOption("count", Integer.toString(instances)).build());
+  }
+
+  private SystemMessages() {
+  }
+}
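
For illustration, the factory methods above can be combined with MessageCodec to inspect the JSON form of system messages; the runnable name "worker" and the instance count are hypothetical.

import org.apache.twill.internal.state.Message;
import org.apache.twill.internal.state.MessageCodec;
import org.apache.twill.internal.state.SystemMessages;

import com.google.common.base.Charsets;

public class SystemMessagesExample {
  public static void main(String[] args) {
    // Ask the runnable named "worker" to scale to 5 instances, and the whole application to stop.
    Message scale = SystemMessages.setInstances("worker", 5);
    Message stopAll = SystemMessages.stopApplication();

    // Encode them with MessageCodec to inspect the JSON payload.
    System.out.println(new String(MessageCodec.encode(scale), Charsets.UTF_8));
    System.out.println(new String(MessageCodec.encode(stopAll), Charsets.UTF_8));
  }
}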

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/utils/Dependencies.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/utils/Dependencies.java b/twill-core/src/main/java/org/apache/twill/internal/utils/Dependencies.java
new file mode 100644
index 0000000..015b9f5
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/utils/Dependencies.java
@@ -0,0 +1,323 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.utils;
+
+import com.google.common.base.Throwables;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
+import com.google.common.io.ByteStreams;
+import org.objectweb.asm.AnnotationVisitor;
+import org.objectweb.asm.ClassReader;
+import org.objectweb.asm.ClassVisitor;
+import org.objectweb.asm.FieldVisitor;
+import org.objectweb.asm.Label;
+import org.objectweb.asm.MethodVisitor;
+import org.objectweb.asm.Opcodes;
+import org.objectweb.asm.Type;
+import org.objectweb.asm.signature.SignatureReader;
+import org.objectweb.asm.signature.SignatureVisitor;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.MalformedURLException;
+import java.net.URI;
+import java.net.URL;
+import java.util.Queue;
+import java.util.Set;
+
+/**
+ * Utility class to help find out class dependencies.
+ */
+public final class Dependencies {
+
+  /**
+   * Represents a callback for accepting a class during dependency traversal.
+   */
+  public interface ClassAcceptor {
+    /**
+     * Invoked when a class is being found as a dependency.
+     *
+     * @param className Name of the class.
+     * @param classUrl URL for the class resource.
+     * @param classPathUrl URL for the class path resource that contains the class resource.
+     *                     If the URL protocol is {@code file}, it would be the path to root package.
+     *                     If the URL protocol is {@code jar}, it would be the jar file.
+     * @return true to keep finding dependencies of the given class.
+     */
+    boolean accept(String className, URL classUrl, URL classPathUrl);
+  }
+
+  public static void findClassDependencies(ClassLoader classLoader,
+                                           ClassAcceptor acceptor,
+                                           String...classesToResolve) throws IOException {
+    findClassDependencies(classLoader, acceptor, ImmutableList.copyOf(classesToResolve));
+  }
+
+  /**
+   * Finds the class dependencies of the given classes.
+   * @param classLoader ClassLoader for finding class bytecode.
+   * @param acceptor Acceptor invoked for each class found; it decides whether to keep traversing into that class.
+   * @param classesToResolve Classes to find dependencies for.
+   * @throws IOException Thrown when there is an error loading class bytecode.
+   */
+  public static void findClassDependencies(ClassLoader classLoader,
+                                           ClassAcceptor acceptor,
+                                           Iterable<String> classesToResolve) throws IOException {
+
+    final Set<String> seenClasses = Sets.newHashSet(classesToResolve);
+    final Queue<String> classes = Lists.newLinkedList(classesToResolve);
+
+    // Breadth-first-search classes dependencies.
+    while (!classes.isEmpty()) {
+      String className = classes.remove();
+      URL classUrl = getClassURL(className, classLoader);
+      if (classUrl == null) {
+        continue;
+      }
+
+      // Call the acceptor to see if it accepts the current class.
+      if (!acceptor.accept(className, classUrl, getClassPathURL(className, classUrl))) {
+        continue;
+      }
+
+      InputStream is = classUrl.openStream();
+      try {
+        // Visit the bytecode to look up classes that the visited class depends on.
+        new ClassReader(ByteStreams.toByteArray(is)).accept(new DependencyClassVisitor(new DependencyAcceptor() {
+          @Override
+          public void accept(String className) {
+            // Queue the class for traversal if it hasn't been seen before.
+            if (seenClasses.add(className)) {
+              classes.add(className);
+            }
+          }
+        }), ClassReader.SKIP_DEBUG + ClassReader.SKIP_FRAMES);
+      } finally {
+        is.close();
+      }
+    }
+  }
+
+  /**
+   * Returns the URL for loading the class bytecode of the given class, or null if the class resource
+   * cannot be found.
+   */
+  private static URL getClassURL(String className, ClassLoader classLoader) {
+    String resourceName = className.replace('.', '/') + ".class";
+    return classLoader.getResource(resourceName);
+  }
+
+  private static URL getClassPathURL(String className, URL classUrl) {
+    try {
+      if ("file".equals(classUrl.getProtocol())) {
+        String path = classUrl.getFile();
+        // Compute the directory containing the class.
+        int endIdx = path.length() - className.length() - ".class".length();
+        if (endIdx > 1) {
+          // If it is not the root directory, decrement the end index to remove the trailing '/'.
+          endIdx--;
+        }
+        return new URL("file", "", -1, path.substring(0, endIdx));
+      }
+      if ("jar".equals(classUrl.getProtocol())) {
+        String path = classUrl.getFile();
+        return URI.create(path.substring(0, path.indexOf("!/"))).toURL();
+      }
+    } catch (MalformedURLException e) {
+      throw Throwables.propagate(e);
+    }
+    throw new IllegalStateException("Unsupported class URL: " + classUrl);
+  }
+
+  /**
+   * A private interface for accepting a dependent class that is found during bytecode inspection.
+   */
+  private interface DependencyAcceptor {
+    void accept(String className);
+  }
+
+  /**
+   * ASM ClassVisitor for extracting classes dependencies.
+   */
+  private static final class DependencyClassVisitor extends ClassVisitor {
+
+    private final SignatureVisitor signatureVisitor;
+    private final DependencyAcceptor acceptor;
+
+    public DependencyClassVisitor(DependencyAcceptor acceptor) {
+      super(Opcodes.ASM4);
+      this.acceptor = acceptor;
+      this.signatureVisitor = new SignatureVisitor(Opcodes.ASM4) {
+        private String currentClass;
+
+        @Override
+        public void visitClassType(String name) {
+          currentClass = name;
+          addClass(name);
+        }
+
+        @Override
+        public void visitInnerClassType(String name) {
+          addClass(currentClass + "$" + name);
+        }
+      };
+    }
+
+    @Override
+    public void visit(int version, int access, String name, String signature, String superName, String[] interfaces) {
+      addClass(name);
+
+      if (signature != null) {
+        new SignatureReader(signature).accept(signatureVisitor);
+      } else {
+        addClass(superName);
+        addClasses(interfaces);
+      }
+    }
+
+    @Override
+    public void visitOuterClass(String owner, String name, String desc) {
+      addClass(owner);
+    }
+
+    @Override
+    public AnnotationVisitor visitAnnotation(String desc, boolean visible) {
+      addType(Type.getType(desc));
+      return null;
+    }
+
+    @Override
+    public void visitInnerClass(String name, String outerName, String innerName, int access) {
+      addClass(name);
+    }
+
+    @Override
+    public FieldVisitor visitField(int access, String name, String desc, String signature, Object value) {
+      if (signature != null) {
+        new SignatureReader(signature).acceptType(signatureVisitor);
+      } else {
+        addType(Type.getType(desc));
+      }
+
+      return new FieldVisitor(Opcodes.ASM4) {
+        @Override
+        public AnnotationVisitor visitAnnotation(String desc, boolean visible) {
+          addType(Type.getType(desc));
+          return null;
+        }
+      };
+    }
+
+    @Override
+    public MethodVisitor visitMethod(int access, String name, String desc, String signature, String[] exceptions) {
+      if (signature != null) {
+        new SignatureReader(signature).accept(signatureVisitor);
+      } else {
+        addMethod(desc);
+      }
+      addClasses(exceptions);
+
+      return new MethodVisitor(Opcodes.ASM4) {
+        @Override
+        public AnnotationVisitor visitAnnotation(String desc, boolean visible) {
+          addType(Type.getType(desc));
+          return null;
+        }
+
+        @Override
+        public AnnotationVisitor visitParameterAnnotation(int parameter, String desc, boolean visible) {
+          addType(Type.getType(desc));
+          return null;
+        }
+
+        @Override
+        public void visitTypeInsn(int opcode, String type) {
+          addType(Type.getObjectType(type));
+        }
+
+        @Override
+        public void visitFieldInsn(int opcode, String owner, String name, String desc) {
+          addType(Type.getObjectType(owner));
+          addType(Type.getType(desc));
+        }
+
+        @Override
+        public void visitMethodInsn(int opcode, String owner, String name, String desc) {
+          addType(Type.getObjectType(owner));
+          addMethod(desc);
+        }
+
+        @Override
+        public void visitLdcInsn(Object cst) {
+          if (cst instanceof Type) {
+            addType((Type) cst);
+          }
+        }
+
+        @Override
+        public void visitMultiANewArrayInsn(String desc, int dims) {
+          addType(Type.getType(desc));
+        }
+
+        @Override
+        public void visitLocalVariable(String name, String desc, String signature, Label start, Label end, int index) {
+          if (signature != null) {
+            new SignatureReader(signature).acceptType(signatureVisitor);
+          } else {
+            addType(Type.getType(desc));
+          }
+        }
+      };
+    }
+
+    private void addClass(String internalName) {
+      if (internalName == null || internalName.startsWith("java/")) {
+        return;
+      }
+      acceptor.accept(Type.getObjectType(internalName).getClassName());
+    }
+
+    private void addClasses(String[] classes) {
+      if (classes != null) {
+        for (String clz : classes) {
+          addClass(clz);
+        }
+      }
+    }
+
+    private void addType(Type type) {
+      if (type.getSort() == Type.ARRAY) {
+        type = type.getElementType();
+      }
+      if (type.getSort() == Type.OBJECT) {
+        addClass(type.getInternalName());
+      }
+    }
+
+    private void addMethod(String desc) {
+      addType(Type.getReturnType(desc));
+      for (Type type : Type.getArgumentTypes(desc)) {
+        addType(type);
+      }
+    }
+  }
+
+  private Dependencies() {
+  }
+}
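
A short illustrative driver for the traversal above: it prints every non-JDK class reachable from a starting class together with its class path URL. The starting class chosen here is arbitrary.

import org.apache.twill.internal.utils.Dependencies;

import java.io.IOException;
import java.net.URL;

public class DependencyWalk {
  public static void main(String[] args) throws IOException {
    Dependencies.findClassDependencies(
      DependencyWalk.class.getClassLoader(),
      new Dependencies.ClassAcceptor() {
        @Override
        public boolean accept(String className, URL classUrl, URL classPathUrl) {
          System.out.println(className + " <- " + classPathUrl);
          // Returning true tells the breadth-first traversal to continue into this class.
          return true;
        }
      },
      "org.apache.twill.internal.utils.Dependencies");
  }
}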

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/utils/Instances.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/utils/Instances.java b/twill-core/src/main/java/org/apache/twill/internal/utils/Instances.java
new file mode 100644
index 0000000..28bfce9
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/utils/Instances.java
@@ -0,0 +1,112 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.utils;
+
+import com.google.common.base.Defaults;
+import com.google.common.base.Preconditions;
+import com.google.common.base.Throwables;
+import com.google.common.reflect.TypeToken;
+
+import java.lang.reflect.Constructor;
+import java.lang.reflect.Field;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.lang.reflect.Modifier;
+
+/**
+ * Utility class to help instantiate object instances from classes.
+ */
+public final class Instances {
+
+  private static final Object UNSAFE;
+  private static final Method UNSAFE_NEW_INSTANCE;
+
+  static {
+    Object unsafe;
+    Method newInstance;
+    try {
+      Class<?> clz = Class.forName("sun.misc.Unsafe");
+      Field f = clz.getDeclaredField("theUnsafe");
+      f.setAccessible(true);
+      unsafe = f.get(null);
+
+      newInstance = clz.getMethod("allocateInstance", Class.class);
+    } catch (Exception e) {
+      unsafe = null;
+      newInstance = null;
+    }
+    UNSAFE = unsafe;
+    UNSAFE_NEW_INSTANCE = newInstance;
+  }
+
+  /**
+   * Creates a new instance of the given class. It will use the default constructor if it is present.
+   * Otherwise, it will try to use {@link sun.misc.Unsafe#allocateInstance(Class)} to create the instance.
+   * @param clz Class of object to be instantiated.
+   * @param <T> Type of the class
+   * @return An instance of type {@code <T>}
+   */
+  @SuppressWarnings("unchecked")
+  public static <T> T newInstance(Class<T> clz) {
+    try {
+      try {
+        Constructor<T> cons = clz.getDeclaredConstructor();
+        if (!cons.isAccessible()) {
+          cons.setAccessible(true);
+        }
+        return cons.newInstance();
+      } catch (Exception e) {
+        // Try to use Unsafe
+        Preconditions.checkState(UNSAFE != null, "Fail to instantiate with Unsafe.");
+        return unsafeCreate(clz);
+      }
+    } catch (Exception e) {
+      throw Throwables.propagate(e);
+    }
+  }
+
+
+  /**
+   * Creates an instance of the given class using Unsafe. It also initializes all fields to their default values.
+   */
+  private static <T> T unsafeCreate(Class<T> clz) throws InvocationTargetException, IllegalAccessException {
+    T instance = (T) UNSAFE_NEW_INSTANCE.invoke(UNSAFE, clz);
+
+    for (TypeToken<?> type : TypeToken.of(clz).getTypes().classes()) {
+      if (Object.class.equals(type.getRawType())) {
+        break;
+      }
+      for (Field field : type.getRawType().getDeclaredFields()) {
+        if (Modifier.isStatic(field.getModifiers())) {
+          continue;
+        }
+        if (!field.isAccessible()) {
+          field.setAccessible(true);
+        }
+        field.set(instance, Defaults.defaultValue(field.getType()));
+      }
+    }
+
+    return instance;
+  }
+
+
+  private Instances() {
+    // Protect instantiation of this class
+  }
+}
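
A minimal usage sketch (illustration only, not part of the patch); the Config class here is a hypothetical example of a type without a public constructor:

    import org.apache.twill.internal.utils.Instances;

    public final class InstancesExample {
      // Hypothetical type with no public constructor.
      static final class Config {
        private Config() { }
      }

      public static void main(String[] args) {
        // newInstance uses the declared no-arg constructor (made accessible) when available,
        // and falls back to Unsafe.allocateInstance otherwise.
        Config config = Instances.newInstance(Config.class);
        System.out.println(config);
      }
    }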

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/utils/Networks.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/utils/Networks.java b/twill-core/src/main/java/org/apache/twill/internal/utils/Networks.java
new file mode 100644
index 0000000..8e7d736
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/utils/Networks.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.utils;
+
+import java.io.IOException;
+import java.net.ServerSocket;
+
+/**
+ *
+ */
+public final class Networks {
+
+  /**
+   * Finds a random free port on localhost for binding.
+   * @return A port number or -1 for failure.
+   */
+  public static int getRandomPort() {
+    try {
+      ServerSocket socket = new ServerSocket(0);
+      try {
+        return socket.getLocalPort();
+      } finally {
+        socket.close();
+      }
+    } catch (IOException e) {
+      return -1;
+    }
+  }
+
+  private Networks() {
+  }
+}
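
A minimal usage sketch (illustration only, not part of the patch):

    import org.apache.twill.internal.utils.Networks;

    public final class NetworksExample {
      public static void main(String[] args) {
        // getRandomPort opens and closes a ServerSocket bound to port 0 to find a free port,
        // and returns -1 if it fails.
        int port = Networks.getRandomPort();
        if (port < 0) {
          throw new IllegalStateException("Failed to find a free port.");
        }
        System.out.println("Free port: " + port);
      }
    }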

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/utils/Paths.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/utils/Paths.java b/twill-core/src/main/java/org/apache/twill/internal/utils/Paths.java
new file mode 100644
index 0000000..aeee09f
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/utils/Paths.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.utils;
+
+import com.google.common.io.Files;
+
+/**
+ *
+ */
+public final class Paths {
+
+
+  public static String appendSuffix(String extractFrom, String appendTo) {
+    String suffix = getExtension(extractFrom);
+    if (!suffix.isEmpty()) {
+      return appendTo + '.' + suffix;
+    }
+    return appendTo;
+  }
+
+  public static String getExtension(String path) {
+    if (path.endsWith(".tar.gz")) {
+      return "tar.gz";
+    }
+
+    return Files.getFileExtension(path);
+  }
+
+  private Paths() {
+  }
+}
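
A minimal usage sketch (illustration only, not part of the patch); the file names are made up:

    import org.apache.twill.internal.utils.Paths;

    public final class PathsExample {
      public static void main(String[] args) {
        // ".tar.gz" is treated as a single extension; otherwise the last extension is used.
        System.out.println(Paths.getExtension("kafka-0.7.2.tar.gz"));     // tar.gz
        System.out.println(Paths.getExtension("bundle.jar"));             // jar

        // appendSuffix copies the extension of the first path onto the second name.
        System.out.println(Paths.appendSuffix("archive.tar.gz", "copy")); // copy.tar.gz
      }
    }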

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/kafka/client/FetchException.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/kafka/client/FetchException.java b/twill-core/src/main/java/org/apache/twill/kafka/client/FetchException.java
new file mode 100644
index 0000000..acccf04
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/kafka/client/FetchException.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.kafka.client;
+
+/**
+ *
+ */
+public final class FetchException extends RuntimeException {
+
+  private final ErrorCode errorCode;
+
+  public FetchException(String message, ErrorCode errorCode) {
+    super(message);
+    this.errorCode = errorCode;
+  }
+
+  public ErrorCode getErrorCode() {
+    return errorCode;
+  }
+
+  @Override
+  public String toString() {
+    return String.format("%s. Error code: %s", super.toString(), errorCode);
+  }
+
+  public enum ErrorCode {
+    UNKNOWN(-1),
+    OK(0),
+    OFFSET_OUT_OF_RANGE(1),
+    INVALID_MESSAGE(2),
+    WRONG_PARTITION(3),
+    INVALID_FETCH_SIZE(4);
+
+    private final int code;
+
+    ErrorCode(int code) {
+      this.code = code;
+    }
+
+    public int getCode() {
+      return code;
+    }
+
+    public static ErrorCode fromCode(int code) {
+      switch (code) {
+        case -1:
+          return UNKNOWN;
+        case 0:
+          return OK;
+        case 1:
+          return OFFSET_OUT_OF_RANGE;
+        case 2:
+          return INVALID_MESSAGE;
+        case 3:
+          return WRONG_PARTITION;
+        case 4:
+          return INVALID_FETCH_SIZE;
+      }
+      throw new IllegalArgumentException("Unknown error code");
+    }
+  }
+}
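
A minimal usage sketch (illustration only, not part of the patch); the topic name and numeric code are made up:

    import org.apache.twill.kafka.client.FetchException;

    public final class FetchErrorExample {
      public static void main(String[] args) {
        // fromCode maps a numeric broker error code back to its enum constant.
        FetchException.ErrorCode code = FetchException.ErrorCode.fromCode(1);
        if (code != FetchException.ErrorCode.OK) {
          throw new FetchException("Fetch failed for topic 'logs'", code);
        }
      }
    }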

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/kafka/client/FetchedMessage.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/kafka/client/FetchedMessage.java b/twill-core/src/main/java/org/apache/twill/kafka/client/FetchedMessage.java
new file mode 100644
index 0000000..65e140f
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/kafka/client/FetchedMessage.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.kafka.client;
+
+import java.nio.ByteBuffer;
+
+/**
+ * Represents a message fetched from a Kafka broker.
+ */
+public interface FetchedMessage {
+
+  /**
+   * Returns the message offset.
+   */
+  long getOffset();
+
+  /**
+   * Returns the message payload.
+   */
+  ByteBuffer getBuffer();
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/kafka/client/KafkaClient.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/kafka/client/KafkaClient.java b/twill-core/src/main/java/org/apache/twill/kafka/client/KafkaClient.java
new file mode 100644
index 0000000..496195b
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/kafka/client/KafkaClient.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.kafka.client;
+
+import org.apache.twill.internal.kafka.client.Compression;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.Service;
+
+import java.util.Iterator;
+
+/**
+ * This interface provides methods for interacting with a Kafka broker. It also
+ * extends {@link Service} for lifecycle management. The {@link #start()} method
+ * must be called prior to calling other methods of this class. When an instance of this
+ * class is no longer needed, call {@link #stop()} to release any resources that it holds.
+ */
+public interface KafkaClient extends Service {
+
+  PreparePublish preparePublish(String topic, Compression compression);
+
+  Iterator<FetchedMessage> consume(String topic, int partition, long offset, int maxSize);
+
+  /**
+   * Fetches offset from the given topic and partition.
+   * @param topic Topic to fetch from.
+   * @param partition Partition to fetch from.
+   * @param time Fetches the first offset of every segment file for the given partition whose modified time is
+   *             earlier than this time. Use {@code -1} for the latest offset, {@code -2} for the earliest offset.
+   * @param maxOffsets Maximum number of offsets to fetch.
+   * @return A Future that carries the result as an array of offsets in descending order.
+   *         The size of the result array will not be larger than maxOffsets. If there is any error during the fetch,
+   *         it will be carried as the failure cause of the Future.
+   */
+  ListenableFuture<long[]> getOffset(String topic, int partition, long time, int maxOffsets);
+}
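
A minimal lifecycle sketch (illustration only, not part of the patch), assuming the SimpleKafkaClient implementation used in KafkaTest below and a placeholder ZooKeeper connection string:

    import org.apache.twill.internal.kafka.client.SimpleKafkaClient;
    import org.apache.twill.kafka.client.KafkaClient;
    import org.apache.twill.zookeeper.ZKClientService;

    public final class KafkaOffsetExample {
      public static void main(String[] args) throws Exception {
        // "localhost:2181" is a placeholder ZooKeeper quorum.
        ZKClientService zkClient = ZKClientService.Builder.of("localhost:2181").build();
        zkClient.startAndWait();

        KafkaClient kafka = new SimpleKafkaClient(zkClient);
        kafka.startAndWait();   // start() must be called before any other method.

        // Latest offsets (-1) for partition 0 of topic "logs", at most 10 entries, in descending order.
        long[] offsets = kafka.getOffset("logs", 0, -1L, 10).get();
        System.out.println("Number of offsets fetched: " + offsets.length);

        kafka.stopAndWait();
        zkClient.stopAndWait();
      }
    }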

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/kafka/client/PreparePublish.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/kafka/client/PreparePublish.java b/twill-core/src/main/java/org/apache/twill/kafka/client/PreparePublish.java
new file mode 100644
index 0000000..5db4abb
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/kafka/client/PreparePublish.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.kafka.client;
+
+import com.google.common.util.concurrent.ListenableFuture;
+
+import java.nio.ByteBuffer;
+
+/**
+ * This interface is for preparing to publish a set of messages to Kafka.
+ */
+public interface PreparePublish {
+
+  PreparePublish add(byte[] payload, Object partitionKey);
+
+  PreparePublish add(ByteBuffer payload, Object partitionKey);
+
+  ListenableFuture<?> publish();
+}
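
A minimal usage sketch (illustration only, not part of the patch); kafkaClient is assumed to be an already started KafkaClient and the topic name is made up:

    import org.apache.twill.internal.kafka.client.Compression;
    import org.apache.twill.kafka.client.KafkaClient;
    import org.apache.twill.kafka.client.PreparePublish;

    import java.nio.charset.Charset;

    public final class PublishExample {
      static void publishTwo(KafkaClient kafkaClient, String topic) throws Exception {
        // Messages accumulate through add() and are sent together by publish();
        // the integer 0 is used as the partition key for both messages.
        PreparePublish batch = kafkaClient.preparePublish(topic, Compression.NONE);
        batch.add("first message".getBytes(Charset.forName("UTF-8")), 0);
        batch.add("second message".getBytes(Charset.forName("UTF-8")), 0);
        batch.publish().get();   // Blocks until the batch has been published.
      }
    }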

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/kafka/client/package-info.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/kafka/client/package-info.java b/twill-core/src/main/java/org/apache/twill/kafka/client/package-info.java
new file mode 100644
index 0000000..ea3bf20
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/kafka/client/package-info.java
@@ -0,0 +1,21 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * This package provides a pure java Kafka client interface.
+ */
+package org.apache.twill.kafka.client;

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/launcher/TwillLauncher.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/launcher/TwillLauncher.java b/twill-core/src/main/java/org/apache/twill/launcher/TwillLauncher.java
new file mode 100644
index 0000000..2c8c1ef
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/launcher/TwillLauncher.java
@@ -0,0 +1,236 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.launcher;
+
+import java.io.BufferedOutputStream;
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.io.OutputStream;
+import java.lang.reflect.Method;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.net.URLClassLoader;
+import java.nio.charset.Charset;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.jar.JarEntry;
+import java.util.jar.JarInputStream;
+
+/**
+ * A launcher for an application packaged in an archive jar.
+ * This class should have no dependencies on any library except the J2SE ones.
+ * This class should not import anything except java.*.
+ */
+public final class TwillLauncher {
+
+  private static final int TEMP_DIR_ATTEMPTS = 20;
+
+  /**
+   * Main method to unpack a jar and run the mainClass.main() method.
+   * @param args args[0] is the path to the jar file, args[1] is the class name of the mainClass.
+   *             The rest of the args will be passed to the mainClass unmodified.
+   */
+  public static void main(String[] args) throws Exception {
+    if (args.length < 3) {
+      System.out.println("Usage: java " + TwillLauncher.class.getName() + " [jarFile] [mainClass] [use_classpath]");
+      return;
+    }
+
+    File file = new File(args[0]);
+    final File targetDir = createTempDir("twill.launcher");
+
+    Runtime.getRuntime().addShutdownHook(new Thread() {
+      @Override
+      public void run() {
+        System.out.println("Cleanup directory " + targetDir);
+        deleteDir(targetDir);
+      }
+    });
+
+    System.out.println("UnJar " + file + " to " + targetDir);
+    unJar(file, targetDir);
+
+    // Create ClassLoader
+    URLClassLoader classLoader = createClassLoader(targetDir, Boolean.parseBoolean(args[2]));
+    Thread.currentThread().setContextClassLoader(classLoader);
+
+    System.out.println("Launch class with classpath: " + Arrays.toString(classLoader.getURLs()));
+
+    Class<?> mainClass = classLoader.loadClass(args[1]);
+    Method mainMethod = mainClass.getMethod("main", String[].class);
+    String[] arguments = Arrays.copyOfRange(args, 3, args.length);
+    System.out.println("Launching main: " + mainMethod + " " + Arrays.toString(arguments));
+    mainMethod.invoke(mainClass, new Object[]{arguments});
+    System.out.println("Main class completed.");
+
+    System.out.println("Launcher completed");
+  }
+
+  /**
+   * This method is copied from Guava Files.createTempDir().
+   */
+  private static File createTempDir(String prefix) throws IOException {
+    File baseDir = new File(System.getProperty("java.io.tmpdir"));
+    if (!baseDir.isDirectory() && !baseDir.mkdirs()) {
+      throw new IOException("Tmp directory not exists: " + baseDir.getAbsolutePath());
+    }
+
+    String baseName = prefix + "-" + System.currentTimeMillis() + "-";
+
+    for (int counter = 0; counter < TEMP_DIR_ATTEMPTS; counter++) {
+      File tempDir = new File(baseDir, baseName + counter);
+      if (tempDir.mkdir()) {
+        return tempDir;
+      }
+    }
+    throw new IOException("Failed to create directory within "
+                            + TEMP_DIR_ATTEMPTS + " attempts (tried "
+                            + baseName + "0 to " + baseName + (TEMP_DIR_ATTEMPTS - 1) + ')');
+  }
+
+  private static void unJar(File jarFile, File targetDir) throws IOException {
+    JarInputStream jarInput = new JarInputStream(new FileInputStream(jarFile));
+    try {
+      JarEntry jarEntry = jarInput.getNextJarEntry();
+      while (jarEntry != null) {
+        File target = new File(targetDir, jarEntry.getName());
+        if (jarEntry.isDirectory()) {
+          target.mkdirs();
+        } else {
+          target.getParentFile().mkdirs();
+          copy(jarInput, target);
+        }
+        jarEntry = jarInput.getNextJarEntry();
+      }
+    } finally {
+      jarInput.close();
+    }
+  }
+
+  private static void copy(InputStream is, File file) throws IOException {
+    byte[] buf = new byte[8192];
+    OutputStream os = new BufferedOutputStream(new FileOutputStream(file));
+    try {
+      int len = is.read(buf);
+      while (len != -1) {
+        os.write(buf, 0, len);
+        len = is.read(buf);
+      }
+    } finally {
+      os.close();
+    }
+  }
+
+  private static URLClassLoader createClassLoader(File dir, boolean useClassPath) {
+    try {
+      List<URL> urls = new ArrayList<URL>();
+      urls.add(dir.toURI().toURL());
+      urls.add(new File(dir, "classes").toURI().toURL());
+      urls.add(new File(dir, "resources").toURI().toURL());
+
+      File libDir = new File(dir, "lib");
+      File[] files = libDir.listFiles();
+      if (files != null) {
+        for (File file : files) {
+          if (file.getName().endsWith(".jar")) {
+            urls.add(file.toURI().toURL());
+          }
+        }
+      }
+
+      if (useClassPath) {
+        InputStream is = ClassLoader.getSystemResourceAsStream("classpath");
+        if (is != null) {
+          try {
+            BufferedReader reader = new BufferedReader(new InputStreamReader(is, Charset.forName("UTF-8")));
+            String line = reader.readLine();
+            if (line != null) {
+              for (String path : line.split(":")) {
+                urls.addAll(getClassPaths(path));
+              }
+            }
+          } finally {
+            is.close();
+          }
+        }
+      }
+
+      return new URLClassLoader(urls.toArray(new URL[0]));
+
+    } catch (Exception e) {
+      throw new IllegalStateException(e);
+    }
+  }
+
+  private static Collection<URL> getClassPaths(String path) throws MalformedURLException {
+    String classpath = expand(path);
+    if (classpath.endsWith("/*")) {
+      // Grab all .jar files
+      File dir = new File(classpath.substring(0, classpath.length() - 2));
+      File[] files = dir.listFiles();
+      if (files == null || files.length == 0) {
+        return singleItem(dir.toURI().toURL());
+      }
+
+      List<URL> result = new ArrayList<URL>(files.length);
+      for (File file : files) {
+        if (file.getName().endsWith(".jar")) {
+          result.add(file.toURI().toURL());
+        }
+      }
+      return result;
+    } else {
+      return singleItem(new File(classpath).toURI().toURL());
+    }
+  }
+
+  private static Collection<URL> singleItem(URL url) {
+    List<URL> result = new ArrayList<URL>(1);
+    result.add(url);
+    return result;
+  }
+
+  private static String expand(String value) {
+    String result = value;
+    for (Map.Entry<String, String> entry : System.getenv().entrySet()) {
+      result = result.replace("$" + entry.getKey(), entry.getValue());
+      result = result.replace("${" + entry.getKey() + "}", entry.getValue());
+    }
+    return result;
+  }
+
+  private static void deleteDir(File dir) {
+    File[] files = dir.listFiles();
+    if (files == null || files.length == 0) {
+      dir.delete();
+      return;
+    }
+    for (File file : files) {
+      deleteDir(file);
+    }
+    dir.delete();
+  }
+}
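
A minimal invocation sketch (illustration only, not part of the patch); the jar path, main class, and trailing arguments are placeholders:

    import org.apache.twill.launcher.TwillLauncher;

    public final class LauncherExample {
      public static void main(String[] args) throws Exception {
        TwillLauncher.main(new String[] {
          "/tmp/container.jar",    // args[0]: bundle jar to expand into a temp directory
          "com.example.MyMain",    // args[1]: main class to load and invoke
          "true",                  // args[2]: whether to honour the "classpath" resource
          "--port", "8080"         // remaining args are passed to MyMain.main() unmodified
        });
      }
    }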

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/resources/kafka-0.7.2.tgz
----------------------------------------------------------------------
diff --git a/twill-core/src/main/resources/kafka-0.7.2.tgz b/twill-core/src/main/resources/kafka-0.7.2.tgz
new file mode 100644
index 0000000..24178d9
Binary files /dev/null and b/twill-core/src/main/resources/kafka-0.7.2.tgz differ

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/test/java/org/apache/twill/internal/ControllerTest.java
----------------------------------------------------------------------
diff --git a/twill-core/src/test/java/org/apache/twill/internal/ControllerTest.java b/twill-core/src/test/java/org/apache/twill/internal/ControllerTest.java
new file mode 100644
index 0000000..382dc95
--- /dev/null
+++ b/twill-core/src/test/java/org/apache/twill/internal/ControllerTest.java
@@ -0,0 +1,211 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal;
+
+import org.apache.twill.api.Command;
+import org.apache.twill.api.ResourceReport;
+import org.apache.twill.api.RunId;
+import org.apache.twill.api.ServiceController;
+import org.apache.twill.api.TwillController;
+import org.apache.twill.api.logging.LogHandler;
+import org.apache.twill.common.ServiceListenerAdapter;
+import org.apache.twill.common.Threads;
+import org.apache.twill.internal.state.StateNode;
+import org.apache.twill.internal.zookeeper.InMemoryZKServer;
+import org.apache.twill.zookeeper.NodeData;
+import org.apache.twill.zookeeper.ZKClient;
+import org.apache.twill.zookeeper.ZKClientService;
+import com.google.common.base.Suppliers;
+import com.google.common.collect.ImmutableList;
+import com.google.common.util.concurrent.AbstractIdleService;
+import com.google.common.util.concurrent.Service;
+import com.google.gson.JsonObject;
+import org.junit.Assert;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+/**
+ *
+ */
+public class ControllerTest {
+
+  private static final Logger LOG = LoggerFactory.getLogger(ControllerTest.class);
+
+  @Test
+  public void testController() throws ExecutionException, InterruptedException, TimeoutException {
+    InMemoryZKServer zkServer = InMemoryZKServer.builder().build();
+    zkServer.startAndWait();
+
+    LOG.info("ZKServer: " + zkServer.getConnectionStr());
+
+    try {
+      RunId runId = RunIds.generate();
+      ZKClientService zkClientService = ZKClientService.Builder.of(zkServer.getConnectionStr()).build();
+      zkClientService.startAndWait();
+
+      Service service = createService(zkClientService, runId);
+      service.startAndWait();
+
+      TwillController controller = getController(zkClientService, runId);
+      controller.sendCommand(Command.Builder.of("test").build()).get(2, TimeUnit.SECONDS);
+      controller.stop().get(2, TimeUnit.SECONDS);
+
+      Assert.assertEquals(ServiceController.State.TERMINATED, controller.state());
+
+      final CountDownLatch terminateLatch = new CountDownLatch(1);
+      service.addListener(new ServiceListenerAdapter() {
+        @Override
+        public void terminated(Service.State from) {
+          terminateLatch.countDown();
+        }
+      }, Threads.SAME_THREAD_EXECUTOR);
+
+      Assert.assertTrue(service.state() == Service.State.TERMINATED || terminateLatch.await(2, TimeUnit.SECONDS));
+
+      zkClientService.stopAndWait();
+
+    } finally {
+      zkServer.stopAndWait();
+    }
+  }
+
+  // Test controller created before service starts.
+  @Test
+  public void testControllerBefore() throws InterruptedException {
+    InMemoryZKServer zkServer = InMemoryZKServer.builder().build();
+    zkServer.startAndWait();
+
+    LOG.info("ZKServer: " + zkServer.getConnectionStr());
+    try {
+      RunId runId = RunIds.generate();
+      ZKClientService zkClientService = ZKClientService.Builder.of(zkServer.getConnectionStr()).build();
+      zkClientService.startAndWait();
+
+      final CountDownLatch runLatch = new CountDownLatch(1);
+      final CountDownLatch stopLatch = new CountDownLatch(1);
+      TwillController controller = getController(zkClientService, runId);
+      controller.addListener(new ServiceListenerAdapter() {
+        @Override
+        public void running() {
+          runLatch.countDown();
+        }
+
+        @Override
+        public void terminated(Service.State from) {
+          stopLatch.countDown();
+        }
+      }, Threads.SAME_THREAD_EXECUTOR);
+
+      Service service = createService(zkClientService, runId);
+      service.start();
+
+      Assert.assertTrue(runLatch.await(2, TimeUnit.SECONDS));
+      Assert.assertFalse(stopLatch.await(2, TimeUnit.SECONDS));
+
+      service.stop();
+
+      Assert.assertTrue(stopLatch.await(2, TimeUnit.SECONDS));
+
+    } finally {
+      zkServer.stopAndWait();
+    }
+  }
+
+  // Test that the controller listener receives the first state change without a state transition from the service
+  @Test
+  public void testControllerListener() throws InterruptedException {
+    InMemoryZKServer zkServer = InMemoryZKServer.builder().build();
+    zkServer.startAndWait();
+
+    LOG.info("ZKServer: " + zkServer.getConnectionStr());
+    try {
+      RunId runId = RunIds.generate();
+      ZKClientService zkClientService = ZKClientService.Builder.of(zkServer.getConnectionStr()).build();
+      zkClientService.startAndWait();
+
+      Service service = createService(zkClientService, runId);
+      service.startAndWait();
+
+      final CountDownLatch runLatch = new CountDownLatch(1);
+      TwillController controller = getController(zkClientService, runId);
+      controller.addListener(new ServiceListenerAdapter() {
+        @Override
+        public void running() {
+          runLatch.countDown();
+        }
+      }, Threads.SAME_THREAD_EXECUTOR);
+
+      Assert.assertTrue(runLatch.await(2, TimeUnit.SECONDS));
+
+      service.stopAndWait();
+
+      zkClientService.stopAndWait();
+    } finally {
+      zkServer.stopAndWait();
+    }
+  }
+
+  private Service createService(ZKClient zkClient, RunId runId) {
+    return new ZKServiceDecorator(
+      zkClient, runId, Suppliers.ofInstance(new JsonObject()), new AbstractIdleService() {
+
+      @Override
+      protected void startUp() throws Exception {
+        LOG.info("Start");
+      }
+
+      @Override
+      protected void shutDown() throws Exception {
+        LOG.info("Stop");
+      }
+    });
+  }
+
+  private TwillController getController(ZKClient zkClient, RunId runId) {
+    TwillController controller = new AbstractTwillController(runId, zkClient, ImmutableList.<LogHandler>of()) {
+
+      @Override
+      public void kill() {
+        // No-op
+      }
+
+      @Override
+      protected void instanceNodeUpdated(NodeData nodeData) {
+        // No-op
+      }
+
+      @Override
+      protected void stateNodeUpdated(StateNode stateNode) {
+        // No-op
+      }
+
+      @Override
+      public ResourceReport getResourceReport() {
+        return null;
+      }
+    };
+    controller.startAndWait();
+    return controller;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/test/java/org/apache/twill/internal/state/MessageCodecTest.java
----------------------------------------------------------------------
diff --git a/twill-core/src/test/java/org/apache/twill/internal/state/MessageCodecTest.java b/twill-core/src/test/java/org/apache/twill/internal/state/MessageCodecTest.java
new file mode 100644
index 0000000..d267cf8
--- /dev/null
+++ b/twill-core/src/test/java/org/apache/twill/internal/state/MessageCodecTest.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.state;
+
+import org.apache.twill.api.Command;
+import com.google.common.collect.ImmutableMap;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.util.Map;
+
+/**
+ *
+ */
+public class MessageCodecTest {
+
+  @Test
+  public void testCodec() {
+    Message message = MessageCodec.decode(MessageCodec.encode(new Message() {
+
+      @Override
+      public Type getType() {
+        return Type.SYSTEM;
+      }
+
+      @Override
+      public Scope getScope() {
+        return Scope.APPLICATION;
+      }
+
+      @Override
+      public String getRunnableName() {
+        return null;
+      }
+
+      @Override
+      public Command getCommand() {
+        return new Command() {
+          @Override
+          public String getCommand() {
+            return "stop";
+          }
+
+          @Override
+          public Map<String, String> getOptions() {
+            return ImmutableMap.of("timeout", "1", "timeoutUnit", "SECONDS");
+          }
+        };
+      }
+    }));
+
+    Assert.assertEquals(Message.Type.SYSTEM, message.getType());
+    Assert.assertEquals(Message.Scope.APPLICATION, message.getScope());
+    Assert.assertNull(message.getRunnableName());
+    Assert.assertEquals("stop", message.getCommand().getCommand());
+    Assert.assertEquals(ImmutableMap.of("timeout", "1", "timeoutUnit", "SECONDS"), message.getCommand().getOptions());
+  }
+
+  @Test
+  public void testFailureDecode() {
+    Assert.assertNull(MessageCodec.decode("".getBytes()));
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/test/java/org/apache/twill/internal/state/ZKServiceDecoratorTest.java
----------------------------------------------------------------------
diff --git a/twill-core/src/test/java/org/apache/twill/internal/state/ZKServiceDecoratorTest.java b/twill-core/src/test/java/org/apache/twill/internal/state/ZKServiceDecoratorTest.java
new file mode 100644
index 0000000..47d8562
--- /dev/null
+++ b/twill-core/src/test/java/org/apache/twill/internal/state/ZKServiceDecoratorTest.java
@@ -0,0 +1,157 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.state;
+
+import org.apache.twill.api.RunId;
+import org.apache.twill.internal.RunIds;
+import org.apache.twill.internal.ZKServiceDecorator;
+import org.apache.twill.internal.zookeeper.InMemoryZKServer;
+import org.apache.twill.zookeeper.NodeData;
+import org.apache.twill.zookeeper.ZKClientService;
+import org.apache.twill.zookeeper.ZKClients;
+import com.google.common.base.Charsets;
+import com.google.common.base.Joiner;
+import com.google.common.base.Preconditions;
+import com.google.common.base.Suppliers;
+import com.google.common.util.concurrent.AbstractIdleService;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.Service;
+import com.google.gson.Gson;
+import com.google.gson.JsonElement;
+import com.google.gson.JsonObject;
+import org.apache.zookeeper.CreateMode;
+import org.apache.zookeeper.WatchedEvent;
+import org.apache.zookeeper.Watcher;
+import org.apache.zookeeper.data.Stat;
+import org.junit.Assert;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicReference;
+
+/**
+ *
+ */
+public class ZKServiceDecoratorTest {
+
+  private static final Logger LOG = LoggerFactory.getLogger(ZKServiceDecoratorTest.class);
+
+  @Test
+  public void testStateTransition() throws InterruptedException, ExecutionException, TimeoutException {
+    InMemoryZKServer zkServer = InMemoryZKServer.builder().build();
+    zkServer.startAndWait();
+
+    try {
+      final String namespace = Joiner.on('/').join("/twill", RunIds.generate(), "runnables", "Runner1");
+
+      final ZKClientService zkClient = ZKClientService.Builder.of(zkServer.getConnectionStr()).build();
+      zkClient.startAndWait();
+      zkClient.create(namespace, null, CreateMode.PERSISTENT).get();
+
+      try {
+        JsonObject content = new JsonObject();
+        content.addProperty("containerId", "container-123");
+        content.addProperty("host", "localhost");
+
+        RunId runId = RunIds.generate();
+        final Semaphore semaphore = new Semaphore(0);
+        ZKServiceDecorator service = new ZKServiceDecorator(ZKClients.namespace(zkClient, namespace),
+                                                            runId, Suppliers.ofInstance(content),
+                                                            new AbstractIdleService() {
+          @Override
+          protected void startUp() throws Exception {
+            Preconditions.checkArgument(semaphore.tryAcquire(5, TimeUnit.SECONDS), "Fail to start");
+          }
+
+          @Override
+          protected void shutDown() throws Exception {
+            Preconditions.checkArgument(semaphore.tryAcquire(5, TimeUnit.SECONDS), "Fail to stop");
+          }
+        });
+
+        final String runnablePath = namespace + "/" + runId.getId();
+        final AtomicReference<String> stateMatch = new AtomicReference<String>("STARTING");
+        watchDataChange(zkClient, runnablePath + "/state", semaphore, stateMatch);
+        Assert.assertEquals(Service.State.RUNNING, service.start().get(5, TimeUnit.SECONDS));
+
+        stateMatch.set("STOPPING");
+        Assert.assertEquals(Service.State.TERMINATED, service.stop().get(5, TimeUnit.SECONDS));
+
+      } finally {
+        zkClient.stopAndWait();
+      }
+    } finally {
+      zkServer.stopAndWait();
+    }
+  }
+
+  private void watchDataChange(final ZKClientService zkClient, final String path,
+                               final Semaphore semaphore, final AtomicReference<String> stateMatch) {
+    Futures.addCallback(zkClient.getData(path, new Watcher() {
+      @Override
+      public void process(WatchedEvent event) {
+        if (event.getType() == Event.EventType.NodeDataChanged) {
+          watchDataChange(zkClient, path, semaphore, stateMatch);
+        }
+      }
+    }), new FutureCallback<NodeData>() {
+      @Override
+      public void onSuccess(NodeData result) {
+        String content = new String(result.getData(), Charsets.UTF_8);
+        JsonObject json = new Gson().fromJson(content, JsonElement.class).getAsJsonObject();
+        if (stateMatch.get().equals(json.get("state").getAsString())) {
+          semaphore.release();
+        }
+      }
+
+      @Override
+      public void onFailure(Throwable t) {
+        exists();
+      }
+
+      private void exists() {
+        Futures.addCallback(zkClient.exists(path, new Watcher() {
+          @Override
+          public void process(WatchedEvent event) {
+            if (event.getType() == Event.EventType.NodeCreated) {
+              watchDataChange(zkClient, path, semaphore, stateMatch);
+            }
+          }
+        }), new FutureCallback<Stat>() {
+          @Override
+          public void onSuccess(Stat result) {
+            if (result != null) {
+              watchDataChange(zkClient, path, semaphore, stateMatch);
+            }
+          }
+
+          @Override
+          public void onFailure(Throwable t) {
+            LOG.error(t.getMessage(), t);
+          }
+        });
+      }
+    });
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/test/java/org/apache/twill/internal/utils/ApplicationBundlerTest.java
----------------------------------------------------------------------
diff --git a/twill-core/src/test/java/org/apache/twill/internal/utils/ApplicationBundlerTest.java b/twill-core/src/test/java/org/apache/twill/internal/utils/ApplicationBundlerTest.java
new file mode 100644
index 0000000..508cadb
--- /dev/null
+++ b/twill-core/src/test/java/org/apache/twill/internal/utils/ApplicationBundlerTest.java
@@ -0,0 +1,113 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.utils;
+
+import org.apache.twill.filesystem.LocalLocationFactory;
+import org.apache.twill.filesystem.Location;
+import org.apache.twill.internal.ApplicationBundler;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
+import com.google.common.io.ByteStreams;
+import com.google.common.io.Files;
+import org.junit.Assert;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.net.URLClassLoader;
+import java.util.List;
+import java.util.jar.JarEntry;
+import java.util.jar.JarInputStream;
+
+/**
+ *
+ */
+public class ApplicationBundlerTest {
+
+  @Rule
+  public TemporaryFolder tmpDir = new TemporaryFolder();
+
+  @Test
+  public void testFindDependencies() throws IOException, ClassNotFoundException {
+    Location location = new LocalLocationFactory(tmpDir.newFolder()).create("test.jar");
+
+    // Create a jar file by tracing dependencies
+    ApplicationBundler bundler = new ApplicationBundler(ImmutableList.<String>of());
+    bundler.createBundle(location, ApplicationBundler.class);
+
+    File targetDir = tmpDir.newFolder();
+    unjar(new File(location.toURI()), targetDir);
+
+    // Load the class back; it should be loaded by the custom classloader
+    ClassLoader classLoader = createClassLoader(targetDir);
+    Class<?> clz = classLoader.loadClass(ApplicationBundler.class.getName());
+    Assert.assertSame(classLoader, clz.getClassLoader());
+
+    // For system classes, they shouldn't be packaged, hence loaded by different classloader.
+    clz = classLoader.loadClass(Object.class.getName());
+    Assert.assertNotSame(classLoader, clz.getClassLoader());
+  }
+
+  private void unjar(File jarFile, File targetDir) throws IOException {
+    JarInputStream jarInput = new JarInputStream(new FileInputStream(jarFile));
+    try {
+      JarEntry jarEntry = jarInput.getNextJarEntry();
+      while (jarEntry != null) {
+        File target = new File(targetDir, jarEntry.getName());
+        if (jarEntry.isDirectory()) {
+          target.mkdirs();
+        } else {
+          target.getParentFile().mkdirs();
+          ByteStreams.copy(jarInput, Files.newOutputStreamSupplier(target));
+        }
+
+        jarEntry = jarInput.getNextJarEntry();
+      }
+    } finally {
+      jarInput.close();
+    }
+  }
+
+  private ClassLoader createClassLoader(File dir) throws MalformedURLException {
+    List<URL> urls = Lists.newArrayList();
+    urls.add(new File(dir, "classes").toURI().toURL());
+    File[] libFiles = new File(dir, "lib").listFiles();
+    if (libFiles != null) {
+      for (File file : libFiles) {
+        urls.add(file.toURI().toURL());
+      }
+    }
+    return new URLClassLoader(urls.toArray(new URL[0])) {
+      @Override
+      protected synchronized Class<?> loadClass(String name, boolean resolve) throws ClassNotFoundException {
+        // Load class from the given URLs first before delegating to parent.
+        try {
+          return super.findClass(name);
+        } catch (ClassNotFoundException e) {
+          ClassLoader parent = getParent();
+          return parent == null ? ClassLoader.getSystemClassLoader().loadClass(name) : parent.loadClass(name);
+        }
+      }
+    };
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/test/java/org/apache/twill/kafka/client/KafkaTest.java
----------------------------------------------------------------------
diff --git a/twill-core/src/test/java/org/apache/twill/kafka/client/KafkaTest.java b/twill-core/src/test/java/org/apache/twill/kafka/client/KafkaTest.java
new file mode 100644
index 0000000..40fc3ed
--- /dev/null
+++ b/twill-core/src/test/java/org/apache/twill/kafka/client/KafkaTest.java
@@ -0,0 +1,220 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.kafka.client;
+
+import org.apache.twill.common.Services;
+import org.apache.twill.internal.kafka.EmbeddedKafkaServer;
+import org.apache.twill.internal.kafka.client.Compression;
+import org.apache.twill.internal.kafka.client.SimpleKafkaClient;
+import org.apache.twill.internal.utils.Networks;
+import org.apache.twill.internal.zookeeper.InMemoryZKServer;
+import org.apache.twill.zookeeper.ZKClientService;
+import com.google.common.base.Charsets;
+import com.google.common.base.Preconditions;
+import com.google.common.io.ByteStreams;
+import com.google.common.io.Files;
+import com.google.common.util.concurrent.Futures;
+import org.apache.commons.compress.archivers.ArchiveEntry;
+import org.apache.commons.compress.archivers.ArchiveException;
+import org.apache.commons.compress.archivers.ArchiveInputStream;
+import org.apache.commons.compress.archivers.ArchiveStreamFactory;
+import org.apache.commons.compress.compressors.CompressorException;
+import org.apache.commons.compress.compressors.CompressorStreamFactory;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.Iterator;
+import java.util.Properties;
+import java.util.concurrent.TimeUnit;
+
+/**
+ *
+ */
+public class KafkaTest {
+
+  private static final Logger LOG = LoggerFactory.getLogger(KafkaTest.class);
+
+  @ClassRule
+  public static final TemporaryFolder TMP_FOLDER = new TemporaryFolder();
+
+  private static InMemoryZKServer zkServer;
+  private static EmbeddedKafkaServer kafkaServer;
+  private static ZKClientService zkClientService;
+  private static KafkaClient kafkaClient;
+
+  @BeforeClass
+  public static void init() throws Exception {
+    zkServer = InMemoryZKServer.builder().setDataDir(TMP_FOLDER.newFolder()).build();
+    zkServer.startAndWait();
+
+    // Extract the kafka.tgz and start the kafka server
+    kafkaServer = new EmbeddedKafkaServer(extractKafka(), generateKafkaConfig(zkServer.getConnectionStr()));
+    kafkaServer.startAndWait();
+
+    zkClientService = ZKClientService.Builder.of(zkServer.getConnectionStr()).build();
+
+    kafkaClient = new SimpleKafkaClient(zkClientService);
+    Services.chainStart(zkClientService, kafkaClient).get();
+  }
+
+  @AfterClass
+  public static void finish() throws Exception {
+    Services.chainStop(kafkaClient, zkClientService).get();
+    kafkaServer.stopAndWait();
+    zkServer.stopAndWait();
+  }
+
+  @Test
+  public void testKafkaClient() throws Exception {
+    String topic = "testClient";
+
+    Thread t1 = createPublishThread(kafkaClient, topic, Compression.GZIP, "GZIP Testing message", 10);
+    Thread t2 = createPublishThread(kafkaClient, topic, Compression.NONE, "Testing message", 10);
+
+    t1.start();
+    t2.start();
+
+    Thread t3 = createPublishThread(kafkaClient, topic, Compression.SNAPPY, "Snappy Testing message", 10);
+    t2.join();
+    t3.start();
+
+    Iterator<FetchedMessage> consumer = kafkaClient.consume(topic, 0, 0, 1048576);
+    int count = 0;
+    long startTime = System.nanoTime();
+    while (count < 30 && consumer.hasNext() && secondsPassed(startTime, TimeUnit.NANOSECONDS) < 5) {
+      LOG.info(Charsets.UTF_8.decode(consumer.next().getBuffer()).toString());
+      count++;
+    }
+
+    Assert.assertEquals(30, count);
+  }
+
+  @Test (timeout = 10000)
+  public void testOffset() throws Exception {
+    String topic = "testOffset";
+
+    // Initial earliest offset should be 0.
+    long[] offsets = kafkaClient.getOffset(topic, 0, -2, 10).get();
+    Assert.assertArrayEquals(new long[]{0L}, offsets);
+
+    // Publish some messages
+    Thread publishThread = createPublishThread(kafkaClient, topic, Compression.NONE, "Testing", 2000);
+    publishThread.start();
+    publishThread.join();
+
+    // Fetch earliest offset, should still be 0.
+    offsets = kafkaClient.getOffset(topic, 0, -2, 10).get();
+    Assert.assertArrayEquals(new long[]{0L}, offsets);
+
+    // Fetch latest offset
+    offsets = kafkaClient.getOffset(topic, 0, -1, 10).get();
+    Iterator<FetchedMessage> consumer = kafkaClient.consume(topic, 0, offsets[0], 1048576);
+
+    // Publish one more message, the consumer should see the new message being published.
+    publishThread = createPublishThread(kafkaClient, topic, Compression.NONE, "Testing", 1, 3000);
+    publishThread.start();
+    publishThread.join();
+
+    // Should see the last message being published.
+    Assert.assertTrue(consumer.hasNext());
+    Assert.assertEquals("3000 Testing", Charsets.UTF_8.decode(consumer.next().getBuffer()).toString());
+  }
+
+  private Thread createPublishThread(final KafkaClient kafkaClient, final String topic,
+                                     final Compression compression, final String message, final int count) {
+    return createPublishThread(kafkaClient, topic, compression, message, count, 0);
+  }
+
+  private Thread createPublishThread(final KafkaClient kafkaClient, final String topic, final Compression compression,
+                                     final String message, final int count, final int base) {
+    return new Thread() {
+      public void run() {
+        PreparePublish preparePublish = kafkaClient.preparePublish(topic, compression);
+        for (int i = 0; i < count; i++) {
+          preparePublish.add(((base + i) + " " + message).getBytes(Charsets.UTF_8), 0);
+        }
+        Futures.getUnchecked(preparePublish.publish());
+      }
+    };
+  }
+
+  private long secondsPassed(long startTime, TimeUnit startUnit) {
+    return TimeUnit.SECONDS.convert(System.nanoTime() - TimeUnit.NANOSECONDS.convert(startTime, startUnit),
+                                    TimeUnit.NANOSECONDS);
+  }
+
+  private static File extractKafka() throws IOException, ArchiveException, CompressorException {
+    File kafkaExtract = TMP_FOLDER.newFolder();
+    InputStream kafkaResource = KafkaTest.class.getClassLoader().getResourceAsStream("kafka-0.7.2.tgz");
+    ArchiveInputStream archiveInput = new ArchiveStreamFactory()
+      .createArchiveInputStream(ArchiveStreamFactory.TAR,
+                                new CompressorStreamFactory()
+                                  .createCompressorInputStream(CompressorStreamFactory.GZIP, kafkaResource));
+
+    try {
+      ArchiveEntry entry = archiveInput.getNextEntry();
+      while (entry != null) {
+        File file = new File(kafkaExtract, entry.getName());
+        if (entry.isDirectory()) {
+          file.mkdirs();
+        } else {
+          ByteStreams.copy(archiveInput, Files.newOutputStreamSupplier(file));
+        }
+        entry = archiveInput.getNextEntry();
+      }
+    } finally {
+      archiveInput.close();
+    }
+    return kafkaExtract;
+  }
+
+  private static Properties generateKafkaConfig(String zkConnectStr) throws IOException {
+    int port = Networks.getRandomPort();
+    Preconditions.checkState(port > 0, "Failed to get random port.");
+
+    Properties prop = new Properties();
+    prop.setProperty("log.dir", TMP_FOLDER.newFolder().getAbsolutePath());
+    prop.setProperty("zk.connect", zkConnectStr);
+    prop.setProperty("num.threads", "8");
+    prop.setProperty("port", Integer.toString(port));
+    prop.setProperty("log.flush.interval", "1000");
+    prop.setProperty("max.socket.request.bytes", "104857600");
+    prop.setProperty("log.cleanup.interval.mins", "1");
+    prop.setProperty("log.default.flush.scheduler.interval.ms", "1000");
+    prop.setProperty("zk.connectiontimeout.ms", "1000000");
+    prop.setProperty("socket.receive.buffer", "1048576");
+    prop.setProperty("enable.zookeeper", "true");
+    prop.setProperty("log.retention.hours", "24");
+    prop.setProperty("brokerid", "0");
+    prop.setProperty("socket.send.buffer", "1048576");
+    prop.setProperty("num.partitions", "1");
+    // Use a really small file size to force some flush to happen
+    prop.setProperty("log.file.size", "1024");
+    prop.setProperty("log.default.flush.interval.ms", "1000");
+    return prop;
+  }
+}
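
The time values -2 and -1 passed to getOffset in testOffset follow the Kafka 0.7 convention for "earliest" and "latest" offsets. A minimal sketch, assuming only the KafkaClient API added in this patch, of wrapping those magic numbers behind named helpers (the class and method names below are illustrative, not part of the patch):

  import org.apache.twill.kafka.client.KafkaClient;

  // Illustrative helper only (not part of this patch); EARLIEST/LATEST mirror the -2/-1
  // "time" values passed to getOffset() in testOffset above.
  final class KafkaOffsets {
    static final long EARLIEST = -2L;
    static final long LATEST = -1L;

    private KafkaOffsets() { }

    // First available offset of the given topic partition.
    static long earliest(KafkaClient client, String topic, int partition) throws Exception {
      return client.getOffset(topic, partition, EARLIEST, 1).get()[0];
    }

    // Offset immediately after the last published message.
    static long latest(KafkaClient client, String topic, int partition) throws Exception {
      return client.getOffset(topic, partition, LATEST, 1).get()[0];
    }
  }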


[17/28] Making maven site works.

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/kafka/client/GZipMessageSetEncoder.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/kafka/client/GZipMessageSetEncoder.java b/twill-core/src/main/java/org/apache/twill/internal/kafka/client/GZipMessageSetEncoder.java
new file mode 100644
index 0000000..daa0c2c
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/kafka/client/GZipMessageSetEncoder.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.kafka.client;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.zip.GZIPOutputStream;
+
+/**
+ * A {@link MessageSetEncoder} that compresses the message set using GZIP.
+ */
+final class GZipMessageSetEncoder extends AbstractCompressedMessageSetEncoder {
+
+  GZipMessageSetEncoder() {
+    super(Compression.GZIP);
+  }
+
+  @Override
+  protected OutputStream createCompressedStream(OutputStream os) throws IOException {
+    return new GZIPOutputStream(os);
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/kafka/client/IdentityMessageSetEncoder.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/kafka/client/IdentityMessageSetEncoder.java b/twill-core/src/main/java/org/apache/twill/internal/kafka/client/IdentityMessageSetEncoder.java
new file mode 100644
index 0000000..51dc746
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/kafka/client/IdentityMessageSetEncoder.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.kafka.client;
+
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.jboss.netty.buffer.ChannelBuffers;
+
+/**
+ * A pass-through {@link MessageSetEncoder}.
+ */
+final class IdentityMessageSetEncoder extends AbstractMessageSetEncoder {
+
+  private ChannelBuffer messageSets = ChannelBuffers.EMPTY_BUFFER;
+
+  @Override
+  public MessageSetEncoder add(ChannelBuffer payload) {
+    messageSets = ChannelBuffers.wrappedBuffer(messageSets, encodePayload(payload));
+    return this;
+  }
+
+  @Override
+  public ChannelBuffer finish() {
+    ChannelBuffer buf = prefixLength(messageSets);
+    messageSets = ChannelBuffers.EMPTY_BUFFER;
+    return buf;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/kafka/client/KafkaBrokerCache.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/kafka/client/KafkaBrokerCache.java b/twill-core/src/main/java/org/apache/twill/internal/kafka/client/KafkaBrokerCache.java
new file mode 100644
index 0000000..f2bb815
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/kafka/client/KafkaBrokerCache.java
@@ -0,0 +1,326 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.kafka.client;
+
+import org.apache.twill.common.Threads;
+import org.apache.twill.zookeeper.NodeChildren;
+import org.apache.twill.zookeeper.NodeData;
+import org.apache.twill.zookeeper.ZKClient;
+import com.google.common.base.Charsets;
+import com.google.common.base.Function;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.ImmutableSortedMap;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+import com.google.common.util.concurrent.AbstractIdleService;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.zookeeper.KeeperException;
+import org.apache.zookeeper.WatchedEvent;
+import org.apache.zookeeper.Watcher;
+import org.apache.zookeeper.data.Stat;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.net.InetSocketAddress;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.Set;
+import java.util.SortedMap;
+
+/**
+ * A Service to cache kafka broker information by subscribing to ZooKeeper.
+ */
+final class KafkaBrokerCache extends AbstractIdleService {
+
+  private static final Logger LOG = LoggerFactory.getLogger(KafkaBrokerCache.class);
+
+  private static final String BROKERS_PATH = "/brokers";
+
+  private final ZKClient zkClient;
+  private final Map<String, InetSocketAddress> brokers;
+  // topicBrokers maps topic -> partition size -> set of broker ids
+  private final Map<String, SortedMap<Integer, Set<String>>> topicBrokers;
+  private final Runnable invokeGetBrokers = new Runnable() {
+    @Override
+    public void run() {
+      getBrokers();
+    }
+  };
+  private final Runnable invokeGetTopics = new Runnable() {
+    @Override
+    public void run() {
+      getTopics();
+    }
+  };
+
+  KafkaBrokerCache(ZKClient zkClient) {
+    this.zkClient = zkClient;
+    this.brokers = Maps.newConcurrentMap();
+    this.topicBrokers = Maps.newConcurrentMap();
+  }
+
+  @Override
+  protected void startUp() throws Exception {
+    getBrokers();
+    getTopics();
+  }
+
+  @Override
+  protected void shutDown() throws Exception {
+    // No-op
+  }
+
+  public int getPartitionSize(String topic) {
+    SortedMap<Integer, Set<String>> partitionBrokers = topicBrokers.get(topic);
+    if (partitionBrokers == null || partitionBrokers.isEmpty()) {
+      return 1;
+    }
+    return partitionBrokers.lastKey();
+  }
+
+  public TopicBroker getBrokerAddress(String topic, int partition) {
+    SortedMap<Integer, Set<String>> partitionBrokers = topicBrokers.get(topic);
+    if (partitionBrokers == null || partitionBrokers.isEmpty()) {
+      return pickRandomBroker(topic);
+    }
+
+    // If the requested partition is greater than supported partition size, randomly pick one
+    if (partition >= partitionBrokers.lastKey()) {
+      return pickRandomBroker(topic);
+    }
+
+    // Randomly pick a partition size and randomly pick a broker from it
+    Random random = new Random();
+    partitionBrokers = partitionBrokers.tailMap(partition + 1);
+    List<Integer> sizes = Lists.newArrayList(partitionBrokers.keySet());
+    Integer partitionSize = pickRandomItem(sizes, random);
+    List<String> ids = Lists.newArrayList(partitionBrokers.get(partitionSize));
+    InetSocketAddress address = brokers.get(pickRandomItem(ids, random));
+    return address == null ? pickRandomBroker(topic) : new TopicBroker(topic, address, partitionSize);
+  }
+
+  private TopicBroker pickRandomBroker(String topic) {
+    Map.Entry<String, InetSocketAddress> entry = Iterables.getFirst(brokers.entrySet(), null);
+    if (entry == null) {
+      return null;
+    }
+    InetSocketAddress address = entry.getValue();
+    return new TopicBroker(topic, address, 0);
+  }
+
+  private <T> T pickRandomItem(List<T> list, Random random) {
+    return list.get(random.nextInt(list.size()));
+  }
+
+  private void getBrokers() {
+    final String idsPath = BROKERS_PATH + "/ids";
+
+    Futures.addCallback(zkClient.getChildren(idsPath, new Watcher() {
+      @Override
+      public void process(WatchedEvent event) {
+        getBrokers();
+      }
+    }), new ExistsOnFailureFutureCallback<NodeChildren>(idsPath, invokeGetBrokers) {
+      @Override
+      public void onSuccess(NodeChildren result) {
+        Set<String> children = ImmutableSet.copyOf(result.getChildren());
+        for (String child : children) {
+          getBrokenData(idsPath + "/" + child, child);
+        }
+        // Remove all removed brokers
+        removeDiff(children, brokers);
+      }
+    });
+  }
+
+  private void getTopics() {
+    final String topicsPath = BROKERS_PATH + "/topics";
+    Futures.addCallback(zkClient.getChildren(topicsPath, new Watcher() {
+      @Override
+      public void process(WatchedEvent event) {
+        getTopics();
+      }
+    }), new ExistsOnFailureFutureCallback<NodeChildren>(topicsPath, invokeGetTopics) {
+      @Override
+      public void onSuccess(NodeChildren result) {
+        Set<String> children = ImmutableSet.copyOf(result.getChildren());
+
+        // Process new children
+        for (String topic : ImmutableSet.copyOf(Sets.difference(children, topicBrokers.keySet()))) {
+          getTopic(topicsPath + "/" + topic, topic);
+        }
+
+        // Remove old children
+        removeDiff(children, topicBrokers);
+      }
+    });
+  }
+
+  private void getBrokenData(String path, final String brokerId) {
+    Futures.addCallback(zkClient.getData(path), new FutureCallback<NodeData>() {
+      @Override
+      public void onSuccess(NodeData result) {
+        String data = new String(result.getData(), Charsets.UTF_8);
+        String hostPort = data.substring(data.indexOf(':') + 1);
+        int idx = hostPort.indexOf(':');
+        brokers.put(brokerId, new InetSocketAddress(hostPort.substring(0, idx),
+                                                    Integer.parseInt(hostPort.substring(idx + 1))));
+      }
+
+      @Override
+      public void onFailure(Throwable t) {
+        // No-op, the watch on the parent node will handle it.
+      }
+    });
+  }
+
+  private void getTopic(final String path, final String topic) {
+    Futures.addCallback(zkClient.getChildren(path, new Watcher() {
+      @Override
+      public void process(WatchedEvent event) {
+        // Other event types can either be ignored or are handled by the parent watcher
+        if (event.getType() == Event.EventType.NodeChildrenChanged) {
+          getTopic(path, topic);
+        }
+      }
+    }), new FutureCallback<NodeChildren>() {
+      @Override
+      public void onSuccess(NodeChildren result) {
+        List<String> children = result.getChildren();
+        final List<ListenableFuture<BrokerPartition>> futures = Lists.newArrayListWithCapacity(children.size());
+
+        // Fetch data from each broker node
+        for (final String brokerId : children) {
+          futures.add(Futures.transform(zkClient.getData(path + "/" + brokerId),
+                                        new Function<NodeData, BrokerPartition>() {
+            @Override
+            public BrokerPartition apply(NodeData input) {
+              return new BrokerPartition(brokerId, Integer.parseInt(new String(input.getData(), Charsets.UTF_8)));
+            }
+          }));
+        }
+
+        // When all fetching is done, build the partition size->broker map for this topic
+        Futures.successfulAsList(futures).addListener(new Runnable() {
+          @Override
+          public void run() {
+            Map<Integer, Set<String>> partitionBrokers = Maps.newHashMap();
+            for (ListenableFuture<BrokerPartition> future : futures) {
+              try {
+                BrokerPartition info = future.get();
+                Set<String> brokerSet = partitionBrokers.get(info.getPartitionSize());
+                if (brokerSet == null) {
+                  brokerSet = Sets.newHashSet();
+                  partitionBrokers.put(info.getPartitionSize(), brokerSet);
+                }
+                brokerSet.add(info.getBrokerId());
+              } catch (Exception e) {
+                // Exception is ignored, as it will be handled by parent watcher
+              }
+            }
+            topicBrokers.put(topic, ImmutableSortedMap.copyOf(partitionBrokers));
+          }
+        }, Threads.SAME_THREAD_EXECUTOR);
+      }
+
+      @Override
+      public void onFailure(Throwable t) {
+        // No-op. Failures are already handled by the parent watcher (e.g. a missing node shows up as a children change in the parent)
+      }
+    });
+  }
+
+  private <K, V> void removeDiff(Set<K> keys, Map<K, V> map) {
+    for (K key : ImmutableSet.copyOf(Sets.difference(map.keySet(), keys))) {
+      map.remove(key);
+    }
+  }
+
+  private abstract class ExistsOnFailureFutureCallback<V> implements FutureCallback<V> {
+
+    private final String path;
+    private final Runnable action;
+
+    protected ExistsOnFailureFutureCallback(String path, Runnable action) {
+      this.path = path;
+      this.action = action;
+    }
+
+    @Override
+    public final void onFailure(Throwable t) {
+      if (!isNotExists(t)) {
+        LOG.error("Fail to watch for kafka brokers: " + path, t);
+        return;
+      }
+
+      waitExists(path);
+    }
+
+    private boolean isNotExists(Throwable t) {
+      return ((t instanceof KeeperException) && ((KeeperException) t).code() == KeeperException.Code.NONODE);
+    }
+
+    private void waitExists(String path) {
+      LOG.info("Path " + path + " not exists. Watch for creation.");
+
+      // If the node doesn't exists, use the "exists" call to watch for node creation.
+      Futures.addCallback(zkClient.exists(path, new Watcher() {
+        @Override
+        public void process(WatchedEvent event) {
+          if (event.getType() == Event.EventType.NodeCreated || event.getType() == Event.EventType.NodeDeleted) {
+            action.run();
+          }
+        }
+      }), new FutureCallback<Stat>() {
+        @Override
+        public void onSuccess(Stat result) {
+          // If path exists, get children again, otherwise wait for watch to get triggered
+          if (result != null) {
+            action.run();
+          }
+        }
+        @Override
+        public void onFailure(Throwable t) {
+          action.run();
+        }
+      });
+    }
+  }
+
+  private static final class BrokerPartition {
+    private final String brokerId;
+    private final int partitionSize;
+
+    private BrokerPartition(String brokerId, int partitionSize) {
+      this.brokerId = brokerId;
+      this.partitionSize = partitionSize;
+    }
+
+    public String getBrokerId() {
+      return brokerId;
+    }
+
+    public int getPartitionSize() {
+      return partitionSize;
+    }
+  }
+}
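
KafkaBrokerCache is package-private and only used by SimpleKafkaClient later in this patch; a rough lifecycle sketch, assuming an already-started ZKClient named zkClient (the topic name is illustrative):

  KafkaBrokerCache cache = new KafkaBrokerCache(zkClient);  // zkClient: a started ZKClient
  cache.startAndWait();                                     // subscribes to /brokers in ZooKeeper
  int partitions = cache.getPartitionSize("logs");          // falls back to 1 for unknown topics
  TopicBroker broker = cache.getBrokerAddress("logs", 0);   // may be null until a broker registers
  cache.stopAndWait();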

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/kafka/client/KafkaRequest.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/kafka/client/KafkaRequest.java b/twill-core/src/main/java/org/apache/twill/internal/kafka/client/KafkaRequest.java
new file mode 100644
index 0000000..7b43f8a
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/kafka/client/KafkaRequest.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.kafka.client;
+
+import org.jboss.netty.buffer.ChannelBuffer;
+
+/**
+ *
+ */
+final class KafkaRequest {
+
+  public enum Type {
+    PRODUCE(0),
+    FETCH(1),
+    MULTI_FETCH(2),
+    MULTI_PRODUCE(3),
+    OFFSETS(4);
+
+    private final short id;
+
+    private Type(int id) {
+      this.id = (short) id;
+    }
+
+    public short getId() {
+      return id;
+    }
+  }
+
+  private final Type type;
+  private final String topic;
+  private final int partition;
+  private final ChannelBuffer body;
+  private final ResponseHandler responseHandler;
+
+
+  public static KafkaRequest createProduce(String topic, int partition, ChannelBuffer body) {
+    return new KafkaRequest(Type.PRODUCE, topic, partition, body, ResponseHandler.NO_OP);
+  }
+
+  public static KafkaRequest createFetch(String topic, int partition, ChannelBuffer body, ResponseHandler handler) {
+    return new KafkaRequest(Type.FETCH, topic, partition, body, handler);
+  }
+
+  public static KafkaRequest createOffsets(String topic, int partition, ChannelBuffer body, ResponseHandler handler) {
+    return new KafkaRequest(Type.OFFSETS, topic, partition, body, handler);
+  }
+
+  private KafkaRequest(Type type, String topic, int partition, ChannelBuffer body, ResponseHandler responseHandler) {
+    this.type = type;
+    this.topic = topic;
+    this.partition = partition;
+    this.body = body;
+    this.responseHandler = responseHandler;
+  }
+
+  Type getType() {
+    return type;
+  }
+
+  String getTopic() {
+    return topic;
+  }
+
+  int getPartition() {
+    return partition;
+  }
+
+  ChannelBuffer getBody() {
+    return body;
+  }
+
+  ResponseHandler getResponseHandler() {
+    return responseHandler;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/kafka/client/KafkaRequestEncoder.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/kafka/client/KafkaRequestEncoder.java b/twill-core/src/main/java/org/apache/twill/internal/kafka/client/KafkaRequestEncoder.java
new file mode 100644
index 0000000..ef78c76
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/kafka/client/KafkaRequestEncoder.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.kafka.client;
+
+import com.google.common.base.Charsets;
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.jboss.netty.buffer.ChannelBuffers;
+import org.jboss.netty.channel.Channel;
+import org.jboss.netty.channel.ChannelHandlerContext;
+import org.jboss.netty.handler.codec.oneone.OneToOneEncoder;
+
+import java.nio.ByteBuffer;
+
+/**
+ *
+ */
+final class KafkaRequestEncoder extends OneToOneEncoder {
+
+  @Override
+  protected Object encode(ChannelHandlerContext ctx, Channel channel, Object msg) throws Exception {
+    if (!(msg instanceof KafkaRequest)) {
+      return msg;
+    }
+    KafkaRequest req = (KafkaRequest) msg;
+    ByteBuffer topic = Charsets.UTF_8.encode(req.getTopic());
+
+    ChannelBuffer buffer = ChannelBuffers.dynamicBuffer(16 + topic.remaining() + req.getBody().readableBytes());
+    int writerIdx = buffer.writerIndex();
+    buffer.writerIndex(writerIdx + 4);    // Reserves 4 bytes for message length
+
+    // Write out <REQUEST_TYPE>, <TOPIC_LENGTH>, <TOPIC>, <PARTITION>
+    buffer.writeShort(req.getType().getId());
+    buffer.writeShort(topic.remaining());
+    buffer.writeBytes(topic);
+    buffer.writeInt(req.getPartition());
+
+    // Write out the size of the whole buffer (excluding the size field) at the beginning
+    buffer.setInt(writerIdx, buffer.readableBytes() - 4 + req.getBody().readableBytes());
+
+    ChannelBuffer buf = ChannelBuffers.wrappedBuffer(buffer, req.getBody());
+    buf = buf.readBytes(buf.readableBytes());
+
+    return buf;
+  }
+}
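
The encoder above emits the Kafka 0.7 request frame. The resulting layout, reconstructed from the writeShort/writeInt calls and shown as Java comments purely as a reading aid (byte widths follow from the code, not from a spec):

  // <LENGTH>        4 bytes  - size of everything that follows (patched in via buffer.setInt(writerIdx, ...))
  // <REQUEST_TYPE>  2 bytes  - KafkaRequest.Type id (PRODUCE=0, FETCH=1, MULTI_FETCH=2, MULTI_PRODUCE=3, OFFSETS=4)
  // <TOPIC_LENGTH>  2 bytes  - length of the UTF-8 encoded topic name
  // <TOPIC>         n bytes  - UTF-8 topic name
  // <PARTITION>     4 bytes  - partition number
  // <BODY>          m bytes  - request-specific payload (message set, fetch offset and max size, ...)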

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/kafka/client/KafkaRequestSender.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/kafka/client/KafkaRequestSender.java b/twill-core/src/main/java/org/apache/twill/internal/kafka/client/KafkaRequestSender.java
new file mode 100644
index 0000000..fbc552c
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/kafka/client/KafkaRequestSender.java
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.kafka.client;
+
+/**
+ *
+ */
+interface KafkaRequestSender {
+
+  void send(KafkaRequest request);
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/kafka/client/KafkaResponse.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/kafka/client/KafkaResponse.java b/twill-core/src/main/java/org/apache/twill/internal/kafka/client/KafkaResponse.java
new file mode 100644
index 0000000..68c1bd8
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/kafka/client/KafkaResponse.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.kafka.client;
+
+import org.apache.twill.kafka.client.FetchException;
+import org.jboss.netty.buffer.ChannelBuffer;
+
+/**
+ *
+ */
+final class KafkaResponse {
+
+  private final FetchException.ErrorCode errorCode;
+  private final ChannelBuffer body;
+  private final int size;
+
+  KafkaResponse(FetchException.ErrorCode errorCode, ChannelBuffer body, int size) {
+    this.errorCode = errorCode;
+    this.body = body;
+    this.size = size;
+  }
+
+  public int getSize() {
+    return size;
+  }
+
+  public FetchException.ErrorCode getErrorCode() {
+    return errorCode;
+  }
+
+  public ChannelBuffer getBody() {
+    return body;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/kafka/client/KafkaResponseDispatcher.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/kafka/client/KafkaResponseDispatcher.java b/twill-core/src/main/java/org/apache/twill/internal/kafka/client/KafkaResponseDispatcher.java
new file mode 100644
index 0000000..47f70ce
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/kafka/client/KafkaResponseDispatcher.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.kafka.client;
+
+import org.jboss.netty.channel.ChannelHandlerContext;
+import org.jboss.netty.channel.ExceptionEvent;
+import org.jboss.netty.channel.MessageEvent;
+import org.jboss.netty.channel.SimpleChannelHandler;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.net.SocketException;
+import java.nio.channels.ClosedChannelException;
+
+/**
+ *
+ */
+final class KafkaResponseDispatcher extends SimpleChannelHandler {
+
+  private static final Logger LOG = LoggerFactory.getLogger(KafkaResponseDispatcher.class);
+
+  @Override
+  public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) throws Exception {
+    Object attachment = ctx.getAttachment();
+    if (e.getMessage() instanceof KafkaResponse && attachment instanceof ResponseHandler) {
+      ((ResponseHandler) attachment).received((KafkaResponse) e.getMessage());
+    } else {
+      super.messageReceived(ctx, e);
+    }
+  }
+
+  @Override
+  public void writeRequested(ChannelHandlerContext ctx, MessageEvent e) throws Exception {
+    if (e.getMessage() instanceof KafkaRequest) {
+      ctx.setAttachment(((KafkaRequest) e.getMessage()).getResponseHandler());
+    }
+    super.writeRequested(ctx, e);
+  }
+
+  @Override
+  public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e) throws Exception {
+    if (e.getCause() instanceof ClosedChannelException || e.getCause() instanceof SocketException) {
+      // No need to log for socket exception as the client has logic to retry.
+      return;
+    }
+    LOG.warn("Exception caught in kafka client connection.", e.getCause());
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/kafka/client/KafkaResponseHandler.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/kafka/client/KafkaResponseHandler.java b/twill-core/src/main/java/org/apache/twill/internal/kafka/client/KafkaResponseHandler.java
new file mode 100644
index 0000000..5251e65
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/kafka/client/KafkaResponseHandler.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.kafka.client;
+
+import org.apache.twill.kafka.client.FetchException;
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.jboss.netty.channel.ChannelHandlerContext;
+import org.jboss.netty.channel.Channels;
+import org.jboss.netty.channel.MessageEvent;
+import org.jboss.netty.channel.SimpleChannelHandler;
+
+/**
+ *
+ */
+final class KafkaResponseHandler extends SimpleChannelHandler {
+
+  private final Bufferer bufferer = new Bufferer();
+
+  @Override
+  public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) throws Exception {
+    Object msg = e.getMessage();
+    if (!(msg instanceof ChannelBuffer)) {
+      super.messageReceived(ctx, e);
+      return;
+    }
+
+    bufferer.apply((ChannelBuffer) msg);
+    ChannelBuffer buffer = bufferer.getNext();
+    while (buffer.readable()) {
+      // Send the response object upstream
+      Channels.fireMessageReceived(ctx, new KafkaResponse(FetchException.ErrorCode.fromCode(buffer.readShort()),
+                                                          buffer, buffer.readableBytes() + 6));
+      buffer = bufferer.getNext();
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/kafka/client/MessageFetcher.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/kafka/client/MessageFetcher.java b/twill-core/src/main/java/org/apache/twill/internal/kafka/client/MessageFetcher.java
new file mode 100644
index 0000000..0814917
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/kafka/client/MessageFetcher.java
@@ -0,0 +1,243 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.kafka.client;
+
+import org.apache.twill.common.Threads;
+import org.apache.twill.kafka.client.FetchException;
+import org.apache.twill.kafka.client.FetchedMessage;
+import com.google.common.base.Throwables;
+import com.google.common.collect.AbstractIterator;
+import com.google.common.io.ByteStreams;
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.jboss.netty.buffer.ChannelBufferInputStream;
+import org.jboss.netty.buffer.ChannelBufferOutputStream;
+import org.jboss.netty.buffer.ChannelBuffers;
+import org.xerial.snappy.SnappyInputStream;
+
+import java.io.IOException;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.Executors;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.zip.GZIPInputStream;
+
+/**
+ * This class is for consuming messages from a kafka topic.
+ */
+final class MessageFetcher extends AbstractIterator<FetchedMessage> implements ResponseHandler {
+
+  private static final long BACKOFF_INTERVAL_MS = 100;
+
+  private final KafkaRequestSender sender;
+  private final String topic;
+  private final int partition;
+  private final int maxSize;
+  private final AtomicLong offset;
+  private final BlockingQueue<FetchResult> messages;
+  private final ScheduledExecutorService scheduler;
+  private volatile long backoffMillis;
+  private final Runnable sendFetchRequest = new Runnable() {
+    @Override
+    public void run() {
+      sendFetchRequest();
+    }
+  };
+
+  MessageFetcher(String topic, int partition, long offset, int maxSize, KafkaRequestSender sender) {
+    this.topic = topic;
+    this.partition = partition;
+    this.sender = sender;
+    this.offset = new AtomicLong(offset);
+    this.maxSize = maxSize;
+    this.messages = new LinkedBlockingQueue<FetchResult>();
+    this.scheduler = Executors.newSingleThreadScheduledExecutor(
+                        Threads.createDaemonThreadFactory("kafka-" + topic + "-consumer"));
+  }
+
+  @Override
+  public void received(KafkaResponse response) {
+    if (response.getErrorCode() != FetchException.ErrorCode.OK) {
+      messages.add(FetchResult.failure(new FetchException("Error in fetching: " + response.getErrorCode(),
+                                                          response.getErrorCode())));
+      return;
+    }
+
+    try {
+      if (decodeResponse(response.getBody(), -1)) {
+        backoffMillis = 0;
+      } else {
+        backoffMillis = Math.min(backoffMillis + BACKOFF_INTERVAL_MS, 1000);
+        scheduler.schedule(sendFetchRequest, backoffMillis, TimeUnit.MILLISECONDS);
+      }
+    } catch (Throwable t) {
+      messages.add(FetchResult.failure(t));
+    }
+  }
+
+  private boolean decodeResponse(ChannelBuffer buffer, long nextOffset) {
+    boolean hasMessage = false;
+    boolean computeOffset = nextOffset < 0;
+    while (buffer.readableBytes() >= 4) {
+      int size = buffer.readInt();
+      if (buffer.readableBytes() < size) {
+        if (!hasMessage) {
+          throw new IllegalStateException("Size too small");
+        }
+        break;
+      }
+      nextOffset = computeOffset ? offset.addAndGet(size + 4) : nextOffset;
+      decodeMessage(size, buffer, nextOffset);
+      hasMessage = true;
+    }
+    return hasMessage;
+
+  }
+
+  private void decodeMessage(int size, ChannelBuffer buffer, long nextOffset) {
+    int readerIdx = buffer.readerIndex();
+    int magic = buffer.readByte();
+    Compression compression = magic == 0 ? Compression.NONE : Compression.fromCode(buffer.readByte());
+    int crc = buffer.readInt();
+
+    ChannelBuffer payload = buffer.readSlice(size - (buffer.readerIndex() - readerIdx));
+
+    // Verify CRC?
+    enqueueMessage(compression, payload, nextOffset);
+  }
+
+  private void enqueueMessage(Compression compression, ChannelBuffer payload, long nextOffset) {
+    switch (compression) {
+      case NONE:
+        messages.add(FetchResult.success(new BasicFetchedMessage(nextOffset, payload.toByteBuffer())));
+        break;
+      case GZIP:
+        decodeResponse(gunzip(payload), nextOffset);
+        break;
+      case SNAPPY:
+        decodeResponse(unsnappy(payload), nextOffset);
+        break;
+    }
+  }
+
+  private ChannelBuffer gunzip(ChannelBuffer source) {
+    ChannelBufferOutputStream output = new ChannelBufferOutputStream(
+                                              ChannelBuffers.dynamicBuffer(source.readableBytes() * 2));
+    try {
+      try {
+        GZIPInputStream gzipInput = new GZIPInputStream(new ChannelBufferInputStream(source));
+        try {
+          ByteStreams.copy(gzipInput, output);
+          return output.buffer();
+        } finally {
+          gzipInput.close();
+        }
+      } finally {
+        output.close();
+      }
+    } catch (IOException e) {
+      throw Throwables.propagate(e);
+    }
+  }
+
+  private ChannelBuffer unsnappy(ChannelBuffer source) {
+    ChannelBufferOutputStream output = new ChannelBufferOutputStream(
+                                              ChannelBuffers.dynamicBuffer(source.readableBytes() * 2));
+    try {
+      try {
+        SnappyInputStream snappyInput = new SnappyInputStream(new ChannelBufferInputStream(source));
+        try {
+          ByteStreams.copy(snappyInput, output);
+          return output.buffer();
+        } finally {
+          snappyInput.close();
+        }
+      } finally {
+        output.close();
+      }
+    } catch (IOException e) {
+      throw Throwables.propagate(e);
+    }
+  }
+
+  private void sendFetchRequest() {
+    ChannelBuffer fetchBody = ChannelBuffers.buffer(12);
+    fetchBody.writeLong(offset.get());
+    fetchBody.writeInt(maxSize);
+    sender.send(KafkaRequest.createFetch(topic, partition, fetchBody, MessageFetcher.this));
+  }
+
+  @Override
+  protected FetchedMessage computeNext() {
+    FetchResult result = messages.poll();
+    if (result != null) {
+      return getMessage(result);
+    }
+
+    try {
+      sendFetchRequest();
+      return getMessage(messages.take());
+    } catch (InterruptedException e) {
+      scheduler.shutdownNow();
+      return endOfData();
+    }
+  }
+
+  private FetchedMessage getMessage(FetchResult result) {
+    try {
+      if (result.isSuccess()) {
+        return result.getMessage();
+      } else {
+        throw result.getErrorCause();
+      }
+    } catch (Throwable t) {
+      throw Throwables.propagate(t);
+    }
+  }
+
+  private static final class FetchResult {
+    private final FetchedMessage message;
+    private final Throwable errorCause;
+
+    static FetchResult success(FetchedMessage message) {
+      return new FetchResult(message, null);
+    }
+
+    static FetchResult failure(Throwable cause) {
+      return new FetchResult(null, cause);
+    }
+
+    private FetchResult(FetchedMessage message, Throwable errorCause) {
+      this.message = message;
+      this.errorCause = errorCause;
+    }
+
+    public FetchedMessage getMessage() {
+      return message;
+    }
+
+    public Throwable getErrorCause() {
+      return errorCause;
+    }
+
+    public boolean isSuccess() {
+      return message != null;
+    }
+  }
+}
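
The GZIP and SNAPPY branches of enqueueMessage rely on Kafka 0.7's nested format, where a compressed message's payload is itself a message set, so decodeResponse is simply re-applied to the decompressed bytes. A rough sketch of that nesting as Java comments, inferred from decodeMessage:

  // outer message:  [size:int][magic=1][compression:byte][crc:int][gzip/snappy(inner message set)]
  // inner message:  [size:int][magic=0][crc:int][payload]          ... repeated within the inner set
  //
  // enqueueMessage() decompresses the outer payload and calls decodeResponse(inner, nextOffset),
  // so every inner message is reported with the offset of the enclosing compressed message.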

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/kafka/client/MessageSetEncoder.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/kafka/client/MessageSetEncoder.java b/twill-core/src/main/java/org/apache/twill/internal/kafka/client/MessageSetEncoder.java
new file mode 100644
index 0000000..49008cc
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/kafka/client/MessageSetEncoder.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.kafka.client;
+
+import org.jboss.netty.buffer.ChannelBuffer;
+
+/**
+ * This represents a set of messages that go into the same message set and get encoded as a
+ * single kafka message set.
+ */
+interface MessageSetEncoder {
+
+  MessageSetEncoder add(ChannelBuffer payload);
+
+  ChannelBuffer finish();
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/kafka/client/ResponseHandler.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/kafka/client/ResponseHandler.java b/twill-core/src/main/java/org/apache/twill/internal/kafka/client/ResponseHandler.java
new file mode 100644
index 0000000..f681b85
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/kafka/client/ResponseHandler.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.kafka.client;
+
+/**
+ * Represents a handler for kafka responses.
+ */
+interface ResponseHandler {
+
+  ResponseHandler NO_OP = new ResponseHandler() {
+    @Override
+    public void received(KafkaResponse response) {
+      // No-op
+    }
+  };
+
+  void received(KafkaResponse response);
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/kafka/client/SimpleKafkaClient.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/kafka/client/SimpleKafkaClient.java b/twill-core/src/main/java/org/apache/twill/internal/kafka/client/SimpleKafkaClient.java
new file mode 100644
index 0000000..8ff4856
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/kafka/client/SimpleKafkaClient.java
@@ -0,0 +1,304 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.kafka.client;
+
+import org.apache.twill.common.Threads;
+import org.apache.twill.kafka.client.FetchException;
+import org.apache.twill.kafka.client.FetchedMessage;
+import org.apache.twill.kafka.client.KafkaClient;
+import org.apache.twill.kafka.client.PreparePublish;
+import org.apache.twill.zookeeper.ZKClient;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.google.common.primitives.Ints;
+import com.google.common.primitives.Longs;
+import com.google.common.util.concurrent.AbstractIdleService;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.SettableFuture;
+import org.jboss.netty.bootstrap.ClientBootstrap;
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.jboss.netty.buffer.ChannelBuffers;
+import org.jboss.netty.channel.Channel;
+import org.jboss.netty.channel.ChannelFuture;
+import org.jboss.netty.channel.ChannelFutureListener;
+import org.jboss.netty.channel.ChannelPipeline;
+import org.jboss.netty.channel.ChannelPipelineFactory;
+import org.jboss.netty.channel.Channels;
+import org.jboss.netty.channel.socket.nio.NioClientBossPool;
+import org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory;
+import org.jboss.netty.channel.socket.nio.NioWorkerPool;
+import org.jboss.netty.util.HashedWheelTimer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.nio.ByteBuffer;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
+
+/**
+ * Basic implementation of {@link KafkaClient}.
+ */
+public final class SimpleKafkaClient extends AbstractIdleService implements KafkaClient {
+
+  private static final Logger LOG = LoggerFactory.getLogger(SimpleKafkaClient.class);
+  private static final int BROKER_POLL_INTERVAL = 100;
+
+  private final KafkaBrokerCache brokerCache;
+  private ClientBootstrap bootstrap;
+  private ConnectionPool connectionPool;
+
+  public SimpleKafkaClient(ZKClient zkClient) {
+    this.brokerCache = new KafkaBrokerCache(zkClient);
+  }
+
+  @Override
+  protected void startUp() throws Exception {
+    brokerCache.startAndWait();
+    ThreadFactory threadFactory = Threads.createDaemonThreadFactory("kafka-client-netty-%d");
+    NioClientBossPool bossPool = new NioClientBossPool(Executors.newSingleThreadExecutor(threadFactory), 1,
+                                                       new HashedWheelTimer(threadFactory), null);
+    NioWorkerPool workerPool = new NioWorkerPool(Executors.newFixedThreadPool(4, threadFactory), 4);
+
+    bootstrap = new ClientBootstrap(new NioClientSocketChannelFactory(bossPool, workerPool));
+    bootstrap.setPipelineFactory(new KafkaChannelPipelineFactory());
+    connectionPool = new ConnectionPool(bootstrap);
+  }
+
+  @Override
+  protected void shutDown() throws Exception {
+    connectionPool.close();
+    bootstrap.releaseExternalResources();
+    brokerCache.stopAndWait();
+  }
+
+  @Override
+  public PreparePublish preparePublish(final String topic, final Compression compression) {
+    final Map<Integer, MessageSetEncoder> encoders = Maps.newHashMap();
+
+    return new PreparePublish() {
+      @Override
+      public PreparePublish add(byte[] payload, Object partitionKey) {
+        return add(ByteBuffer.wrap(payload), partitionKey);
+      }
+
+      @Override
+      public PreparePublish add(ByteBuffer payload, Object partitionKey) {
+        // TODO: Partition
+        int partition = 0;
+
+        MessageSetEncoder encoder = encoders.get(partition);
+        if (encoder == null) {
+          encoder = getEncoder(compression);
+          encoders.put(partition, encoder);
+        }
+        encoder.add(ChannelBuffers.wrappedBuffer(payload));
+
+        return this;
+      }
+
+      @Override
+      public ListenableFuture<?> publish() {
+        List<ListenableFuture<?>> futures = Lists.newArrayListWithCapacity(encoders.size());
+        for (Map.Entry<Integer, MessageSetEncoder> entry : encoders.entrySet()) {
+          futures.add(doPublish(topic, entry.getKey(), entry.getValue().finish()));
+        }
+        encoders.clear();
+        return Futures.allAsList(futures);
+      }
+
+      private ListenableFuture<?> doPublish(String topic, int partition, ChannelBuffer messageSet) {
+        final KafkaRequest request = KafkaRequest.createProduce(topic, partition, messageSet);
+        final SettableFuture<?> result = SettableFuture.create();
+        final ConnectionPool.ConnectResult connection =
+              connectionPool.connect(getTopicBroker(topic, partition).getAddress());
+
+        connection.getChannelFuture().addListener(new ChannelFutureListener() {
+          @Override
+          public void operationComplete(ChannelFuture future) throws Exception {
+            try {
+              future.getChannel().write(request).addListener(getPublishChannelFutureListener(result, null, connection));
+            } catch (Exception e) {
+              result.setException(e);
+            }
+          }
+        });
+
+        return result;
+      }
+    };
+  }
+
+  @Override
+  public Iterator<FetchedMessage> consume(final String topic, final int partition, long offset, int maxSize) {
+    Preconditions.checkArgument(maxSize >= 10, "Message size cannot be smaller than 10.");
+
+    // Connect to broker. Consumer connections are long-lived, so there is no need to worry about reuse.
+    final AtomicReference<ChannelFuture> channelFutureRef = new AtomicReference<ChannelFuture>(
+          connectionPool.connect(getTopicBroker(topic, partition).getAddress()).getChannelFuture());
+
+    return new MessageFetcher(topic, partition, offset, maxSize, new KafkaRequestSender() {
+
+      @Override
+      public void send(final KafkaRequest request) {
+        if (!isRunning()) {
+          return;
+        }
+        try {
+          // Try to send the request
+          Channel channel = channelFutureRef.get().getChannel();
+          if (!channel.write(request).await().isSuccess()) {
+            // If failed, retry
+            channel.close();
+            ChannelFuture channelFuture = connectionPool.connect(
+                                              getTopicBroker(topic, partition).getAddress()).getChannelFuture();
+            channelFutureRef.set(channelFuture);
+            channelFuture.addListener(new ChannelFutureListener() {
+              @Override
+              public void operationComplete(ChannelFuture channelFuture) throws Exception {
+                send(request);
+              }
+            });
+          }
+        } catch (InterruptedException e) {
+          // Ignore it
+          LOG.info("Interrupted when sending consume request", e);
+        }
+      }
+    });
+  }
+
+  @Override
+  public ListenableFuture<long[]> getOffset(final String topic, final int partition, long time, int maxOffsets) {
+    final SettableFuture<long[]> resultFuture = SettableFuture.create();
+    final ChannelBuffer body = ChannelBuffers.buffer(Longs.BYTES + Ints.BYTES);
+    body.writeLong(time);
+    body.writeInt(maxOffsets);
+
+    connectionPool.connect(getTopicBroker(topic, partition).getAddress())
+                  .getChannelFuture().addListener(new ChannelFutureListener() {
+      @Override
+      public void operationComplete(ChannelFuture future) throws Exception {
+        if (checkFailure(future)) {
+          return;
+        }
+
+        future.getChannel().write(KafkaRequest.createOffsets(topic, partition, body, new ResponseHandler() {
+          @Override
+          public void received(KafkaResponse response) {
+            if (response.getErrorCode() != FetchException.ErrorCode.OK) {
+              resultFuture.setException(new FetchException("Failed to fetch offset.", response.getErrorCode()));
+            } else {
+              // Decode the offset response, which contains a 4-byte count of offsets, followed by that many
+              // offsets, each 8 bytes in size.
+              ChannelBuffer resultBuffer = response.getBody();
+              int size = resultBuffer.readInt();
+              long[] result = new long[size];
+              for (int i = 0; i < size; i++) {
+                result[i] = resultBuffer.readLong();
+              }
+              resultFuture.set(result);
+            }
+          }
+        })).addListener(new ChannelFutureListener() {
+          @Override
+          public void operationComplete(ChannelFuture future) throws Exception {
+            checkFailure(future);
+          }
+        });
+      }
+
+      private boolean checkFailure(ChannelFuture future) {
+        if (!future.isSuccess()) {
+          if (future.isCancelled()) {
+            resultFuture.cancel(true);
+          } else {
+            resultFuture.setException(future.getCause());
+          }
+          return true;
+        }
+        return false;
+      }
+    });
+
+    return resultFuture;
+  }
+
+  private TopicBroker getTopicBroker(String topic, int partition) {
+    TopicBroker topicBroker = brokerCache.getBrokerAddress(topic, partition);
+    while (topicBroker == null) {
+      try {
+        TimeUnit.MILLISECONDS.sleep(BROKER_POLL_INTERVAL);
+      } catch (InterruptedException e) {
+        return null;
+      }
+      topicBroker = brokerCache.getBrokerAddress(topic, partition);
+    }
+    return topicBroker;
+  }
+
+  private MessageSetEncoder getEncoder(Compression compression) {
+    switch (compression) {
+      case GZIP:
+        return new GZipMessageSetEncoder();
+      case SNAPPY:
+        return new SnappyMessageSetEncoder();
+      default:
+        return new IdentityMessageSetEncoder();
+    }
+  }
+
+  private <V> ChannelFutureListener getPublishChannelFutureListener(final SettableFuture<V> result, final V resultObj,
+                                                                    final ConnectionPool.ConnectionReleaser releaser) {
+    return new ChannelFutureListener() {
+      @Override
+      public void operationComplete(ChannelFuture future) throws Exception {
+        try {
+          if (future.isSuccess()) {
+            result.set(resultObj);
+          } else if (future.isCancelled()) {
+            result.cancel(true);
+          } else {
+            result.setException(future.getCause());
+          }
+        } finally {
+          releaser.release();
+        }
+      }
+    };
+  }
+
+  private static final class KafkaChannelPipelineFactory implements ChannelPipelineFactory {
+
+    @Override
+    public ChannelPipeline getPipeline() throws Exception {
+      ChannelPipeline pipeline = Channels.pipeline();
+
+      pipeline.addLast("encoder", new KafkaRequestEncoder());
+      pipeline.addLast("decoder", new KafkaResponseHandler());
+      pipeline.addLast("dispatcher", new KafkaResponseDispatcher());
+      return pipeline;
+    }
+  }
+}
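
A minimal usage sketch of the getOffset() call above, assuming an already-started ZKClientService and Kafka client, and a hypothetical "logs" topic; the time value -1 follows the usual Kafka convention for "latest offset":

    // Sketch only: topic name, partition, and surrounding setup are assumptions,
    // and the enclosing method is assumed to declare "throws Exception".
    KafkaClient kafkaClient = new SimpleKafkaClient(zkClientService);
    kafkaClient.startAndWait();

    // Request the single latest offset of partition 0 (-1 means "latest" in Kafka terms).
    long[] offsets = kafkaClient.getOffset("logs", 0, -1L, 1).get();
    long latest = offsets.length > 0 ? offsets[0] : 0L;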

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/kafka/client/SnappyMessageSetEncoder.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/kafka/client/SnappyMessageSetEncoder.java b/twill-core/src/main/java/org/apache/twill/internal/kafka/client/SnappyMessageSetEncoder.java
new file mode 100644
index 0000000..bf18c08
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/kafka/client/SnappyMessageSetEncoder.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.kafka.client;
+
+import org.xerial.snappy.SnappyOutputStream;
+
+import java.io.IOException;
+import java.io.OutputStream;
+
+/**
+ * A {@link MessageSetEncoder} that compresses messages using Snappy.
+ */
+final class SnappyMessageSetEncoder extends AbstractCompressedMessageSetEncoder {
+
+  SnappyMessageSetEncoder() {
+    super(Compression.SNAPPY);
+  }
+
+  @Override
+  protected OutputStream createCompressedStream(OutputStream os) throws IOException {
+    return new SnappyOutputStream(os);
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/kafka/client/TopicBroker.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/kafka/client/TopicBroker.java b/twill-core/src/main/java/org/apache/twill/internal/kafka/client/TopicBroker.java
new file mode 100644
index 0000000..fd4bf03
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/kafka/client/TopicBroker.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.kafka.client;
+
+import java.net.InetSocketAddress;
+
+/**
+ * Represents broker information for a given topic.
+ */
+final class TopicBroker {
+
+  private final String topic;
+  private final InetSocketAddress address;
+  private final int partitionSize;
+
+  TopicBroker(String topic, InetSocketAddress address, int partitionSize) {
+    this.topic = topic;
+    this.address = address;
+    this.partitionSize = partitionSize;
+  }
+
+  String getTopic() {
+    return topic;
+  }
+
+  InetSocketAddress getAddress() {
+    return address;
+  }
+
+  int getPartitionSize() {
+    return partitionSize;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/kafka/client/package-info.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/kafka/client/package-info.java b/twill-core/src/main/java/org/apache/twill/internal/kafka/client/package-info.java
new file mode 100644
index 0000000..f3f615c
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/kafka/client/package-info.java
@@ -0,0 +1,21 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * This package provides a pure Java Kafka client implementation.
+ */
+package org.apache.twill.internal.kafka.client;

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/logging/KafkaAppender.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/logging/KafkaAppender.java b/twill-core/src/main/java/org/apache/twill/internal/logging/KafkaAppender.java
new file mode 100644
index 0000000..12818ef
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/logging/KafkaAppender.java
@@ -0,0 +1,303 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.logging;
+
+import org.apache.twill.common.Services;
+import org.apache.twill.common.Threads;
+import org.apache.twill.internal.kafka.client.Compression;
+import org.apache.twill.internal.kafka.client.SimpleKafkaClient;
+import org.apache.twill.kafka.client.KafkaClient;
+import org.apache.twill.kafka.client.PreparePublish;
+import org.apache.twill.zookeeper.RetryStrategies;
+import org.apache.twill.zookeeper.ZKClientService;
+import org.apache.twill.zookeeper.ZKClientServices;
+import org.apache.twill.zookeeper.ZKClients;
+import ch.qos.logback.classic.pattern.ClassOfCallerConverter;
+import ch.qos.logback.classic.pattern.FileOfCallerConverter;
+import ch.qos.logback.classic.pattern.LineOfCallerConverter;
+import ch.qos.logback.classic.pattern.MethodOfCallerConverter;
+import ch.qos.logback.classic.spi.ILoggingEvent;
+import ch.qos.logback.classic.spi.IThrowableProxy;
+import ch.qos.logback.classic.spi.StackTraceElementProxy;
+import ch.qos.logback.core.AppenderBase;
+import com.google.common.base.Charsets;
+import com.google.common.base.Function;
+import com.google.common.base.Preconditions;
+import com.google.common.base.Throwables;
+import com.google.common.collect.Iterables;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.gson.stream.JsonWriter;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.io.StringWriter;
+import java.util.Queue;
+import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
+
+/**
+ * A logback appender that converts log events to JSON and publishes them to Kafka in batches.
+ */
+public final class KafkaAppender extends AppenderBase<ILoggingEvent> {
+
+  private static final Logger LOG = LoggerFactory.getLogger(KafkaAppender.class);
+
+  private final LogEventConverter eventConverter;
+  private final AtomicReference<PreparePublish> publisher;
+  private final Runnable flushTask;
+  /**
+   * Rough count of how many entries are being buffered. It's just approximate, not exact.
+   */
+  private final AtomicInteger bufferedSize;
+
+  private ZKClientService zkClientService;
+  private KafkaClient kafkaClient;
+  private String zkConnectStr;
+  private String hostname;
+  private String topic;
+  private Queue<String> buffer;
+  private int flushLimit = 20;
+  private int flushPeriod = 100;
+  private ScheduledExecutorService scheduler;
+
+  public KafkaAppender() {
+    eventConverter = new LogEventConverter();
+    publisher = new AtomicReference<PreparePublish>();
+    flushTask = createFlushTask();
+    bufferedSize = new AtomicInteger();
+    buffer = new ConcurrentLinkedQueue<String>();
+  }
+
+  /**
+   * Sets the zookeeper connection string. Called by slf4j.
+   */
+  @SuppressWarnings("unused")
+  public void setZookeeper(String zkConnectStr) {
+    this.zkConnectStr = zkConnectStr;
+  }
+
+  /**
+   * Sets the hostname. Called by slf4j.
+   */
+  @SuppressWarnings("unused")
+  public void setHostname(String hostname) {
+    this.hostname = hostname;
+  }
+
+  /**
+   * Sets the topic name for publishing logs. Called by slf4j.
+   */
+  @SuppressWarnings("unused")
+  public void setTopic(String topic) {
+    this.topic = topic;
+  }
+
+  /**
+   * Sets the maximum number of cached log entries before performing a force flush. Called by slf4j.
+   */
+  @SuppressWarnings("unused")
+  public void setFlushLimit(int flushLimit) {
+    this.flushLimit = flushLimit;
+  }
+
+  /**
+   * Sets the periodic flush time in milliseconds. Called by slf4j.
+   */
+  @SuppressWarnings("unused")
+  public void setFlushPeriod(int flushPeriod) {
+    this.flushPeriod = flushPeriod;
+  }
+
+  @Override
+  public void start() {
+    Preconditions.checkNotNull(zkConnectStr);
+
+    scheduler = Executors.newSingleThreadScheduledExecutor(Threads.createDaemonThreadFactory("kafka-logger"));
+
+    zkClientService = ZKClientServices.delegate(
+      ZKClients.reWatchOnExpire(
+        ZKClients.retryOnFailure(ZKClientService.Builder.of(zkConnectStr).build(),
+                                 RetryStrategies.fixDelay(1, TimeUnit.SECONDS))));
+
+    kafkaClient = new SimpleKafkaClient(zkClientService);
+    Futures.addCallback(Services.chainStart(zkClientService, kafkaClient), new FutureCallback<Object>() {
+      @Override
+      public void onSuccess(Object result) {
+        LOG.info("Kafka client started: " + zkConnectStr);
+        publisher.set(kafkaClient.preparePublish(topic, Compression.SNAPPY));
+        scheduler.scheduleWithFixedDelay(flushTask, 0, flushPeriod, TimeUnit.MILLISECONDS);
+      }
+
+      @Override
+      public void onFailure(Throwable t) {
+        // Fail to talk to kafka. Other than logging, what can be done?
+        LOG.error("Failed to start kafka client.", t);
+      }
+    });
+
+    super.start();
+  }
+
+  @Override
+  public void stop() {
+    super.stop();
+    scheduler.shutdownNow();
+    Futures.getUnchecked(Services.chainStop(kafkaClient, zkClientService));
+  }
+
+  public void forceFlush() {
+    try {
+      publishLogs().get(2, TimeUnit.SECONDS);
+    } catch (Exception e) {
+      LOG.error("Failed to publish last batch of log.", e);
+    }
+  }
+
+  @Override
+  protected void append(ILoggingEvent eventObject) {
+    buffer.offer(eventConverter.convert(eventObject));
+    if (bufferedSize.incrementAndGet() >= flushLimit && publisher.get() != null) {
+      // Try to do an extra flush
+      scheduler.submit(flushTask);
+    }
+  }
+
+  private ListenableFuture<Integer> publishLogs() {
+    // If the publisher is not available, simply returns a completed future.
+    PreparePublish publisher = KafkaAppender.this.publisher.get();
+    if (publisher == null) {
+      return Futures.immediateFuture(0);
+    }
+
+    int count = 0;
+    for (String json : Iterables.consumingIterable(buffer)) {
+      publisher.add(Charsets.UTF_8.encode(json), 0);
+      count++;
+    }
+    // Nothing to publish, simply returns a completed future.
+    if (count == 0) {
+      return Futures.immediateFuture(0);
+    }
+
+    bufferedSize.set(0);
+    final int finalCount = count;
+    return Futures.transform(publisher.publish(), new Function<Object, Integer>() {
+      @Override
+      public Integer apply(Object input) {
+        return finalCount;
+      }
+    });
+  }
+
+  /**
+   * Creates a {@link Runnable} that writes all buffered logs to Kafka.
+   * @return The Runnable task
+   */
+  private Runnable createFlushTask() {
+    return new Runnable() {
+      @Override
+      public void run() {
+        Futures.addCallback(publishLogs(), new FutureCallback<Integer>() {
+          @Override
+          public void onSuccess(Integer result) {
+            if (LOG.isDebugEnabled()) {
+              LOG.debug("Log entries published, size=" + result);
+            }
+          }
+
+          @Override
+          public void onFailure(Throwable t) {
+            LOG.error("Failed to push logs to kafka. Log entries dropped.", t);
+          }
+        });
+      }
+    };
+  }
+
+  /**
+   * Helper class to convert an {@link ILoggingEvent} into a JSON string.
+   */
+  private final class LogEventConverter {
+
+    private final ClassOfCallerConverter classNameConverter = new ClassOfCallerConverter();
+    private final MethodOfCallerConverter methodConverter = new MethodOfCallerConverter();
+    private final FileOfCallerConverter fileConverter = new FileOfCallerConverter();
+    private final LineOfCallerConverter lineConverter = new LineOfCallerConverter();
+
+    private String convert(ILoggingEvent event) {
+      StringWriter result = new StringWriter();
+      JsonWriter writer = new JsonWriter(result);
+
+      try {
+        try {
+          writer.beginObject();
+          writer.name("name").value(event.getLoggerName());
+          writer.name("host").value(hostname);
+          writer.name("timestamp").value(Long.toString(event.getTimeStamp()));
+          writer.name("level").value(event.getLevel().toString());
+          writer.name("className").value(classNameConverter.convert(event));
+          writer.name("method").value(methodConverter.convert(event));
+          writer.name("file").value(fileConverter.convert(event));
+          writer.name("line").value(lineConverter.convert(event));
+          writer.name("thread").value(event.getThreadName());
+          writer.name("message").value(event.getFormattedMessage());
+          writer.name("stackTraces");
+          encodeStackTraces(event.getThrowableProxy(), writer);
+
+          writer.endObject();
+        } finally {
+          writer.close();
+        }
+      } catch (IOException e) {
+        throw Throwables.propagate(e);
+      }
+
+      return result.toString();
+    }
+
+    private void encodeStackTraces(IThrowableProxy throwable, JsonWriter writer) throws IOException {
+      writer.beginArray();
+      try {
+        if (throwable == null) {
+          return;
+        }
+
+        for (StackTraceElementProxy stackTrace : throwable.getStackTraceElementProxyArray()) {
+          writer.beginObject();
+
+          StackTraceElement element = stackTrace.getStackTraceElement();
+          writer.name("className").value(element.getClassName());
+          writer.name("method").value(element.getMethodName());
+          writer.name("file").value(element.getFileName());
+          writer.name("line").value(element.getLineNumber());
+
+          writer.endObject();
+        }
+      } finally {
+        writer.endArray();
+      }
+    }
+  }
+}
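
The appender above is normally wired up through the generated logback configuration shown later in this commit (see ServiceMain.getLogConfig()); a rough sketch of attaching it programmatically, assuming logback is the SLF4J binding and using placeholder connection values, could look like this:

    LoggerContext context = (LoggerContext) LoggerFactory.getILoggerFactory();

    KafkaAppender appender = new KafkaAppender();
    appender.setContext(context);
    appender.setName("KAFKA");                   // the name Loggings.forceFlush() looks up
    appender.setZookeeper("zkhost:2181/kafka");  // placeholder ZooKeeper connect string
    appender.setHostname("worker-1");            // placeholder host tag
    appender.setTopic("log");                    // placeholder topic
    appender.start();

    ch.qos.logback.classic.Logger root = context.getLogger(org.slf4j.Logger.ROOT_LOGGER_NAME);
    root.addAppender(appender);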

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/logging/KafkaTwillRunnable.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/logging/KafkaTwillRunnable.java b/twill-core/src/main/java/org/apache/twill/internal/logging/KafkaTwillRunnable.java
new file mode 100644
index 0000000..c1695de
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/logging/KafkaTwillRunnable.java
@@ -0,0 +1,122 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.logging;
+
+import org.apache.twill.api.Command;
+import org.apache.twill.api.TwillContext;
+import org.apache.twill.api.TwillRunnable;
+import org.apache.twill.api.TwillRunnableSpecification;
+import org.apache.twill.internal.EnvKeys;
+import org.apache.twill.internal.kafka.EmbeddedKafkaServer;
+import org.apache.twill.internal.utils.Networks;
+import com.google.common.base.Preconditions;
+import com.google.common.base.Throwables;
+import com.google.common.collect.ImmutableMap;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.util.Map;
+import java.util.Properties;
+import java.util.concurrent.CountDownLatch;
+
+/**
+ * A {@link org.apache.twill.api.TwillRunnable} for managing a Kafka server.
+ */
+public final class KafkaTwillRunnable implements TwillRunnable {
+
+  private static final Logger LOG = LoggerFactory.getLogger(KafkaTwillRunnable.class);
+
+  private final String kafkaDir;
+  private EmbeddedKafkaServer server;
+  private CountDownLatch stopLatch;
+
+  public KafkaTwillRunnable(String kafkaDir) {
+    this.kafkaDir = kafkaDir;
+  }
+
+  @Override
+  public TwillRunnableSpecification configure() {
+    return TwillRunnableSpecification.Builder.with()
+      .setName("kafka")
+      .withConfigs(ImmutableMap.of("kafkaDir", kafkaDir))
+      .build();
+  }
+
+  @Override
+  public void initialize(TwillContext context) {
+    Map<String, String> args = context.getSpecification().getConfigs();
+    String zkConnectStr = System.getenv(EnvKeys.TWILL_LOG_KAFKA_ZK);
+    stopLatch = new CountDownLatch(1);
+
+    try {
+      server = new EmbeddedKafkaServer(new File(args.get("kafkaDir")), generateKafkaConfig(zkConnectStr));
+      server.startAndWait();
+    } catch (Exception e) {
+      throw Throwables.propagate(e);
+    }
+  }
+
+  @Override
+  public void handleCommand(Command command) throws Exception {
+  }
+
+  @Override
+  public void stop() {
+    stopLatch.countDown();
+  }
+
+  @Override
+  public void destroy() {
+    server.stopAndWait();
+  }
+
+  @Override
+  public void run() {
+    try {
+      stopLatch.await();
+    } catch (InterruptedException e) {
+      LOG.info("Running thread interrupted, shutting down kafka server.", e);
+    }
+  }
+
+  private Properties generateKafkaConfig(String zkConnectStr) {
+    int port = Networks.getRandomPort();
+    Preconditions.checkState(port > 0, "Failed to get random port.");
+
+    Properties prop = new Properties();
+    prop.setProperty("log.dir", new File("kafka-logs").getAbsolutePath());
+    prop.setProperty("zk.connect", zkConnectStr);
+    prop.setProperty("num.threads", "8");
+    prop.setProperty("port", Integer.toString(port));
+    prop.setProperty("log.flush.interval", "10000");
+    prop.setProperty("max.socket.request.bytes", "104857600");
+    prop.setProperty("log.cleanup.interval.mins", "1");
+    prop.setProperty("log.default.flush.scheduler.interval.ms", "1000");
+    prop.setProperty("zk.connectiontimeout.ms", "1000000");
+    prop.setProperty("socket.receive.buffer", "1048576");
+    prop.setProperty("enable.zookeeper", "true");
+    prop.setProperty("log.retention.hours", "168");
+    prop.setProperty("brokerid", "0");
+    prop.setProperty("socket.send.buffer", "1048576");
+    prop.setProperty("num.partitions", "1");
+    prop.setProperty("log.file.size", "536870912");
+    prop.setProperty("log.default.flush.interval.ms", "1000");
+    return prop;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/logging/LogEntryDecoder.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/logging/LogEntryDecoder.java b/twill-core/src/main/java/org/apache/twill/internal/logging/LogEntryDecoder.java
new file mode 100644
index 0000000..dc11666
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/logging/LogEntryDecoder.java
@@ -0,0 +1,124 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.logging;
+
+import org.apache.twill.api.logging.LogEntry;
+import org.apache.twill.internal.json.JsonUtils;
+import com.google.gson.JsonDeserializationContext;
+import com.google.gson.JsonDeserializer;
+import com.google.gson.JsonElement;
+import com.google.gson.JsonObject;
+import com.google.gson.JsonParseException;
+
+import java.lang.reflect.Type;
+
+/**
+ * A {@link com.google.gson.Gson} decoder for {@link LogEntry}.
+ */
+public final class LogEntryDecoder implements JsonDeserializer<LogEntry> {
+
+  @Override
+  public LogEntry deserialize(JsonElement json, Type typeOfT,
+                              JsonDeserializationContext context) throws JsonParseException {
+    if (!json.isJsonObject()) {
+      return null;
+    }
+    JsonObject jsonObj = json.getAsJsonObject();
+
+    final String name = JsonUtils.getAsString(jsonObj, "name");
+    final String host = JsonUtils.getAsString(jsonObj, "host");
+    final long timestamp = JsonUtils.getAsLong(jsonObj, "timestamp", 0);
+    LogEntry.Level l;
+    try {
+      l = LogEntry.Level.valueOf(JsonUtils.getAsString(jsonObj, "level"));
+    } catch (Exception e) {
+      l = LogEntry.Level.FATAL;
+    }
+    final LogEntry.Level logLevel = l;
+    final String className = JsonUtils.getAsString(jsonObj, "className");
+    final String method = JsonUtils.getAsString(jsonObj, "method");
+    final String file = JsonUtils.getAsString(jsonObj, "file");
+    final String line = JsonUtils.getAsString(jsonObj, "line");
+    final String thread = JsonUtils.getAsString(jsonObj, "thread");
+    final String message = JsonUtils.getAsString(jsonObj, "message");
+
+    final StackTraceElement[] stackTraces = context.deserialize(jsonObj.get("stackTraces").getAsJsonArray(),
+                                                                StackTraceElement[].class);
+
+    return new LogEntry() {
+      @Override
+      public String getLoggerName() {
+        return name;
+      }
+
+      @Override
+      public String getHost() {
+        return host;
+      }
+
+      @Override
+      public long getTimestamp() {
+        return timestamp;
+      }
+
+      @Override
+      public Level getLogLevel() {
+        return logLevel;
+      }
+
+      @Override
+      public String getSourceClassName() {
+        return className;
+      }
+
+      @Override
+      public String getSourceMethodName() {
+        return method;
+      }
+
+      @Override
+      public String getFileName() {
+        return file;
+      }
+
+      @Override
+      public int getLineNumber() {
+        if (line.equals("?")) {
+          return -1;
+        } else {
+          return Integer.parseInt(line);
+        }
+      }
+
+      @Override
+      public String getThreadName() {
+        return thread;
+      }
+
+      @Override
+      public String getMessage() {
+        return message;
+      }
+
+      @Override
+      public StackTraceElement[] getStackTraces() {
+        return stackTraces;
+      }
+    };
+  }
+}
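
A minimal sketch of registering the decoder above with Gson, assuming "json" holds one serialized log event of the shape produced by KafkaAppender earlier in this commit (a real setup may also need adapters for nested types such as StackTraceElement):

    Gson gson = new GsonBuilder()
      .registerTypeAdapter(LogEntry.class, new LogEntryDecoder())
      .create();

    // "json" is assumed to hold a single serialized log event.
    LogEntry entry = gson.fromJson(json, LogEntry.class);
    System.out.println(entry.getLogLevel() + " " + entry.getLoggerName() + ": " + entry.getMessage());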

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/logging/Loggings.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/logging/Loggings.java b/twill-core/src/main/java/org/apache/twill/internal/logging/Loggings.java
new file mode 100644
index 0000000..9baed63
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/logging/Loggings.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.logging;
+
+import ch.qos.logback.classic.Logger;
+import ch.qos.logback.classic.LoggerContext;
+import ch.qos.logback.classic.spi.ILoggingEvent;
+import ch.qos.logback.core.Appender;
+import org.slf4j.ILoggerFactory;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Utility methods for working with the logging system, such as forcing the Kafka log appender to flush.
+ */
+public final class Loggings {
+
+  public static void forceFlush() {
+    ILoggerFactory loggerFactory = LoggerFactory.getILoggerFactory();
+
+    if (loggerFactory instanceof LoggerContext) {
+      Appender<ILoggingEvent> appender = ((LoggerContext) loggerFactory).getLogger(Logger.ROOT_LOGGER_NAME)
+                                                                        .getAppender("KAFKA");
+      if (appender != null && appender instanceof KafkaAppender) {
+        ((KafkaAppender) appender).forceFlush();
+      }
+    }
+  }
+
+  private Loggings() {
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/package-info.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/package-info.java b/twill-core/src/main/java/org/apache/twill/internal/package-info.java
new file mode 100644
index 0000000..a8459e0
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/package-info.java
@@ -0,0 +1,21 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * This package provides internal classes for Twill.
+ */
+package org.apache.twill.internal;

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-core/src/main/java/org/apache/twill/internal/state/Message.java
----------------------------------------------------------------------
diff --git a/twill-core/src/main/java/org/apache/twill/internal/state/Message.java b/twill-core/src/main/java/org/apache/twill/internal/state/Message.java
new file mode 100644
index 0000000..6c3e719
--- /dev/null
+++ b/twill-core/src/main/java/org/apache/twill/internal/state/Message.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.state;
+
+import org.apache.twill.api.Command;
+
+/**
+ * Represents a control message that carries a {@link Command} targeted at the application or its runnables.
+ */
+public interface Message {
+
+  /**
+   * Type of message.
+   */
+  enum Type {
+    SYSTEM,
+    USER
+  }
+
+  /**
+   * Scope of the message.
+   */
+  enum Scope {
+    APPLICATION,
+    ALL_RUNNABLE,
+    RUNNABLE
+  }
+
+  Type getType();
+
+  Scope getScope();
+
+  /**
+   * @return the name of the target runnable if scope is {@link Scope#RUNNABLE} or {@code null} otherwise.
+   */
+  String getRunnableName();
+
+  Command getCommand();
+}
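
For illustration, a hypothetical factory method (not part of this commit) showing what an implementation of the interface above could look like for a user command aimed at a single runnable:

    static Message userMessage(final Command command, final String runnableName) {
      return new Message() {
        @Override
        public Type getType() { return Type.USER; }

        @Override
        public Scope getScope() { return Scope.RUNNABLE; }

        @Override
        public String getRunnableName() { return runnableName; }

        @Override
        public Command getCommand() { return command; }
      };
    }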


[13/28] Making maven site works.

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/main/java/org/apache/twill/internal/AbstractTwillService.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/main/java/org/apache/twill/internal/AbstractTwillService.java b/twill-yarn/src/main/java/org/apache/twill/internal/AbstractTwillService.java
new file mode 100644
index 0000000..47dd07c
--- /dev/null
+++ b/twill-yarn/src/main/java/org/apache/twill/internal/AbstractTwillService.java
@@ -0,0 +1,141 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal;
+
+import org.apache.twill.filesystem.Location;
+import org.apache.twill.internal.state.Message;
+import org.apache.twill.internal.state.SystemMessages;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.Service;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.BufferedInputStream;
+import java.io.DataInputStream;
+import java.io.IOException;
+import java.util.concurrent.Executor;
+
+/**
+ * A base implementation of {@link Service} that handles secure token updates.
+ */
+public abstract class AbstractTwillService implements Service {
+
+  private static final Logger LOG = LoggerFactory.getLogger(AbstractTwillService.class);
+
+  protected final Location applicationLocation;
+
+  protected volatile Credentials credentials;
+
+  protected AbstractTwillService(Location applicationLocation) {
+    this.applicationLocation = applicationLocation;
+  }
+
+  protected abstract Service getServiceDelegate();
+
+  /**
+   * Returns the location of the secure store, or {@code null} if either not running in secure mode or an error
+   * occur when trying to acquire the location.
+   */
+  protected final Location getSecureStoreLocation() {
+    if (!UserGroupInformation.isSecurityEnabled()) {
+      return null;
+    }
+    try {
+      return applicationLocation.append(Constants.Files.CREDENTIALS);
+    } catch (IOException e) {
+      LOG.error("Failed to create secure store location.", e);
+      return null;
+    }
+  }
+
+  /**
+   * Attempts to handle secure store update.
+   *
+   * @param message The message received
+   * @return {@code true} if the message requests a secure store update, {@code false} otherwise.
+   */
+  protected final boolean handleSecureStoreUpdate(Message message) {
+    if (!SystemMessages.SECURE_STORE_UPDATED.equals(message)) {
+      return false;
+    }
+
+    // If not in secure mode, simply ignore the message.
+    if (!UserGroupInformation.isSecurityEnabled()) {
+      return true;
+    }
+
+    try {
+      Credentials credentials = new Credentials();
+      Location location = getSecureStoreLocation();
+      DataInputStream input = new DataInputStream(new BufferedInputStream(location.getInputStream()));
+      try {
+        credentials.readTokenStorageStream(input);
+      } finally {
+        input.close();
+      }
+
+      UserGroupInformation.getCurrentUser().addCredentials(credentials);
+      this.credentials = credentials;
+
+      LOG.info("Secure store updated from {}.", location.toURI());
+
+    } catch (Throwable t) {
+      LOG.error("Failed to update secure store.", t);
+    }
+
+    return true;
+  }
+
+  @Override
+  public final ListenableFuture<State> start() {
+    return getServiceDelegate().start();
+  }
+
+  @Override
+  public final State startAndWait() {
+    return Futures.getUnchecked(start());
+  }
+
+  @Override
+  public final boolean isRunning() {
+    return getServiceDelegate().isRunning();
+  }
+
+  @Override
+  public final State state() {
+    return getServiceDelegate().state();
+  }
+
+  @Override
+  public final ListenableFuture<State> stop() {
+    return getServiceDelegate().stop();
+  }
+
+  @Override
+  public final State stopAndWait() {
+    return Futures.getUnchecked(stop());
+  }
+
+  @Override
+  public final void addListener(Listener listener, Executor executor) {
+    getServiceDelegate().addListener(listener, executor);
+  }
+}
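
A sketch of how a subclass might use handleSecureStoreUpdate() when processing incoming messages: try the secure store path first and fall back to application-specific handling only when it does not apply. The callback shape below is illustrative, not the exact subclass code in this commit:

    public ListenableFuture<String> onReceived(String messageId, Message message) {
      if (handleSecureStoreUpdate(message)) {
        // Secure store update handled entirely by the base class; just acknowledge the message.
        return Futures.immediateFuture(messageId);
      }
      // Otherwise, dispatch to application-specific handling (hypothetical helper).
      return handleOtherMessage(messageId, message);
    }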

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/main/java/org/apache/twill/internal/ServiceMain.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/main/java/org/apache/twill/internal/ServiceMain.java b/twill-yarn/src/main/java/org/apache/twill/internal/ServiceMain.java
new file mode 100644
index 0000000..4ffb023
--- /dev/null
+++ b/twill-yarn/src/main/java/org/apache/twill/internal/ServiceMain.java
@@ -0,0 +1,201 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal;
+
+import org.apache.twill.common.Services;
+import org.apache.twill.filesystem.HDFSLocationFactory;
+import org.apache.twill.filesystem.LocalLocationFactory;
+import org.apache.twill.filesystem.Location;
+import org.apache.twill.internal.logging.KafkaAppender;
+import org.apache.twill.zookeeper.ZKClientService;
+import ch.qos.logback.classic.LoggerContext;
+import ch.qos.logback.classic.joran.JoranConfigurator;
+import ch.qos.logback.classic.util.ContextInitializer;
+import ch.qos.logback.core.joran.spi.JoranException;
+import com.google.common.base.Throwables;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.Service;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.slf4j.ILoggerFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.xml.sax.InputSource;
+
+import java.io.File;
+import java.io.StringReader;
+import java.net.URI;
+import java.util.concurrent.ExecutionException;
+
+/**
+ * Base class for main classes that start a service.
+ */
+public abstract class ServiceMain {
+
+  private static final Logger LOG = LoggerFactory.getLogger(ServiceMain.class);
+
+  static {
+    // This is to work around detection of HADOOP_HOME (HADOOP-9422)
+    if (!System.getenv().containsKey("HADOOP_HOME") && System.getProperty("hadoop.home.dir") == null) {
+      System.setProperty("hadoop.home.dir", new File("").getAbsolutePath());
+    }
+  }
+
+  protected final void doMain(final ZKClientService zkClientService,
+                              final Service service) throws ExecutionException, InterruptedException {
+    configureLogger();
+
+    final String serviceName = service.toString();
+    Runtime.getRuntime().addShutdownHook(new Thread() {
+      @Override
+      public void run() {
+        Services.chainStop(service, zkClientService);
+      }
+    });
+
+    // Listener for state changes of the service
+    ListenableFuture<Service.State> completion = Services.getCompletionFuture(service);
+
+    // Starts the service
+    LOG.info("Starting service {}.", serviceName);
+    Futures.getUnchecked(Services.chainStart(zkClientService, service));
+    LOG.info("Service {} started.", serviceName);
+    try {
+      completion.get();
+      LOG.info("Service {} completed.", serviceName);
+    } catch (Throwable t) {
+      LOG.warn("Exception thrown from service {}.", serviceName, t);
+      throw Throwables.propagate(t);
+    } finally {
+      ILoggerFactory loggerFactory = LoggerFactory.getILoggerFactory();
+      if (loggerFactory instanceof LoggerContext) {
+        ((LoggerContext) loggerFactory).stop();
+      }
+    }
+  }
+
+  protected abstract String getHostname();
+
+  protected abstract String getKafkaZKConnect();
+
+  /**
+   * Returns the {@link Location} for the application based on the environment variable {@link EnvKeys#TWILL_APP_DIR}.
+   */
+  protected static Location createAppLocation(Configuration conf) {
+    // Note: Creating the LocationFactory based on the URI scheme is a bit hacky; refactor this later.
+    URI appDir = URI.create(System.getenv(EnvKeys.TWILL_APP_DIR));
+
+    try {
+      if ("file".equals(appDir.getScheme())) {
+        return new LocalLocationFactory().create(appDir);
+      }
+
+      if ("hdfs".equals(appDir.getScheme())) {
+        if (UserGroupInformation.isSecurityEnabled()) {
+          return new HDFSLocationFactory(FileSystem.get(conf)).create(appDir);
+        }
+
+        String fsUser = System.getenv(EnvKeys.TWILL_FS_USER);
+        if (fsUser == null) {
+          throw new IllegalStateException("Missing environment variable " + EnvKeys.TWILL_FS_USER);
+        }
+        return new HDFSLocationFactory(FileSystem.get(FileSystem.getDefaultUri(conf), conf, fsUser)).create(appDir);
+      }
+
+      LOG.warn("Unsupported location type {}.", appDir);
+      throw new IllegalArgumentException("Unsupported location type " + appDir);
+
+    } catch (Exception e) {
+      LOG.error("Failed to create application location for {}.", appDir);
+      throw Throwables.propagate(e);
+    }
+  }
+
+  private void configureLogger() {
+    // Check if SLF4J is bound to logback in the current environment
+    ILoggerFactory loggerFactory = LoggerFactory.getILoggerFactory();
+    if (!(loggerFactory instanceof LoggerContext)) {
+      return;
+    }
+
+    LoggerContext context = (LoggerContext) loggerFactory;
+    context.reset();
+    JoranConfigurator configurator = new JoranConfigurator();
+    configurator.setContext(context);
+
+    try {
+      File twillLogback = new File(Constants.Files.LOGBACK_TEMPLATE);
+      if (twillLogback.exists()) {
+        configurator.doConfigure(twillLogback);
+      }
+      new ContextInitializer(context).autoConfig();
+    } catch (JoranException e) {
+      throw Throwables.propagate(e);
+    }
+    doConfigure(configurator, getLogConfig(getLoggerLevel(context.getLogger(Logger.ROOT_LOGGER_NAME))));
+  }
+
+  private void doConfigure(JoranConfigurator configurator, String config) {
+    try {
+      configurator.doConfigure(new InputSource(new StringReader(config)));
+    } catch (Exception e) {
+      throw Throwables.propagate(e);
+    }
+  }
+
+  private String getLogConfig(String rootLevel) {
+    return
+      "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" +
+      "<configuration>\n" +
+      "    <appender name=\"KAFKA\" class=\"" + KafkaAppender.class.getName() + "\">\n" +
+      "        <topic>" + Constants.LOG_TOPIC + "</topic>\n" +
+      "        <hostname>" + getHostname() + "</hostname>\n" +
+      "        <zookeeper>" + getKafkaZKConnect() + "</zookeeper>\n" +
+      "    </appender>\n" +
+      "    <logger name=\"org.apache.twill.internal.logging\" additivity=\"false\" />\n" +
+      "    <root level=\"" + rootLevel + "\">\n" +
+      "        <appender-ref ref=\"KAFKA\"/>\n" +
+      "    </root>\n" +
+      "</configuration>";
+  }
+
+  private String getLoggerLevel(Logger logger) {
+    if (logger instanceof ch.qos.logback.classic.Logger) {
+      return ((ch.qos.logback.classic.Logger) logger).getLevel().toString();
+    }
+
+    if (logger.isTraceEnabled()) {
+      return "TRACE";
+    }
+    if (logger.isDebugEnabled()) {
+      return "DEBUG";
+    }
+    if (logger.isInfoEnabled()) {
+      return "INFO";
+    }
+    if (logger.isWarnEnabled()) {
+      return "WARN";
+    }
+    if (logger.isErrorEnabled()) {
+      return "ERROR";
+    }
+    return "OFF";
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/main/java/org/apache/twill/internal/appmaster/ApplicationMasterLiveNodeData.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/main/java/org/apache/twill/internal/appmaster/ApplicationMasterLiveNodeData.java b/twill-yarn/src/main/java/org/apache/twill/internal/appmaster/ApplicationMasterLiveNodeData.java
new file mode 100644
index 0000000..028df7b
--- /dev/null
+++ b/twill-yarn/src/main/java/org/apache/twill/internal/appmaster/ApplicationMasterLiveNodeData.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.appmaster;
+
+/**
+ * Represents the data stored in the live node of the application master.
+ */
+public final class ApplicationMasterLiveNodeData {
+
+  private final int appId;
+  private final long appIdClusterTime;
+  private final String containerId;
+
+  public ApplicationMasterLiveNodeData(int appId, long appIdClusterTime, String containerId) {
+    this.appId = appId;
+    this.appIdClusterTime = appIdClusterTime;
+    this.containerId = containerId;
+  }
+
+  public int getAppId() {
+    return appId;
+  }
+
+  public long getAppIdClusterTime() {
+    return appIdClusterTime;
+  }
+
+  public String getContainerId() {
+    return containerId;
+  }
+}
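
The value class above is presumably serialized (for example with Gson) into the application master's live node in ZooKeeper; an illustrative sketch with placeholder values:

    // Hypothetical values, for illustration only.
    ApplicationMasterLiveNodeData data =
      new ApplicationMasterLiveNodeData(12, 1386888000000L, "container_1386888000000_0012_01_000001");
    String json = new Gson().toJson(data);
    // json would look roughly like:
    // {"appId":12,"appIdClusterTime":1386888000000,"containerId":"container_1386888000000_0012_01_000001"}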

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/main/java/org/apache/twill/internal/appmaster/ApplicationMasterMain.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/main/java/org/apache/twill/internal/appmaster/ApplicationMasterMain.java b/twill-yarn/src/main/java/org/apache/twill/internal/appmaster/ApplicationMasterMain.java
new file mode 100644
index 0000000..b34a7a2
--- /dev/null
+++ b/twill-yarn/src/main/java/org/apache/twill/internal/appmaster/ApplicationMasterMain.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.appmaster;
+
+import org.apache.twill.api.RunId;
+import org.apache.twill.internal.Constants;
+import org.apache.twill.internal.EnvKeys;
+import org.apache.twill.internal.RunIds;
+import org.apache.twill.internal.ServiceMain;
+import org.apache.twill.internal.yarn.VersionDetectYarnAMClientFactory;
+import org.apache.twill.zookeeper.RetryStrategies;
+import org.apache.twill.zookeeper.ZKClientService;
+import org.apache.twill.zookeeper.ZKClientServices;
+import org.apache.twill.zookeeper.ZKClients;
+import com.google.common.util.concurrent.Service;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+
+import java.io.File;
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Main class for launching {@link ApplicationMasterService}.
+ */
+public final class ApplicationMasterMain extends ServiceMain {
+
+  private final String kafkaZKConnect;
+
+  private ApplicationMasterMain(String kafkaZKConnect) {
+    this.kafkaZKConnect = kafkaZKConnect;
+  }
+
+  /**
+   * Starts the application master.
+   */
+  public static void main(String[] args) throws Exception {
+    String zkConnect = System.getenv(EnvKeys.TWILL_ZK_CONNECT);
+    File twillSpec = new File(Constants.Files.TWILL_SPEC);
+    RunId runId = RunIds.fromString(System.getenv(EnvKeys.TWILL_RUN_ID));
+
+    ZKClientService zkClientService =
+      ZKClientServices.delegate(
+        ZKClients.reWatchOnExpire(
+          ZKClients.retryOnFailure(
+            ZKClientService.Builder.of(zkConnect).build(),
+            RetryStrategies.fixDelay(1, TimeUnit.SECONDS))));
+
+    Configuration conf = new YarnConfiguration(new HdfsConfiguration(new Configuration()));
+    Service service = new ApplicationMasterService(runId, zkClientService, twillSpec,
+                                                   new VersionDetectYarnAMClientFactory(conf), createAppLocation(conf));
+    new ApplicationMasterMain(String.format("%s/%s/kafka", zkConnect, runId.getId())).doMain(zkClientService, service);
+  }
+
+  @Override
+  protected String getHostname() {
+    try {
+      return InetAddress.getLocalHost().getCanonicalHostName();
+    } catch (UnknownHostException e) {
+      return "unknown";
+    }
+  }
+
+  @Override
+  protected String getKafkaZKConnect() {
+    return kafkaZKConnect;
+  }
+}
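
For clarity, the Kafka ZooKeeper connect string built in main() above is simply the application ZooKeeper quorum with a per-run namespace appended; with placeholder values:

    String zkConnect = "zkhost:2181/twill";        // placeholder quorum
    String runId = "59f9b28a-0001";                // placeholder run id
    String kafkaZKConnect = String.format("%s/%s/kafka", zkConnect, runId);
    // -> "zkhost:2181/twill/59f9b28a-0001/kafka"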

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/main/java/org/apache/twill/internal/appmaster/ApplicationMasterProcessLauncher.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/main/java/org/apache/twill/internal/appmaster/ApplicationMasterProcessLauncher.java b/twill-yarn/src/main/java/org/apache/twill/internal/appmaster/ApplicationMasterProcessLauncher.java
new file mode 100644
index 0000000..b51bb63
--- /dev/null
+++ b/twill-yarn/src/main/java/org/apache/twill/internal/appmaster/ApplicationMasterProcessLauncher.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.appmaster;
+
+import org.apache.twill.internal.Constants;
+import org.apache.twill.internal.EnvKeys;
+import org.apache.twill.internal.ProcessController;
+import org.apache.twill.internal.yarn.AbstractYarnProcessLauncher;
+import org.apache.twill.internal.yarn.YarnLaunchContext;
+import org.apache.twill.internal.yarn.YarnUtils;
+import com.google.common.collect.ImmutableMap;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.util.Records;
+
+import java.util.Map;
+
+/**
+ * A {@link org.apache.twill.internal.ProcessLauncher} for launching the Application Master from the client.
+ */
+public final class ApplicationMasterProcessLauncher extends AbstractYarnProcessLauncher<ApplicationId> {
+
+  private final ApplicationSubmitter submitter;
+
+  public ApplicationMasterProcessLauncher(ApplicationId appId, ApplicationSubmitter submitter) {
+    super(appId);
+    this.submitter = submitter;
+  }
+
+  @Override
+  protected boolean useArchiveSuffix() {
+    return false;
+  }
+
+  @Override
+  @SuppressWarnings("unchecked")
+  protected <R> ProcessController<R> doLaunch(YarnLaunchContext launchContext) {
+    final ApplicationId appId = getContainerInfo();
+
+    // Set the resource requirement for AM
+    Resource capability = Records.newRecord(Resource.class);
+    capability.setMemory(Constants.APP_MASTER_MEMORY_MB);
+    YarnUtils.setVirtualCores(capability, 1);
+
+    // Put in extra environments
+    Map<String, String> env = ImmutableMap.<String, String>builder()
+      .putAll(launchContext.getEnvironment())
+      .put(EnvKeys.YARN_APP_ID, Integer.toString(appId.getId()))
+      .put(EnvKeys.YARN_APP_ID_CLUSTER_TIME, Long.toString(appId.getClusterTimestamp()))
+      .put(EnvKeys.YARN_APP_ID_STR, appId.toString())
+      .put(EnvKeys.YARN_CONTAINER_MEMORY_MB, Integer.toString(Constants.APP_MASTER_MEMORY_MB))
+      .put(EnvKeys.YARN_CONTAINER_VIRTUAL_CORES, Integer.toString(YarnUtils.getVirtualCores(capability)))
+      .build();
+
+    launchContext.setEnvironment(env);
+    return (ProcessController<R>) submitter.submit(launchContext, capability);
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/main/java/org/apache/twill/internal/appmaster/ApplicationMasterService.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/main/java/org/apache/twill/internal/appmaster/ApplicationMasterService.java b/twill-yarn/src/main/java/org/apache/twill/internal/appmaster/ApplicationMasterService.java
new file mode 100644
index 0000000..51c8503
--- /dev/null
+++ b/twill-yarn/src/main/java/org/apache/twill/internal/appmaster/ApplicationMasterService.java
@@ -0,0 +1,799 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.appmaster;
+
+import org.apache.twill.api.Command;
+import org.apache.twill.api.EventHandler;
+import org.apache.twill.api.EventHandlerSpecification;
+import org.apache.twill.api.LocalFile;
+import org.apache.twill.api.ResourceSpecification;
+import org.apache.twill.api.RunId;
+import org.apache.twill.api.RuntimeSpecification;
+import org.apache.twill.api.TwillRunResources;
+import org.apache.twill.api.TwillSpecification;
+import org.apache.twill.common.Threads;
+import org.apache.twill.filesystem.Location;
+import org.apache.twill.internal.AbstractTwillService;
+import org.apache.twill.internal.Configs;
+import org.apache.twill.internal.Constants;
+import org.apache.twill.internal.DefaultTwillRunResources;
+import org.apache.twill.internal.EnvKeys;
+import org.apache.twill.internal.ProcessLauncher;
+import org.apache.twill.internal.TwillContainerLauncher;
+import org.apache.twill.internal.ZKServiceDecorator;
+import org.apache.twill.internal.json.LocalFileCodec;
+import org.apache.twill.internal.json.TwillSpecificationAdapter;
+import org.apache.twill.internal.kafka.EmbeddedKafkaServer;
+import org.apache.twill.internal.logging.Loggings;
+import org.apache.twill.internal.state.Message;
+import org.apache.twill.internal.state.MessageCallback;
+import org.apache.twill.internal.utils.Instances;
+import org.apache.twill.internal.utils.Networks;
+import org.apache.twill.internal.yarn.YarnAMClient;
+import org.apache.twill.internal.yarn.YarnAMClientFactory;
+import org.apache.twill.internal.yarn.YarnContainerInfo;
+import org.apache.twill.internal.yarn.YarnContainerStatus;
+import org.apache.twill.internal.yarn.YarnUtils;
+import org.apache.twill.zookeeper.ZKClient;
+import org.apache.twill.zookeeper.ZKClients;
+import com.google.common.base.Charsets;
+import com.google.common.base.Preconditions;
+import com.google.common.base.Predicate;
+import com.google.common.base.Supplier;
+import com.google.common.base.Throwables;
+import com.google.common.collect.HashMultiset;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableMultimap;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Multiset;
+import com.google.common.collect.Sets;
+import com.google.common.io.CharStreams;
+import com.google.common.io.Files;
+import com.google.common.io.InputSupplier;
+import com.google.common.reflect.TypeToken;
+import com.google.common.util.concurrent.AbstractExecutionThreadService;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.Service;
+import com.google.common.util.concurrent.SettableFuture;
+import com.google.gson.Gson;
+import com.google.gson.GsonBuilder;
+import com.google.gson.JsonElement;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.util.Records;
+import org.apache.zookeeper.CreateMode;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.FileReader;
+import java.io.IOException;
+import java.io.Reader;
+import java.net.URI;
+import java.net.URL;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Queue;
+import java.util.Set;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * The Twill application master service. It requests containers from YARN, launches runnables in them, relays state and messages through ZooKeeper, and runs an embedded Kafka server for log collection.
+ */
+public final class ApplicationMasterService extends AbstractTwillService {
+
+  private static final Logger LOG = LoggerFactory.getLogger(ApplicationMasterService.class);
+
+  // Copied from org.apache.hadoop.yarn.security.AMRMTokenIdentifier.KIND_NAME since it's missing in Hadoop-2.0
+  private static final Text AMRM_TOKEN_KIND_NAME = new Text("YARN_AM_RM_TOKEN");
+
+  private final RunId runId;
+  private final ZKClient zkClient;
+  private final TwillSpecification twillSpec;
+  private final ApplicationMasterLiveNodeData amLiveNode;
+  private final ZKServiceDecorator serviceDelegate;
+  private final RunningContainers runningContainers;
+  private final ExpectedContainers expectedContainers;
+  private final TrackerService trackerService;
+  private final YarnAMClient amClient;
+  private final String jvmOpts;
+  private final int reservedMemory;
+  private final EventHandler eventHandler;
+  private final Location applicationLocation;
+
+  private EmbeddedKafkaServer kafkaServer;
+  private Queue<RunnableContainerRequest> runnableContainerRequests;
+  private ExecutorService instanceChangeExecutor;
+
+  public ApplicationMasterService(RunId runId, ZKClient zkClient, File twillSpecFile,
+                                  YarnAMClientFactory amClientFactory, Location applicationLocation) throws Exception {
+    super(applicationLocation);
+
+    this.runId = runId;
+    this.twillSpec = TwillSpecificationAdapter.create().fromJson(twillSpecFile);
+    this.zkClient = zkClient;
+    this.applicationLocation = applicationLocation;
+    this.amClient = amClientFactory.create();
+    this.credentials = createCredentials();
+    this.jvmOpts = loadJvmOptions();
+    this.reservedMemory = getReservedMemory();
+
+    amLiveNode = new ApplicationMasterLiveNodeData(Integer.parseInt(System.getenv(EnvKeys.YARN_APP_ID)),
+                                                   Long.parseLong(System.getenv(EnvKeys.YARN_APP_ID_CLUSTER_TIME)),
+                                                   amClient.getContainerId().toString());
+
+    serviceDelegate = new ZKServiceDecorator(zkClient, runId, createLiveNodeDataSupplier(),
+                                             new ServiceDelegate(), new Runnable() {
+      @Override
+      public void run() {
+        amClient.stopAndWait();
+      }
+    });
+    expectedContainers = initExpectedContainers(twillSpec);
+    runningContainers = initRunningContainers(amClient.getContainerId(), amClient.getHost());
+    trackerService = new TrackerService(runningContainers.getResourceReport(), amClient.getHost());
+    eventHandler = createEventHandler(twillSpec);
+  }
+
+  private String loadJvmOptions() throws IOException {
+    final File jvmOptsFile = new File(Constants.Files.JVM_OPTIONS);
+    if (!jvmOptsFile.exists()) {
+      return "";
+    }
+
+    return CharStreams.toString(new InputSupplier<Reader>() {
+      @Override
+      public Reader getInput() throws IOException {
+        return new FileReader(jvmOptsFile);
+      }
+    });
+  }
+
+  private int getReservedMemory() {
+    String value = System.getenv(EnvKeys.TWILL_RESERVED_MEMORY_MB);
+    if (value == null) {
+      return Configs.Defaults.JAVA_RESERVED_MEMORY_MB;
+    }
+    try {
+      return Integer.parseInt(value);
+    } catch (Exception e) {
+      return Configs.Defaults.JAVA_RESERVED_MEMORY_MB;
+    }
+  }
+
+  private EventHandler createEventHandler(TwillSpecification twillSpec) {
+    try {
+      // Should be able to load with this class's ClassLoader, as they are packaged in the same jar.
+      EventHandlerSpecification handlerSpec = twillSpec.getEventHandler();
+
+      Class<?> handlerClass = getClass().getClassLoader().loadClass(handlerSpec.getClassName());
+      Preconditions.checkArgument(EventHandler.class.isAssignableFrom(handlerClass),
+                                  "Class %s does not implement %s",
+                                  handlerClass, EventHandler.class.getName());
+      return Instances.newInstance((Class<? extends EventHandler>) handlerClass);
+    } catch (Exception e) {
+      throw Throwables.propagate(e);
+    }
+  }
+
+  private Supplier<? extends JsonElement> createLiveNodeDataSupplier() {
+    return new Supplier<JsonElement>() {
+      @Override
+      public JsonElement get() {
+        return new Gson().toJsonTree(amLiveNode);
+      }
+    };
+  }
+
+  private RunningContainers initRunningContainers(ContainerId appMasterContainerId,
+                                                  String appMasterHost) throws Exception {
+    TwillRunResources appMasterResources = new DefaultTwillRunResources(
+      0,
+      appMasterContainerId.toString(),
+      Integer.parseInt(System.getenv(EnvKeys.YARN_CONTAINER_VIRTUAL_CORES)),
+      Integer.parseInt(System.getenv(EnvKeys.YARN_CONTAINER_MEMORY_MB)),
+      appMasterHost);
+    String appId = appMasterContainerId.getApplicationAttemptId().getApplicationId().toString();
+    return new RunningContainers(appId, appMasterResources);
+  }
+
+  private ExpectedContainers initExpectedContainers(TwillSpecification twillSpec) {
+    Map<String, Integer> expectedCounts = Maps.newHashMap();
+    for (RuntimeSpecification runtimeSpec : twillSpec.getRunnables().values()) {
+      expectedCounts.put(runtimeSpec.getName(), runtimeSpec.getResourceSpecification().getInstances());
+    }
+    return new ExpectedContainers(expectedCounts);
+  }
+
+  private void doStart() throws Exception {
+    LOG.info("Start application master with spec: " + TwillSpecificationAdapter.create().toJson(twillSpec));
+
+    // Initialize the event handler; if it fails, it will fail the application.
+    eventHandler.initialize(new BasicEventHandlerContext(twillSpec.getEventHandler()));
+
+    instanceChangeExecutor = Executors.newSingleThreadExecutor(Threads.createDaemonThreadFactory("instanceChanger"));
+
+    kafkaServer = new EmbeddedKafkaServer(new File(Constants.Files.KAFKA), generateKafkaConfig());
+
+    // Must start tracker before starting AMClient
+    LOG.info("Starting application master tracker server");
+    trackerService.startAndWait();
+    URL trackerUrl = trackerService.getUrl();
+    LOG.info("Started application master tracker server on " + trackerUrl);
+
+    amClient.setTracker(trackerService.getBindAddress(), trackerUrl);
+    amClient.startAndWait();
+
+    // Creates ZK paths for runnables and the kafka logging service
+    Futures.allAsList(ImmutableList.of(
+      zkClient.create("/" + runId.getId() + "/runnables", null, CreateMode.PERSISTENT),
+      zkClient.create("/" + runId.getId() + "/kafka", null, CreateMode.PERSISTENT))
+    ).get();
+
+    // Starts kafka server
+    LOG.info("Starting kafka server");
+
+    kafkaServer.startAndWait();
+    LOG.info("Kafka server started");
+
+    runnableContainerRequests = initContainerRequests();
+  }
+
+  private void doStop() throws Exception {
+    Thread.interrupted();     // This is just to clear the interrupt flag
+
+    LOG.info("Stop application master with spec: {}", TwillSpecificationAdapter.create().toJson(twillSpec));
+
+    try {
+      // Call event handler destroy. If there is an error, only log it and do not affect the stop sequence.
+      eventHandler.destroy();
+    } catch (Throwable t) {
+      LOG.warn("Exception when calling {}.destroy()", twillSpec.getEventHandler().getClassName(), t);
+    }
+
+    instanceChangeExecutor.shutdownNow();
+
+    // For checking if all containers are stopped.
+    final Set<String> ids = Sets.newHashSet(runningContainers.getContainerIds());
+    YarnAMClient.AllocateHandler handler = new YarnAMClient.AllocateHandler() {
+      @Override
+      public void acquired(List<ProcessLauncher<YarnContainerInfo>> launchers) {
+        // no-op
+      }
+
+      @Override
+      public void completed(List<YarnContainerStatus> completed) {
+        for (YarnContainerStatus status : completed) {
+          ids.remove(status.getContainerId());
+        }
+      }
+    };
+
+    runningContainers.stopAll();
+
+    // Poll for 5 seconds to wait for containers to stop.
+    int count = 0;
+    while (!ids.isEmpty() && count++ < 5) {
+      amClient.allocate(0.0f, handler);
+      TimeUnit.SECONDS.sleep(1);
+    }
+
+    LOG.info("Stopping application master tracker server");
+    try {
+      trackerService.stopAndWait();
+      LOG.info("Stopped application master tracker server");
+    } catch (Exception e) {
+      LOG.error("Failed to stop tracker service.", e);
+    } finally {
+      try {
+        // App location cleanup
+        cleanupDir(URI.create(System.getenv(EnvKeys.TWILL_APP_DIR)));
+        Loggings.forceFlush();
+        // Sleep a short while to let kafka clients have a chance to fetch the log
+        TimeUnit.SECONDS.sleep(1);
+      } finally {
+        kafkaServer.stopAndWait();
+        LOG.info("Kafka server stopped");
+      }
+    }
+  }
+
+  private void cleanupDir(URI appDir) {
+    try {
+      if (applicationLocation.delete(true)) {
+        LOG.info("Application directory deleted: {}", appDir);
+      } else {
+        LOG.warn("Failed to cleanup directory {}.", appDir);
+      }
+    } catch (Exception e) {
+      LOG.warn("Exception while cleanup directory {}.", appDir, e);
+    }
+  }
+
+
+  private void doRun() throws Exception {
+    // The main loop
+    Map.Entry<Resource, ? extends Collection<RuntimeSpecification>> currentRequest = null;
+    final Queue<ProvisionRequest> provisioning = Lists.newLinkedList();
+
+    YarnAMClient.AllocateHandler allocateHandler = new YarnAMClient.AllocateHandler() {
+      @Override
+      public void acquired(List<ProcessLauncher<YarnContainerInfo>> launchers) {
+        launchRunnable(launchers, provisioning);
+      }
+
+      @Override
+      public void completed(List<YarnContainerStatus> completed) {
+        handleCompleted(completed);
+      }
+    };
+
+    long nextTimeoutCheck = System.currentTimeMillis() + Constants.PROVISION_TIMEOUT;
+    while (isRunning()) {
+      // Call allocate. It has to be made first in order to get cluster resource availability.
+      amClient.allocate(0.0f, allocateHandler);
+
+      // Looks for container requests.
+      if (provisioning.isEmpty() && runnableContainerRequests.isEmpty() && runningContainers.isEmpty()) {
+        LOG.info("All containers completed. Shutting down application master.");
+        break;
+      }
+
+      // If nothing is in provisioning and there is no pending request, move to the next one
+      while (provisioning.isEmpty() && currentRequest == null && !runnableContainerRequests.isEmpty()) {
+        currentRequest = runnableContainerRequests.peek().takeRequest();
+        if (currentRequest == null) {
+          // All different types of resource requests from the current order are done; move to the next one
+          // TODO: Need to handle order type as well
+          runnableContainerRequests.poll();
+        }
+      }
+      // Nothing in provisioning; make the next batch of provision requests
+      if (provisioning.isEmpty() && currentRequest != null) {
+        addContainerRequests(currentRequest.getKey(), currentRequest.getValue(), provisioning);
+        currentRequest = null;
+      }
+
+      nextTimeoutCheck = checkProvisionTimeout(nextTimeoutCheck);
+
+      if (isRunning()) {
+        TimeUnit.SECONDS.sleep(1);
+      }
+    }
+  }
+
+  /**
+   * Handles containers that have completed.
+   */
+  private void handleCompleted(List<YarnContainerStatus> completedContainersStatuses) {
+    Multiset<String> restartRunnables = HashMultiset.create();
+    for (YarnContainerStatus status : completedContainersStatuses) {
+      LOG.info("Container {} completed with {}:{}.",
+               status.getContainerId(), status.getState(), status.getDiagnostics());
+      runningContainers.handleCompleted(status, restartRunnables);
+    }
+
+    for (Multiset.Entry<String> entry : restartRunnables.entrySet()) {
+      LOG.info("Re-request container for {} with {} instances.", entry.getElement(), entry.getCount());
+      for (int i = 0; i < entry.getCount(); i++) {
+        runnableContainerRequests.add(createRunnableContainerRequest(entry.getElement()));
+      }
+    }
+
+    // For all runnables that need to re-request containers, update the expected count timestamp
+    // so that the EventHandler is triggered with the right expiration timestamp.
+    expectedContainers.updateRequestTime(restartRunnables.elementSet());
+  }
+
+  /**
+   * Check for containers provision timeout and invoke eventHandler if necessary.
+   *
+   * @return the timestamp for the next time this method needs to be called.
+   */
+  private long checkProvisionTimeout(long nextTimeoutCheck) {
+    if (System.currentTimeMillis() < nextTimeoutCheck) {
+      return nextTimeoutCheck;
+    }
+
+    // Invoke event handler for provision request timeout
+    Map<String, ExpectedContainers.ExpectedCount> expiredRequests = expectedContainers.getAll();
+    Map<String, Integer> runningCounts = runningContainers.countAll();
+
+    List<EventHandler.TimeoutEvent> timeoutEvents = Lists.newArrayList();
+    for (Map.Entry<String, ExpectedContainers.ExpectedCount> entry : expiredRequests.entrySet()) {
+      String runnableName = entry.getKey();
+      ExpectedContainers.ExpectedCount expectedCount = entry.getValue();
+      int runningCount = runningCounts.containsKey(runnableName) ? runningCounts.get(runnableName) : 0;
+      if (expectedCount.getCount() != runningCount) {
+        timeoutEvents.add(new EventHandler.TimeoutEvent(runnableName, expectedCount.getCount(),
+                                                                   runningCount, expectedCount.getTimestamp()));
+      }
+    }
+
+    if (!timeoutEvents.isEmpty()) {
+      try {
+        EventHandler.TimeoutAction action = eventHandler.launchTimeout(timeoutEvents);
+        if (action.getTimeout() < 0) {
+          // Abort application
+          stop();
+        } else {
+          return nextTimeoutCheck + action.getTimeout();
+        }
+      } catch (Throwable t) {
+        LOG.warn("Exception when calling EventHandler {}. Ignoring the result.", twillSpec.getEventHandler().getClassName(), t);
+      }
+    }
+    return nextTimeoutCheck + Constants.PROVISION_TIMEOUT;
+  }
+
+  private Credentials createCredentials() {
+    Credentials credentials = new Credentials();
+    if (!UserGroupInformation.isSecurityEnabled()) {
+      return credentials;
+    }
+
+    try {
+      credentials.addAll(UserGroupInformation.getCurrentUser().getCredentials());
+
+      // Remove the AM->RM tokens
+      Iterator<Token<?>> iter = credentials.getAllTokens().iterator();
+      while (iter.hasNext()) {
+        Token<?> token = iter.next();
+        if (token.getKind().equals(AMRM_TOKEN_KIND_NAME)) {
+          iter.remove();
+        }
+      }
+    } catch (IOException e) {
+      LOG.warn("Failed to get current user. No credentials will be provided to containers.", e);
+    }
+
+    return credentials;
+  }
+
+  private Queue<RunnableContainerRequest> initContainerRequests() {
+    // Stores container requests in order.
+    Queue<RunnableContainerRequest> requests = Lists.newLinkedList();
+    // For each order in the twillSpec, create a container request for each runnable.
+    for (TwillSpecification.Order order : twillSpec.getOrders()) {
+      // Group container requests based on resource requirement.
+      ImmutableMultimap.Builder<Resource, RuntimeSpecification> builder = ImmutableMultimap.builder();
+      for (String runnableName : order.getNames()) {
+        RuntimeSpecification runtimeSpec = twillSpec.getRunnables().get(runnableName);
+        Resource capability = createCapability(runtimeSpec.getResourceSpecification());
+        builder.put(capability, runtimeSpec);
+      }
+      requests.add(new RunnableContainerRequest(order.getType(), builder.build()));
+    }
+    return requests;
+  }
+
+  /**
+   * Adds container requests with the given resource capability for each runtime specification.
+   */
+  private void addContainerRequests(Resource capability,
+                                    Collection<RuntimeSpecification> runtimeSpecs,
+                                    Queue<ProvisionRequest> provisioning) {
+    for (RuntimeSpecification runtimeSpec : runtimeSpecs) {
+      String name = runtimeSpec.getName();
+      int newContainers = expectedContainers.getExpected(name) - runningContainers.count(name);
+      if (newContainers > 0) {
+        // TODO: Allow user to set priority?
+        LOG.info("Requesting {} container(s) with capability {}", newContainers, capability);
+        String requestId = amClient.addContainerRequest(capability, newContainers).setPriority(0).apply();
+        provisioning.add(new ProvisionRequest(runtimeSpec, requestId, newContainers));
+      }
+    }
+  }
+
+  /**
+   * Launches runnables in the provisioned containers.
+   */
+  private void launchRunnable(List<ProcessLauncher<YarnContainerInfo>> launchers,
+                              Queue<ProvisionRequest> provisioning) {
+    for (ProcessLauncher<YarnContainerInfo> processLauncher : launchers) {
+      LOG.info("Got container {}", processLauncher.getContainerInfo().getId());
+      ProvisionRequest provisionRequest = provisioning.peek();
+      if (provisionRequest == null) {
+        continue;
+      }
+
+      String runnableName = provisionRequest.getRuntimeSpec().getName();
+      LOG.info("Starting runnable {} with {}", runnableName, processLauncher);
+
+      int containerCount = expectedContainers.getExpected(runnableName);
+
+      ProcessLauncher.PrepareLaunchContext launchContext = processLauncher.prepareLaunch(
+        ImmutableMap.<String, String>builder()
+          .put(EnvKeys.TWILL_APP_DIR, System.getenv(EnvKeys.TWILL_APP_DIR))
+          .put(EnvKeys.TWILL_FS_USER, System.getenv(EnvKeys.TWILL_FS_USER))
+          .put(EnvKeys.TWILL_APP_RUN_ID, runId.getId())
+          .put(EnvKeys.TWILL_APP_NAME, twillSpec.getName())
+          .put(EnvKeys.TWILL_ZK_CONNECT, zkClient.getConnectString())
+          .put(EnvKeys.TWILL_LOG_KAFKA_ZK, getKafkaZKConnect())
+          .build()
+        , getLocalizeFiles(), credentials
+      );
+
+      TwillContainerLauncher launcher = new TwillContainerLauncher(
+        twillSpec.getRunnables().get(runnableName), launchContext,
+        ZKClients.namespace(zkClient, getZKNamespace(runnableName)),
+        containerCount, jvmOpts, reservedMemory, getSecureStoreLocation());
+
+      runningContainers.start(runnableName, processLauncher.getContainerInfo(), launcher);
+
+      // Need to call complete to work around a bug in the YARN AMRMClient
+      if (provisionRequest.containerAcquired()) {
+        amClient.completeContainerRequest(provisionRequest.getRequestId());
+      }
+
+      if (expectedContainers.getExpected(runnableName) == runningContainers.count(runnableName)) {
+        LOG.info("Runnable " + runnableName + " fully provisioned with " + containerCount + " instances.");
+        provisioning.poll();
+      }
+    }
+  }
+
+  private List<LocalFile> getLocalizeFiles() {
+    try {
+      Reader reader = Files.newReader(new File(Constants.Files.LOCALIZE_FILES), Charsets.UTF_8);
+      try {
+        return new GsonBuilder().registerTypeAdapter(LocalFile.class, new LocalFileCodec())
+                                .create().fromJson(reader, new TypeToken<List<LocalFile>>() {}.getType());
+      } finally {
+        reader.close();
+      }
+    } catch (IOException e) {
+      throw Throwables.propagate(e);
+    }
+  }
+
+  private String getZKNamespace(String runnableName) {
+    return String.format("/%s/runnables/%s", runId.getId(), runnableName);
+  }
+
+  private String getKafkaZKConnect() {
+    return String.format("%s/%s/kafka", zkClient.getConnectString(), runId.getId());
+  }
+
+  private Properties generateKafkaConfig() {
+    int port = Networks.getRandomPort();
+    Preconditions.checkState(port > 0, "Failed to get random port.");
+
+    Properties prop = new Properties();
+    prop.setProperty("log.dir", new File("kafka-logs").getAbsolutePath());
+    prop.setProperty("zk.connect", getKafkaZKConnect());
+    prop.setProperty("num.threads", "8");
+    prop.setProperty("port", Integer.toString(port));
+    prop.setProperty("log.flush.interval", "10000");
+    prop.setProperty("max.socket.request.bytes", "104857600");
+    prop.setProperty("log.cleanup.interval.mins", "1");
+    prop.setProperty("log.default.flush.scheduler.interval.ms", "1000");
+    prop.setProperty("zk.connectiontimeout.ms", "1000000");
+    prop.setProperty("socket.receive.buffer", "1048576");
+    prop.setProperty("enable.zookeeper", "true");
+    prop.setProperty("log.retention.hours", "24");
+    prop.setProperty("brokerid", "0");
+    prop.setProperty("socket.send.buffer", "1048576");
+    prop.setProperty("num.partitions", "1");
+    prop.setProperty("log.file.size", "536870912");
+    prop.setProperty("log.default.flush.interval.ms", "1000");
+    return prop;
+  }
+
+  private ListenableFuture<String> processMessage(final String messageId, Message message) {
+    LOG.debug("Message received: {} {}.", messageId, message);
+
+    SettableFuture<String> result = SettableFuture.create();
+    Runnable completion = getMessageCompletion(messageId, result);
+
+    if (handleSecureStoreUpdate(message)) {
+      runningContainers.sendToAll(message, completion);
+      return result;
+    }
+
+    if (handleSetInstances(message, completion)) {
+      return result;
+    }
+
+    // Replicate messages to all runnables
+    if (message.getScope() == Message.Scope.ALL_RUNNABLE) {
+      runningContainers.sendToAll(message, completion);
+      return result;
+    }
+
+    // Replicate message to a particular runnable.
+    if (message.getScope() == Message.Scope.RUNNABLE) {
+      runningContainers.sendToRunnable(message.getRunnableName(), message, completion);
+      return result;
+    }
+
+    LOG.info("Message ignored. {}", message);
+    return Futures.immediateFuture(messageId);
+  }
+
+  /**
+   * Attempts to change the number of running instances.
+   * @return {@code true} if the message requests a change in the number of running instances of a runnable,
+   *         {@code false} otherwise.
+   */
+  private boolean handleSetInstances(final Message message, final Runnable completion) {
+    if (message.getType() != Message.Type.SYSTEM || message.getScope() != Message.Scope.RUNNABLE) {
+      return false;
+    }
+
+    Command command = message.getCommand();
+    Map<String, String> options = command.getOptions();
+    if (!"instances".equals(command.getCommand()) || !options.containsKey("count")) {
+      return false;
+    }
+
+    final String runnableName = message.getRunnableName();
+    if (runnableName == null || runnableName.isEmpty() || !twillSpec.getRunnables().containsKey(runnableName)) {
+      LOG.info("Unknown runnable {}", runnableName);
+      return false;
+    }
+
+    final int newCount = Integer.parseInt(options.get("count"));
+    final int oldCount = expectedContainers.getExpected(runnableName);
+
+    LOG.info("Received change instances request for {}, from {} to {}.", runnableName, oldCount, newCount);
+
+    if (newCount == oldCount) {   // Nothing to do, simply complete the request.
+      completion.run();
+      return true;
+    }
+
+    instanceChangeExecutor.execute(createSetInstanceRunnable(message, completion, oldCount, newCount));
+    return true;
+  }
+
+  /**
+   * Creates a Runnable that executes a change instance request.
+   */
+  private Runnable createSetInstanceRunnable(final Message message, final Runnable completion,
+                                             final int oldCount, final int newCount) {
+    return new Runnable() {
+      @Override
+      public void run() {
+        final String runnableName = message.getRunnableName();
+
+        LOG.info("Processing change instance request for {}, from {} to {}.", runnableName, oldCount, newCount);
+        try {
+          // Wait until running container count is the same as old count
+          runningContainers.waitForCount(runnableName, oldCount);
+          LOG.info("Confirmed {} containers running for {}.", oldCount, runnableName);
+
+          expectedContainers.setExpected(runnableName, newCount);
+
+          try {
+            if (newCount < oldCount) {
+              // Shutdown some running containers
+              for (int i = 0; i < oldCount - newCount; i++) {
+                runningContainers.removeLast(runnableName);
+              }
+            } else {
+              // Increase the number of instances
+              runnableContainerRequests.add(createRunnableContainerRequest(runnableName));
+            }
+          } finally {
+            runningContainers.sendToRunnable(runnableName, message, completion);
+            LOG.info("Change instances request completed. From {} to {}.", oldCount, newCount);
+          }
+        } catch (InterruptedException e) {
+          // If the wait is being interrupted, discard the message.
+          completion.run();
+        }
+      }
+    };
+  }
+
+  private RunnableContainerRequest createRunnableContainerRequest(final String runnableName) {
+    // Find the current order of the given runnable in order to create a RunnableContainerRequest.
+    TwillSpecification.Order order = Iterables.find(twillSpec.getOrders(), new Predicate<TwillSpecification.Order>() {
+      @Override
+      public boolean apply(TwillSpecification.Order input) {
+        return (input.getNames().contains(runnableName));
+      }
+    });
+
+    RuntimeSpecification runtimeSpec = twillSpec.getRunnables().get(runnableName);
+    Resource capability = createCapability(runtimeSpec.getResourceSpecification());
+    return new RunnableContainerRequest(order.getType(), ImmutableMultimap.of(capability, runtimeSpec));
+  }
+
+  private Runnable getMessageCompletion(final String messageId, final SettableFuture<String> future) {
+    return new Runnable() {
+      @Override
+      public void run() {
+        future.set(messageId);
+      }
+    };
+  }
+
+  private Resource createCapability(ResourceSpecification resourceSpec) {
+    Resource capability = Records.newRecord(Resource.class);
+
+    if (!YarnUtils.setVirtualCores(capability, resourceSpec.getVirtualCores())) {
+      LOG.debug("Virtual cores limit not supported.");
+    }
+
+    capability.setMemory(resourceSpec.getMemorySize());
+    return capability;
+  }
+
+  @Override
+  protected Service getServiceDelegate() {
+    return serviceDelegate;
+  }
+
+  /**
+   * A private class for the service lifecycle. It's done this way so that a {@link ZKServiceDecorator} can
+   * wrap around this to reflect status in ZK.
+   */
+  private final class ServiceDelegate extends AbstractExecutionThreadService implements MessageCallback {
+
+    private volatile Thread runThread;
+
+    @Override
+    protected void run() throws Exception {
+      runThread = Thread.currentThread();
+      try {
+        doRun();
+      } catch (InterruptedException e) {
+        // It's ok to get interrupted exception, as it's a signal to stop
+        Thread.currentThread().interrupt();
+      }
+    }
+
+    @Override
+    protected void startUp() throws Exception {
+      doStart();
+    }
+
+    @Override
+    protected void shutDown() throws Exception {
+      doStop();
+    }
+
+    @Override
+    protected void triggerShutdown() {
+      Thread runThread = this.runThread;
+      if (runThread != null) {
+        runThread.interrupt();
+      }
+    }
+
+    @Override
+    public ListenableFuture<String> onReceived(String messageId, Message message) {
+      return processMessage(messageId, message);
+    }
+  }
+}
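
One detail worth spelling out from createSetInstanceRunnable() above: the change-instances path reduces to a count comparison. If the new count is lower, the AM removes the last (oldCount - newCount) running containers; otherwise it queues a fresh RunnableContainerRequest and lets the main loop provision the difference. A small self-contained sketch of that decision, with a plain Deque and hypothetical names standing in for RunningContainers and the request queue:

import java.util.ArrayDeque;
import java.util.Deque;

public class SetInstancesSketch {

  public static void main(String[] args) {
    Deque<String> running = new ArrayDeque<String>();
    running.add("worker-0");
    running.add("worker-1");
    running.add("worker-2");

    applyInstanceChange(running, running.size(), 1);   // shrink 3 -> 1
    System.out.println("After shrink: " + running);

    applyInstanceChange(running, running.size(), 4);   // grow 1 -> 4
  }

  // Mirrors the decision in createSetInstanceRunnable(): shrink by removing the most
  // recently started instances, grow by (conceptually) requesting more containers.
  static void applyInstanceChange(Deque<String> running, int oldCount, int newCount) {
    if (newCount < oldCount) {
      for (int i = 0; i < oldCount - newCount; i++) {
        // Stands in for runningContainers.removeLast(runnableName).
        System.out.println("Stopping " + running.removeLast());
      }
    } else {
      // Stands in for queuing createRunnableContainerRequest(runnableName).
      System.out.println("Requesting " + (newCount - oldCount) + " more container(s)");
    }
  }
}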

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/main/java/org/apache/twill/internal/appmaster/ApplicationSubmitter.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/main/java/org/apache/twill/internal/appmaster/ApplicationSubmitter.java b/twill-yarn/src/main/java/org/apache/twill/internal/appmaster/ApplicationSubmitter.java
new file mode 100644
index 0000000..931c5ef
--- /dev/null
+++ b/twill-yarn/src/main/java/org/apache/twill/internal/appmaster/ApplicationSubmitter.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.appmaster;
+
+import org.apache.twill.internal.ProcessController;
+import org.apache.twill.internal.yarn.YarnApplicationReport;
+import org.apache.twill.internal.yarn.YarnLaunchContext;
+import org.apache.hadoop.yarn.api.records.Resource;
+
+/**
+ * Interface for submitting a new application to run.
+ */
+public interface ApplicationSubmitter {
+
+  ProcessController<YarnApplicationReport> submit(YarnLaunchContext launchContext, Resource capability);
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/main/java/org/apache/twill/internal/appmaster/BasicEventHandlerContext.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/main/java/org/apache/twill/internal/appmaster/BasicEventHandlerContext.java b/twill-yarn/src/main/java/org/apache/twill/internal/appmaster/BasicEventHandlerContext.java
new file mode 100644
index 0000000..1769910
--- /dev/null
+++ b/twill-yarn/src/main/java/org/apache/twill/internal/appmaster/BasicEventHandlerContext.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.appmaster;
+
+import org.apache.twill.api.EventHandlerContext;
+import org.apache.twill.api.EventHandlerSpecification;
+
+/**
+ * An {@link EventHandlerContext} that simply exposes the {@link EventHandlerSpecification} it was created with.
+ */
+final class BasicEventHandlerContext implements EventHandlerContext {
+
+  private final EventHandlerSpecification specification;
+
+  BasicEventHandlerContext(EventHandlerSpecification specification) {
+    this.specification = specification;
+  }
+
+  @Override
+  public EventHandlerSpecification getSpecification() {
+    return specification;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/main/java/org/apache/twill/internal/appmaster/ExpectedContainers.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/main/java/org/apache/twill/internal/appmaster/ExpectedContainers.java b/twill-yarn/src/main/java/org/apache/twill/internal/appmaster/ExpectedContainers.java
new file mode 100644
index 0000000..f4ebbd0
--- /dev/null
+++ b/twill-yarn/src/main/java/org/apache/twill/internal/appmaster/ExpectedContainers.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.appmaster;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Maps;
+
+import java.util.Map;
+
+/**
+ * This class holds information about the expected container count for each runnable. It also
+ * keeps track of the timestamp at which the expected count was last updated.
+ */
+final class ExpectedContainers {
+
+  private final Map<String, ExpectedCount> expectedCounts;
+
+  ExpectedContainers(Map<String, Integer> expected) {
+    expectedCounts = Maps.newHashMap();
+    long now = System.currentTimeMillis();
+
+    for (Map.Entry<String, Integer> entry : expected.entrySet()) {
+      expectedCounts.put(entry.getKey(), new ExpectedCount(entry.getValue(), now));
+    }
+  }
+
+  synchronized void setExpected(String runnable, int expected) {
+    expectedCounts.put(runnable, new ExpectedCount(expected, System.currentTimeMillis()));
+  }
+
+  /**
+   * Updates the ExpectedCount timestamp to the current time.
+   * @param runnables List of runnable names.
+   */
+  synchronized void updateRequestTime(Iterable<String> runnables) {
+    for (String runnable : runnables) {
+      ExpectedCount oldCount = expectedCounts.get(runnable);
+      expectedCounts.put(runnable, new ExpectedCount(oldCount.getCount(), System.currentTimeMillis()));
+    }
+  }
+
+  synchronized int getExpected(String runnable) {
+    return expectedCounts.get(runnable).getCount();
+  }
+
+  synchronized Map<String, ExpectedCount> getAll() {
+    return ImmutableMap.copyOf(expectedCounts);
+  }
+
+  static final class ExpectedCount {
+    private final int count;
+    private final long timestamp;
+
+    private ExpectedCount(int count, long timestamp) {
+      this.count = count;
+      this.timestamp = timestamp;
+    }
+
+    int getCount() {
+      return count;
+    }
+
+    long getTimestamp() {
+      return timestamp;
+    }
+  }
+}
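
Read alongside checkProvisionTimeout() in ApplicationMasterService, the intended usage is: seed the counts from the TwillSpecification, call setExpected() when an instances command arrives, and call updateRequestTime() whenever containers are re-requested so the timeout check compares against a fresh timestamp. A short usage sketch of those calls; it is illustrative only, since the class is package-private and the sketch assumes it compiles in the same package:

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

public class ExpectedContainersSketch {

  public static void main(String[] args) {
    // Initial counts, as initExpectedContainers() derives them from the spec.
    Map<String, Integer> initial = new HashMap<String, Integer>();
    initial.put("worker", 3);

    ExpectedContainers expected = new ExpectedContainers(initial);

    // An "instances" command for "worker" updates both the count and the timestamp.
    expected.setExpected("worker", 5);

    // A container restart re-requests capacity, so only the timestamp is refreshed.
    expected.updateRequestTime(Collections.singleton("worker"));

    System.out.println("Expected workers: " + expected.getExpected("worker"));   // prints 5
  }
}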

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/main/java/org/apache/twill/internal/appmaster/LoggerContextListenerAdapter.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/main/java/org/apache/twill/internal/appmaster/LoggerContextListenerAdapter.java b/twill-yarn/src/main/java/org/apache/twill/internal/appmaster/LoggerContextListenerAdapter.java
new file mode 100644
index 0000000..2d41aa6
--- /dev/null
+++ b/twill-yarn/src/main/java/org/apache/twill/internal/appmaster/LoggerContextListenerAdapter.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.appmaster;
+
+import ch.qos.logback.classic.Level;
+import ch.qos.logback.classic.Logger;
+import ch.qos.logback.classic.LoggerContext;
+import ch.qos.logback.classic.spi.LoggerContextListener;
+
+/**
+ * A {@link LoggerContextListener} adapter with no-op implementations, so subclasses only override the callbacks they need.
+ */
+abstract class LoggerContextListenerAdapter implements LoggerContextListener {
+
+  private final boolean resetResistant;
+
+  protected LoggerContextListenerAdapter(boolean resetResistant) {
+    this.resetResistant = resetResistant;
+  }
+
+  @Override
+  public final boolean isResetResistant() {
+    return resetResistant;
+  }
+
+  @Override
+  public void onStart(LoggerContext context) {
+  }
+
+  @Override
+  public void onReset(LoggerContext context) {
+  }
+
+  @Override
+  public void onStop(LoggerContext context) {
+  }
+
+  @Override
+  public void onLevelChange(Logger logger, Level level) {
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/main/java/org/apache/twill/internal/appmaster/ProvisionRequest.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/main/java/org/apache/twill/internal/appmaster/ProvisionRequest.java b/twill-yarn/src/main/java/org/apache/twill/internal/appmaster/ProvisionRequest.java
new file mode 100644
index 0000000..002d2a5
--- /dev/null
+++ b/twill-yarn/src/main/java/org/apache/twill/internal/appmaster/ProvisionRequest.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.appmaster;
+
+import org.apache.twill.api.RuntimeSpecification;
+
+/**
+ * Package-private class to help the AM track in-progress container requests.
+ */
+final class ProvisionRequest {
+  private final RuntimeSpecification runtimeSpec;
+  private final String requestId;
+  private int requestCount;
+
+  ProvisionRequest(RuntimeSpecification runtimeSpec, String requestId, int requestCount) {
+    this.runtimeSpec = runtimeSpec;
+    this.requestId = requestId;
+    this.requestCount = requestCount;
+  }
+
+  RuntimeSpecification getRuntimeSpec() {
+    return runtimeSpec;
+  }
+
+  String getRequestId() {
+    return requestId;
+  }
+
+  /**
+   * Called to notify that a container has been provisioned for this request.
+   * @return {@code true} if the requested container count has been provisioned.
+   */
+  boolean containerAcquired() {
+    requestCount--;
+    return requestCount == 0;
+  }
+}
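
The only mutable state here is the countdown: launchRunnable() in ApplicationMasterService calls containerAcquired() once for every container it assigns to the request, and completes the request with the AM client exactly when that countdown reaches zero. A fully self-contained rendering of the same countdown logic (not Twill's class, just the idea):

public class ProvisionCountdownSketch {

  private int remaining;

  ProvisionCountdownSketch(int requested) {
    this.remaining = requested;
  }

  boolean containerAcquired() {
    remaining--;
    return remaining == 0;     // true exactly once: when the last requested container arrives
  }

  public static void main(String[] args) {
    ProvisionCountdownSketch request = new ProvisionCountdownSketch(3);
    for (int i = 1; i <= 3; i++) {
      System.out.println("container " + i + " acquired, request satisfied = " + request.containerAcquired());
    }
  }
}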

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/main/java/org/apache/twill/internal/appmaster/RunnableContainerRequest.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/main/java/org/apache/twill/internal/appmaster/RunnableContainerRequest.java b/twill-yarn/src/main/java/org/apache/twill/internal/appmaster/RunnableContainerRequest.java
new file mode 100644
index 0000000..7f28443
--- /dev/null
+++ b/twill-yarn/src/main/java/org/apache/twill/internal/appmaster/RunnableContainerRequest.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.appmaster;
+
+import org.apache.twill.api.RuntimeSpecification;
+import org.apache.twill.api.TwillSpecification;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Iterators;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Multimap;
+import org.apache.hadoop.yarn.api.records.Resource;
+
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.Map;
+
+/**
+ * Data structure for holding a set of runnable specifications grouped by resource capability.
+ */
+final class RunnableContainerRequest {
+  private final TwillSpecification.Order.Type orderType;
+  private final Iterator<Map.Entry<Resource, Collection<RuntimeSpecification>>> requests;
+
+  RunnableContainerRequest(TwillSpecification.Order.Type orderType,
+                           Multimap<Resource, RuntimeSpecification> requests) {
+    this.orderType = orderType;
+    this.requests = requests.asMap().entrySet().iterator();
+  }
+
+  TwillSpecification.Order.Type getOrderType() {
+    return orderType;
+  }
+
+  /**
+   * Removes a resource request and returns it.
+   * @return The {@link Resource} and {@link Collection} of {@link RuntimeSpecification} or
+   *         {@code null} if there are no more requests.
+   */
+  Map.Entry<Resource, ? extends Collection<RuntimeSpecification>> takeRequest() {
+    Map.Entry<Resource, Collection<RuntimeSpecification>> next = Iterators.getNext(requests, null);
+    return next == null ? null : Maps.immutableEntry(next.getKey(), ImmutableList.copyOf(next.getValue()));
+  }
+}
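
takeRequest() simply drains the grouped multimap one resource bucket at a time, returning an immutable snapshot of the runnables that share a capability and null once the order is exhausted; the main loop in ApplicationMasterService uses that null to move on to the next order. A generic sketch of the same drain pattern, with a String key standing in for the YARN Resource object:

import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMultimap;
import com.google.common.collect.Iterators;
import com.google.common.collect.Maps;
import com.google.common.collect.Multimap;

import java.util.Collection;
import java.util.Iterator;
import java.util.Map;

public class TakeRequestSketch {

  private final Iterator<Map.Entry<String, Collection<String>>> requests;

  TakeRequestSketch(Multimap<String, String> grouped) {
    this.requests = grouped.asMap().entrySet().iterator();
  }

  // Same shape as RunnableContainerRequest.takeRequest(): next bucket or null when done.
  Map.Entry<String, ? extends Collection<String>> takeRequest() {
    Map.Entry<String, Collection<String>> next = Iterators.getNext(requests, null);
    return next == null ? null : Maps.immutableEntry(next.getKey(), ImmutableList.copyOf(next.getValue()));
  }

  public static void main(String[] args) {
    // Hypothetical capability keys; the real class groups by org.apache.hadoop.yarn.api.records.Resource.
    Multimap<String, String> grouped = ImmutableMultimap.of(
      "1024mb/1core", "worker", "1024mb/1core", "indexer",
      "2048mb/2core", "aggregator");

    TakeRequestSketch request = new TakeRequestSketch(grouped);
    for (Map.Entry<String, ? extends Collection<String>> entry = request.takeRequest();
         entry != null; entry = request.takeRequest()) {
      System.out.println(entry.getKey() + " -> " + entry.getValue());
    }
  }
}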

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-yarn/src/main/java/org/apache/twill/internal/appmaster/RunnableProcessLauncher.java
----------------------------------------------------------------------
diff --git a/twill-yarn/src/main/java/org/apache/twill/internal/appmaster/RunnableProcessLauncher.java b/twill-yarn/src/main/java/org/apache/twill/internal/appmaster/RunnableProcessLauncher.java
new file mode 100644
index 0000000..b4b27a9
--- /dev/null
+++ b/twill-yarn/src/main/java/org/apache/twill/internal/appmaster/RunnableProcessLauncher.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.internal.appmaster;
+
+import org.apache.twill.common.Cancellable;
+import org.apache.twill.internal.EnvKeys;
+import org.apache.twill.internal.ProcessController;
+import org.apache.twill.internal.yarn.AbstractYarnProcessLauncher;
+import org.apache.twill.internal.yarn.YarnContainerInfo;
+import org.apache.twill.internal.yarn.YarnLaunchContext;
+import org.apache.twill.internal.yarn.YarnNMClient;
+import com.google.common.base.Objects;
+import com.google.common.collect.Maps;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Map;
+
+/**
+ * A {@link org.apache.twill.internal.ProcessLauncher} for launching runnables in YARN containers through the {@link YarnNMClient}.
+ */
+public final class RunnableProcessLauncher extends AbstractYarnProcessLauncher<YarnContainerInfo> {
+
+  private static final Logger LOG = LoggerFactory.getLogger(RunnableProcessLauncher.class);
+
+  private final YarnContainerInfo containerInfo;
+  private final YarnNMClient nmClient;
+  private boolean launched;
+
+  public RunnableProcessLauncher(YarnContainerInfo containerInfo, YarnNMClient nmClient) {
+    super(containerInfo);
+    this.containerInfo = containerInfo;
+    this.nmClient = nmClient;
+  }
+
+  @Override
+  public String toString() {
+    return Objects.toStringHelper(this)
+      .add("container", containerInfo)
+      .toString();
+  }
+
+  @Override
+  protected <R> ProcessController<R> doLaunch(YarnLaunchContext launchContext) {
+    Map<String, String> env = Maps.newHashMap(launchContext.getEnvironment());
+
+    // Set extra environment variables
+    env.put(EnvKeys.YARN_CONTAINER_ID, containerInfo.getId());
+    env.put(EnvKeys.YARN_CONTAINER_HOST, containerInfo.getHost().getHostName());
+    env.put(EnvKeys.YARN_CONTAINER_PORT, Integer.toString(containerInfo.getPort()));
+    env.put(EnvKeys.YARN_CONTAINER_MEMORY_MB, Integer.toString(containerInfo.getMemoryMB()));
+    env.put(EnvKeys.YARN_CONTAINER_VIRTUAL_CORES, Integer.toString(containerInfo.getVirtualCores()));
+
+    launchContext.setEnvironment(env);
+
+    LOG.info("Launching in container {}, {}", containerInfo.getId(), launchContext.getCommands());
+    final Cancellable cancellable = nmClient.start(containerInfo, launchContext);
+    launched = true;
+
+    return new ProcessController<R>() {
+      @Override
+      public R getReport() {
+        // No reporting support for runnable launch yet.
+        return null;
+
+      }
+
+      @Override
+      public void cancel() {
+        cancellable.cancel();
+      }
+    };
+  }
+
+  public boolean isLaunched() {
+    return launched;
+  }
+}
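
The ProcessController returned by doLaunch() above is a thin adapter over the Cancellable handed back by the NM client: there is no report yet, and cancel() just delegates. A standalone sketch of that adapter shape, using hypothetical single-method interfaces in place of Twill's Cancellable and ProcessController:

public class ControllerAdapterSketch {

  // Hypothetical stand-ins for org.apache.twill.common.Cancellable and
  // org.apache.twill.internal.ProcessController.
  interface Cancellable { void cancel(); }
  interface Controller<R> { R getReport(); void cancel(); }

  static <R> Controller<R> adapt(final Cancellable cancellable) {
    return new Controller<R>() {
      @Override
      public R getReport() {
        return null;               // no report support for runnable launches yet
      }

      @Override
      public void cancel() {
        cancellable.cancel();      // delegate to whatever the NM client returned
      }
    };
  }

  public static void main(String[] args) {
    Controller<Void> controller = ControllerAdapterSketch.<Void>adapt(new Cancellable() {
      @Override
      public void cancel() {
        System.out.println("container stop requested");
      }
    });
    controller.cancel();
  }
}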


[28/28] git commit: Making maven site works.

Posted by ch...@apache.org.
Making maven site works.

Project: http://git-wip-us.apache.org/repos/asf/incubator-twill/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-twill/commit/35dfccc4
Tree: http://git-wip-us.apache.org/repos/asf/incubator-twill/tree/35dfccc4
Diff: http://git-wip-us.apache.org/repos/asf/incubator-twill/diff/35dfccc4

Branch: refs/heads/site
Commit: 35dfccc4d5cfe84717faec7f6502d24eb06ef0c5
Parents: 7d69074
Author: Terence Yim <te...@continuuity.com>
Authored: Thu Dec 12 13:59:25 2013 -0800
Committer: Terence Yim <te...@continuuity.com>
Committed: Thu Dec 12 13:59:25 2013 -0800

----------------------------------------------------------------------
 api/pom.xml                                     |  54 --
 .../apache/twill/api/AbstractTwillRunnable.java |  75 --
 .../main/java/org/apache/twill/api/Command.java | 114 ---
 .../java/org/apache/twill/api/EventHandler.java | 146 ----
 .../apache/twill/api/EventHandlerContext.java   |  26 -
 .../twill/api/EventHandlerSpecification.java    |  30 -
 .../java/org/apache/twill/api/LocalFile.java    |  46 --
 .../org/apache/twill/api/ResourceReport.java    |  56 --
 .../apache/twill/api/ResourceSpecification.java | 152 ----
 .../main/java/org/apache/twill/api/RunId.java   |  26 -
 .../apache/twill/api/RuntimeSpecification.java  |  34 -
 .../java/org/apache/twill/api/SecureStore.java  |  26 -
 .../apache/twill/api/SecureStoreUpdater.java    |  33 -
 .../org/apache/twill/api/ServiceAnnouncer.java  |  33 -
 .../org/apache/twill/api/ServiceController.java |  70 --
 .../org/apache/twill/api/TwillApplication.java  |  30 -
 .../java/org/apache/twill/api/TwillContext.java |  76 --
 .../org/apache/twill/api/TwillController.java   |  61 --
 .../org/apache/twill/api/TwillPreparer.java     | 146 ----
 .../org/apache/twill/api/TwillRunResources.java |  51 --
 .../org/apache/twill/api/TwillRunnable.java     |  57 --
 .../twill/api/TwillRunnableSpecification.java   |  76 --
 .../java/org/apache/twill/api/TwillRunner.java  | 107 ---
 .../apache/twill/api/TwillRunnerService.java    |  29 -
 .../apache/twill/api/TwillSpecification.java    | 327 --------
 .../org/apache/twill/api/logging/LogEntry.java  |  58 --
 .../apache/twill/api/logging/LogHandler.java    |  26 -
 .../twill/api/logging/PrinterLogHandler.java    | 101 ---
 .../apache/twill/api/logging/package-info.java  |  22 -
 .../java/org/apache/twill/api/package-info.java |  21 -
 .../DefaultEventHandlerSpecification.java       |  57 --
 .../apache/twill/internal/DefaultLocalFile.java |  76 --
 .../twill/internal/DefaultResourceReport.java   | 123 ---
 .../internal/DefaultResourceSpecification.java  |  70 --
 .../internal/DefaultRuntimeSpecification.java   |  67 --
 .../internal/DefaultTwillRunResources.java      | 106 ---
 .../DefaultTwillRunnableSpecification.java      |  60 --
 .../internal/DefaultTwillSpecification.java     | 103 ---
 .../java/org/apache/twill/internal/RunIds.java  |  76 --
 .../org/apache/twill/internal/package-info.java |  22 -
 common/pom.xml                                  |  51 --
 .../org/apache/twill/common/Cancellable.java    |  29 -
 .../twill/common/ServiceListenerAdapter.java    |  50 --
 .../java/org/apache/twill/common/Services.java  | 140 ----
 .../java/org/apache/twill/common/Threads.java   |  52 --
 .../filesystem/ForwardingLocationFactory.java   |  34 -
 .../apache/twill/filesystem/LocalLocation.java  | 205 -----
 .../twill/filesystem/LocalLocationFactory.java  |  58 --
 .../org/apache/twill/filesystem/Location.java   | 154 ----
 .../twill/filesystem/LocationFactories.java     |  67 --
 .../twill/filesystem/LocationFactory.java       |  46 --
 .../org/apache/twill/common/ServicesTest.java   | 106 ---
 .../twill/filesystem/LocalLocationTest.java     |  64 --
 core/pom.xml                                    |  89 ---
 .../AbstractExecutionServiceController.java     | 207 -----
 .../twill/internal/AbstractTwillController.java | 180 -----
 .../internal/AbstractZKServiceController.java   | 314 --------
 .../twill/internal/ApplicationBundler.java      | 362 ---------
 .../org/apache/twill/internal/Arguments.java    |  46 --
 .../twill/internal/BasicTwillContext.java       | 131 ---
 .../java/org/apache/twill/internal/Configs.java |  45 --
 .../org/apache/twill/internal/Constants.java    |  64 --
 .../apache/twill/internal/ContainerInfo.java    |  36 -
 .../twill/internal/ContainerLiveNodeData.java   |  40 -
 .../apache/twill/internal/EnvContainerInfo.java |  65 --
 .../java/org/apache/twill/internal/EnvKeys.java |  59 --
 .../apache/twill/internal/ListenerExecutor.java | 134 ----
 .../twill/internal/LogOnlyEventHandler.java     |  43 -
 .../twill/internal/ProcessController.java       |  35 -
 .../apache/twill/internal/ProcessLauncher.java  |  94 ---
 .../internal/SingleRunnableApplication.java     |  49 --
 .../internal/TwillContainerController.java      |  36 -
 .../twill/internal/TwillContainerLauncher.java  | 181 -----
 .../org/apache/twill/internal/ZKMessages.java   |  94 ---
 .../twill/internal/ZKServiceDecorator.java      | 482 -----------
 .../twill/internal/json/ArgumentsCodec.java     |  95 ---
 .../apache/twill/internal/json/JsonUtils.java   |  66 --
 .../twill/internal/json/LocalFileCodec.java     |  67 --
 .../internal/json/ResourceReportAdapter.java    |  62 --
 .../internal/json/ResourceReportCodec.java      |  67 --
 .../json/ResourceSpecificationCodec.java        |  61 --
 .../json/RuntimeSpecificationCodec.java         |  69 --
 .../internal/json/StackTraceElementCodec.java   |  56 --
 .../twill/internal/json/StateNodeCodec.java     |  60 --
 .../internal/json/TwillRunResourcesCodec.java   |  61 --
 .../json/TwillRunnableSpecificationCodec.java   |  63 --
 .../json/TwillSpecificationAdapter.java         | 163 ----
 .../internal/json/TwillSpecificationCodec.java  | 127 ---
 .../internal/kafka/EmbeddedKafkaServer.java     |  93 ---
 .../AbstractCompressedMessageSetEncoder.java    |  78 --
 .../kafka/client/AbstractMessageSetEncoder.java |  79 --
 .../kafka/client/BasicFetchedMessage.java       |  46 --
 .../twill/internal/kafka/client/Bufferer.java   |  61 --
 .../internal/kafka/client/Compression.java      |  49 --
 .../internal/kafka/client/ConnectionPool.java   | 125 ---
 .../kafka/client/GZipMessageSetEncoder.java     |  37 -
 .../kafka/client/IdentityMessageSetEncoder.java |  42 -
 .../internal/kafka/client/KafkaBrokerCache.java | 326 --------
 .../internal/kafka/client/KafkaRequest.java     |  91 ---
 .../kafka/client/KafkaRequestEncoder.java       |  60 --
 .../kafka/client/KafkaRequestSender.java        |  26 -
 .../internal/kafka/client/KafkaResponse.java    |  49 --
 .../kafka/client/KafkaResponseDispatcher.java   |  63 --
 .../kafka/client/KafkaResponseHandler.java      |  51 --
 .../internal/kafka/client/MessageFetcher.java   | 243 ------
 .../kafka/client/MessageSetEncoder.java         |  31 -
 .../internal/kafka/client/ResponseHandler.java  |  33 -
 .../kafka/client/SimpleKafkaClient.java         | 304 -------
 .../kafka/client/SnappyMessageSetEncoder.java   |  38 -
 .../internal/kafka/client/TopicBroker.java      |  48 --
 .../internal/kafka/client/package-info.java     |  21 -
 .../twill/internal/logging/KafkaAppender.java   | 303 -------
 .../internal/logging/KafkaTwillRunnable.java    | 122 ---
 .../twill/internal/logging/LogEntryDecoder.java | 124 ---
 .../apache/twill/internal/logging/Loggings.java |  46 --
 .../org/apache/twill/internal/package-info.java |  21 -
 .../apache/twill/internal/state/Message.java    |  54 --
 .../twill/internal/state/MessageCallback.java   |  34 -
 .../twill/internal/state/MessageCodec.java      | 125 ---
 .../apache/twill/internal/state/Messages.java   |  52 --
 .../twill/internal/state/SimpleMessage.java     |  89 ---
 .../apache/twill/internal/state/StateNode.java  |  84 --
 .../twill/internal/state/SystemMessages.java    |  48 --
 .../twill/internal/utils/Dependencies.java      | 323 --------
 .../apache/twill/internal/utils/Instances.java  | 112 ---
 .../apache/twill/internal/utils/Networks.java   |  47 --
 .../org/apache/twill/internal/utils/Paths.java  |  46 --
 .../twill/kafka/client/FetchException.java      |  77 --
 .../twill/kafka/client/FetchedMessage.java      |  36 -
 .../apache/twill/kafka/client/KafkaClient.java  |  50 --
 .../twill/kafka/client/PreparePublish.java      |  34 -
 .../apache/twill/kafka/client/package-info.java |  21 -
 .../apache/twill/launcher/TwillLauncher.java    | 236 ------
 core/src/main/resources/kafka-0.7.2.tgz         | Bin 8811693 -> 0 bytes
 .../apache/twill/internal/ControllerTest.java   | 211 -----
 .../twill/internal/state/MessageCodecTest.java  |  78 --
 .../internal/state/ZKServiceDecoratorTest.java  | 157 ----
 .../internal/utils/ApplicationBundlerTest.java  | 113 ---
 .../apache/twill/kafka/client/KafkaTest.java    | 220 -----
 core/src/test/resources/logback-test.xml        |  18 -
 discovery-api/pom.xml                           |  39 -
 .../apache/twill/discovery/Discoverable.java    |  37 -
 .../twill/discovery/DiscoveryService.java       |  35 -
 .../twill/discovery/DiscoveryServiceClient.java |  34 -
 discovery-core/pom.xml                          |  52 --
 .../twill/discovery/DiscoverableWrapper.java    |  69 --
 .../discovery/InMemoryDiscoveryService.java     |  73 --
 .../twill/discovery/ZKDiscoveryService.java     | 511 ------------
 .../apache/twill/discovery/package-info.java    |  21 -
 .../discovery/InMemoryDiscoveryServiceTest.java |  67 --
 .../twill/discovery/ZKDiscoveryServiceTest.java | 253 ------
 .../src/test/resources/logback-test.xml         |  17 -
 pom.xml                                         | 102 ++-
 src/site/markdown/GettingStarted.md             |  54 ++
 src/site/markdown/index.md                      |  27 +
 src/site/site.xml                               |  80 ++
 twill-api/pom.xml                               |  54 ++
 .../apache/twill/api/AbstractTwillRunnable.java |  75 ++
 .../main/java/org/apache/twill/api/Command.java | 114 +++
 .../java/org/apache/twill/api/EventHandler.java | 146 ++++
 .../apache/twill/api/EventHandlerContext.java   |  26 +
 .../twill/api/EventHandlerSpecification.java    |  30 +
 .../java/org/apache/twill/api/LocalFile.java    |  46 ++
 .../org/apache/twill/api/ResourceReport.java    |  56 ++
 .../apache/twill/api/ResourceSpecification.java | 152 ++++
 .../main/java/org/apache/twill/api/RunId.java   |  26 +
 .../apache/twill/api/RuntimeSpecification.java  |  34 +
 .../java/org/apache/twill/api/SecureStore.java  |  26 +
 .../apache/twill/api/SecureStoreUpdater.java    |  33 +
 .../org/apache/twill/api/ServiceAnnouncer.java  |  33 +
 .../org/apache/twill/api/ServiceController.java |  70 ++
 .../org/apache/twill/api/TwillApplication.java  |  30 +
 .../java/org/apache/twill/api/TwillContext.java |  76 ++
 .../org/apache/twill/api/TwillController.java   |  61 ++
 .../org/apache/twill/api/TwillPreparer.java     | 146 ++++
 .../org/apache/twill/api/TwillRunResources.java |  51 ++
 .../org/apache/twill/api/TwillRunnable.java     |  57 ++
 .../twill/api/TwillRunnableSpecification.java   |  76 ++
 .../java/org/apache/twill/api/TwillRunner.java  | 107 +++
 .../apache/twill/api/TwillRunnerService.java    |  29 +
 .../apache/twill/api/TwillSpecification.java    | 327 ++++++++
 .../org/apache/twill/api/logging/LogEntry.java  |  58 ++
 .../apache/twill/api/logging/LogHandler.java    |  26 +
 .../twill/api/logging/PrinterLogHandler.java    | 101 +++
 .../apache/twill/api/logging/package-info.java  |  22 +
 .../java/org/apache/twill/api/package-info.java |  21 +
 .../DefaultEventHandlerSpecification.java       |  57 ++
 .../apache/twill/internal/DefaultLocalFile.java |  76 ++
 .../twill/internal/DefaultResourceReport.java   | 123 +++
 .../internal/DefaultResourceSpecification.java  |  70 ++
 .../internal/DefaultRuntimeSpecification.java   |  67 ++
 .../internal/DefaultTwillRunResources.java      | 106 +++
 .../DefaultTwillRunnableSpecification.java      |  60 ++
 .../internal/DefaultTwillSpecification.java     | 103 +++
 .../java/org/apache/twill/internal/RunIds.java  |  76 ++
 .../org/apache/twill/internal/package-info.java |  22 +
 twill-common/pom.xml                            |  51 ++
 .../org/apache/twill/common/Cancellable.java    |  29 +
 .../twill/common/ServiceListenerAdapter.java    |  50 ++
 .../java/org/apache/twill/common/Services.java  | 140 ++++
 .../java/org/apache/twill/common/Threads.java   |  52 ++
 .../filesystem/ForwardingLocationFactory.java   |  34 +
 .../apache/twill/filesystem/LocalLocation.java  | 205 +++++
 .../twill/filesystem/LocalLocationFactory.java  |  58 ++
 .../org/apache/twill/filesystem/Location.java   | 154 ++++
 .../twill/filesystem/LocationFactories.java     |  67 ++
 .../twill/filesystem/LocationFactory.java       |  46 ++
 .../org/apache/twill/common/ServicesTest.java   | 106 +++
 .../twill/filesystem/LocalLocationTest.java     |  64 ++
 twill-core/pom.xml                              |  89 +++
 .../AbstractExecutionServiceController.java     | 207 +++++
 .../twill/internal/AbstractTwillController.java | 180 +++++
 .../internal/AbstractZKServiceController.java   | 314 ++++++++
 .../twill/internal/ApplicationBundler.java      | 362 +++++++++
 .../org/apache/twill/internal/Arguments.java    |  46 ++
 .../twill/internal/BasicTwillContext.java       | 131 +++
 .../java/org/apache/twill/internal/Configs.java |  45 ++
 .../org/apache/twill/internal/Constants.java    |  64 ++
 .../apache/twill/internal/ContainerInfo.java    |  36 +
 .../twill/internal/ContainerLiveNodeData.java   |  40 +
 .../apache/twill/internal/EnvContainerInfo.java |  65 ++
 .../java/org/apache/twill/internal/EnvKeys.java |  59 ++
 .../apache/twill/internal/ListenerExecutor.java | 134 ++++
 .../twill/internal/LogOnlyEventHandler.java     |  43 +
 .../twill/internal/ProcessController.java       |  35 +
 .../apache/twill/internal/ProcessLauncher.java  |  94 +++
 .../internal/SingleRunnableApplication.java     |  49 ++
 .../internal/TwillContainerController.java      |  36 +
 .../twill/internal/TwillContainerLauncher.java  | 181 +++++
 .../org/apache/twill/internal/ZKMessages.java   |  94 +++
 .../twill/internal/ZKServiceDecorator.java      | 482 +++++++++++
 .../twill/internal/json/ArgumentsCodec.java     |  95 +++
 .../apache/twill/internal/json/JsonUtils.java   |  66 ++
 .../twill/internal/json/LocalFileCodec.java     |  67 ++
 .../internal/json/ResourceReportAdapter.java    |  62 ++
 .../internal/json/ResourceReportCodec.java      |  67 ++
 .../json/ResourceSpecificationCodec.java        |  61 ++
 .../json/RuntimeSpecificationCodec.java         |  69 ++
 .../internal/json/StackTraceElementCodec.java   |  56 ++
 .../twill/internal/json/StateNodeCodec.java     |  60 ++
 .../internal/json/TwillRunResourcesCodec.java   |  61 ++
 .../json/TwillRunnableSpecificationCodec.java   |  63 ++
 .../json/TwillSpecificationAdapter.java         | 163 ++++
 .../internal/json/TwillSpecificationCodec.java  | 127 +++
 .../internal/kafka/EmbeddedKafkaServer.java     |  93 +++
 .../AbstractCompressedMessageSetEncoder.java    |  78 ++
 .../kafka/client/AbstractMessageSetEncoder.java |  79 ++
 .../kafka/client/BasicFetchedMessage.java       |  46 ++
 .../twill/internal/kafka/client/Bufferer.java   |  61 ++
 .../internal/kafka/client/Compression.java      |  49 ++
 .../internal/kafka/client/ConnectionPool.java   | 125 +++
 .../kafka/client/GZipMessageSetEncoder.java     |  37 +
 .../kafka/client/IdentityMessageSetEncoder.java |  42 +
 .../internal/kafka/client/KafkaBrokerCache.java | 326 ++++++++
 .../internal/kafka/client/KafkaRequest.java     |  91 +++
 .../kafka/client/KafkaRequestEncoder.java       |  60 ++
 .../kafka/client/KafkaRequestSender.java        |  26 +
 .../internal/kafka/client/KafkaResponse.java    |  49 ++
 .../kafka/client/KafkaResponseDispatcher.java   |  63 ++
 .../kafka/client/KafkaResponseHandler.java      |  51 ++
 .../internal/kafka/client/MessageFetcher.java   | 243 ++++++
 .../kafka/client/MessageSetEncoder.java         |  31 +
 .../internal/kafka/client/ResponseHandler.java  |  33 +
 .../kafka/client/SimpleKafkaClient.java         | 304 +++++++
 .../kafka/client/SnappyMessageSetEncoder.java   |  38 +
 .../internal/kafka/client/TopicBroker.java      |  48 ++
 .../internal/kafka/client/package-info.java     |  21 +
 .../twill/internal/logging/KafkaAppender.java   | 303 +++++++
 .../internal/logging/KafkaTwillRunnable.java    | 122 +++
 .../twill/internal/logging/LogEntryDecoder.java | 124 +++
 .../apache/twill/internal/logging/Loggings.java |  46 ++
 .../org/apache/twill/internal/package-info.java |  21 +
 .../apache/twill/internal/state/Message.java    |  54 ++
 .../twill/internal/state/MessageCallback.java   |  34 +
 .../twill/internal/state/MessageCodec.java      | 125 +++
 .../apache/twill/internal/state/Messages.java   |  52 ++
 .../twill/internal/state/SimpleMessage.java     |  89 +++
 .../apache/twill/internal/state/StateNode.java  |  84 ++
 .../twill/internal/state/SystemMessages.java    |  48 ++
 .../twill/internal/utils/Dependencies.java      | 323 ++++++++
 .../apache/twill/internal/utils/Instances.java  | 112 +++
 .../apache/twill/internal/utils/Networks.java   |  47 ++
 .../org/apache/twill/internal/utils/Paths.java  |  46 ++
 .../twill/kafka/client/FetchException.java      |  77 ++
 .../twill/kafka/client/FetchedMessage.java      |  36 +
 .../apache/twill/kafka/client/KafkaClient.java  |  50 ++
 .../twill/kafka/client/PreparePublish.java      |  34 +
 .../apache/twill/kafka/client/package-info.java |  21 +
 .../apache/twill/launcher/TwillLauncher.java    | 236 ++++++
 twill-core/src/main/resources/kafka-0.7.2.tgz   | Bin 0 -> 8811693 bytes
 .../apache/twill/internal/ControllerTest.java   | 211 +++++
 .../twill/internal/state/MessageCodecTest.java  |  78 ++
 .../internal/state/ZKServiceDecoratorTest.java  | 157 ++++
 .../internal/utils/ApplicationBundlerTest.java  | 113 +++
 .../apache/twill/kafka/client/KafkaTest.java    | 220 +++++
 twill-core/src/test/resources/logback-test.xml  |  18 +
 twill-discovery-api/pom.xml                     |  39 +
 .../apache/twill/discovery/Discoverable.java    |  37 +
 .../twill/discovery/DiscoveryService.java       |  35 +
 .../twill/discovery/DiscoveryServiceClient.java |  34 +
 twill-discovery-core/pom.xml                    |  52 ++
 .../twill/discovery/DiscoverableWrapper.java    |  69 ++
 .../discovery/InMemoryDiscoveryService.java     |  73 ++
 .../twill/discovery/ZKDiscoveryService.java     | 511 ++++++++++++
 .../apache/twill/discovery/package-info.java    |  21 +
 .../discovery/InMemoryDiscoveryServiceTest.java |  67 ++
 .../twill/discovery/ZKDiscoveryServiceTest.java | 253 ++++++
 .../src/test/resources/logback-test.xml         |  17 +
 twill-yarn/pom.xml                              | 127 +++
 .../internal/yarn/Hadoop20YarnAMClient.java     | 213 +++++
 .../internal/yarn/Hadoop20YarnAppClient.java    | 197 +++++
 .../yarn/Hadoop20YarnApplicationReport.java     | 107 +++
 .../yarn/Hadoop20YarnContainerInfo.java         |  70 ++
 .../yarn/Hadoop20YarnContainerStatus.java       |  53 ++
 .../yarn/Hadoop20YarnLaunchContext.java         |  99 +++
 .../yarn/Hadoop20YarnLocalResource.java         | 101 +++
 .../internal/yarn/Hadoop20YarnNMClient.java     | 121 +++
 .../twill/internal/yarn/ports/AMRMClient.java   | 149 ++++
 .../internal/yarn/ports/AMRMClientImpl.java     | 412 ++++++++++
 .../internal/yarn/ports/AllocationResponse.java |  38 +
 .../yarn/ports/AllocationResponses.java         | 111 +++
 .../internal/yarn/Hadoop21YarnAMClient.java     | 207 +++++
 .../internal/yarn/Hadoop21YarnAppClient.java    | 177 ++++
 .../yarn/Hadoop21YarnApplicationReport.java     | 107 +++
 .../yarn/Hadoop21YarnContainerInfo.java         |  70 ++
 .../yarn/Hadoop21YarnContainerStatus.java       |  53 ++
 .../yarn/Hadoop21YarnLaunchContext.java         |  99 +++
 .../yarn/Hadoop21YarnLocalResource.java         | 101 +++
 .../internal/yarn/Hadoop21YarnNMClient.java     |  99 +++
 .../apache/twill/filesystem/HDFSLocation.java   | 193 +++++
 .../twill/filesystem/HDFSLocationFactory.java   |  95 +++
 .../apache/twill/filesystem/package-info.java   |  21 +
 .../twill/internal/AbstractTwillService.java    | 141 ++++
 .../org/apache/twill/internal/ServiceMain.java  | 201 +++++
 .../ApplicationMasterLiveNodeData.java          |  46 ++
 .../appmaster/ApplicationMasterMain.java        |  85 ++
 .../ApplicationMasterProcessLauncher.java       |  73 ++
 .../appmaster/ApplicationMasterService.java     | 799 +++++++++++++++++++
 .../appmaster/ApplicationSubmitter.java         |  31 +
 .../appmaster/BasicEventHandlerContext.java     |  38 +
 .../internal/appmaster/ExpectedContainers.java  |  82 ++
 .../appmaster/LoggerContextListenerAdapter.java |  56 ++
 .../internal/appmaster/ProvisionRequest.java    |  52 ++
 .../appmaster/RunnableContainerRequest.java     |  58 ++
 .../appmaster/RunnableProcessLauncher.java      |  93 +++
 .../internal/appmaster/RunningContainers.java   | 427 ++++++++++
 .../internal/appmaster/TrackerService.java      | 222 ++++++
 .../twill/internal/appmaster/package-info.java  |  21 +
 .../internal/container/TwillContainerMain.java  | 182 +++++
 .../container/TwillContainerService.java        | 168 ++++
 .../yarn/AbstractYarnProcessLauncher.java       | 220 +++++
 .../yarn/VersionDetectYarnAMClientFactory.java  |  55 ++
 .../yarn/VersionDetectYarnAppClientFactory.java |  50 ++
 .../twill/internal/yarn/YarnAMClient.java       | 117 +++
 .../internal/yarn/YarnAMClientFactory.java      |  26 +
 .../twill/internal/yarn/YarnAppClient.java      |  45 ++
 .../internal/yarn/YarnAppClientFactory.java     |  28 +
 .../internal/yarn/YarnApplicationReport.java    | 126 +++
 .../twill/internal/yarn/YarnContainerInfo.java  |  28 +
 .../internal/yarn/YarnContainerStatus.java      |  34 +
 .../twill/internal/yarn/YarnLaunchContext.java  |  49 ++
 .../twill/internal/yarn/YarnLocalResource.java  | 115 +++
 .../twill/internal/yarn/YarnNMClient.java       |  37 +
 .../apache/twill/internal/yarn/YarnUtils.java   | 279 +++++++
 .../twill/internal/yarn/package-info.java       |  21 +
 .../twill/yarn/LocationSecureStoreUpdater.java  |  54 ++
 .../apache/twill/yarn/ResourceReportClient.java |  63 ++
 .../org/apache/twill/yarn/YarnSecureStore.java  |  42 +
 .../apache/twill/yarn/YarnTwillController.java  | 208 +++++
 .../twill/yarn/YarnTwillControllerFactory.java  |  34 +
 .../apache/twill/yarn/YarnTwillPreparer.java    | 600 ++++++++++++++
 .../twill/yarn/YarnTwillRunnerService.java      | 583 ++++++++++++++
 .../org/apache/twill/yarn/package-info.java     |  21 +
 .../src/main/resources/logback-template.xml     |  11 +
 .../java/org/apache/twill/yarn/BuggyServer.java |  41 +
 .../twill/yarn/DistributeShellTestRun.java      |  64 ++
 .../org/apache/twill/yarn/DistributedShell.java |  70 ++
 .../java/org/apache/twill/yarn/EchoServer.java  |  48 ++
 .../apache/twill/yarn/EchoServerTestRun.java    | 138 ++++
 .../twill/yarn/EnvironmentEchoServer.java       |  35 +
 .../twill/yarn/FailureRestartTestRun.java       | 133 +++
 .../org/apache/twill/yarn/LocalFileTestRun.java | 148 ++++
 .../twill/yarn/ProvisionTimeoutTestRun.java     | 128 +++
 .../twill/yarn/ResourceReportTestRun.java       | 268 +++++++
 .../org/apache/twill/yarn/SocketServer.java     | 133 +++
 .../apache/twill/yarn/TaskCompletedTestRun.java |  93 +++
 .../twill/yarn/TwillSpecificationTest.java      |  87 ++
 .../org/apache/twill/yarn/YarnTestSuite.java    | 127 +++
 twill-yarn/src/test/resources/header.txt        |   1 +
 twill-yarn/src/test/resources/logback-test.xml  |  17 +
 twill-zookeeper/pom.xml                         |  67 ++
 .../internal/zookeeper/BasicNodeChildren.java   |  66 ++
 .../twill/internal/zookeeper/BasicNodeData.java |  67 ++
 .../zookeeper/DefaultZKClientService.java       | 525 ++++++++++++
 .../zookeeper/FailureRetryZKClient.java         | 240 ++++++
 .../internal/zookeeper/InMemoryZKServer.java    | 198 +++++
 .../twill/internal/zookeeper/KillZKSession.java |  69 ++
 .../internal/zookeeper/NamespaceZKClient.java   | 163 ++++
 .../twill/internal/zookeeper/RetryUtils.java    |  50 ++
 .../zookeeper/RewatchOnExpireWatcher.java       | 207 +++++
 .../zookeeper/RewatchOnExpireZKClient.java      |  95 +++
 .../zookeeper/SettableOperationFuture.java      |  68 ++
 .../twill/internal/zookeeper/package-info.java  |  22 +
 .../twill/zookeeper/ForwardingZKClient.java     | 116 +++
 .../zookeeper/ForwardingZKClientService.java    |  78 ++
 .../apache/twill/zookeeper/NodeChildren.java    |  38 +
 .../org/apache/twill/zookeeper/NodeData.java    |  39 +
 .../apache/twill/zookeeper/OperationFuture.java |  33 +
 .../apache/twill/zookeeper/RetryStrategies.java | 117 +++
 .../apache/twill/zookeeper/RetryStrategy.java   |  48 ++
 .../org/apache/twill/zookeeper/ZKClient.java    | 161 ++++
 .../apache/twill/zookeeper/ZKClientService.java |  96 +++
 .../twill/zookeeper/ZKClientServices.java       | 145 ++++
 .../org/apache/twill/zookeeper/ZKClients.java   |  61 ++
 .../apache/twill/zookeeper/ZKOperations.java    | 355 ++++++++
 .../apache/twill/zookeeper/package-info.java    |  22 +
 .../twill/zookeeper/RetryStrategyTest.java      |  94 +++
 .../apache/twill/zookeeper/ZKClientTest.java    | 254 ++++++
 .../twill/zookeeper/ZKOperationsTest.java       |  63 ++
 .../src/test/resources/logback-test.xml         |  17 +
 yarn/pom.xml                                    | 127 ---
 .../internal/yarn/Hadoop20YarnAMClient.java     | 213 -----
 .../internal/yarn/Hadoop20YarnAppClient.java    | 197 -----
 .../yarn/Hadoop20YarnApplicationReport.java     | 107 ---
 .../yarn/Hadoop20YarnContainerInfo.java         |  70 --
 .../yarn/Hadoop20YarnContainerStatus.java       |  53 --
 .../yarn/Hadoop20YarnLaunchContext.java         |  99 ---
 .../yarn/Hadoop20YarnLocalResource.java         | 101 ---
 .../internal/yarn/Hadoop20YarnNMClient.java     | 121 ---
 .../twill/internal/yarn/ports/AMRMClient.java   | 149 ----
 .../internal/yarn/ports/AMRMClientImpl.java     | 412 ----------
 .../internal/yarn/ports/AllocationResponse.java |  38 -
 .../yarn/ports/AllocationResponses.java         | 111 ---
 .../internal/yarn/Hadoop21YarnAMClient.java     | 207 -----
 .../internal/yarn/Hadoop21YarnAppClient.java    | 177 ----
 .../yarn/Hadoop21YarnApplicationReport.java     | 107 ---
 .../yarn/Hadoop21YarnContainerInfo.java         |  70 --
 .../yarn/Hadoop21YarnContainerStatus.java       |  53 --
 .../yarn/Hadoop21YarnLaunchContext.java         |  99 ---
 .../yarn/Hadoop21YarnLocalResource.java         | 101 ---
 .../internal/yarn/Hadoop21YarnNMClient.java     |  99 ---
 .../apache/twill/filesystem/HDFSLocation.java   | 193 -----
 .../twill/filesystem/HDFSLocationFactory.java   |  95 ---
 .../apache/twill/filesystem/package-info.java   |  21 -
 .../twill/internal/AbstractTwillService.java    | 141 ----
 .../org/apache/twill/internal/ServiceMain.java  | 201 -----
 .../ApplicationMasterLiveNodeData.java          |  46 --
 .../appmaster/ApplicationMasterMain.java        |  85 --
 .../ApplicationMasterProcessLauncher.java       |  73 --
 .../appmaster/ApplicationMasterService.java     | 799 -------------------
 .../appmaster/ApplicationSubmitter.java         |  31 -
 .../appmaster/BasicEventHandlerContext.java     |  38 -
 .../internal/appmaster/ExpectedContainers.java  |  82 --
 .../appmaster/LoggerContextListenerAdapter.java |  56 --
 .../internal/appmaster/ProvisionRequest.java    |  52 --
 .../appmaster/RunnableContainerRequest.java     |  58 --
 .../appmaster/RunnableProcessLauncher.java      |  93 ---
 .../internal/appmaster/RunningContainers.java   | 427 ----------
 .../internal/appmaster/TrackerService.java      | 222 ------
 .../twill/internal/appmaster/package-info.java  |  21 -
 .../internal/container/TwillContainerMain.java  | 182 -----
 .../container/TwillContainerService.java        | 168 ----
 .../yarn/AbstractYarnProcessLauncher.java       | 220 -----
 .../yarn/VersionDetectYarnAMClientFactory.java  |  55 --
 .../yarn/VersionDetectYarnAppClientFactory.java |  50 --
 .../twill/internal/yarn/YarnAMClient.java       | 117 ---
 .../internal/yarn/YarnAMClientFactory.java      |  26 -
 .../twill/internal/yarn/YarnAppClient.java      |  45 --
 .../internal/yarn/YarnAppClientFactory.java     |  28 -
 .../internal/yarn/YarnApplicationReport.java    | 126 ---
 .../twill/internal/yarn/YarnContainerInfo.java  |  28 -
 .../internal/yarn/YarnContainerStatus.java      |  34 -
 .../twill/internal/yarn/YarnLaunchContext.java  |  49 --
 .../twill/internal/yarn/YarnLocalResource.java  | 115 ---
 .../twill/internal/yarn/YarnNMClient.java       |  37 -
 .../apache/twill/internal/yarn/YarnUtils.java   | 279 -------
 .../twill/internal/yarn/package-info.java       |  21 -
 .../twill/yarn/LocationSecureStoreUpdater.java  |  54 --
 .../apache/twill/yarn/ResourceReportClient.java |  63 --
 .../org/apache/twill/yarn/YarnSecureStore.java  |  42 -
 .../apache/twill/yarn/YarnTwillController.java  | 208 -----
 .../twill/yarn/YarnTwillControllerFactory.java  |  34 -
 .../apache/twill/yarn/YarnTwillPreparer.java    | 600 --------------
 .../twill/yarn/YarnTwillRunnerService.java      | 583 --------------
 .../org/apache/twill/yarn/package-info.java     |  21 -
 yarn/src/main/resources/logback-template.xml    |  11 -
 .../java/org/apache/twill/yarn/BuggyServer.java |  41 -
 .../twill/yarn/DistributeShellTestRun.java      |  64 --
 .../org/apache/twill/yarn/DistributedShell.java |  70 --
 .../java/org/apache/twill/yarn/EchoServer.java  |  48 --
 .../apache/twill/yarn/EchoServerTestRun.java    | 138 ----
 .../twill/yarn/EnvironmentEchoServer.java       |  35 -
 .../twill/yarn/FailureRestartTestRun.java       | 133 ---
 .../org/apache/twill/yarn/LocalFileTestRun.java | 148 ----
 .../twill/yarn/ProvisionTimeoutTestRun.java     | 128 ---
 .../twill/yarn/ResourceReportTestRun.java       | 268 -------
 .../org/apache/twill/yarn/SocketServer.java     | 133 ---
 .../apache/twill/yarn/TaskCompletedTestRun.java |  93 ---
 .../twill/yarn/TwillSpecificationTest.java      |  87 --
 .../org/apache/twill/yarn/YarnTestSuite.java    | 127 ---
 yarn/src/test/resources/header.txt              |   1 -
 yarn/src/test/resources/logback-test.xml        |  17 -
 zookeeper/pom.xml                               |  67 --
 .../internal/zookeeper/BasicNodeChildren.java   |  66 --
 .../twill/internal/zookeeper/BasicNodeData.java |  67 --
 .../zookeeper/DefaultZKClientService.java       | 525 ------------
 .../zookeeper/FailureRetryZKClient.java         | 240 ------
 .../internal/zookeeper/InMemoryZKServer.java    | 198 -----
 .../twill/internal/zookeeper/KillZKSession.java |  69 --
 .../internal/zookeeper/NamespaceZKClient.java   | 163 ----
 .../twill/internal/zookeeper/RetryUtils.java    |  50 --
 .../zookeeper/RewatchOnExpireWatcher.java       | 207 -----
 .../zookeeper/RewatchOnExpireZKClient.java      |  95 ---
 .../zookeeper/SettableOperationFuture.java      |  68 --
 .../twill/internal/zookeeper/package-info.java  |  22 -
 .../twill/zookeeper/ForwardingZKClient.java     | 116 ---
 .../zookeeper/ForwardingZKClientService.java    |  78 --
 .../apache/twill/zookeeper/NodeChildren.java    |  38 -
 .../org/apache/twill/zookeeper/NodeData.java    |  39 -
 .../apache/twill/zookeeper/OperationFuture.java |  33 -
 .../apache/twill/zookeeper/RetryStrategies.java | 117 ---
 .../apache/twill/zookeeper/RetryStrategy.java   |  48 --
 .../org/apache/twill/zookeeper/ZKClient.java    | 161 ----
 .../apache/twill/zookeeper/ZKClientService.java |  96 ---
 .../twill/zookeeper/ZKClientServices.java       | 145 ----
 .../org/apache/twill/zookeeper/ZKClients.java   |  61 --
 .../apache/twill/zookeeper/ZKOperations.java    | 355 --------
 .../apache/twill/zookeeper/package-info.java    |  22 -
 .../twill/zookeeper/RetryStrategyTest.java      |  94 ---
 .../apache/twill/zookeeper/ZKClientTest.java    | 254 ------
 .../twill/zookeeper/ZKOperationsTest.java       |  63 --
 zookeeper/src/test/resources/logback-test.xml   |  17 -
 532 files changed, 27811 insertions(+), 27566 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/api/pom.xml
----------------------------------------------------------------------
diff --git a/api/pom.xml b/api/pom.xml
deleted file mode 100644
index 49b92c8..0000000
--- a/api/pom.xml
+++ /dev/null
@@ -1,54 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-    <modelVersion>4.0.0</modelVersion>
-
-    <parent>
-        <groupId>org.apache.twill</groupId>
-        <artifactId>twill-parent</artifactId>
-        <version>0.1.0-SNAPSHOT</version>
-    </parent>
-
-    <artifactId>twill-api</artifactId>
-    <packaging>jar</packaging>
-    <name>Twill API</name>
-
-    <dependencies>
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>twill-common</artifactId>
-            <version>${project.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>twill-discovery-api</artifactId>
-            <version>${project.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>com.google.guava</groupId>
-            <artifactId>guava</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>com.google.code.findbugs</groupId>
-            <artifactId>jsr305</artifactId>
-        </dependency>
-    </dependencies>
-</project>

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/api/src/main/java/org/apache/twill/api/AbstractTwillRunnable.java
----------------------------------------------------------------------
diff --git a/api/src/main/java/org/apache/twill/api/AbstractTwillRunnable.java b/api/src/main/java/org/apache/twill/api/AbstractTwillRunnable.java
deleted file mode 100644
index 67cec0a..0000000
--- a/api/src/main/java/org/apache/twill/api/AbstractTwillRunnable.java
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.api;
-
-import com.google.common.collect.ImmutableMap;
-
-import java.util.Map;
-
-/**
- * This abstract class provides default implementation of the {@link TwillRunnable}.
- */
-public abstract class AbstractTwillRunnable implements TwillRunnable {
-
-  private Map<String, String> args;
-  private TwillContext context;
-
-  protected AbstractTwillRunnable() {
-    this.args = ImmutableMap.of();
-  }
-
-  protected AbstractTwillRunnable(Map<String, String> args) {
-    this.args = ImmutableMap.copyOf(args);
-  }
-
-  @Override
-  public TwillRunnableSpecification configure() {
-    return TwillRunnableSpecification.Builder.with()
-      .setName(getClass().getSimpleName())
-      .withConfigs(args)
-      .build();
-  }
-
-  @Override
-  public void initialize(TwillContext context) {
-    this.context = context;
-    this.args = context.getSpecification().getConfigs();
-  }
-
-  @Override
-  public void handleCommand(org.apache.twill.api.Command command) throws Exception {
-    // No-op by default. Left for children class to override.
-  }
-
-  @Override
-  public void destroy() {
-    // No-op by default. Left for children class to override.
-  }
-
-  protected Map<String, String> getArguments() {
-    return args;
-  }
-
-  protected String getArgument(String key) {
-    return args.get(key);
-  }
-
-  protected TwillContext getContext() {
-    return context;
-  }
-}
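
For reference, a minimal sketch (not part of this diff) of how the AbstractTwillRunnable above is typically extended. It assumes the run() and stop() methods declared by TwillRunnable, which are not shown in this hunk; the class name and the "message" argument key are made-up example names:

import org.apache.twill.api.AbstractTwillRunnable;

public class EchoRunnable extends AbstractTwillRunnable {

  private volatile boolean stopped = false;

  @Override
  public void run() {
    // Stand-in work loop; a real runnable would serve requests here.
    while (!stopped) {
      System.out.println("echo: " + getArgument("message"));
      try {
        Thread.sleep(1000);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        break;
      }
    }
  }

  @Override
  public void stop() {
    stopped = true;
  }
}

The base class already supplies configure(), initialize(), handleCommand() and destroy(), so a subclass only overrides what it needs.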

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/api/src/main/java/org/apache/twill/api/Command.java
----------------------------------------------------------------------
diff --git a/api/src/main/java/org/apache/twill/api/Command.java b/api/src/main/java/org/apache/twill/api/Command.java
deleted file mode 100644
index b23b3a8..0000000
--- a/api/src/main/java/org/apache/twill/api/Command.java
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.api;
-
-import com.google.common.base.Objects;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableMap;
-
-import java.util.Map;
-
-/**
- * Represents command objects.
- */
-public interface Command {
-
-  String getCommand();
-
-  Map<String, String> getOptions();
-
-  /**
-   * Builder for creating {@link Command} object.
-   */
-  static final class Builder {
-
-    private final String command;
-    private final ImmutableMap.Builder<String, String> options = ImmutableMap.builder();
-
-    public static Builder of(String command) {
-      Preconditions.checkArgument(command != null, "Command cannot be null.");
-      return new Builder(command);
-    }
-
-    public Builder addOption(String key, String value) {
-      options.put(key, value);
-      return this;
-    }
-
-    public Builder addOptions(Map<String, String> map) {
-      options.putAll(map);
-      return this;
-    }
-
-    public Command build() {
-      return new SimpleCommand(command, options.build());
-    }
-
-    private Builder(String command) {
-      this.command = command;
-    }
-
-    /**
-     * Simple implementation of {@link org.apache.twill.api.Command}.
-     */
-    private static final class SimpleCommand implements Command {
-      private final String command;
-      private final Map<String, String> options;
-
-      SimpleCommand(String command, Map<String, String> options) {
-        this.command = command;
-        this.options = options;
-      }
-
-      @Override
-      public String getCommand() {
-        return command;
-      }
-
-      @Override
-      public Map<String, String> getOptions() {
-        return options;
-      }
-
-      @Override
-      public int hashCode() {
-        return Objects.hashCode(command, options);
-      }
-
-      @Override
-      public String toString() {
-        return Objects.toStringHelper(Command.class)
-          .add("command", command)
-          .add("options", options)
-          .toString();
-      }
-
-      @Override
-      public boolean equals(Object obj) {
-        if (obj == this) {
-          return true;
-        }
-        if (!(obj instanceof Command)) {
-          return false;
-        }
-        Command other = (Command) obj;
-        return command.equals(other.getCommand()) && options.equals(other.getOptions());
-      }
-    }
-  }
-}
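
An illustrative use of the Command.Builder removed above (the command name and option values are placeholders, not part of the diff):

import org.apache.twill.api.Command;

public class CommandExample {
  public static void main(String[] args) {
    // Builds an immutable command with a name and two options.
    Command flush = Command.Builder.of("flush")
      .addOption("path", "/tmp/cache")
      .addOption("force", "true")
      .build();

    System.out.println(flush.getCommand());  // prints "flush"
    System.out.println(flush.getOptions());  // prints {path=/tmp/cache, force=true}
  }
}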

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/api/src/main/java/org/apache/twill/api/EventHandler.java
----------------------------------------------------------------------
diff --git a/api/src/main/java/org/apache/twill/api/EventHandler.java b/api/src/main/java/org/apache/twill/api/EventHandler.java
deleted file mode 100644
index ede5b65..0000000
--- a/api/src/main/java/org/apache/twill/api/EventHandler.java
+++ /dev/null
@@ -1,146 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.api;
-
-import com.google.common.collect.ImmutableMap;
-
-import java.util.Map;
-import java.util.concurrent.TimeUnit;
-
-/**
- * A callback handler for acting on application events related to the {@link TwillRunnable} lifecycle.
- */
-public abstract class EventHandler {
-
-  protected EventHandlerContext context;
-
-  /**
-   * Represents the action to take upon a runnable launch timeout.
-   */
-  public static final class TimeoutAction {
-
-    // Next timeout in milliseconds.
-    private final long timeout;
-
-    /**
-     * Creates a {@link TimeoutAction} to indicate aborting the application.
-     */
-    public static TimeoutAction abort() {
-      return new TimeoutAction(-1);
-    }
-
-    /**
-     * Creates a {@link TimeoutAction} to indicate checking again after the given time has passed.
-     * @param elapse Time to elapse before checking for the timeout again.
-     * @param unit Unit of the elapse time.
-     */
-    public static TimeoutAction recheck(long elapse, TimeUnit unit) {
-      return new TimeoutAction(TimeUnit.MILLISECONDS.convert(elapse, unit));
-    }
-
-    private TimeoutAction(long timeout) {
-      this.timeout = timeout;
-    }
-
-    /**
-     * Returns the timeout in milliseconds, or {@code -1} if the application should be aborted.
-     */
-    public long getTimeout() {
-      return timeout;
-    }
-  }
-
-  /**
-   * This class holds information about a launch timeout event.
-   */
-  public static final class TimeoutEvent {
-    private final String runnableName;
-    private final int expectedInstances;
-    private final int actualInstances;
-    private final long requestTime;
-
-    public TimeoutEvent(String runnableName, int expectedInstances, int actualInstances, long requestTime) {
-      this.runnableName = runnableName;
-      this.expectedInstances = expectedInstances;
-      this.actualInstances = actualInstances;
-      this.requestTime = requestTime;
-    }
-
-    public String getRunnableName() {
-      return runnableName;
-    }
-
-    public int getExpectedInstances() {
-      return expectedInstances;
-    }
-
-    public int getActualInstances() {
-      return actualInstances;
-    }
-
-    public long getRequestTime() {
-      return requestTime;
-    }
-  }
-
-  /**
-   * Returns an {@link EventHandlerSpecification} for configuring this handler class.
-   */
-  public EventHandlerSpecification configure() {
-    return new EventHandlerSpecification() {
-      @Override
-      public String getClassName() {
-        return EventHandler.this.getClass().getName();
-      }
-
-      @Override
-      public Map<String, String> getConfigs() {
-        return EventHandler.this.getConfigs();
-      }
-    };
-  }
-
-  /**
-   * Invoked by the application to initialize this EventHandler instance.
-   * @param context The {@link EventHandlerContext} for this handler.
-   */
-  public void initialize(EventHandlerContext context) {
-    this.context = context;
-  }
-
-  /**
-   * Invoked by the application when shutting down.
-   */
-  public void destroy() {
-    // No-op
-  }
-
-  /**
-   * Invoked when the number of expected instances doesn't match the number of actual instances.
-   * @param timeoutEvents An Iterable of {@link TimeoutEvent} containing information about runnable launch timeouts.
-   * @return A {@link TimeoutAction} indicating the action to take.
-   */
-  public abstract TimeoutAction launchTimeout(Iterable<TimeoutEvent> timeoutEvents);
-
-  /**
-   * Returns set of configurations available at runtime for access.
-   */
-  protected Map<String, String> getConfigs() {
-    return ImmutableMap.of();
-  }
-}
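
A sketch of a concrete EventHandler based only on the abstract class above: it aborts the application when any runnable has waited for containers longer than a threshold, and otherwise asks to be called back later. The one-minute threshold and ten-second recheck interval are arbitrary example values:

import org.apache.twill.api.EventHandler;

import java.util.concurrent.TimeUnit;

public class AbortOnTimeoutHandler extends EventHandler {

  private static final long MAX_WAIT_MS = TimeUnit.MINUTES.toMillis(1);

  @Override
  public TimeoutAction launchTimeout(Iterable<TimeoutEvent> timeoutEvents) {
    long now = System.currentTimeMillis();
    for (TimeoutEvent event : timeoutEvents) {
      // Abort if any runnable has been waiting for containers longer than the threshold.
      if (now - event.getRequestTime() > MAX_WAIT_MS) {
        return TimeoutAction.abort();
      }
    }
    // Otherwise check again in ten seconds.
    return TimeoutAction.recheck(10, TimeUnit.SECONDS);
  }
}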

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/api/src/main/java/org/apache/twill/api/EventHandlerContext.java
----------------------------------------------------------------------
diff --git a/api/src/main/java/org/apache/twill/api/EventHandlerContext.java b/api/src/main/java/org/apache/twill/api/EventHandlerContext.java
deleted file mode 100644
index 8e58af6..0000000
--- a/api/src/main/java/org/apache/twill/api/EventHandlerContext.java
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.api;
-
-/**
- * Represents runtime context for {@link EventHandler}.
- */
-public interface EventHandlerContext {
-
-  EventHandlerSpecification getSpecification();
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/api/src/main/java/org/apache/twill/api/EventHandlerSpecification.java
----------------------------------------------------------------------
diff --git a/api/src/main/java/org/apache/twill/api/EventHandlerSpecification.java b/api/src/main/java/org/apache/twill/api/EventHandlerSpecification.java
deleted file mode 100644
index 190f222..0000000
--- a/api/src/main/java/org/apache/twill/api/EventHandlerSpecification.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.api;
-
-import java.util.Map;
-
-/**
- * Specification for {@link EventHandler}.
- */
-public interface EventHandlerSpecification {
-
-  String getClassName();
-
-  Map<String, String> getConfigs();
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/api/src/main/java/org/apache/twill/api/LocalFile.java
----------------------------------------------------------------------
diff --git a/api/src/main/java/org/apache/twill/api/LocalFile.java b/api/src/main/java/org/apache/twill/api/LocalFile.java
deleted file mode 100644
index df35a3b..0000000
--- a/api/src/main/java/org/apache/twill/api/LocalFile.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.api;
-
-import javax.annotation.Nullable;
-import java.net.URI;
-
-/**
- * This interface represents a local file that will be available for the container running a {@link TwillRunnable}.
- */
-public interface LocalFile {
-
-  String getName();
-
-  URI getURI();
-
-  /**
-   * Returns the last modified time of the file or {@code -1} if unknown.
-   */
-  long getLastModified();
-
-  /**
-   * Returns the size of the file or {@code -1} if unknown.
-   */
-  long getSize();
-
-  boolean isArchive();
-
-  @Nullable
-  String getPattern();
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/api/src/main/java/org/apache/twill/api/ResourceReport.java
----------------------------------------------------------------------
diff --git a/api/src/main/java/org/apache/twill/api/ResourceReport.java b/api/src/main/java/org/apache/twill/api/ResourceReport.java
deleted file mode 100644
index 0d63378..0000000
--- a/api/src/main/java/org/apache/twill/api/ResourceReport.java
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.api;
-
-import java.util.Collection;
-import java.util.Map;
-
-/**
- * This interface provides a snapshot of the resources an application is using
- * broken down by each runnable.
- */
-public interface ResourceReport {
-  /**
-   * Get all the run resources being used by all instances of the specified runnable.
-   *
-   * @param runnableName the runnable name.
-   * @return resources being used by all instances of the runnable.
-   */
-  public Collection<TwillRunResources> getRunnableResources(String runnableName);
-
-  /**
-   * Get all the run resources being used across all runnables.
-   *
-   * @return all run resources used by all instances of all runnables.
-   */
-  public Map<String, Collection<TwillRunResources>> getResources();
-
-  /**
-   * Get the resources application master is using.
-   *
-   * @return resources being used by the application master.
-   */
-  public TwillRunResources getAppMasterResources();
-
-  /**
-   * Get the id of the application master.
-   *
-   * @return id of the application master.
-   */
-  public String getApplicationId();
-}
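
A small sketch of consuming the ResourceReport interface above. It assumes a report instance obtained elsewhere (for example from a running application's controller), which is outside this hunk:

import org.apache.twill.api.ResourceReport;
import org.apache.twill.api.TwillRunResources;

import java.util.Collection;
import java.util.Map;

public final class ResourceReportPrinter {

  // Prints per-runnable instance counts from a report obtained elsewhere.
  public static void print(ResourceReport report) {
    System.out.println("Application: " + report.getApplicationId());
    for (Map.Entry<String, Collection<TwillRunResources>> entry : report.getResources().entrySet()) {
      System.out.println("Runnable " + entry.getKey() + ": " + entry.getValue().size() + " instance(s)");
    }
  }
}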

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/api/src/main/java/org/apache/twill/api/ResourceSpecification.java
----------------------------------------------------------------------
diff --git a/api/src/main/java/org/apache/twill/api/ResourceSpecification.java b/api/src/main/java/org/apache/twill/api/ResourceSpecification.java
deleted file mode 100644
index b40682f..0000000
--- a/api/src/main/java/org/apache/twill/api/ResourceSpecification.java
+++ /dev/null
@@ -1,152 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.api;
-
-import org.apache.twill.internal.DefaultResourceSpecification;
-
-/**
- * This interface provides specifications for resource requirements, including the number of virtual CPU cores, the amount of memory, and the number of instances.
- */
-public interface ResourceSpecification {
-
-  final ResourceSpecification BASIC = Builder.with().setVirtualCores(1).setMemory(512, SizeUnit.MEGA).build();
-
-  /**
-   * Unit for specifying memory size.
-   */
-  enum SizeUnit {
-    MEGA(1),
-    GIGA(1024);
-
-    private final int multiplier;
-
-    private SizeUnit(int multiplier) {
-      this.multiplier = multiplier;
-    }
-  }
-
-  /**
-   * Returns the number of virtual CPU cores. DEPRECATED, use getVirtualCores instead.
-   * @return Number of virtual CPU cores.
-   */
-  @Deprecated
-  int getCores();
-
-  /**
-   * Returns the number of virtual CPU cores.
-   * @return Number of virtual CPU cores.
-   */
-  int getVirtualCores();
-
-  /**
-   * Returns the memory size in MB.
-   * @return Memory size
-   */
-  int getMemorySize();
-
-  /**
-   * Returns the uplink bandwidth in Mbps.
-   * @return Uplink bandwidth or -1 representing unlimited bandwidth.
-   */
-  int getUplink();
-
-  /**
-   * Returns the downlink bandwidth in Mbps.
-   * @return Downlink bandwidth or -1 representing unlimited bandwidth.
-   */
-  int getDownlink();
-
-  /**
-   * Returns number of execution instances.
-   * @return Number of execution instances.
-   */
-  int getInstances();
-
-  /**
-   * Builder for creating {@link ResourceSpecification}.
-   */
-  static final class Builder {
-
-    private int cores;
-    private int memory;
-    private int uplink = -1;
-    private int downlink = -1;
-    private int instances = 1;
-
-    public static CoreSetter with() {
-      return new Builder().new CoreSetter();
-    }
-
-    public final class CoreSetter {
-      @Deprecated
-      public MemorySetter setCores(int cores) {
-        Builder.this.cores = cores;
-        return new MemorySetter();
-      }
-
-      public MemorySetter setVirtualCores(int cores) {
-        Builder.this.cores = cores;
-        return new MemorySetter();
-      }
-    }
-
-    public final class MemorySetter {
-      public AfterMemory setMemory(int size, SizeUnit unit) {
-        Builder.this.memory = size * unit.multiplier;
-        return new AfterMemory();
-      }
-    }
-
-    public final class AfterMemory extends Build {
-      public AfterInstances setInstances(int instances) {
-        Builder.this.instances = instances;
-        return new AfterInstances();
-      }
-    }
-
-    public final class AfterInstances extends Build {
-      public AfterUplink setUplink(int uplink, SizeUnit unit) {
-        Builder.this.uplink = uplink * unit.multiplier;
-        return new AfterUplink();
-      }
-    }
-
-    public final class AfterUplink extends Build {
-      public AfterDownlink setDownlink(int downlink, SizeUnit unit) {
-        Builder.this.downlink = downlink * unit.multiplier;
-        return new AfterDownlink();
-      }
-    }
-
-    public final class AfterDownlink extends Build {
-
-      @Override
-      public ResourceSpecification build() {
-        return super.build();
-      }
-    }
-
-    public abstract class Build {
-      public ResourceSpecification build() {
-        return new DefaultResourceSpecification(cores, memory, instances, uplink, downlink);
-      }
-    }
-
-    private Builder() {}
-  }
-}
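
The fluent builder deleted above enforces an order: cores first, then memory, then optional instances, uplink and downlink. A usage sketch with arbitrary example sizes:

import org.apache.twill.api.ResourceSpecification;
import org.apache.twill.api.ResourceSpecification.SizeUnit;

public class ResourceSpecExample {
  public static void main(String[] args) {
    // 2 virtual cores, 1 GB of memory, 3 instances; uplink/downlink default to unlimited (-1).
    ResourceSpecification spec = ResourceSpecification.Builder.with()
      .setVirtualCores(2)
      .setMemory(1, SizeUnit.GIGA)
      .setInstances(3)
      .build();

    System.out.println(spec.getVirtualCores() + " cores, "
      + spec.getMemorySize() + " MB, "
      + spec.getInstances() + " instances");
  }
}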

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/api/src/main/java/org/apache/twill/api/RunId.java
----------------------------------------------------------------------
diff --git a/api/src/main/java/org/apache/twill/api/RunId.java b/api/src/main/java/org/apache/twill/api/RunId.java
deleted file mode 100644
index 7f3c4fe..0000000
--- a/api/src/main/java/org/apache/twill/api/RunId.java
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.api;
-
-/**
- * Represents the unique ID of a particular execution.
- */
-public interface RunId {
-
-  String getId();
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/api/src/main/java/org/apache/twill/api/RuntimeSpecification.java
----------------------------------------------------------------------
diff --git a/api/src/main/java/org/apache/twill/api/RuntimeSpecification.java b/api/src/main/java/org/apache/twill/api/RuntimeSpecification.java
deleted file mode 100644
index 99e11a4..0000000
--- a/api/src/main/java/org/apache/twill/api/RuntimeSpecification.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.api;
-
-import java.util.Collection;
-
-/**
- * Specifications for runtime requirements.
- */
-public interface RuntimeSpecification {
-
-  String getName();
-
-  TwillRunnableSpecification getRunnableSpecification();
-
-  ResourceSpecification getResourceSpecification();
-
-  Collection<LocalFile> getLocalFiles();
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/api/src/main/java/org/apache/twill/api/SecureStore.java
----------------------------------------------------------------------
diff --git a/api/src/main/java/org/apache/twill/api/SecureStore.java b/api/src/main/java/org/apache/twill/api/SecureStore.java
deleted file mode 100644
index 707a152..0000000
--- a/api/src/main/java/org/apache/twill/api/SecureStore.java
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.api;
-
-/**
- * Represents storage of secure tokens.
- */
-public interface SecureStore {
-
-  <T> T getStore();
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/api/src/main/java/org/apache/twill/api/SecureStoreUpdater.java
----------------------------------------------------------------------
diff --git a/api/src/main/java/org/apache/twill/api/SecureStoreUpdater.java b/api/src/main/java/org/apache/twill/api/SecureStoreUpdater.java
deleted file mode 100644
index 5912247..0000000
--- a/api/src/main/java/org/apache/twill/api/SecureStoreUpdater.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.api;
-
-/**
- * Represents a class capable of creating updates of {@link SecureStore} for live applications.
- */
-public interface SecureStoreUpdater {
-
-  /**
-   * Invoked when an update to SecureStore is needed.
-   *
-   * @param application The name of the application.
-   * @param runId The runId of the live application.
-   * @return A new {@link SecureStore}.
-   */
-  SecureStore update(String application, RunId runId);
-}

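A minimal sketch of an updater, assuming a hypothetical token object; the fetchRenewedCredentials helper below is illustrative only and not part of the API in this diff:

    import org.apache.twill.api.RunId;
    import org.apache.twill.api.SecureStore;
    import org.apache.twill.api.SecureStoreUpdater;

    public final class RenewingSecureStoreUpdater implements SecureStoreUpdater {

      @Override
      public SecureStore update(String application, RunId runId) {
        // Obtain renewed tokens for the given application run (hypothetical helper).
        final Object renewedCredentials = fetchRenewedCredentials(application, runId);
        return new SecureStore() {
          @SuppressWarnings("unchecked")
          @Override
          public <T> T getStore() {
            return (T) renewedCredentials;
          }
        };
      }

      private Object fetchRenewedCredentials(String application, RunId runId) {
        // Placeholder for fetching new delegation tokens.
        return new Object();
      }
    }
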
http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/api/src/main/java/org/apache/twill/api/ServiceAnnouncer.java
----------------------------------------------------------------------
diff --git a/api/src/main/java/org/apache/twill/api/ServiceAnnouncer.java b/api/src/main/java/org/apache/twill/api/ServiceAnnouncer.java
deleted file mode 100644
index d8e4358..0000000
--- a/api/src/main/java/org/apache/twill/api/ServiceAnnouncer.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.api;
-
-import org.apache.twill.common.Cancellable;
-
-/**
- * This interface provides a way to announce the availability of a service.
- */
-public interface ServiceAnnouncer {
-
-  /**
-   * Registers an endpoint that can be discovered by an external party.
-   * @param serviceName Name of the endpoint
-   * @param port Port of the endpoint.
-   */
-  Cancellable announce(String serviceName, int port);
-}

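A sketch of announcing an endpoint and cancelling the announcement on shutdown; the service name and port are illustrative:

    import org.apache.twill.api.ServiceAnnouncer;
    import org.apache.twill.common.Cancellable;

    public final class AnnouncerExample {

      private Cancellable announcement;

      public void start(ServiceAnnouncer announcer) {
        // Register the endpoint so that external parties can discover it by name.
        announcement = announcer.announce("web", 8080);
      }

      public void stop() {
        if (announcement != null) {
          // Withdraw the endpoint when the service goes down.
          announcement.cancel();
        }
      }
    }
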
http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/api/src/main/java/org/apache/twill/api/ServiceController.java
----------------------------------------------------------------------
diff --git a/api/src/main/java/org/apache/twill/api/ServiceController.java b/api/src/main/java/org/apache/twill/api/ServiceController.java
deleted file mode 100644
index 0ea64f9..0000000
--- a/api/src/main/java/org/apache/twill/api/ServiceController.java
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.api;
-
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.Service;
-
-import java.util.concurrent.Executor;
-
-/**
- * This interface is for controlling a remote running service.
- */
-public interface ServiceController extends Service {
-
-  /**
-   * Returns the {@link RunId} of the running application.
-   */
-  RunId getRunId();
-
-  /**
-   * Sends a user command to the running application.
-   * @param command The command to send.
-   * @return A {@link ListenableFuture} that will be completed when the command is successfully processed
-   *         by the target application.
-   */
-  ListenableFuture<Command> sendCommand(Command command);
-
-  /**
-   * Sends a user command to the given runnable of the running application.
-   * @param runnableName Name of the {@link TwillRunnable}.
-   * @param command The command to send.
-   * @return A {@link ListenableFuture} that will be completed when the command is successfully processed
-   *         by the target runnable.
-   */
-  ListenableFuture<Command> sendCommand(String runnableName, Command command);
-
-  /**
-   * Requests to forcefully kill a running service.
-   */
-  void kill();
-
-  /**
-   * Registers a {@link Listener} to be {@linkplain Executor#execute executed} on the given
-   * executor.  The listener will have the corresponding transition method called whenever the
-   * service changes state. When added, the current state of the service will be reflected through
-   * a callback to the listener. Methods on the listener are guaranteed to be called no more than once.
-   *
-   * @param listener the listener to run when the service changes state
-   * @param executor the executor in which the listener's callback methods will be run. For fast,
-   *     lightweight listeners that would be safe to execute in any thread, consider
-   *     {@link com.google.common.util.concurrent.MoreExecutors#sameThreadExecutor}.
-   */
-  @Override
-  void addListener(Listener listener, Executor executor);
-}

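A sketch of driving a controller: sending a command to a runnable and blocking until it has been processed. The Command instance is passed in, since its builder is not part of this file:

    import org.apache.twill.api.Command;
    import org.apache.twill.api.ServiceController;
    import com.google.common.util.concurrent.Futures;

    public final class ControllerExample {

      // Sends a command to one runnable and waits for it to be processed.
      public static void sendAndWait(ServiceController controller, String runnableName, Command command) {
        Futures.getUnchecked(controller.sendCommand(runnableName, command));
        System.out.println("Command processed by " + runnableName
                             + " in run " + controller.getRunId().getId());
      }
    }
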
http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/api/src/main/java/org/apache/twill/api/TwillApplication.java
----------------------------------------------------------------------
diff --git a/api/src/main/java/org/apache/twill/api/TwillApplication.java b/api/src/main/java/org/apache/twill/api/TwillApplication.java
deleted file mode 100644
index b49e7a7..0000000
--- a/api/src/main/java/org/apache/twill/api/TwillApplication.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.api;
-
-/**
- * Represents an application that can be launched by Twill.
- */
-public interface TwillApplication {
-
-  /**
-   * Invoked when launching the application on the client side.
-   * @return A {@link TwillSpecification} specifying properties about this application.
-   */
-  TwillSpecification configure();
-}

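A sketch of an application that bundles a single runnable. TwillSpecification and its builder are not part of this diff, so the builder chain below (with/setName/withRunnable/add/noOrder/build) is assumed:

    import org.apache.twill.api.TwillApplication;
    import org.apache.twill.api.TwillRunnable;
    import org.apache.twill.api.TwillSpecification;

    public final class SingleRunnableApplication implements TwillApplication {

      private final TwillRunnable runnable;

      public SingleRunnableApplication(TwillRunnable runnable) {
        this.runnable = runnable;
      }

      @Override
      public TwillSpecification configure() {
        // Builder method names are assumed; see the TwillSpecification class for the actual API.
        return TwillSpecification.Builder.with()
          .setName("SingleRunnableApp")
          .withRunnable()
            .add(runnable)
          .noOrder()
          .build();
      }
    }
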
http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/api/src/main/java/org/apache/twill/api/TwillContext.java
----------------------------------------------------------------------
diff --git a/api/src/main/java/org/apache/twill/api/TwillContext.java b/api/src/main/java/org/apache/twill/api/TwillContext.java
deleted file mode 100644
index b4ddb6e..0000000
--- a/api/src/main/java/org/apache/twill/api/TwillContext.java
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.api;
-
-import java.net.InetAddress;
-
-/**
- * Represents the runtime context of a {@link TwillRunnable}.
- */
-public interface TwillContext extends ServiceAnnouncer {
-
-  /**
-   * Returns the {@link RunId} of this running instance of {@link TwillRunnable}.
-   */
-  RunId getRunId();
-
-  /**
-   * Returns the {@link RunId} of this running application.
-   */
-  RunId getApplicationRunId();
-
-  /**
-   * Returns the number of running instances assigned for this {@link TwillRunnable}.
-   */
-  int getInstanceCount();
-
-  /**
-   * Returns the hostname that the runnable is running on.
-   */
-  InetAddress getHost();
-
-  /**
-   * Returns the runtime arguments that are passed to the {@link TwillRunnable}.
-   */
-  String[] getArguments();
-
-  /**
-   * Returns the runtime arguments that are passed to the {@link TwillApplication}.
-   */
-  String[] getApplicationArguments();
-
-  /**
-   * Returns the {@link TwillRunnableSpecification} that was created by {@link TwillRunnable#configure()}.
-   */
-  TwillRunnableSpecification getSpecification();
-
-  /**
-   * Returns an integer id from 0 to (instanceCount - 1).
-   */
-  int getInstanceId();
-
-  /**
-   * Returns the number of virtual cores the runnable is allowed to use.
-   */
-  int getVirtualCores();
-
-  /**
-   * Returns the amount of memory in MB the runnable is allowed to use.
-   */
-  int getMaxMemoryMB();
-}

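A sketch that uses only the getters declared above to log where and how an instance is running:

    import org.apache.twill.api.TwillContext;

    public final class ContextLogger {

      public static void logContext(TwillContext context) {
        System.out.println(String.format(
          "Instance %d of %d on host %s, run %s (application run %s), %d cores / %d MB",
          context.getInstanceId(), context.getInstanceCount(),
          context.getHost().getHostName(),
          context.getRunId().getId(), context.getApplicationRunId().getId(),
          context.getVirtualCores(), context.getMaxMemoryMB()));
      }
    }
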
http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/api/src/main/java/org/apache/twill/api/TwillController.java
----------------------------------------------------------------------
diff --git a/api/src/main/java/org/apache/twill/api/TwillController.java b/api/src/main/java/org/apache/twill/api/TwillController.java
deleted file mode 100644
index f31d3f9..0000000
--- a/api/src/main/java/org/apache/twill/api/TwillController.java
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.api;
-
-import org.apache.twill.api.logging.LogHandler;
-import org.apache.twill.discovery.Discoverable;
-import com.google.common.util.concurrent.ListenableFuture;
-
-/**
- * For controlling a running application.
- */
-public interface TwillController extends ServiceController {
-
-  /**
-   * Adds a {@link LogHandler} for receiving application logs.
-   * @param handler The handler to add.
-   */
-  void addLogHandler(LogHandler handler);
-
-  /**
-   * Discovers the set of {@link Discoverable} endpoints that provides service for the given service name.
-   * @param serviceName Name of the service to discover.
-   * @return An {@link Iterable} that gives the latest set of {@link Discoverable} every time
-   *         {@link Iterable#iterator()} is invoked.
-   */
-  Iterable<Discoverable> discoverService(String serviceName);
-
-
-  /**
-   * Changes the number of running instances of a given runnable.
-   *
-   * @param runnable The name of the runnable.
-   * @param newCount Number of instances for the given runnable.
-   * @return A {@link ListenableFuture} that will be completed when the number of running instances has been
-   *         successfully changed. The future will carry the new count as the result. If there is any error
-   *         while changing instances, it'll be reflected in the future.
-   */
-  ListenableFuture<Integer> changeInstances(String runnable, int newCount);
-
-  /**
-   * Get a snapshot of the resources used by the application, broken down by each runnable.
-   *
-   * @return A {@link ResourceReport} containing information about resources used by the application.
-   */
-  ResourceReport getResourceReport();
-}

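A sketch of scaling a runnable through the controller and checking that a resource report is available; the accessors of ResourceReport are not shown in this part of the diff, so only its presence is checked here:

    import org.apache.twill.api.ResourceReport;
    import org.apache.twill.api.TwillController;
    import com.google.common.util.concurrent.Futures;

    public final class ScalingExample {

      public static void scale(TwillController controller, String runnable, int instances) {
        // Blocks until the instance count change has been applied.
        Integer newCount = Futures.getUnchecked(controller.changeInstances(runnable, instances));
        System.out.println("Now running " + newCount + " instance(s) of " + runnable);

        // Snapshot of the resources used by the application, broken down by each runnable.
        ResourceReport report = controller.getResourceReport();
        System.out.println("Resource report available: " + (report != null));
      }
    }
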
http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/api/src/main/java/org/apache/twill/api/TwillPreparer.java
----------------------------------------------------------------------
diff --git a/api/src/main/java/org/apache/twill/api/TwillPreparer.java b/api/src/main/java/org/apache/twill/api/TwillPreparer.java
deleted file mode 100644
index b2a3ce2..0000000
--- a/api/src/main/java/org/apache/twill/api/TwillPreparer.java
+++ /dev/null
@@ -1,146 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.api;
-
-import org.apache.twill.api.logging.LogHandler;
-
-import java.net.URI;
-
-/**
- * This interface exposes methods to set up the Twill runtime environment and start a Twill application.
- */
-public interface TwillPreparer {
-
-  /**
-   * Adds a {@link LogHandler} for receiving an application log.
-   * @param handler The {@link LogHandler}.
-   * @return This {@link TwillPreparer}.
-   */
-  TwillPreparer addLogHandler(LogHandler handler);
-
-  /**
-   * Sets the user name that runs the application. The default value is taken from {@code "user.name"} by calling
-   * {@link System#getProperty(String)}.
-   * @param user User name
-   * @return This {@link TwillPreparer}.
-   *
-   * @deprecated This method will be removed in future version.
-   */
-  @Deprecated
-  TwillPreparer setUser(String user);
-
-  /**
-   * Sets the list of arguments that will be passed to the application. The arguments can be retrieved
-   * from {@link TwillContext#getApplicationArguments()}.
-   *
-   * @param args Array of arguments.
-   * @return This {@link TwillPreparer}.
-   */
-  TwillPreparer withApplicationArguments(String... args);
-
-  /**
-   * Sets the list of arguments that will be passed to the application. The arguments can be retrieved
-   * from {@link TwillContext#getApplicationArguments()}.
-   *
-   * @param args Iterable of arguments.
-   * @return This {@link TwillPreparer}.
-   */
-  TwillPreparer withApplicationArguments(Iterable<String> args);
-
-  /**
-   * Sets the list of arguments that will be passed to the {@link TwillRunnable} identified by the given name.
-   * The arguments can be retrieved from {@link TwillContext#getArguments()}.
-   *
-   * @param runnableName Name of the {@link TwillRunnable}.
-   * @param args Array of arguments.
-   * @return This {@link TwillPreparer}.
-   */
-  TwillPreparer withArguments(String runnableName, String...args);
-
-  /**
-   * Sets the list of arguments that will be passed to the {@link TwillRunnable} identified by the given name.
-   * The arguments can be retrieved from {@link TwillContext#getArguments()}.
-   *
-   * @param runnableName Name of the {@link TwillRunnable}.
-   * @param args Iterable of arguments.
-   * @return This {@link TwillPreparer}.
-   */
-  TwillPreparer withArguments(String runnableName, Iterable<String> args);
-
-  /**
-   * Adds extra classes that the application depends on and that are not traceable from the application itself.
-   * @see #withDependencies(Iterable)
-   * @return This {@link TwillPreparer}.
-   */
-  TwillPreparer withDependencies(Class<?>...classes);
-
-  /**
-   * Adds extra classes that the application depends on and that are not traceable from the application itself.
-   * E.g. Class name used in {@link Class#forName(String)}.
-   * @param classes set of classes to add to dependency list for generating the deployment jar.
-   * @return This {@link TwillPreparer}.
-   */
-  TwillPreparer withDependencies(Iterable<Class<?>> classes);
-
-  /**
-   * Adds resources that will be available through the ClassLoader of the {@link TwillRunnable runnables}.
-   * @see #withResources(Iterable)
-   * @return This {@link TwillPreparer}.
-   */
-  TwillPreparer withResources(URI...resources);
-
-  /**
-   * Adds resources that will be available through the ClassLoader of the {@link TwillRunnable runnables}.
-   * Useful for adding extra resource files or libraries that are not traceable from the application itself.
-   * If the URI is a jar file, classes inside would be loadable by the ClassLoader. If the URI is a directory,
-   * everything underneath would be available.
-   *
-   * @param resources Set of URI to the resources.
-   * @return This {@link TwillPreparer}.
-   */
-  TwillPreparer withResources(Iterable<URI> resources);
-
-  /**
-   * Adds the set of paths to the classpath on the target machine for all runnables.
-   * @see #withClassPaths(Iterable)
-   * @return This {@link TwillPreparer}
-   */
-  TwillPreparer withClassPaths(String... classPaths);
-
-  /**
-   * Adds the set of paths to the classpath on the target machine for all runnables.
-   * Note that the paths are added as-is, without verification.
-   * @param classPaths Set of classpaths
-   * @return This {@link TwillPreparer}
-   */
-  TwillPreparer withClassPaths(Iterable<String> classPaths);
-
-  /**
-   * Adds security credentials for the runtime environment to give the application access to resources.
-   *
-   * @param secureStore Contains security token available for the runtime environment.
-   * @return This {@link TwillPreparer}.
-   */
-  TwillPreparer addSecureStore(SecureStore secureStore);
-
-  /**
-   * Starts the application.
-   * @return A {@link TwillController} for controlling the running application.
-   */
-  TwillController start();
-}

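A sketch of a typical preparer chain. Obtaining the preparer (e.g. from a TwillRunner) is outside this file; the runnable name "worker" and the arguments are illustrative:

    import org.apache.twill.api.TwillController;
    import org.apache.twill.api.TwillPreparer;

    public final class LaunchExample {

      // Configures arguments and an extra classpath entry, then starts the application.
      public static TwillController launch(TwillPreparer preparer) {
        return preparer
          .withApplicationArguments("--mode", "production")
          .withArguments("worker", "--threads", "4")
          .withClassPaths("/etc/hadoop/conf")
          .start();
      }
    }
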
http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/api/src/main/java/org/apache/twill/api/TwillRunResources.java
----------------------------------------------------------------------
diff --git a/api/src/main/java/org/apache/twill/api/TwillRunResources.java b/api/src/main/java/org/apache/twill/api/TwillRunResources.java
deleted file mode 100644
index 4c3d2e7..0000000
--- a/api/src/main/java/org/apache/twill/api/TwillRunResources.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.api;
-
-/**
- * Information about the container the {@link TwillRunnable}
- * is running in.
- */
-public interface TwillRunResources {
-
-  /**
-   * @return instance id of the runnable.
-   */
-  int getInstanceId();
-
-  /**
-   * @return number of cores the runnable is allowed to use.  YARN must be at least v2.1.0 and
-   *   it must be configured to use cgroups in order for this value to reflect the actual limit.
-   */
-  int getVirtualCores();
-
-  /**
-   * @return amount of memory in MB the runnable is allowed to use.
-   */
-  int getMemoryMB();
-
-  /**
-   * @return the host the runnable is running on.
-   */
-  String getHost();
-
-  /**
-   * @return id of the container the runnable is running in.
-   */
-  String getContainerId();
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/api/src/main/java/org/apache/twill/api/TwillRunnable.java
----------------------------------------------------------------------
diff --git a/api/src/main/java/org/apache/twill/api/TwillRunnable.java b/api/src/main/java/org/apache/twill/api/TwillRunnable.java
deleted file mode 100644
index 4350bfb..0000000
--- a/api/src/main/java/org/apache/twill/api/TwillRunnable.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.api;
-
-/**
- * The {@link TwillRunnable} interface should be implemented by any
- * class whose instances are intended to be executed in a Twill cluster.
- */
-public interface TwillRunnable extends Runnable {
-
-  /**
-   * Called at submission time. Executed on the client side.
-   * @return A {@link TwillRunnableSpecification} built by {@link TwillRunnableSpecification.Builder}.
-   */
-  TwillRunnableSpecification configure();
-
-  /**
-   * Called when the container process starts. Executed in the container machine.
-   * @param context Contains information about the runtime context.
-   */
-  void initialize(TwillContext context);
-
-  /**
-   * Called when a command is received. A normal return denotes the command has been processed successfully;
-   * otherwise an {@link Exception} should be thrown.
-   * @param command Contains details of the command.
-   * @throws Exception
-   */
-  void handleCommand(Command command) throws Exception;
-
-  /**
-   * Requests to stop the running service.
-   */
-  void stop();
-
-  /**
-   * Called when {@link TwillRunnable#run()} has completed. Useful for doing
-   * resource cleanup. This method only gets called if the call to {@link #initialize(TwillContext)} was
-   * successful.
-   */
-  void destroy();
-}

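A minimal runnable implementing the interface above; it simply blocks in run() until stop() is called. The TwillRunnableSpecification builder calls are assumed, since that class is not part of this diff:

    import org.apache.twill.api.Command;
    import org.apache.twill.api.TwillContext;
    import org.apache.twill.api.TwillRunnable;
    import org.apache.twill.api.TwillRunnableSpecification;

    import java.util.concurrent.CountDownLatch;

    public final class SleeperRunnable implements TwillRunnable {

      private final CountDownLatch stopLatch = new CountDownLatch(1);

      @Override
      public TwillRunnableSpecification configure() {
        // Builder method names (with/setName/noConfigs/build) are assumed.
        return TwillRunnableSpecification.Builder.with().setName("sleeper").noConfigs().build();
      }

      @Override
      public void initialize(TwillContext context) {
        // Nothing to set up for this example.
      }

      @Override
      public void handleCommand(Command command) throws Exception {
        // No commands supported; a normal return means the command was processed.
      }

      @Override
      public void run() {
        try {
          // Block until stop() is invoked.
          stopLatch.await();
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
        }
      }

      @Override
      public void stop() {
        stopLatch.countDown();
      }

      @Override
      public void destroy() {
        // Nothing to clean up.
      }
    }
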

[08/28] Making maven site works.

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-zookeeper/src/main/java/org/apache/twill/zookeeper/ZKOperations.java
----------------------------------------------------------------------
diff --git a/twill-zookeeper/src/main/java/org/apache/twill/zookeeper/ZKOperations.java b/twill-zookeeper/src/main/java/org/apache/twill/zookeeper/ZKOperations.java
new file mode 100644
index 0000000..6dcd1a7
--- /dev/null
+++ b/twill-zookeeper/src/main/java/org/apache/twill/zookeeper/ZKOperations.java
@@ -0,0 +1,355 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.zookeeper;
+
+import org.apache.twill.common.Cancellable;
+import org.apache.twill.common.Threads;
+import org.apache.twill.internal.zookeeper.SettableOperationFuture;
+import com.google.common.collect.Lists;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.SettableFuture;
+import org.apache.zookeeper.KeeperException;
+import org.apache.zookeeper.WatchedEvent;
+import org.apache.zookeeper.Watcher;
+import org.apache.zookeeper.data.Stat;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.List;
+import java.util.concurrent.CancellationException;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+/**
+ * Collection of helper methods for common operations that are usually needed when interacting with ZooKeeper.
+ */
+public final class ZKOperations {
+
+  private static final Logger LOG = LoggerFactory.getLogger(ZKOperations.class);
+
+  /**
+   * Represents a callback for ZK operation updates.
+   * @param <T> Type of updated data.
+   */
+  public interface Callback<T> {
+    void updated(T data);
+  }
+
+  /**
+   * Interface for defining callback method to receive node data updates.
+   */
+  public interface DataCallback extends Callback<NodeData> {
+    /**
+     * Invoked when data of the node changed.
+     * @param nodeData New data of the node, or {@code null} if the node has been deleted.
+     */
+    @Override
+    void updated(NodeData nodeData);
+  }
+
+  /**
+   * Interface for defining callback method to receive children nodes updates.
+   */
+  public interface ChildrenCallback extends Callback<NodeChildren> {
+    @Override
+    void updated(NodeChildren nodeChildren);
+  }
+
+  private interface Operation<T> {
+    ZKClient getZKClient();
+
+    OperationFuture<T> exec(String path, Watcher watcher);
+  }
+
+  /**
+   * Watch for data changes of the given path. The callback will be triggered whenever a change has been
+   * detected. Note that the callback won't see every single change, as that is not guaranteed by ZooKeeper.
+   * If the node doesn't exist, it will watch for its creation and then start watching for data changes.
+   * When the node is deleted afterwards, it goes back to watching for the node to be created again.
+   *
+   * @param zkClient The {@link ZKClient} for the operation
+   * @param path Path to watch
+   * @param callback Callback to be invoked when a data change is detected.
+   * @return A {@link Cancellable} to cancel the watch.
+   */
+  public static Cancellable watchData(final ZKClient zkClient, final String path, final DataCallback callback) {
+    final AtomicBoolean cancelled = new AtomicBoolean(false);
+    watchChanges(new Operation<NodeData>() {
+
+      @Override
+      public ZKClient getZKClient() {
+        return zkClient;
+      }
+
+      @Override
+      public OperationFuture<NodeData> exec(String path, Watcher watcher) {
+        return zkClient.getData(path, watcher);
+      }
+    }, path, callback, cancelled);
+
+    return new Cancellable() {
+      @Override
+      public void cancel() {
+        cancelled.set(true);
+      }
+    };
+  }
+
+  public static ListenableFuture<String> watchDeleted(final ZKClient zkClient, final String path) {
+    SettableFuture<String> completion = SettableFuture.create();
+    watchDeleted(zkClient, path, completion);
+    return completion;
+  }
+
+  public static void watchDeleted(final ZKClient zkClient, final String path,
+                                  final SettableFuture<String> completion) {
+
+    Futures.addCallback(zkClient.exists(path, new Watcher() {
+      @Override
+      public void process(WatchedEvent event) {
+        if (!completion.isDone()) {
+          if (event.getType() == Event.EventType.NodeDeleted) {
+            completion.set(path);
+          } else {
+            watchDeleted(zkClient, path, completion);
+          }
+        }
+      }
+    }), new FutureCallback<Stat>() {
+      @Override
+      public void onSuccess(Stat result) {
+        if (result == null) {
+          completion.set(path);
+        }
+      }
+
+      @Override
+      public void onFailure(Throwable t) {
+        completion.setException(t);
+      }
+    });
+  }
+
+  public static Cancellable watchChildren(final ZKClient zkClient, String path, ChildrenCallback callback) {
+    final AtomicBoolean cancelled = new AtomicBoolean(false);
+    watchChanges(new Operation<NodeChildren>() {
+
+      @Override
+      public ZKClient getZKClient() {
+        return zkClient;
+      }
+
+      @Override
+      public OperationFuture<NodeChildren> exec(String path, Watcher watcher) {
+        return zkClient.getChildren(path, watcher);
+      }
+    }, path, callback, cancelled);
+
+    return new Cancellable() {
+      @Override
+      public void cancel() {
+        cancelled.set(true);
+      }
+    };
+  }
+
+  /**
+   * Returns a new {@link OperationFuture} that the result will be the same as the given future, except that when
+   * the source future is having an exception matching the giving exception type, the errorResult will be set
+   * in to the returned {@link OperationFuture}.
+   * @param future The source future.
+   * @param exceptionType Type of {@link KeeperException} to be ignored.
+   * @param errorResult Object to be set into the resulting future on a matching exception.
+   * @param <V> Type of the result.
+   * @return A new {@link OperationFuture}.
+   */
+  public static <V> OperationFuture<V> ignoreError(OperationFuture<V> future,
+                                                   final Class<? extends KeeperException> exceptionType,
+                                                   final V errorResult) {
+    final SettableOperationFuture<V> resultFuture = SettableOperationFuture.create(future.getRequestPath(),
+                                                                                   Threads.SAME_THREAD_EXECUTOR);
+
+    Futures.addCallback(future, new FutureCallback<V>() {
+      @Override
+      public void onSuccess(V result) {
+        resultFuture.set(result);
+      }
+
+      @Override
+      public void onFailure(Throwable t) {
+        if (exceptionType.isAssignableFrom(t.getClass())) {
+          resultFuture.set(errorResult);
+        } else if (t instanceof CancellationException) {
+          resultFuture.cancel(true);
+        } else {
+          resultFuture.setException(t);
+        }
+      }
+    }, Threads.SAME_THREAD_EXECUTOR);
+
+    return resultFuture;
+  }
+
+  /**
+   * Deletes the given path recursively. The delete method will keep running until the given path is successfully
+   * removed, which means that if new nodes are created under the given path while deleting, they'll get deleted
+   * as well.  If there is a {@link KeeperException} during the deletion other than
+   * {@link KeeperException.NotEmptyException} or {@link KeeperException.NoNodeException},
+   * the exception will be reflected in the result future and the deletion process will stop,
+   * leaving the given path in an intermediate state.
+   *
+   * @param path The path to delete.
+   * @return An {@link OperationFuture} that will be completed when the given path is deleted, or failed due to
+   *         an exception.
+   */
+  public static OperationFuture<String> recursiveDelete(final ZKClient zkClient, final String path) {
+    final SettableOperationFuture<String> resultFuture =
+      SettableOperationFuture.create(path, Threads.SAME_THREAD_EXECUTOR);
+
+    // Try to delete the given path.
+    Futures.addCallback(zkClient.delete(path), new FutureCallback<String>() {
+      private final FutureCallback<String> deleteCallback = this;
+
+      @Override
+      public void onSuccess(String result) {
+        // Path deleted successfully. Operation done.
+        resultFuture.set(result);
+      }
+
+      @Override
+      public void onFailure(Throwable t) {
+        // Failed to delete the given path
+        if (!(t instanceof KeeperException.NotEmptyException || t instanceof KeeperException.NoNodeException)) {
+          // For errors other than NotEmptyException, treat the operation as failed.
+          resultFuture.setException(t);
+          return;
+        }
+
+        // If failed because of NotEmptyException, get the list of children under the given path
+        Futures.addCallback(zkClient.getChildren(path), new FutureCallback<NodeChildren>() {
+
+          @Override
+          public void onSuccess(NodeChildren result) {
+            // Delete all children nodes recursively.
+            final List<OperationFuture<String>> deleteFutures = Lists.newLinkedList();
+            for (String child : result.getChildren()) {
+              deleteFutures.add(recursiveDelete(zkClient, path + "/" + child));
+            }
+
+            // When deletion of all children succeeded, delete the given path again.
+            Futures.successfulAsList(deleteFutures).addListener(new Runnable() {
+              @Override
+              public void run() {
+                for (OperationFuture<String> deleteFuture : deleteFutures) {
+                  try {
+                    // If any exception when deleting children, treat the operation as failed.
+                    deleteFuture.get();
+                  } catch (Exception e) {
+                    resultFuture.setException(e.getCause());
+                  }
+                }
+                Futures.addCallback(zkClient.delete(path), deleteCallback, Threads.SAME_THREAD_EXECUTOR);
+              }
+            }, Threads.SAME_THREAD_EXECUTOR);
+          }
+
+          @Override
+          public void onFailure(Throwable t) {
+            // If failed to get list of children, treat the operation as failed.
+            resultFuture.setException(t);
+          }
+        }, Threads.SAME_THREAD_EXECUTOR);
+      }
+    }, Threads.SAME_THREAD_EXECUTOR);
+
+    return resultFuture;
+  }
+
+  /**
+   * Watch for the given path until it exists.
+   * @param zkClient The {@link ZKClient} to use.
+   * @param path A ZooKeeper path to watch until it exists.
+   */
+  private static void watchExists(final ZKClient zkClient, final String path, final SettableFuture<String> completion) {
+    Futures.addCallback(zkClient.exists(path, new Watcher() {
+      @Override
+      public void process(WatchedEvent event) {
+        if (!completion.isDone()) {
+          watchExists(zkClient, path, completion);
+        }
+      }
+    }), new FutureCallback<Stat>() {
+      @Override
+      public void onSuccess(Stat result) {
+        if (result != null) {
+          completion.set(path);
+        }
+      }
+
+      @Override
+      public void onFailure(Throwable t) {
+        completion.setException(t);
+      }
+    });
+  }
+
+  private static <T> void watchChanges(final Operation<T> operation, final String path,
+                                       final Callback<T> callback, final AtomicBoolean cancelled) {
+    Futures.addCallback(operation.exec(path, new Watcher() {
+      @Override
+      public void process(WatchedEvent event) {
+        if (!cancelled.get()) {
+          watchChanges(operation, path, callback, cancelled);
+        }
+      }
+    }), new FutureCallback<T>() {
+      @Override
+      public void onSuccess(T result) {
+        if (!cancelled.get()) {
+          callback.updated(result);
+        }
+      }
+
+      @Override
+      public void onFailure(Throwable t) {
+        if (t instanceof KeeperException && ((KeeperException) t).code() == KeeperException.Code.NONODE) {
+          final SettableFuture<String> existCompletion = SettableFuture.create();
+          existCompletion.addListener(new Runnable() {
+            @Override
+            public void run() {
+              try {
+                if (!cancelled.get()) {
+                  watchChanges(operation, existCompletion.get(), callback, cancelled);
+                }
+              } catch (Exception e) {
+                LOG.error("Failed to watch children for path " + path, e);
+              }
+            }
+          }, Threads.SAME_THREAD_EXECUTOR);
+          watchExists(operation.getZKClient(), path, existCompletion);
+          return;
+        }
+        LOG.error("Failed to watch data for path " + path + " " + t, t);
+      }
+    });
+  }
+
+  private ZKOperations() {
+  }
+}

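A sketch of using the two most common helpers above: watching a node for data changes and recursively deleting a subtree. The ZKClientService builder used here appears later in this change (see ZKClientTest); the connection string and paths are illustrative:

    import org.apache.twill.common.Cancellable;
    import org.apache.twill.zookeeper.NodeData;
    import org.apache.twill.zookeeper.ZKClientService;
    import org.apache.twill.zookeeper.ZKOperations;
    import com.google.common.util.concurrent.Futures;

    public final class ZKOperationsExample {

      public static void main(String[] args) {
        ZKClientService client = ZKClientService.Builder.of("localhost:2181").build();
        client.startAndWait();
        try {
          // Watch "/config" and print every observed update; null means the node was deleted.
          Cancellable watch = ZKOperations.watchData(client, "/config", new ZKOperations.DataCallback() {
            @Override
            public void updated(NodeData nodeData) {
              System.out.println("Updated: " + (nodeData == null ? "<deleted>" : nodeData));
            }
          });

          // ... application logic ...

          watch.cancel();

          // Remove the whole subtree, blocking until the deletion finishes.
          Futures.getUnchecked(ZKOperations.recursiveDelete(client, "/config"));
        } finally {
          client.stopAndWait();
        }
      }
    }
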
http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-zookeeper/src/main/java/org/apache/twill/zookeeper/package-info.java
----------------------------------------------------------------------
diff --git a/twill-zookeeper/src/main/java/org/apache/twill/zookeeper/package-info.java b/twill-zookeeper/src/main/java/org/apache/twill/zookeeper/package-info.java
new file mode 100644
index 0000000..e5bd237
--- /dev/null
+++ b/twill-zookeeper/src/main/java/org/apache/twill/zookeeper/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * This package provides functionality for ZooKeeper interactions.
+ */
+package org.apache.twill.zookeeper;

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-zookeeper/src/test/java/org/apache/twill/zookeeper/RetryStrategyTest.java
----------------------------------------------------------------------
diff --git a/twill-zookeeper/src/test/java/org/apache/twill/zookeeper/RetryStrategyTest.java b/twill-zookeeper/src/test/java/org/apache/twill/zookeeper/RetryStrategyTest.java
new file mode 100644
index 0000000..601f0bd
--- /dev/null
+++ b/twill-zookeeper/src/test/java/org/apache/twill/zookeeper/RetryStrategyTest.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.zookeeper;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.util.concurrent.TimeUnit;
+
+/**
+ *
+ */
+public class RetryStrategyTest {
+
+  @Test
+  public void testNoRetry() {
+    RetryStrategy strategy = RetryStrategies.noRetry();
+    long startTime = System.currentTimeMillis();
+    for (int i = 1; i <= 10; i++) {
+      Assert.assertEquals(-1L, strategy.nextRetry(i, startTime, RetryStrategy.OperationType.CREATE, "/"));
+    }
+  }
+
+  @Test
+  public void testLimit() {
+    RetryStrategy strategy = RetryStrategies.limit(10, RetryStrategies.fixDelay(1, TimeUnit.MILLISECONDS));
+    long startTime = System.currentTimeMillis();
+    for (int i = 1; i <= 10; i++) {
+      Assert.assertEquals(1L, strategy.nextRetry(i, startTime, RetryStrategy.OperationType.CREATE, "/"));
+    }
+    Assert.assertEquals(-1L, strategy.nextRetry(11, startTime, RetryStrategy.OperationType.CREATE, "/"));
+  }
+
+  @Test
+  public void testUnlimited() {
+    RetryStrategy strategy = RetryStrategies.fixDelay(1, TimeUnit.MILLISECONDS);
+    long startTime = System.currentTimeMillis();
+    for (int i = 1; i <= 10; i++) {
+      Assert.assertEquals(1L, strategy.nextRetry(i, startTime, RetryStrategy.OperationType.CREATE, "/"));
+    }
+    Assert.assertEquals(1L, strategy.nextRetry(100000, startTime, RetryStrategy.OperationType.CREATE, "/"));
+  }
+
+  @Test
+  public void testExponential() {
+    RetryStrategy strategy = RetryStrategies.exponentialDelay(1, 60000, TimeUnit.MILLISECONDS);
+    long startTime = System.currentTimeMillis();
+    for (int i = 1; i <= 16; i++) {
+      Assert.assertEquals(1L << (i - 1), strategy.nextRetry(i, startTime, RetryStrategy.OperationType.CREATE, "/"));
+    }
+    for (int i = 60; i <= 80; i++) {
+      Assert.assertEquals(60000, strategy.nextRetry(i, startTime, RetryStrategy.OperationType.CREATE, "/"));
+    }
+  }
+
+  @Test
+  public void testExponentialLimit() {
+    RetryStrategy strategy = RetryStrategies.limit(99,
+                                                   RetryStrategies.exponentialDelay(1, 60000, TimeUnit.MILLISECONDS));
+    long startTime = System.currentTimeMillis();
+    for (int i = 1; i <= 16; i++) {
+      Assert.assertEquals(1L << (i - 1), strategy.nextRetry(i, startTime, RetryStrategy.OperationType.CREATE, "/"));
+    }
+    for (int i = 60; i <= 80; i++) {
+      Assert.assertEquals(60000, strategy.nextRetry(i, startTime, RetryStrategy.OperationType.CREATE, "/"));
+    }
+    Assert.assertEquals(-1L, strategy.nextRetry(100, startTime, RetryStrategy.OperationType.CREATE, "/"));
+  }
+
+  @Test
+  public void testTimeLimit() throws InterruptedException {
+    RetryStrategy strategy = RetryStrategies.timeLimit(1, TimeUnit.SECONDS,
+                                                       RetryStrategies.fixDelay(1, TimeUnit.MILLISECONDS));
+    long startTime = System.currentTimeMillis();
+    Assert.assertEquals(1L, strategy.nextRetry(1, startTime, RetryStrategy.OperationType.CREATE, "/"));
+    TimeUnit.MILLISECONDS.sleep(1100);
+    Assert.assertEquals(-1L, strategy.nextRetry(2, startTime, RetryStrategy.OperationType.CREATE, "/"));
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-zookeeper/src/test/java/org/apache/twill/zookeeper/ZKClientTest.java
----------------------------------------------------------------------
diff --git a/twill-zookeeper/src/test/java/org/apache/twill/zookeeper/ZKClientTest.java b/twill-zookeeper/src/test/java/org/apache/twill/zookeeper/ZKClientTest.java
new file mode 100644
index 0000000..f1db74a
--- /dev/null
+++ b/twill-zookeeper/src/test/java/org/apache/twill/zookeeper/ZKClientTest.java
@@ -0,0 +1,254 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.zookeeper;
+
+import org.apache.twill.internal.zookeeper.InMemoryZKServer;
+import org.apache.twill.internal.zookeeper.KillZKSession;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Lists;
+import com.google.common.io.Files;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import org.apache.zookeeper.CreateMode;
+import org.apache.zookeeper.WatchedEvent;
+import org.apache.zookeeper.Watcher;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+/**
+ *
+ */
+public class ZKClientTest {
+
+  @Test
+  public void testChroot() throws Exception {
+    InMemoryZKServer zkServer = InMemoryZKServer.builder().setTickTime(1000).build();
+    zkServer.startAndWait();
+
+    try {
+      ZKClientService client = ZKClientService.Builder.of(zkServer.getConnectionStr() + "/chroot").build();
+      client.startAndWait();
+      try {
+        List<OperationFuture<String>> futures = Lists.newArrayList();
+        futures.add(client.create("/test1/test2", null, CreateMode.PERSISTENT));
+        futures.add(client.create("/test1/test3", null, CreateMode.PERSISTENT));
+        Futures.successfulAsList(futures).get();
+
+        Assert.assertNotNull(client.exists("/test1/test2").get());
+        Assert.assertNotNull(client.exists("/test1/test3").get());
+
+      } finally {
+        client.stopAndWait();
+      }
+    } finally {
+      zkServer.stopAndWait();
+    }
+  }
+
+  @Test
+  public void testCreateParent() throws ExecutionException, InterruptedException {
+    InMemoryZKServer zkServer = InMemoryZKServer.builder().setTickTime(1000).build();
+    zkServer.startAndWait();
+
+    try {
+      ZKClientService client = ZKClientService.Builder.of(zkServer.getConnectionStr()).build();
+      client.startAndWait();
+
+      try {
+        String path = client.create("/test1/test2/test3/test4/test5",
+                                    "testing".getBytes(), CreateMode.PERSISTENT_SEQUENTIAL).get();
+        Assert.assertTrue(path.startsWith("/test1/test2/test3/test4/test5"));
+
+        String dataPath = "";
+        for (int i = 1; i <= 4; i++) {
+          dataPath = dataPath + "/test" + i;
+          Assert.assertNull(client.getData(dataPath).get().getData());
+        }
+        Assert.assertTrue(Arrays.equals("testing".getBytes(), client.getData(path).get().getData()));
+      } finally {
+        client.stopAndWait();
+      }
+    } finally {
+      zkServer.stopAndWait();
+    }
+  }
+
+  @Test
+  public void testGetChildren() throws ExecutionException, InterruptedException {
+    InMemoryZKServer zkServer = InMemoryZKServer.builder().setTickTime(1000).build();
+    zkServer.startAndWait();
+
+    try {
+      ZKClientService client = ZKClientService.Builder.of(zkServer.getConnectionStr()).build();
+      client.startAndWait();
+
+      try {
+        client.create("/test", null, CreateMode.PERSISTENT).get();
+        Assert.assertTrue(client.getChildren("/test").get().getChildren().isEmpty());
+
+        Futures.allAsList(ImmutableList.of(client.create("/test/c1", null, CreateMode.EPHEMERAL),
+                                           client.create("/test/c2", null, CreateMode.EPHEMERAL))).get();
+
+        NodeChildren nodeChildren = client.getChildren("/test").get();
+        Assert.assertEquals(2, nodeChildren.getChildren().size());
+
+        Assert.assertEquals(ImmutableSet.of("c1", "c2"), ImmutableSet.copyOf(nodeChildren.getChildren()));
+
+      } finally {
+        client.stopAndWait();
+      }
+    } finally {
+      zkServer.stopAndWait();
+    }
+  }
+
+  @Test
+  public void testSetData() throws ExecutionException, InterruptedException {
+    InMemoryZKServer zkServer = InMemoryZKServer.builder().setTickTime(1000).build();
+    zkServer.startAndWait();
+
+    try {
+      ZKClientService client = ZKClientService.Builder.of(zkServer.getConnectionStr()).build();
+      client.startAndWait();
+
+      client.create("/test", null, CreateMode.PERSISTENT).get();
+      Assert.assertNull(client.getData("/test").get().getData());
+
+      client.setData("/test", "testing".getBytes()).get();
+      Assert.assertTrue(Arrays.equals("testing".getBytes(), client.getData("/test").get().getData()));
+
+    } finally {
+      zkServer.stopAndWait();
+    }
+  }
+
+  @Test
+  public void testExpireRewatch() throws InterruptedException, IOException, ExecutionException {
+    InMemoryZKServer zkServer = InMemoryZKServer.builder().setTickTime(1000).build();
+    zkServer.startAndWait();
+
+    try {
+      final CountDownLatch expireReconnectLatch = new CountDownLatch(1);
+      final AtomicBoolean expired = new AtomicBoolean(false);
+      final ZKClientService client = ZKClientServices.delegate(ZKClients.reWatchOnExpire(
+                                        ZKClientService.Builder.of(zkServer.getConnectionStr())
+                                                       .setSessionTimeout(2000)
+                                                       .setConnectionWatcher(new Watcher() {
+            @Override
+            public void process(WatchedEvent event) {
+              if (event.getState() == Event.KeeperState.Expired) {
+                expired.set(true);
+              } else if (event.getState() == Event.KeeperState.SyncConnected && expired.compareAndSet(true, true)) {
+                expireReconnectLatch.countDown();
+              }
+            }
+          }).build()));
+      client.startAndWait();
+
+      try {
+        final BlockingQueue<Watcher.Event.EventType> events = new LinkedBlockingQueue<Watcher.Event.EventType>();
+        client.exists("/expireRewatch", new Watcher() {
+          @Override
+          public void process(WatchedEvent event) {
+            client.exists("/expireRewatch", this);
+            events.add(event.getType());
+          }
+        });
+
+        client.create("/expireRewatch", null, CreateMode.PERSISTENT);
+        Assert.assertEquals(Watcher.Event.EventType.NodeCreated, events.poll(2, TimeUnit.SECONDS));
+
+        KillZKSession.kill(client.getZooKeeperSupplier().get(), zkServer.getConnectionStr(), 1000);
+
+        Assert.assertTrue(expireReconnectLatch.await(5, TimeUnit.SECONDS));
+
+        client.delete("/expireRewatch");
+        Assert.assertEquals(Watcher.Event.EventType.NodeDeleted, events.poll(4, TimeUnit.SECONDS));
+      } finally {
+        client.stopAndWait();
+      }
+    } finally {
+      zkServer.stopAndWait();
+    }
+  }
+
+  @Test
+  public void testRetry() throws ExecutionException, InterruptedException, TimeoutException {
+    File dataDir = Files.createTempDir();
+    InMemoryZKServer zkServer = InMemoryZKServer.builder().setDataDir(dataDir).setTickTime(1000).build();
+    zkServer.startAndWait();
+    int port = zkServer.getLocalAddress().getPort();
+
+    final CountDownLatch disconnectLatch = new CountDownLatch(1);
+    ZKClientService client = ZKClientServices.delegate(ZKClients.retryOnFailure(
+      ZKClientService.Builder.of(zkServer.getConnectionStr()).setConnectionWatcher(new Watcher() {
+      @Override
+      public void process(WatchedEvent event) {
+        if (event.getState() == Event.KeeperState.Disconnected) {
+          disconnectLatch.countDown();
+        }
+      }
+    }).build(), RetryStrategies.fixDelay(0, TimeUnit.SECONDS)));
+    client.startAndWait();
+
+    zkServer.stopAndWait();
+
+    Assert.assertTrue(disconnectLatch.await(1, TimeUnit.SECONDS));
+
+    final CountDownLatch createLatch = new CountDownLatch(1);
+    Futures.addCallback(client.create("/testretry/test", null, CreateMode.PERSISTENT), new FutureCallback<String>() {
+      @Override
+      public void onSuccess(String result) {
+        createLatch.countDown();
+      }
+
+      @Override
+      public void onFailure(Throwable t) {
+        t.printStackTrace(System.out);
+      }
+    });
+
+    TimeUnit.SECONDS.sleep(2);
+    zkServer = InMemoryZKServer.builder()
+                               .setDataDir(dataDir)
+                               .setAutoCleanDataDir(true)
+                               .setPort(port)
+                               .setTickTime(1000)
+                               .build();
+    zkServer.startAndWait();
+
+    try {
+      Assert.assertTrue(createLatch.await(5, TimeUnit.SECONDS));
+    } finally {
+      zkServer.stopAndWait();
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-zookeeper/src/test/java/org/apache/twill/zookeeper/ZKOperationsTest.java
----------------------------------------------------------------------
diff --git a/twill-zookeeper/src/test/java/org/apache/twill/zookeeper/ZKOperationsTest.java b/twill-zookeeper/src/test/java/org/apache/twill/zookeeper/ZKOperationsTest.java
new file mode 100644
index 0000000..9518d6e
--- /dev/null
+++ b/twill-zookeeper/src/test/java/org/apache/twill/zookeeper/ZKOperationsTest.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.twill.zookeeper;
+
+import org.apache.twill.internal.zookeeper.InMemoryZKServer;
+import org.apache.zookeeper.CreateMode;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+/**
+ *
+ */
+public class ZKOperationsTest {
+
+  @Test
+  public void recursiveDelete() throws ExecutionException, InterruptedException, TimeoutException {
+    InMemoryZKServer zkServer = InMemoryZKServer.builder().setTickTime(1000).build();
+    zkServer.startAndWait();
+
+    try {
+      ZKClientService client = ZKClientService.Builder.of(zkServer.getConnectionStr()).build();
+      client.startAndWait();
+
+      try {
+        client.create("/test1/test10/test101", null, CreateMode.PERSISTENT).get();
+        client.create("/test1/test10/test102", null, CreateMode.PERSISTENT).get();
+        client.create("/test1/test10/test103", null, CreateMode.PERSISTENT).get();
+
+        client.create("/test1/test11/test111", null, CreateMode.PERSISTENT).get();
+        client.create("/test1/test11/test112", null, CreateMode.PERSISTENT).get();
+        client.create("/test1/test11/test113", null, CreateMode.PERSISTENT).get();
+
+        ZKOperations.recursiveDelete(client, "/test1").get(2, TimeUnit.SECONDS);
+
+        Assert.assertNull(client.exists("/test1").get(2, TimeUnit.SECONDS));
+
+      } finally {
+        client.stopAndWait();
+      }
+    } finally {
+      zkServer.stopAndWait();
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/twill-zookeeper/src/test/resources/logback-test.xml
----------------------------------------------------------------------
diff --git a/twill-zookeeper/src/test/resources/logback-test.xml b/twill-zookeeper/src/test/resources/logback-test.xml
new file mode 100644
index 0000000..157df6e
--- /dev/null
+++ b/twill-zookeeper/src/test/resources/logback-test.xml
@@ -0,0 +1,17 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<!-- Default logback configuration for twill library -->
+<configuration>
+    <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
+        <encoder>
+            <pattern>%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n</pattern>
+        </encoder>
+    </appender>
+
+    <logger name="org.apache.hadoop" level="WARN" />
+    <logger name="org.apache.zookeeper" level="WARN" />
+
+    <root level="INFO">
+        <appender-ref ref="STDOUT"/>
+    </root>
+</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/pom.xml
----------------------------------------------------------------------
diff --git a/yarn/pom.xml b/yarn/pom.xml
deleted file mode 100644
index b11bc7a..0000000
--- a/yarn/pom.xml
+++ /dev/null
@@ -1,127 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-    <parent>
-        <artifactId>twill-parent</artifactId>
-        <groupId>org.apache.twill</groupId>
-        <version>0.1.0-SNAPSHOT</version>
-    </parent>
-    <modelVersion>4.0.0</modelVersion>
-
-    <artifactId>twill-yarn</artifactId>
-    <name>Twill Apache Hadoop YARN library</name>
-
-    <properties>
-        <output.dir>target/classes</output.dir>
-    </properties>
-
-    <dependencies>
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>twill-core</artifactId>
-            <version>${project.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>twill-discovery-core</artifactId>
-            <version>${project.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>com.google.guava</groupId>
-            <artifactId>guava</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.slf4j</groupId>
-            <artifactId>slf4j-api</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.slf4j</groupId>
-            <artifactId>jcl-over-slf4j</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.apache.hadoop</groupId>
-            <artifactId>hadoop-yarn-api</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.apache.hadoop</groupId>
-            <artifactId>hadoop-yarn-common</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.apache.hadoop</groupId>
-            <artifactId>hadoop-yarn-client</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.apache.hadoop</groupId>
-            <artifactId>hadoop-common</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.apache.hadoop</groupId>
-            <artifactId>hadoop-hdfs</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.apache.hadoop</groupId>
-            <artifactId>hadoop-minicluster</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>junit</groupId>
-            <artifactId>junit</artifactId>
-        </dependency>
-    </dependencies>
-
-    <build>
-        <outputDirectory>${output.dir}</outputDirectory>
-    </build>
-
-    <profiles>
-        <profile>
-            <id>hadoop-2.0</id>
-            <properties>
-                <output.dir>${hadoop20.output.dir}</output.dir>
-            </properties>
-        </profile>
-        <profile>
-            <id>hadoop-2.1</id>
-            <build>
-                <resources>
-                    <resource>
-                        <directory>${hadoop20.output.dir}</directory>
-                    </resource>
-                    <resource>
-                        <directory>src/main/resources</directory>
-                    </resource>
-                </resources>
-            </build>
-        </profile>
-        <profile>
-            <id>hadoop-2.2</id>
-            <build>
-                <resources>
-                    <resource>
-                        <directory>${hadoop20.output.dir}</directory>
-                    </resource>
-                    <resource>
-                        <directory>src/main/resources</directory>
-                    </resource>
-                </resources>
-            </build>
-        </profile>
-    </profiles>
-</project>

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/main/hadoop20/org/apache/twill/internal/yarn/Hadoop20YarnAMClient.java
----------------------------------------------------------------------
diff --git a/yarn/src/main/hadoop20/org/apache/twill/internal/yarn/Hadoop20YarnAMClient.java b/yarn/src/main/hadoop20/org/apache/twill/internal/yarn/Hadoop20YarnAMClient.java
deleted file mode 100644
index d98dee1..0000000
--- a/yarn/src/main/hadoop20/org/apache/twill/internal/yarn/Hadoop20YarnAMClient.java
+++ /dev/null
@@ -1,213 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.yarn;
-
-import org.apache.twill.internal.ProcessLauncher;
-import org.apache.twill.internal.appmaster.RunnableProcessLauncher;
-import org.apache.twill.internal.yarn.ports.AMRMClient;
-import org.apache.twill.internal.yarn.ports.AMRMClientImpl;
-import org.apache.twill.internal.yarn.ports.AllocationResponse;
-import com.google.common.base.Function;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ArrayListMultimap;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Iterables;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Multimap;
-import com.google.common.util.concurrent.AbstractIdleService;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.yarn.api.ApplicationConstants;
-import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
-import org.apache.hadoop.yarn.api.records.Container;
-import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.ContainerStatus;
-import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
-import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.ipc.YarnRPC;
-import org.apache.hadoop.yarn.util.ConverterUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.net.InetSocketAddress;
-import java.net.URL;
-import java.util.List;
-import java.util.UUID;
-
-/**
- *
- */
-public final class Hadoop20YarnAMClient extends AbstractIdleService implements YarnAMClient {
-
-  private static final Logger LOG = LoggerFactory.getLogger(Hadoop20YarnAMClient.class);
-  private static final Function<ContainerStatus, YarnContainerStatus> STATUS_TRANSFORM;
-
-  static {
-    STATUS_TRANSFORM = new Function<ContainerStatus, YarnContainerStatus>() {
-      @Override
-      public YarnContainerStatus apply(ContainerStatus status) {
-        return new Hadoop20YarnContainerStatus(status);
-      }
-    };
-  }
-
-  private final ContainerId containerId;
-  private final Multimap<String, AMRMClient.ContainerRequest> containerRequests;
-  private final AMRMClient amrmClient;
-  private final YarnNMClient nmClient;
-  private InetSocketAddress trackerAddr;
-  private URL trackerUrl;
-  private Resource maxCapability;
-  private Resource minCapability;
-
-  public Hadoop20YarnAMClient(Configuration conf) {
-    String masterContainerId = System.getenv().get(ApplicationConstants.AM_CONTAINER_ID_ENV);
-    Preconditions.checkArgument(masterContainerId != null,
-                                "Missing %s from environment", ApplicationConstants.AM_CONTAINER_ID_ENV);
-    this.containerId = ConverterUtils.toContainerId(masterContainerId);
-    this.containerRequests = ArrayListMultimap.create();
-
-    this.amrmClient = new AMRMClientImpl(containerId.getApplicationAttemptId());
-    this.amrmClient.init(conf);
-    this.nmClient = new Hadoop20YarnNMClient(YarnRPC.create(conf), conf);
-  }
-
-  @Override
-  protected void startUp() throws Exception {
-    Preconditions.checkNotNull(trackerAddr, "Tracker address not set.");
-    Preconditions.checkNotNull(trackerUrl, "Tracker URL not set.");
-
-    amrmClient.start();
-
-    RegisterApplicationMasterResponse response = amrmClient.registerApplicationMaster(trackerAddr.getHostName(),
-                                                                                      trackerAddr.getPort(),
-                                                                                      trackerUrl.toString());
-    maxCapability = response.getMaximumResourceCapability();
-    minCapability = response.getMinimumResourceCapability();
-  }
-
-  @Override
-  protected void shutDown() throws Exception {
-    amrmClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED, null, trackerUrl.toString());
-    amrmClient.stop();
-  }
-
-  @Override
-  public ContainerId getContainerId() {
-    return containerId;
-  }
-
-  @Override
-  public String getHost() {
-    return System.getenv().get(ApplicationConstants.NM_HOST_ENV);
-  }
-
-  @Override
-  public void setTracker(InetSocketAddress trackerAddr, URL trackerUrl) {
-    this.trackerAddr = trackerAddr;
-    this.trackerUrl = trackerUrl;
-  }
-
-  @Override
-  public synchronized void allocate(float progress, AllocateHandler handler) throws Exception {
-    AllocationResponse response = amrmClient.allocate(progress);
-    List<ProcessLauncher<YarnContainerInfo>> launchers
-      = Lists.newArrayListWithCapacity(response.getAllocatedContainers().size());
-
-    for (Container container : response.getAllocatedContainers()) {
-      launchers.add(new RunnableProcessLauncher(new Hadoop20YarnContainerInfo(container), nmClient));
-    }
-
-    if (!launchers.isEmpty()) {
-      handler.acquired(launchers);
-
-      // If no process has been launched through the given launcher, return the container.
-      for (ProcessLauncher<YarnContainerInfo> l : launchers) {
-        // This cast always works.
-        RunnableProcessLauncher launcher = (RunnableProcessLauncher) l;
-        if (!launcher.isLaunched()) {
-          Container container = launcher.getContainerInfo().getContainer();
-          LOG.info("Nothing to run in container, releasing it: {}", container);
-          amrmClient.releaseAssignedContainer(container.getId());
-        }
-      }
-    }
-
-    List<YarnContainerStatus> completed = ImmutableList.copyOf(
-      Iterables.transform(response.getCompletedContainersStatuses(), STATUS_TRANSFORM));
-    if (!completed.isEmpty()) {
-      handler.completed(completed);
-    }
-  }
-
-  @Override
-  public ContainerRequestBuilder addContainerRequest(Resource capability) {
-    return addContainerRequest(capability, 1);
-  }
-
-  @Override
-  public ContainerRequestBuilder addContainerRequest(Resource capability, int count) {
-    return new ContainerRequestBuilder(adjustCapability(capability), count) {
-      @Override
-      public String apply() {
-        synchronized (Hadoop20YarnAMClient.this) {
-          String id = UUID.randomUUID().toString();
-
-          String[] hosts = this.hosts.isEmpty() ? null : this.hosts.toArray(new String[this.hosts.size()]);
-          String[] racks = this.racks.isEmpty() ? null : this.racks.toArray(new String[this.racks.size()]);
-
-          for (int i = 0; i < count; i++) {
-            AMRMClient.ContainerRequest request = new AMRMClient.ContainerRequest(capability, hosts, racks,
-                                                                                  priority, 1);
-            containerRequests.put(id, request);
-            amrmClient.addContainerRequest(request);
-          }
-
-          return id;
-        }
-      }
-    };
-  }
-
-  @Override
-  public synchronized void completeContainerRequest(String id) {
-    for (AMRMClient.ContainerRequest request : containerRequests.removeAll(id)) {
-      amrmClient.removeContainerRequest(request);
-    }
-  }
-
-  private Resource adjustCapability(Resource resource) {
-    int cores = YarnUtils.getVirtualCores(resource);
-    int updatedCores = Math.max(Math.min(cores, YarnUtils.getVirtualCores(maxCapability)),
-                                YarnUtils.getVirtualCores(minCapability));
-    // Try to set the virtual cores; older versions of YARN don't support this.
-    if (cores != updatedCores && YarnUtils.setVirtualCores(resource, updatedCores)) {
-      LOG.info("Adjust virtual cores requirement from {} to {}.", cores, updatedCores);
-    }
-
-    int updatedMemory = Math.min(resource.getMemory(), maxCapability.getMemory());
-    int minMemory = minCapability.getMemory();
-    updatedMemory = (int) Math.ceil(((double) updatedMemory / minMemory)) * minMemory;
-
-    if (resource.getMemory() != updatedMemory) {
-      LOG.info("Adjust memory requirement from {} to {} MB.", resource.getMemory(), updatedMemory);
-      resource.setMemory(updatedMemory);
-    }
-
-    return resource;
-  }
-}
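
Two notes on the client above, offered as illustration rather than as part of the change. First, adjustCapability() rounds memory up to a multiple of the scheduler's minimum allocation: with a 1024 MB minimum, a 1500 MB request becomes ceil(1500 / 1024) * 1024 = 2048 MB. Second, allocate() hands newly acquired container launchers and completed container statuses to an AllocateHandler; judging only from the calls handler.acquired(...) and handler.completed(...) above (the interface itself is declared in YarnAMClient, outside this diff, so treat the exact shape as an assumption), a handler sketch might look like:

    // Assumed shape of the callback passed to allocate(progress, handler);
    // method names are taken from the calls in allocate() above.
    YarnAMClient.AllocateHandler handler = new YarnAMClient.AllocateHandler() {
      @Override
      public void acquired(List<ProcessLauncher<YarnContainerInfo>> launchers) {
        for (ProcessLauncher<YarnContainerInfo> launcher : launchers) {
          // Decide whether to launch a runnable in this container; launchers
          // left unused are released again by allocate() once this returns.
        }
      }

      @Override
      public void completed(List<YarnContainerStatus> statuses) {
        for (YarnContainerStatus status : statuses) {
          // Inspect status.getExitStatus() / status.getDiagnostics() and decide
          // whether the corresponding runnable has to be restarted.
        }
      }
    };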

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/main/hadoop20/org/apache/twill/internal/yarn/Hadoop20YarnAppClient.java
----------------------------------------------------------------------
diff --git a/yarn/src/main/hadoop20/org/apache/twill/internal/yarn/Hadoop20YarnAppClient.java b/yarn/src/main/hadoop20/org/apache/twill/internal/yarn/Hadoop20YarnAppClient.java
deleted file mode 100644
index bfec34e..0000000
--- a/yarn/src/main/hadoop20/org/apache/twill/internal/yarn/Hadoop20YarnAppClient.java
+++ /dev/null
@@ -1,197 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.yarn;
-
-import org.apache.twill.api.TwillSpecification;
-import org.apache.twill.internal.ProcessController;
-import org.apache.twill.internal.ProcessLauncher;
-import org.apache.twill.internal.appmaster.ApplicationMasterProcessLauncher;
-import org.apache.twill.internal.appmaster.ApplicationSubmitter;
-import com.google.common.base.Throwables;
-import com.google.common.util.concurrent.AbstractIdleService;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.security.Credentials;
-import org.apache.hadoop.security.SecurityUtil;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.security.token.TokenIdentifier;
-import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
-import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
-import org.apache.hadoop.yarn.api.records.DelegationToken;
-import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.client.YarnClient;
-import org.apache.hadoop.yarn.client.YarnClientImpl;
-import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
-import org.apache.hadoop.yarn.util.Records;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.net.InetSocketAddress;
-
-/**
- *
- */
-public final class Hadoop20YarnAppClient extends AbstractIdleService implements YarnAppClient {
-
-  private static final Logger LOG = LoggerFactory.getLogger(Hadoop20YarnAppClient.class);
-  private final YarnClient yarnClient;
-  private String user;
-
-  public Hadoop20YarnAppClient(Configuration configuration) {
-    this.yarnClient = new YarnClientImpl();
-    yarnClient.init(configuration);
-    this.user = System.getProperty("user.name");
-  }
-
-  @Override
-  public ProcessLauncher<ApplicationId> createLauncher(TwillSpecification twillSpec) throws Exception {
-    // Request for new application
-    final GetNewApplicationResponse response = yarnClient.getNewApplication();
-    final ApplicationId appId = response.getApplicationId();
-
-    // Setup the context for application submission
-    final ApplicationSubmissionContext appSubmissionContext = Records.newRecord(ApplicationSubmissionContext.class);
-    appSubmissionContext.setApplicationId(appId);
-    appSubmissionContext.setApplicationName(twillSpec.getName());
-    appSubmissionContext.setUser(user);
-
-    ApplicationSubmitter submitter = new ApplicationSubmitter() {
-
-      @Override
-      public ProcessController<YarnApplicationReport> submit(YarnLaunchContext launchContext, Resource capability) {
-        ContainerLaunchContext context = launchContext.getLaunchContext();
-        addRMToken(context);
-        context.setUser(appSubmissionContext.getUser());
-        context.setResource(adjustMemory(response, capability));
-        appSubmissionContext.setAMContainerSpec(context);
-
-        try {
-          yarnClient.submitApplication(appSubmissionContext);
-          return new ProcessControllerImpl(yarnClient, appId);
-        } catch (YarnRemoteException e) {
-          LOG.error("Failed to submit application {}", appId, e);
-          throw Throwables.propagate(e);
-        }
-      }
-    };
-
-    return new ApplicationMasterProcessLauncher(appId, submitter);
-  }
-
-  private Resource adjustMemory(GetNewApplicationResponse response, Resource capability) {
-    int minMemory = response.getMinimumResourceCapability().getMemory();
-
-    int updatedMemory = Math.min(capability.getMemory(), response.getMaximumResourceCapability().getMemory());
-    updatedMemory = (int) Math.ceil(((double) updatedMemory / minMemory)) * minMemory;
-
-    if (updatedMemory != capability.getMemory()) {
-      capability.setMemory(updatedMemory);
-    }
-
-    return capability;
-  }
-
-  private void addRMToken(ContainerLaunchContext context) {
-    if (!UserGroupInformation.isSecurityEnabled()) {
-      return;
-    }
-
-    try {
-      Credentials credentials = YarnUtils.decodeCredentials(context.getContainerTokens());
-
-      Configuration config = yarnClient.getConfig();
-      Token<TokenIdentifier> token = convertToken(
-        yarnClient.getRMDelegationToken(new Text(YarnUtils.getYarnTokenRenewer(config))),
-        YarnUtils.getRMAddress(config));
-
-      LOG.info("Added RM delegation token {}", token);
-      credentials.addToken(token.getService(), token);
-
-      context.setContainerTokens(YarnUtils.encodeCredentials(credentials));
-
-    } catch (Exception e) {
-      LOG.error("Fails to create credentials.", e);
-      throw Throwables.propagate(e);
-    }
-  }
-
-  private <T extends TokenIdentifier> Token<T> convertToken(DelegationToken protoToken, InetSocketAddress serviceAddr) {
-    Token<T> token = new Token<T>(protoToken.getIdentifier().array(),
-                                  protoToken.getPassword().array(),
-                                  new Text(protoToken.getKind()),
-                                  new Text(protoToken.getService()));
-    if (serviceAddr != null) {
-      SecurityUtil.setTokenService(token, serviceAddr);
-    }
-    return token;
-  }
-
-  @Override
-  public ProcessLauncher<ApplicationId> createLauncher(String user, TwillSpecification twillSpec) throws Exception {
-    this.user = user;
-    return createLauncher(twillSpec);
-  }
-
-  @Override
-  public ProcessController<YarnApplicationReport> createProcessController(ApplicationId appId) {
-    return new ProcessControllerImpl(yarnClient, appId);
-  }
-
-  @Override
-  protected void startUp() throws Exception {
-    yarnClient.start();
-  }
-
-  @Override
-  protected void shutDown() throws Exception {
-    yarnClient.stop();
-  }
-
-  private static final class ProcessControllerImpl implements ProcessController<YarnApplicationReport> {
-    private final YarnClient yarnClient;
-    private final ApplicationId appId;
-
-    public ProcessControllerImpl(YarnClient yarnClient, ApplicationId appId) {
-      this.yarnClient = yarnClient;
-      this.appId = appId;
-    }
-
-    @Override
-    public YarnApplicationReport getReport() {
-      try {
-        return new Hadoop20YarnApplicationReport(yarnClient.getApplicationReport(appId));
-      } catch (YarnRemoteException e) {
-        LOG.error("Failed to get application report {}", appId, e);
-        throw Throwables.propagate(e);
-      }
-    }
-
-    @Override
-    public void cancel() {
-      try {
-        yarnClient.killApplication(appId);
-      } catch (YarnRemoteException e) {
-        LOG.error("Failed to kill application {}", appId, e);
-        throw Throwables.propagate(e);
-      }
-    }
-  }
-}
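
ProcessControllerImpl above exposes just getReport() and cancel(). A minimal, assumed fragment showing how a caller that already holds the ApplicationId might use it (the appClient and appId variables are placeholders, and the policy shown is illustrative only):

    // Sketch only: check on a previously submitted application and, if it is
    // still not finished when we are tearing down, ask YARN to kill it.
    ProcessController<YarnApplicationReport> controller = appClient.createProcessController(appId);
    YarnApplicationReport report = controller.getReport();
    if (report.getYarnApplicationState() != YarnApplicationState.FINISHED) {
      controller.cancel();
    }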

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/main/hadoop20/org/apache/twill/internal/yarn/Hadoop20YarnApplicationReport.java
----------------------------------------------------------------------
diff --git a/yarn/src/main/hadoop20/org/apache/twill/internal/yarn/Hadoop20YarnApplicationReport.java b/yarn/src/main/hadoop20/org/apache/twill/internal/yarn/Hadoop20YarnApplicationReport.java
deleted file mode 100644
index 6c1b764..0000000
--- a/yarn/src/main/hadoop20/org/apache/twill/internal/yarn/Hadoop20YarnApplicationReport.java
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.yarn;
-
-import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.api.records.ApplicationReport;
-import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
-import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
-import org.apache.hadoop.yarn.api.records.YarnApplicationState;
-
-/**
- *
- */
-public final class Hadoop20YarnApplicationReport implements YarnApplicationReport {
-
-  private final ApplicationReport report;
-
-  public Hadoop20YarnApplicationReport(ApplicationReport report) {
-    this.report = report;
-  }
-
-  @Override
-  public ApplicationId getApplicationId() {
-    return report.getApplicationId();
-  }
-
-  @Override
-  public ApplicationAttemptId getCurrentApplicationAttemptId() {
-    return report.getCurrentApplicationAttemptId();
-  }
-
-  @Override
-  public String getQueue() {
-    return report.getQueue();
-  }
-
-  @Override
-  public String getName() {
-    return report.getName();
-  }
-
-  @Override
-  public String getHost() {
-    return report.getHost();
-  }
-
-  @Override
-  public int getRpcPort() {
-    return report.getRpcPort();
-  }
-
-  @Override
-  public YarnApplicationState getYarnApplicationState() {
-    return report.getYarnApplicationState();
-  }
-
-  @Override
-  public String getDiagnostics() {
-    return report.getDiagnostics();
-  }
-
-  @Override
-  public String getTrackingUrl() {
-    return report.getTrackingUrl();
-  }
-
-  @Override
-  public String getOriginalTrackingUrl() {
-    return report.getOriginalTrackingUrl();
-  }
-
-  @Override
-  public long getStartTime() {
-    return report.getStartTime();
-  }
-
-  @Override
-  public long getFinishTime() {
-    return report.getFinishTime();
-  }
-
-  @Override
-  public FinalApplicationStatus getFinalApplicationStatus() {
-    return report.getFinalApplicationStatus();
-  }
-
-  @Override
-  public ApplicationResourceUsageReport getApplicationResourceUsageReport() {
-    return report.getApplicationResourceUsageReport();
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/main/hadoop20/org/apache/twill/internal/yarn/Hadoop20YarnContainerInfo.java
----------------------------------------------------------------------
diff --git a/yarn/src/main/hadoop20/org/apache/twill/internal/yarn/Hadoop20YarnContainerInfo.java b/yarn/src/main/hadoop20/org/apache/twill/internal/yarn/Hadoop20YarnContainerInfo.java
deleted file mode 100644
index 79b2cb5..0000000
--- a/yarn/src/main/hadoop20/org/apache/twill/internal/yarn/Hadoop20YarnContainerInfo.java
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.yarn;
-
-import com.google.common.base.Throwables;
-import org.apache.hadoop.yarn.api.records.Container;
-
-import java.net.InetAddress;
-import java.net.UnknownHostException;
-
-/**
- *
- */
-public final class Hadoop20YarnContainerInfo implements YarnContainerInfo {
-
-  private final Container container;
-
-  public Hadoop20YarnContainerInfo(Container container) {
-    this.container = container;
-  }
-
-  @Override
-  public <T> T getContainer() {
-    return (T) container;
-  }
-
-  @Override
-  public String getId() {
-    return container.getId().toString();
-  }
-
-  @Override
-  public InetAddress getHost() {
-    try {
-      return InetAddress.getByName(container.getNodeId().getHost());
-    } catch (UnknownHostException e) {
-      throw Throwables.propagate(e);
-    }
-  }
-
-  @Override
-  public int getPort() {
-    return container.getNodeId().getPort();
-  }
-
-  @Override
-  public int getMemoryMB() {
-    return container.getResource().getMemory();
-  }
-
-  @Override
-  public int getVirtualCores() {
-    return YarnUtils.getVirtualCores(container.getResource());
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/main/hadoop20/org/apache/twill/internal/yarn/Hadoop20YarnContainerStatus.java
----------------------------------------------------------------------
diff --git a/yarn/src/main/hadoop20/org/apache/twill/internal/yarn/Hadoop20YarnContainerStatus.java b/yarn/src/main/hadoop20/org/apache/twill/internal/yarn/Hadoop20YarnContainerStatus.java
deleted file mode 100644
index cc61856..0000000
--- a/yarn/src/main/hadoop20/org/apache/twill/internal/yarn/Hadoop20YarnContainerStatus.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.yarn;
-
-import org.apache.hadoop.yarn.api.records.ContainerState;
-import org.apache.hadoop.yarn.api.records.ContainerStatus;
-
-/**
- *
- */
-public final class Hadoop20YarnContainerStatus implements YarnContainerStatus {
-
-  private final ContainerStatus containerStatus;
-
-  public Hadoop20YarnContainerStatus(ContainerStatus containerStatus) {
-    this.containerStatus = containerStatus;
-  }
-
-  @Override
-  public String getContainerId() {
-    return containerStatus.getContainerId().toString();
-  }
-
-  @Override
-  public ContainerState getState() {
-    return containerStatus.getState();
-  }
-
-  @Override
-  public int getExitStatus() {
-    return containerStatus.getExitStatus();
-  }
-
-  @Override
-  public String getDiagnostics() {
-    return containerStatus.getDiagnostics();
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/main/hadoop20/org/apache/twill/internal/yarn/Hadoop20YarnLaunchContext.java
----------------------------------------------------------------------
diff --git a/yarn/src/main/hadoop20/org/apache/twill/internal/yarn/Hadoop20YarnLaunchContext.java b/yarn/src/main/hadoop20/org/apache/twill/internal/yarn/Hadoop20YarnLaunchContext.java
deleted file mode 100644
index b1f6d66..0000000
--- a/yarn/src/main/hadoop20/org/apache/twill/internal/yarn/Hadoop20YarnLaunchContext.java
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.yarn;
-
-import com.google.common.base.Function;
-import com.google.common.collect.Maps;
-import org.apache.hadoop.security.Credentials;
-import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
-import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
-import org.apache.hadoop.yarn.api.records.LocalResource;
-import org.apache.hadoop.yarn.util.Records;
-
-import java.nio.ByteBuffer;
-import java.util.List;
-import java.util.Map;
-
-/**
- *
- */
-public final class Hadoop20YarnLaunchContext implements YarnLaunchContext {
-
-  private static final Function<YarnLocalResource, LocalResource> RESOURCE_TRANSFORM;
-
-  static {
-    // Creates transform function from YarnLocalResource -> LocalResource
-    RESOURCE_TRANSFORM = new Function<YarnLocalResource, LocalResource>() {
-      @Override
-      public LocalResource apply(YarnLocalResource input) {
-        return input.getLocalResource();
-      }
-    };
-  }
-
-  private final ContainerLaunchContext launchContext;
-
-  public Hadoop20YarnLaunchContext() {
-    launchContext = Records.newRecord(ContainerLaunchContext.class);
-  }
-
-  @Override
-  public <T> T getLaunchContext() {
-    return (T) launchContext;
-  }
-
-  @Override
-  public void setCredentials(Credentials credentials) {
-    launchContext.setContainerTokens(YarnUtils.encodeCredentials(credentials));
-  }
-
-  @Override
-  public void setLocalResources(Map<String, YarnLocalResource> localResources) {
-    launchContext.setLocalResources(Maps.transformValues(localResources, RESOURCE_TRANSFORM));
-  }
-
-  @Override
-  public void setServiceData(Map<String, ByteBuffer> serviceData) {
-    launchContext.setServiceData(serviceData);
-  }
-
-  @Override
-  public Map<String, String> getEnvironment() {
-    return launchContext.getEnvironment();
-  }
-
-  @Override
-  public void setEnvironment(Map<String, String> environment) {
-    launchContext.setEnvironment(environment);
-  }
-
-  @Override
-  public List<String> getCommands() {
-    return launchContext.getCommands();
-  }
-
-  @Override
-  public void setCommands(List<String> commands) {
-    launchContext.setCommands(commands);
-  }
-
-  @Override
-  public void setApplicationACLs(Map<ApplicationAccessType, String> acls) {
-    launchContext.setApplicationACLs(acls);
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/main/hadoop20/org/apache/twill/internal/yarn/Hadoop20YarnLocalResource.java
----------------------------------------------------------------------
diff --git a/yarn/src/main/hadoop20/org/apache/twill/internal/yarn/Hadoop20YarnLocalResource.java b/yarn/src/main/hadoop20/org/apache/twill/internal/yarn/Hadoop20YarnLocalResource.java
deleted file mode 100644
index b327b94..0000000
--- a/yarn/src/main/hadoop20/org/apache/twill/internal/yarn/Hadoop20YarnLocalResource.java
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.yarn;
-
-import org.apache.hadoop.yarn.api.records.LocalResource;
-import org.apache.hadoop.yarn.api.records.LocalResourceType;
-import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
-import org.apache.hadoop.yarn.api.records.URL;
-import org.apache.hadoop.yarn.util.Records;
-
-/**
- *
- */
-public final class Hadoop20YarnLocalResource implements YarnLocalResource {
-
-  private final LocalResource localResource;
-
-  public Hadoop20YarnLocalResource() {
-    this.localResource = Records.newRecord(LocalResource.class);
-  }
-
-  @Override
-  public <T> T getLocalResource() {
-    return (T) localResource;
-  }
-
-  @Override
-  public URL getResource() {
-    return localResource.getResource();
-  }
-
-  @Override
-  public void setResource(URL resource) {
-    localResource.setResource(resource);
-  }
-
-  @Override
-  public long getSize() {
-    return localResource.getSize();
-  }
-
-  @Override
-  public void setSize(long size) {
-    localResource.setSize(size);
-  }
-
-  @Override
-  public long getTimestamp() {
-    return localResource.getTimestamp();
-  }
-
-  @Override
-  public void setTimestamp(long timestamp) {
-    localResource.setTimestamp(timestamp);
-  }
-
-  @Override
-  public LocalResourceType getType() {
-    return localResource.getType();
-  }
-
-  @Override
-  public void setType(LocalResourceType type) {
-    localResource.setType(type);
-  }
-
-  @Override
-  public LocalResourceVisibility getVisibility() {
-    return localResource.getVisibility();
-  }
-
-  @Override
-  public void setVisibility(LocalResourceVisibility visibility) {
-    localResource.setVisibility(visibility);
-  }
-
-  @Override
-  public String getPattern() {
-    return localResource.getPattern();
-  }
-
-  @Override
-  public void setPattern(String pattern) {
-    localResource.setPattern(pattern);
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/main/hadoop20/org/apache/twill/internal/yarn/Hadoop20YarnNMClient.java
----------------------------------------------------------------------
diff --git a/yarn/src/main/hadoop20/org/apache/twill/internal/yarn/Hadoop20YarnNMClient.java b/yarn/src/main/hadoop20/org/apache/twill/internal/yarn/Hadoop20YarnNMClient.java
deleted file mode 100644
index 98ecc67..0000000
--- a/yarn/src/main/hadoop20/org/apache/twill/internal/yarn/Hadoop20YarnNMClient.java
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.yarn;
-
-import org.apache.twill.common.Cancellable;
-import com.google.common.base.Throwables;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.yarn.api.ContainerManager;
-import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.StopContainerRequest;
-import org.apache.hadoop.yarn.api.records.Container;
-import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
-import org.apache.hadoop.yarn.api.records.ContainerState;
-import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
-import org.apache.hadoop.yarn.ipc.YarnRPC;
-import org.apache.hadoop.yarn.util.Records;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.net.InetSocketAddress;
-
-/**
- *
- */
-public final class Hadoop20YarnNMClient implements YarnNMClient {
-
-  private static final Logger LOG = LoggerFactory.getLogger(Hadoop20YarnNMClient.class);
-
-  private final YarnRPC yarnRPC;
-  private final Configuration yarnConf;
-
-  public Hadoop20YarnNMClient(YarnRPC yarnRPC, Configuration yarnConf) {
-    this.yarnRPC = yarnRPC;
-    this.yarnConf = yarnConf;
-  }
-
-  @Override
-  public Cancellable start(YarnContainerInfo containerInfo, YarnLaunchContext launchContext) {
-    ContainerLaunchContext context = launchContext.getLaunchContext();
-    context.setUser(System.getProperty("user.name"));
-
-    Container container = containerInfo.getContainer();
-
-    context.setContainerId(container.getId());
-    context.setResource(container.getResource());
-
-    StartContainerRequest startRequest = Records.newRecord(StartContainerRequest.class);
-    startRequest.setContainerLaunchContext(context);
-
-    ContainerManager manager = connectContainerManager(container);
-    try {
-      manager.startContainer(startRequest);
-      return new ContainerTerminator(container, manager);
-    } catch (YarnRemoteException e) {
-      LOG.error("Error in launching process", e);
-      throw Throwables.propagate(e);
-    }
-
-  }
-
-  /**
-   * Helper to connect to container manager (node manager).
-   */
-  private ContainerManager connectContainerManager(Container container) {
-    String cmIpPortStr = String.format("%s:%d", container.getNodeId().getHost(), container.getNodeId().getPort());
-    InetSocketAddress cmAddress = NetUtils.createSocketAddr(cmIpPortStr);
-    return ((ContainerManager) yarnRPC.getProxy(ContainerManager.class, cmAddress, yarnConf));
-  }
-
-  private static final class ContainerTerminator implements Cancellable {
-
-    private final Container container;
-    private final ContainerManager manager;
-
-    private ContainerTerminator(Container container, ContainerManager manager) {
-      this.container = container;
-      this.manager = manager;
-    }
-
-    @Override
-    public void cancel() {
-      LOG.info("Request to stop container {}.", container.getId());
-      StopContainerRequest stopRequest = Records.newRecord(StopContainerRequest.class);
-      stopRequest.setContainerId(container.getId());
-      try {
-        manager.stopContainer(stopRequest);
-        boolean completed = false;
-        while (!completed) {
-          GetContainerStatusRequest statusRequest = Records.newRecord(GetContainerStatusRequest.class);
-          statusRequest.setContainerId(container.getId());
-          GetContainerStatusResponse statusResponse = manager.getContainerStatus(statusRequest);
-          LOG.info("Container status: {} {}", statusResponse.getStatus(), statusResponse.getStatus().getDiagnostics());
-
-          completed = (statusResponse.getStatus().getState() == ContainerState.COMPLETE);
-        }
-        LOG.info("Container {} stopped.", container.getId());
-      } catch (YarnRemoteException e) {
-        LOG.error("Fail to stop container {}", container.getId(), e);
-        throw Throwables.propagate(e);
-      }
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/main/hadoop20/org/apache/twill/internal/yarn/ports/AMRMClient.java
----------------------------------------------------------------------
diff --git a/yarn/src/main/hadoop20/org/apache/twill/internal/yarn/ports/AMRMClient.java b/yarn/src/main/hadoop20/org/apache/twill/internal/yarn/ports/AMRMClient.java
deleted file mode 100644
index 26b6fa2..0000000
--- a/yarn/src/main/hadoop20/org/apache/twill/internal/yarn/ports/AMRMClient.java
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.yarn.ports;
-
-import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
-import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
-import org.apache.hadoop.yarn.api.records.Priority;
-import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
-import org.apache.hadoop.yarn.service.Service;
-
-/**
- * Ported from Apache Hadoop YARN.
- */
-public interface AMRMClient extends Service {
-
-  /**
-   * Value used to define no locality.
-   */
-  static final String ANY = "*";
-
-  /**
-   * Object to represent container request for resources.
-   * Resources may be localized to nodes and racks.
-   * Resources may be assigned priorities.
-   * Can ask for multiple containers of a given type.
-   */
-  public static class ContainerRequest {
-    Resource capability;
-    String[] hosts;
-    String[] racks;
-    Priority priority;
-    int containerCount;
-
-    public ContainerRequest(Resource capability, String[] hosts,
-                            String[] racks, Priority priority, int containerCount) {
-      this.capability = capability;
-      this.hosts = (hosts != null ? hosts.clone() : null);
-      this.racks = (racks != null ? racks.clone() : null);
-      this.priority = priority;
-      this.containerCount = containerCount;
-    }
-    public String toString() {
-      StringBuilder sb = new StringBuilder();
-      sb.append("Capability[").append(capability).append("]");
-      sb.append("Priority[").append(priority).append("]");
-      sb.append("ContainerCount[").append(containerCount).append("]");
-      return sb.toString();
-    }
-  }
-
-  /**
-   * Register the application master. This must be called before any
-   * other interaction
-   * @param appHostName Name of the host on which master is running
-   * @param appHostPort Port master is listening on
-   * @param appTrackingUrl URL at which the master info can be seen
-   * @return <code>RegisterApplicationMasterResponse</code>
-   * @throws org.apache.hadoop.yarn.exceptions.YarnRemoteException
-   */
-  public RegisterApplicationMasterResponse
-  registerApplicationMaster(String appHostName,
-                            int appHostPort,
-                            String appTrackingUrl)
-    throws YarnRemoteException;
-
-  /**
-   * Request additional containers and receive new container allocations.
-   * Requests made via <code>addContainerRequest</code> are sent to the
-   * <code>ResourceManager</code>. New containers assigned to the master are
-   * retrieved. Status of completed containers and node health updates are
-   * also retrieved.
-   * This also doubles as a heartbeat to the ResourceManager and must be
-   * made periodically.
-   * The call may not always return any new allocations of containers.
-   * The app should not make concurrent allocate requests, as doing so may cause request loss.
-   * @param progressIndicator Indicates progress made by the master
-   * @return the response of the allocate request
-   * @throws YarnRemoteException
-   */
-  public AllocationResponse allocate(float progressIndicator)
-    throws YarnRemoteException;
-
-  /**
-   * Unregister the Application Master. This must be called in the end.
-   * @param appStatus Success/Failure status of the master
-   * @param appMessage Diagnostics message on failure
-   * @param appTrackingUrl New URL to get master info
-   * @throws YarnRemoteException
-   */
-  public void unregisterApplicationMaster(FinalApplicationStatus appStatus,
-                                          String appMessage,
-                                          String appTrackingUrl)
-    throws YarnRemoteException;
-
-  /**
-   * Request containers for resources before calling <code>allocate</code>.
-   * @param req Resource request
-   */
-  public void addContainerRequest(ContainerRequest req);
-
-  /**
-   * Remove a previous container request. The previous container request may
-   * have already been sent to the ResourceManager, so even after this removal
-   * the app must be prepared to receive an allocation for the previously
-   * requested resources.
-   * @param req Resource request
-   */
-  public void removeContainerRequest(ContainerRequest req);
-
-  /**
-   * Release containers assigned by the Resource Manager. If the app cannot use
-   * the container or wants to give up the container then it can release it.
-   * The app needs to make new requests for the released resource capability if
-   * it still needs it, for example when it releases containers allocated on non-preferred nodes.
-   * @param containerId
-   */
-  public void releaseAssignedContainer(ContainerId containerId);
-
-  /**
-   * Get the currently available resources in the cluster.
-   * A valid value is available after a call to allocate has been made
-   * @return Currently available resources
-   */
-  public Resource getClusterAvailableResources();
-
-  /**
-   * Get the current number of nodes in the cluster.
-   * A valid value is available after a call to allocate has been made
-   * @return Current number of nodes in the cluster
-   */
-  public int getClusterNodeCount();
-}
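
The interface above captures the usual application-master lifecycle: register, add container requests, heartbeat through allocate(), and unregister. A condensed, hypothetical driver fragment against it (conf, hostname, port, trackingUrl, attemptId, progress and the loop condition are placeholders; error handling omitted):

    AMRMClient amrmClient = new AMRMClientImpl(attemptId);
    amrmClient.init(conf);
    amrmClient.start();
    amrmClient.registerApplicationMaster(hostname, port, trackingUrl);

    // Ask for one container; capability and priority values are illustrative.
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(512);
    Priority priority = Records.newRecord(Priority.class);
    priority.setPriority(0);
    amrmClient.addContainerRequest(
      new AMRMClient.ContainerRequest(capability, null, null, priority, 1));

    boolean done = false;
    while (!done) {
      // Doubles as the heartbeat; also returns completed container statuses.
      AllocationResponse response = amrmClient.allocate(progress);
      // Launch work in response.getAllocatedContainers(); containers that are
      // no longer needed can be returned via releaseAssignedContainer(...).
      done = true;  // placeholder exit condition
    }

    amrmClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED, null, trackingUrl);
    amrmClient.stop();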


[06/28] Making maven site works.

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/main/java/org/apache/twill/internal/appmaster/ApplicationMasterProcessLauncher.java
----------------------------------------------------------------------
diff --git a/yarn/src/main/java/org/apache/twill/internal/appmaster/ApplicationMasterProcessLauncher.java b/yarn/src/main/java/org/apache/twill/internal/appmaster/ApplicationMasterProcessLauncher.java
deleted file mode 100644
index b51bb63..0000000
--- a/yarn/src/main/java/org/apache/twill/internal/appmaster/ApplicationMasterProcessLauncher.java
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.appmaster;
-
-import org.apache.twill.internal.Constants;
-import org.apache.twill.internal.EnvKeys;
-import org.apache.twill.internal.ProcessController;
-import org.apache.twill.internal.yarn.AbstractYarnProcessLauncher;
-import org.apache.twill.internal.yarn.YarnLaunchContext;
-import org.apache.twill.internal.yarn.YarnUtils;
-import com.google.common.collect.ImmutableMap;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.util.Records;
-
-import java.util.Map;
-
-/**
- * A {@link org.apache.twill.internal.ProcessLauncher} for launching Application Master from the client.
- */
-public final class ApplicationMasterProcessLauncher extends AbstractYarnProcessLauncher<ApplicationId> {
-
-  private final ApplicationSubmitter submitter;
-
-  public ApplicationMasterProcessLauncher(ApplicationId appId, ApplicationSubmitter submitter) {
-    super(appId);
-    this.submitter = submitter;
-  }
-
-  @Override
-  protected boolean useArchiveSuffix() {
-    return false;
-  }
-
-  @Override
-  @SuppressWarnings("unchecked")
-  protected <R> ProcessController<R> doLaunch(YarnLaunchContext launchContext) {
-    final ApplicationId appId = getContainerInfo();
-
-    // Set the resource requirement for AM
-    Resource capability = Records.newRecord(Resource.class);
-    capability.setMemory(Constants.APP_MASTER_MEMORY_MB);
-    YarnUtils.setVirtualCores(capability, 1);
-
-    // Put in extra environments
-    Map<String, String> env = ImmutableMap.<String, String>builder()
-      .putAll(launchContext.getEnvironment())
-      .put(EnvKeys.YARN_APP_ID, Integer.toString(appId.getId()))
-      .put(EnvKeys.YARN_APP_ID_CLUSTER_TIME, Long.toString(appId.getClusterTimestamp()))
-      .put(EnvKeys.YARN_APP_ID_STR, appId.toString())
-      .put(EnvKeys.YARN_CONTAINER_MEMORY_MB, Integer.toString(Constants.APP_MASTER_MEMORY_MB))
-      .put(EnvKeys.YARN_CONTAINER_VIRTUAL_CORES, Integer.toString(YarnUtils.getVirtualCores(capability)))
-      .build();
-
-    launchContext.setEnvironment(env);
-    return (ProcessController<R>) submitter.submit(launchContext, capability);
-  }
-}
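
For context on the environment block that doLaunch() builds above: the same EnvKeys constants are what the launched application master uses to read those values back. An assumed, illustrative fragment of the consuming side (the real reader lives elsewhere in the tree and is not part of this diff):

    // Illustrative only: reconstructing the settings exported by doLaunch().
    int appId = Integer.parseInt(System.getenv(EnvKeys.YARN_APP_ID));
    long clusterTime = Long.parseLong(System.getenv(EnvKeys.YARN_APP_ID_CLUSTER_TIME));
    int memoryMB = Integer.parseInt(System.getenv(EnvKeys.YARN_CONTAINER_MEMORY_MB));
    int virtualCores = Integer.parseInt(System.getenv(EnvKeys.YARN_CONTAINER_VIRTUAL_CORES));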

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/main/java/org/apache/twill/internal/appmaster/ApplicationMasterService.java
----------------------------------------------------------------------
diff --git a/yarn/src/main/java/org/apache/twill/internal/appmaster/ApplicationMasterService.java b/yarn/src/main/java/org/apache/twill/internal/appmaster/ApplicationMasterService.java
deleted file mode 100644
index 51c8503..0000000
--- a/yarn/src/main/java/org/apache/twill/internal/appmaster/ApplicationMasterService.java
+++ /dev/null
@@ -1,799 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.appmaster;
-
-import org.apache.twill.api.Command;
-import org.apache.twill.api.EventHandler;
-import org.apache.twill.api.EventHandlerSpecification;
-import org.apache.twill.api.LocalFile;
-import org.apache.twill.api.ResourceSpecification;
-import org.apache.twill.api.RunId;
-import org.apache.twill.api.RuntimeSpecification;
-import org.apache.twill.api.TwillRunResources;
-import org.apache.twill.api.TwillSpecification;
-import org.apache.twill.common.Threads;
-import org.apache.twill.filesystem.Location;
-import org.apache.twill.internal.AbstractTwillService;
-import org.apache.twill.internal.Configs;
-import org.apache.twill.internal.Constants;
-import org.apache.twill.internal.DefaultTwillRunResources;
-import org.apache.twill.internal.EnvKeys;
-import org.apache.twill.internal.ProcessLauncher;
-import org.apache.twill.internal.TwillContainerLauncher;
-import org.apache.twill.internal.ZKServiceDecorator;
-import org.apache.twill.internal.json.LocalFileCodec;
-import org.apache.twill.internal.json.TwillSpecificationAdapter;
-import org.apache.twill.internal.kafka.EmbeddedKafkaServer;
-import org.apache.twill.internal.logging.Loggings;
-import org.apache.twill.internal.state.Message;
-import org.apache.twill.internal.state.MessageCallback;
-import org.apache.twill.internal.utils.Instances;
-import org.apache.twill.internal.utils.Networks;
-import org.apache.twill.internal.yarn.YarnAMClient;
-import org.apache.twill.internal.yarn.YarnAMClientFactory;
-import org.apache.twill.internal.yarn.YarnContainerInfo;
-import org.apache.twill.internal.yarn.YarnContainerStatus;
-import org.apache.twill.internal.yarn.YarnUtils;
-import org.apache.twill.zookeeper.ZKClient;
-import org.apache.twill.zookeeper.ZKClients;
-import com.google.common.base.Charsets;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Predicate;
-import com.google.common.base.Supplier;
-import com.google.common.base.Throwables;
-import com.google.common.collect.HashMultiset;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.ImmutableMultimap;
-import com.google.common.collect.Iterables;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-import com.google.common.collect.Multiset;
-import com.google.common.collect.Sets;
-import com.google.common.io.CharStreams;
-import com.google.common.io.Files;
-import com.google.common.io.InputSupplier;
-import com.google.common.reflect.TypeToken;
-import com.google.common.util.concurrent.AbstractExecutionThreadService;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.Service;
-import com.google.common.util.concurrent.SettableFuture;
-import com.google.gson.Gson;
-import com.google.gson.GsonBuilder;
-import com.google.gson.JsonElement;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.security.Credentials;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.util.Records;
-import org.apache.zookeeper.CreateMode;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.FileReader;
-import java.io.IOException;
-import java.io.Reader;
-import java.net.URI;
-import java.net.URL;
-import java.util.Collection;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-import java.util.Queue;
-import java.util.Set;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
-
-/**
- * The YARN Application Master service for a Twill application: it requests containers,
- * launches runnables in them, and reacts to instance-change and stop commands.
- */
-public final class ApplicationMasterService extends AbstractTwillService {
-
-  private static final Logger LOG = LoggerFactory.getLogger(ApplicationMasterService.class);
-
-  // Copied from org.apache.hadoop.yarn.security.AMRMTokenIdentifier.KIND_NAME since it's missing in Hadoop-2.0
-  private static final Text AMRM_TOKEN_KIND_NAME = new Text("YARN_AM_RM_TOKEN");
-
-  private final RunId runId;
-  private final ZKClient zkClient;
-  private final TwillSpecification twillSpec;
-  private final ApplicationMasterLiveNodeData amLiveNode;
-  private final ZKServiceDecorator serviceDelegate;
-  private final RunningContainers runningContainers;
-  private final ExpectedContainers expectedContainers;
-  private final TrackerService trackerService;
-  private final YarnAMClient amClient;
-  private final String jvmOpts;
-  private final int reservedMemory;
-  private final EventHandler eventHandler;
-  private final Location applicationLocation;
-
-  private EmbeddedKafkaServer kafkaServer;
-  private Queue<RunnableContainerRequest> runnableContainerRequests;
-  private ExecutorService instanceChangeExecutor;
-
-  public ApplicationMasterService(RunId runId, ZKClient zkClient, File twillSpecFile,
-                                  YarnAMClientFactory amClientFactory, Location applicationLocation) throws Exception {
-    super(applicationLocation);
-
-    this.runId = runId;
-    this.twillSpec = TwillSpecificationAdapter.create().fromJson(twillSpecFile);
-    this.zkClient = zkClient;
-    this.applicationLocation = applicationLocation;
-    this.amClient = amClientFactory.create();
-    this.credentials = createCredentials();
-    this.jvmOpts = loadJvmOptions();
-    this.reservedMemory = getReservedMemory();
-
-    amLiveNode = new ApplicationMasterLiveNodeData(Integer.parseInt(System.getenv(EnvKeys.YARN_APP_ID)),
-                                                   Long.parseLong(System.getenv(EnvKeys.YARN_APP_ID_CLUSTER_TIME)),
-                                                   amClient.getContainerId().toString());
-
-    serviceDelegate = new ZKServiceDecorator(zkClient, runId, createLiveNodeDataSupplier(),
-                                             new ServiceDelegate(), new Runnable() {
-      @Override
-      public void run() {
-        amClient.stopAndWait();
-      }
-    });
-    expectedContainers = initExpectedContainers(twillSpec);
-    runningContainers = initRunningContainers(amClient.getContainerId(), amClient.getHost());
-    trackerService = new TrackerService(runningContainers.getResourceReport(), amClient.getHost());
-    eventHandler = createEventHandler(twillSpec);
-  }
-
-  private String loadJvmOptions() throws IOException {
-    final File jvmOptsFile = new File(Constants.Files.JVM_OPTIONS);
-    if (!jvmOptsFile.exists()) {
-      return "";
-    }
-
-    return CharStreams.toString(new InputSupplier<Reader>() {
-      @Override
-      public Reader getInput() throws IOException {
-        return new FileReader(jvmOptsFile);
-      }
-    });
-  }
-
-  private int getReservedMemory() {
-    String value = System.getenv(EnvKeys.TWILL_RESERVED_MEMORY_MB);
-    if (value == null) {
-      return Configs.Defaults.JAVA_RESERVED_MEMORY_MB;
-    }
-    try {
-      return Integer.parseInt(value);
-    } catch (Exception e) {
-      return Configs.Defaults.JAVA_RESERVED_MEMORY_MB;
-    }
-  }
-
-  private EventHandler createEventHandler(TwillSpecification twillSpec) {
-    try {
-      // Should be loadable by this class's ClassLoader, as they are packaged in the same jar.
-      EventHandlerSpecification handlerSpec = twillSpec.getEventHandler();
-
-      Class<?> handlerClass = getClass().getClassLoader().loadClass(handlerSpec.getClassName());
-      Preconditions.checkArgument(EventHandler.class.isAssignableFrom(handlerClass),
-                                  "Class %s does not implement %s",
-                                  handlerClass, EventHandler.class.getName());
-      return Instances.newInstance((Class<? extends EventHandler>) handlerClass);
-    } catch (Exception e) {
-      throw Throwables.propagate(e);
-    }
-  }
-
-  private Supplier<? extends JsonElement> createLiveNodeDataSupplier() {
-    return new Supplier<JsonElement>() {
-      @Override
-      public JsonElement get() {
-        return new Gson().toJsonTree(amLiveNode);
-      }
-    };
-  }
-
-  private RunningContainers initRunningContainers(ContainerId appMasterContainerId,
-                                                  String appMasterHost) throws Exception {
-    TwillRunResources appMasterResources = new DefaultTwillRunResources(
-      0,
-      appMasterContainerId.toString(),
-      Integer.parseInt(System.getenv(EnvKeys.YARN_CONTAINER_VIRTUAL_CORES)),
-      Integer.parseInt(System.getenv(EnvKeys.YARN_CONTAINER_MEMORY_MB)),
-      appMasterHost);
-    String appId = appMasterContainerId.getApplicationAttemptId().getApplicationId().toString();
-    return new RunningContainers(appId, appMasterResources);
-  }
-
-  private ExpectedContainers initExpectedContainers(TwillSpecification twillSpec) {
-    Map<String, Integer> expectedCounts = Maps.newHashMap();
-    for (RuntimeSpecification runtimeSpec : twillSpec.getRunnables().values()) {
-      expectedCounts.put(runtimeSpec.getName(), runtimeSpec.getResourceSpecification().getInstances());
-    }
-    return new ExpectedContainers(expectedCounts);
-  }
-
-  private void doStart() throws Exception {
-    LOG.info("Start application master with spec: " + TwillSpecificationAdapter.create().toJson(twillSpec));
-
-    // Initialize the event handler; if it fails, it will fail the application.
-    eventHandler.initialize(new BasicEventHandlerContext(twillSpec.getEventHandler()));
-
-    instanceChangeExecutor = Executors.newSingleThreadExecutor(Threads.createDaemonThreadFactory("instanceChanger"));
-
-    kafkaServer = new EmbeddedKafkaServer(new File(Constants.Files.KAFKA), generateKafkaConfig());
-
-    // Must start tracker before start AMClient
-    LOG.info("Starting application master tracker server");
-    trackerService.startAndWait();
-    URL trackerUrl = trackerService.getUrl();
-    LOG.info("Started application master tracker server on " + trackerUrl);
-
-    amClient.setTracker(trackerService.getBindAddress(), trackerUrl);
-    amClient.startAndWait();
-
-    // Creates ZK path for runnable and kafka logging service
-    Futures.allAsList(ImmutableList.of(
-      zkClient.create("/" + runId.getId() + "/runnables", null, CreateMode.PERSISTENT),
-      zkClient.create("/" + runId.getId() + "/kafka", null, CreateMode.PERSISTENT))
-    ).get();
-
-    // Starts kafka server
-    LOG.info("Starting kafka server");
-
-    kafkaServer.startAndWait();
-    LOG.info("Kafka server started");
-
-    runnableContainerRequests = initContainerRequests();
-  }
-
-  private void doStop() throws Exception {
-    Thread.interrupted();     // This is just to clear the interrupt flag
-
-    LOG.info("Stop application master with spec: {}", TwillSpecificationAdapter.create().toJson(twillSpec));
-
-    try {
-      // Call event handler destroy. If there is an error, only log it and do not affect the stop sequence.
-      eventHandler.destroy();
-    } catch (Throwable t) {
-      LOG.warn("Exception when calling {}.destroy()", twillSpec.getEventHandler().getClassName(), t);
-    }
-
-    instanceChangeExecutor.shutdownNow();
-
-    // For checking if all containers are stopped.
-    final Set<String> ids = Sets.newHashSet(runningContainers.getContainerIds());
-    YarnAMClient.AllocateHandler handler = new YarnAMClient.AllocateHandler() {
-      @Override
-      public void acquired(List<ProcessLauncher<YarnContainerInfo>> launchers) {
-        // no-op
-      }
-
-      @Override
-      public void completed(List<YarnContainerStatus> completed) {
-        for (YarnContainerStatus status : completed) {
-          ids.remove(status.getContainerId());
-        }
-      }
-    };
-
-    runningContainers.stopAll();
-
-    // Poll for 5 seconds to wait for containers to stop.
-    int count = 0;
-    while (!ids.isEmpty() && count++ < 5) {
-      amClient.allocate(0.0f, handler);
-      TimeUnit.SECONDS.sleep(1);
-    }
-
-    LOG.info("Stopping application master tracker server");
-    try {
-      trackerService.stopAndWait();
-      LOG.info("Stopped application master tracker server");
-    } catch (Exception e) {
-      LOG.error("Failed to stop tracker service.", e);
-    } finally {
-      try {
-        // App location cleanup
-        cleanupDir(URI.create(System.getenv(EnvKeys.TWILL_APP_DIR)));
-        Loggings.forceFlush();
-        // Sleep a short while to let kafka clients have a chance to fetch the logs
-        TimeUnit.SECONDS.sleep(1);
-      } finally {
-        kafkaServer.stopAndWait();
-        LOG.info("Kafka server stopped");
-      }
-    }
-  }
-
-  private void cleanupDir(URI appDir) {
-    try {
-      if (applicationLocation.delete(true)) {
-        LOG.info("Application directory deleted: {}", appDir);
-      } else {
-        LOG.warn("Failed to cleanup directory {}.", appDir);
-      }
-    } catch (Exception e) {
-      LOG.warn("Exception while cleanup directory {}.", appDir, e);
-    }
-  }
-
-
-  private void doRun() throws Exception {
-    // The main loop
-    Map.Entry<Resource, ? extends Collection<RuntimeSpecification>> currentRequest = null;
-    final Queue<ProvisionRequest> provisioning = Lists.newLinkedList();
-
-    YarnAMClient.AllocateHandler allocateHandler = new YarnAMClient.AllocateHandler() {
-      @Override
-      public void acquired(List<ProcessLauncher<YarnContainerInfo>> launchers) {
-        launchRunnable(launchers, provisioning);
-      }
-
-      @Override
-      public void completed(List<YarnContainerStatus> completed) {
-        handleCompleted(completed);
-      }
-    };
-
-    long nextTimeoutCheck = System.currentTimeMillis() + Constants.PROVISION_TIMEOUT;
-    while (isRunning()) {
-      // Call allocate. It has to be made first in order to get cluster resource availability.
-      amClient.allocate(0.0f, allocateHandler);
-
-      // Looks for container requests.
-      if (provisioning.isEmpty() && runnableContainerRequests.isEmpty() && runningContainers.isEmpty()) {
-        LOG.info("All containers completed. Shutting down application master.");
-        break;
-      }
-
-      // If nothing is in provisioning, and no pending request, move to next one
-      while (provisioning.isEmpty() && currentRequest == null && !runnableContainerRequests.isEmpty()) {
-        currentRequest = runnableContainerRequests.peek().takeRequest();
-        if (currentRequest == null) {
-          // All different types of resource requests from the current order are done, move to the next one
-          // TODO: Need to handle order type as well
-          runnableContainerRequests.poll();
-        }
-      }
-      // Nothing in provisioning; make the next batch of provision requests
-      if (provisioning.isEmpty() && currentRequest != null) {
-        addContainerRequests(currentRequest.getKey(), currentRequest.getValue(), provisioning);
-        currentRequest = null;
-      }
-
-      nextTimeoutCheck = checkProvisionTimeout(nextTimeoutCheck);
-
-      if (isRunning()) {
-        TimeUnit.SECONDS.sleep(1);
-      }
-    }
-  }
-
-  /**
-   * Handling containers that are completed.
-   */
-  private void handleCompleted(List<YarnContainerStatus> completedContainersStatuses) {
-    Multiset<String> restartRunnables = HashMultiset.create();
-    for (YarnContainerStatus status : completedContainersStatuses) {
-      LOG.info("Container {} completed with {}:{}.",
-               status.getContainerId(), status.getState(), status.getDiagnostics());
-      runningContainers.handleCompleted(status, restartRunnables);
-    }
-
-    for (Multiset.Entry<String> entry : restartRunnables.entrySet()) {
-      LOG.info("Re-request container for {} with {} instances.", entry.getElement(), entry.getCount());
-      for (int i = 0; i < entry.getCount(); i++) {
-        runnableContainerRequests.add(createRunnableContainerRequest(entry.getElement()));
-      }
-    }
-
-    // For all runnables that need to re-request containers, update the expected count timestamp
-    // so that the EventHandler is triggered with the right expiration timestamp.
-    expectedContainers.updateRequestTime(restartRunnables.elementSet());
-  }
-
-  /**
-   * Checks for container provision timeouts and invokes the eventHandler if necessary.
-   *
-   * @return the timestamp for the next time this method needs to be called.
-   */
-  private long checkProvisionTimeout(long nextTimeoutCheck) {
-    if (System.currentTimeMillis() < nextTimeoutCheck) {
-      return nextTimeoutCheck;
-    }
-
-    // Invoke event handler for provision request timeout
-    Map<String, ExpectedContainers.ExpectedCount> expiredRequests = expectedContainers.getAll();
-    Map<String, Integer> runningCounts = runningContainers.countAll();
-
-    List<EventHandler.TimeoutEvent> timeoutEvents = Lists.newArrayList();
-    for (Map.Entry<String, ExpectedContainers.ExpectedCount> entry : expiredRequests.entrySet()) {
-      String runnableName = entry.getKey();
-      ExpectedContainers.ExpectedCount expectedCount = entry.getValue();
-      int runningCount = runningCounts.containsKey(runnableName) ? runningCounts.get(runnableName) : 0;
-      if (expectedCount.getCount() != runningCount) {
-        timeoutEvents.add(new EventHandler.TimeoutEvent(runnableName, expectedCount.getCount(),
-                                                                   runningCount, expectedCount.getTimestamp()));
-      }
-    }
-
-    if (!timeoutEvents.isEmpty()) {
-      try {
-        EventHandler.TimeoutAction action = eventHandler.launchTimeout(timeoutEvents);
-        if (action.getTimeout() < 0) {
-          // Abort application
-          stop();
-        } else {
-          return nextTimeoutCheck + action.getTimeout();
-        }
-      } catch (Throwable t) {
-        LOG.warn("Exception when calling EventHandler. Ignoring the result.", t);
-      }
-    }
-    return nextTimeoutCheck + Constants.PROVISION_TIMEOUT;
-  }
-
-  private Credentials createCredentials() {
-    Credentials credentials = new Credentials();
-    if (!UserGroupInformation.isSecurityEnabled()) {
-      return credentials;
-    }
-
-    try {
-      credentials.addAll(UserGroupInformation.getCurrentUser().getCredentials());
-
-      // Remove the AM->RM tokens
-      Iterator<Token<?>> iter = credentials.getAllTokens().iterator();
-      while (iter.hasNext()) {
-        Token<?> token = iter.next();
-        if (token.getKind().equals(AMRM_TOKEN_KIND_NAME)) {
-          iter.remove();
-        }
-      }
-    } catch (IOException e) {
-      LOG.warn("Failed to get current user. No credentials will be provided to containers.", e);
-    }
-
-    return credentials;
-  }
-
-  private Queue<RunnableContainerRequest> initContainerRequests() {
-    // Stores container requests in order.
-    Queue<RunnableContainerRequest> requests = Lists.newLinkedList();
-    // For each order in the twillSpec, create container request for each runnable.
-    for (TwillSpecification.Order order : twillSpec.getOrders()) {
-      // Group container requests based on resource requirement.
-      ImmutableMultimap.Builder<Resource, RuntimeSpecification> builder = ImmutableMultimap.builder();
-      for (String runnableName : order.getNames()) {
-        RuntimeSpecification runtimeSpec = twillSpec.getRunnables().get(runnableName);
-        Resource capability = createCapability(runtimeSpec.getResourceSpecification());
-        builder.put(capability, runtimeSpec);
-      }
-      requests.add(new RunnableContainerRequest(order.getType(), builder.build()));
-    }
-    return requests;
-  }
-
-  /**
-   * Adds container requests with the given resource capability for each runtime specification.
-   */
-  private void addContainerRequests(Resource capability,
-                                    Collection<RuntimeSpecification> runtimeSpecs,
-                                    Queue<ProvisionRequest> provisioning) {
-    for (RuntimeSpecification runtimeSpec : runtimeSpecs) {
-      String name = runtimeSpec.getName();
-      int newContainers = expectedContainers.getExpected(name) - runningContainers.count(name);
-      if (newContainers > 0) {
-        // TODO: Allow user to set priority?
-        LOG.info("Requesting {} container(s) with capability {}", newContainers, capability);
-        String requestId = amClient.addContainerRequest(capability, newContainers).setPriority(0).apply();
-        provisioning.add(new ProvisionRequest(runtimeSpec, requestId, newContainers));
-      }
-    }
-  }
-
-  /**
-   * Launches runnables in the provisioned containers.
-   */
-  private void launchRunnable(List<ProcessLauncher<YarnContainerInfo>> launchers,
-                              Queue<ProvisionRequest> provisioning) {
-    for (ProcessLauncher<YarnContainerInfo> processLauncher : launchers) {
-      LOG.info("Got container {}", processLauncher.getContainerInfo().getId());
-      ProvisionRequest provisionRequest = provisioning.peek();
-      if (provisionRequest == null) {
-        continue;
-      }
-
-      String runnableName = provisionRequest.getRuntimeSpec().getName();
-      LOG.info("Starting runnable {} with {}", runnableName, processLauncher);
-
-      int containerCount = expectedContainers.getExpected(runnableName);
-
-      ProcessLauncher.PrepareLaunchContext launchContext = processLauncher.prepareLaunch(
-        ImmutableMap.<String, String>builder()
-          .put(EnvKeys.TWILL_APP_DIR, System.getenv(EnvKeys.TWILL_APP_DIR))
-          .put(EnvKeys.TWILL_FS_USER, System.getenv(EnvKeys.TWILL_FS_USER))
-          .put(EnvKeys.TWILL_APP_RUN_ID, runId.getId())
-          .put(EnvKeys.TWILL_APP_NAME, twillSpec.getName())
-          .put(EnvKeys.TWILL_ZK_CONNECT, zkClient.getConnectString())
-          .put(EnvKeys.TWILL_LOG_KAFKA_ZK, getKafkaZKConnect())
-          .build()
-        , getLocalizeFiles(), credentials
-      );
-
-      TwillContainerLauncher launcher = new TwillContainerLauncher(
-        twillSpec.getRunnables().get(runnableName), launchContext,
-        ZKClients.namespace(zkClient, getZKNamespace(runnableName)),
-        containerCount, jvmOpts, reservedMemory, getSecureStoreLocation());
-
-      runningContainers.start(runnableName, processLauncher.getContainerInfo(), launcher);
-
-      // Need to call complete to workaround bug in YARN AMRMClient
-      if (provisionRequest.containerAcquired()) {
-        amClient.completeContainerRequest(provisionRequest.getRequestId());
-      }
-
-      if (expectedContainers.getExpected(runnableName) == runningContainers.count(runnableName)) {
-        LOG.info("Runnable " + runnableName + " fully provisioned with " + containerCount + " instances.");
-        provisioning.poll();
-      }
-    }
-  }
-
-  private List<LocalFile> getLocalizeFiles() {
-    try {
-      Reader reader = Files.newReader(new File(Constants.Files.LOCALIZE_FILES), Charsets.UTF_8);
-      try {
-        return new GsonBuilder().registerTypeAdapter(LocalFile.class, new LocalFileCodec())
-                                .create().fromJson(reader, new TypeToken<List<LocalFile>>() {}.getType());
-      } finally {
-        reader.close();
-      }
-    } catch (IOException e) {
-      throw Throwables.propagate(e);
-    }
-  }
-
-  private String getZKNamespace(String runnableName) {
-    return String.format("/%s/runnables/%s", runId.getId(), runnableName);
-  }
-
-  private String getKafkaZKConnect() {
-    return String.format("%s/%s/kafka", zkClient.getConnectString(), runId.getId());
-  }
-
-  private Properties generateKafkaConfig() {
-    int port = Networks.getRandomPort();
-    Preconditions.checkState(port > 0, "Failed to get random port.");
-
-    Properties prop = new Properties();
-    prop.setProperty("log.dir", new File("kafka-logs").getAbsolutePath());
-    prop.setProperty("zk.connect", getKafkaZKConnect());
-    prop.setProperty("num.threads", "8");
-    prop.setProperty("port", Integer.toString(port));
-    prop.setProperty("log.flush.interval", "10000");
-    prop.setProperty("max.socket.request.bytes", "104857600");
-    prop.setProperty("log.cleanup.interval.mins", "1");
-    prop.setProperty("log.default.flush.scheduler.interval.ms", "1000");
-    prop.setProperty("zk.connectiontimeout.ms", "1000000");
-    prop.setProperty("socket.receive.buffer", "1048576");
-    prop.setProperty("enable.zookeeper", "true");
-    prop.setProperty("log.retention.hours", "24");
-    prop.setProperty("brokerid", "0");
-    prop.setProperty("socket.send.buffer", "1048576");
-    prop.setProperty("num.partitions", "1");
-    prop.setProperty("log.file.size", "536870912");
-    prop.setProperty("log.default.flush.interval.ms", "1000");
-    return prop;
-  }
-
-  private ListenableFuture<String> processMessage(final String messageId, Message message) {
-    LOG.debug("Message received: {} {}.", messageId, message);
-
-    SettableFuture<String> result = SettableFuture.create();
-    Runnable completion = getMessageCompletion(messageId, result);
-
-    if (handleSecureStoreUpdate(message)) {
-      runningContainers.sendToAll(message, completion);
-      return result;
-    }
-
-    if (handleSetInstances(message, completion)) {
-      return result;
-    }
-
-    // Replicate messages to all runnables
-    if (message.getScope() == Message.Scope.ALL_RUNNABLE) {
-      runningContainers.sendToAll(message, completion);
-      return result;
-    }
-
-    // Replicate message to a particular runnable.
-    if (message.getScope() == Message.Scope.RUNNABLE) {
-      runningContainers.sendToRunnable(message.getRunnableName(), message, completion);
-      return result;
-    }
-
-    LOG.info("Message ignored. {}", message);
-    return Futures.immediateFuture(messageId);
-  }
-
-  /**
-   * Attempts to change the number of running instances.
-   * @return {@code true} if the message requests a change in the number of running instances of a runnable,
-   *         {@code false} otherwise.
-   */
-  private boolean handleSetInstances(final Message message, final Runnable completion) {
-    if (message.getType() != Message.Type.SYSTEM || message.getScope() != Message.Scope.RUNNABLE) {
-      return false;
-    }
-
-    Command command = message.getCommand();
-    Map<String, String> options = command.getOptions();
-    if (!"instances".equals(command.getCommand()) || !options.containsKey("count")) {
-      return false;
-    }
-
-    final String runnableName = message.getRunnableName();
-    if (runnableName == null || runnableName.isEmpty() || !twillSpec.getRunnables().containsKey(runnableName)) {
-      LOG.info("Unknown runnable {}", runnableName);
-      return false;
-    }
-
-    final int newCount = Integer.parseInt(options.get("count"));
-    final int oldCount = expectedContainers.getExpected(runnableName);
-
-    LOG.info("Received change instances request for {}, from {} to {}.", runnableName, oldCount, newCount);
-
-    if (newCount == oldCount) {   // Nothing to do, simply complete the request.
-      completion.run();
-      return true;
-    }
-
-    instanceChangeExecutor.execute(createSetInstanceRunnable(message, completion, oldCount, newCount));
-    return true;
-  }
-
-  /**
-   * Creates a Runnable for execution of change instance request.
-   */
-  private Runnable createSetInstanceRunnable(final Message message, final Runnable completion,
-                                             final int oldCount, final int newCount) {
-    return new Runnable() {
-      @Override
-      public void run() {
-        final String runnableName = message.getRunnableName();
-
-        LOG.info("Processing change instance request for {}, from {} to {}.", runnableName, oldCount, newCount);
-        try {
-          // Wait until running container count is the same as old count
-          runningContainers.waitForCount(runnableName, oldCount);
-          LOG.info("Confirmed {} containers running for {}.", oldCount, runnableName);
-
-          expectedContainers.setExpected(runnableName, newCount);
-
-          try {
-            if (newCount < oldCount) {
-              // Shutdown some running containers
-              for (int i = 0; i < oldCount - newCount; i++) {
-                runningContainers.removeLast(runnableName);
-              }
-            } else {
-              // Increase the number of instances
-              runnableContainerRequests.add(createRunnableContainerRequest(runnableName));
-            }
-          } finally {
-            runningContainers.sendToRunnable(runnableName, message, completion);
-            LOG.info("Change instances request completed. From {} to {}.", oldCount, newCount);
-          }
-        } catch (InterruptedException e) {
-          // If the wait is being interrupted, discard the message.
-          completion.run();
-        }
-      }
-    };
-  }
-
-  private RunnableContainerRequest createRunnableContainerRequest(final String runnableName) {
-    // Find the current order of the given runnable in order to create a RunnableContainerRequest.
-    TwillSpecification.Order order = Iterables.find(twillSpec.getOrders(), new Predicate<TwillSpecification.Order>() {
-      @Override
-      public boolean apply(TwillSpecification.Order input) {
-        return (input.getNames().contains(runnableName));
-      }
-    });
-
-    RuntimeSpecification runtimeSpec = twillSpec.getRunnables().get(runnableName);
-    Resource capability = createCapability(runtimeSpec.getResourceSpecification());
-    return new RunnableContainerRequest(order.getType(), ImmutableMultimap.of(capability, runtimeSpec));
-  }
-
-  private Runnable getMessageCompletion(final String messageId, final SettableFuture<String> future) {
-    return new Runnable() {
-      @Override
-      public void run() {
-        future.set(messageId);
-      }
-    };
-  }
-
-  private Resource createCapability(ResourceSpecification resourceSpec) {
-    Resource capability = Records.newRecord(Resource.class);
-
-    if (!YarnUtils.setVirtualCores(capability, resourceSpec.getVirtualCores())) {
-      LOG.debug("Virtual cores limit not supported.");
-    }
-
-    capability.setMemory(resourceSpec.getMemorySize());
-    return capability;
-  }
-
-  @Override
-  protected Service getServiceDelegate() {
-    return serviceDelegate;
-  }
-
-  /**
-   * A private class for service lifecycle. It's done this way so that {@link ZKServiceDecorator} can
-   * wrap around it to reflect status in ZK.
-   */
-  private final class ServiceDelegate extends AbstractExecutionThreadService implements MessageCallback {
-
-    private volatile Thread runThread;
-
-    @Override
-    protected void run() throws Exception {
-      runThread = Thread.currentThread();
-      try {
-        doRun();
-      } catch (InterruptedException e) {
-        // It's ok to get interrupted exception, as it's a signal to stop
-        Thread.currentThread().interrupt();
-      }
-    }
-
-    @Override
-    protected void startUp() throws Exception {
-      doStart();
-    }
-
-    @Override
-    protected void shutDown() throws Exception {
-      doStop();
-    }
-
-    @Override
-    protected void triggerShutdown() {
-      Thread runThread = this.runThread;
-      if (runThread != null) {
-        runThread.interrupt();
-      }
-    }
-
-    @Override
-    public ListenableFuture<String> onReceived(String messageId, Message message) {
-      return processMessage(messageId, message);
-    }
-  }
-}
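
The container accounting in addContainerRequests above boils down to requesting the difference between the expected and the currently running instance counts for each runnable. A small self-contained sketch of that reconciliation rule (illustrative names, not part of this commit):

import java.util.HashMap;
import java.util.Map;

public final class ReconcileSketch {
  public static void main(String[] args) {
    Map<String, Integer> expected = new HashMap<String, Integer>();
    Map<String, Integer> running = new HashMap<String, Integer>();
    expected.put("worker", 5);
    running.put("worker", 3);

    for (Map.Entry<String, Integer> entry : expected.entrySet()) {
      String runnable = entry.getKey();
      int runningCount = running.containsKey(runnable) ? running.get(runnable) : 0;
      int newContainers = entry.getValue() - runningCount;
      if (newContainers > 0) {
        // This is the point where the AM would issue amClient.addContainerRequest(...).
        System.out.println("request " + newContainers + " container(s) for " + runnable);
      }
    }
  }
}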

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/main/java/org/apache/twill/internal/appmaster/ApplicationSubmitter.java
----------------------------------------------------------------------
diff --git a/yarn/src/main/java/org/apache/twill/internal/appmaster/ApplicationSubmitter.java b/yarn/src/main/java/org/apache/twill/internal/appmaster/ApplicationSubmitter.java
deleted file mode 100644
index 931c5ef..0000000
--- a/yarn/src/main/java/org/apache/twill/internal/appmaster/ApplicationSubmitter.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.appmaster;
-
-import org.apache.twill.internal.ProcessController;
-import org.apache.twill.internal.yarn.YarnApplicationReport;
-import org.apache.twill.internal.yarn.YarnLaunchContext;
-import org.apache.hadoop.yarn.api.records.Resource;
-
-/**
- * Interface for submitting a new application to run.
- */
-public interface ApplicationSubmitter {
-
-  ProcessController<YarnApplicationReport> submit(YarnLaunchContext launchContext, Resource capability);
-}
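
The interface has a single method, so a submitter can be sketched as a small stub. A hedged example (the stub class is hypothetical; a real implementation hands the launch context and capability to the YARN client API):

package org.apache.twill.internal.appmaster;

import org.apache.twill.internal.ProcessController;
import org.apache.twill.internal.yarn.YarnApplicationReport;
import org.apache.twill.internal.yarn.YarnLaunchContext;
import org.apache.hadoop.yarn.api.records.Resource;

final class LoggingApplicationSubmitter implements ApplicationSubmitter {

  @Override
  public ProcessController<YarnApplicationReport> submit(YarnLaunchContext launchContext, Resource capability) {
    // A real submitter would submit the application here; this stub only logs the request.
    System.out.println("Would submit AM with capability " + capability);
    return new ProcessController<YarnApplicationReport>() {
      @Override
      public YarnApplicationReport getReport() {
        return null;   // no report available from this stub
      }

      @Override
      public void cancel() {
        // nothing to cancel in this sketch
      }
    };
  }
}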

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/main/java/org/apache/twill/internal/appmaster/BasicEventHandlerContext.java
----------------------------------------------------------------------
diff --git a/yarn/src/main/java/org/apache/twill/internal/appmaster/BasicEventHandlerContext.java b/yarn/src/main/java/org/apache/twill/internal/appmaster/BasicEventHandlerContext.java
deleted file mode 100644
index 1769910..0000000
--- a/yarn/src/main/java/org/apache/twill/internal/appmaster/BasicEventHandlerContext.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.appmaster;
-
-import org.apache.twill.api.EventHandlerContext;
-import org.apache.twill.api.EventHandlerSpecification;
-
-/**
- * A simple {@link EventHandlerContext} that only carries the {@link EventHandlerSpecification}.
- */
-final class BasicEventHandlerContext implements EventHandlerContext {
-
-  private final EventHandlerSpecification specification;
-
-  BasicEventHandlerContext(EventHandlerSpecification specification) {
-    this.specification = specification;
-  }
-
-  @Override
-  public EventHandlerSpecification getSpecification() {
-    return specification;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/main/java/org/apache/twill/internal/appmaster/ExpectedContainers.java
----------------------------------------------------------------------
diff --git a/yarn/src/main/java/org/apache/twill/internal/appmaster/ExpectedContainers.java b/yarn/src/main/java/org/apache/twill/internal/appmaster/ExpectedContainers.java
deleted file mode 100644
index f4ebbd0..0000000
--- a/yarn/src/main/java/org/apache/twill/internal/appmaster/ExpectedContainers.java
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.appmaster;
-
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Maps;
-
-import java.util.Map;
-
-/**
- * This class holds information about the expected container count for each runnable. It also
- * keeps track of the timestamp at which the expected count was last updated.
- */
-final class ExpectedContainers {
-
-  private final Map<String, ExpectedCount> expectedCounts;
-
-  ExpectedContainers(Map<String, Integer> expected) {
-    expectedCounts = Maps.newHashMap();
-    long now = System.currentTimeMillis();
-
-    for (Map.Entry<String, Integer> entry : expected.entrySet()) {
-      expectedCounts.put(entry.getKey(), new ExpectedCount(entry.getValue(), now));
-    }
-  }
-
-  synchronized void setExpected(String runnable, int expected) {
-    expectedCounts.put(runnable, new ExpectedCount(expected, System.currentTimeMillis()));
-  }
-
-  /**
-   * Updates the ExpectedCount timestamp to the current time.
-   * @param runnables List of runnable names.
-   */
-  synchronized void updateRequestTime(Iterable<String> runnables) {
-    for (String runnable : runnables) {
-      ExpectedCount oldCount = expectedCounts.get(runnable);
-      expectedCounts.put(runnable, new ExpectedCount(oldCount.getCount(), System.currentTimeMillis()));
-    }
-  }
-
-  synchronized int getExpected(String runnable) {
-    return expectedCounts.get(runnable).getCount();
-  }
-
-  synchronized Map<String, ExpectedCount> getAll() {
-    return ImmutableMap.copyOf(expectedCounts);
-  }
-
-  static final class ExpectedCount {
-    private final int count;
-    private final long timestamp;
-
-    private ExpectedCount(int count, long timestamp) {
-      this.count = count;
-      this.timestamp = timestamp;
-    }
-
-    int getCount() {
-      return count;
-    }
-
-    long getTimestamp() {
-      return timestamp;
-    }
-  }
-}
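
Since the constructor, setExpected and getAll are package-private, a usage sketch has to live in the same package. A minimal, hypothetical example of how the AM-side bookkeeping uses this tracker when an "instances" command changes a runnable's count:

package org.apache.twill.internal.appmaster;

import com.google.common.collect.ImmutableMap;

import java.util.Map;

final class ExpectedContainersSketch {
  public static void main(String[] args) {
    ExpectedContainers expected = new ExpectedContainers(ImmutableMap.of("worker", 2, "master", 1));

    // A change-instances request bumps the count and refreshes the request timestamp.
    expected.setExpected("worker", 4);

    for (Map.Entry<String, ExpectedContainers.ExpectedCount> entry : expected.getAll().entrySet()) {
      System.out.println(entry.getKey() + " -> " + entry.getValue().getCount()
                           + " (requested at " + entry.getValue().getTimestamp() + ")");
    }
  }
}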

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/main/java/org/apache/twill/internal/appmaster/LoggerContextListenerAdapter.java
----------------------------------------------------------------------
diff --git a/yarn/src/main/java/org/apache/twill/internal/appmaster/LoggerContextListenerAdapter.java b/yarn/src/main/java/org/apache/twill/internal/appmaster/LoggerContextListenerAdapter.java
deleted file mode 100644
index 2d41aa6..0000000
--- a/yarn/src/main/java/org/apache/twill/internal/appmaster/LoggerContextListenerAdapter.java
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.appmaster;
-
-import ch.qos.logback.classic.Level;
-import ch.qos.logback.classic.Logger;
-import ch.qos.logback.classic.LoggerContext;
-import ch.qos.logback.classic.spi.LoggerContextListener;
-
-/**
- * A {@link LoggerContextListener} adapter providing no-op implementations of all callback methods.
- */
-abstract class LoggerContextListenerAdapter implements LoggerContextListener {
-
-  private final boolean resetResistant;
-
-  protected LoggerContextListenerAdapter(boolean resetResistant) {
-    this.resetResistant = resetResistant;
-  }
-
-  @Override
-  public final boolean isResetResistant() {
-    return resetResistant;
-  }
-
-  @Override
-  public void onStart(LoggerContext context) {
-  }
-
-  @Override
-  public void onReset(LoggerContext context) {
-  }
-
-  @Override
-  public void onStop(LoggerContext context) {
-  }
-
-  @Override
-  public void onLevelChange(Logger logger, Level level) {
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/main/java/org/apache/twill/internal/appmaster/ProvisionRequest.java
----------------------------------------------------------------------
diff --git a/yarn/src/main/java/org/apache/twill/internal/appmaster/ProvisionRequest.java b/yarn/src/main/java/org/apache/twill/internal/appmaster/ProvisionRequest.java
deleted file mode 100644
index 002d2a5..0000000
--- a/yarn/src/main/java/org/apache/twill/internal/appmaster/ProvisionRequest.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.appmaster;
-
-import org.apache.twill.api.RuntimeSpecification;
-
-/**
- * Package-private class to help the AM track in-progress container requests.
- */
-final class ProvisionRequest {
-  private final RuntimeSpecification runtimeSpec;
-  private final String requestId;
-  private int requestCount;
-
-  ProvisionRequest(RuntimeSpecification runtimeSpec, String requestId, int requestCount) {
-    this.runtimeSpec = runtimeSpec;
-    this.requestId = requestId;
-    this.requestCount = requestCount;
-  }
-
-  RuntimeSpecification getRuntimeSpec() {
-    return runtimeSpec;
-  }
-
-  String getRequestId() {
-    return requestId;
-  }
-
-  /**
-   * Called to notify that a container has been provisioned for this request.
-   * @return {@code true} if the requested container count has been provisioned.
-   */
-  boolean containerAcquired() {
-    requestCount--;
-    return requestCount == 0;
-  }
-}
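
containerAcquired() is a simple countdown: it returns true only when the last requested container has arrived. A tiny sketch of that behaviour (same package because the class is package-private; the runtime spec is irrelevant to the countdown, so null is passed):

package org.apache.twill.internal.appmaster;

final class ProvisionRequestSketch {
  public static void main(String[] args) {
    ProvisionRequest request = new ProvisionRequest(null, "request-1", 3);
    for (int i = 1; i <= 3; i++) {
      // Prints false, false, true: only the final acquisition satisfies the request.
      System.out.println("container " + i + " acquired, satisfied: " + request.containerAcquired());
    }
  }
}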

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/main/java/org/apache/twill/internal/appmaster/RunnableContainerRequest.java
----------------------------------------------------------------------
diff --git a/yarn/src/main/java/org/apache/twill/internal/appmaster/RunnableContainerRequest.java b/yarn/src/main/java/org/apache/twill/internal/appmaster/RunnableContainerRequest.java
deleted file mode 100644
index 7f28443..0000000
--- a/yarn/src/main/java/org/apache/twill/internal/appmaster/RunnableContainerRequest.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.appmaster;
-
-import org.apache.twill.api.RuntimeSpecification;
-import org.apache.twill.api.TwillSpecification;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Iterators;
-import com.google.common.collect.Maps;
-import com.google.common.collect.Multimap;
-import org.apache.hadoop.yarn.api.records.Resource;
-
-import java.util.Collection;
-import java.util.Iterator;
-import java.util.Map;
-
-/**
- * Data structure holding a set of runnable specifications grouped by resource capability.
- */
-final class RunnableContainerRequest {
-  private final TwillSpecification.Order.Type orderType;
-  private final Iterator<Map.Entry<Resource, Collection<RuntimeSpecification>>> requests;
-
-  RunnableContainerRequest(TwillSpecification.Order.Type orderType,
-                           Multimap<Resource, RuntimeSpecification> requests) {
-    this.orderType = orderType;
-    this.requests = requests.asMap().entrySet().iterator();
-  }
-
-  TwillSpecification.Order.Type getOrderType() {
-    return orderType;
-  }
-
-  /**
-   * Removes a resource request and returns it.
-   * @return The {@link Resource} and {@link Collection} of {@link RuntimeSpecification}, or
-   *         {@code null} if there are no more requests.
-   */
-  Map.Entry<Resource, ? extends Collection<RuntimeSpecification>> takeRequest() {
-    Map.Entry<Resource, Collection<RuntimeSpecification>> next = Iterators.getNext(requests, null);
-    return next == null ? null : Maps.immutableEntry(next.getKey(), ImmutableList.copyOf(next.getValue()));
-  }
-}
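
The AM main loop drains one of these requests by calling takeRequest() until it returns null, then moves on to the next order. A hedged sketch of that drain pattern (same package; Order.Type.STARTED is assumed to be one of the enum constants; an empty multimap keeps the example self-contained):

package org.apache.twill.internal.appmaster;

import org.apache.twill.api.RuntimeSpecification;
import org.apache.twill.api.TwillSpecification;
import com.google.common.collect.ImmutableMultimap;
import org.apache.hadoop.yarn.api.records.Resource;

import java.util.Collection;
import java.util.Map;

final class RunnableContainerRequestSketch {
  public static void main(String[] args) {
    RunnableContainerRequest request = new RunnableContainerRequest(
      TwillSpecification.Order.Type.STARTED, ImmutableMultimap.<Resource, RuntimeSpecification>of());

    Map.Entry<Resource, ? extends Collection<RuntimeSpecification>> entry;
    while ((entry = request.takeRequest()) != null) {
      // Each entry groups runnables that share the same resource capability.
      System.out.println("provision " + entry.getValue().size() + " runnable(s) with " + entry.getKey());
    }
    System.out.println("request drained");
  }
}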

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/main/java/org/apache/twill/internal/appmaster/RunnableProcessLauncher.java
----------------------------------------------------------------------
diff --git a/yarn/src/main/java/org/apache/twill/internal/appmaster/RunnableProcessLauncher.java b/yarn/src/main/java/org/apache/twill/internal/appmaster/RunnableProcessLauncher.java
deleted file mode 100644
index b4b27a9..0000000
--- a/yarn/src/main/java/org/apache/twill/internal/appmaster/RunnableProcessLauncher.java
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.appmaster;
-
-import org.apache.twill.common.Cancellable;
-import org.apache.twill.internal.EnvKeys;
-import org.apache.twill.internal.ProcessController;
-import org.apache.twill.internal.yarn.AbstractYarnProcessLauncher;
-import org.apache.twill.internal.yarn.YarnContainerInfo;
-import org.apache.twill.internal.yarn.YarnLaunchContext;
-import org.apache.twill.internal.yarn.YarnNMClient;
-import com.google.common.base.Objects;
-import com.google.common.collect.Maps;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.Map;
-
-/**
- * A {@link org.apache.twill.internal.ProcessLauncher} for launching a runnable process in an
- * allocated YARN container through the node manager client.
- */
-public final class RunnableProcessLauncher extends AbstractYarnProcessLauncher<YarnContainerInfo> {
-
-  private static final Logger LOG = LoggerFactory.getLogger(RunnableProcessLauncher.class);
-
-  private final YarnContainerInfo containerInfo;
-  private final YarnNMClient nmClient;
-  private boolean launched;
-
-  public RunnableProcessLauncher(YarnContainerInfo containerInfo, YarnNMClient nmClient) {
-    super(containerInfo);
-    this.containerInfo = containerInfo;
-    this.nmClient = nmClient;
-  }
-
-  @Override
-  public String toString() {
-    return Objects.toStringHelper(this)
-      .add("container", containerInfo)
-      .toString();
-  }
-
-  @Override
-  protected <R> ProcessController<R> doLaunch(YarnLaunchContext launchContext) {
-    Map<String, String> env = Maps.newHashMap(launchContext.getEnvironment());
-
-    // Set extra environments
-    env.put(EnvKeys.YARN_CONTAINER_ID, containerInfo.getId());
-    env.put(EnvKeys.YARN_CONTAINER_HOST, containerInfo.getHost().getHostName());
-    env.put(EnvKeys.YARN_CONTAINER_PORT, Integer.toString(containerInfo.getPort()));
-    env.put(EnvKeys.YARN_CONTAINER_MEMORY_MB, Integer.toString(containerInfo.getMemoryMB()));
-    env.put(EnvKeys.YARN_CONTAINER_VIRTUAL_CORES, Integer.toString(containerInfo.getVirtualCores()));
-
-    launchContext.setEnvironment(env);
-
-    LOG.info("Launching in container {}, {}", containerInfo.getId(), launchContext.getCommands());
-    final Cancellable cancellable = nmClient.start(containerInfo, launchContext);
-    launched = true;
-
-    return new ProcessController<R>() {
-      @Override
-      public R getReport() {
-        // No reporting support for runnable launch yet.
-        return null;
-
-      }
-
-      @Override
-      public void cancel() {
-        cancellable.cancel();
-      }
-    };
-  }
-
-  public boolean isLaunched() {
-    return launched;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/main/java/org/apache/twill/internal/appmaster/RunningContainers.java
----------------------------------------------------------------------
diff --git a/yarn/src/main/java/org/apache/twill/internal/appmaster/RunningContainers.java b/yarn/src/main/java/org/apache/twill/internal/appmaster/RunningContainers.java
deleted file mode 100644
index beef0d4..0000000
--- a/yarn/src/main/java/org/apache/twill/internal/appmaster/RunningContainers.java
+++ /dev/null
@@ -1,427 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.appmaster;
-
-import org.apache.twill.api.ResourceReport;
-import org.apache.twill.api.RunId;
-import org.apache.twill.api.ServiceController;
-import org.apache.twill.api.TwillRunResources;
-import org.apache.twill.internal.ContainerInfo;
-import org.apache.twill.internal.DefaultResourceReport;
-import org.apache.twill.internal.DefaultTwillRunResources;
-import org.apache.twill.internal.RunIds;
-import org.apache.twill.internal.TwillContainerController;
-import org.apache.twill.internal.TwillContainerLauncher;
-import org.apache.twill.internal.container.TwillContainerMain;
-import org.apache.twill.internal.state.Message;
-import org.apache.twill.internal.yarn.YarnContainerStatus;
-import com.google.common.base.Function;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.HashBasedTable;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-import com.google.common.collect.Multiset;
-import com.google.common.collect.Table;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import org.apache.hadoop.yarn.api.records.ContainerState;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.BitSet;
-import java.util.Collection;
-import java.util.Deque;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.locks.Condition;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReentrantLock;
-
-/**
- * A helper class for ApplicationMasterService to keep track of running containers and to interact
- * with them.
- */
-final class RunningContainers {
-  private static final Logger LOG = LoggerFactory.getLogger(RunningContainers.class);
-
-  /**
-   * Function to return cardinality of a given BitSet.
-   */
-  private static final Function<BitSet, Integer> BITSET_CARDINALITY = new Function<BitSet, Integer>() {
-    @Override
-    public Integer apply(BitSet input) {
-      return input.cardinality();
-    }
-  };
-
-  // Table of <runnableName, containerId, controller>
-  private final Table<String, String, TwillContainerController> containers;
-
-  // Map from runnableName to a BitSet, with the <instanceId> bit turned on for having an instance running.
-  private final Map<String, BitSet> runnableInstances;
-  private final DefaultResourceReport resourceReport;
-  private final Deque<String> startSequence;
-  private final Lock containerLock;
-  private final Condition containerChange;
-
-  RunningContainers(String appId, TwillRunResources appMasterResources) {
-    containers = HashBasedTable.create();
-    runnableInstances = Maps.newHashMap();
-    startSequence = Lists.newLinkedList();
-    containerLock = new ReentrantLock();
-    containerChange = containerLock.newCondition();
-    resourceReport = new DefaultResourceReport(appId, appMasterResources);
-  }
-
-  /**
-   * Returns {@code true} if there is no live container.
-   */
-  boolean isEmpty() {
-    containerLock.lock();
-    try {
-      return runnableInstances.isEmpty();
-    } finally {
-      containerLock.unlock();
-    }
-  }
-
-  void start(String runnableName, ContainerInfo containerInfo, TwillContainerLauncher launcher) {
-    containerLock.lock();
-    try {
-      int instanceId = getStartInstanceId(runnableName);
-      RunId runId = getRunId(runnableName, instanceId);
-      TwillContainerController controller = launcher.start(runId, instanceId,
-                                                           TwillContainerMain.class, "$HADOOP_CONF_DIR");
-      containers.put(runnableName, containerInfo.getId(), controller);
-
-      TwillRunResources resources = new DefaultTwillRunResources(instanceId,
-                                                                 containerInfo.getId(),
-                                                                 containerInfo.getVirtualCores(),
-                                                                 containerInfo.getMemoryMB(),
-                                                                 containerInfo.getHost().getHostName());
-      resourceReport.addRunResources(runnableName, resources);
-
-      if (startSequence.isEmpty() || !runnableName.equals(startSequence.peekLast())) {
-        startSequence.addLast(runnableName);
-      }
-      containerChange.signalAll();
-
-    } finally {
-      containerLock.unlock();
-    }
-  }
-
-  ResourceReport getResourceReport() {
-    return resourceReport;
-  }
-
-  /**
-   * Stops and removes the last running container of the given runnable.
-   */
-  void removeLast(String runnableName) {
-    containerLock.lock();
-    try {
-      int maxInstanceId = getMaxInstanceId(runnableName);
-      if (maxInstanceId < 0) {
-        LOG.warn("No running container found for {}", runnableName);
-        return;
-      }
-
-      String lastContainerId = null;
-      TwillContainerController lastController = null;
-
-      // Find the controller with the maxInstanceId
-      for (Map.Entry<String, TwillContainerController> entry : containers.row(runnableName).entrySet()) {
-        if (getInstanceId(entry.getValue().getRunId()) == maxInstanceId) {
-          lastContainerId = entry.getKey();
-          lastController = entry.getValue();
-          break;
-        }
-      }
-
-      Preconditions.checkState(lastContainerId != null,
-                               "No container found for {} with instanceId = {}", runnableName, maxInstanceId);
-
-      LOG.info("Stopping service: {} {}", runnableName, lastController.getRunId());
-      lastController.stopAndWait();
-      containers.remove(runnableName, lastContainerId);
-      removeInstanceId(runnableName, maxInstanceId);
-      resourceReport.removeRunnableResources(runnableName, lastContainerId);
-      containerChange.signalAll();
-    } finally {
-      containerLock.unlock();
-    }
-  }
-
-  /**
-   * Blocks until the number of running instances of the given runnable reaches the given count.
-   */
-  void waitForCount(String runnableName, int count) throws InterruptedException {
-    containerLock.lock();
-    try {
-      while (getRunningInstances(runnableName) != count) {
-        containerChange.await();
-      }
-    } finally {
-      containerLock.unlock();
-    }
-  }
-
-  /**
-   * Returns the number of running instances of the given runnable.
-   */
-  int count(String runnableName) {
-    containerLock.lock();
-    try {
-      return getRunningInstances(runnableName);
-    } finally {
-      containerLock.unlock();
-    }
-  }
-
-  /**
-   * Returns a Map containing the number of running instances of all runnables.
-   */
-  Map<String, Integer> countAll() {
-    containerLock.lock();
-    try {
-      return ImmutableMap.copyOf(Maps.transformValues(runnableInstances, BITSET_CARDINALITY));
-    } finally {
-      containerLock.unlock();
-    }
-  }
-
-  void sendToAll(Message message, Runnable completion) {
-    containerLock.lock();
-    try {
-      if (containers.isEmpty()) {
-        completion.run();
-      }
-
-      // Sends the command to all running containers
-      AtomicInteger count = new AtomicInteger(containers.size());
-      for (Map.Entry<String, Map<String, TwillContainerController>> entry : containers.rowMap().entrySet()) {
-        for (TwillContainerController controller : entry.getValue().values()) {
-          sendMessage(entry.getKey(), message, controller, count, completion);
-        }
-      }
-    } finally {
-      containerLock.unlock();
-    }
-  }
-
-  void sendToRunnable(String runnableName, Message message, Runnable completion) {
-    containerLock.lock();
-    try {
-      Collection<TwillContainerController> controllers = containers.row(runnableName).values();
-      if (controllers.isEmpty()) {
-        completion.run();
-      }
-
-      AtomicInteger count = new AtomicInteger(controllers.size());
-      for (TwillContainerController controller : controllers) {
-        sendMessage(runnableName, message, controller, count, completion);
-      }
-    } finally {
-      containerLock.unlock();
-    }
-  }
-
-  /**
-   * Stops all running services. Only called when the AppMaster stops.
-   */
-  void stopAll() {
-    containerLock.lock();
-    try {
-      // Stop it one by one in reverse order of start sequence
-      Iterator<String> itor = startSequence.descendingIterator();
-      List<ListenableFuture<ServiceController.State>> futures = Lists.newLinkedList();
-      while (itor.hasNext()) {
-        String runnableName = itor.next();
-        LOG.info("Stopping all instances of " + runnableName);
-
-        futures.clear();
-        // Parallel stops all running containers of the current runnable.
-        for (TwillContainerController controller : containers.row(runnableName).values()) {
-          futures.add(controller.stop());
-        }
-        // Wait for containers to stop. Assumes the future returned by Futures.successfulAsList won't throw exception.
-        Futures.getUnchecked(Futures.successfulAsList(futures));
-
-        LOG.info("Terminated all instances of " + runnableName);
-      }
-      containers.clear();
-      runnableInstances.clear();
-    } finally {
-      containerLock.unlock();
-    }
-  }
-
-  Set<String> getContainerIds() {
-    containerLock.lock();
-    try {
-      return ImmutableSet.copyOf(containers.columnKeySet());
-    } finally {
-      containerLock.unlock();
-    }
-  }
-
-  /**
-   * Handles completion of a container.
-   * @param status The completion status.
-   * @param restartRunnables Set of runnable names that require restart.
-   */
-  void handleCompleted(YarnContainerStatus status, Multiset<String> restartRunnables) {
-    containerLock.lock();
-    String containerId = status.getContainerId();
-    int exitStatus = status.getExitStatus();
-    ContainerState state = status.getState();
-
-    try {
-      Map<String, TwillContainerController> lookup = containers.column(containerId);
-      if (lookup.isEmpty()) {
-        // It's OK because if a container is stopped through removeLast, this would be empty.
-        return;
-      }
-
-      if (lookup.size() != 1) {
-        LOG.warn("More than one controller found for container {}", containerId);
-      }
-
-      if (exitStatus != 0) {
-        LOG.warn("Container {} exited abnormally with state {}, exit code {}. Re-request the container.",
-                 containerId, state, exitStatus);
-        restartRunnables.add(lookup.keySet().iterator().next());
-      } else {
-        LOG.info("Container {} exited normally with state {}", containerId, state);
-      }
-
-      for (Map.Entry<String, TwillContainerController> completedEntry : lookup.entrySet()) {
-        String runnableName = completedEntry.getKey();
-        TwillContainerController controller = completedEntry.getValue();
-        controller.completed(exitStatus);
-
-        removeInstanceId(runnableName, getInstanceId(controller.getRunId()));
-        resourceReport.removeRunnableResources(runnableName, containerId);
-      }
-
-      lookup.clear();
-      containerChange.signalAll();
-    } finally {
-      containerLock.unlock();
-    }
-  }
-
-  /**
-   * Sends a command through the given {@link org.apache.twill.internal.TwillContainerController} of a runnable.
-   * Decrements the count when the sending of the command completes. Triggers completion when the count reaches zero.
-   */
-  private void sendMessage(final String runnableName, final Message message,
-                           final TwillContainerController controller, final AtomicInteger count,
-                           final Runnable completion) {
-    Futures.addCallback(controller.sendMessage(message), new FutureCallback<Message>() {
-      @Override
-      public void onSuccess(Message result) {
-        if (count.decrementAndGet() == 0) {
-          completion.run();
-        }
-      }
-
-      @Override
-      public void onFailure(Throwable t) {
-        try {
-          LOG.error("Failed to send message. Runnable: {}, RunId: {}, Message: {}.",
-                    runnableName, controller.getRunId(), message, t);
-        } finally {
-          if (count.decrementAndGet() == 0) {
-            completion.run();
-          }
-        }
-      }
-    });
-  }
-
-  /**
-   * Returns the next instanceId to use for starting a new instance of the given runnable.
-   */
-  private int getStartInstanceId(String runnableName) {
-    BitSet instances = runnableInstances.get(runnableName);
-    if (instances == null) {
-      instances = new BitSet();
-      runnableInstances.put(runnableName, instances);
-    }
-    int instanceId = instances.nextClearBit(0);
-    instances.set(instanceId);
-    return instanceId;
-  }
-
-  private void removeInstanceId(String runnableName, int instanceId) {
-    BitSet instances = runnableInstances.get(runnableName);
-    if (instances == null) {
-      return;
-    }
-    instances.clear(instanceId);
-    if (instances.isEmpty()) {
-      runnableInstances.remove(runnableName);
-    }
-  }
-
-  /**
-   * Returns the largest instanceId for the given runnable. Returns -1 if no container is running.
-   */
-  private int getMaxInstanceId(String runnableName) {
-    BitSet instances = runnableInstances.get(runnableName);
-    if (instances == null || instances.isEmpty()) {
-      return -1;
-    }
-    return instances.length() - 1;
-  }
-
-  /**
-   * Returns the number of running instances for the given runnable.
-   */
-  private int getRunningInstances(String runnableName) {
-    BitSet instances = runnableInstances.get(runnableName);
-    return instances == null ? 0 : instances.cardinality();
-  }
-
-  private RunId getRunId(String runnableName, int instanceId) {
-    RunId baseId;
-
-    Collection<TwillContainerController> controllers = containers.row(runnableName).values();
-    if (controllers.isEmpty()) {
-      baseId = RunIds.generate();
-    } else {
-      String id = controllers.iterator().next().getRunId().getId();
-      baseId = RunIds.fromString(id.substring(0, id.lastIndexOf('-')));
-    }
-
-    return RunIds.fromString(baseId.getId() + '-' + instanceId);
-  }
-
-  private int getInstanceId(RunId runId) {
-    String id = runId.getId();
-    return Integer.parseInt(id.substring(id.lastIndexOf('-') + 1));
-  }
-}
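A quick aside for readers following the instanceId bookkeeping in the file above: getStartInstanceId, removeInstanceId and getMaxInstanceId all lean on java.util.BitSet, where nextClearBit(0) hands out the smallest free instanceId, clear(id) frees a slot for reuse, and length() - 1 yields the largest live id. The following standalone sketch (not part of the Twill source, class name invented for illustration) shows that behaviour:

    import java.util.BitSet;

    public class InstanceIdDemo {
      public static void main(String[] args) {
        BitSet instances = new BitSet();

        // Start three instances: nextClearBit(0) returns 0, 1 and 2 in turn.
        for (int i = 0; i < 3; i++) {
          int id = instances.nextClearBit(0);
          instances.set(id);
          System.out.println("started instance " + id);
        }

        // Stop instance 1; its slot becomes reusable.
        instances.clear(1);

        // The next start reuses the lowest free slot (1), not 3.
        System.out.println("next instanceId = " + instances.nextClearBit(0));

        // Largest live instanceId, as used by removeLast(): length() - 1 == 2.
        System.out.println("max instanceId = " + (instances.length() - 1));

        // Number of running instances, as used by count(): cardinality() == 2.
        System.out.println("running = " + instances.cardinality());
      }
    }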

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/main/java/org/apache/twill/internal/appmaster/TrackerService.java
----------------------------------------------------------------------
diff --git a/yarn/src/main/java/org/apache/twill/internal/appmaster/TrackerService.java b/yarn/src/main/java/org/apache/twill/internal/appmaster/TrackerService.java
deleted file mode 100644
index ca299e0..0000000
--- a/yarn/src/main/java/org/apache/twill/internal/appmaster/TrackerService.java
+++ /dev/null
@@ -1,222 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.appmaster;
-
-import org.apache.twill.api.ResourceReport;
-import org.apache.twill.internal.json.ResourceReportAdapter;
-import com.google.common.util.concurrent.AbstractIdleService;
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
-import org.jboss.netty.bootstrap.ServerBootstrap;
-import org.jboss.netty.buffer.ChannelBuffer;
-import org.jboss.netty.buffer.ChannelBufferOutputStream;
-import org.jboss.netty.buffer.ChannelBuffers;
-import org.jboss.netty.channel.Channel;
-import org.jboss.netty.channel.ChannelFactory;
-import org.jboss.netty.channel.ChannelFuture;
-import org.jboss.netty.channel.ChannelFutureListener;
-import org.jboss.netty.channel.ChannelHandlerContext;
-import org.jboss.netty.channel.ChannelPipeline;
-import org.jboss.netty.channel.ChannelPipelineFactory;
-import org.jboss.netty.channel.Channels;
-import org.jboss.netty.channel.ExceptionEvent;
-import org.jboss.netty.channel.MessageEvent;
-import org.jboss.netty.channel.SimpleChannelUpstreamHandler;
-import org.jboss.netty.channel.group.ChannelGroup;
-import org.jboss.netty.channel.group.DefaultChannelGroup;
-import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory;
-import org.jboss.netty.handler.codec.http.DefaultHttpResponse;
-import org.jboss.netty.handler.codec.http.HttpChunkAggregator;
-import org.jboss.netty.handler.codec.http.HttpContentCompressor;
-import org.jboss.netty.handler.codec.http.HttpHeaders;
-import org.jboss.netty.handler.codec.http.HttpMethod;
-import org.jboss.netty.handler.codec.http.HttpRequest;
-import org.jboss.netty.handler.codec.http.HttpRequestDecoder;
-import org.jboss.netty.handler.codec.http.HttpResponse;
-import org.jboss.netty.handler.codec.http.HttpResponseEncoder;
-import org.jboss.netty.handler.codec.http.HttpResponseStatus;
-import org.jboss.netty.handler.codec.http.HttpVersion;
-import org.jboss.netty.util.CharsetUtil;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.io.OutputStreamWriter;
-import java.io.Writer;
-import java.net.InetSocketAddress;
-import java.net.URI;
-import java.net.URL;
-import java.util.concurrent.Executor;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
-
-/**
- * Web service that the Application Master registers back to the resource manager
- * so that clients can track application progress.  Currently used purely for getting a
- * breakdown of resource usage as a {@link org.apache.twill.api.ResourceReport}.
- */
-public final class TrackerService extends AbstractIdleService {
-
-  // TODO: This is temporary. When more REST APIs are supported, this would get moved.
-  public static final String PATH = "/resources";
-
-  private static final Logger LOG  = LoggerFactory.getLogger(TrackerService.class);
-  private static final int NUM_BOSS_THREADS = 1;
-  private static final int CLOSE_CHANNEL_TIMEOUT = 5;
-  private static final int MAX_INPUT_SIZE = 100 * 1024 * 1024;
-
-  private final String host;
-  private ServerBootstrap bootstrap;
-  private InetSocketAddress bindAddress;
-  private URL url;
-  private final ChannelGroup channelGroup;
-  private final ResourceReport resourceReport;
-
-  /**
-   * Initialize the service.
-   *
-   * @param resourceReport live report that the service will return to clients.
-   * @param appMasterHost the application master host.
-   */
-  public TrackerService(ResourceReport resourceReport, String appMasterHost) {
-    this.channelGroup = new DefaultChannelGroup("appMasterTracker");
-    this.resourceReport = resourceReport;
-    this.host = appMasterHost;
-  }
-
-  /**
-   * Returns the address this tracker service is bound to.
-   */
-  public InetSocketAddress getBindAddress() {
-    return bindAddress;
-  }
-
-  /**
-   * @return tracker url.
-   */
-  public URL getUrl() {
-    return url;
-  }
-
-  @Override
-  protected void startUp() throws Exception {
-    Executor bossThreads = Executors.newFixedThreadPool(NUM_BOSS_THREADS,
-                                                        new ThreadFactoryBuilder()
-                                                          .setDaemon(true)
-                                                          .setNameFormat("boss-thread")
-                                                          .build());
-
-    Executor workerThreads = Executors.newCachedThreadPool(new ThreadFactoryBuilder()
-                                                             .setDaemon(true)
-                                                             .setNameFormat("worker-thread#%d")
-                                                             .build());
-
-    ChannelFactory factory = new NioServerSocketChannelFactory(bossThreads, workerThreads);
-
-    bootstrap = new ServerBootstrap(factory);
-
-    bootstrap.setPipelineFactory(new ChannelPipelineFactory() {
-      public ChannelPipeline getPipeline() {
-        ChannelPipeline pipeline = Channels.pipeline();
-
-        pipeline.addLast("decoder", new HttpRequestDecoder());
-        pipeline.addLast("aggregator", new HttpChunkAggregator(MAX_INPUT_SIZE));
-        pipeline.addLast("encoder", new HttpResponseEncoder());
-        pipeline.addLast("compressor", new HttpContentCompressor());
-        pipeline.addLast("handler", new ReportHandler(resourceReport));
-
-        return pipeline;
-      }
-    });
-
-    Channel channel = bootstrap.bind(new InetSocketAddress(host, 0));
-    bindAddress = (InetSocketAddress) channel.getLocalAddress();
-    url = URI.create(String.format("http://%s:%d", host, bindAddress.getPort()))
-             .resolve(TrackerService.PATH).toURL();
-    channelGroup.add(channel);
-  }
-
-  @Override
-  protected void shutDown() throws Exception {
-    try {
-      if (!channelGroup.close().await(CLOSE_CHANNEL_TIMEOUT, TimeUnit.SECONDS)) {
-        LOG.warn("Timeout when closing all channels.");
-      }
-    } finally {
-      bootstrap.releaseExternalResources();
-    }
-  }
-
-  /**
-   * Handler to return resources used by this application master, which will be available through
-   * the host and port set when this application master registered itself to the resource manager.
-   */
-  public class ReportHandler extends SimpleChannelUpstreamHandler {
-    private final ResourceReport report;
-    private final ResourceReportAdapter reportAdapter;
-
-    public ReportHandler(ResourceReport report) {
-      this.report = report;
-      this.reportAdapter = ResourceReportAdapter.create();
-    }
-
-    @Override
-    public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) throws Exception {
-      HttpRequest request = (HttpRequest) e.getMessage();
-      if (!isValid(request)) {
-        write404(e);
-        return;
-      }
-
-      writeResponse(e);
-    }
-
-    // only accepts GET on /resources for now
-    private boolean isValid(HttpRequest request) {
-      return (request.getMethod() == HttpMethod.GET) && PATH.equals(request.getUri());
-    }
-
-    private void write404(MessageEvent e) {
-      HttpResponse response = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.NOT_FOUND);
-      ChannelFuture future = e.getChannel().write(response);
-      future.addListener(ChannelFutureListener.CLOSE);
-    }
-
-    private void writeResponse(MessageEvent e) {
-      HttpResponse response = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK);
-      response.setHeader(HttpHeaders.Names.CONTENT_TYPE, "application/json; charset=UTF-8");
-
-      ChannelBuffer content = ChannelBuffers.dynamicBuffer();
-      Writer writer = new OutputStreamWriter(new ChannelBufferOutputStream(content), CharsetUtil.UTF_8);
-      reportAdapter.toJson(report, writer);
-      try {
-        writer.close();
-      } catch (IOException e1) {
-        LOG.error("error writing resource report", e1);
-      }
-      response.setContent(content);
-      ChannelFuture future = e.getChannel().write(response);
-      future.addListener(ChannelFutureListener.CLOSE);
-    }
-
-    @Override
-    public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e) {
-      e.getChannel().close();
-    }
-  }
-}
-
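Since the tracker above only answers GET requests on /resources with a JSON-encoded ResourceReport, a client needs nothing beyond plain HTTP. Below is a minimal sketch under the assumption that the tracker URL is already known (the host and port here are placeholders; in practice the URL comes from the application's tracking URL):

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.nio.charset.StandardCharsets;

    public class ResourceReportClient {
      public static void main(String[] args) throws Exception {
        // Placeholder address; the real one is the AM tracker URL ending in "/resources".
        URL url = new URL("http://appmaster-host:12345/resources");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("GET");
        try (BufferedReader reader = new BufferedReader(
               new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
          StringBuilder json = new StringBuilder();
          String line;
          while ((line = reader.readLine()) != null) {
            json.append(line);
          }
          // The body is the JSON produced by ResourceReportAdapter in the handler above.
          System.out.println(json);
        } finally {
          conn.disconnect();
        }
      }
    }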

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/main/java/org/apache/twill/internal/appmaster/package-info.java
----------------------------------------------------------------------
diff --git a/yarn/src/main/java/org/apache/twill/internal/appmaster/package-info.java b/yarn/src/main/java/org/apache/twill/internal/appmaster/package-info.java
deleted file mode 100644
index bf8e677..0000000
--- a/yarn/src/main/java/org/apache/twill/internal/appmaster/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * This package contains implementation of Twill application master.
- */
-package org.apache.twill.internal.appmaster;


[07/28] Making maven site works.

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/main/hadoop20/org/apache/twill/internal/yarn/ports/AMRMClientImpl.java
----------------------------------------------------------------------
diff --git a/yarn/src/main/hadoop20/org/apache/twill/internal/yarn/ports/AMRMClientImpl.java b/yarn/src/main/hadoop20/org/apache/twill/internal/yarn/ports/AMRMClientImpl.java
deleted file mode 100644
index c1bd75a..0000000
--- a/yarn/src/main/hadoop20/org/apache/twill/internal/yarn/ports/AMRMClientImpl.java
+++ /dev/null
@@ -1,412 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.yarn.ports;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.security.SecurityUtil;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.security.token.TokenIdentifier;
-import org.apache.hadoop.yarn.YarnException;
-import org.apache.hadoop.yarn.api.AMRMProtocol;
-import org.apache.hadoop.yarn.api.ApplicationConstants;
-import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
-import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
-import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
-import org.apache.hadoop.yarn.api.records.Priority;
-import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.api.records.ResourceRequest;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
-import org.apache.hadoop.yarn.factories.RecordFactory;
-import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
-import org.apache.hadoop.yarn.ipc.YarnRPC;
-import org.apache.hadoop.yarn.service.AbstractService;
-import org.apache.hadoop.yarn.util.BuilderUtils;
-
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.security.PrivilegedAction;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Set;
-import java.util.TreeMap;
-import java.util.TreeSet;
-
-/**
- * Ported from Apache Hadoop YARN.
- */
-public final class AMRMClientImpl extends AbstractService implements AMRMClient {
-
-  private static final Log LOG = LogFactory.getLog(AMRMClientImpl.class);
-
-  private final RecordFactory recordFactory =
-    RecordFactoryProvider.getRecordFactory(null);
-
-  private int lastResponseId = 0;
-
-  protected AMRMProtocol rmClient;
-  protected final ApplicationAttemptId appAttemptId;
-  protected Resource clusterAvailableResources;
-  protected int clusterNodeCount;
-
-  //Key -> Priority
-  //Value -> Map
-  //Key->ResourceName (e.g., hostname, rackname, *)
-  //Value->Map
-  //Key->Resource Capability
-  //Value->ResourceRequest
-  protected final
-  Map<Priority, Map<String, Map<Resource, ResourceRequest>>>
-    remoteRequestsTable =
-    new TreeMap<Priority, Map<String, Map<Resource, ResourceRequest>>>();
-
-  protected final Set<ResourceRequest> ask = new TreeSet<ResourceRequest>(
-    new org.apache.hadoop.yarn.util.BuilderUtils.ResourceRequestComparator());
-  protected final Set<ContainerId> release = new TreeSet<ContainerId>();
-
-  public AMRMClientImpl(ApplicationAttemptId appAttemptId) {
-    super(AMRMClientImpl.class.getName());
-    this.appAttemptId = appAttemptId;
-  }
-
-  @Override
-  public synchronized void init(Configuration conf) {
-    super.init(conf);
-  }
-
-  @Override
-  public synchronized void start() {
-    final YarnConfiguration conf = new YarnConfiguration(getConfig());
-    final YarnRPC rpc = YarnRPC.create(conf);
-    final InetSocketAddress rmAddress = conf.getSocketAddr(
-      YarnConfiguration.RM_SCHEDULER_ADDRESS,
-      YarnConfiguration.DEFAULT_RM_SCHEDULER_ADDRESS,
-      YarnConfiguration.DEFAULT_RM_SCHEDULER_PORT);
-
-    UserGroupInformation currentUser;
-    try {
-      currentUser = UserGroupInformation.getCurrentUser();
-    } catch (IOException e) {
-      throw new YarnException(e);
-    }
-
-    if (UserGroupInformation.isSecurityEnabled()) {
-      String tokenURLEncodedStr = System.getenv().get(
-        ApplicationConstants.APPLICATION_MASTER_TOKEN_ENV_NAME);
-      Token<? extends TokenIdentifier> token = new Token<TokenIdentifier>();
-
-      try {
-        token.decodeFromUrlString(tokenURLEncodedStr);
-      } catch (IOException e) {
-        throw new YarnException(e);
-      }
-
-      SecurityUtil.setTokenService(token, rmAddress);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("AppMasterToken is " + token);
-      }
-      currentUser.addToken(token);
-    }
-
-    rmClient = currentUser.doAs(new PrivilegedAction<AMRMProtocol>() {
-      @Override
-      public AMRMProtocol run() {
-        return (AMRMProtocol) rpc.getProxy(AMRMProtocol.class, rmAddress,
-                                           conf);
-      }
-    });
-    LOG.debug("Connecting to ResourceManager at " + rmAddress);
-    super.start();
-  }
-
-  @Override
-  public synchronized void stop() {
-    if (this.rmClient != null) {
-      RPC.stopProxy(this.rmClient);
-    }
-    super.stop();
-  }
-
-  @Override
-  public RegisterApplicationMasterResponse registerApplicationMaster(
-    String appHostName, int appHostPort, String appTrackingUrl)
-    throws YarnRemoteException {
-    // do this only once ???
-    RegisterApplicationMasterRequest request = recordFactory
-      .newRecordInstance(RegisterApplicationMasterRequest.class);
-    synchronized (this) {
-      request.setApplicationAttemptId(appAttemptId);
-    }
-    request.setHost(appHostName);
-    request.setRpcPort(appHostPort);
-    if (appTrackingUrl != null) {
-      request.setTrackingUrl(appTrackingUrl);
-    }
-    RegisterApplicationMasterResponse response = rmClient
-      .registerApplicationMaster(request);
-    return response;
-  }
-
-  @Override
-  public AllocationResponse allocate(float progressIndicator)
-    throws YarnRemoteException {
-    AllocateResponse allocateResponse = null;
-    ArrayList<ResourceRequest> askList = null;
-    ArrayList<ContainerId> releaseList = null;
-    AllocateRequest allocateRequest = null;
-
-    try {
-      synchronized (this) {
-        askList = new ArrayList<ResourceRequest>(ask);
-        releaseList = new ArrayList<ContainerId>(release);
-        // optimistically clear this collection assuming no RPC failure
-        ask.clear();
-        release.clear();
-        allocateRequest = BuilderUtils
-          .newAllocateRequest(appAttemptId, lastResponseId, progressIndicator,
-                              askList, releaseList);
-      }
-
-      allocateResponse = rmClient.allocate(allocateRequest);
-      AllocationResponse response = AllocationResponses.create(allocateResponse);
-
-      synchronized (this) {
-        // update these on successful RPC
-        clusterNodeCount = allocateResponse.getNumClusterNodes();
-        lastResponseId = response.getResponseId();
-        clusterAvailableResources = response.getAvailableResources();
-      }
-
-      return response;
-    } finally {
-      // TODO how to differentiate remote YARN exception vs error in RPC
-      if (allocateResponse == null) {
-        // We hit an exception in allocate()
-        // Preserve ask and release for next call to allocate()
-        synchronized (this) {
-          release.addAll(releaseList);
-          // Requests could have been added or deleted during call to allocate.
-          // If requests were added/removed then there is nothing to do since
-          // the ResourceRequest object in ask would have the actual new value.
-          // If ask does not have this ResourceRequest then it was unchanged and
-          // so we can add the value back safely.
-          // This assumes that there will be no concurrent calls to allocate() and
-          // so we don't have to worry about ask being changed in the
-          // synchronized block at the beginning of this method.
-          for (ResourceRequest oldAsk : askList) {
-            if (!ask.contains(oldAsk)) {
-              ask.add(oldAsk);
-            }
-          }
-        }
-      }
-    }
-  }
-
-  @Override
-  public void unregisterApplicationMaster(FinalApplicationStatus appStatus,
-                                          String appMessage, String appTrackingUrl) throws YarnRemoteException {
-    FinishApplicationMasterRequest request = recordFactory
-      .newRecordInstance(FinishApplicationMasterRequest.class);
-    request.setAppAttemptId(appAttemptId);
-    request.setFinishApplicationStatus(appStatus);
-    if (appMessage != null) {
-      request.setDiagnostics(appMessage);
-    }
-    if (appTrackingUrl != null) {
-      request.setTrackingUrl(appTrackingUrl);
-    }
-    rmClient.finishApplicationMaster(request);
-  }
-
-  @Override
-  public synchronized void addContainerRequest(ContainerRequest req) {
-    // Create resource requests
-    if (req.hosts != null) {
-      for (String host : req.hosts) {
-        addResourceRequest(req.priority, host, req.capability, req.containerCount);
-      }
-    }
-
-    if (req.racks != null) {
-      for (String rack : req.racks) {
-        addResourceRequest(req.priority, rack, req.capability, req.containerCount);
-      }
-    }
-
-    // Off switch
-    addResourceRequest(req.priority, ANY, req.capability, req.containerCount);
-  }
-
-  @Override
-  public synchronized void removeContainerRequest(ContainerRequest req) {
-    // Update resource requests
-    if (req.hosts != null) {
-      for (String hostName : req.hosts) {
-        decResourceRequest(req.priority, hostName, req.capability, req.containerCount);
-      }
-    }
-
-    if (req.racks != null) {
-      for (String rack : req.racks) {
-        decResourceRequest(req.priority, rack, req.capability, req.containerCount);
-      }
-    }
-
-    decResourceRequest(req.priority, ANY, req.capability, req.containerCount);
-  }
-
-  @Override
-  public synchronized void releaseAssignedContainer(ContainerId containerId) {
-    release.add(containerId);
-  }
-
-  @Override
-  public synchronized Resource getClusterAvailableResources() {
-    return clusterAvailableResources;
-  }
-
-  @Override
-  public synchronized int getClusterNodeCount() {
-    return clusterNodeCount;
-  }
-
-  private void addResourceRequestToAsk(ResourceRequest remoteRequest) {
-    // This code looks weird but is needed because of the following scenario.
-    // A ResourceRequest is removed from the remoteRequestTable. A 0 container 
-    // request is added to 'ask' to notify the RM about not needing it any more.
-    // Before the call to allocate, the user now requests more containers. If 
-    // the locations of the 0 size request and the new request are the same
-    // (with the difference being only container count), then the set comparator
-    // will consider both to be the same and not add the new request to ask. So 
-    // we need to check for the "same" request being present and remove it and 
-    // then add it back. The comparator is container count agnostic.
-    // This should happen only rarely but we do need to guard against it.
-    if (ask.contains(remoteRequest)) {
-      ask.remove(remoteRequest);
-    }
-    ask.add(remoteRequest);
-  }
-
-  private void addResourceRequest(Priority priority, String resourceName,
-                                  Resource capability, int containerCount) {
-    Map<String, Map<Resource, ResourceRequest>> remoteRequests =
-      this.remoteRequestsTable.get(priority);
-    if (remoteRequests == null) {
-      remoteRequests = new HashMap<String, Map<Resource, ResourceRequest>>();
-      this.remoteRequestsTable.put(priority, remoteRequests);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Added priority=" + priority);
-      }
-    }
-    Map<Resource, ResourceRequest> reqMap = remoteRequests.get(resourceName);
-    if (reqMap == null) {
-      reqMap = new HashMap<Resource, ResourceRequest>();
-      remoteRequests.put(resourceName, reqMap);
-    }
-    ResourceRequest remoteRequest = reqMap.get(capability);
-    if (remoteRequest == null) {
-      remoteRequest = BuilderUtils.
-        newResourceRequest(priority, resourceName, capability, 0);
-      reqMap.put(capability, remoteRequest);
-    }
-
-    remoteRequest.setNumContainers(remoteRequest.getNumContainers() + containerCount);
-
-    // Note this down for next interaction with ResourceManager
-    addResourceRequestToAsk(remoteRequest);
-
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("addResourceRequest:" + " applicationId="
-                  + appAttemptId + " priority=" + priority.getPriority()
-                  + " resourceName=" + resourceName + " numContainers="
-                  + remoteRequest.getNumContainers() + " #asks=" + ask.size());
-    }
-  }
-
-  private void decResourceRequest(Priority priority, String resourceName,
-                                  Resource capability, int containerCount) {
-    Map<String, Map<Resource, ResourceRequest>> remoteRequests =
-      this.remoteRequestsTable.get(priority);
-
-    if (remoteRequests == null) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Not decrementing resource as priority " + priority
-                    + " is not present in request table");
-      }
-      return;
-    }
-
-    Map<Resource, ResourceRequest> reqMap = remoteRequests.get(resourceName);
-    if (reqMap == null) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Not decrementing resource as " + resourceName
-                    + " is not present in request table");
-      }
-      return;
-    }
-    ResourceRequest remoteRequest = reqMap.get(capability);
-
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("BEFORE decResourceRequest:" + " applicationId="
-                  + appAttemptId + " priority=" + priority.getPriority()
-                  + " resourceName=" + resourceName + " numContainers="
-                  + remoteRequest.getNumContainers() + " #asks=" + ask.size());
-    }
-
-    remoteRequest.
-      setNumContainers(remoteRequest.getNumContainers() - containerCount);
-    if (remoteRequest.getNumContainers() < 0) {
-      // guard against spurious removals
-      remoteRequest.setNumContainers(0);
-    }
-    // Send the ResourceRequest to RM even if is 0 because it needs to override
-    // a previously sent value. If ResourceRequest was not sent previously then
-    // sending 0 ought to be a no-op on RM.
-    addResourceRequestToAsk(remoteRequest);
-
-    // Delete entries from map if no longer needed.
-    if (remoteRequest.getNumContainers() == 0) {
-      reqMap.remove(capability);
-      if (reqMap.size() == 0) {
-        remoteRequests.remove(resourceName);
-      }
-      if (remoteRequests.size() == 0) {
-        remoteRequestsTable.remove(priority);
-      }
-    }
-
-    if (LOG.isDebugEnabled()) {
-      LOG.info("AFTER decResourceRequest:" + " applicationId="
-                 + appAttemptId + " priority=" + priority.getPriority()
-                 + " resourceName=" + resourceName + " numContainers="
-                 + remoteRequest.getNumContainers() + " #asks=" + ask.size());
-    }
-  }
-
-}
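The trickiest part of the ported client above is the three-level remoteRequestsTable (priority -> resource name -> capability -> outstanding request). The sketch below only illustrates that nesting and the increment/decrement-with-cleanup pattern, with the YARN record types replaced by plain strings and integers; it performs no RPC and is not part of Twill.

    import java.util.HashMap;
    import java.util.Map;
    import java.util.TreeMap;

    public class RequestTableDemo {
      // priority -> resource name (host, rack or "*") -> capability -> outstanding container count
      private final Map<Integer, Map<String, Map<String, Integer>>> table = new TreeMap<>();

      void add(int priority, String resourceName, String capability, int count) {
        table.computeIfAbsent(priority, p -> new HashMap<>())
             .computeIfAbsent(resourceName, r -> new HashMap<>())
             .merge(capability, count, Integer::sum);
      }

      void dec(int priority, String resourceName, String capability, int count) {
        Map<String, Map<String, Integer>> byName = table.get(priority);
        if (byName == null) {
          return;
        }
        Map<String, Integer> byCapability = byName.get(resourceName);
        if (byCapability == null || !byCapability.containsKey(capability)) {
          return;
        }
        // Guard against going negative, mirroring the spurious-removal guard above.
        byCapability.merge(capability, -count, (a, b) -> Math.max(0, a + b));
        // Delete entries from the maps once no containers are outstanding.
        if (byCapability.get(capability) == 0) {
          byCapability.remove(capability);
          if (byCapability.isEmpty()) {
            byName.remove(resourceName);
          }
          if (byName.isEmpty()) {
            table.remove(priority);
          }
        }
      }

      public static void main(String[] args) {
        RequestTableDemo demo = new RequestTableDemo();
        demo.add(1, "host1", "1024MB/1core", 2);
        demo.add(1, "*", "1024MB/1core", 2);
        demo.dec(1, "host1", "1024MB/1core", 2);
        System.out.println(demo.table);   // only the "*" entry remains
      }
    }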

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/main/hadoop20/org/apache/twill/internal/yarn/ports/AllocationResponse.java
----------------------------------------------------------------------
diff --git a/yarn/src/main/hadoop20/org/apache/twill/internal/yarn/ports/AllocationResponse.java b/yarn/src/main/hadoop20/org/apache/twill/internal/yarn/ports/AllocationResponse.java
deleted file mode 100644
index 89734fc..0000000
--- a/yarn/src/main/hadoop20/org/apache/twill/internal/yarn/ports/AllocationResponse.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.yarn.ports;
-
-import org.apache.hadoop.yarn.api.records.Container;
-import org.apache.hadoop.yarn.api.records.ContainerStatus;
-import org.apache.hadoop.yarn.api.records.Resource;
-
-import java.util.List;
-
-/**
- * This interface abstracts the differences between vanilla Hadoop YARN 2.0 and CDH 4.4.
- */
-public interface AllocationResponse {
-
-  int getResponseId();
-
-  Resource getAvailableResources();
-
-  List<Container> getAllocatedContainers();
-
-  List<ContainerStatus> getCompletedContainersStatuses();
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/main/hadoop20/org/apache/twill/internal/yarn/ports/AllocationResponses.java
----------------------------------------------------------------------
diff --git a/yarn/src/main/hadoop20/org/apache/twill/internal/yarn/ports/AllocationResponses.java b/yarn/src/main/hadoop20/org/apache/twill/internal/yarn/ports/AllocationResponses.java
deleted file mode 100644
index ea46c3b..0000000
--- a/yarn/src/main/hadoop20/org/apache/twill/internal/yarn/ports/AllocationResponses.java
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.yarn.ports;
-
-import com.google.common.base.Throwables;
-import com.google.common.reflect.TypeToken;
-import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
-import org.apache.hadoop.yarn.api.records.Container;
-import org.apache.hadoop.yarn.api.records.ContainerStatus;
-import org.apache.hadoop.yarn.api.records.Resource;
-
-import java.util.List;
-
-/**
- * Factory for building instance of {@link AllocationResponse} based on the response type.
- */
-public final class AllocationResponses {
-
-  /**
-   * A hack for CDH 4.4.0, as its AllocateResponse class has been rewritten and diverged from YARN 2.0.
-   */
-  private static final boolean IS_CDH_4_4;
-
-  static {
-    boolean result = false;
-    try {
-      try {
-        // See if it is standard YARN 2.0 AllocateResponse object.
-        AllocateResponse.class.getMethod("getAMResponse");
-      } catch (NoSuchMethodException e) {
-        // See if it is CDH 4.4 AllocateResponse object.
-        AllocationResponse.class.getMethod("getAllocatedContainers");
-        result = true;
-      }
-    } catch (Exception e) {
-      // Something very wrong in here, as it shouldn't arrive here.
-      e.printStackTrace();
-      throw Throwables.propagate(e);
-    }
-
-    IS_CDH_4_4 = result;
-  }
-
-  public static AllocationResponse create(Object response) {
-    if (IS_CDH_4_4) {
-      return new ReflectionAllocationResponse(response);
-    }
-
-    try {
-      Object amResponse = response.getClass().getMethod("getAMResponse").invoke(response);
-      return new ReflectionAllocationResponse(amResponse);
-    } catch (Exception e) {
-      throw Throwables.propagate(e);
-    }
-  }
-
-  private static final class ReflectionAllocationResponse implements AllocationResponse {
-
-    private final Object response;
-
-    private ReflectionAllocationResponse(Object response) {
-      this.response = response;
-    }
-
-    @Override
-    public int getResponseId() {
-      return call("getResponseId", TypeToken.of(Integer.class));
-    }
-
-    @Override
-    public Resource getAvailableResources() {
-      return call("getAvailableResources", TypeToken.of(Resource.class));
-    }
-
-    @Override
-    public List<Container> getAllocatedContainers() {
-      return call("getAllocatedContainers", new TypeToken<List<Container>>() {});
-    }
-
-    @Override
-    public List<ContainerStatus> getCompletedContainersStatuses() {
-      return call("getCompletedContainersStatuses", new TypeToken<List<ContainerStatus>>() {});
-    }
-
-    private <T> T call(String methodName, TypeToken<T> resultType) {
-      try {
-        return (T) resultType.getRawType().cast(response.getClass().getMethod(methodName).invoke(response));
-      } catch (Exception e) {
-        throw Throwables.propagate(e);
-      }
-    }
-  }
-
-  private AllocationResponses() {
-  }
-}
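The factory above decides once, at class-load time, which response flavour it is running against and then forwards every call reflectively. A standalone illustration of that detect-once-and-forward pattern is sketched below; it probes java.util.Date purely as a stand-in for the Hadoop response class, so only the shape of the technique carries over, not the actual classes involved.

    public class ReflectionProbeDemo {
      // Decided once at class-initialization time, like IS_CDH_4_4 above.
      private static final boolean HAS_TO_INSTANT;

      static {
        boolean result;
        try {
          java.util.Date.class.getMethod("toInstant");   // present on Java 8 and later
          result = true;
        } catch (NoSuchMethodException e) {
          result = false;
        }
        HAS_TO_INSTANT = result;
      }

      public static void main(String[] args) throws Exception {
        java.util.Date now = new java.util.Date();
        if (HAS_TO_INSTANT) {
          // Forward the call reflectively, the way ReflectionAllocationResponse does.
          Object instant = java.util.Date.class.getMethod("toInstant").invoke(now);
          System.out.println("toInstant() -> " + instant);
        } else {
          System.out.println("older runtime, falling back to getTime(): " + now.getTime());
        }
      }
    }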

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/main/hadoop21/org/apache/twill/internal/yarn/Hadoop21YarnAMClient.java
----------------------------------------------------------------------
diff --git a/yarn/src/main/hadoop21/org/apache/twill/internal/yarn/Hadoop21YarnAMClient.java b/yarn/src/main/hadoop21/org/apache/twill/internal/yarn/Hadoop21YarnAMClient.java
deleted file mode 100644
index ce8f90f..0000000
--- a/yarn/src/main/hadoop21/org/apache/twill/internal/yarn/Hadoop21YarnAMClient.java
+++ /dev/null
@@ -1,207 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.yarn;
-
-import org.apache.twill.internal.ProcessLauncher;
-import org.apache.twill.internal.appmaster.RunnableProcessLauncher;
-import com.google.common.base.Function;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ArrayListMultimap;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Iterables;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Multimap;
-import com.google.common.util.concurrent.AbstractIdleService;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.yarn.api.ApplicationConstants;
-import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
-import org.apache.hadoop.yarn.api.records.Container;
-import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.ContainerStatus;
-import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
-import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.client.api.AMRMClient;
-import org.apache.hadoop.yarn.util.ConverterUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.net.InetSocketAddress;
-import java.net.URL;
-import java.util.List;
-import java.util.UUID;
-
-/**
- * Implementation of {@link YarnAMClient} for Hadoop 2.1.
- */
-public final class Hadoop21YarnAMClient extends AbstractIdleService implements YarnAMClient {
-
-  private static final Logger LOG = LoggerFactory.getLogger(Hadoop21YarnAMClient.class);
-
-  private static final Function<ContainerStatus, YarnContainerStatus> STATUS_TRANSFORM;
-
-  static {
-    STATUS_TRANSFORM = new Function<ContainerStatus, YarnContainerStatus>() {
-      @Override
-      public YarnContainerStatus apply(ContainerStatus status) {
-        return new Hadoop21YarnContainerStatus(status);
-      }
-    };
-  }
-
-  private final ContainerId containerId;
-  private final Multimap<String, AMRMClient.ContainerRequest> containerRequests;
-  private final AMRMClient<AMRMClient.ContainerRequest> amrmClient;
-  private final Hadoop21YarnNMClient nmClient;
-  private InetSocketAddress trackerAddr;
-  private URL trackerUrl;
-  private Resource maxCapability;
-
-  public Hadoop21YarnAMClient(Configuration conf) {
-    String masterContainerId = System.getenv().get(ApplicationConstants.Environment.CONTAINER_ID.name());
-    Preconditions.checkArgument(masterContainerId != null,
-                                "Missing %s from environment", ApplicationConstants.Environment.CONTAINER_ID.name());
-    this.containerId = ConverterUtils.toContainerId(masterContainerId);
-    this.containerRequests = ArrayListMultimap.create();
-
-    this.amrmClient = AMRMClient.createAMRMClient();
-    this.amrmClient.init(conf);
-    this.nmClient = new Hadoop21YarnNMClient(conf);
-  }
-
-  @Override
-  protected void startUp() throws Exception {
-    Preconditions.checkNotNull(trackerAddr, "Tracker address not set.");
-    Preconditions.checkNotNull(trackerUrl, "Tracker URL not set.");
-
-    amrmClient.start();
-    RegisterApplicationMasterResponse response = amrmClient.registerApplicationMaster(trackerAddr.getHostName(),
-                                                                                      trackerAddr.getPort(),
-                                                                                      trackerUrl.toString());
-    maxCapability = response.getMaximumResourceCapability();
-    nmClient.startAndWait();
-  }
-
-  @Override
-  protected void shutDown() throws Exception {
-    nmClient.stopAndWait();
-    amrmClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED, null, trackerUrl.toString());
-    amrmClient.stop();
-  }
-
-  @Override
-  public ContainerId getContainerId() {
-    return containerId;
-  }
-
-  @Override
-  public String getHost() {
-    return System.getenv().get(ApplicationConstants.Environment.NM_HOST.name());
-  }
-
-  @Override
-  public void setTracker(InetSocketAddress trackerAddr, URL trackerUrl) {
-    this.trackerAddr = trackerAddr;
-    this.trackerUrl = trackerUrl;
-  }
-
-  @Override
-  public synchronized void allocate(float progress, AllocateHandler handler) throws Exception {
-    AllocateResponse allocateResponse = amrmClient.allocate(progress);
-    List<ProcessLauncher<YarnContainerInfo>> launchers
-      = Lists.newArrayListWithCapacity(allocateResponse.getAllocatedContainers().size());
-
-    for (Container container : allocateResponse.getAllocatedContainers()) {
-      launchers.add(new RunnableProcessLauncher(new Hadoop21YarnContainerInfo(container), nmClient));
-    }
-
-    if (!launchers.isEmpty()) {
-      handler.acquired(launchers);
-
-      // If no process has been launched through the given launcher, return the container.
-      for (ProcessLauncher<YarnContainerInfo> l : launchers) {
-        // This cast always works.
-        RunnableProcessLauncher launcher = (RunnableProcessLauncher) l;
-        if (!launcher.isLaunched()) {
-          Container container = launcher.getContainerInfo().getContainer();
-          LOG.info("Nothing to run in container, releasing it: {}", container);
-          amrmClient.releaseAssignedContainer(container.getId());
-        }
-      }
-    }
-
-    List<YarnContainerStatus> completed = ImmutableList.copyOf(
-      Iterables.transform(allocateResponse.getCompletedContainersStatuses(), STATUS_TRANSFORM));
-    if (!completed.isEmpty()) {
-      handler.completed(completed);
-    }
-  }
-
-  @Override
-  public ContainerRequestBuilder addContainerRequest(Resource capability) {
-    return addContainerRequest(capability, 1);
-  }
-
-  @Override
-  public ContainerRequestBuilder addContainerRequest(Resource capability, int count) {
-    return new ContainerRequestBuilder(adjustCapability(capability), count) {
-      @Override
-      public String apply() {
-        synchronized (Hadoop21YarnAMClient.this) {
-          String id = UUID.randomUUID().toString();
-
-          String[] hosts = this.hosts.isEmpty() ? null : this.hosts.toArray(new String[this.hosts.size()]);
-          String[] racks = this.racks.isEmpty() ? null : this.racks.toArray(new String[this.racks.size()]);
-
-          for (int i = 0; i < count; i++) {
-            AMRMClient.ContainerRequest request = new AMRMClient.ContainerRequest(capability, hosts, racks, priority);
-            containerRequests.put(id, request);
-            amrmClient.addContainerRequest(request);
-          }
-
-          return id;
-        }
-      }
-    };
-  }
-
-  @Override
-  public synchronized void completeContainerRequest(String id) {
-    for (AMRMClient.ContainerRequest request : containerRequests.removeAll(id)) {
-      amrmClient.removeContainerRequest(request);
-    }
-  }
-
-  private Resource adjustCapability(Resource resource) {
-    int cores = resource.getVirtualCores();
-    int updatedCores = Math.min(resource.getVirtualCores(), maxCapability.getVirtualCores());
-
-    if (cores != updatedCores) {
-      resource.setVirtualCores(updatedCores);
-      LOG.info("Adjust virtual cores requirement from {} to {}.", cores, updatedCores);
-    }
-
-    int memory = resource.getMemory();
-    int updatedMemory = Math.min(memory, maxCapability.getMemory());
-    if (memory != updatedMemory) {
-      resource.setMemory(updatedMemory);
-      LOG.info("Adjust memory requirement from {} to {} MB.", memory, updatedMemory);
-    }
-
-    return resource;
-  }
-}
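The adjustCapability method above simply clamps the requested cores and memory to the maximum capability returned when the application master registered. A self-contained sketch of that clamping, with plain ints standing in for the YARN Resource record, looks like this:

    public class CapabilityClampDemo {
      // Returns {memoryMb, cores} clamped to the cluster maximums, logging any adjustment.
      static int[] clamp(int requestedMemoryMb, int requestedCores, int maxMemoryMb, int maxCores) {
        int memory = Math.min(requestedMemoryMb, maxMemoryMb);
        int cores = Math.min(requestedCores, maxCores);
        if (cores != requestedCores) {
          System.out.println("Adjust virtual cores requirement from " + requestedCores + " to " + cores + ".");
        }
        if (memory != requestedMemoryMb) {
          System.out.println("Adjust memory requirement from " + requestedMemoryMb + " to " + memory + " MB.");
        }
        return new int[] { memory, cores };
      }

      public static void main(String[] args) {
        // Ask for more than the cluster allows; both dimensions get clamped.
        int[] adjusted = clamp(16384, 8, 8192, 4);
        System.out.println("granted: " + adjusted[0] + " MB, " + adjusted[1] + " cores");
      }
    }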

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/main/hadoop21/org/apache/twill/internal/yarn/Hadoop21YarnAppClient.java
----------------------------------------------------------------------
diff --git a/yarn/src/main/hadoop21/org/apache/twill/internal/yarn/Hadoop21YarnAppClient.java b/yarn/src/main/hadoop21/org/apache/twill/internal/yarn/Hadoop21YarnAppClient.java
deleted file mode 100644
index 50b212d..0000000
--- a/yarn/src/main/hadoop21/org/apache/twill/internal/yarn/Hadoop21YarnAppClient.java
+++ /dev/null
@@ -1,177 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.yarn;
-
-import org.apache.twill.api.TwillSpecification;
-import org.apache.twill.internal.ProcessController;
-import org.apache.twill.internal.ProcessLauncher;
-import org.apache.twill.internal.appmaster.ApplicationMasterProcessLauncher;
-import org.apache.twill.internal.appmaster.ApplicationSubmitter;
-import com.google.common.base.Throwables;
-import com.google.common.util.concurrent.AbstractIdleService;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.security.Credentials;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.security.token.TokenIdentifier;
-import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
-import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
-import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.client.api.YarnClient;
-import org.apache.hadoop.yarn.client.api.YarnClientApplication;
-import org.apache.hadoop.yarn.util.ConverterUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Implementation of {@link YarnAppClient} for Hadoop 2.1.
- */
-public final class Hadoop21YarnAppClient extends AbstractIdleService implements YarnAppClient {
-
-  private static final Logger LOG = LoggerFactory.getLogger(Hadoop21YarnAppClient.class);
-  private final YarnClient yarnClient;
-
-  public Hadoop21YarnAppClient(Configuration configuration) {
-    this.yarnClient = YarnClient.createYarnClient();
-    yarnClient.init(configuration);
-  }
-
-  @Override
-  public ProcessLauncher<ApplicationId> createLauncher(TwillSpecification twillSpec) throws Exception {
-    // Request for new application
-    YarnClientApplication application = yarnClient.createApplication();
-    final GetNewApplicationResponse response = application.getNewApplicationResponse();
-    final ApplicationId appId = response.getApplicationId();
-
-    // Setup the context for application submission
-    final ApplicationSubmissionContext appSubmissionContext = application.getApplicationSubmissionContext();
-    appSubmissionContext.setApplicationId(appId);
-    appSubmissionContext.setApplicationName(twillSpec.getName());
-
-    ApplicationSubmitter submitter = new ApplicationSubmitter() {
-      @Override
-      public ProcessController<YarnApplicationReport> submit(YarnLaunchContext context, Resource capability) {
-        ContainerLaunchContext launchContext = context.getLaunchContext();
-
-        addRMToken(launchContext);
-        appSubmissionContext.setAMContainerSpec(launchContext);
-        appSubmissionContext.setResource(adjustMemory(response, capability));
-        appSubmissionContext.setMaxAppAttempts(2);
-
-        try {
-          yarnClient.submitApplication(appSubmissionContext);
-          return new ProcessControllerImpl(yarnClient, appId);
-        } catch (Exception e) {
-          LOG.error("Failed to submit application {}", appId, e);
-          throw Throwables.propagate(e);
-        }
-      }
-    };
-
-    return new ApplicationMasterProcessLauncher(appId, submitter);
-  }
-
-  private Resource adjustMemory(GetNewApplicationResponse response, Resource capability) {
-    int maxMemory = response.getMaximumResourceCapability().getMemory();
-    int updatedMemory = capability.getMemory();
-
-    if (updatedMemory > maxMemory) {
-      capability.setMemory(maxMemory);
-    }
-
-    return capability;
-  }
-
-  private void addRMToken(ContainerLaunchContext context) {
-    if (!UserGroupInformation.isSecurityEnabled()) {
-      return;
-    }
-
-    try {
-      Credentials credentials = YarnUtils.decodeCredentials(context.getTokens());
-
-      Configuration config = yarnClient.getConfig();
-      Token<TokenIdentifier> token = ConverterUtils.convertFromYarn(
-        yarnClient.getRMDelegationToken(new Text(YarnUtils.getYarnTokenRenewer(config))),
-        YarnUtils.getRMAddress(config));
-
-      LOG.info("Added RM delegation token {}", token);
-      credentials.addToken(token.getService(), token);
-
-      context.setTokens(YarnUtils.encodeCredentials(credentials));
-
-    } catch (Exception e) {
-      LOG.error("Failed to create credentials.", e);
-      throw Throwables.propagate(e);
-    }
-  }
-
-  @Override
-  public ProcessLauncher<ApplicationId> createLauncher(String user, TwillSpecification twillSpec) throws Exception {
-    // Ignore user
-    return createLauncher(twillSpec);
-  }
-
-  @Override
-  public ProcessController<YarnApplicationReport> createProcessController(ApplicationId appId) {
-    return new ProcessControllerImpl(yarnClient, appId);
-  }
-
-  @Override
-  protected void startUp() throws Exception {
-    yarnClient.start();
-  }
-
-  @Override
-  protected void shutDown() throws Exception {
-    yarnClient.stop();
-  }
-
-  private static final class ProcessControllerImpl implements ProcessController<YarnApplicationReport> {
-    private final YarnClient yarnClient;
-    private final ApplicationId appId;
-
-    public ProcessControllerImpl(YarnClient yarnClient, ApplicationId appId) {
-      this.yarnClient = yarnClient;
-      this.appId = appId;
-    }
-
-    @Override
-    public YarnApplicationReport getReport() {
-      try {
-        return new Hadoop21YarnApplicationReport(yarnClient.getApplicationReport(appId));
-      } catch (Exception e) {
-        LOG.error("Failed to get application report {}", appId, e);
-        throw Throwables.propagate(e);
-      }
-    }
-
-    @Override
-    public void cancel() {
-      try {
-        yarnClient.killApplication(appId);
-      } catch (Exception e) {
-        LOG.error("Failed to kill application {}", appId, e);
-        throw Throwables.propagate(e);
-      }
-    }
-  }
-}

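For reference, a minimal sketch of driving the removed Hadoop21YarnAppClient above from a standalone program; the class name AppReportExample and the command-line handling are illustrative assumptions, and only methods visible in the diff are used:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.api.records.ApplicationId;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;
    import org.apache.hadoop.yarn.util.ConverterUtils;
    import org.apache.twill.internal.ProcessController;
    import org.apache.twill.internal.yarn.Hadoop21YarnAppClient;
    import org.apache.twill.internal.yarn.YarnApplicationReport;

    public final class AppReportExample {
      public static void main(String[] args) throws Exception {
        // Illustrative: expects an application id string such as "application_1234567890123_0001".
        ApplicationId appId = ConverterUtils.toApplicationId(args[0]);
        Hadoop21YarnAppClient appClient = new Hadoop21YarnAppClient(new YarnConfiguration(new Configuration()));
        appClient.startAndWait();
        try {
          // Look up the report of an already submitted application and print its state.
          ProcessController<YarnApplicationReport> controller = appClient.createProcessController(appId);
          YarnApplicationReport report = controller.getReport();
          System.out.println(report.getYarnApplicationState() + " " + report.getTrackingUrl());
        } finally {
          appClient.stopAndWait();
        }
      }
    }
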
http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/main/hadoop21/org/apache/twill/internal/yarn/Hadoop21YarnApplicationReport.java
----------------------------------------------------------------------
diff --git a/yarn/src/main/hadoop21/org/apache/twill/internal/yarn/Hadoop21YarnApplicationReport.java b/yarn/src/main/hadoop21/org/apache/twill/internal/yarn/Hadoop21YarnApplicationReport.java
deleted file mode 100644
index 6e614f5..0000000
--- a/yarn/src/main/hadoop21/org/apache/twill/internal/yarn/Hadoop21YarnApplicationReport.java
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.yarn;
-
-import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.api.records.ApplicationReport;
-import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
-import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
-import org.apache.hadoop.yarn.api.records.YarnApplicationState;
-
-/**
- * The Hadoop 2.1 implementation of {@link YarnApplicationReport}, delegating to {@link ApplicationReport}.
- */
-public final class Hadoop21YarnApplicationReport implements YarnApplicationReport {
-
-  private final ApplicationReport report;
-
-  public Hadoop21YarnApplicationReport(ApplicationReport report) {
-    this.report = report;
-  }
-
-  @Override
-  public ApplicationId getApplicationId() {
-    return report.getApplicationId();
-  }
-
-  @Override
-  public ApplicationAttemptId getCurrentApplicationAttemptId() {
-    return report.getCurrentApplicationAttemptId();
-  }
-
-  @Override
-  public String getQueue() {
-    return report.getQueue();
-  }
-
-  @Override
-  public String getName() {
-    return report.getName();
-  }
-
-  @Override
-  public String getHost() {
-    return report.getHost();
-  }
-
-  @Override
-  public int getRpcPort() {
-    return report.getRpcPort();
-  }
-
-  @Override
-  public YarnApplicationState getYarnApplicationState() {
-    return report.getYarnApplicationState();
-  }
-
-  @Override
-  public String getDiagnostics() {
-    return report.getDiagnostics();
-  }
-
-  @Override
-  public String getTrackingUrl() {
-    return report.getTrackingUrl();
-  }
-
-  @Override
-  public String getOriginalTrackingUrl() {
-    return report.getOriginalTrackingUrl();
-  }
-
-  @Override
-  public long getStartTime() {
-    return report.getStartTime();
-  }
-
-  @Override
-  public long getFinishTime() {
-    return report.getFinishTime();
-  }
-
-  @Override
-  public FinalApplicationStatus getFinalApplicationStatus() {
-    return report.getFinalApplicationStatus();
-  }
-
-  @Override
-  public ApplicationResourceUsageReport getApplicationResourceUsageReport() {
-    return report.getApplicationResourceUsageReport();
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/main/hadoop21/org/apache/twill/internal/yarn/Hadoop21YarnContainerInfo.java
----------------------------------------------------------------------
diff --git a/yarn/src/main/hadoop21/org/apache/twill/internal/yarn/Hadoop21YarnContainerInfo.java b/yarn/src/main/hadoop21/org/apache/twill/internal/yarn/Hadoop21YarnContainerInfo.java
deleted file mode 100644
index 86903c1..0000000
--- a/yarn/src/main/hadoop21/org/apache/twill/internal/yarn/Hadoop21YarnContainerInfo.java
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.yarn;
-
-import com.google.common.base.Throwables;
-import org.apache.hadoop.yarn.api.records.Container;
-
-import java.net.InetAddress;
-import java.net.UnknownHostException;
-
-/**
- * The Hadoop 2.1 implementation of {@link YarnContainerInfo}, wrapping a YARN {@link Container}.
- */
-public final class Hadoop21YarnContainerInfo implements YarnContainerInfo {
-
-  private final Container container;
-
-  public Hadoop21YarnContainerInfo(Container container) {
-    this.container = container;
-  }
-
-  @Override
-  public <T> T getContainer() {
-    return (T) container;
-  }
-
-  @Override
-  public String getId() {
-    return container.getId().toString();
-  }
-
-  @Override
-  public InetAddress getHost() {
-    try {
-      return InetAddress.getByName(container.getNodeId().getHost());
-    } catch (UnknownHostException e) {
-      throw Throwables.propagate(e);
-    }
-  }
-
-  @Override
-  public int getPort() {
-    return container.getNodeId().getPort();
-  }
-
-  @Override
-  public int getMemoryMB() {
-    return container.getResource().getMemory();
-  }
-
-  @Override
-  public int getVirtualCores() {
-    return container.getResource().getVirtualCores();
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/main/hadoop21/org/apache/twill/internal/yarn/Hadoop21YarnContainerStatus.java
----------------------------------------------------------------------
diff --git a/yarn/src/main/hadoop21/org/apache/twill/internal/yarn/Hadoop21YarnContainerStatus.java b/yarn/src/main/hadoop21/org/apache/twill/internal/yarn/Hadoop21YarnContainerStatus.java
deleted file mode 100644
index f5758c7..0000000
--- a/yarn/src/main/hadoop21/org/apache/twill/internal/yarn/Hadoop21YarnContainerStatus.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.yarn;
-
-import org.apache.hadoop.yarn.api.records.ContainerState;
-import org.apache.hadoop.yarn.api.records.ContainerStatus;
-
-/**
- * The Hadoop 2.1 implementation of {@link YarnContainerStatus}, wrapping a YARN {@link ContainerStatus}.
- */
-public final class Hadoop21YarnContainerStatus implements YarnContainerStatus {
-
-  private final ContainerStatus containerStatus;
-
-  public Hadoop21YarnContainerStatus(ContainerStatus containerStatus) {
-    this.containerStatus = containerStatus;
-  }
-
-  @Override
-  public String getContainerId() {
-    return containerStatus.getContainerId().toString();
-  }
-
-  @Override
-  public ContainerState getState() {
-    return containerStatus.getState();
-  }
-
-  @Override
-  public int getExitStatus() {
-    return containerStatus.getExitStatus();
-  }
-
-  @Override
-  public String getDiagnostics() {
-    return containerStatus.getDiagnostics();
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/main/hadoop21/org/apache/twill/internal/yarn/Hadoop21YarnLaunchContext.java
----------------------------------------------------------------------
diff --git a/yarn/src/main/hadoop21/org/apache/twill/internal/yarn/Hadoop21YarnLaunchContext.java b/yarn/src/main/hadoop21/org/apache/twill/internal/yarn/Hadoop21YarnLaunchContext.java
deleted file mode 100644
index 8621f93..0000000
--- a/yarn/src/main/hadoop21/org/apache/twill/internal/yarn/Hadoop21YarnLaunchContext.java
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.yarn;
-
-import com.google.common.base.Function;
-import com.google.common.collect.Maps;
-import org.apache.hadoop.security.Credentials;
-import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
-import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
-import org.apache.hadoop.yarn.api.records.LocalResource;
-import org.apache.hadoop.yarn.util.Records;
-
-import java.nio.ByteBuffer;
-import java.util.List;
-import java.util.Map;
-
-/**
- * The Hadoop 2.1 implementation of {@link YarnLaunchContext}, wrapping a {@link ContainerLaunchContext}.
- */
-public final class Hadoop21YarnLaunchContext implements YarnLaunchContext {
-
-  private static final Function<YarnLocalResource, LocalResource> RESOURCE_TRANSFORM;
-
-  static {
-    // Creates transform function from YarnLocalResource -> LocalResource
-    RESOURCE_TRANSFORM = new Function<YarnLocalResource, LocalResource>() {
-      @Override
-      public LocalResource apply(YarnLocalResource input) {
-        return input.getLocalResource();
-      }
-    };
-  }
-
-  private final ContainerLaunchContext launchContext;
-
-  public Hadoop21YarnLaunchContext() {
-    launchContext = Records.newRecord(ContainerLaunchContext.class);
-  }
-
-  @Override
-  public <T> T getLaunchContext() {
-    return (T) launchContext;
-  }
-
-  @Override
-  public void setCredentials(Credentials credentials) {
-    launchContext.setTokens(YarnUtils.encodeCredentials(credentials));
-  }
-
-  @Override
-  public void setLocalResources(Map<String, YarnLocalResource> localResources) {
-    launchContext.setLocalResources(Maps.transformValues(localResources, RESOURCE_TRANSFORM));
-  }
-
-  @Override
-  public void setServiceData(Map<String, ByteBuffer> serviceData) {
-    launchContext.setServiceData(serviceData);
-  }
-
-  @Override
-  public Map<String, String> getEnvironment() {
-    return launchContext.getEnvironment();
-  }
-
-  @Override
-  public void setEnvironment(Map<String, String> environment) {
-    launchContext.setEnvironment(environment);
-  }
-
-  @Override
-  public List<String> getCommands() {
-    return launchContext.getCommands();
-  }
-
-  @Override
-  public void setCommands(List<String> commands) {
-    launchContext.setCommands(commands);
-  }
-
-  @Override
-  public void setApplicationACLs(Map<ApplicationAccessType, String> acls) {
-    launchContext.setApplicationACLs(acls);
-  }
-}

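A small sketch of building a launch context with the Hadoop21YarnLaunchContext above; the environment variable name and the commands are illustrative only, and in Twill these values are filled in by the launcher:

    import java.util.Arrays;
    import com.google.common.collect.ImmutableMap;
    import org.apache.twill.internal.yarn.Hadoop21YarnLaunchContext;
    import org.apache.twill.internal.yarn.YarnLaunchContext;

    public final class LaunchContextExample {
      public static void main(String[] args) {
        // Illustrative environment and commands for a container launch.
        YarnLaunchContext context = new Hadoop21YarnLaunchContext();
        context.setEnvironment(ImmutableMap.of("EXAMPLE_ENV", "value"));
        context.setCommands(Arrays.asList("java", "-version"));
        System.out.println(context.getCommands());
      }
    }
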
http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/main/hadoop21/org/apache/twill/internal/yarn/Hadoop21YarnLocalResource.java
----------------------------------------------------------------------
diff --git a/yarn/src/main/hadoop21/org/apache/twill/internal/yarn/Hadoop21YarnLocalResource.java b/yarn/src/main/hadoop21/org/apache/twill/internal/yarn/Hadoop21YarnLocalResource.java
deleted file mode 100644
index 3f756bd..0000000
--- a/yarn/src/main/hadoop21/org/apache/twill/internal/yarn/Hadoop21YarnLocalResource.java
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.yarn;
-
-import org.apache.hadoop.yarn.api.records.LocalResource;
-import org.apache.hadoop.yarn.api.records.LocalResourceType;
-import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
-import org.apache.hadoop.yarn.api.records.URL;
-import org.apache.hadoop.yarn.util.Records;
-
-/**
- * The Hadoop 2.1 implementation of {@link YarnLocalResource}, wrapping a YARN {@link LocalResource}.
- */
-public final class Hadoop21YarnLocalResource implements YarnLocalResource {
-
-  private final LocalResource localResource;
-
-  public Hadoop21YarnLocalResource() {
-    this.localResource = Records.newRecord(LocalResource.class);
-  }
-
-  @Override
-  public <T> T getLocalResource() {
-    return (T) localResource;
-  }
-
-  @Override
-  public URL getResource() {
-    return localResource.getResource();
-  }
-
-  @Override
-  public void setResource(URL resource) {
-    localResource.setResource(resource);
-  }
-
-  @Override
-  public long getSize() {
-    return localResource.getSize();
-  }
-
-  @Override
-  public void setSize(long size) {
-    localResource.setSize(size);
-  }
-
-  @Override
-  public long getTimestamp() {
-    return localResource.getTimestamp();
-  }
-
-  @Override
-  public void setTimestamp(long timestamp) {
-    localResource.setTimestamp(timestamp);
-  }
-
-  @Override
-  public LocalResourceType getType() {
-    return localResource.getType();
-  }
-
-  @Override
-  public void setType(LocalResourceType type) {
-    localResource.setType(type);
-  }
-
-  @Override
-  public LocalResourceVisibility getVisibility() {
-    return localResource.getVisibility();
-  }
-
-  @Override
-  public void setVisibility(LocalResourceVisibility visibility) {
-    localResource.setVisibility(visibility);
-  }
-
-  @Override
-  public String getPattern() {
-    return localResource.getPattern();
-  }
-
-  @Override
-  public void setPattern(String pattern) {
-    localResource.setPattern(pattern);
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/main/hadoop21/org/apache/twill/internal/yarn/Hadoop21YarnNMClient.java
----------------------------------------------------------------------
diff --git a/yarn/src/main/hadoop21/org/apache/twill/internal/yarn/Hadoop21YarnNMClient.java b/yarn/src/main/hadoop21/org/apache/twill/internal/yarn/Hadoop21YarnNMClient.java
deleted file mode 100644
index d3a6a80..0000000
--- a/yarn/src/main/hadoop21/org/apache/twill/internal/yarn/Hadoop21YarnNMClient.java
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.yarn;
-
-import org.apache.twill.common.Cancellable;
-import com.google.common.base.Throwables;
-import com.google.common.util.concurrent.AbstractIdleService;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.yarn.api.records.Container;
-import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
-import org.apache.hadoop.yarn.api.records.ContainerState;
-import org.apache.hadoop.yarn.api.records.ContainerStatus;
-import org.apache.hadoop.yarn.client.api.NMClient;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * The Hadoop 2.1 implementation of {@link YarnNMClient} for starting and stopping containers.
- */
-public final class Hadoop21YarnNMClient extends AbstractIdleService implements YarnNMClient {
-
-  private static final Logger LOG = LoggerFactory.getLogger(Hadoop21YarnNMClient.class);
-
-  private final NMClient nmClient;
-
-  public Hadoop21YarnNMClient(Configuration configuration) {
-    this.nmClient = NMClient.createNMClient();
-    nmClient.init(configuration);
-  }
-
-  @Override
-  public Cancellable start(YarnContainerInfo containerInfo, YarnLaunchContext launchContext) {
-    try {
-      Container container = containerInfo.getContainer();
-      nmClient.startContainer(container, launchContext.<ContainerLaunchContext>getLaunchContext());
-      return new ContainerTerminator(container, nmClient);
-    } catch (Exception e) {
-      LOG.error("Error in launching process", e);
-      throw Throwables.propagate(e);
-    }
-
-  }
-
-  @Override
-  protected void startUp() throws Exception {
-    nmClient.start();
-  }
-
-  @Override
-  protected void shutDown() throws Exception {
-    nmClient.stop();
-  }
-
-  private static final class ContainerTerminator implements Cancellable {
-
-    private final Container container;
-    private final NMClient nmClient;
-
-    private ContainerTerminator(Container container, NMClient nmClient) {
-      this.container = container;
-      this.nmClient = nmClient;
-    }
-
-    @Override
-    public void cancel() {
-      LOG.info("Request to stop container {}.", container.getId());
-
-      try {
-        nmClient.stopContainer(container.getId(), container.getNodeId());
-        boolean completed = false;
-        while (!completed) {
-          ContainerStatus status = nmClient.getContainerStatus(container.getId(), container.getNodeId());
-          LOG.info("Container status: {} {}", status, status.getDiagnostics());
-
-          completed = (status.getState() == ContainerState.COMPLETE);
-        }
-        LOG.info("Container {} stopped.", container.getId());
-      } catch (Exception e) {
-        LOG.error("Failed to stop container {}", container.getId(), e);
-        throw Throwables.propagate(e);
-      }
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/main/java/org/apache/twill/filesystem/HDFSLocation.java
----------------------------------------------------------------------
diff --git a/yarn/src/main/java/org/apache/twill/filesystem/HDFSLocation.java b/yarn/src/main/java/org/apache/twill/filesystem/HDFSLocation.java
deleted file mode 100644
index b0eeb43..0000000
--- a/yarn/src/main/java/org/apache/twill/filesystem/HDFSLocation.java
+++ /dev/null
@@ -1,193 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.filesystem;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Options;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.net.URI;
-import java.util.UUID;
-
-/**
- * A concrete implementation of {@link Location} for the HDFS filesystem.
- */
-final class HDFSLocation implements Location {
-  private final FileSystem fs;
-  private final Path path;
-
-  /**
-   * Constructs an HDFSLocation.
-   *
-   * @param fs  An instance of {@link FileSystem}
-   * @param path of the file.
-   */
-  HDFSLocation(FileSystem fs, Path path) {
-    this.fs = fs;
-    this.path = path;
-  }
-
-  /**
-   * Checks if this location exists on HDFS.
-   *
-   * @return true if found; false otherwise.
-   * @throws IOException
-   */
-  @Override
-  public boolean exists() throws IOException {
-    return fs.exists(path);
-  }
-
-  /**
-   * @return An {@link InputStream} for this location on HDFS.
-   * @throws IOException
-   */
-  @Override
-  public InputStream getInputStream() throws IOException {
-    return fs.open(path);
-  }
-
-  /**
-   * @return An {@link OutputStream} for this location on HDFS.
-   * @throws IOException
-   */
-  @Override
-  public OutputStream getOutputStream() throws IOException {
-    return fs.create(path);
-  }
-
-  @Override
-  public OutputStream getOutputStream(String permission) throws IOException {
-    Configuration conf = fs.getConf();
-    return fs.create(path,
-                     new FsPermission(permission),
-                     true,
-                     conf.getInt("io.file.buffer.size", 4096),
-                     fs.getDefaultReplication(path),
-                     fs.getDefaultBlockSize(path),
-                     null);
-  }
-
-  /**
-   * Appends the child to the current {@link Location} on HDFS.
-   * <p>
-   * Returns a new instance of Location.
-   * </p>
-   *
-   * @param child to be appended to this location.
-   * @return A new instance of {@link Location}
-   * @throws IOException
-   */
-  @Override
-  public Location append(String child) throws IOException {
-    if (child.startsWith("/")) {
-      child = child.substring(1);
-    }
-    return new HDFSLocation(fs, new Path(URI.create(path.toUri() + "/" + child)));
-  }
-
-  @Override
-  public Location getTempFile(String suffix) throws IOException {
-    Path path = new Path(
-      URI.create(this.path.toUri() + "." + UUID.randomUUID() + (suffix == null ? TEMP_FILE_SUFFIX : suffix)));
-    return new HDFSLocation(fs, path);
-  }
-
-  /**
-   * @return The name of the file or directory denoted by this abstract pathname.
-   */
-  @Override
-  public String getName() {
-    return path.getName();
-  }
-
-  @Override
-  public boolean createNew() throws IOException {
-    return fs.createNewFile(path);
-  }
-
-  /**
-   * @return A {@link URI} for this location on HDFS.
-   */
-  @Override
-  public URI toURI() {
-    return path.toUri();
-  }
-
-  /**
-   * Deletes the file or directory denoted by this abstract pathname. If this
-   * pathname denotes a directory, then the directory must be empty in order
-   * to be deleted.
-   *
-   * @return true if and only if the file or directory is successfully deleted; false otherwise.
-   */
-  @Override
-  public boolean delete() throws IOException {
-    return fs.delete(path, false);
-  }
-
-  @Override
-  public boolean delete(boolean recursive) throws IOException {
-    return fs.delete(path, recursive);
-  }
-
-  @Override
-  public Location renameTo(Location destination) throws IOException {
-    // Destination will always be of the same type as this location.
-    if (fs instanceof DistributedFileSystem) {
-      ((DistributedFileSystem) fs).rename(path, ((HDFSLocation) destination).path, Options.Rename.OVERWRITE);
-      return new HDFSLocation(fs, new Path(destination.toURI()));
-    }
-
-    if (fs.rename(path, ((HDFSLocation) destination).path)) {
-      return new HDFSLocation(fs, new Path(destination.toURI()));
-    } else {
-      return null;
-    }
-  }
-
-  /**
-   * Creates the directory named by this abstract pathname, including any necessary
-   * but nonexistent parent directories.
-   *
-   * @return true if and only if the directory was created, along with any necessary parent directories; false otherwise
-   */
-  @Override
-  public boolean mkdirs() throws IOException {
-    return fs.mkdirs(path);
-  }
-
-  /**
-   * @return Length of file.
-   */
-  @Override
-  public long length() throws IOException {
-    return fs.getFileStatus(path).getLen();
-  }
-
-  @Override
-  public long lastModified() throws IOException {
-    return fs.getFileStatus(path).getModificationTime();
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/main/java/org/apache/twill/filesystem/HDFSLocationFactory.java
----------------------------------------------------------------------
diff --git a/yarn/src/main/java/org/apache/twill/filesystem/HDFSLocationFactory.java b/yarn/src/main/java/org/apache/twill/filesystem/HDFSLocationFactory.java
deleted file mode 100644
index fa79391..0000000
--- a/yarn/src/main/java/org/apache/twill/filesystem/HDFSLocationFactory.java
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.filesystem;
-
-import com.google.common.base.Throwables;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-
-import java.io.IOException;
-import java.net.URI;
-
-/**
- * A {@link LocationFactory} that creates HDFS {@link Location}.
- */
-public final class HDFSLocationFactory implements LocationFactory {
-
-  private final FileSystem fileSystem;
-  private final String pathBase;
-
-  public HDFSLocationFactory(Configuration configuration) {
-    this(getFileSystem(configuration));
-  }
-  
-  public HDFSLocationFactory(Configuration configuration, String pathBase) {
-    this(getFileSystem(configuration), pathBase);
-  }
-
-  public HDFSLocationFactory(FileSystem fileSystem) {
-    this(fileSystem, "/");
-  }
-
-  public HDFSLocationFactory(FileSystem fileSystem, String pathBase) {
-    String base = pathBase.equals("/") ? "" : pathBase;
-    base = base.endsWith("/") ? base.substring(0, base.length() - 1) : base;
-
-    this.fileSystem = fileSystem;
-    this.pathBase = base;
-  }
-
-  @Override
-  public Location create(String path) {
-    if (path.startsWith("/")) {
-      path = path.substring(1);
-    }
-    return new HDFSLocation(fileSystem, new Path(fileSystem.getUri() + "/" + pathBase + "/" + path));
-  }
-
-  @Override
-  public Location create(URI uri) {
-    if (!uri.toString().startsWith(fileSystem.getUri().toString())) {
-      // It's a full URI
-      return new HDFSLocation(fileSystem, new Path(uri));
-    }
-    if (uri.isAbsolute()) {
-      return new HDFSLocation(fileSystem, new Path(fileSystem.getUri() + uri.getPath()));
-    }
-    return new HDFSLocation(fileSystem, new Path(fileSystem.getUri() + "/" + pathBase + "/" + uri.getPath()));
-  }
-
-  @Override
-  public Location getHomeLocation() {
-    return new HDFSLocation(fileSystem, fileSystem.getHomeDirectory());
-  }
-
-  /**
-   * Returns the underlying {@link FileSystem} object.
-   */
-  public FileSystem getFileSystem() {
-    return fileSystem;
-  }
-
-  private static FileSystem getFileSystem(Configuration configuration) {
-    try {
-      return FileSystem.get(configuration);
-    } catch (IOException e) {
-      throw Throwables.propagate(e);
-    }
-  }
-}

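A minimal usage sketch for the HDFSLocationFactory/HDFSLocation pair above, assuming fs.defaultFS in the Configuration points at a reachable HDFS cluster; the "/twill" path base and the file name are illustrative:

    import java.io.OutputStream;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.twill.filesystem.HDFSLocationFactory;
    import org.apache.twill.filesystem.Location;

    public final class LocationExample {
      public static void main(String[] args) throws Exception {
        // Create a Location rooted under "/twill" and write a small file to it.
        HDFSLocationFactory factory = new HDFSLocationFactory(new Configuration(), "/twill");
        Location location = factory.create("examples/hello.txt");
        OutputStream out = location.getOutputStream();
        try {
          out.write("hello".getBytes());
        } finally {
          out.close();
        }
        System.out.println(location.toURI() + " length=" + location.length());
      }
    }
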
http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/main/java/org/apache/twill/filesystem/package-info.java
----------------------------------------------------------------------
diff --git a/yarn/src/main/java/org/apache/twill/filesystem/package-info.java b/yarn/src/main/java/org/apache/twill/filesystem/package-info.java
deleted file mode 100644
index 2ca09fd..0000000
--- a/yarn/src/main/java/org/apache/twill/filesystem/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * Contains HDFS location classes.
- */
-package org.apache.twill.filesystem;

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/main/java/org/apache/twill/internal/AbstractTwillService.java
----------------------------------------------------------------------
diff --git a/yarn/src/main/java/org/apache/twill/internal/AbstractTwillService.java b/yarn/src/main/java/org/apache/twill/internal/AbstractTwillService.java
deleted file mode 100644
index 47dd07c..0000000
--- a/yarn/src/main/java/org/apache/twill/internal/AbstractTwillService.java
+++ /dev/null
@@ -1,141 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal;
-
-import org.apache.twill.filesystem.Location;
-import org.apache.twill.internal.state.Message;
-import org.apache.twill.internal.state.SystemMessages;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.Service;
-import org.apache.hadoop.security.Credentials;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.BufferedInputStream;
-import java.io.DataInputStream;
-import java.io.IOException;
-import java.util.concurrent.Executor;
-
-/**
- * A base implementation of {@link Service} that handles secure token updates.
- */
-public abstract class AbstractTwillService implements Service {
-
-  private static final Logger LOG = LoggerFactory.getLogger(AbstractTwillService.class);
-
-  protected final Location applicationLocation;
-
-  protected volatile Credentials credentials;
-
-  protected AbstractTwillService(Location applicationLocation) {
-    this.applicationLocation = applicationLocation;
-  }
-
-  protected abstract Service getServiceDelegate();
-
-  /**
-   * Returns the location of the secure store, or {@code null} if not running in secure mode or if an error
-   * occurs while trying to acquire the location.
-   */
-  protected final Location getSecureStoreLocation() {
-    if (!UserGroupInformation.isSecurityEnabled()) {
-      return null;
-    }
-    try {
-      return applicationLocation.append(Constants.Files.CREDENTIALS);
-    } catch (IOException e) {
-      LOG.error("Failed to create secure store location.", e);
-      return null;
-    }
-  }
-
-  /**
-   * Attempts to handle secure store update.
-   *
-   * @param message The message received
-   * @return {@code true} if the message requests a secure store update, {@code false} otherwise.
-   */
-  protected final boolean handleSecureStoreUpdate(Message message) {
-    if (!SystemMessages.SECURE_STORE_UPDATED.equals(message)) {
-      return false;
-    }
-
-    // If not in secure mode, simply ignore the message.
-    if (!UserGroupInformation.isSecurityEnabled()) {
-      return true;
-    }
-
-    try {
-      Credentials credentials = new Credentials();
-      Location location = getSecureStoreLocation();
-      DataInputStream input = new DataInputStream(new BufferedInputStream(location.getInputStream()));
-      try {
-        credentials.readTokenStorageStream(input);
-      } finally {
-        input.close();
-      }
-
-      UserGroupInformation.getCurrentUser().addCredentials(credentials);
-      this.credentials = credentials;
-
-      LOG.info("Secure store updated from {}.", location.toURI());
-
-    } catch (Throwable t) {
-      LOG.error("Failed to update secure store.", t);
-    }
-
-    return true;
-  }
-
-  @Override
-  public final ListenableFuture<State> start() {
-    return getServiceDelegate().start();
-  }
-
-  @Override
-  public final State startAndWait() {
-    return Futures.getUnchecked(start());
-  }
-
-  @Override
-  public final boolean isRunning() {
-    return getServiceDelegate().isRunning();
-  }
-
-  @Override
-  public final State state() {
-    return getServiceDelegate().state();
-  }
-
-  @Override
-  public final ListenableFuture<State> stop() {
-    return getServiceDelegate().stop();
-  }
-
-  @Override
-  public final State stopAndWait() {
-    return Futures.getUnchecked(stop());
-  }
-
-  @Override
-  public final void addListener(Listener listener, Executor executor) {
-    getServiceDelegate().addListener(listener, executor);
-  }
-}

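A hypothetical producer-side sketch for the secure store that handleSecureStoreUpdate() above consumes: it writes the credentials file to the same location that the service later reads back. The helper name writeSecureStore and the use of the current user's credentials are assumptions, not Twill API:

    import java.io.BufferedOutputStream;
    import java.io.DataOutputStream;
    import org.apache.hadoop.security.Credentials;
    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.twill.filesystem.Location;
    import org.apache.twill.internal.Constants;

    public final class SecureStoreWriterExample {

      // Hypothetical helper: writes the credentials file that handleSecureStoreUpdate() later reads.
      static void writeSecureStore(Location applicationLocation) throws Exception {
        Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
        Location location = applicationLocation.append(Constants.Files.CREDENTIALS);
        DataOutputStream output = new DataOutputStream(new BufferedOutputStream(location.getOutputStream()));
        try {
          credentials.writeTokenStorageToStream(output);
        } finally {
          output.close();
        }
      }
    }
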
http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/main/java/org/apache/twill/internal/ServiceMain.java
----------------------------------------------------------------------
diff --git a/yarn/src/main/java/org/apache/twill/internal/ServiceMain.java b/yarn/src/main/java/org/apache/twill/internal/ServiceMain.java
deleted file mode 100644
index 4ffb023..0000000
--- a/yarn/src/main/java/org/apache/twill/internal/ServiceMain.java
+++ /dev/null
@@ -1,201 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal;
-
-import org.apache.twill.common.Services;
-import org.apache.twill.filesystem.HDFSLocationFactory;
-import org.apache.twill.filesystem.LocalLocationFactory;
-import org.apache.twill.filesystem.Location;
-import org.apache.twill.internal.logging.KafkaAppender;
-import org.apache.twill.zookeeper.ZKClientService;
-import ch.qos.logback.classic.LoggerContext;
-import ch.qos.logback.classic.joran.JoranConfigurator;
-import ch.qos.logback.classic.util.ContextInitializer;
-import ch.qos.logback.core.joran.spi.JoranException;
-import com.google.common.base.Throwables;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.Service;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.slf4j.ILoggerFactory;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.xml.sax.InputSource;
-
-import java.io.File;
-import java.io.StringReader;
-import java.net.URI;
-import java.util.concurrent.ExecutionException;
-
-/**
- * Base class for main classes that start a service.
- */
-public abstract class ServiceMain {
-
-  private static final Logger LOG = LoggerFactory.getLogger(ServiceMain.class);
-
-  static {
-    // This is to work around detection of HADOOP_HOME (HADOOP-9422)
-    if (!System.getenv().containsKey("HADOOP_HOME") && System.getProperty("hadoop.home.dir") == null) {
-      System.setProperty("hadoop.home.dir", new File("").getAbsolutePath());
-    }
-  }
-
-  protected final void doMain(final ZKClientService zkClientService,
-                              final Service service) throws ExecutionException, InterruptedException {
-    configureLogger();
-
-    final String serviceName = service.toString();
-    Runtime.getRuntime().addShutdownHook(new Thread() {
-      @Override
-      public void run() {
-        Services.chainStop(service, zkClientService);
-      }
-    });
-
-    // Listener for state changes of the service
-    ListenableFuture<Service.State> completion = Services.getCompletionFuture(service);
-
-    // Starts the service
-    LOG.info("Starting service {}.", serviceName);
-    Futures.getUnchecked(Services.chainStart(zkClientService, service));
-    LOG.info("Service {} started.", serviceName);
-    try {
-      completion.get();
-      LOG.info("Service {} completed.", serviceName);
-    } catch (Throwable t) {
-      LOG.warn("Exception thrown from service {}.", serviceName, t);
-      throw Throwables.propagate(t);
-    } finally {
-      ILoggerFactory loggerFactory = LoggerFactory.getILoggerFactory();
-      if (loggerFactory instanceof LoggerContext) {
-        ((LoggerContext) loggerFactory).stop();
-      }
-    }
-  }
-
-  protected abstract String getHostname();
-
-  protected abstract String getKafkaZKConnect();
-
-  /**
-   * Returns the {@link Location} for the application based on the env {@link EnvKeys#TWILL_APP_DIR}.
-   */
-  protected static Location createAppLocation(Configuration conf) {
-    // Note: Creating the LocationFactory based on the URI scheme is a bit hacky; refactor this later.
-    URI appDir = URI.create(System.getenv(EnvKeys.TWILL_APP_DIR));
-
-    try {
-      if ("file".equals(appDir.getScheme())) {
-        return new LocalLocationFactory().create(appDir);
-      }
-
-      if ("hdfs".equals(appDir.getScheme())) {
-        if (UserGroupInformation.isSecurityEnabled()) {
-          return new HDFSLocationFactory(FileSystem.get(conf)).create(appDir);
-        }
-
-        String fsUser = System.getenv(EnvKeys.TWILL_FS_USER);
-        if (fsUser == null) {
-          throw new IllegalStateException("Missing environment variable " + EnvKeys.TWILL_FS_USER);
-        }
-        return new HDFSLocationFactory(FileSystem.get(FileSystem.getDefaultUri(conf), conf, fsUser)).create(appDir);
-      }
-
-      LOG.warn("Unsupported location type {}.", appDir);
-      throw new IllegalArgumentException("Unsupported location type " + appDir);
-
-    } catch (Exception e) {
-      LOG.error("Failed to create application location for {}.", appDir);
-      throw Throwables.propagate(e);
-    }
-  }
-
-  private void configureLogger() {
-    // Check if SLF4J is bound to logback in the current environment
-    ILoggerFactory loggerFactory = LoggerFactory.getILoggerFactory();
-    if (!(loggerFactory instanceof LoggerContext)) {
-      return;
-    }
-
-    LoggerContext context = (LoggerContext) loggerFactory;
-    context.reset();
-    JoranConfigurator configurator = new JoranConfigurator();
-    configurator.setContext(context);
-
-    try {
-      File twillLogback = new File(Constants.Files.LOGBACK_TEMPLATE);
-      if (twillLogback.exists()) {
-        configurator.doConfigure(twillLogback);
-      }
-      new ContextInitializer(context).autoConfig();
-    } catch (JoranException e) {
-      throw Throwables.propagate(e);
-    }
-    doConfigure(configurator, getLogConfig(getLoggerLevel(context.getLogger(Logger.ROOT_LOGGER_NAME))));
-  }
-
-  private void doConfigure(JoranConfigurator configurator, String config) {
-    try {
-      configurator.doConfigure(new InputSource(new StringReader(config)));
-    } catch (Exception e) {
-      throw Throwables.propagate(e);
-    }
-  }
-
-  private String getLogConfig(String rootLevel) {
-    return
-      "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" +
-      "<configuration>\n" +
-      "    <appender name=\"KAFKA\" class=\"" + KafkaAppender.class.getName() + "\">\n" +
-      "        <topic>" + Constants.LOG_TOPIC + "</topic>\n" +
-      "        <hostname>" + getHostname() + "</hostname>\n" +
-      "        <zookeeper>" + getKafkaZKConnect() + "</zookeeper>\n" +
-      "    </appender>\n" +
-      "    <logger name=\"org.apache.twill.internal.logging\" additivity=\"false\" />\n" +
-      "    <root level=\"" + rootLevel + "\">\n" +
-      "        <appender-ref ref=\"KAFKA\"/>\n" +
-      "    </root>\n" +
-      "</configuration>";
-  }
-
-  private String getLoggerLevel(Logger logger) {
-    if (logger instanceof ch.qos.logback.classic.Logger) {
-      return ((ch.qos.logback.classic.Logger) logger).getLevel().toString();
-    }
-
-    if (logger.isTraceEnabled()) {
-      return "TRACE";
-    }
-    if (logger.isDebugEnabled()) {
-      return "DEBUG";
-    }
-    if (logger.isInfoEnabled()) {
-      return "INFO";
-    }
-    if (logger.isWarnEnabled()) {
-      return "WARN";
-    }
-    if (logger.isErrorEnabled()) {
-      return "ERROR";
-    }
-    return "OFF";
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/main/java/org/apache/twill/internal/appmaster/ApplicationMasterLiveNodeData.java
----------------------------------------------------------------------
diff --git a/yarn/src/main/java/org/apache/twill/internal/appmaster/ApplicationMasterLiveNodeData.java b/yarn/src/main/java/org/apache/twill/internal/appmaster/ApplicationMasterLiveNodeData.java
deleted file mode 100644
index 028df7b..0000000
--- a/yarn/src/main/java/org/apache/twill/internal/appmaster/ApplicationMasterLiveNodeData.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.appmaster;
-
-/**
- * Represents data being stored in the live node of the application master.
- */
-public final class ApplicationMasterLiveNodeData {
-
-  private final int appId;
-  private final long appIdClusterTime;
-  private final String containerId;
-
-  public ApplicationMasterLiveNodeData(int appId, long appIdClusterTime, String containerId) {
-    this.appId = appId;
-    this.appIdClusterTime = appIdClusterTime;
-    this.containerId = containerId;
-  }
-
-  public int getAppId() {
-    return appId;
-  }
-
-  public long getAppIdClusterTime() {
-    return appIdClusterTime;
-  }
-
-  public String getContainerId() {
-    return containerId;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-twill/blob/35dfccc4/yarn/src/main/java/org/apache/twill/internal/appmaster/ApplicationMasterMain.java
----------------------------------------------------------------------
diff --git a/yarn/src/main/java/org/apache/twill/internal/appmaster/ApplicationMasterMain.java b/yarn/src/main/java/org/apache/twill/internal/appmaster/ApplicationMasterMain.java
deleted file mode 100644
index b34a7a2..0000000
--- a/yarn/src/main/java/org/apache/twill/internal/appmaster/ApplicationMasterMain.java
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.twill.internal.appmaster;
-
-import org.apache.twill.api.RunId;
-import org.apache.twill.internal.Constants;
-import org.apache.twill.internal.EnvKeys;
-import org.apache.twill.internal.RunIds;
-import org.apache.twill.internal.ServiceMain;
-import org.apache.twill.internal.yarn.VersionDetectYarnAMClientFactory;
-import org.apache.twill.zookeeper.RetryStrategies;
-import org.apache.twill.zookeeper.ZKClientService;
-import org.apache.twill.zookeeper.ZKClientServices;
-import org.apache.twill.zookeeper.ZKClients;
-import com.google.common.util.concurrent.Service;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-
-import java.io.File;
-import java.net.InetAddress;
-import java.net.UnknownHostException;
-import java.util.concurrent.TimeUnit;
-
-/**
- * Main class for launching {@link ApplicationMasterService}.
- */
-public final class ApplicationMasterMain extends ServiceMain {
-
-  private final String kafkaZKConnect;
-
-  private ApplicationMasterMain(String kafkaZKConnect) {
-    this.kafkaZKConnect = kafkaZKConnect;
-  }
-
-  /**
-   * Starts the application master.
-   */
-  public static void main(String[] args) throws Exception {
-    String zkConnect = System.getenv(EnvKeys.TWILL_ZK_CONNECT);
-    File twillSpec = new File(Constants.Files.TWILL_SPEC);
-    RunId runId = RunIds.fromString(System.getenv(EnvKeys.TWILL_RUN_ID));
-
-    ZKClientService zkClientService =
-      ZKClientServices.delegate(
-        ZKClients.reWatchOnExpire(
-          ZKClients.retryOnFailure(
-            ZKClientService.Builder.of(zkConnect).build(),
-            RetryStrategies.fixDelay(1, TimeUnit.SECONDS))));
-
-    Configuration conf = new YarnConfiguration(new HdfsConfiguration(new Configuration()));
-    Service service = new ApplicationMasterService(runId, zkClientService, twillSpec,
-                                                   new VersionDetectYarnAMClientFactory(conf), createAppLocation(conf));
-    new ApplicationMasterMain(String.format("%s/%s/kafka", zkConnect, runId.getId())).doMain(zkClientService, service);
-  }
-
-  @Override
-  protected String getHostname() {
-    try {
-      return InetAddress.getLocalHost().getCanonicalHostName();
-    } catch (UnknownHostException e) {
-      return "unknown";
-    }
-  }
-
-  @Override
-  protected String getKafkaZKConnect() {
-    return kafkaZKConnect;
-  }
-}