Posted to commits@hdt.apache.org by ad...@apache.org on 2013/01/28 18:03:39 UTC

git commit: pushed some classes from ui to core to allow reuse in the dfs plugins. Needs more work; things aren't cleanly separated yet.

Updated Branches:
  refs/heads/master 9188509fd -> cd29c94d7


pushed some classes from ui to core to allow reuse in the dfs plugins.
Needs more work; things aren't cleanly separated yet.

Project: http://git-wip-us.apache.org/repos/asf/incubator-hdt/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hdt/commit/cd29c94d
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hdt/tree/cd29c94d
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hdt/diff/cd29c94d

Branch: refs/heads/master
Commit: cd29c94d70c34fdc62c26eacdfe371f0aab8809e
Parents: 9188509
Author: adamb <ad...@apache.org>
Authored: Mon Jan 28 11:03:23 2013 -0600
Committer: adamb <ad...@apache.org>
Committed: Mon Jan 28 11:03:23 2013 -0600

----------------------------------------------------------------------
 org.apache.hdt.core/.classpath                     |    1 +
 org.apache.hdt.core/META-INF/MANIFEST.MF           |    8 +-
 .../src/org/apache/hdt/core/Activator.java         |   40 +-
 .../src/org/apache/hdt/core/cluster/ConfProp.java  |  147 ++++
 .../org/apache/hdt/core/cluster/HadoopCluster.java |  521 +++++++++++++++
 .../src/org/apache/hdt/core/cluster/HadoopJob.java |  349 ++++++++++
 .../org/apache/hdt/core/cluster/IJobListener.java  |   38 +
 .../apache/hdt/core/cluster/utils/JarModule.java   |  146 ++++
 .../hdt/core/dialogs/ErrorMessageDialog.java       |   45 ++
 .../apache/hdt/ui/actions/EditLocationAction.java  |    2 +-
 .../src/org/apache/hdt/ui/cluster/ConfProp.java    |  147 ----
 .../org/apache/hdt/ui/cluster/HadoopCluster.java   |  518 --------------
 .../src/org/apache/hdt/ui/cluster/HadoopJob.java   |  349 ----------
 .../hdt/ui/cluster/IHadoopClusterListener.java     |    2 +-
 .../org/apache/hdt/ui/cluster/IJobListener.java    |   38 -
 .../org/apache/hdt/ui/cluster/ServerRegistry.java  |    2 +-
 .../org/apache/hdt/ui/cluster/utils/JarModule.java |  146 ----
 .../apache/hdt/ui/dialogs/ErrorMessageDialog.java  |   45 --
 .../src/org/apache/hdt/ui/views/ClusterView.java   |    8 +-
 .../hdt/ui/wizards/HadoopLocationWizard.java       |    4 +-
 20 files changed, 1291 insertions(+), 1265 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/cd29c94d/org.apache.hdt.core/.classpath
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/.classpath b/org.apache.hdt.core/.classpath
index ad32c83..355df07 100644
--- a/org.apache.hdt.core/.classpath
+++ b/org.apache.hdt.core/.classpath
@@ -3,5 +3,6 @@
 	<classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-1.6"/>
 	<classpathentry kind="con" path="org.eclipse.pde.core.requiredPlugins"/>
 	<classpathentry kind="src" path="src"/>
+	<classpathentry kind="lib" path="/Users/amberry/tools/hadoop-1.0.4/hadoop-core-1.0.4.jar"/>
 	<classpathentry kind="output" path="bin"/>
 </classpath>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/cd29c94d/org.apache.hdt.core/META-INF/MANIFEST.MF
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/META-INF/MANIFEST.MF b/org.apache.hdt.core/META-INF/MANIFEST.MF
index b2eae6b..e38d50f 100644
--- a/org.apache.hdt.core/META-INF/MANIFEST.MF
+++ b/org.apache.hdt.core/META-INF/MANIFEST.MF
@@ -9,8 +9,14 @@ Require-Bundle: org.eclipse.core.runtime,
  org.eclipse.core.resources,
  org.eclipse.jdt.core,
  org.eclipse.core.expressions,
- org.eclipse.wst.server.core
+ org.eclipse.wst.server.core,
+ org.eclipse.swt,
+ org.eclipse.jface,
+ org.eclipse.ui,
+ org.eclipse.jdt.ui
 Bundle-RequiredExecutionEnvironment: JavaSE-1.6
 Bundle-ActivationPolicy: lazy
 Export-Package: org.apache.hdt.core,
+ org.apache.hdt.core.cluster,
+ org.apache.hdt.core.cluster.utils,
  org.apache.hdt.core.natures
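
With the cluster packages now exported from org.apache.hdt.core, a downstream plug-in (such as the dfs plugins this commit is preparing for) could consume them with a manifest entry along these lines; the snippet is illustrative, not part of the commit:

    Require-Bundle: org.apache.hdt.core,
     org.eclipse.core.runtime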

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/cd29c94d/org.apache.hdt.core/src/org/apache/hdt/core/Activator.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/Activator.java b/org.apache.hdt.core/src/org/apache/hdt/core/Activator.java
index 624d949..00ed47e 100644
--- a/org.apache.hdt.core/src/org/apache/hdt/core/Activator.java
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/Activator.java
@@ -1,31 +1,47 @@
 package org.apache.hdt.core;
 
-import org.osgi.framework.BundleActivator;
+import org.eclipse.ui.plugin.AbstractUIPlugin;
 import org.osgi.framework.BundleContext;
 
-public class Activator implements BundleActivator {
+public class Activator extends AbstractUIPlugin {
 
-	private static BundleContext context;
-	public static final String PLUGIN_ID = "org.apache.hdt.core";
+	// The plug-in ID
+	public static final String PLUGIN_ID = "org.apache.hdt.core"; //$NON-NLS-1$
 
-	static BundleContext getContext() {
-		return context;
+	// The shared instance
+	private static Activator plugin;
+	
+	/**
+	 * The constructor
+	 */
+	public Activator() {
 	}
 
 	/*
 	 * (non-Javadoc)
-	 * @see org.osgi.framework.BundleActivator#start(org.osgi.framework.BundleContext)
+	 * @see org.eclipse.ui.plugin.AbstractUIPlugin#start(org.osgi.framework.BundleContext)
 	 */
-	public void start(BundleContext bundleContext) throws Exception {
-		Activator.context = bundleContext;
+	public void start(BundleContext context) throws Exception {
+		super.start(context);
+		plugin = this;
 	}
 
 	/*
 	 * (non-Javadoc)
-	 * @see org.osgi.framework.BundleActivator#stop(org.osgi.framework.BundleContext)
+	 * @see org.eclipse.ui.plugin.AbstractUIPlugin#stop(org.osgi.framework.BundleContext)
+	 */
+	public void stop(BundleContext context) throws Exception {
+		plugin = null;
+		super.stop(context);
+	}
+
+	/**
+	 * Returns the shared instance
+	 *
+	 * @return the shared instance
 	 */
-	public void stop(BundleContext bundleContext) throws Exception {
-		Activator.context = null;
+	public static Activator getDefault() {
+		return plugin;
 	}
 
 }
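
The activator now follows the standard AbstractUIPlugin singleton pattern. A brief sketch, not part of the commit, of how dependent code reaches the shared instance (JarModule below does exactly this to obtain a scratch directory):

    // Resolve the plug-in's private state directory via the shared instance
    org.eclipse.core.runtime.IPath stateDir =
        Activator.getDefault().getStateLocation();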

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/cd29c94d/org.apache.hdt.core/src/org/apache/hdt/core/cluster/ConfProp.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/cluster/ConfProp.java b/org.apache.hdt.core/src/org/apache/hdt/core/cluster/ConfProp.java
new file mode 100644
index 0000000..1d8b551
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/cluster/ConfProp.java
@@ -0,0 +1,147 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.core.cluster;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.hadoop.conf.Configuration;
+
+public enum ConfProp {
+  /**
+   * Property name for the Hadoop location name
+   */
+  PI_LOCATION_NAME(true, "location.name", "New Hadoop location"),
+
+  /**
+   * Property name for the master host name (the Job tracker)
+   */
+  PI_JOB_TRACKER_HOST(true, "jobtracker.host", "localhost"),
+
+  /**
+   * Property name for the DFS master host name (the Name node)
+   */
+  PI_NAME_NODE_HOST(true, "namenode.host", "localhost"),
+
+  /**
+   * Property name for the installation directory on the master node
+   */
+  // PI_INSTALL_DIR(true, "install.dir", "/dir/hadoop-version/"),
+  /**
+   * User name to use for Hadoop operations
+   */
+  PI_USER_NAME(true, "user.name", System.getProperty("user.name",
+      "who are you?")),
+
+  /**
+   * Property name for SOCKS proxy activation
+   */
+  PI_SOCKS_PROXY_ENABLE(true, "socks.proxy.enable", "no"),
+
+  /**
+   * Property name for the SOCKS proxy host
+   */
+  PI_SOCKS_PROXY_HOST(true, "socks.proxy.host", "host"),
+
+  /**
+   * Property name for the SOCKS proxy port
+   */
+  PI_SOCKS_PROXY_PORT(true, "socks.proxy.port", "1080"),
+
+  /**
+   * TCP port number for the name node
+   */
+  PI_NAME_NODE_PORT(true, "namenode.port", "50040"),
+
+  /**
+   * TCP port number for the job tracker
+   */
+  PI_JOB_TRACKER_PORT(true, "jobtracker.port", "50020"),
+
+  /**
+   * Are the Map/Reduce and the Distributed FS masters hosted on the same
+   * machine?
+   */
+  PI_COLOCATE_MASTERS(true, "masters.colocate", "yes"),
+
+  /**
+   * Property name for naming the job tracker (URI). This property is related
+   * to {@link #PI_MASTER_HOST_NAME}
+   */
+  JOB_TRACKER_URI(false, "mapreduce.jobtracker.address", "localhost:50020"),
+
+  /**
+   * Property name for naming the default file system (URI).
+   */
+  FS_DEFAULT_URI(false, "fs.default.name", "hdfs://localhost:50040/"),
+
+  /**
+   * Property name for the default socket factory:
+   */
+  SOCKET_FACTORY_DEFAULT(false, "hadoop.rpc.socket.factory.class.default",
+      "org.apache.hadoop.net.StandardSocketFactory"),
+
+  /**
+   * Property name for the SOCKS server URI.
+   */
+  SOCKS_SERVER(false, "hadoop.socks.server", "host:1080"),
+
+  ;
+
+  /**
+   * Map <property name> -> ConfProp
+   */
+  private static Map<String, ConfProp> map;
+
+  private static synchronized void registerProperty(String name,
+      ConfProp prop) {
+
+    if (ConfProp.map == null)
+      ConfProp.map = new HashMap<String, ConfProp>();
+
+    ConfProp.map.put(name, prop);
+  }
+
+  public static ConfProp getByName(String propName) {
+    return map.get(propName);
+  }
+
+  public final String name;
+
+  public final String defVal;
+
+  ConfProp(boolean internal, String name, String defVal) {
+    if (internal)
+      name = "eclipse.plug-in." + name;
+    this.name = name;
+    this.defVal = defVal;
+
+    ConfProp.registerProperty(name, this);
+  }
+
+  String get(Configuration conf) {
+    return conf.get(name);
+  }
+
+  void set(Configuration conf, String value) {
+    assert value != null;
+    conf.set(name, value);
+  }
+
+}
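
For readers skimming the diff, a minimal sketch (not part of this commit) of how the relocated ConfProp enum is driven through HadoopCluster's public accessors; the location name is an arbitrary example value:

    import org.apache.hdt.core.cluster.ConfProp;
    import org.apache.hdt.core.cluster.HadoopCluster;

    public class ConfPropExample {
      public static void main(String[] args) {
        // A fresh cluster is pre-populated with every ConfProp default
        HadoopCluster cluster = new HadoopCluster();
        cluster.setConfProp(ConfProp.PI_LOCATION_NAME, "my-test-location");
        // Internal properties are registered under the eclipse.plug-in. prefix
        System.out.println(ConfProp.getByName("eclipse.plug-in.location.name"));
        System.out.println(cluster.getConfProp(ConfProp.PI_LOCATION_NAME));
      }
    }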

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/cd29c94d/org.apache.hdt.core/src/org/apache/hdt/core/cluster/HadoopCluster.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/cluster/HadoopCluster.java b/org.apache.hdt.core/src/org/apache/hdt/core/cluster/HadoopCluster.java
new file mode 100644
index 0000000..a04bda6
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/cluster/HadoopCluster.java
@@ -0,0 +1,521 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.core.cluster;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+import java.util.logging.Logger;
+
+import javax.xml.parsers.DocumentBuilder;
+import javax.xml.parsers.DocumentBuilderFactory;
+import javax.xml.parsers.ParserConfigurationException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hdt.core.Activator;
+import org.apache.hdt.core.cluster.ConfProp;
+import org.apache.hdt.core.cluster.HadoopJob;
+import org.apache.hdt.core.cluster.IJobListener;
+import org.apache.hdt.core.cluster.utils.JarModule;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.mapred.JobClient;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.JobID;
+import org.apache.hadoop.mapred.JobStatus;
+import org.apache.hadoop.mapred.RunningJob;
+import org.eclipse.core.runtime.IProgressMonitor;
+import org.eclipse.core.runtime.IStatus;
+import org.eclipse.core.runtime.Status;
+import org.eclipse.core.runtime.jobs.Job;
+import org.eclipse.swt.widgets.Display;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+import org.w3c.dom.Node;
+import org.w3c.dom.NodeList;
+import org.w3c.dom.Text;
+import org.xml.sax.SAXException;
+
+/**
+ * Representation of a Hadoop location, i.e. the master node (NameNode,
+ * JobTracker).
+ * 
+ * <p>
+ * This class does not create any SSH connection anymore. Tunneling must be
+ * set up outside of Eclipse for now (using Putty or <tt>ssh -D&lt;port&gt;
+ * &lt;host&gt;</tt>)
+ * 
+ * <p>
+ * <em> TODO </em>
+ * <li> Disable the updater if a location becomes unreachable or fails for
+ * too long
+ * <li> Stop the updater on location's disposal/removal
+ */
+
+public class HadoopCluster {
+
+  /**
+   * Frequency of location status observations expressed as the delay in ms
+   * between each observation
+   * 
+   * TODO Add a preference parameter for this
+   */
+  protected static final long STATUS_OBSERVATION_DELAY = 1500;
+
+  /**
+   * Background job that periodically refreshes the status of this location's jobs
+   */
+  public class LocationStatusUpdater extends Job {
+
+    JobClient client = null;
+
+    /**
+     * Set up the updater
+     */
+    public LocationStatusUpdater() {
+      super("Map/Reduce location status updater");
+      this.setSystem(true);
+    }
+
+    /* @inheritDoc */
+    @Override
+    protected IStatus run(IProgressMonitor monitor) {
+      if (client == null) {
+        try {
+          client = HadoopCluster.this.getJobClient();
+
+        } catch (IOException ioe) {
+          client = null;
+          return new Status(Status.ERROR, Activator.PLUGIN_ID, 0,
+              "Cannot connect to the Map/Reduce location: "
+                            + HadoopCluster.this.getLocationName(),
+                            ioe);
+        }
+      }
+
+      try {
+        // Set of all known existing Job IDs we want fresh info of
+        Set<JobID> missingJobIds =
+            new HashSet<JobID>(runningJobs.keySet());
+
+        JobStatus[] jstatus = client.jobsToComplete();
+        for (JobStatus status : jstatus) {
+
+          JobID jobId = status.getJobID();
+          missingJobIds.remove(jobId);
+
+          HadoopJob hJob;
+          synchronized (HadoopCluster.this.runningJobs) {
+            hJob = runningJobs.get(jobId);
+            if (hJob == null) {
+              // Unknown job, create an entry
+              RunningJob running = client.getJob(jobId);
+              hJob =
+                  new HadoopJob(HadoopCluster.this, jobId, running, status);
+              newJob(hJob);
+            }
+          }
+
+          // Update HadoopJob with fresh info
+          updateJob(hJob, status);
+        }
+
+        // Ask explicitly for fresh info for these Job IDs
+        for (JobID jobId : missingJobIds) {
+          HadoopJob hJob = runningJobs.get(jobId);
+          if (!hJob.isCompleted())
+            updateJob(hJob, null);
+        }
+
+      } catch (IOException ioe) {
+        client = null;
+        return new Status(Status.ERROR, Activator.PLUGIN_ID, 0,
+            "Cannot retrieve running Jobs on location: "
+                          + HadoopCluster.this.getLocationName(), ioe);
+      }
+
+      // Schedule the next observation
+      schedule(STATUS_OBSERVATION_DELAY);
+
+      return Status.OK_STATUS;
+    }
+
+    /**
+     * Stores and makes the new job available
+     * 
+     * @param data the new job to store
+     */
+    private void newJob(final HadoopJob data) {
+      runningJobs.put(data.getJobID(), data);
+
+      Display.getDefault().asyncExec(new Runnable() {
+        public void run() {
+          fireJobAdded(data);
+        }
+      });
+    }
+
+    /**
+     * Updates the status of a job
+     * 
+     * @param job the job to update
+     * @param status the freshly polled status, or null when none is available
+     */
+    private void updateJob(final HadoopJob job, JobStatus status) {
+      job.update(status);
+
+      Display.getDefault().asyncExec(new Runnable() {
+        public void run() {
+          fireJobChanged(job);
+        }
+      });
+    }
+
+  }
+
+  static Logger log = Logger.getLogger(HadoopCluster.class.getName());
+
+  /**
+   * Hadoop configuration of the location. Also contains specific parameters
+   * for the plug-in. These parameters are prefixed with eclipse.plug-in.*
+   */
+  private Configuration conf;
+
+  /**
+   * Jobs listeners
+   */
+  private Set<IJobListener> jobListeners = new HashSet<IJobListener>();
+
+  /**
+   * Jobs running on this location. The keys of this map are the Job IDs.
+   */
+  private transient Map<JobID, HadoopJob> runningJobs =
+      Collections.synchronizedMap(new TreeMap<JobID, HadoopJob>());
+
+  /**
+   * Status updater for this location
+   */
+  private LocationStatusUpdater statusUpdater;
+
+  // state and status - transient
+  private transient String state = "";
+
+  /**
+   * Creates a new default Hadoop location
+   */
+  public HadoopCluster() {
+    this.conf = new Configuration();
+    this.addPluginConfigDefaultProperties();
+  }
+
+  /**
+   * Creates a location from a file
+   * 
+   * @throws IOException
+   * @throws SAXException
+   * @throws ParserConfigurationException
+   */
+  public HadoopCluster(File file) throws ParserConfigurationException,
+      SAXException, IOException {
+
+    this.conf = new Configuration();
+    this.addPluginConfigDefaultProperties();
+    this.loadFromXML(file);
+  }
+
+  /**
+   * Create a new Hadoop location by copying an already existing one.
+   * 
+   * @param existing the location to copy
+   */
+  public HadoopCluster(HadoopCluster existing) {
+    this();
+    this.load(existing);
+  }
+
+  public void addJobListener(IJobListener l) {
+    jobListeners.add(l);
+  }
+
+  public void dispose() {
+    // TODO close DFS connections?
+  }
+
+  /**
+   * List all elements that should be present in the Server window (all
+   * servers and all jobs running on each server)
+   * 
+   * @return collection of jobs for this location
+   */
+  public Collection<HadoopJob> getJobs() {
+    startStatusUpdater();
+    return this.runningJobs.values();
+  }
+
+  /**
+   * Remove the given job from the currently running jobs map
+   * 
+   * @param job the job to remove
+   */
+  public void purgeJob(final HadoopJob job) {
+    runningJobs.remove(job.getJobID());
+    Display.getDefault().asyncExec(new Runnable() {
+      public void run() {
+        fireJobRemoved(job);
+      }
+    });
+  }
+
+  /**
+   * Returns the {@link Configuration} defining this location.
+   * 
+   * @return the location configuration
+   */
+  public Configuration getConfiguration() {
+    return this.conf;
+  }
+
+  /**
+   * Gets a Hadoop configuration property value
+   * 
+   * @param prop the configuration property
+   * @return the property value
+   */
+  public String getConfProp(ConfProp prop) {
+    return prop.get(conf);
+  }
+
+  /**
+   * Gets a Hadoop configuration property value
+   * 
+   * @param propName the property name
+   * @return the property value
+   */
+  public String getConfProp(String propName) {
+    return this.conf.get(propName);
+  }
+
+  public String getLocationName() {
+    return ConfProp.PI_LOCATION_NAME.get(conf);
+  }
+
+  /**
+   * Returns the master host name of the Hadoop location (the Job tracker)
+   * 
+   * @return the host name of the Job tracker
+   */
+  public String getMasterHostName() {
+    return getConfProp(ConfProp.PI_JOB_TRACKER_HOST);
+  }
+
+  public String getState() {
+    return state;
+  }
+
+  /**
+   * Overwrite this location with the given existing location
+   * 
+   * @param existing the existing location
+   */
+  public void load(HadoopCluster existing) {
+    this.conf = new Configuration(existing.conf);
+  }
+
+  /**
+   * Overwrite this location with settings available in the given XML file.
+   * The existing configuration is preserved if the XML file is invalid.
+   * 
+   * @param file the file path of the XML file
+   * @return validity of the XML file
+   * @throws ParserConfigurationException
+   * @throws IOException
+   * @throws SAXException
+   */
+  public boolean loadFromXML(File file) throws ParserConfigurationException,
+      SAXException, IOException {
+
+    Configuration newConf = new Configuration(this.conf);
+
+    DocumentBuilder builder =
+        DocumentBuilderFactory.newInstance().newDocumentBuilder();
+    Document document = builder.parse(file);
+
+    Element root = document.getDocumentElement();
+    if (!"configuration".equals(root.getTagName()))
+      return false;
+    NodeList props = root.getChildNodes();
+    for (int i = 0; i < props.getLength(); i++) {
+      Node propNode = props.item(i);
+      if (!(propNode instanceof Element))
+        continue;
+      Element prop = (Element) propNode;
+      if (!"property".equals(prop.getTagName()))
+        return false;
+      NodeList fields = prop.getChildNodes();
+      String attr = null;
+      String value = null;
+      for (int j = 0; j < fields.getLength(); j++) {
+        Node fieldNode = fields.item(j);
+        if (!(fieldNode instanceof Element))
+          continue;
+        Element field = (Element) fieldNode;
+        if ("name".equals(field.getTagName()))
+          attr = ((Text) field.getFirstChild()).getData();
+        if ("value".equals(field.getTagName()) && field.hasChildNodes())
+          value = ((Text) field.getFirstChild()).getData();
+      }
+      if (attr != null && value != null)
+        newConf.set(attr, value);
+    }
+
+    this.conf = newConf;
+    return true;
+  }
+
+  /**
+   * Sets a Hadoop configuration property value
+   * 
+   * @param prop the property
+   * @param propValue the property value
+   */
+  public void setConfProp(ConfProp prop, String propValue) {
+    prop.set(conf, propValue);
+  }
+
+  /**
+   * Sets a Hadoop configuration property value
+   * 
+   * @param propName the property name
+   * @param propValue the property value
+   */
+  public void setConfProp(String propName, String propValue) {
+    this.conf.set(propName, propValue);
+  }
+
+  public void setLocationName(String newName) {
+    ConfProp.PI_LOCATION_NAME.set(conf, newName);
+  }
+
+  /**
+   * Write this location's settings to the given file
+   * 
+   * @param file the destination file
+   * @throws IOException
+   */
+  public void storeSettingsToFile(File file) throws IOException {
+    FileOutputStream fos = new FileOutputStream(file);
+    try {
+      this.conf.writeXml(fos);
+      fos.close();
+      fos = null;
+    } finally {
+      IOUtils.closeStream(fos);
+    }
+
+  }
+
+  /* @inheritDoc */
+  @Override
+  public String toString() {
+    return this.getLocationName();
+  }
+
+  /**
+   * Fill the configuration with valid default values
+   */
+  private void addPluginConfigDefaultProperties() {
+    for (ConfProp prop : ConfProp.values()) {
+      if (conf.get(prop.name) == null)
+        conf.set(prop.name, prop.defVal);
+    }
+  }
+
+  /**
+   * Starts the location status updater
+   */
+  private synchronized void startStatusUpdater() {
+    if (statusUpdater == null) {
+      statusUpdater = new LocationStatusUpdater();
+      statusUpdater.schedule();
+    }
+  }
+
+  /*
+   * Rewrite of the connection and tunneling to the Hadoop location
+   */
+
+  /**
+   * Provides access to the default file system of this location.
+   * 
+   * @return a {@link FileSystem}
+   */
+  public FileSystem getDFS() throws IOException {
+    return FileSystem.get(this.conf);
+  }
+
+  /**
+   * Provides access to the Job tracking system of this location
+   * 
+   * @return a {@link JobClient}
+   */
+  public JobClient getJobClient() throws IOException {
+    JobConf jconf = new JobConf(this.conf);
+    return new JobClient(jconf);
+  }
+
+  /*
+   * Listeners handling
+   */
+
+  protected void fireJarPublishDone(JarModule jar) {
+    for (IJobListener listener : jobListeners) {
+      listener.publishDone(jar);
+    }
+  }
+
+  protected void fireJarPublishStart(JarModule jar) {
+    for (IJobListener listener : jobListeners) {
+      listener.publishStart(jar);
+    }
+  }
+
+  protected void fireJobAdded(HadoopJob job) {
+    for (IJobListener listener : jobListeners) {
+      listener.jobAdded(job);
+    }
+  }
+
+  protected void fireJobRemoved(HadoopJob job) {
+    for (IJobListener listener : jobListeners) {
+      listener.jobRemoved(job);
+    }
+  }
+
+  protected void fireJobChanged(HadoopJob job) {
+    for (IJobListener listener : jobListeners) {
+      listener.jobChanged(job);
+    }
+  }
+
+}
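
A sketch of the persistence round-trip the class exposes, under the assumption that location.xml is a Hadoop-style <configuration> file (the path and class name are illustrative):

    import java.io.File;
    import org.apache.hdt.core.cluster.HadoopCluster;

    public class LocationRoundTrip {
      public static void main(String[] args) throws Exception {
        // Load a location from disk, inspect it, then write it back out
        File settings = new File("location.xml");
        HadoopCluster cluster = new HadoopCluster(settings);
        System.out.println(cluster.getLocationName() + " -> "
            + cluster.getMasterHostName());
        cluster.storeSettingsToFile(settings);
      }
    }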

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/cd29c94d/org.apache.hdt.core/src/org/apache/hdt/core/cluster/HadoopJob.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/cluster/HadoopJob.java b/org.apache.hdt.core/src/org/apache/hdt/core/cluster/HadoopJob.java
new file mode 100644
index 0000000..03edc6b
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/cluster/HadoopJob.java
@@ -0,0 +1,349 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.core.cluster;
+
+import java.io.File;
+import java.io.IOException;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapred.Counters;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.JobID;
+import org.apache.hadoop.mapred.JobStatus;
+import org.apache.hadoop.mapred.RunningJob;
+
+/**
+ * Representation of a Map/Reduce running job on a given location
+ */
+
+public class HadoopJob {
+
+  /**
+   * Enum representation of a Job state
+   */
+  public enum JobState {
+    PREPARE(JobStatus.PREP), RUNNING(JobStatus.RUNNING), FAILED(
+        JobStatus.FAILED), SUCCEEDED(JobStatus.SUCCEEDED);
+
+    final int state;
+
+    JobState(int state) {
+      this.state = state;
+    }
+
+    static JobState ofInt(int state) {
+      if (state == JobStatus.PREP) {
+        return PREPARE;
+      }
+      else if (state == JobStatus.RUNNING) {
+        return RUNNING;
+      }
+      else if (state == JobStatus.FAILED) {
+        return FAILED;
+      }
+      else if (state == JobStatus.SUCCEEDED) {
+        return SUCCEEDED;
+      }
+      else {
+        return null;
+      }
+    }
+  }
+
+  /**
+   * Location this Job runs on
+   */
+  private final HadoopCluster location;
+
+  /**
+   * Unique identifier of this Job
+   */
+  final JobID jobId;
+
+  /**
+   * Status representation of a running job. This actually contains a
+   * reference to a JobClient. Its methods might block.
+   */
+  RunningJob running;
+
+  /**
+   * Last polled status
+   * 
+   * @deprecated should apparently not be used
+   */
+  JobStatus status;
+
+  /**
+   * Last polled counters
+   */
+  Counters counters;
+
+  /**
+   * Job Configuration
+   */
+  JobConf jobConf = null;
+
+  boolean completed = false;
+
+  boolean successful = false;
+
+  boolean killed = false;
+
+  int totalMaps;
+
+  int totalReduces;
+
+  int completedMaps;
+
+  int completedReduces;
+
+  float mapProgress;
+
+  float reduceProgress;
+
+  /**
+   * Constructor for a Hadoop job representation
+   * 
+   * @param location the cluster the job runs on
+   * @param id the unique job identifier
+   * @param running handle on the running job (may block when polled)
+   * @param status the last polled job status
+   */
+  public HadoopJob(HadoopCluster location, JobID id, RunningJob running,
+      JobStatus status) {
+
+    this.location = location;
+    this.jobId = id;
+    this.running = running;
+
+    loadJobFile();
+
+    update(status);
+  }
+
+  /**
+   * Try to locate and load the JobConf file for this job so as to get more
+   * details on the job (number of maps and reduces)
+   */
+  private void loadJobFile() {
+    try {
+      String jobFile = getJobFile();
+      FileSystem fs = location.getDFS();
+      File tmp = File.createTempFile(getJobID().toString(), ".xml");
+      if (FileUtil.copy(fs, new Path(jobFile), tmp, false, location
+          .getConfiguration())) {
+        this.jobConf = new JobConf(tmp.toString());
+
+        this.totalMaps = jobConf.getNumMapTasks();
+        this.totalReduces = jobConf.getNumReduceTasks();
+      }
+
+    } catch (IOException ioe) {
+      ioe.printStackTrace();
+    }
+  }
+
+  /* @inheritDoc */
+  @Override
+  public int hashCode() {
+    final int prime = 31;
+    int result = 1;
+    result = prime * result + ((jobId == null) ? 0 : jobId.hashCode());
+    result = prime * result + ((location == null) ? 0 : location.hashCode());
+    return result;
+  }
+
+  /* @inheritDoc */
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj)
+      return true;
+    if (obj == null)
+      return false;
+    if (!(obj instanceof HadoopJob))
+      return false;
+    final HadoopJob other = (HadoopJob) obj;
+    if (jobId == null) {
+      if (other.jobId != null)
+        return false;
+    } else if (!jobId.equals(other.jobId))
+      return false;
+    if (location == null) {
+      if (other.location != null)
+        return false;
+    } else if (!location.equals(other.location))
+      return false;
+    return true;
+  }
+
+  /**
+   * Get the running status of the Job (see {@link JobStatus}).
+   * 
+   * @return the current state of the job
+   */
+  public JobState getState() {
+    if (this.completed) {
+      if (this.successful) {
+        return JobState.SUCCEEDED;
+      } else {
+        return JobState.FAILED;
+      }
+    } else {
+      return JobState.RUNNING;
+    }
+    // return JobState.ofInt(this.status.getRunState());
+  }
+
+  /**
+   * @return the unique identifier of this job
+   */
+  public JobID getJobID() {
+    return this.jobId;
+  }
+
+  /**
+   * @return the location (cluster) this job runs on
+   */
+  public HadoopCluster getLocation() {
+    return this.location;
+  }
+
+  /**
+   * @return true if this job is completed
+   */
+  public boolean isCompleted() {
+    return this.completed;
+  }
+
+  /**
+   * @return the name of this job
+   */
+  public String getJobName() {
+    return this.running.getJobName();
+  }
+
+  /**
+   * @return the path of this job's configuration file
+   */
+  public String getJobFile() {
+    return this.running.getJobFile();
+  }
+
+  /**
+   * Return the tracking URL for this Job.
+   * 
+   * @return string representation of the tracking URL for this Job
+   */
+  public String getTrackingURL() {
+    return this.running.getTrackingURL();
+  }
+
+  /**
+   * Returns a string representation of this job status
+   * 
+   * @return string representation of this job status
+   */
+  public String getStatus() {
+
+    StringBuffer s = new StringBuffer();
+
+    s.append("Maps : " + completedMaps + "/" + totalMaps);
+    s.append(" (" + mapProgress + ")");
+    s.append("  Reduces : " + completedReduces + "/" + totalReduces);
+    s.append(" (" + reduceProgress + ")");
+
+    return s.toString();
+  }
+
+  /**
+   * Update this job status according to the given JobStatus
+   * 
+   * @param status the freshly polled job status
+   */
+  void update(JobStatus status) {
+    this.status = status;
+    try {
+      this.counters = running.getCounters();
+      this.completed = running.isComplete();
+      this.successful = running.isSuccessful();
+      this.mapProgress = running.mapProgress();
+      this.reduceProgress = running.reduceProgress();
+      // running.getTaskCompletionEvents(fromEvent);
+
+    } catch (IOException ioe) {
+      ioe.printStackTrace();
+    }
+
+    this.completedMaps = (int) (this.totalMaps * this.mapProgress);
+    this.completedReduces = (int) (this.totalReduces * this.reduceProgress);
+  }
+
+  /**
+   * Print this job's counters (for debugging purposes)
+   */
+  void printCounters() {
+    System.out.printf("New Job:\n", counters);
+    for (String groupName : counters.getGroupNames()) {
+      Counters.Group group = counters.getGroup(groupName);
+      System.out.printf("\t%s[%s]\n", groupName, group.getDisplayName());
+
+      for (Counters.Counter counter : group) {
+        System.out.printf("\t\t%s: %s\n", counter.getDisplayName(),
+                                          counter.getCounter());
+      }
+    }
+    System.out.printf("\n");
+  }
+
+  /**
+   * Kill this job
+   */
+  public void kill() {
+    try {
+      this.running.killJob();
+      this.killed = true;
+
+    } catch (IOException e) {
+      e.printStackTrace();
+    }
+  }
+
+  /**
+   * Print this job's status (for debugging purposes)
+   */
+  public void display() {
+    System.out.printf("Job id=%s, name=%s\n", getJobID(), getJobName());
+    System.out.printf("Configuration file: %s\n", getJobID());
+    System.out.printf("Tracking URL: %s\n", getTrackingURL());
+
+    System.out.printf("Completion: map: %f reduce %f\n",
+        100.0 * this.mapProgress, 100.0 * this.reduceProgress);
+
+    System.out.println("Job total maps = " + totalMaps);
+    System.out.println("Job completed maps = " + completedMaps);
+    System.out.println("Map percentage complete = " + mapProgress);
+    System.out.println("Job total reduces = " + totalReduces);
+    System.out.println("Job completed reduces = " + completedReduces);
+    System.out.println("Reduce percentage complete = " + reduceProgress);
+    System.out.flush();
+  }
+
+}
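
A sketch of how the polled state is read back, assuming cluster is a HadoopCluster whose jobs come from getJobs() (illustrative, not part of the commit):

    // Print a one-line progress summary for every running job on the location
    for (HadoopJob job : cluster.getJobs()) {
      if (job.getState() == HadoopJob.JobState.RUNNING) {
        System.out.println(job.getJobName() + ": " + job.getStatus());
      }
    }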

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/cd29c94d/org.apache.hdt.core/src/org/apache/hdt/core/cluster/IJobListener.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/cluster/IJobListener.java b/org.apache.hdt.core/src/org/apache/hdt/core/cluster/IJobListener.java
new file mode 100644
index 0000000..fb591f3
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/cluster/IJobListener.java
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.core.cluster;
+
+import org.apache.hdt.core.cluster.utils.JarModule;
+
+/**
+ * Interface for updating/adding jobs to the MapReduce Server view.
+ */
+public interface IJobListener {
+
+  void jobChanged(HadoopJob job);
+
+  void jobAdded(HadoopJob job);
+
+  void jobRemoved(HadoopJob job);
+
+  void publishStart(JarModule jar);
+
+  void publishDone(JarModule jar);
+
+}
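
A minimal listener sketch; registering it through HadoopCluster.addJobListener() is what routes the fireJob* callbacks above to it (the class name is illustrative):

    import org.apache.hdt.core.cluster.HadoopJob;
    import org.apache.hdt.core.cluster.IJobListener;
    import org.apache.hdt.core.cluster.utils.JarModule;

    public class LoggingJobListener implements IJobListener {
      public void jobAdded(HadoopJob job)   { System.out.println("added " + job.getJobID()); }
      public void jobChanged(HadoopJob job) { System.out.println("changed " + job.getJobID()); }
      public void jobRemoved(HadoopJob job) { System.out.println("removed " + job.getJobID()); }
      public void publishStart(JarModule jar) { System.out.println("publishing " + jar.getName()); }
      public void publishDone(JarModule jar)  { System.out.println("published " + jar.getName()); }
    }

    // cluster.addJobListener(new LoggingJobListener());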

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/cd29c94d/org.apache.hdt.core/src/org/apache/hdt/core/cluster/utils/JarModule.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/cluster/utils/JarModule.java b/org.apache.hdt.core/src/org/apache/hdt/core/cluster/utils/JarModule.java
new file mode 100644
index 0000000..2127091
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/cluster/utils/JarModule.java
@@ -0,0 +1,146 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.core.cluster.utils;
+
+import java.io.File;
+import java.util.logging.Logger;
+
+import org.apache.hdt.core.Activator;
+import org.apache.hdt.core.dialogs.ErrorMessageDialog;
+import org.eclipse.core.resources.IResource;
+import org.eclipse.core.runtime.IProgressMonitor;
+import org.eclipse.core.runtime.Path;
+import org.eclipse.jdt.core.ICompilationUnit;
+import org.eclipse.jdt.core.IJavaElement;
+import org.eclipse.jdt.core.IType;
+import org.eclipse.jdt.ui.jarpackager.IJarExportRunnable;
+import org.eclipse.jdt.ui.jarpackager.JarPackageData;
+import org.eclipse.jface.operation.IRunnableWithProgress;
+import org.eclipse.swt.widgets.Display;
+import org.eclipse.ui.PlatformUI;
+
+/**
+ * Methods for interacting with the jar file containing the
+ * Mapper/Reducer/Driver classes for a MapReduce job.
+ */
+
+public class JarModule implements IRunnableWithProgress {
+
+  static Logger log = Logger.getLogger(JarModule.class.getName());
+
+  private IResource resource;
+
+  private File jarFile;
+
+  public JarModule(IResource resource) {
+    this.resource = resource;
+  }
+
+  public String getName() {
+    return resource.getProject().getName() + "/" + resource.getName();
+  }
+
+  /**
+   * Creates a JAR file containing the resource given at construction time
+   * (a Java class with a main() method) and all associated resources; the
+   * result is made available through {@link #getJarFile()}
+   * 
+   * @param monitor the progress monitor
+   */
+  public void run(IProgressMonitor monitor) {
+
+    log.fine("Build jar");
+    JarPackageData jarrer = new JarPackageData();
+
+    jarrer.setExportJavaFiles(true);
+    jarrer.setExportClassFiles(true);
+    jarrer.setExportOutputFolders(true);
+    jarrer.setOverwrite(true);
+
+    try {
+      // IJavaProject project =
+      // (IJavaProject) resource.getProject().getNature(JavaCore.NATURE_ID);
+
+      // check this is the case before letting this method get called
+      Object element = resource.getAdapter(IJavaElement.class);
+      IType type = ((ICompilationUnit) element).findPrimaryType();
+      jarrer.setManifestMainClass(type);
+
+      // Create a temporary JAR file name
+      File baseDir = Activator.getDefault().getStateLocation().toFile();
+
+      String prefix =
+          String.format("%s_%s-", resource.getProject().getName(), resource
+              .getName());
+      File jarFile = File.createTempFile(prefix, ".jar", baseDir);
+      jarrer.setJarLocation(new Path(jarFile.getAbsolutePath()));
+
+      jarrer.setElements(resource.getProject().members(IResource.FILE));
+      IJarExportRunnable runnable =
+          jarrer.createJarExportRunnable(Display.getDefault()
+              .getActiveShell());
+      runnable.run(monitor);
+
+      this.jarFile = jarFile;
+
+    } catch (Exception e) {
+      e.printStackTrace();
+      throw new RuntimeException(e);
+    }
+  }
+
+  /**
+   * Allow the retrieval of the resulting JAR file
+   * 
+   * @return the generated JAR file
+   */
+  public File getJarFile() {
+    return this.jarFile;
+  }
+
+  /**
+   * Static helper that creates a JAR package for the given resource while
+   * showing a progress bar
+   * 
+   * @param resource the resource to package
+   * @return the created JAR file, or null if the export failed
+   */
+  public static File createJarPackage(IResource resource) {
+
+    JarModule jarModule = new JarModule(resource);
+    try {
+      PlatformUI.getWorkbench().getProgressService().run(false, true,
+          jarModule);
+
+    } catch (Exception e) {
+      e.printStackTrace();
+      return null;
+    }
+
+    File jarFile = jarModule.getJarFile();
+    if (jarFile == null) {
+      ErrorMessageDialog.display("Run on Hadoop",
+          "Unable to create or locate the JAR file for the Job");
+      return null;
+    }
+
+    return jarFile;
+  }
+
+}
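
The intended entry point from UI code is the static helper rather than run() directly; a sketch, assuming resource is an IResource in scope whose compilation unit has a main() class:

    import java.io.File;
    import org.apache.hdt.core.cluster.utils.JarModule;

    // Packages the resource's project into a temporary JAR, with a progress bar
    File jar = JarModule.createJarPackage(resource);
    if (jar != null) {
      System.out.println("Job jar built at " + jar.getAbsolutePath());
    }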

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/cd29c94d/org.apache.hdt.core/src/org/apache/hdt/core/dialogs/ErrorMessageDialog.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/dialogs/ErrorMessageDialog.java b/org.apache.hdt.core/src/org/apache/hdt/core/dialogs/ErrorMessageDialog.java
new file mode 100644
index 0000000..71067b9
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/dialogs/ErrorMessageDialog.java
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.core.dialogs;
+
+import org.eclipse.jface.dialogs.MessageDialog;
+import org.eclipse.swt.widgets.Display;
+
+/**
+ * Error dialog helper
+ */
+public class ErrorMessageDialog {
+
+  public static void display(final String title, final String message) {
+    Display.getDefault().syncExec(new Runnable() {
+
+      public void run() {
+        MessageDialog.openError(Display.getDefault().getActiveShell(),
+            title, message);
+      }
+
+    });
+  }
+
+  public static void display(Exception e) {
+    display("An exception has occured!", "Exception description:\n"
+        + e.getLocalizedMessage());
+  }
+
+}
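
Usage is a one-liner from any thread, since the helper marshals onto the display thread itself (a sketch; the strings are arbitrary):

    // Report a failure to the user without worrying about the UI thread
    ErrorMessageDialog.display("Run on Hadoop", "Could not submit the job");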

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/cd29c94d/org.apache.hdt.ui/src/org/apache/hdt/ui/actions/EditLocationAction.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/actions/EditLocationAction.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/actions/EditLocationAction.java
index 6287449..f39b541 100644
--- a/org.apache.hdt.ui/src/org/apache/hdt/ui/actions/EditLocationAction.java
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/actions/EditLocationAction.java
@@ -18,8 +18,8 @@
 
 package org.apache.hdt.ui.actions;
 
+import org.apache.hdt.core.cluster.HadoopCluster;
 import org.apache.hdt.ui.ImageLibrary;
-import org.apache.hdt.ui.cluster.HadoopCluster;
 import org.apache.hdt.ui.wizards.HadoopLocationWizard;
 import org.apache.hdt.ui.views.ClusterView;
 import org.eclipse.jface.action.Action;

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/cd29c94d/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/ConfProp.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/ConfProp.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/ConfProp.java
deleted file mode 100644
index 1468b01..0000000
--- a/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/ConfProp.java
+++ /dev/null
@@ -1,147 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hdt.ui.cluster;
-
-import java.util.HashMap;
-import java.util.Map;
-
-import org.apache.hadoop.conf.Configuration;
-
-public enum ConfProp {
-  /**
-   * Property name for the Hadoop location name
-   */
-  PI_LOCATION_NAME(true, "location.name", "New Hadoop location"),
-
-  /**
-   * Property name for the master host name (the Job tracker)
-   */
-  PI_JOB_TRACKER_HOST(true, "jobtracker.host", "localhost"),
-
-  /**
-   * Property name for the DFS master host name (the Name node)
-   */
-  PI_NAME_NODE_HOST(true, "namenode.host", "localhost"),
-
-  /**
-   * Property name for the installation directory on the master node
-   */
-  // PI_INSTALL_DIR(true, "install.dir", "/dir/hadoop-version/"),
-  /**
-   * User name to use for Hadoop operations
-   */
-  PI_USER_NAME(true, "user.name", System.getProperty("user.name",
-      "who are you?")),
-
-  /**
-   * Property name for SOCKS proxy activation
-   */
-  PI_SOCKS_PROXY_ENABLE(true, "socks.proxy.enable", "no"),
-
-  /**
-   * Property name for the SOCKS proxy host
-   */
-  PI_SOCKS_PROXY_HOST(true, "socks.proxy.host", "host"),
-
-  /**
-   * Property name for the SOCKS proxy port
-   */
-  PI_SOCKS_PROXY_PORT(true, "socks.proxy.port", "1080"),
-
-  /**
-   * TCP port number for the name node
-   */
-  PI_NAME_NODE_PORT(true, "namenode.port", "50040"),
-
-  /**
-   * TCP port number for the job tracker
-   */
-  PI_JOB_TRACKER_PORT(true, "jobtracker.port", "50020"),
-
-  /**
-   * Are the Map/Reduce and the Distributed FS masters hosted on the same
-   * machine?
-   */
-  PI_COLOCATE_MASTERS(true, "masters.colocate", "yes"),
-
-  /**
-   * Property name for naming the job tracker (URI). This property is related
-   * to {@link #PI_MASTER_HOST_NAME}
-   */
-  JOB_TRACKER_URI(false, "mapreduce.jobtracker.address", "localhost:50020"),
-
-  /**
-   * Property name for naming the default file system (URI).
-   */
-  FS_DEFAULT_URI(false, "fs.default.name", "hdfs://localhost:50040/"),
-
-  /**
-   * Property name for the default socket factory:
-   */
-  SOCKET_FACTORY_DEFAULT(false, "hadoop.rpc.socket.factory.class.default",
-      "org.apache.hadoop.net.StandardSocketFactory"),
-
-  /**
-   * Property name for the SOCKS server URI.
-   */
-  SOCKS_SERVER(false, "hadoop.socks.server", "host:1080"),
-
-  ;
-
-  /**
-   * Map <property name> -> ConfProp
-   */
-  private static Map<String, ConfProp> map;
-
-  private static synchronized void registerProperty(String name,
-      ConfProp prop) {
-
-    if (ConfProp.map == null)
-      ConfProp.map = new HashMap<String, ConfProp>();
-
-    ConfProp.map.put(name, prop);
-  }
-
-  public static ConfProp getByName(String propName) {
-    return map.get(propName);
-  }
-
-  public final String name;
-
-  public final String defVal;
-
-  ConfProp(boolean internal, String name, String defVal) {
-    if (internal)
-      name = "eclipse.plug-in." + name;
-    this.name = name;
-    this.defVal = defVal;
-
-    ConfProp.registerProperty(name, this);
-  }
-
-  String get(Configuration conf) {
-    return conf.get(name);
-  }
-
-  void set(Configuration conf, String value) {
-    assert value != null;
-    conf.set(name, value);
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/cd29c94d/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/HadoopCluster.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/HadoopCluster.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/HadoopCluster.java
deleted file mode 100644
index 588dd4e..0000000
--- a/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/HadoopCluster.java
+++ /dev/null
@@ -1,518 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hdt.ui.cluster;
-
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-import java.util.TreeMap;
-import java.util.logging.Logger;
-
-import javax.xml.parsers.DocumentBuilder;
-import javax.xml.parsers.DocumentBuilderFactory;
-import javax.xml.parsers.ParserConfigurationException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hdt.ui.Activator;
-import org.apache.hdt.ui.cluster.utils.JarModule;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.mapred.JobClient;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.JobID;
-import org.apache.hadoop.mapred.JobStatus;
-import org.apache.hadoop.mapred.RunningJob;
-import org.eclipse.core.runtime.IProgressMonitor;
-import org.eclipse.core.runtime.IStatus;
-import org.eclipse.core.runtime.Status;
-import org.eclipse.core.runtime.jobs.Job;
-import org.eclipse.swt.widgets.Display;
-import org.w3c.dom.Document;
-import org.w3c.dom.Element;
-import org.w3c.dom.Node;
-import org.w3c.dom.NodeList;
-import org.w3c.dom.Text;
-import org.xml.sax.SAXException;
-
-/**
- * Representation of a Hadoop location, meaning of the master node (NameNode,
- * JobTracker).
- * 
- * <p>
- * This class does not create any SSH connection anymore. Tunneling must be
- * setup outside of Eclipse for now (using Putty or <tt>ssh -D&lt;port&gt;
- * &lt;host&gt;</tt>)
- * 
- * <p>
- * <em> TODO </em>
- * <li> Disable the updater if a location becomes unreachable or fails for
- * tool long
- * <li> Stop the updater on location's disposal/removal
- */
-
-public class HadoopCluster {
-
-  /**
-   * Frequency of location status observations expressed as the delay in ms
-   * between each observation
-   * 
-   * TODO Add a preference parameter for this
-   */
-  protected static final long STATUS_OBSERVATION_DELAY = 1500;
-
-  /**
-   * 
-   */
-  public class LocationStatusUpdater extends Job {
-
-    JobClient client = null;
-
-    /**
-     * Setup the updater
-     */
-    public LocationStatusUpdater() {
-      super("Map/Reduce location status updater");
-      this.setSystem(true);
-    }
-
-    /* @inheritDoc */
-    @Override
-    protected IStatus run(IProgressMonitor monitor) {
-      if (client == null) {
-        try {
-          client = HadoopCluster.this.getJobClient();
-
-        } catch (IOException ioe) {
-          client = null;
-          return new Status(Status.ERROR, Activator.PLUGIN_ID, 0,
-              "Cannot connect to the Map/Reduce location: "
-                            + HadoopCluster.this.getLocationName(),
-                            ioe);
-        }
-      }
-
-      try {
-        // Set of all known existing Job IDs we want fresh info of
-        Set<JobID> missingJobIds =
-            new HashSet<JobID>(runningJobs.keySet());
-
-        JobStatus[] jstatus = client.jobsToComplete();
-        for (JobStatus status : jstatus) {
-
-          JobID jobId = status.getJobID();
-          missingJobIds.remove(jobId);
-
-          HadoopJob hJob;
-          synchronized (HadoopCluster.this.runningJobs) {
-            hJob = runningJobs.get(jobId);
-            if (hJob == null) {
-              // Unknown job, create an entry
-              RunningJob running = client.getJob(jobId);
-              hJob =
-                  new HadoopJob(HadoopCluster.this, jobId, running, status);
-              newJob(hJob);
-            }
-          }
-
-          // Update HadoopJob with fresh infos
-          updateJob(hJob, status);
-        }
-
-        // Ask explicitly for fresh info for these Job IDs
-        for (JobID jobId : missingJobIds) {
-          HadoopJob hJob = runningJobs.get(jobId);
-          if (!hJob.isCompleted())
-            updateJob(hJob, null);
-        }
-
-      } catch (IOException ioe) {
-        client = null;
-        return new Status(Status.ERROR, Activator.PLUGIN_ID, 0,
-            "Cannot retrieve running Jobs on location: "
-                          + HadoopCluster.this.getLocationName(), ioe);
-      }
-
-      // Schedule the next observation
-      schedule(STATUS_OBSERVATION_DELAY);
-
-      return Status.OK_STATUS;
-    }
-
-    /**
-     * Stores and make the new job available
-     * 
-     * @param data
-     */
-    private void newJob(final HadoopJob data) {
-      runningJobs.put(data.getJobID(), data);
-
-      Display.getDefault().asyncExec(new Runnable() {
-        public void run() {
-          fireJobAdded(data);
-        }
-      });
-    }
-
-    /**
-     * Updates the status of a job
-     * 
-     * @param job the job to update
-     */
-    private void updateJob(final HadoopJob job, JobStatus status) {
-      job.update(status);
-
-      Display.getDefault().asyncExec(new Runnable() {
-        public void run() {
-          fireJobChanged(job);
-        }
-      });
-    }
-
-  }
-
-  static Logger log = Logger.getLogger(HadoopCluster.class.getName());
-
-  /**
-   * Hadoop configuration of the location. Also contains specific parameters
-   * for the plug-in. These parameters are prefix with eclipse.plug-in.*
-   */
-  private Configuration conf;
-
-  /**
-   * Jobs listeners
-   */
-  private Set<IJobListener> jobListeners = new HashSet<IJobListener>();
-
-  /**
-   * Jobs running on this location. The keys of this map are the Job IDs.
-   */
-  private transient Map<JobID, HadoopJob> runningJobs =
-      Collections.synchronizedMap(new TreeMap<JobID, HadoopJob>());
-
-  /**
-   * Status updater for this location
-   */
-  private LocationStatusUpdater statusUpdater;
-
-  // state and status - transient
-  private transient String state = "";
-
-  /**
-   * Creates a new default Hadoop location
-   */
-  public HadoopCluster() {
-    this.conf = new Configuration();
-    this.addPluginConfigDefaultProperties();
-  }
-
-  /**
-   * Creates a location from a file
-   * 
-   * @throws IOException
-   * @throws SAXException
-   * @throws ParserConfigurationException
-   */
-  public HadoopCluster(File file) throws ParserConfigurationException,
-      SAXException, IOException {
-
-    this.conf = new Configuration();
-    this.addPluginConfigDefaultProperties();
-    this.loadFromXML(file);
-  }
-
-  /**
-   * Create a new Hadoop location by copying an already existing one.
-   * 
-   * @param existing the location to copy
-   */
-  public HadoopCluster(HadoopCluster existing) {
-    this();
-    this.load(existing);
-  }
-
-  public void addJobListener(IJobListener l) {
-    jobListeners.add(l);
-  }
-
-  public void dispose() {
-    // TODO close DFS connections?
-  }
-
-  /**
-   * Lists all elements that should be present in the Server window (all
-   * servers and all jobs running on each server)
-   * 
-   * @return collection of jobs for this location
-   */
-  public Collection<HadoopJob> getJobs() {
-    startStatusUpdater();
-    return this.runningJobs.values();
-  }
-
-  /**
-   * Remove the given job from the currently running jobs map
-   * 
-   * @param job the job to remove
-   */
-  public void purgeJob(final HadoopJob job) {
-    runningJobs.remove(job.getJobID());
-    Display.getDefault().asyncExec(new Runnable() {
-      public void run() {
-        fireJobRemoved(job);
-      }
-    });
-  }
-
-  /**
-   * Returns the {@link Configuration} defining this location.
-   * 
-   * @return the location configuration
-   */
-  public Configuration getConfiguration() {
-    return this.conf;
-  }
-
-  /**
-   * Gets a Hadoop configuration property value
-   * 
-   * @param prop the configuration property
-   * @return the property value
-   */
-  public String getConfProp(ConfProp prop) {
-    return prop.get(conf);
-  }
-
-  /**
-   * Gets a Hadoop configuration property value
-   * 
-   * @param propName the property name
-   * @return the property value
-   */
-  public String getConfProp(String propName) {
-    return this.conf.get(propName);
-  }
-
-  public String getLocationName() {
-    return ConfProp.PI_LOCATION_NAME.get(conf);
-  }
-
-  /**
-   * Returns the master host name of the Hadoop location (the Job tracker)
-   * 
-   * @return the host name of the Job tracker
-   */
-  public String getMasterHostName() {
-    return getConfProp(ConfProp.PI_JOB_TRACKER_HOST);
-  }
-
-  public String getState() {
-    return state;
-  }
-
-  /**
-   * Overwrite this location's settings with those of the given existing location
-   * 
-   * @param existing the existing location
-   */
-  public void load(HadoopCluster existing) {
-    this.conf = new Configuration(existing.conf);
-  }
-
-  /**
-   * Overwrite this location with settings available in the given XML file.
-   * The existing configuration is preserved if the XML file is invalid.
-   * 
-   * @param file the file path of the XML file
-   * @return true if the file was a valid Hadoop configuration file
-   * @throws ParserConfigurationException
-   * @throws IOException
-   * @throws SAXException
-   */
-  public boolean loadFromXML(File file) throws ParserConfigurationException,
-      SAXException, IOException {
-
-    Configuration newConf = new Configuration(this.conf);
-
-    DocumentBuilder builder =
-        DocumentBuilderFactory.newInstance().newDocumentBuilder();
-    Document document = builder.parse(file);
-
-    Element root = document.getDocumentElement();
-    if (!"configuration".equals(root.getTagName()))
-      return false;
-    NodeList props = root.getChildNodes();
-    for (int i = 0; i < props.getLength(); i++) {
-      Node propNode = props.item(i);
-      if (!(propNode instanceof Element))
-        continue;
-      Element prop = (Element) propNode;
-      if (!"property".equals(prop.getTagName()))
-        return false;
-      NodeList fields = prop.getChildNodes();
-      String attr = null;
-      String value = null;
-      for (int j = 0; j < fields.getLength(); j++) {
-        Node fieldNode = fields.item(j);
-        if (!(fieldNode instanceof Element))
-          continue;
-        Element field = (Element) fieldNode;
-        if ("name".equals(field.getTagName()))
-          attr = ((Text) field.getFirstChild()).getData();
-        if ("value".equals(field.getTagName()) && field.hasChildNodes())
-          value = ((Text) field.getFirstChild()).getData();
-      }
-      if (attr != null && value != null)
-        newConf.set(attr, value);
-    }
-
-    this.conf = newConf;
-    return true;
-  }
-
-  /**
-   * Sets a Hadoop configuration property value
-   * 
-   * @param prop the property
-   * @param propValue the property value
-   */
-  public void setConfProp(ConfProp prop, String propValue) {
-    prop.set(conf, propValue);
-  }
-
-  /**
-   * Sets a Hadoop configuration property value
-   * 
-   * @param propName the property name
-   * @param propValue the property value
-   */
-  public void setConfProp(String propName, String propValue) {
-    this.conf.set(propName, propValue);
-  }
-
-  public void setLocationName(String newName) {
-    ConfProp.PI_LOCATION_NAME.set(conf, newName);
-  }
-
-  /**
-   * Write this location's settings to the given file
-   * 
-   * @param file the destination file
-   * @throws IOException
-   */
-  public void storeSettingsToFile(File file) throws IOException {
-    FileOutputStream fos = new FileOutputStream(file);
-    try {
-      this.conf.writeXml(fos);
-      fos.close();
-      fos = null;
-    } finally {
-      IOUtils.closeStream(fos);
-    }
-
-  }
-
-  /* @inheritDoc */
-  @Override
-  public String toString() {
-    return this.getLocationName();
-  }
-
-  /**
-   * Fill the configuration with valid default values
-   */
-  private void addPluginConfigDefaultProperties() {
-    for (ConfProp prop : ConfProp.values()) {
-      if (conf.get(prop.name) == null)
-        conf.set(prop.name, prop.defVal);
-    }
-  }
-
-  /**
-   * Starts the location status updater
-   */
-  private synchronized void startStatusUpdater() {
-    if (statusUpdater == null) {
-      statusUpdater = new LocationStatusUpdater();
-      statusUpdater.schedule();
-    }
-  }
-
-  /*
-   * Rewrite of the connecting and tunneling to the Hadoop location
-   */
-
-  /**
-   * Provides access to the default file system of this location.
-   * 
-   * @return a {@link FileSystem}
-   */
-  public FileSystem getDFS() throws IOException {
-    return FileSystem.get(this.conf);
-  }
-
-  /**
-   * Provides access to the Job tracking system of this location
-   * 
-   * @return a {@link JobClient}
-   */
-  public JobClient getJobClient() throws IOException {
-    JobConf jconf = new JobConf(this.conf);
-    return new JobClient(jconf);
-  }
-
-  /*
-   * Listeners handling
-   */
-
-  protected void fireJarPublishDone(JarModule jar) {
-    for (IJobListener listener : jobListeners) {
-      listener.publishDone(jar);
-    }
-  }
-
-  protected void fireJarPublishStart(JarModule jar) {
-    for (IJobListener listener : jobListeners) {
-      listener.publishStart(jar);
-    }
-  }
-
-  protected void fireJobAdded(HadoopJob job) {
-    for (IJobListener listener : jobListeners) {
-      listener.jobAdded(job);
-    }
-  }
-
-  protected void fireJobRemoved(HadoopJob job) {
-    for (IJobListener listener : jobListeners) {
-      listener.jobRemoved(job);
-    }
-  }
-
-  protected void fireJobChanged(HadoopJob job) {
-    for (IJobListener listener : jobListeners) {
-      listener.jobChanged(job);
-    }
-  }
-
-}
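
A quick way to sanity-check the class in its new org.apache.hdt.core.cluster home is the XML round trip its load/store methods support. A minimal sketch, assuming a scratch file path and location name chosen only for illustration:

  import java.io.File;
  import org.apache.hdt.core.cluster.HadoopCluster;

  public class ClusterRoundTrip {
    public static void main(String[] args) throws Exception {
      HadoopCluster cluster = new HadoopCluster();  // plug-in defaults filled in
      cluster.setLocationName("demo-cluster");      // hypothetical location name
      File settings = new File("demo-cluster.xml"); // hypothetical scratch file
      cluster.storeSettingsToFile(settings);        // conf.writeXml() underneath
      HadoopCluster restored = new HadoopCluster(settings); // parses the XML back
      System.out.println(restored.getLocationName());       // prints demo-cluster
    }
  }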

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/cd29c94d/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/HadoopJob.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/HadoopJob.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/HadoopJob.java
deleted file mode 100644
index 78f3fc6..0000000
--- a/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/HadoopJob.java
+++ /dev/null
@@ -1,349 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hdt.ui.cluster;
-
-import java.io.File;
-import java.io.IOException;
-
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.mapred.Counters;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.JobID;
-import org.apache.hadoop.mapred.JobStatus;
-import org.apache.hadoop.mapred.RunningJob;
-
-/**
- * Representation of a running Map/Reduce job on a given location
- */
-
-public class HadoopJob {
-
-  /**
-   * Enum representation of a Job state
-   */
-  public enum JobState {
-    PREPARE(JobStatus.PREP), RUNNING(JobStatus.RUNNING), FAILED(
-        JobStatus.FAILED), SUCCEEDED(JobStatus.SUCCEEDED);
-
-    final int state;
-
-    JobState(int state) {
-      this.state = state;
-    }
-
-    static JobState ofInt(int state) {
-      if (state == JobStatus.PREP) {
-        return PREPARE;
-      }
-      else if (state == JobStatus.RUNNING) {
-        return RUNNING;
-      }
-      else if (state == JobStatus.FAILED) {
-        return FAILED;
-      }
-      else if (state == JobStatus.SUCCEEDED) {
-        return SUCCEEDED;
-      }
-      else {
-        return null;
-      }
-    }
-  }
-
-  /**
-   * Location this Job runs on
-   */
-  private final HadoopCluster location;
-
-  /**
-   * Unique identifier of this Job
-   */
-  final JobID jobId;
-
-  /**
-   * Status representation of a running job. This actually contains a
-   * reference to a JobClient. Its methods might block.
-   */
-  RunningJob running;
-
-  /**
-   * Last polled status
-   * 
-   * @deprecated should apparently not be used
-   */
-  JobStatus status;
-
-  /**
-   * Last polled counters
-   */
-  Counters counters;
-
-  /**
-   * Job Configuration
-   */
-  JobConf jobConf = null;
-
-  boolean completed = false;
-
-  boolean successful = false;
-
-  boolean killed = false;
-
-  int totalMaps;
-
-  int totalReduces;
-
-  int completedMaps;
-
-  int completedReduces;
-
-  float mapProgress;
-
-  float reduceProgress;
-
-  /**
-   * Constructor for a Hadoop job representation
-   * 
-   * @param location the location the job runs on
-   * @param id the unique identifier of the job
-   * @param running the RunningJob handle for this job
-   * @param status the initial job status
-   */
-  public HadoopJob(HadoopCluster location, JobID id, RunningJob running,
-      JobStatus status) {
-
-    this.location = location;
-    this.jobId = id;
-    this.running = running;
-
-    loadJobFile();
-
-    update(status);
-  }
-
-  /**
-   * Try to locate and load the JobConf file for this job so as to get more
-   * details on the job (number of maps and reduces)
-   */
-  private void loadJobFile() {
-    try {
-      String jobFile = getJobFile();
-      FileSystem fs = location.getDFS();
-      File tmp = File.createTempFile(getJobID().toString(), ".xml");
-      if (FileUtil.copy(fs, new Path(jobFile), tmp, false, location
-          .getConfiguration())) {
-        this.jobConf = new JobConf(tmp.toString());
-
-        this.totalMaps = jobConf.getNumMapTasks();
-        this.totalReduces = jobConf.getNumReduceTasks();
-      }
-
-    } catch (IOException ioe) {
-      ioe.printStackTrace();
-    }
-  }
-
-  /* @inheritDoc */
-  @Override
-  public int hashCode() {
-    final int prime = 31;
-    int result = 1;
-    result = prime * result + ((jobId == null) ? 0 : jobId.hashCode());
-    result = prime * result + ((location == null) ? 0 : location.hashCode());
-    return result;
-  }
-
-  /* @inheritDoc */
-  @Override
-  public boolean equals(Object obj) {
-    if (this == obj)
-      return true;
-    if (obj == null)
-      return false;
-    if (!(obj instanceof HadoopJob))
-      return false;
-    final HadoopJob other = (HadoopJob) obj;
-    if (jobId == null) {
-      if (other.jobId != null)
-        return false;
-    } else if (!jobId.equals(other.jobId))
-      return false;
-    if (location == null) {
-      if (other.location != null)
-        return false;
-    } else if (!location.equals(other.location))
-      return false;
-    return true;
-  }
-
-  /**
-   * Get the running state of the Job (see {@link JobStatus}).
-   * 
-   * @return the current state of the job
-   */
-  public JobState getState() {
-    if (this.completed) {
-      if (this.successful) {
-        return JobState.SUCCEEDED;
-      } else {
-        return JobState.FAILED;
-      }
-    } else {
-      return JobState.RUNNING;
-    }
-    // return JobState.ofInt(this.status.getRunState());
-  }
-
-  /**
-   * @return the unique identifier of this job
-   */
-  public JobID getJobID() {
-    return this.jobId;
-  }
-
-  /**
-   * @return the location this job runs on
-   */
-  public HadoopCluster getLocation() {
-    return this.location;
-  }
-
-  /**
-   * @return true if the job has completed
-   */
-  public boolean isCompleted() {
-    return this.completed;
-  }
-
-  /**
-   * @return the name of the job
-   */
-  public String getJobName() {
-    return this.running.getJobName();
-  }
-
-  /**
-   * @return the path of the job file
-   */
-  public String getJobFile() {
-    return this.running.getJobFile();
-  }
-
-  /**
-   * Return the tracking URL for this Job.
-   * 
-   * @return string representation of the tracking URL for this Job
-   */
-  public String getTrackingURL() {
-    return this.running.getTrackingURL();
-  }
-
-  /**
-   * Returns a string representation of this job status
-   * 
-   * @return string representation of this job status
-   */
-  public String getStatus() {
-
-    StringBuffer s = new StringBuffer();
-
-    s.append("Maps : " + completedMaps + "/" + totalMaps);
-    s.append(" (" + mapProgress + ")");
-    s.append("  Reduces : " + completedReduces + "/" + totalReduces);
-    s.append(" (" + reduceProgress + ")");
-
-    return s.toString();
-  }
-
-  /**
-   * Update this job status according to the given JobStatus
-   * 
-   * @param status the new status of the job
-   */
-  void update(JobStatus status) {
-    this.status = status;
-    try {
-      this.counters = running.getCounters();
-      this.completed = running.isComplete();
-      this.successful = running.isSuccessful();
-      this.mapProgress = running.mapProgress();
-      this.reduceProgress = running.reduceProgress();
-      // running.getTaskCompletionEvents(fromEvent);
-
-    } catch (IOException ioe) {
-      ioe.printStackTrace();
-    }
-
-    this.completedMaps = (int) (this.totalMaps * this.mapProgress);
-    this.completedReduces = (int) (this.totalReduces * this.reduceProgress);
-  }
-
-  /**
-   * Print this job's counters (for debugging purposes)
-   */
-  void printCounters() {
-    System.out.printf("New Job:\n", counters);
-    for (String groupName : counters.getGroupNames()) {
-      Counters.Group group = counters.getGroup(groupName);
-      System.out.printf("\t%s[%s]\n", groupName, group.getDisplayName());
-
-      for (Counters.Counter counter : group) {
-        System.out.printf("\t\t%s: %s\n", counter.getDisplayName(),
-                                          counter.getCounter());
-      }
-    }
-    System.out.printf("\n");
-  }
-
-  /**
-   * Kill this job
-   */
-  public void kill() {
-    try {
-      this.running.killJob();
-      this.killed = true;
-
-    } catch (IOException e) {
-      e.printStackTrace();
-    }
-  }
-
-  /**
-   * Print this job's status (for debugging purposes)
-   */
-  public void display() {
-    System.out.printf("Job id=%s, name=%s\n", getJobID(), getJobName());
-    System.out.printf("Configuration file: %s\n", getJobID());
-    System.out.printf("Tracking URL: %s\n", getTrackingURL());
-
-    System.out.printf("Completion: map: %f reduce %f\n",
-        100.0 * this.mapProgress, 100.0 * this.reduceProgress);
-
-    System.out.println("Job total maps = " + totalMaps);
-    System.out.println("Job completed maps = " + completedMaps);
-    System.out.println("Map percentage complete = " + mapProgress);
-    System.out.println("Job total reduces = " + totalReduces);
-    System.out.println("Job completed reduces = " + completedReduces);
-    System.out.println("Reduce percentage complete = " + reduceProgress);
-    System.out.flush();
-  }
-
-}
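
For orientation, a minimal sketch of how a caller can walk the jobs of a location using only the methods above; the dumpJobs name and the already-configured cluster are assumptions for illustration:

  import org.apache.hdt.core.cluster.HadoopCluster;
  import org.apache.hdt.core.cluster.HadoopJob;

  // Print one line per job known on the location. getJobs() also kicks off
  // the background status updater, so repeated calls see fresh data.
  static void dumpJobs(HadoopCluster cluster) {
    for (HadoopJob job : cluster.getJobs()) {
      System.out.println(job.getJobID() + " [" + job.getState() + "] "
          + job.getStatus() + " -> " + job.getTrackingURL());
    }
  }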

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/cd29c94d/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/IHadoopClusterListener.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/IHadoopClusterListener.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/IHadoopClusterListener.java
index 2f7a245..8d33cc8 100644
--- a/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/IHadoopClusterListener.java
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/IHadoopClusterListener.java
@@ -18,7 +18,7 @@
 
 package org.apache.hdt.ui.cluster;
 
-import org.apache.hdt.ui.cluster.HadoopCluster;
+import org.apache.hdt.core.cluster.HadoopCluster;
 
 /**
  * Interface for monitoring server changes

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/cd29c94d/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/IJobListener.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/IJobListener.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/IJobListener.java
deleted file mode 100644
index 5d001c4..0000000
--- a/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/IJobListener.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hdt.ui.cluster;
-
-import org.apache.hdt.ui.cluster.utils.JarModule;
-
-/**
- * Interface for updating/adding jobs to the MapReduce Server view.
- */
-public interface IJobListener {
-
-  void jobChanged(HadoopJob job);
-
-  void jobAdded(HadoopJob job);
-
-  void jobRemoved(HadoopJob job);
-
-  void publishStart(JarModule jar);
-
-  void publishDone(JarModule jar);
-
-}
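
Since the interface now lives in org.apache.hdt.core.cluster, a UI component registers roughly as in the sketch below; the logging bodies stand in for real widget updates, and the cluster variable is assumed to be an existing HadoopCluster:

  import org.apache.hdt.core.cluster.HadoopCluster;
  import org.apache.hdt.core.cluster.HadoopJob;
  import org.apache.hdt.core.cluster.IJobListener;
  import org.apache.hdt.core.cluster.utils.JarModule;

  IJobListener logger = new IJobListener() {
    public void jobAdded(HadoopJob job)   { System.out.println("added "   + job.getJobID()); }
    public void jobChanged(HadoopJob job) { System.out.println("changed " + job.getJobID()); }
    public void jobRemoved(HadoopJob job) { System.out.println("removed " + job.getJobID()); }
    public void publishStart(JarModule jar) { System.out.println("publishing " + jar.getName()); }
    public void publishDone(JarModule jar)  { System.out.println("published "  + jar.getName()); }
  };
  cluster.addJobListener(logger);  // "cluster" is an assumed HadoopCluster instance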

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/cd29c94d/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/ServerRegistry.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/ServerRegistry.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/ServerRegistry.java
index a1a990e..974315b 100644
--- a/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/ServerRegistry.java
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/ServerRegistry.java
@@ -29,8 +29,8 @@ import java.util.Set;
 import java.util.TreeMap;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hdt.core.cluster.HadoopCluster;
 import org.apache.hdt.ui.Activator;
-import org.apache.hdt.ui.cluster.HadoopCluster;
 import org.eclipse.jface.dialogs.MessageDialog;
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/cd29c94d/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/utils/JarModule.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/utils/JarModule.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/utils/JarModule.java
deleted file mode 100644
index 71d5559..0000000
--- a/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/utils/JarModule.java
+++ /dev/null
@@ -1,146 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hdt.ui.cluster.utils;
-
-import java.io.File;
-import java.util.logging.Logger;
-
-import org.apache.hdt.ui.Activator;
-import org.apache.hdt.ui.dialogs.ErrorMessageDialog;
-import org.eclipse.core.resources.IResource;
-import org.eclipse.core.runtime.IProgressMonitor;
-import org.eclipse.core.runtime.Path;
-import org.eclipse.jdt.core.ICompilationUnit;
-import org.eclipse.jdt.core.IJavaElement;
-import org.eclipse.jdt.core.IType;
-import org.eclipse.jdt.ui.jarpackager.IJarExportRunnable;
-import org.eclipse.jdt.ui.jarpackager.JarPackageData;
-import org.eclipse.jface.operation.IRunnableWithProgress;
-import org.eclipse.swt.widgets.Display;
-import org.eclipse.ui.PlatformUI;
-
-/**
- * Methods for interacting with the jar file containing the
- * Mapper/Reducer/Driver classes for a MapReduce job.
- */
-
-public class JarModule implements IRunnableWithProgress {
-
-  static Logger log = Logger.getLogger(JarModule.class.getName());
-
-  private IResource resource;
-
-  private File jarFile;
-
-  public JarModule(IResource resource) {
-    this.resource = resource;
-  }
-
-  public String getName() {
-    return resource.getProject().getName() + "/" + resource.getName();
-  }
-
-  /**
-   * Creates a JAR file containing the module's resource (a Java class with
-   * a main() method) and all associated resources; the result is available
-   * through getJarFile()
-   * 
-   * @param monitor the progress monitor
-   */
-  public void run(IProgressMonitor monitor) {
-
-    log.fine("Build jar");
-    JarPackageData jarrer = new JarPackageData();
-
-    jarrer.setExportJavaFiles(true);
-    jarrer.setExportClassFiles(true);
-    jarrer.setExportOutputFolders(true);
-    jarrer.setOverwrite(true);
-
-    try {
-      // IJavaProject project =
-      // (IJavaProject) resource.getProject().getNature(JavaCore.NATURE_ID);
-
-      // check this is the case before letting this method get called
-      Object element = resource.getAdapter(IJavaElement.class);
-      IType type = ((ICompilationUnit) element).findPrimaryType();
-      jarrer.setManifestMainClass(type);
-
-      // Create a temporary JAR file name
-      File baseDir = Activator.getDefault().getStateLocation().toFile();
-
-      String prefix =
-          String.format("%s_%s-", resource.getProject().getName(), resource
-              .getName());
-      File jarFile = File.createTempFile(prefix, ".jar", baseDir);
-      jarrer.setJarLocation(new Path(jarFile.getAbsolutePath()));
-
-      jarrer.setElements(resource.getProject().members(IResource.FILE));
-      IJarExportRunnable runnable =
-          jarrer.createJarExportRunnable(Display.getDefault()
-              .getActiveShell());
-      runnable.run(monitor);
-
-      this.jarFile = jarFile;
-
-    } catch (Exception e) {
-      e.printStackTrace();
-      throw new RuntimeException(e);
-    }
-  }
-
-  /**
-   * Allow the retrieval of the resulting JAR file
-   * 
-   * @return the generated JAR file
-   */
-  public File getJarFile() {
-    return this.jarFile;
-  }
-
-  /**
-   * Static helper to create a JAR package for the given resource while
-   * showing a progress bar
-   * 
-   * @param resource the resource to package
-   * @return the created JAR file, or null on failure
-   */
-  public static File createJarPackage(IResource resource) {
-
-    JarModule jarModule = new JarModule(resource);
-    try {
-      PlatformUI.getWorkbench().getProgressService().run(false, true,
-          jarModule);
-
-    } catch (Exception e) {
-      e.printStackTrace();
-      return null;
-    }
-
-    File jarFile = jarModule.getJarFile();
-    if (jarFile == null) {
-      ErrorMessageDialog.display("Run on Hadoop",
-          "Unable to create or locate the JAR file for the Job");
-      return null;
-    }
-
-    return jarFile;
-  }
-
-}
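
Callers typically go through the static helper rather than driving the runnable themselves. A minimal sketch, where selected is an assumed IResource pointing at the driver class:

  import java.io.File;
  import org.apache.hdt.core.cluster.utils.JarModule;

  // createJarPackage() runs the export with a progress bar and returns null
  // (after popping an error dialog) if the JAR could not be built.
  File jar = JarModule.createJarPackage(selected);
  if (jar != null) {
    System.out.println("JAR ready at " + jar.getAbsolutePath());
  }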

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/cd29c94d/org.apache.hdt.ui/src/org/apache/hdt/ui/dialogs/ErrorMessageDialog.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/dialogs/ErrorMessageDialog.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/dialogs/ErrorMessageDialog.java
deleted file mode 100644
index bb0137a..0000000
--- a/org.apache.hdt.ui/src/org/apache/hdt/ui/dialogs/ErrorMessageDialog.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hdt.ui.dialogs;
-
-import org.eclipse.jface.dialogs.MessageDialog;
-import org.eclipse.swt.widgets.Display;
-
-/**
- * Error dialog helper
- */
-public class ErrorMessageDialog {
-
-  public static void display(final String title, final String message) {
-    Display.getDefault().syncExec(new Runnable() {
-
-      public void run() {
-        MessageDialog.openError(Display.getDefault().getActiveShell(),
-            title, message);
-      }
-
-    });
-  }
-
-  public static void display(Exception e) {
-    display("An exception has occured!", "Exception description:\n"
-        + e.getLocalizedMessage());
-  }
-
-}
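
Both entry points funnel through the SWT display thread, so they can be called from background jobs. A short usage sketch (riskyOperation is a stand-in for any code that may throw):

  import org.apache.hdt.core.dialogs.ErrorMessageDialog;

  ErrorMessageDialog.display("Run on Hadoop",
      "Unable to create or locate the JAR file for the Job");
  try {
    riskyOperation();              // hypothetical method that may throw
  } catch (Exception e) {
    ErrorMessageDialog.display(e); // generic "exception occurred" dialog
  }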

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/cd29c94d/org.apache.hdt.ui/src/org/apache/hdt/ui/views/ClusterView.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/views/ClusterView.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/views/ClusterView.java
index 6b81b8d..0c53a19 100644
--- a/org.apache.hdt.ui/src/org/apache/hdt/ui/views/ClusterView.java
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/views/ClusterView.java
@@ -20,13 +20,13 @@ package org.apache.hdt.ui.views;
 
 import java.util.Collection;
 
+import org.apache.hdt.core.cluster.HadoopCluster;
+import org.apache.hdt.core.cluster.HadoopJob;
+import org.apache.hdt.core.cluster.IJobListener;
+import org.apache.hdt.core.cluster.utils.JarModule;
 import org.apache.hdt.ui.ImageLibrary;
 import org.apache.hdt.ui.actions.EditLocationAction;
 import org.apache.hdt.ui.actions.NewLocationAction;
-import org.apache.hdt.ui.cluster.HadoopJob;
-import org.apache.hdt.ui.cluster.HadoopCluster;
-import org.apache.hdt.ui.cluster.IJobListener;
-import org.apache.hdt.ui.cluster.utils.JarModule;
 import org.apache.hdt.ui.cluster.IHadoopClusterListener;
 import org.apache.hdt.ui.cluster.ServerRegistry;
 import org.eclipse.jface.action.Action;

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/cd29c94d/org.apache.hdt.ui/src/org/apache/hdt/ui/wizards/HadoopLocationWizard.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/wizards/HadoopLocationWizard.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/wizards/HadoopLocationWizard.java
index 7079c37..8fe9d19 100644
--- a/org.apache.hdt.ui/src/org/apache/hdt/ui/wizards/HadoopLocationWizard.java
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/wizards/HadoopLocationWizard.java
@@ -29,8 +29,8 @@ import java.util.TreeMap;
 import java.util.Map.Entry;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hdt.ui.cluster.ConfProp;
-import org.apache.hdt.ui.cluster.HadoopCluster;
+import org.apache.hdt.core.cluster.ConfProp;
+import org.apache.hdt.core.cluster.HadoopCluster;
 import org.apache.hdt.ui.cluster.ServerRegistry;
 import org.eclipse.jface.dialogs.IMessageProvider;
 import org.eclipse.jface.wizard.WizardPage;
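
With ConfProp and HadoopCluster now imported from core, the wizard's property plumbing boils down to the typed and raw accessors shown earlier. A small sketch (property names and values are illustrative only):

  import org.apache.hdt.core.cluster.ConfProp;
  import org.apache.hdt.core.cluster.HadoopCluster;

  HadoopCluster cluster = new HadoopCluster();
  cluster.setConfProp(ConfProp.PI_JOB_TRACKER_HOST, "localhost");  // typed accessor
  cluster.setConfProp("mapred.job.tracker", "localhost:9001");     // raw accessor
  System.out.println(cluster.getConfProp(ConfProp.PI_JOB_TRACKER_HOST));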