Posted to commits@hdt.apache.org by rs...@apache.org on 2014/06/26 10:36:28 UTC

[05/27] git commit: HDT-41: Provide existing MR functionality - ported Mapper/Reducer/Partitioner/Driver Wizards - ported Image lookup - ported Map-reduce project wizard - using runtimes from specified Hadoop location rather than runtime jars packed in plugin

HDT-41: Provide existing MR functionality
- ported Mapper/Reducer/Partitioner/Driver Wizards
- ported Image lookup
- ported Map-reduce project wizard
- using runtimes from the specified Hadoop location rather than runtime jars packed in the plugin
- ported 'Run On Hadoop'
- ported Hadoop location preference at Window->Preferences->Hadoop
- ported clusterView
- Modified hadoop perspective to contain clusterView


Project: http://git-wip-us.apache.org/repos/asf/incubator-hdt/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hdt/commit/29467b54
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hdt/tree/29467b54
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hdt/diff/29467b54

Branch: refs/heads/hadoop-eclipse-merge
Commit: 29467b54a03846a50d1a80ad7cd362c11da4616c
Parents: 300cf8b
Author: Rahul Sharma <rs...@apache.org>
Authored: Mon Apr 21 13:38:04 2014 +0530
Committer: Rahul Sharma <rs...@apache.org>
Committed: Fri May 9 09:45:25 2014 +0530

----------------------------------------------------------------------
 org.apache.hdt.core/.classpath                  |   2 +-
 org.apache.hdt.core/META-INF/MANIFEST.MF        |  15 +-
 org.apache.hdt.core/plugin.xml                  |  12 +
 ...org.apache.hadoop.eclipse.hadoopCluster.exsd | 126 +++
 .../hdt/core/launch/AbstractHadoopCluster.java  |  84 ++
 .../org/apache/hdt/core/launch/ConfProp.java    | 133 +++
 .../hdt/core/launch/ErrorMessageDialog.java     |  43 +
 .../hdt/core/launch/IHadoopClusterListener.java |  26 +
 .../org/apache/hdt/core/launch/IHadoopJob.java  |  34 +
 .../org/apache/hdt/core/launch/IJarModule.java  |  41 +
 .../apache/hdt/core/launch/IJobListener.java    |  36 +
 .../hdt/core/natures/MapReduceNature.java       | 135 +++
 .../META-INF/MANIFEST.MF                        |  57 +-
 org.apache.hdt.hadoop.release/build.properties  |   8 +-
 org.apache.hdt.hadoop.release/fragment.xml      |   7 +
 org.apache.hdt.hadoop.release/pom.xml           |  47 +-
 .../hdt/hadoop/release/HadoopCluster.java       | 564 +++++++++++
 .../apache/hdt/hadoop/release/HadoopJob.java    | 342 +++++++
 org.apache.hdt.ui/META-INF/MANIFEST.MF          |   5 +
 org.apache.hdt.ui/plugin.xml                    | 104 ++-
 .../src/org/apache/hdt/ui/ImageLibrary.java     | 251 +++++
 .../launch/HadoopApplicationLaunchShortcut.java | 130 +++
 .../internal/launch/HadoopLocationWizard.java   | 925 +++++++++++++++++++
 ...adoopServerSelectionListContentProvider.java |  76 ++
 .../hdt/ui/internal/launch/JarModule.java       | 146 +++
 .../ui/internal/launch/RunOnHadoopWizard.java   | 346 +++++++
 .../hdt/ui/internal/launch/ServerRegistry.java  | 200 ++++
 .../apache/hdt/ui/internal/mr/ClusterView.java  | 450 +++++++++
 .../hdt/ui/internal/mr/EditLocationAction.java  |  72 ++
 .../hdt/ui/internal/mr/NewDriverWizard.java     |  99 ++
 .../hdt/ui/internal/mr/NewDriverWizardPage.java | 264 ++++++
 .../hdt/ui/internal/mr/NewLocationAction.java   |  63 ++
 .../internal/mr/NewMapReduceProjectWizard.java  | 385 ++++++++
 .../hdt/ui/internal/mr/NewMapperWizard.java     | 167 ++++
 .../ui/internal/mr/NewPartitionerWizard.java    | 194 ++++
 .../hdt/ui/internal/mr/NewReducerWizard.java    | 175 ++++
 .../ui/preferences/MapReducePreferencePage.java |  64 ++
 .../hdt/ui/preferences/PreferenceConstants.java |  34 +
 38 files changed, 5814 insertions(+), 48 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/29467b54/org.apache.hdt.core/.classpath
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/.classpath b/org.apache.hdt.core/.classpath
index 4a37a3a..4a91e22 100644
--- a/org.apache.hdt.core/.classpath
+++ b/org.apache.hdt.core/.classpath
@@ -2,7 +2,7 @@
 <classpath>
 	<classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-1.6"/>
 	<classpathentry kind="con" path="org.eclipse.pde.core.requiredPlugins"/>
-	<classpathentry kind="src" path="src/"/>
+	<classpathentry kind="src" path="src"/>
 	<classpathentry exported="true" kind="lib" path="jars/log4j-1.2.15.jar"/>
 	<classpathentry kind="output" path="target/classes"/>
 </classpath>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/29467b54/org.apache.hdt.core/META-INF/MANIFEST.MF
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/META-INF/MANIFEST.MF b/org.apache.hdt.core/META-INF/MANIFEST.MF
index 6234625..1d6b8c4 100644
--- a/org.apache.hdt.core/META-INF/MANIFEST.MF
+++ b/org.apache.hdt.core/META-INF/MANIFEST.MF
@@ -8,12 +8,15 @@ Require-Bundle: org.eclipse.core.runtime,
  org.eclipse.core.filesystem;bundle-version="1.3.0";visibility:=reexport,
  org.eclipse.core.resources;bundle-version="3.6.0",
  org.eclipse.emf.ecore;bundle-version="2.6.1";visibility:=reexport,
- org.eclipse.team.core;bundle-version="3.5.100"
+ org.eclipse.jdt.core,
+ org.eclipse.team.core;bundle-version="3.5.100",
+ org.eclipse.swt,
+ org.eclipse.jface
 Bundle-RequiredExecutionEnvironment: JavaSE-1.6
 Bundle-Vendor: Apache Hadoop
 Bundle-ClassPath: .,
  jars/log4j-1.2.15.jar
-Export-Package: org.apache.hdt.core,
+Export-Package:  org.apache.hdt.core,
  org.apache.hdt.core.hdfs,
  org.apache.hdt.core.internal,
  org.apache.hdt.core.internal.hdfs;x-friends:="org.apache.hdt.ui",
@@ -21,6 +24,8 @@ Export-Package: org.apache.hdt.core,
  org.apache.hdt.core.internal.model.impl,
  org.apache.hdt.core.internal.model.util,
  org.apache.hdt.core.internal.zookeeper,
+ org.apache.hdt.core.launch,
+ org.apache.hdt.core.natures,
  org.apache.hdt.core.zookeeper,
  org.apache.log4j,
  org.apache.log4j.chainsaw,
@@ -40,5 +45,9 @@ Export-Package: org.apache.hdt.core,
  org.apache.log4j.or.sax,
  org.apache.log4j.spi,
  org.apache.log4j.varia,
- org.apache.log4j.xml
+ org.apache.log4j.xml,
+ org.apache.hadoop,
+ org.apache.hadoop.conf,
+ org.apache.hadoop.io,
+ org.apache.hadoop.mapred
 Bundle-ActivationPolicy: lazy

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/29467b54/org.apache.hdt.core/plugin.xml
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/plugin.xml b/org.apache.hdt.core/plugin.xml
index 82dcbec..94f3d49 100644
--- a/org.apache.hdt.core/plugin.xml
+++ b/org.apache.hdt.core/plugin.xml
@@ -19,6 +19,8 @@
 <plugin>
    <extension-point id="org.apache.hdt.core.hdfsClient" name="Apache Hadoop HDFS Client" schema="schema/org.apache.hadoop.eclipse.hdfsclient.exsd"/>
    <extension-point id="org.apache.hdt.core.zookeeperClient" name="Apache Hadoop ZooKeeper Client" schema="schema/org.apache.hadoop.eclipse.zookeeperClient.exsd"/>
+   <extension-point id="org.apache.hdt.core.hadoopCluster" name="Apache Hadoop Cluster" schema="schema/org.apache.hadoop.eclipse.hadoopCluster.exsd"/>
+   
    <extension
          id="org.apache.hadoop.hdfs.filesystem"
          name="Apache Hadoop HDFS"
@@ -39,5 +41,15 @@
             id="org.apache.hadoop.hdfs">
       </repository>
    </extension>
+    <extension
+         id="org.apache.hdt.mrnature"
+         name="MapReduce Nature"
+         point="org.eclipse.core.resources.natures">
+      <runtime>
+         <run
+               class="org.apache.hdt.core.natures.MapReduceNature">
+         </run>
+      </runtime>
+   </extension>
 
 </plugin>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/29467b54/org.apache.hdt.core/schema/org.apache.hadoop.eclipse.hadoopCluster.exsd
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/schema/org.apache.hadoop.eclipse.hadoopCluster.exsd b/org.apache.hdt.core/schema/org.apache.hadoop.eclipse.hadoopCluster.exsd
new file mode 100644
index 0000000..72d3899
--- /dev/null
+++ b/org.apache.hdt.core/schema/org.apache.hadoop.eclipse.hadoopCluster.exsd
@@ -0,0 +1,126 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<schema targetNamespace="org.apache.hdt.core" xmlns="http://www.w3.org/2001/XMLSchema">
+<annotation>
+      <appinfo>
+         <meta.schema plugin="org.apache.hdt.core" id="org.apache.hdt.core.hadoopCluster" name="Apache Hadoop Cluster"/>
+      </appinfo>
+      <documentation>
+         [Enter description of this extension point.]
+      </documentation>
+   </annotation>
+
+   <element name="extension">
+      <annotation>
+         <appinfo>
+            <meta.element />
+         </appinfo>
+      </annotation>
+      <complexType>
+         <choice>
+            <sequence>
+               <element ref="hadoopCluster" minOccurs="0" maxOccurs="unbounded"/>
+            </sequence>
+         </choice>
+         <attribute name="point" type="string" use="required">
+            <annotation>
+               <documentation>
+                  
+               </documentation>
+            </annotation>
+         </attribute>
+         <attribute name="id" type="string">
+            <annotation>
+               <documentation>
+                  
+               </documentation>
+            </annotation>
+         </attribute>
+         <attribute name="name" type="string">
+            <annotation>
+               <documentation>
+                  
+               </documentation>
+               <appinfo>
+                  <meta.attribute translatable="true"/>
+               </appinfo>
+            </annotation>
+         </attribute>
+      </complexType>
+   </element>
+
+   <element name="hadoopCluster">
+      <complexType>
+         <attribute name="class" type="string" use="required">
+            <annotation>
+               <documentation>
+                  
+               </documentation>
+               <appinfo>
+                  <meta.attribute kind="java" basedOn="org.apache.hdt.core.launch.AbstractHadoopCluster:"/>
+               </appinfo>
+            </annotation>
+         </attribute>
+         <attribute name="protocolVersion" type="string" use="required">
+            <annotation>
+               <documentation>
+                  
+               </documentation>
+            </annotation>
+         </attribute>
+      </complexType>
+   </element>
+
+   <annotation>
+      <appinfo>
+         <meta.section type="since"/>
+      </appinfo>
+      <documentation>
+         [Enter the first release in which this extension point appears.]
+      </documentation>
+   </annotation>
+
+   <annotation>
+      <appinfo>
+         <meta.section type="examples"/>
+      </appinfo>
+      <documentation>
+         [Enter extension point usage example here.]
+      </documentation>
+   </annotation>
+
+   <annotation>
+      <appinfo>
+         <meta.section type="apiinfo"/>
+      </appinfo>
+      <documentation>
+         [Enter API information here.]
+      </documentation>
+   </annotation>
+
+   <annotation>
+      <appinfo>
+         <meta.section type="implementation"/>
+      </appinfo>
+      <documentation>
+         [Enter information about supplied implementation of this extension point.]
+      </documentation>
+   </annotation>
+
+
+</schema>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/29467b54/org.apache.hdt.core/src/org/apache/hdt/core/launch/AbstractHadoopCluster.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/launch/AbstractHadoopCluster.java b/org.apache.hdt.core/src/org/apache/hdt/core/launch/AbstractHadoopCluster.java
new file mode 100644
index 0000000..e5f7dd4
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/launch/AbstractHadoopCluster.java
@@ -0,0 +1,84 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.core.launch;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.Map.Entry;
+
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.core.runtime.IConfigurationElement;
+import org.eclipse.core.runtime.Platform;
+
+public abstract class AbstractHadoopCluster {
+
+	abstract public String getLocationName();
+
+	abstract public void dispose();
+
+	abstract public void storeSettingsToFile(File file) throws IOException;
+
+	abstract public void saveConfiguration(File confDir, String jarFilePath) throws IOException;
+
+	abstract public String getMasterHostName();
+
+	abstract public void setLocationName(String string);
+
+	abstract public void load(AbstractHadoopCluster server);
+
+	abstract public String getConfProp(String propName);
+
+	abstract public String getConfProp(ConfProp prop);
+
+	abstract public void setConfProp(ConfProp prop, String propValue);
+
+	abstract public void setConfProp(String propName, String propValue);
+
+	abstract public Iterator<Entry<String, String>> getConfiguration();
+
+	abstract public void purgeJob(IHadoopJob job);
+
+	abstract public void addJobListener(IJobListener jobListener);
+
+	abstract public Collection<? extends IHadoopJob> getJobs();
+
+	abstract public String getState();
+
+	abstract public boolean loadFromXML(File file) throws IOException;
+
+	public static AbstractHadoopCluster createCluster(File file) throws CoreException, IOException {
+		AbstractHadoopCluster hadoopCluster = createCluster();
+		hadoopCluster.loadFromXML(file);
+		return hadoopCluster;
+	}
+
+	public static AbstractHadoopCluster createCluster() throws CoreException {
+		IConfigurationElement[] elementsFor = Platform.getExtensionRegistry().getConfigurationElementsFor("org.apache.hdt.core.hadoopCluster");
+		return (AbstractHadoopCluster) elementsFor[0].createExecutableExtension("class");
+	}
+
+	public static AbstractHadoopCluster createCluster(AbstractHadoopCluster existing) throws CoreException {
+		AbstractHadoopCluster hadoopCluster = createCluster();
+		hadoopCluster.load(existing);
+		return hadoopCluster;
+	}
+
+}

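For reference, a client obtains a concrete cluster through this factory; the extension registry hands back whichever implementation a fragment contributed (the 1.1 implementation is registered in fragment.xml further down). A minimal sketch, assuming it runs inside the workbench so the registry is available; the class name and property values are illustrative:

    import java.io.File;
    import java.io.IOException;
    import org.apache.hdt.core.launch.AbstractHadoopCluster;
    import org.apache.hdt.core.launch.ConfProp;
    import org.eclipse.core.runtime.CoreException;

    public class CreateClusterExample {
        public static void main(String[] args) throws CoreException, IOException {
            // Resolves the "class" attribute of the first contributed
            // org.apache.hdt.core.hadoopCluster extension.
            AbstractHadoopCluster cluster = AbstractHadoopCluster.createCluster();
            cluster.setLocationName("dev-cluster");
            cluster.setConfProp(ConfProp.PI_JOB_TRACKER_HOST, "jt.example.org");
            // Writes the same XML that createCluster(File) loads back.
            cluster.storeSettingsToFile(new File("/tmp/dev-cluster.xml"));
        }
    }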
http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/29467b54/org.apache.hdt.core/src/org/apache/hdt/core/launch/ConfProp.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/launch/ConfProp.java b/org.apache.hdt.core/src/org/apache/hdt/core/launch/ConfProp.java
new file mode 100644
index 0000000..538eb75
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/launch/ConfProp.java
@@ -0,0 +1,133 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.core.launch;
+
+import java.util.HashMap;
+import java.util.Map;
+
+public enum ConfProp {
+	/**
+	 * Property name for the Hadoop location name
+	 */
+	PI_LOCATION_NAME(true, "location.name", "New Hadoop location"),
+
+	/**
+	 * Property name for the master host name (the Job tracker)
+	 */
+	PI_JOB_TRACKER_HOST(true, "jobtracker.host", "localhost"),
+
+	/**
+	 * Property name for the DFS master host name (the Name node)
+	 */
+	PI_NAME_NODE_HOST(true, "namenode.host", "localhost"),
+
+	/**
+	 * Property name for the installation directory on the master node
+	 */
+	// PI_INSTALL_DIR(true, "install.dir", "/dir/hadoop-version/"),
+	/**
+	 * User name to use for Hadoop operations
+	 */
+	PI_USER_NAME(true, "user.name", System.getProperty("user.name", "who are you?")),
+
+	/**
+	 * Property name for SOCKS proxy activation
+	 */
+	PI_SOCKS_PROXY_ENABLE(true, "socks.proxy.enable", "no"),
+
+	/**
+	 * Property name for the SOCKS proxy host
+	 */
+	PI_SOCKS_PROXY_HOST(true, "socks.proxy.host", "host"),
+
+	/**
+	 * Property name for the SOCKS proxy port
+	 */
+	PI_SOCKS_PROXY_PORT(true, "socks.proxy.port", "1080"),
+
+	/**
+	 * TCP port number for the name node
+	 */
+	PI_NAME_NODE_PORT(true, "namenode.port", "50040"),
+
+	/**
+	 * TCP port number for the job tracker
+	 */
+	PI_JOB_TRACKER_PORT(true, "jobtracker.port", "50020"),
+
+	/**
+	 * Are the Map/Reduce and the Distributed FS masters hosted on the same
+	 * machine?
+	 */
+	PI_COLOCATE_MASTERS(true, "masters.colocate", "yes"),
+
+	/**
+	 * Property name for naming the job tracker (URI). This property is related
+	 * to {@link #PI_JOB_TRACKER_HOST}
+	 */
+	JOB_TRACKER_URI(false, "mapreduce.jobtracker.address", "localhost:50020"),
+
+	/**
+	 * Property name for naming the default file system (URI).
+	 */
+	FS_DEFAULT_URI(false, "fs.default.name", "hdfs://localhost:50040/"),
+
+	/**
+	 * Property name for the default socket factory:
+	 */
+	SOCKET_FACTORY_DEFAULT(false, "hadoop.rpc.socket.factory.class.default", "org.apache.hadoop.net.StandardSocketFactory"),
+
+	/**
+	 * Property name for the SOCKS server URI.
+	 */
+	SOCKS_SERVER(false, "hadoop.socks.server", "host:1080"),
+
+	;
+
+	/**
+	 * Map <property name> -> ConfProp
+	 */
+	private static Map<String, ConfProp> map;
+
+	private static synchronized void registerProperty(String name, ConfProp prop) {
+
+		if (ConfProp.map == null)
+			ConfProp.map = new HashMap<String, ConfProp>();
+
+		ConfProp.map.put(name, prop);
+	}
+
+	public static ConfProp getByName(String propName) {
+		return map.get(propName);
+	}
+
+	public final String name;
+
+	public final String defVal;
+
+	ConfProp(boolean internal, String name, String defVal) {
+		if (internal)
+			name = "eclipse.plug-in." + name;
+		this.name = name;
+		this.defVal = defVal;
+
+		ConfProp.registerProperty(name, this);
+	}
+
+}

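The boolean passed to the constructor marks a property as plug-in-internal; internal names get the eclipse.plug-in. prefix so they can share one Hadoop Configuration with genuine Hadoop keys. A small illustration (hypothetical class name):

    import org.apache.hdt.core.launch.ConfProp;

    public class ConfPropExample {
        public static void main(String[] args) {
            // Plug-in-internal property: stored under a prefixed key.
            System.out.println(ConfProp.PI_LOCATION_NAME.name);
            // -> eclipse.plug-in.location.name

            // Hadoop-visible property: keeps its native key.
            System.out.println(ConfProp.FS_DEFAULT_URI.name);
            // -> fs.default.name

            // Reverse lookup uses the registered (prefixed) key.
            System.out.println(ConfProp.getByName("eclipse.plug-in.location.name")
                    == ConfProp.PI_LOCATION_NAME); // true
        }
    }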
http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/29467b54/org.apache.hdt.core/src/org/apache/hdt/core/launch/ErrorMessageDialog.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/launch/ErrorMessageDialog.java b/org.apache.hdt.core/src/org/apache/hdt/core/launch/ErrorMessageDialog.java
new file mode 100644
index 0000000..82b6d10
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/launch/ErrorMessageDialog.java
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.core.launch;
+
+import org.eclipse.jface.dialogs.MessageDialog;
+import org.eclipse.swt.widgets.Display;
+
+/**
+ * Error dialog helper
+ */
+public class ErrorMessageDialog {
+
+	public static void display(final String title, final String message) {
+		Display.getDefault().syncExec(new Runnable() {
+
+			public void run() {
+				MessageDialog.openError(Display.getDefault().getActiveShell(), title, message);
+			}
+
+		});
+	}
+
+	public static void display(Exception e) {
+		display("An exception has occured!", "Exception description:\n" + e.getLocalizedMessage());
+	}
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/29467b54/org.apache.hdt.core/src/org/apache/hdt/core/launch/IHadoopClusterListener.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/launch/IHadoopClusterListener.java b/org.apache.hdt.core/src/org/apache/hdt/core/launch/IHadoopClusterListener.java
new file mode 100644
index 0000000..e403c57
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/launch/IHadoopClusterListener.java
@@ -0,0 +1,26 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.core.launch;
+
+/**
+ * Interface for monitoring server changes
+ */
+public interface IHadoopClusterListener {
+	void serverChanged(AbstractHadoopCluster location, int type);
+}

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/29467b54/org.apache.hdt.core/src/org/apache/hdt/core/launch/IHadoopJob.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/launch/IHadoopJob.java b/org.apache.hdt.core/src/org/apache/hdt/core/launch/IHadoopJob.java
new file mode 100644
index 0000000..0b58699
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/launch/IHadoopJob.java
@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.core.launch;
+
+public interface IHadoopJob {
+
+	boolean isCompleted();
+
+	AbstractHadoopCluster getLocation();
+
+	String getJobID();
+
+	void kill();
+
+	String getStatus();
+
+	String getState();
+}

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/29467b54/org.apache.hdt.core/src/org/apache/hdt/core/launch/IJarModule.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/launch/IJarModule.java b/org.apache.hdt.core/src/org/apache/hdt/core/launch/IJarModule.java
new file mode 100644
index 0000000..0af6c9f
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/launch/IJarModule.java
@@ -0,0 +1,41 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.core.launch;
+
+import java.io.File;
+
+import org.eclipse.jface.operation.IRunnableWithProgress;
+
+/**
+ * Methods for interacting with the jar file containing the
+ * Mapper/Reducer/Driver classes for a MapReduce job.
+ */
+
+public interface IJarModule extends IRunnableWithProgress {
+
+	String getName();
+
+	/**
+	 * Allow the retrieval of the resulting JAR file
+	 * 
+	 * @return the generated JAR file
+	 */
+	File getJarFile();
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/29467b54/org.apache.hdt.core/src/org/apache/hdt/core/launch/IJobListener.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/launch/IJobListener.java b/org.apache.hdt.core/src/org/apache/hdt/core/launch/IJobListener.java
new file mode 100644
index 0000000..4dc3bc5
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/launch/IJobListener.java
@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.core.launch;
+
+/**
+ * Interface for updating/adding jobs to the MapReduce Server view.
+ */
+public interface IJobListener {
+
+	void jobChanged(IHadoopJob job);
+
+	void jobAdded(IHadoopJob job);
+
+	void jobRemoved(IHadoopJob job);
+
+	void publishStart(IJarModule jar);
+
+	void publishDone(IJarModule jar);
+
+}

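A consumer such as the ported cluster view registers one of these on a location to hear about job and publish events. A minimal logging implementation might look like the sketch below (hypothetical class, attached with cluster.addJobListener(...)):

    import org.apache.hdt.core.launch.IHadoopJob;
    import org.apache.hdt.core.launch.IJarModule;
    import org.apache.hdt.core.launch.IJobListener;

    public class LoggingJobListener implements IJobListener {
        public void jobAdded(IHadoopJob job)     { System.out.println("added: " + job.getJobID()); }
        public void jobChanged(IHadoopJob job)   { System.out.println("changed: " + job.getJobID() + " -> " + job.getStatus()); }
        public void jobRemoved(IHadoopJob job)   { System.out.println("removed: " + job.getJobID()); }
        public void publishStart(IJarModule jar) { System.out.println("publishing " + jar.getName()); }
        public void publishDone(IJarModule jar)  { System.out.println("published " + jar.getJarFile()); }
    }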
http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/29467b54/org.apache.hdt.core/src/org/apache/hdt/core/natures/MapReduceNature.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/natures/MapReduceNature.java b/org.apache.hdt.core/src/org/apache/hdt/core/natures/MapReduceNature.java
new file mode 100644
index 0000000..e93ee9a
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/natures/MapReduceNature.java
@@ -0,0 +1,135 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.core.natures;
+
+import java.io.File;
+import java.io.FilenameFilter;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.apache.hdt.core.Activator;
+import org.eclipse.core.resources.IProject;
+import org.eclipse.core.resources.IProjectNature;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.core.runtime.NullProgressMonitor;
+import org.eclipse.core.runtime.Path;
+import org.eclipse.core.runtime.QualifiedName;
+import org.eclipse.jdt.core.IClasspathEntry;
+import org.eclipse.jdt.core.IJavaProject;
+import org.eclipse.jdt.core.JavaCore;
+
+/**
+ * Class to configure and deconfigure an Eclipse project with the MapReduce
+ * project nature.
+ */
+
+public class MapReduceNature implements IProjectNature {
+
+	public static final String ID = "org.apache.hdt.mrnature";
+
+	private IProject project;
+
+	static Logger log = Logger.getLogger(MapReduceNature.class.getName());
+
+	/**
+	 * Configures an Eclipse project as a Map/Reduce project by adding the
+	 * Hadoop libraries to a project's classpath.
+	 */
+	/*
+	 * TODO Versioning connector needed here
+	 */
+	public void configure() throws CoreException {
+
+		String hadoopHomePath = project.getPersistentProperty(new QualifiedName(Activator.BUNDLE_ID, "hadoop.runtime.path"));
+		File hadoopHome = new Path(hadoopHomePath).toFile();
+		File hadoopLib = new File(hadoopHome, "lib");
+
+		final ArrayList<File> coreJars = new ArrayList<File>();
+		coreJars.addAll(getJarFiles(hadoopHome));
+		coreJars.addAll(getJarFiles(hadoopLib));
+
+		// Add Hadoop libraries onto classpath
+		IJavaProject javaProject = JavaCore.create(getProject());
+		// Bundle bundle = Activator.getDefault().getBundle();
+		try {
+			IClasspathEntry[] currentCp = javaProject.getRawClasspath();
+			IClasspathEntry[] newCp = new IClasspathEntry[currentCp.length + coreJars.size()];
+			System.arraycopy(currentCp, 0, newCp, 0, currentCp.length);
+
+			final Iterator<File> i = coreJars.iterator();
+			int count = 0;
+			while (i.hasNext()) {
+				// for (int i = 0; i < s_coreJarNames.length; i++) {
+
+				final File f = (File) i.next();
+				// URL url = FileLocator.toFileURL(FileLocator.find(bundle, new
+				// Path("lib/" + s_coreJarNames[i]), null));
+				URL url = f.toURI().toURL();
+				log.finer("hadoop library url.getPath() = " + url.getPath());
+
+				newCp[newCp.length - 1 - count] = JavaCore.newLibraryEntry(new Path(url.getPath()), null, null);
+				count++;
+			}
+
+			javaProject.setRawClasspath(newCp, new NullProgressMonitor());
+		} catch (Exception e) {
+			log.log(Level.SEVERE, "Exception generated in " + this.getClass().getCanonicalName(), e);
+		}
+	}
+
+	private ArrayList<File> getJarFiles(File hadoopHome) {
+		FilenameFilter jarFileFilter = new FilenameFilter() {
+			@Override
+			public boolean accept(File dir, String name) {
+				return name.endsWith(".jar");
+			}
+		};
+		final ArrayList<File> jars = new ArrayList<File>();
+		for (String hadoopCoreLibFileName : hadoopHome.list(jarFileFilter)) {
+			jars.add(new File(hadoopHome, hadoopCoreLibFileName));
+		}
+		return jars;
+	}
+
+	/**
+	 * Deconfigure a project from MapReduce status. Currently unimplemented.
+	 */
+	public void deconfigure() throws CoreException {
+		// TODO Auto-generated method stub
+	}
+
+	/**
+	 * Returns the project to which this project nature applies.
+	 */
+	public IProject getProject() {
+		return this.project;
+	}
+
+	/**
+	 * Sets the project to which this nature applies. Used when instantiating
+	 * this project nature runtime.
+	 */
+	public void setProject(IProject project) {
+		this.project = project;
+	}
+
+}

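configure() runs when the nature is attached to a project. The New Map/Reduce project wizard presumably wires this up through the standard workspace API, roughly as sketched below; note that configure() reads the hadoop.runtime.path persistent property, so that has to be set first (the path shown is illustrative):

    import org.apache.hdt.core.Activator;
    import org.apache.hdt.core.natures.MapReduceNature;
    import org.eclipse.core.resources.IProject;
    import org.eclipse.core.resources.IProjectDescription;
    import org.eclipse.core.runtime.CoreException;
    import org.eclipse.core.runtime.NullProgressMonitor;
    import org.eclipse.core.runtime.QualifiedName;

    public class NatureInstaller {
        public static void addMapReduceNature(IProject project) throws CoreException {
            // Must precede the nature assignment: configure() reads this property.
            project.setPersistentProperty(
                    new QualifiedName(Activator.BUNDLE_ID, "hadoop.runtime.path"),
                    "/opt/hadoop-1.1.2");

            IProjectDescription desc = project.getDescription();
            String[] natures = desc.getNatureIds();
            String[] newNatures = new String[natures.length + 1];
            System.arraycopy(natures, 0, newNatures, 0, natures.length);
            newNatures[natures.length] = MapReduceNature.ID;
            desc.setNatureIds(newNatures);
            // Triggers MapReduceNature.configure() on this project.
            project.setDescription(desc, new NullProgressMonitor());
        }
    }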
http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/29467b54/org.apache.hdt.hadoop.release/META-INF/MANIFEST.MF
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop.release/META-INF/MANIFEST.MF b/org.apache.hdt.hadoop.release/META-INF/MANIFEST.MF
index 005ca9b..db5e83c 100644
--- a/org.apache.hdt.hadoop.release/META-INF/MANIFEST.MF
+++ b/org.apache.hdt.hadoop.release/META-INF/MANIFEST.MF
@@ -7,14 +7,55 @@ Bundle-Vendor: Apache Hadoop
 Fragment-Host: org.apache.hdt.core
 Bundle-RequiredExecutionEnvironment: JavaSE-1.6
 Bundle-ClassPath: .,
- jars/log4j-1.2.15.jar,
- jars/slf4j-api-1.6.1.jar,
+ jars/zookeeper-3.4.5.jar,
  jars/slf4j-log4j12-1.6.1.jar,
- jars/commons-configuration-1.6.jar,
- jars/commons-lang-2.4.jar,
- jars/commons-logging-1.1.1.jar,
- jars/hadoop-client-1.1.2.jar,
+ jars/slf4j-api-1.6.1.jar,
+ jars/log4j-1.2.15.jar,
+ jars/xmlenc-0.52.jar,
+ jars/stax-api-1.0-2.jar,
+ jars/stax-api-1.0.1.jar,
+ jars/servlet-api-2.5-6.1.14.jar,
+ jars/servlet-api-2.5-20081211.jar,
+ jars/oro-2.0.8.jar,
+ jars/junit-4.11.jar,
+ jars/jsp-api-2.1-6.1.14.jar,
+ jars/jsp-2.1-6.1.14.jar,
+ jars/jetty-util-6.1.26.jar,
+ jars/jetty-6.1.26.jar,
+ jars/jettison-1.1.jar,
+ jars/jets3t-0.6.1.jar,
+ jars/jersey-server-1.8.jar,
+ jars/jersey-json-1.8.jar,
+ jars/jersey-core-1.8.jar,
+ jars/jaxb-impl-2.2.3-1.jar,
+ jars/jaxb-api-2.2.2.jar,
+ jars/jasper-runtime-5.5.12.jar,
+ jars/jasper-compiler-5.5.12.jar,
+ jars/jackson-xc-1.7.1.jar,
+ jars/jackson-mapper-asl-1.8.8.jar,
+ jars/jackson-jaxrs-1.7.1.jar,
+ jars/jackson-core-asl-1.7.1.jar,
+ jars/hsqldb-1.8.0.10.jar,
+ jars/hamcrest-core-1.3.jar,
  jars/hadoop-core-1.1.2.jar,
- jars/hadoop-test-1.1.2.jar,
+ jars/core-3.1.1.jar,
+ jars/commons-net-1.4.1.jar,
+ jars/commons-math-2.1.jar,
+ jars/commons-logging-1.1.1.jar,
+ jars/commons-lang-2.4.jar,
+ jars/commons-io-2.1.jar,
+ jars/commons-httpclient-3.0.1.jar,
+ jars/commons-el-1.0.jar,
+ jars/commons-digester-1.8.jar,
+ jars/commons-configuration-1.6.jar,
+ jars/commons-collections-3.2.1.jar,
+ jars/commons-codec-1.4.jar,
+ jars/commons-cli-1.2.jar,
+ jars/commons-beanutils-core-1.8.0.jar,
+ jars/commons-beanutils-1.7.0.jar,
+ jars/asm-3.1.jar,
+ jars/ant-1.6.5.jar,
+ jars/activation-1.1.jar,
  jars/hadoop-tools-1.1.2.jar,
- jars/zookeeper-3.4.5.jar
+ jars/hadoop-test-1.1.2.jar,
+ jars/hadoop-client-1.1.2.jar

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/29467b54/org.apache.hdt.hadoop.release/build.properties
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop.release/build.properties b/org.apache.hdt.hadoop.release/build.properties
index 6d99810..4c1d15a 100644
--- a/org.apache.hdt.hadoop.release/build.properties
+++ b/org.apache.hdt.hadoop.release/build.properties
@@ -20,10 +20,4 @@ output.. = bin/
 bin.includes = META-INF/,\
                .,\
                fragment.xml,\
-               jars/,\
-               jars/slf4j-api-1.6.1.jar,\
-               jars/slf4j-log4j12-1.6.1.jar,\
-               jars/commons-configuration-1.6.jar,\
-               jars/commons-lang-2.4.jar,\
-               jars/commons-logging-1.1.1.jar,\
-               jars/log4j-1.2.15.jar
+               jars/

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/29467b54/org.apache.hdt.hadoop.release/fragment.xml
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop.release/fragment.xml b/org.apache.hdt.hadoop.release/fragment.xml
index 1b11581..729d38f 100644
--- a/org.apache.hdt.hadoop.release/fragment.xml
+++ b/org.apache.hdt.hadoop.release/fragment.xml
@@ -32,5 +32,12 @@
             protocolVersion="3.4.5">
       </zookeeperClient>
    </extension>
+   <extension
+         point="org.apache.hdt.core.hadoopCluster">
+      <hadoopCluster
+            class="org.apache.hdt.hadoop.release.HadoopCluster"
+            protocolVersion="1.1">
+      </hadoopCluster>
+   </extension>
 
 </fragment>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/29467b54/org.apache.hdt.hadoop.release/pom.xml
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop.release/pom.xml b/org.apache.hdt.hadoop.release/pom.xml
index 69c61f2..fa65ec3 100644
--- a/org.apache.hdt.hadoop.release/pom.xml
+++ b/org.apache.hdt.hadoop.release/pom.xml
@@ -27,6 +27,12 @@ under the License.
   <artifactId>org.apache.hdt.hadoop.release</artifactId>
   <packaging>eclipse-plugin</packaging>
  <name>Apache Hadoop Development Tools Assembly</name>
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-core</artifactId>
+    </dependency>
+  </dependencies>
   <build>
     <sourceDirectory>src</sourceDirectory>
     <plugins>
@@ -36,6 +42,27 @@ under the License.
         <version>2.8</version>
         <executions>
           <execution>
+            <id>copy-dependencies</id>
+            <phase>initialize</phase>
+            <goals>
+              <goal>copy-dependencies</goal>
+            </goals>
+            <configuration>
+              <excludeScope>system</excludeScope>
+              <outputDirectory>${basedir}/jars</outputDirectory>
+              <overWriteReleases>false</overWriteReleases>
+              <overWriteSnapshots>false</overWriteSnapshots>
+              <overWriteIfNewer>true</overWriteIfNewer>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-dependency-plugin</artifactId>
+        <version>2.8</version>
+        <executions>
+          <execution>
             <id>copy</id>
             <phase>initialize</phase>
             <goals>
@@ -50,11 +77,6 @@ under the License.
                 </artifactItem>
                 <artifactItem>
                   <groupId>org.apache.hadoop</groupId>
-                  <artifactId>hadoop-core</artifactId>
-                  <overWrite>false</overWrite>
-                </artifactItem>
-                <artifactItem>
-                  <groupId>org.apache.hadoop</groupId>
                   <artifactId>hadoop-test</artifactId>
                   <overWrite>false</overWrite>
                 </artifactItem>
@@ -83,21 +105,6 @@ under the License.
                   <artifactId>slf4j-log4j12</artifactId>
                   <overWrite>false</overWrite>
                 </artifactItem>
-                <artifactItem>
-	              <groupId>commons-configuration</groupId>
-				  <artifactId>commons-configuration</artifactId>
-				  <overWrite>false</overWrite>
-                </artifactItem>
-                <artifactItem>
-	              <groupId>commons-lang</groupId>
-				  <artifactId>commons-lang</artifactId>
-				  <overWrite>false</overWrite>
-                </artifactItem>
-                <artifactItem>
-	              <groupId>commons-logging</groupId>
-				  <artifactId>commons-logging</artifactId>
-				  <overWrite>false</overWrite>
-                </artifactItem>
               </artifactItems>
               <outputDirectory>${basedir}/jars</outputDirectory>
               <overWriteReleases>false</overWriteReleases>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/29467b54/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopCluster.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopCluster.java b/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopCluster.java
new file mode 100644
index 0000000..daaf990
--- /dev/null
+++ b/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopCluster.java
@@ -0,0 +1,564 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.hadoop.release;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.TreeMap;
+import java.util.logging.Logger;
+
+import javax.xml.parsers.DocumentBuilder;
+import javax.xml.parsers.DocumentBuilderFactory;
+import javax.xml.parsers.ParserConfigurationException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.mapred.JobClient;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.JobID;
+import org.apache.hadoop.mapred.JobStatus;
+import org.apache.hadoop.mapred.RunningJob;
+import org.apache.hdt.core.Activator;
+import org.apache.hdt.core.launch.ConfProp;
+import org.apache.hdt.core.launch.AbstractHadoopCluster;
+import org.apache.hdt.core.launch.IHadoopJob;
+import org.apache.hdt.core.launch.IJarModule;
+import org.apache.hdt.core.launch.IJobListener;
+import org.eclipse.core.runtime.IProgressMonitor;
+import org.eclipse.core.runtime.IStatus;
+import org.eclipse.core.runtime.Status;
+import org.eclipse.core.runtime.jobs.Job;
+import org.eclipse.swt.widgets.Display;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+import org.w3c.dom.Node;
+import org.w3c.dom.NodeList;
+import org.w3c.dom.Text;
+import org.xml.sax.SAXException;
+
+/**
+ * Representation of a Hadoop location, i.e. the master node (NameNode,
+ * JobTracker).
+ * 
+ * <p>
+ * This class does not create any SSH connection anymore. Tunneling must be
+ * set up outside of Eclipse for now (using PuTTY or <tt>ssh -D&lt;port&gt;
+ * &lt;host&gt;</tt>)
+ * 
+ * <p>
+ * <em> TODO </em>
+ * <li>Disable the updater if a location becomes unreachable or fails for too
+ * long
+ * <li>Stop the updater on location's disposal/removal
+ */
+
+public class HadoopCluster extends AbstractHadoopCluster {
+
+	/**
+	 * Frequency of location status observations expressed as the delay in ms
+	 * between each observation
+	 * 
+	 * TODO Add a preference parameter for this
+	 */
+	protected static final long STATUS_OBSERVATION_DELAY = 1500;
+
+	/**
+	 * Background job that periodically polls this location for job status.
+	 */
+	public class LocationStatusUpdater extends Job {
+
+		JobClient client = null;
+
+		/**
+		 * Setup the updater
+		 */
+		public LocationStatusUpdater() {
+			super("Map/Reduce location status updater");
+			this.setSystem(true);
+		}
+
+		/* @inheritDoc */
+		@Override
+		protected IStatus run(IProgressMonitor monitor) {
+			if (client == null) {
+				try {
+					client = HadoopCluster.this.getJobClient();
+
+				} catch (IOException ioe) {
+					client = null;
+					return new Status(Status.ERROR, Activator.BUNDLE_ID, 0, "Cannot connect to the Map/Reduce location: "
+							+ HadoopCluster.this.getLocationName(), ioe);
+				}
+			}
+
+			try {
+				// Set of all known existing Job IDs we want fresh info of
+				Set<JobID> missingJobIds = new HashSet<JobID>(runningJobs.keySet());
+
+				JobStatus[] jstatus = client.jobsToComplete();
+				jstatus = jstatus == null ? new JobStatus[0] : jstatus;
+				for (JobStatus status : jstatus) {
+
+					JobID jobId = status.getJobID();
+					missingJobIds.remove(jobId);
+
+					HadoopJob hJob;
+					synchronized (HadoopCluster.this.runningJobs) {
+						hJob = runningJobs.get(jobId);
+						if (hJob == null) {
+							// Unknown job, create an entry
+							RunningJob running = client.getJob(jobId);
+							hJob = new HadoopJob(HadoopCluster.this, jobId, running, status);
+							newJob(hJob);
+						}
+					}
+
+					// Update HadoopJob with fresh info
+					updateJob(hJob, status);
+				}
+
+				// Ask explicitly for fresh info for these Job IDs
+				for (JobID jobId : missingJobIds) {
+					HadoopJob hJob = runningJobs.get(jobId);
+					if (!hJob.isCompleted())
+						updateJob(hJob, null);
+				}
+
+			} catch (IOException ioe) {
+				client = null;
+				return new Status(Status.ERROR, Activator.BUNDLE_ID, 0, "Cannot retrieve running Jobs on location: " + HadoopCluster.this.getLocationName(),
+						ioe);
+			}
+
+			// Schedule the next observation
+			schedule(STATUS_OBSERVATION_DELAY);
+
+			return Status.OK_STATUS;
+		}
+
+		/**
+		 * Stores the new job and makes it available
+		 * 
+		 * @param data
+		 */
+		private void newJob(final HadoopJob data) {
+			runningJobs.put(data.jobId, data);
+
+			Display.getDefault().asyncExec(new Runnable() {
+				public void run() {
+					fireJobAdded(data);
+				}
+			});
+		}
+
+		/**
+		 * Updates the status of a job
+		 * 
+		 * @param job
+		 *            the job to update
+		 */
+		private void updateJob(final HadoopJob job, JobStatus status) {
+			job.update(status);
+
+			Display.getDefault().asyncExec(new Runnable() {
+				public void run() {
+					fireJobChanged(job);
+				}
+			});
+		}
+
+	}
+
+	static Logger log = Logger.getLogger(HadoopCluster.class.getName());
+
+	/**
+	 * Hadoop configuration of the location. Also contains specific parameters
+	 * for the plug-in. These parameters are prefixed with eclipse.plug-in.*
+	 */
+	private Configuration conf;
+
+	/**
+	 * Jobs listeners
+	 */
+	private Set<IJobListener> jobListeners = new HashSet<IJobListener>();
+
+	/**
+	 * Jobs running on this location. The keys of this map are the Job IDs.
+	 */
+	private transient Map<JobID, HadoopJob> runningJobs = Collections.synchronizedMap(new TreeMap<JobID, HadoopJob>());
+
+	/**
+	 * Status updater for this location
+	 */
+	private LocationStatusUpdater statusUpdater;
+
+	// state and status - transient
+	private transient String state = "";
+
+	/**
+	 * Creates a new default Hadoop location
+	 */
+	public HadoopCluster() {
+		this.conf = new Configuration();
+		this.addPluginConfigDefaultProperties();
+	}
+
+	/**
+	 * Creates a location from a file
+	 * 
+	 * @throws IOException
+	 * @throws SAXException
+	 * @throws ParserConfigurationException
+	 */
+	public HadoopCluster(File file) throws ParserConfigurationException, SAXException, IOException {
+
+		this.conf = new Configuration();
+		this.addPluginConfigDefaultProperties();
+		this.loadFromXML(file);
+	}
+
+	/**
+	 * Create a new Hadoop location by copying an already existing one.
+	 * 
+	 * @param existing
+	 *            the location to copy
+	 */
+	public HadoopCluster(HadoopCluster existing) {
+		this();
+		this.load(existing);
+	}
+
+	public void addJobListener(IJobListener l) {
+		jobListeners.add(l);
+	}
+
+	public void dispose() {
+		// TODO close DFS connections?
+	}
+
+	/**
+	 * List all elements that should be present in the Server window (all
+	 * servers and all jobs running on each server)
+	 * 
+	 * @return collection of jobs for this location
+	 */
+	public Collection<? extends IHadoopJob> getJobs() {
+		startStatusUpdater();
+		return this.runningJobs.values();
+	}
+
+	/**
+	 * Remove the given job from the currently running jobs map
+	 * 
+	 * @param job
+	 *            the job to remove
+	 */
+	public void purgeJob(final IHadoopJob job) {
+		runningJobs.remove(job.getJobID());
+		Display.getDefault().asyncExec(new Runnable() {
+			public void run() {
+				fireJobRemoved(job);
+			}
+		});
+	}
+
+	/**
+	 * Returns an iterator over the {@link Configuration} entries defining
+	 * this location.
+	 * 
+	 * @return the location configuration
+	 */
+	public Iterator<Entry<String, String>> getConfiguration() {
+		return this.conf.iterator();
+	}
+
+	/**
+	 * @return the conf
+	 */
+	public Configuration getConf() {
+		return conf;
+	}
+
+	/**
+	 * Gets a Hadoop configuration property value
+	 * 
+	 * @param prop
+	 *            the configuration property
+	 * @return the property value
+	 */
+	public String getConfProp(ConfProp prop) {
+		return conf.get(prop.name);
+	}
+
+	/**
+	 * Gets a Hadoop configuration property value
+	 * 
+	 * @param propName
+	 *            the property name
+	 * @return the property value
+	 */
+	public String getConfProp(String propName) {
+		return this.conf.get(propName);
+	}
+
+	public String getLocationName() {
+		return getConfProp(ConfProp.PI_LOCATION_NAME);
+	}
+
+	/**
+	 * Returns the master host name of the Hadoop location (the Job tracker)
+	 * 
+	 * @return the host name of the Job tracker
+	 */
+	public String getMasterHostName() {
+		return getConfProp(ConfProp.PI_JOB_TRACKER_HOST);
+	}
+
+	public String getState() {
+		return state;
+	}
+
+	/**
+	 * Overwrite this location with the given existing location
+	 * 
+	 * @param existing
+	 *            the existing location
+	 */
+	public void load(AbstractHadoopCluster existing) {
+		this.conf = new Configuration(((HadoopCluster) existing).conf);
+	}
+
+	/**
+	 * Overwrite this location with settings available in the given XML file.
+	 * The existing configuration is preserved if the XML file is invalid.
+	 * 
+	 * @param file
+	 *            the file path of the XML file
+	 * @return validity of the XML file
+	 * (Parse and I/O errors are caught internally and reported through a
+	 * <code>false</code> return value; this method itself declares no
+	 * checked exceptions.)
+	 */
+	public boolean loadFromXML(File file) {
+
+		Configuration newConf = new Configuration(this.conf);
+		DocumentBuilder builder;
+		Document document;
+		try {
+			builder = DocumentBuilderFactory.newInstance().newDocumentBuilder();
+			document = builder.parse(file);
+		} catch (ParserConfigurationException e) {
+			e.printStackTrace();
+			return false;
+		} catch (SAXException e) {
+			e.printStackTrace();
+			return false;
+		} catch (IOException e) {
+			e.printStackTrace();
+			return false;
+		}
+		Element root = document.getDocumentElement();
+		if (!"configuration".equals(root.getTagName()))
+			return false;
+		NodeList props = root.getChildNodes();
+		for (int i = 0; i < props.getLength(); i++) {
+			Node propNode = props.item(i);
+			if (!(propNode instanceof Element))
+				continue;
+			Element prop = (Element) propNode;
+			if (!"property".equals(prop.getTagName()))
+				return false;
+			NodeList fields = prop.getChildNodes();
+			String attr = null;
+			String value = null;
+			for (int j = 0; j < fields.getLength(); j++) {
+				Node fieldNode = fields.item(j);
+				if (!(fieldNode instanceof Element))
+					continue;
+				Element field = (Element) fieldNode;
+				if ("name".equals(field.getTagName()))
+					attr = ((Text) field.getFirstChild()).getData();
+				if ("value".equals(field.getTagName()) && field.hasChildNodes())
+					value = ((Text) field.getFirstChild()).getData();
+			}
+			if (attr != null && value != null)
+				newConf.set(attr, value);
+		}
+
+		this.conf = newConf;
+		return true;
+	}
+
+	/**
+	 * Sets a Hadoop configuration property value
+	 * 
+	 * @param prop
+	 *            the property
+	 * @param propValue
+	 *            the property value
+	 */
+	public void setConfProp(ConfProp prop, String propValue) {
+		assert propValue != null;
+		conf.set(prop.name, propValue);
+	}
+
+	/**
+	 * Sets a Hadoop configuration property value
+	 * 
+	 * @param propName
+	 *            the property name
+	 * @param propValue
+	 *            the property value
+	 */
+	public void setConfProp(String propName, String propValue) {
+		this.conf.set(propName, propValue);
+	}
+
+	public void setLocationName(String newName) {
+		setConfProp(ConfProp.PI_LOCATION_NAME, newName);
+	}
+
+	/**
+	 * Write this location's settings to the given file
+	 * 
+	 * @param file
+	 *            the destination file
+	 * @throws IOException
+	 */
+	public void storeSettingsToFile(File file) throws IOException {
+		FileOutputStream fos = new FileOutputStream(file);
+		try {
+			this.conf.writeXml(fos);
+			fos.close();
+			fos = null;
+		} finally {
+			IOUtils.closeStream(fos);
+		}
+
+	}
+
+	/* @inheritDoc */
+	@Override
+	public String toString() {
+		return this.getLocationName();
+	}
+
+	/**
+	 * Fill the configuration with valid default values
+	 */
+	private void addPluginConfigDefaultProperties() {
+		for (ConfProp prop : ConfProp.values()) {
+			if (conf.get(prop.name) == null)
+				conf.set(prop.name, prop.defVal);
+		}
+	}
+
+	/**
+	 * Starts the location status updater
+	 */
+	private synchronized void startStatusUpdater() {
+		if (statusUpdater == null) {
+			statusUpdater = new LocationStatusUpdater();
+			statusUpdater.schedule();
+		}
+	}
+
+	/*
+	 * Rewrite of the connection and tunneling to the Hadoop location
+	 */
+
+	/**
+	 * Provides access to the default file system of this location.
+	 * 
+	 * @return a {@link FileSystem}
+	 */
+	public FileSystem getDFS() throws IOException {
+		return FileSystem.get(this.conf);
+	}
+
+	/**
+	 * Provides access to the Job tracking system of this location
+	 * 
+	 * @return a {@link JobClient}
+	 */
+	public JobClient getJobClient() throws IOException {
+		JobConf jconf = new JobConf(this.conf);
+		return new JobClient(jconf);
+	}
+
+	/*
+	 * Listeners handling
+	 */
+
+	protected void fireJarPublishDone(IJarModule jar) {
+		for (IJobListener listener : jobListeners) {
+			listener.publishDone(jar);
+		}
+	}
+
+	protected void fireJarPublishStart(IJarModule jar) {
+		for (IJobListener listener : jobListeners) {
+			listener.publishStart(jar);
+		}
+	}
+
+	protected void fireJobAdded(HadoopJob job) {
+		for (IJobListener listener : jobListeners) {
+			listener.jobAdded(job);
+		}
+	}
+
+	protected void fireJobRemoved(IHadoopJob job) {
+		for (IJobListener listener : jobListeners) {
+			listener.jobRemoved(job);
+		}
+	}
+
+	protected void fireJobChanged(HadoopJob job) {
+		for (IJobListener listener : jobListeners) {
+			listener.jobChanged(job);
+		}
+	}
+
+	@Override
+	public void saveConfiguration(File confDir, String jarFilePath) throws IOException {
+		// Prepare the Hadoop configuration
+		JobConf conf = new JobConf(this.conf);
+		conf.setJar(jarFilePath);
+		// Write it to the disk file
+		File confFile = new File(confDir, "core-site.xml");
+		FileOutputStream fos = new FileOutputStream(confFile);
+		try {
+			conf.writeXml(fos);
+			fos.close();
+			fos = null;
+		} finally {
+			IOUtils.closeStream(fos);
+		}
+
+	}
+}

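To make the polling flow concrete: a location is configured through ConfProp and then polled lazily, since the first getJobs() call starts the LocationStatusUpdater. A hedged sketch, assuming a running workbench (class name and host/port values are placeholders):

    import java.io.File;
    import org.apache.hdt.core.launch.ConfProp;
    import org.apache.hdt.hadoop.release.HadoopCluster;

    public class HadoopClusterExample {
        public static void main(String[] args) throws Exception {
            HadoopCluster cluster = new HadoopCluster(); // seeded with ConfProp defaults
            cluster.setLocationName("local-1.1.2");
            cluster.setConfProp(ConfProp.FS_DEFAULT_URI, "hdfs://localhost:50040/");
            cluster.setConfProp(ConfProp.JOB_TRACKER_URI, "localhost:50020");

            // First call schedules LocationStatusUpdater, which then re-schedules
            // itself every STATUS_OBSERVATION_DELAY ms.
            System.out.println(cluster.getJobs().size() + " job(s) known so far");

            // Locations round-trip through Hadoop's XML configuration format.
            cluster.storeSettingsToFile(new File("/tmp/local-1.1.2.xml"));
        }
    }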
http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/29467b54/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopJob.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopJob.java b/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopJob.java
new file mode 100644
index 0000000..5861967
--- /dev/null
+++ b/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopJob.java
@@ -0,0 +1,342 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.hadoop.release;
+
+import java.io.File;
+import java.io.IOException;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapred.Counters;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.JobID;
+import org.apache.hadoop.mapred.JobStatus;
+import org.apache.hadoop.mapred.RunningJob;
+import org.apache.hdt.core.launch.AbstractHadoopCluster;
+import org.apache.hdt.core.launch.IHadoopJob;
+
+/**
+ * Representation of a Map/Reduce running job on a given location
+ */
+
+public class HadoopJob implements IHadoopJob {
+
+	/**
+	 * Enum representation of a Job state
+	 */
+	public enum JobState {
+		PREPARE(JobStatus.PREP), RUNNING(JobStatus.RUNNING), FAILED(JobStatus.FAILED), SUCCEEDED(JobStatus.SUCCEEDED);
+
+		final int state;
+
+		JobState(int state) {
+			this.state = state;
+		}
+
+		static JobState ofInt(int state) {
+			if (state == JobStatus.PREP) {
+				return PREPARE;
+			} else if (state == JobStatus.RUNNING) {
+				return RUNNING;
+			} else if (state == JobStatus.FAILED) {
+				return FAILED;
+			} else if (state == JobStatus.SUCCEEDED) {
+				return SUCCEEDED;
+			} else {
+				return null;
+			}
+		}
+	}
+
+	/**
+	 * Location this Job runs on
+	 */
+	private final HadoopCluster location;
+
+	/**
+	 * Unique identifier of this Job
+	 */
+	final JobID jobId;
+
+	/**
+	 * Status representation of a running job. This actually contains a
+	 * reference to a JobClient. Its methods might block.
+	 */
+	RunningJob running;
+
+	/**
+	 * Last polled status
+	 * 
+	 * @deprecated retained for reference only; the job state is derived from
+	 *             the {@link RunningJob} flags instead
+	 */
+	JobStatus status;
+
+	/**
+	 * Last polled counters
+	 */
+	Counters counters;
+
+	/**
+	 * Job Configuration
+	 */
+	JobConf jobConf = null;
+
+	boolean completed = false;
+
+	boolean successful = false;
+
+	boolean killed = false;
+
+	int totalMaps;
+
+	int totalReduces;
+
+	int completedMaps;
+
+	int completedReduces;
+
+	float mapProgress;
+
+	float reduceProgress;
+
+	/**
+	 * Constructor for a Hadoop job representation
+	 * 
+	 * @param location
+	 *            the cluster this job runs on
+	 * @param id
+	 *            the unique job identifier
+	 * @param running
+	 *            handle on the running job
+	 * @param status
+	 *            the last polled job status
+	 */
+	public HadoopJob(HadoopCluster location, JobID id, RunningJob running, JobStatus status) {
+
+		this.location = location;
+		this.jobId = id;
+		this.running = running;
+
+		loadJobFile();
+
+		update(status);
+	}
+
+	/**
+	 * Try to locate and load the JobConf file for this job so to get more
+	 * details on the job (number of maps and of reduces)
+	 */
+	private void loadJobFile() {
+		try {
+			String jobFile = getJobFile();
+			FileSystem fs = location.getDFS();
+			File tmp = File.createTempFile(getJobID(), ".xml");
+			if (FileUtil.copy(fs, new Path(jobFile), tmp, false, location.getConf())) {
+				this.jobConf = new JobConf(tmp.toString());
+
+				this.totalMaps = jobConf.getNumMapTasks();
+				this.totalReduces = jobConf.getNumReduceTasks();
+			}
+
+		} catch (IOException ioe) {
+			ioe.printStackTrace();
+		}
+	}
+
+	/* @inheritDoc */
+	@Override
+	public int hashCode() {
+		final int prime = 31;
+		int result = 1;
+		result = prime * result + ((jobId == null) ? 0 : jobId.hashCode());
+		result = prime * result + ((location == null) ? 0 : location.hashCode());
+		return result;
+	}
+
+	/* @inheritDoc */
+	@Override
+	public boolean equals(Object obj) {
+		if (this == obj)
+			return true;
+		if (obj == null)
+			return false;
+		if (!(obj instanceof HadoopJob))
+			return false;
+		final HadoopJob other = (HadoopJob) obj;
+		if (jobId == null) {
+			if (other.jobId != null)
+				return false;
+		} else if (!jobId.equals(other.jobId))
+			return false;
+		if (location == null) {
+			if (other.location != null)
+				return false;
+		} else if (!location.equals(other.location))
+			return false;
+		return true;
+	}
+
+	/**
+	 * Get the running state of the Job (see {@link JobStatus}).
+	 * 
+	 * @return the current job state as a string
+	 */
+	public String getState() {
+		if (this.completed) {
+			if (this.successful) {
+				return JobState.SUCCEEDED.toString();
+			} else {
+				return JobState.FAILED.toString();
+			}
+		} else {
+			return JobState.RUNNING.toString();
+		}
+		// return JobState.ofInt(this.status.getRunState());
+	}
+
+	/**
+	 * @return the job identifier as a string
+	 */
+	public String getJobID() {
+		return this.jobId.toString();
+	}
+
+	/**
+	 * @return the cluster this job runs on
+	 */
+	public AbstractHadoopCluster getLocation() {
+		return this.location;
+	}
+
+	/**
+	 * @return whether this job has completed
+	 */
+	public boolean isCompleted() {
+		return this.completed;
+	}
+
+	/**
+	 * @return the job name
+	 */
+	public String getJobName() {
+		return this.running.getJobName();
+	}
+
+	/**
+	 * @return the path of the job configuration file
+	 */
+	public String getJobFile() {
+		return this.running.getJobFile();
+	}
+
+	/**
+	 * Return the tracking URL for this Job.
+	 * 
+	 * @return string representation of the tracking URL for this Job
+	 */
+	public String getTrackingURL() {
+		return this.running.getTrackingURL();
+	}
+
+	/**
+	 * Returns a string representation of this job status
+	 * 
+	 * @return string representation of this job status
+	 */
+	public String getStatus() {
+
+		StringBuilder s = new StringBuilder();
+
+		s.append("Maps : ").append(completedMaps).append("/").append(totalMaps);
+		s.append(" (").append(mapProgress).append(")");
+		s.append("  Reduces : ").append(completedReduces).append("/").append(totalReduces);
+		s.append(" (").append(reduceProgress).append(")");
+
+		return s.toString();
+	}
+
+	/**
+	 * Update this job status according to the given JobStatus
+	 * 
+	 * @param status
+	 *            the last polled status
+	 */
+	void update(JobStatus status) {
+		this.status = status;
+		try {
+			this.counters = running.getCounters();
+			this.completed = running.isComplete();
+			this.successful = running.isSuccessful();
+			this.mapProgress = running.mapProgress();
+			this.reduceProgress = running.reduceProgress();
+			// running.getTaskCompletionEvents(fromEvent);
+
+		} catch (IOException ioe) {
+			ioe.printStackTrace();
+		}
+
+		this.completedMaps = (int) (this.totalMaps * this.mapProgress);
+		this.completedReduces = (int) (this.totalReduces * this.reduceProgress);
+	}
+
+	/**
+	 * Print this job counters (for debugging purpose)
+	 */
+	void printCounters() {
+		System.out.printf("New Job:\n");
+		for (String groupName : counters.getGroupNames()) {
+			Counters.Group group = counters.getGroup(groupName);
+			System.out.printf("\t%s[%s]\n", groupName, group.getDisplayName());
+
+			for (Counters.Counter counter : group) {
+				System.out.printf("\t\t%s: %s\n", counter.getDisplayName(), counter.getCounter());
+			}
+		}
+		System.out.printf("\n");
+	}
+
+	/**
+	 * Kill this job
+	 */
+	public void kill() {
+		try {
+			this.running.killJob();
+			this.killed = true;
+
+		} catch (IOException e) {
+			e.printStackTrace();
+		}
+	}
+
+	/**
+	 * Print this job status (for debugging purpose)
+	 */
+	public void display() {
+		System.out.printf("Job id=%s, name=%s\n", getJobID(), getJobName());
+		System.out.printf("Configuration file: %s\n", getJobFile());
+		System.out.printf("Tracking URL: %s\n", getTrackingURL());
+
+		System.out.printf("Completion: map: %f reduce %f\n", 100.0 * this.mapProgress, 100.0 * this.reduceProgress);
+
+		System.out.println("Job total maps = " + totalMaps);
+		System.out.println("Job completed maps = " + completedMaps);
+		System.out.println("Map percentage complete = " + mapProgress);
+		System.out.println("Job total reduces = " + totalReduces);
+		System.out.println("Job completed reduces = " + completedReduces);
+		System.out.println("Reduce percentage complete = " + reduceProgress);
+		System.out.flush();
+	}
+
+}
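
For context, a sketch of how such job representations can be materialized from a JobClient, mirroring what the location's status updater presumably does; cluster is an already-configured HadoopCluster and imports are elided:

	// Sketch only: build a HadoopJob view of every job the cluster reports.
	static void listJobs(HadoopCluster cluster) throws IOException {
		JobClient client = cluster.getJobClient();
		for (JobStatus status : client.getAllJobs()) {
			RunningJob running = client.getJob(status.getJobID());
			if (running == null) // retired jobs may no longer resolve
				continue;
			HadoopJob job = new HadoopJob(cluster, status.getJobID(), running, status);
			System.out.println(job.getJobID() + " [" + job.getState() + "] " + job.getStatus());
		}
	}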

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/29467b54/org.apache.hdt.ui/META-INF/MANIFEST.MF
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/META-INF/MANIFEST.MF b/org.apache.hdt.ui/META-INF/MANIFEST.MF
index ac39e07..c34e98a 100644
--- a/org.apache.hdt.ui/META-INF/MANIFEST.MF
+++ b/org.apache.hdt.ui/META-INF/MANIFEST.MF
@@ -8,11 +8,16 @@ Bundle-Vendor: Apache Hadoop
 Require-Bundle: org.eclipse.core.runtime,
  org.eclipse.core.resources,
  org.eclipse.ui,
+ org.eclipse.jdt.core,
+ org.eclipse.jdt.ui,
  org.eclipse.ui.ide;bundle-version="3.6.0",
  org.eclipse.team.ui;bundle-version="3.5.100",
  org.eclipse.ui.navigator;bundle-version="3.5.0",
  org.eclipse.ui.navigator.resources;bundle-version="3.4.200",
  org.eclipse.ui.views.properties.tabbed;bundle-version="3.5.100";resolution:=optional,
+ org.eclipse.jdt.debug.ui,
+ org.eclipse.jdt.launching,
+ org.eclipse.debug.ui,
  org.apache.hdt.core
 Bundle-RequiredExecutionEnvironment: JavaSE-1.6
 Bundle-ActivationPolicy: lazy

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/29467b54/org.apache.hdt.ui/plugin.xml
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/plugin.xml b/org.apache.hdt.ui/plugin.xml
index e6f1e53..7bc5b47 100644
--- a/org.apache.hdt.ui/plugin.xml
+++ b/org.apache.hdt.ui/plugin.xml
@@ -17,6 +17,14 @@
    limitations under the License.
 -->
 <plugin>
+  <extension
+         point="org.eclipse.ui.preferencePages">
+      <page
+            class="org.apache.hdt.ui.preferences.MapReducePreferencePage"
+            id="org.apache.hdt.ui.preferences.MapReducePreferencePage"
+            name="Hadoop">
+      </page>
+   </extension>
    <extension
          point="org.eclipse.ui.perspectives">
       <perspective
@@ -51,6 +59,14 @@
                relationship="bottom"
                relative="org.eclipse.ui.editorss">
          </view>
+          <view
+                id="org.apache.hdt.ui.ClusterView"
+                minimized="false"
+                relationship="stack"
+                relative="org.apache.hdt.ui.view.servers">
+         </view>
+         <newWizardShortcut
+               id="org.apache.hdt.ui.wizard.newProjectWizard"/>
          <newWizardShortcut
                id="org.apache.hdt.ui.wizard.newHdfsServer">
          </newWizardShortcut>
@@ -146,6 +162,14 @@
    </extension>
    <extension
          point="org.eclipse.ui.newWizards">
+         <wizard
+            category="org.apache.hdt.ui.newWizards.category"
+            class="org.apache.hdt.ui.internal.mr.NewMapReduceProjectWizard"
+            finalPerspective="org.apache.hdt.ui.perspective"
+            icon="icons/hadoop-logo-16x16.png"
+            id="org.apache.hdt.ui.wizard.newProjectWizard"
+            name="Map/Reduce Project"
+            project="true"/>
       <wizard
             category="org.apache.hdt.ui.newWizards.category"
             class="org.apache.hdt.ui.internal.hdfs.NewHDFSWizard"
@@ -154,10 +178,6 @@
             id="org.apache.hdt.ui.wizard.newHdfsServer"
             name="New HDFS Server">
       </wizard>
-      <category
-            id="org.apache.hdt.ui.newWizards.category"
-            name="Hadoop">
-      </category>
       <wizard
             category="org.apache.hdt.ui.newWizards.category"
             class="org.apache.hdt.ui.internal.zookeeper.NewZooKeeperWizard"
@@ -166,6 +186,36 @@
             id="org.apache.hdt.ui.wizard.newZooKeeperServer"
             name="New ZooKeeper Server">
       </wizard>
+      <wizard category="org.apache.hdt.ui.newWizards.category"
+            class="org.apache.hdt.ui.internal.mr.NewMapperWizard"
+            icon="icons/mapper16.png"
+            id="org.apache.hdt.ui.wizard.NewMapperWizard"
+            name="Mapper"
+            project="false"/>
+      <wizard category="org.apache.hdt.ui.newWizards.category"
+            class="org.apache.hdt.ui.internal.mr.NewReducerWizard"
+            icon="icons/reducer16.png"
+            id="org.apache.hdt.ui.wizard.NewReducerWizard"
+            name="Reducer"
+            project="false"/>
+      <wizard
+            category="org.apache.hdt.ui.newWizards.category"
+            class="org.apache.hdt.ui.internal.mr.NewDriverWizard"
+            icon="icons/driver.png"
+            id="org.apache.hdt.ui.wizard.NewDriverWizard"
+            name="MapReduce Driver"
+            project="false"/>
+      <wizard
+            category="org.apache.hdt.ui.newWizards.category"
+            class="org.apache.hdt.ui.internal.mr.NewPartitionerWizard"
+            icon="icons/Elephant16x16.gif"
+            id="org.apache.hdt.ui.wizard.NewPartitionerWizard"
+            name="Partitioner"
+            project="false"/>
+      <category
+            id="org.apache.hdt.ui.newWizards.category"
+            name="Hadoop">
+      </category>
    </extension>
    <extension
          point="org.eclipse.ui.popupMenus">
@@ -381,6 +431,10 @@
    </extension>
    <extension
          point="org.eclipse.ui.views">
+       <category
+            id="org.apache.hdt.ui.category"
+            name="Hadoop">
+      </category>  
       <view
             allowMultiple="false"
             category="org.apache.hdt.ui.category"
@@ -390,10 +444,15 @@
             name="Hadoop Servers"
             restorable="true">
       </view>
-      <category
-            id="org.apache.hdt.ui.category"
-            name="Hadoop">
-      </category>
+      <view
+            allowMultiple="false"
+            category="org.apache.hdt.ui.category"
+            class="org.apache.hdt.ui.internal.mr.ClusterView"
+            icon="icons/hadoop-logo-16x16.png"
+            id="org.apache.hdt.ui.ClusterView"
+            name="Hadoop Clusters"
+            restorable="true">
+      </view>
    </extension>
    <extension
          point="org.eclipse.ui.actionSets">
@@ -420,5 +479,32 @@
          </action>
       </actionSet>
    </extension>
-
+    <extension
+         point="org.eclipse.debug.ui.launchShortcuts">
+      <shortcut
+            class="org.apache.hdt.ui.internal.launch.HadoopApplicationLaunchShortcut"
+            icon="icons/elephantblue16x16.gif"
+            id="org.apache.hdt.launch.shortcut"
+            label="Run on Hadoop"
+            modes="run">
+         <contextualLaunch>
+            <contextLabel mode="run" label="Run on Hadoop" />
+            <enablement>
+             <with variable="selection">
+               <count value="1"/>
+               <iterate>
+                <or>
+                  <test property="org.eclipse.jdt.launching.hasMain"/>
+                  <and>
+                     <test property="org.eclipse.jdt.launching.isContainer"/>
+                     <test property="org.eclipse.jdt.launching.hasProjectNature" args="org.eclipse.jdt.core.javanature"/>
+                     <test property="org.eclipse.jdt.launching.hasProjectNature" args="org.apache.hdt.mrnature"/>
+                  </and>
+                </or>
+               </iterate>
+               </with>
+           </enablement>
+         </contextualLaunch>
+      </shortcut>
+      </extension>
 </plugin>
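
The contextualLaunch enablement above keys off project natures. For reference, a minimal sketch of how a project acquires such a nature via the Eclipse resources API; the nature id string is an assumption and must match the id under which MapReduceNature is registered:

	// Sketch only: tag an existing project with a nature id.
	static void addNature(IProject project, String natureId) throws CoreException {
		IProjectDescription desc = project.getDescription();
		String[] natures = desc.getNatureIds();
		String[] newNatures = new String[natures.length + 1];
		System.arraycopy(natures, 0, newNatures, 0, natures.length);
		newNatures[natures.length] = natureId; // e.g. "org.apache.hdt.mrnature" (assumed)
		desc.setNatureIds(newNatures);
		project.setDescription(desc, new NullProgressMonitor());
	}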

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/29467b54/org.apache.hdt.ui/src/org/apache/hdt/ui/ImageLibrary.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/ImageLibrary.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/ImageLibrary.java
new file mode 100644
index 0000000..b4017cd
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/ImageLibrary.java
@@ -0,0 +1,251 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.ui;
+
+import java.net.URL;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.eclipse.core.runtime.FileLocator;
+import org.eclipse.core.runtime.Path;
+import org.eclipse.jface.resource.ImageDescriptor;
+import org.eclipse.swt.graphics.Image;
+import org.eclipse.ui.ISharedImages;
+import org.eclipse.ui.PlatformUI;
+import org.eclipse.ui.plugin.AbstractUIPlugin;
+import org.osgi.framework.Bundle;
+
+/**
+ * Icons manager
+ */
+public class ImageLibrary {
+
+	private final Bundle bundle = Activator.getDefault().getBundle();
+
+	/**
+	 * Singleton instance
+	 */
+	private static volatile ImageLibrary instance = null;
+
+	private ISharedImages sharedImages = PlatformUI.getWorkbench().getSharedImages();
+
+	/**
+	 * Where resources (icons, images...) are available in the Bundle
+	 */
+	private static final String RESOURCE_DIR = "icons/";
+
+	/**
+	 * Public access to image descriptors
+	 * 
+	 * @param name
+	 *            symbolic name of the image
+	 * @return the image descriptor
+	 */
+	public static ImageDescriptor get(String name) {
+		return getInstance().getImageDescriptorByName(name);
+	}
+
+	/**
+	 * Public access to images
+	 * 
+	 * @param name
+	 *            symbolic name of the image
+	 * @return the image
+	 */
+	public static Image getImage(String name) {
+		return getInstance().getImageByName(name);
+	}
+
+	/**
+	 * Singleton access
+	 * 
+	 * @return the Image library
+	 */
+	public static ImageLibrary getInstance() {
+		if (instance == null) {
+			synchronized (ImageLibrary.class) {
+				if (instance == null)
+					instance = new ImageLibrary();
+			}
+		}
+		return instance;
+	}
+
+	/**
+	 * Map of registered resources (ImageDescriptor and Image)
+	 */
+	private Map<String, ImageDescriptor> descMap = new HashMap<String, ImageDescriptor>();
+
+	private Map<String, Image> imageMap = new HashMap<String, Image>();
+
+	/**
+	 * Image library constructor: put image definitions here.
+	 */
+	private ImageLibrary() {
+		/*
+		 * Servers view
+		 */
+		newImage("server.view.location.entry", "Elephant-24x24.png");
+		newImage("server.view.job.entry", "job.gif");
+		newImage("server.view.action.location.new", "location-new-16x16.png");
+		newImage("server.view.action.location.edit", "location-edit-16x16.png");
+		newSharedImage("server.view.action.delete", ISharedImages.IMG_TOOL_DELETE);
+
+		/*
+		 * DFS Browser
+		 */
+		newImage("dfs.browser.root.entry", "files.gif");
+		newImage("dfs.browser.location.entry", "Elephant-16x16.png");
+		newSharedImage("dfs.browser.folder.entry", ISharedImages.IMG_OBJ_FOLDER);
+		newSharedImage("dfs.browser.file.entry", ISharedImages.IMG_OBJ_FILE);
+		// DFS files in editor
+		newSharedImage("dfs.file.editor", ISharedImages.IMG_OBJ_FILE);
+		// Actions
+		newImage("dfs.browser.action.mkdir", "new-folder.png");
+		newImage("dfs.browser.action.download", "download.png");
+		newImage("dfs.browser.action.upload_files", "upload.png");
+		newImage("dfs.browser.action.upload_dir", "upload.png");
+		newSharedImage("dfs.browser.action.delete", ISharedImages.IMG_TOOL_DELETE);
+		newImage("dfs.browser.action.refresh", "refresh.png");
+
+		/*
+		 * Wizards
+		 */
+		newImage("wizard.mapper.new", "mapwiz.png");
+		newImage("wizard.reducer.new", "reducewiz.png");
+		newImage("wizard.driver.new", "driverwiz.png");
+		newImage("wizard.mapreduce.project.new", "projwiz.png");
+	}
+
+	/**
+	 * Accessor to registered image descriptors
+	 * 
+	 * @param name
+	 *            symbolic name of the image
+	 * @return the image descriptor, or null if none is registered
+	 */
+	private ImageDescriptor getImageDescriptorByName(String name) {
+		return this.descMap.get(name);
+	}
+
+	/**
+	 * Accessor to registered images
+	 * 
+	 * @param name
+	 *            symbolic name of the image
+	 * @return the image, or null if none is registered
+	 */
+	private Image getImageByName(String name) {
+		return this.imageMap.get(name);
+	}
+
+	/**
+	 * Access to platform shared image descriptors
+	 * 
+	 * @param name
+	 *            name of the shared image ({@link ISharedImages})
+	 * @return the shared image descriptor, or null if none matches
+	 */
+	private ImageDescriptor getSharedByName(String name) {
+		return sharedImages.getImageDescriptor(name);
+	}
+
+	/**
+	 * Load and register a new image. If the image resource does not exist or
+	 * fails to load, a default "error" resource is supplied.
+	 * 
+	 * @param name
+	 *            name of the image
+	 * @param filename
+	 *            name of the file containing the image
+	 * @return whether the image has correctly been loaded
+	 */
+	private boolean newImage(String name, String filename) {
+		ImageDescriptor id;
+		boolean success;
+
+		try {
+			URL fileURL = FileLocator.find(bundle, new Path(RESOURCE_DIR + filename), null);
+			id = ImageDescriptor.createFromURL(FileLocator.toFileURL(fileURL));
+			success = true;
+
+		} catch (Exception e) {
+
+			e.printStackTrace();
+			id = ImageDescriptor.getMissingImageDescriptor();
+			// id = getSharedByName(ISharedImages.IMG_OBJS_ERROR_TSK);
+			success = false;
+		}
+
+		descMap.put(name, id);
+		imageMap.put(name, id.createImage(true));
+
+		return success;
+	}
+
+	/**
+	 * Register an image from the workspace shared image pool. If the image
+	 * resource does not exist or fails to load, a default "error" resource is
+	 * supplied.
+	 * 
+	 * @param name
+	 *            name of the image
+	 * @param sharedName
+	 *            name of the shared image ({@link ISharedImages})
+	 * @return whether the image has correctly been loaded
+	 */
+	private boolean newSharedImage(String name, String sharedName) {
+		boolean success = true;
+		ImageDescriptor id = getSharedByName(sharedName);
+
+		if (id == null) {
+			id = ImageDescriptor.getMissingImageDescriptor();
+			// id = getSharedByName(ISharedImages.IMG_OBJS_ERROR_TSK);
+			success = false;
+		}
+
+		descMap.put(name, id);
+		imageMap.put(name, id.createImage(true));
+
+		return success;
+	}
+
+	/**
+	 * Register an image from another plug-in. If the image resource does not
+	 * exist or fails to load, a default "error" resource is supplied.
+	 * 
+	 * @param name
+	 *            name of the image
+	 * @param pluginId
+	 *            identifier of the plug-in hosting the image
+	 * @param filename
+	 *            path of the image file within that plug-in
+	 * @return whether the image has correctly been loaded
+	 */
+	private boolean newPluginImage(String name, String pluginId, String filename) {
+
+		boolean success = true;
+		ImageDescriptor id = AbstractUIPlugin.imageDescriptorFromPlugin(pluginId, filename);
+
+		if (id == null) {
+			id = ImageDescriptor.getMissingImageDescriptor();
+			// id = getSharedByName(ISharedImages.IMG_OBJS_ERROR_TSK);
+			success = false;
+		}
+
+		descMap.put(name, id);
+		imageMap.put(name, id.createImage(true));
+
+		return success;
+	}
+}
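
A minimal consumption sketch for the two public accessors; the keys are names registered in the ImageLibrary constructor, and the surrounding wizard page is hypothetical:

	// Sketch only: use the library from a JFace wizard page.
	class ExampleWizardPage extends WizardPage {
		protected ExampleWizardPage() {
			super("example");
			setImageDescriptor(ImageLibrary.get("wizard.mapper.new"));
		}
		public void createControl(Composite parent) {
			Label label = new Label(parent, SWT.NONE);
			// Images are cached and shared by the library: do not dispose them here.
			label.setImage(ImageLibrary.getImage("server.view.job.entry"));
			setControl(label);
		}
	}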

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/29467b54/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/HadoopApplicationLaunchShortcut.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/HadoopApplicationLaunchShortcut.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/HadoopApplicationLaunchShortcut.java
new file mode 100644
index 0000000..4cc03d4
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/HadoopApplicationLaunchShortcut.java
@@ -0,0 +1,130 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.ui.internal.launch;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.logging.Logger;
+
+import org.eclipse.core.resources.IFile;
+import org.eclipse.core.resources.IResource;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.debug.core.ILaunchConfiguration;
+import org.eclipse.debug.core.ILaunchConfigurationWorkingCopy;
+import org.eclipse.jdt.core.IJavaProject;
+import org.eclipse.jdt.core.IType;
+import org.eclipse.jdt.core.JavaCore;
+import org.eclipse.jdt.debug.ui.launchConfigurations.JavaApplicationLaunchShortcut;
+import org.eclipse.jdt.launching.IJavaLaunchConfigurationConstants;
+import org.eclipse.jdt.launching.IRuntimeClasspathEntry;
+import org.eclipse.jdt.launching.JavaRuntime;
+import org.eclipse.jface.wizard.IWizard;
+import org.eclipse.jface.wizard.WizardDialog;
+import org.eclipse.swt.widgets.Display;
+import org.eclipse.swt.widgets.Shell;
+
+/**
+ * Add a shortcut "Run on Hadoop" to the Run menu
+ */
+
+public class HadoopApplicationLaunchShortcut extends JavaApplicationLaunchShortcut {
+
+	static Logger log = Logger.getLogger(HadoopApplicationLaunchShortcut.class.getName());
+
+	// private ActionDelegate delegate = new RunOnHadoopActionDelegate();
+
+	public HadoopApplicationLaunchShortcut() {
+	}
+
+	/* @inheritDoc */
+	@Override
+	protected ILaunchConfiguration createConfiguration(IType type) {
+
+		ILaunchConfiguration iConf = super.createConfiguration(type);
+		ILaunchConfigurationWorkingCopy iConfWC;
+		try {
+			/*
+			 * Tune the default launch configuration: setup run-time classpath
+			 * manually
+			 */
+			iConfWC = iConf.getWorkingCopy();
+
+			iConfWC.setAttribute(IJavaLaunchConfigurationConstants.ATTR_DEFAULT_CLASSPATH, false);
+
+			List<String> classPath = new ArrayList<String>();
+			IResource resource = type.getResource();
+			IJavaProject project = (IJavaProject) resource.getProject().getNature(JavaCore.NATURE_ID);
+			IRuntimeClasspathEntry cpEntry = JavaRuntime.newDefaultProjectClasspathEntry(project);
+			classPath.add(0, cpEntry.getMemento());
+
+			iConfWC.setAttribute(IJavaLaunchConfigurationConstants.ATTR_CLASSPATH, classPath);
+
+		} catch (CoreException e) {
+			e.printStackTrace();
+			// FIXME Error dialog
+			return null;
+		}
+
+		/*
+		 * Update the selected configuration with a specific Hadoop location
+		 * target
+		 */
+		IResource resource = type.getResource();
+		if (!(resource instanceof IFile))
+			return null;
+		RunOnHadoopWizard wizard = new RunOnHadoopWizard((IFile) resource, iConfWC);
+		WizardDialog dialog = new WizardDialog(Display.getDefault().getActiveShell(), wizard);
+
+		dialog.create();
+		dialog.setBlockOnOpen(true);
+		if (dialog.open() != WizardDialog.OK)
+			return null;
+
+		try {
+
+			// Only save if some configuration is different.
+			if (!iConfWC.contentsEqual(iConf))
+				iConfWC.doSave();
+
+		} catch (CoreException e) {
+			e.printStackTrace();
+			// FIXME Error dialog
+			return null;
+		}
+
+		return iConfWC;
+	}
+
+	/**
+	 * Wizard dialog formerly used to run the RunOnHadoopWizard and provide it
+	 * with a ProgressMonitor
+	 */
+	static class Dialog extends WizardDialog {
+		public Dialog(Shell parentShell, IWizard newWizard) {
+			super(parentShell, newWizard);
+		}
+
+		@Override
+		public void create() {
+			super.create();
+
+			((RunOnHadoopWizard) getWizard()).setProgressMonitor(getProgressMonitor());
+		}
+	}
+}
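
For reference, a sketch of how a launch delegate could decode the classpath mementos that createConfiguration(...) stores under ATTR_CLASSPATH; config is a hypothetical ILaunchConfiguration and imports are elided:

	// Sketch only: read back and resolve the stored runtime classpath entries.
	static void dumpClasspath(ILaunchConfiguration config) throws CoreException {
		List<String> mementos = config.getAttribute(IJavaLaunchConfigurationConstants.ATTR_CLASSPATH, new ArrayList<String>());
		for (String memento : mementos) {
			IRuntimeClasspathEntry entry = JavaRuntime.newRuntimeClasspathEntry(memento);
			System.out.println(entry.getPath());
		}
	}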