Posted to commits@hdt.apache.org by rs...@apache.org on 2014/06/26 10:36:24 UTC

[01/27] git commit: Merge branch 'release/hdt-release-0.0.1.incubating-RC3' into hadoop-eclipse-merge-development

Repository: incubator-hdt
Updated Branches:
  refs/heads/hadoop-eclipse-merge 3672346b9 -> 027a98626
  refs/heads/hadoop-eclipse-merge-development 4cd0302f8 -> 669009605


Merge branch 'release/hdt-release-0.0.1.incubating-RC3' into hadoop-eclipse-merge-development


Project: http://git-wip-us.apache.org/repos/asf/incubator-hdt/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hdt/commit/aea6a370
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hdt/tree/aea6a370
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hdt/diff/aea6a370

Branch: refs/heads/hadoop-eclipse-merge
Commit: aea6a37041705927effeb2a437b6409304e6a053
Parents: 8836e9a 8a8c671
Author: Rahul Sharma <rs...@apache.org>
Authored: Mon Mar 31 10:15:21 2014 +0530
Committer: Rahul Sharma <rs...@apache.org>
Committed: Mon Mar 31 10:15:21 2014 +0530

----------------------------------------------------------------------
 org.apache.hdt.core/META-INF/MANIFEST.MF           | 2 +-
 org.apache.hdt.core/pom.xml                        | 2 +-
 org.apache.hdt.dist/pom.xml                        | 2 +-
 org.apache.hdt.feature/feature.xml                 | 8 ++++----
 org.apache.hdt.feature/pom.xml                     | 2 +-
 org.apache.hdt.hadoop.release/META-INF/MANIFEST.MF | 2 +-
 org.apache.hdt.hadoop.release/pom.xml              | 2 +-
 org.apache.hdt.ui.test/META-INF/MANIFEST.MF        | 2 +-
 org.apache.hdt.ui.test/pom.xml                     | 2 +-
 org.apache.hdt.ui/META-INF/MANIFEST.MF             | 2 +-
 org.apache.hdt.ui/pom.xml                          | 2 +-
 org.apache.hdt.updateSite/pom.xml                  | 2 +-
 pom.xml                                            | 2 +-
 13 files changed, 16 insertions(+), 16 deletions(-)
----------------------------------------------------------------------



[23/27] git commit: - updating release version

Posted by rs...@apache.org.
 - updating release version


Project: http://git-wip-us.apache.org/repos/asf/incubator-hdt/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hdt/commit/2de1a905
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hdt/tree/2de1a905
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hdt/diff/2de1a905

Branch: refs/heads/hadoop-eclipse-merge
Commit: 2de1a905cfa73d8e9dc83b9902aa27d1fbe7c4d2
Parents: 4cd0302
Author: Rahul Sharma <rs...@apache.org>
Authored: Thu Jun 26 12:00:17 2014 +0530
Committer: Rahul Sharma <rs...@apache.org>
Committed: Thu Jun 26 12:00:17 2014 +0530

----------------------------------------------------------------------
 org.apache.hdt.core/META-INF/MANIFEST.MF            |  2 +-
 org.apache.hdt.core/pom.xml                         |  2 +-
 org.apache.hdt.dist/pom.xml                         |  2 +-
 org.apache.hdt.feature/feature.xml                  | 10 +++++-----
 org.apache.hdt.feature/pom.xml                      |  2 +-
 org.apache.hdt.hadoop.release/META-INF/MANIFEST.MF  |  2 +-
 org.apache.hdt.hadoop.release/pom.xml               |  2 +-
 org.apache.hdt.hadoop2.release/META-INF/MANIFEST.MF |  2 +-
 org.apache.hdt.hadoop2.release/pom.xml              |  2 +-
 org.apache.hdt.ui.test/META-INF/MANIFEST.MF         |  2 +-
 org.apache.hdt.ui.test/pom.xml                      |  2 +-
 org.apache.hdt.ui/META-INF/MANIFEST.MF              |  2 +-
 org.apache.hdt.ui/pom.xml                           |  2 +-
 org.apache.hdt.updateSite/pom.xml                   |  2 +-
 pom.xml                                             |  2 +-
 15 files changed, 19 insertions(+), 19 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/2de1a905/org.apache.hdt.core/META-INF/MANIFEST.MF
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/META-INF/MANIFEST.MF b/org.apache.hdt.core/META-INF/MANIFEST.MF
index e50301c..bbe6c84 100644
--- a/org.apache.hdt.core/META-INF/MANIFEST.MF
+++ b/org.apache.hdt.core/META-INF/MANIFEST.MF
@@ -2,7 +2,7 @@ Manifest-Version: 1.0
 Bundle-ManifestVersion: 2
 Bundle-Name: Apache Hadoop Eclipse Plugin
 Bundle-SymbolicName: org.apache.hdt.core;singleton:=true
-Bundle-Version: 0.0.2.qualifier
+Bundle-Version: 0.0.2.incubating
 Bundle-Activator: org.apache.hdt.core.Activator
 Require-Bundle: org.eclipse.core.runtime,
  org.eclipse.core.filesystem;bundle-version="1.3.0";visibility:=reexport,

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/2de1a905/org.apache.hdt.core/pom.xml
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/pom.xml b/org.apache.hdt.core/pom.xml
index 4dfedb1..095e1dc 100644
--- a/org.apache.hdt.core/pom.xml
+++ b/org.apache.hdt.core/pom.xml
@@ -23,7 +23,7 @@ under the License.
     <relativePath>../pom.xml</relativePath>
     <groupId>org.apache.hdt</groupId>
     <artifactId>hdt.master</artifactId>
-    <version>0.0.2-SNAPSHOT</version>
+    <version>0.0.2.incubating</version>
   </parent>
 
   <artifactId>org.apache.hdt.core</artifactId>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/2de1a905/org.apache.hdt.dist/pom.xml
----------------------------------------------------------------------
diff --git a/org.apache.hdt.dist/pom.xml b/org.apache.hdt.dist/pom.xml
index 48c21e7..ef1af8e 100644
--- a/org.apache.hdt.dist/pom.xml
+++ b/org.apache.hdt.dist/pom.xml
@@ -22,7 +22,7 @@
   <parent>
     <groupId>org.apache.hdt</groupId>
     <artifactId>hdt.master</artifactId>
-    <version>0.0.2-SNAPSHOT</version>
+    <version>0.0.2.incubating</version>
   </parent>
   <artifactId>org.apache.hdt.dist</artifactId>
   <name>Apache Hadoop Development Tools Distribution</name>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/2de1a905/org.apache.hdt.feature/feature.xml
----------------------------------------------------------------------
diff --git a/org.apache.hdt.feature/feature.xml b/org.apache.hdt.feature/feature.xml
index 120c73f..9a1253a 100644
--- a/org.apache.hdt.feature/feature.xml
+++ b/org.apache.hdt.feature/feature.xml
@@ -2,7 +2,7 @@
 <feature
       id="org.apache.hdt.feature"
       label="Hadoop Development Tools"
-      version="0.0.2.qualifier"
+      version="0.0.2.incubating"
       provider-name="Apache Software Foundation">
 
    <description url="http://hdt.incubator.apache.org/">
@@ -40,7 +40,7 @@ permissions and limitations under the License.
          id="org.apache.hdt.hadoop.release"
          download-size="0"
          install-size="0"
-         version="0.0.2.qualifier"
+         version="0.0.2.incubating"
          fragment="true"
          unpack="false"/>
 
@@ -48,7 +48,7 @@ permissions and limitations under the License.
          id="org.apache.hdt.hadoop2.release"
          download-size="0"
          install-size="0"
-         version="0.0.2.qualifier"
+         version="0.0.2.incubating"
          fragment="true"
          unpack="false"/>
 
@@ -56,14 +56,14 @@ permissions and limitations under the License.
          id="org.apache.hdt.ui"
          download-size="0"
          install-size="0"
-         version="0.0.2.qualifier"
+         version="0.0.2.incubating"
          unpack="false"/>
          
    <plugin
          id="org.apache.hdt.core"
          download-size="0"
          install-size="0"
-         version="0.0.2.qualifier"
+         version="0.0.2.incubating"
          unpack="false"/>
 
 </feature>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/2de1a905/org.apache.hdt.feature/pom.xml
----------------------------------------------------------------------
diff --git a/org.apache.hdt.feature/pom.xml b/org.apache.hdt.feature/pom.xml
index faa6ab7..19a5a18 100644
--- a/org.apache.hdt.feature/pom.xml
+++ b/org.apache.hdt.feature/pom.xml
@@ -22,7 +22,7 @@ under the License.
     <relativePath>../pom.xml</relativePath>
     <groupId>org.apache.hdt</groupId>
     <artifactId>hdt.master</artifactId>
-    <version>0.0.2-SNAPSHOT</version>
+    <version>0.0.2.incubating</version>
   </parent>
   
   <artifactId>org.apache.hdt.feature</artifactId>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/2de1a905/org.apache.hdt.hadoop.release/META-INF/MANIFEST.MF
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop.release/META-INF/MANIFEST.MF b/org.apache.hdt.hadoop.release/META-INF/MANIFEST.MF
index ec6c80c..0f56f4b 100644
--- a/org.apache.hdt.hadoop.release/META-INF/MANIFEST.MF
+++ b/org.apache.hdt.hadoop.release/META-INF/MANIFEST.MF
@@ -2,7 +2,7 @@ Manifest-Version: 1.0
 Bundle-ManifestVersion: 2
 Bundle-Name: Apache Hadoop 0.0.1.qualifier Release Eclipse Plugin
 Bundle-SymbolicName: org.apache.hdt.hadoop.release;singleton:=true
-Bundle-Version: 0.0.2.qualifier
+Bundle-Version: 0.0.2.incubating
 Bundle-Vendor: Apache Hadoop
 Bundle-RequiredExecutionEnvironment: JavaSE-1.6
 Require-Bundle: org.apache.hdt.core,

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/2de1a905/org.apache.hdt.hadoop.release/pom.xml
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop.release/pom.xml b/org.apache.hdt.hadoop.release/pom.xml
index 8f10f85..e08e28c 100644
--- a/org.apache.hdt.hadoop.release/pom.xml
+++ b/org.apache.hdt.hadoop.release/pom.xml
@@ -22,7 +22,7 @@ under the License.
     <relativePath>../pom.xml</relativePath>
     <groupId>org.apache.hdt</groupId>
     <artifactId>hdt.master</artifactId>
-    <version>0.0.2-SNAPSHOT</version>
+    <version>0.0.2.incubating</version>
   </parent>
   <artifactId>org.apache.hdt.hadoop.release</artifactId>
   <packaging>eclipse-plugin</packaging>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/2de1a905/org.apache.hdt.hadoop2.release/META-INF/MANIFEST.MF
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop2.release/META-INF/MANIFEST.MF b/org.apache.hdt.hadoop2.release/META-INF/MANIFEST.MF
index eb51451..3a6e69f 100644
--- a/org.apache.hdt.hadoop2.release/META-INF/MANIFEST.MF
+++ b/org.apache.hdt.hadoop2.release/META-INF/MANIFEST.MF
@@ -2,7 +2,7 @@ Manifest-Version: 1.0
 Bundle-ManifestVersion: 2
 Bundle-Name: Apache Hadoop2 Release Eclipse Plugin
 Bundle-SymbolicName: org.apache.hdt.hadoop2.release;singleton:=true
-Bundle-Version: 0.0.2.qualifier
+Bundle-Version: 0.0.2.incubating
 Bundle-Vendor: Apache Hadoop
 Bundle-RequiredExecutionEnvironment: JavaSE-1.6
 Require-Bundle: org.apache.hdt.core,

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/2de1a905/org.apache.hdt.hadoop2.release/pom.xml
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop2.release/pom.xml b/org.apache.hdt.hadoop2.release/pom.xml
index 8625bae..74db93b 100644
--- a/org.apache.hdt.hadoop2.release/pom.xml
+++ b/org.apache.hdt.hadoop2.release/pom.xml
@@ -22,7 +22,7 @@ under the License.
     <relativePath>../pom.xml</relativePath>
     <groupId>org.apache.hdt</groupId>
     <artifactId>hdt.master</artifactId>
-    <version>0.0.2-SNAPSHOT</version>
+    <version>0.0.2.incubating</version>
   </parent>
   <artifactId>org.apache.hdt.hadoop2.release</artifactId>
   <packaging>eclipse-plugin</packaging>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/2de1a905/org.apache.hdt.ui.test/META-INF/MANIFEST.MF
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui.test/META-INF/MANIFEST.MF b/org.apache.hdt.ui.test/META-INF/MANIFEST.MF
index 021ae55..01bbaee 100644
--- a/org.apache.hdt.ui.test/META-INF/MANIFEST.MF
+++ b/org.apache.hdt.ui.test/META-INF/MANIFEST.MF
@@ -2,7 +2,7 @@ Manifest-Version: 1.0
 Bundle-ManifestVersion: 2
 Bundle-Name: Apache Hadoop UI Test Eclipse Plugin
 Bundle-SymbolicName: org.apache.hdt.ui.test;singleton:=true
-Bundle-Version: 0.0.2.qualifier
+Bundle-Version: 0.0.2.incubating
 Bundle-Activator: org.apache.hdt.ui.test.Activator
 Bundle-Vendor: Apache Hadoop
 Require-Bundle: org.eclipse.ui,

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/2de1a905/org.apache.hdt.ui.test/pom.xml
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui.test/pom.xml b/org.apache.hdt.ui.test/pom.xml
index 210dd79..5b10a29 100644
--- a/org.apache.hdt.ui.test/pom.xml
+++ b/org.apache.hdt.ui.test/pom.xml
@@ -24,7 +24,7 @@ under the License.
     <relativePath>../pom.xml</relativePath>
     <groupId>org.apache.hdt</groupId>
     <artifactId>hdt.master</artifactId>
-    <version>0.0.2-SNAPSHOT</version>
+    <version>0.0.2.incubating</version>
   </parent>
 
   <artifactId>org.apache.hdt.ui.test</artifactId>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/2de1a905/org.apache.hdt.ui/META-INF/MANIFEST.MF
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/META-INF/MANIFEST.MF b/org.apache.hdt.ui/META-INF/MANIFEST.MF
index c34e98a..86b811a 100644
--- a/org.apache.hdt.ui/META-INF/MANIFEST.MF
+++ b/org.apache.hdt.ui/META-INF/MANIFEST.MF
@@ -2,7 +2,7 @@ Manifest-Version: 1.0
 Bundle-ManifestVersion: 2
 Bundle-Name: Apache Hadoop UI Eclipse Plugin
 Bundle-SymbolicName: org.apache.hdt.ui;singleton:=true
-Bundle-Version: 0.0.2.qualifier
+Bundle-Version: 0.0.2.incubating
 Bundle-Activator: org.apache.hdt.ui.Activator
 Bundle-Vendor: Apache Hadoop
 Require-Bundle: org.eclipse.core.runtime,

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/2de1a905/org.apache.hdt.ui/pom.xml
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/pom.xml b/org.apache.hdt.ui/pom.xml
index 0505e43..0df6a60 100644
--- a/org.apache.hdt.ui/pom.xml
+++ b/org.apache.hdt.ui/pom.xml
@@ -22,7 +22,7 @@ under the License.
     <relativePath>../pom.xml</relativePath>
     <groupId>org.apache.hdt</groupId>
     <artifactId>hdt.master</artifactId>
-    <version>0.0.2-SNAPSHOT</version>
+    <version>0.0.2.incubating</version>
   </parent>
   <artifactId>org.apache.hdt.ui</artifactId>
   <packaging>eclipse-plugin</packaging>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/2de1a905/org.apache.hdt.updateSite/pom.xml
----------------------------------------------------------------------
diff --git a/org.apache.hdt.updateSite/pom.xml b/org.apache.hdt.updateSite/pom.xml
index 0c770b2..f3a33e8 100644
--- a/org.apache.hdt.updateSite/pom.xml
+++ b/org.apache.hdt.updateSite/pom.xml
@@ -23,7 +23,7 @@ under the License.
     <relativePath>../pom.xml</relativePath>
     <groupId>org.apache.hdt</groupId>
     <artifactId>hdt.master</artifactId>
-    <version>0.0.2-SNAPSHOT</version>
+    <version>0.0.2.incubating</version>
   </parent>
   
   <artifactId>org.apache.hdt.updateSite</artifactId>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/2de1a905/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 107a28b..3c1ce87 100644
--- a/pom.xml
+++ b/pom.xml
@@ -25,7 +25,7 @@ under the License.
   </parent>
   <groupId>org.apache.hdt</groupId>
   <artifactId>hdt.master</artifactId>
-  <version>0.0.2-SNAPSHOT</version>
+  <version>0.0.2.incubating</version>
   <packaging>pom</packaging>
   <name>Apache Hadoop Development Tools</name>
   <description>Eclipse tools for developing against the Hadoop platform</description>


[02/27] git commit: - bumping up versions to 0.0.2-SNAPSHOT

Posted by rs...@apache.org.
- bumping up versions to 0.0.2-SNAPSHOT


Project: http://git-wip-us.apache.org/repos/asf/incubator-hdt/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hdt/commit/300cf8ba
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hdt/tree/300cf8ba
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hdt/diff/300cf8ba

Branch: refs/heads/hadoop-eclipse-merge
Commit: 300cf8ba190f410f7b1b28ec27b9e850090e4e95
Parents: aea6a37
Author: Rahul Sharma <rs...@apache.org>
Authored: Mon Mar 31 10:19:11 2014 +0530
Committer: Rahul Sharma <rs...@apache.org>
Committed: Mon Mar 31 10:19:11 2014 +0530

----------------------------------------------------------------------
 org.apache.hdt.core/META-INF/MANIFEST.MF           | 2 +-
 org.apache.hdt.core/pom.xml                        | 2 +-
 org.apache.hdt.dist/pom.xml                        | 2 +-
 org.apache.hdt.feature/feature.xml                 | 8 ++++----
 org.apache.hdt.feature/pom.xml                     | 2 +-
 org.apache.hdt.hadoop.release/META-INF/MANIFEST.MF | 2 +-
 org.apache.hdt.hadoop.release/pom.xml              | 2 +-
 org.apache.hdt.ui.test/META-INF/MANIFEST.MF        | 2 +-
 org.apache.hdt.ui.test/pom.xml                     | 2 +-
 org.apache.hdt.ui/META-INF/MANIFEST.MF             | 2 +-
 org.apache.hdt.ui/pom.xml                          | 2 +-
 org.apache.hdt.updateSite/pom.xml                  | 2 +-
 pom.xml                                            | 2 +-
 13 files changed, 16 insertions(+), 16 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/300cf8ba/org.apache.hdt.core/META-INF/MANIFEST.MF
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/META-INF/MANIFEST.MF b/org.apache.hdt.core/META-INF/MANIFEST.MF
index 620e9ba..6234625 100644
--- a/org.apache.hdt.core/META-INF/MANIFEST.MF
+++ b/org.apache.hdt.core/META-INF/MANIFEST.MF
@@ -2,7 +2,7 @@ Manifest-Version: 1.0
 Bundle-ManifestVersion: 2
 Bundle-Name: Apache Hadoop Eclipse Plugin
 Bundle-SymbolicName: org.apache.hdt.core;singleton:=true
-Bundle-Version: 0.0.1.incubating
+Bundle-Version: 0.0.2.qualifier
 Bundle-Activator: org.apache.hdt.core.Activator
 Require-Bundle: org.eclipse.core.runtime,
  org.eclipse.core.filesystem;bundle-version="1.3.0";visibility:=reexport,

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/300cf8ba/org.apache.hdt.core/pom.xml
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/pom.xml b/org.apache.hdt.core/pom.xml
index a5bbc5e..4dfedb1 100644
--- a/org.apache.hdt.core/pom.xml
+++ b/org.apache.hdt.core/pom.xml
@@ -23,7 +23,7 @@ under the License.
     <relativePath>../pom.xml</relativePath>
     <groupId>org.apache.hdt</groupId>
     <artifactId>hdt.master</artifactId>
-    <version>0.0.1.incubating</version>
+    <version>0.0.2-SNAPSHOT</version>
   </parent>
 
   <artifactId>org.apache.hdt.core</artifactId>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/300cf8ba/org.apache.hdt.dist/pom.xml
----------------------------------------------------------------------
diff --git a/org.apache.hdt.dist/pom.xml b/org.apache.hdt.dist/pom.xml
index 7e06e62..48c21e7 100644
--- a/org.apache.hdt.dist/pom.xml
+++ b/org.apache.hdt.dist/pom.xml
@@ -22,7 +22,7 @@
   <parent>
     <groupId>org.apache.hdt</groupId>
     <artifactId>hdt.master</artifactId>
-    <version>0.0.1.incubating</version>
+    <version>0.0.2-SNAPSHOT</version>
   </parent>
   <artifactId>org.apache.hdt.dist</artifactId>
   <name>Apache Hadoop Development Tools Distribution</name>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/300cf8ba/org.apache.hdt.feature/feature.xml
----------------------------------------------------------------------
diff --git a/org.apache.hdt.feature/feature.xml b/org.apache.hdt.feature/feature.xml
index 6d351e2..0f13637 100644
--- a/org.apache.hdt.feature/feature.xml
+++ b/org.apache.hdt.feature/feature.xml
@@ -2,7 +2,7 @@
 <feature
       id="org.apache.hdt.feature"
       label="Hadoop Development Tools"
-      version="0.0.1.incubating"
+      version="0.0.2.qualifier"
       provider-name="Apache Software Foundation">
 
    <description url="http://hdt.incubator.apache.org/">
@@ -40,7 +40,7 @@ permissions and limitations under the License.
          id="org.apache.hdt.hadoop.release"
          download-size="0"
          install-size="0"
-         version="0.0.1.incubating"
+         version="0.0.2.qualifier"
          fragment="true"
          unpack="false"/>
 
@@ -48,14 +48,14 @@ permissions and limitations under the License.
          id="org.apache.hdt.ui"
          download-size="0"
          install-size="0"
-         version="0.0.1.incubating"
+         version="0.0.2.qualifier"
          unpack="false"/>
          
    <plugin
          id="org.apache.hdt.core"
          download-size="0"
          install-size="0"
-         version="0.0.1.incubating"
+         version="0.0.2.qualifier"
          unpack="false"/>
 
 </feature>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/300cf8ba/org.apache.hdt.feature/pom.xml
----------------------------------------------------------------------
diff --git a/org.apache.hdt.feature/pom.xml b/org.apache.hdt.feature/pom.xml
index de6f2dd..faa6ab7 100644
--- a/org.apache.hdt.feature/pom.xml
+++ b/org.apache.hdt.feature/pom.xml
@@ -22,7 +22,7 @@ under the License.
     <relativePath>../pom.xml</relativePath>
     <groupId>org.apache.hdt</groupId>
     <artifactId>hdt.master</artifactId>
-    <version>0.0.1.incubating</version>
+    <version>0.0.2-SNAPSHOT</version>
   </parent>
   
   <artifactId>org.apache.hdt.feature</artifactId>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/300cf8ba/org.apache.hdt.hadoop.release/META-INF/MANIFEST.MF
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop.release/META-INF/MANIFEST.MF b/org.apache.hdt.hadoop.release/META-INF/MANIFEST.MF
index 1e0d762..005ca9b 100644
--- a/org.apache.hdt.hadoop.release/META-INF/MANIFEST.MF
+++ b/org.apache.hdt.hadoop.release/META-INF/MANIFEST.MF
@@ -2,7 +2,7 @@ Manifest-Version: 1.0
 Bundle-ManifestVersion: 2
 Bundle-Name: Apache Hadoop 0.0.1.qualifier Release Eclipse Plugin
 Bundle-SymbolicName: org.apache.hdt.hadoop.release;singleton:=true
-Bundle-Version: 0.0.1.incubating
+Bundle-Version: 0.0.2.qualifier
 Bundle-Vendor: Apache Hadoop
 Fragment-Host: org.apache.hdt.core
 Bundle-RequiredExecutionEnvironment: JavaSE-1.6

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/300cf8ba/org.apache.hdt.hadoop.release/pom.xml
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop.release/pom.xml b/org.apache.hdt.hadoop.release/pom.xml
index 279d131..69c61f2 100644
--- a/org.apache.hdt.hadoop.release/pom.xml
+++ b/org.apache.hdt.hadoop.release/pom.xml
@@ -22,7 +22,7 @@ under the License.
     <relativePath>../pom.xml</relativePath>
     <groupId>org.apache.hdt</groupId>
     <artifactId>hdt.master</artifactId>
-    <version>0.0.1.incubating</version>
+    <version>0.0.2-SNAPSHOT</version>
   </parent>
   <artifactId>org.apache.hdt.hadoop.release</artifactId>
   <packaging>eclipse-plugin</packaging>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/300cf8ba/org.apache.hdt.ui.test/META-INF/MANIFEST.MF
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui.test/META-INF/MANIFEST.MF b/org.apache.hdt.ui.test/META-INF/MANIFEST.MF
index baa7145..021ae55 100644
--- a/org.apache.hdt.ui.test/META-INF/MANIFEST.MF
+++ b/org.apache.hdt.ui.test/META-INF/MANIFEST.MF
@@ -2,7 +2,7 @@ Manifest-Version: 1.0
 Bundle-ManifestVersion: 2
 Bundle-Name: Apache Hadoop UI Test Eclipse Plugin
 Bundle-SymbolicName: org.apache.hdt.ui.test;singleton:=true
-Bundle-Version: 0.0.1.incubating
+Bundle-Version: 0.0.2.qualifier
 Bundle-Activator: org.apache.hdt.ui.test.Activator
 Bundle-Vendor: Apache Hadoop
 Require-Bundle: org.eclipse.ui,

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/300cf8ba/org.apache.hdt.ui.test/pom.xml
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui.test/pom.xml b/org.apache.hdt.ui.test/pom.xml
index 1cbb8d0..b029539 100644
--- a/org.apache.hdt.ui.test/pom.xml
+++ b/org.apache.hdt.ui.test/pom.xml
@@ -24,7 +24,7 @@ under the License.
     <relativePath>../pom.xml</relativePath>
     <groupId>org.apache.hdt</groupId>
     <artifactId>hdt.master</artifactId>
-    <version>0.0.1.incubating</version>
+    <version>0.0.2-SNAPSHOT</version>
   </parent>
 
   <artifactId>org.apache.hdt.ui.test</artifactId>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/300cf8ba/org.apache.hdt.ui/META-INF/MANIFEST.MF
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/META-INF/MANIFEST.MF b/org.apache.hdt.ui/META-INF/MANIFEST.MF
index 3825bf1..ac39e07 100644
--- a/org.apache.hdt.ui/META-INF/MANIFEST.MF
+++ b/org.apache.hdt.ui/META-INF/MANIFEST.MF
@@ -2,7 +2,7 @@ Manifest-Version: 1.0
 Bundle-ManifestVersion: 2
 Bundle-Name: Apache Hadoop UI Eclipse Plugin
 Bundle-SymbolicName: org.apache.hdt.ui;singleton:=true
-Bundle-Version: 0.0.1.incubating
+Bundle-Version: 0.0.2.qualifier
 Bundle-Activator: org.apache.hdt.ui.Activator
 Bundle-Vendor: Apache Hadoop
 Require-Bundle: org.eclipse.core.runtime,

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/300cf8ba/org.apache.hdt.ui/pom.xml
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/pom.xml b/org.apache.hdt.ui/pom.xml
index f275c81..0505e43 100644
--- a/org.apache.hdt.ui/pom.xml
+++ b/org.apache.hdt.ui/pom.xml
@@ -22,7 +22,7 @@ under the License.
     <relativePath>../pom.xml</relativePath>
     <groupId>org.apache.hdt</groupId>
     <artifactId>hdt.master</artifactId>
-    <version>0.0.1.incubating</version>
+    <version>0.0.2-SNAPSHOT</version>
   </parent>
   <artifactId>org.apache.hdt.ui</artifactId>
   <packaging>eclipse-plugin</packaging>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/300cf8ba/org.apache.hdt.updateSite/pom.xml
----------------------------------------------------------------------
diff --git a/org.apache.hdt.updateSite/pom.xml b/org.apache.hdt.updateSite/pom.xml
index bffad22..0c770b2 100644
--- a/org.apache.hdt.updateSite/pom.xml
+++ b/org.apache.hdt.updateSite/pom.xml
@@ -23,7 +23,7 @@ under the License.
     <relativePath>../pom.xml</relativePath>
     <groupId>org.apache.hdt</groupId>
     <artifactId>hdt.master</artifactId>
-    <version>0.0.1.incubating</version>
+    <version>0.0.2-SNAPSHOT</version>
   </parent>
   
   <artifactId>org.apache.hdt.updateSite</artifactId>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/300cf8ba/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index b2b1738..19b9ec5 100644
--- a/pom.xml
+++ b/pom.xml
@@ -25,7 +25,7 @@ under the License.
   </parent>
   <groupId>org.apache.hdt</groupId>
   <artifactId>hdt.master</artifactId>
-  <version>0.0.1.incubating</version>
+  <version>0.0.2-SNAPSHOT</version>
   <packaging>pom</packaging>
   <name>Apache Hadoop Development Tools</name>
   <description>Eclipse tools for developing against the Hadoop platform</description>


[21/27] git commit: - Fixing cleanup on server destroy

Posted by rs...@apache.org.
- Fixing cleanup on server destroy


Project: http://git-wip-us.apache.org/repos/asf/incubator-hdt/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hdt/commit/bbc139af
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hdt/tree/bbc139af
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hdt/diff/bbc139af

Branch: refs/heads/hadoop-eclipse-merge
Commit: bbc139af52c9d43908c4b2f99c02686524b9fd66
Parents: d63110a
Author: Rahul Sharma <rs...@apache.org>
Authored: Mon Jun 23 13:51:41 2014 +0530
Committer: Rahul Sharma <rs...@apache.org>
Committed: Mon Jun 23 13:51:41 2014 +0530

----------------------------------------------------------------------
 .../org/apache/hdt/core/internal/hdfs/HDFSManager.java   | 11 +++++++++++
 1 file changed, 11 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/bbc139af/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSManager.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSManager.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSManager.java
index 43ebf1f..8d27d23 100644
--- a/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSManager.java
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSManager.java
@@ -207,6 +207,7 @@ public class HDFSManager {
 									"HDFS Error", "Unable to create HDFS site :"+e.getMessage());
 						}
 					});
+					deleteServer(getServer(hdfsURI.toString()));
 					return e.getStatus();
 				} finally {
 					monitor.done();
@@ -280,7 +281,17 @@ public class HDFSManager {
 		String projectName = this.serverToProjectMap.remove(server);
 		this.projectToServerMap.remove(projectName);
 		this.uriToServerMap.remove(server.getUri());
+		this.uriToServerCacheMap.remove(server.getUri());
 		HadoopManager.INSTANCE.saveServers();
+		String tmpUri = server.getUri();
+		while (tmpUri != null && uriToServerCacheMap.containsKey(tmpUri)) {
+			uriToServerCacheMap.remove(tmpUri);
+			int lastSlashIndex = tmpUri.lastIndexOf('/');
+			tmpUri = lastSlashIndex < 0 ? null : tmpUri.substring(0, lastSlashIndex);
+		}
+		if(hdfsClientsMap.containsKey(server.getUri().toString())){
+			hdfsClientsMap.remove(server.getUri().toString());
+		}
 	}
 
 	/**
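
The new loop above evicts not only the server's own URI but every ancestor
prefix from uriToServerCacheMap, so a stale path lookup can no longer resolve
to the deleted server. A minimal standalone sketch of that eviction walk
(hypothetical class name; a plain HashMap stands in for HDFSManager's cache):

----------------------------------------------------------------------
import java.util.HashMap;
import java.util.Map;

public class CacheEvictionSketch {
	public static void main(String[] args) {
		Map<String, String> uriToServerCache = new HashMap<String, String>();
		uriToServerCache.put("hdfs://host:8020/user/rahul", "server-1");
		uriToServerCache.put("hdfs://host:8020/user", "server-1");
		uriToServerCache.put("hdfs://host:8020", "server-1");

		// Same walk as the commit: drop the URI, then each parent prefix,
		// stopping once a prefix is no longer cached.
		String tmpUri = "hdfs://host:8020/user/rahul";
		while (tmpUri != null && uriToServerCache.containsKey(tmpUri)) {
			uriToServerCache.remove(tmpUri);
			int lastSlashIndex = tmpUri.lastIndexOf('/');
			tmpUri = lastSlashIndex < 0 ? null : tmpUri.substring(0, lastSlashIndex);
		}
		System.out.println(uriToServerCache.isEmpty()); // prints true
	}
}
----------------------------------------------------------------------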


[08/27] git commit: - HDT-52: changing menu of ClusterView - shows three options for MR cluster - shows delete option for MR job

Posted by rs...@apache.org.
- HDT-52: changing menu of ClusterView - shows three options for MR cluster - shows delete option for MR job


Project: http://git-wip-us.apache.org/repos/asf/incubator-hdt/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hdt/commit/45147711
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hdt/tree/45147711
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hdt/diff/45147711

Branch: refs/heads/hadoop-eclipse-merge
Commit: 45147711746b3189de4ef32fba8f3952c55b3353
Parents: 00646ae
Author: Rahul Sharma <rs...@apache.org>
Authored: Wed May 14 14:34:59 2014 +0530
Committer: Rahul Sharma <rs...@apache.org>
Committed: Fri May 23 08:45:39 2014 +0530

----------------------------------------------------------------------
 .../apache/hdt/ui/internal/mr/ClusterView.java  | 53 ++++----------------
 1 file changed, 9 insertions(+), 44 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/45147711/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/ClusterView.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/ClusterView.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/ClusterView.java
index 9952904..7ac0582 100644
--- a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/ClusterView.java
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/ClusterView.java
@@ -176,45 +176,9 @@ public class ClusterView extends ViewPart implements ITreeContentProvider, ITabl
 		getViewSite().getActionBars().setGlobalActionHandler(ActionFactory.DELETE.getId(), deleteAction);
 		getViewSite().getActionBars().getToolBarManager().add(editServerAction);
 		getViewSite().getActionBars().getToolBarManager().add(newLocationAction);
-
-		createActions();
 		createContextMenu();
 	}
 
-	/**
-	 * Actions
-	 */
-	private void createActions() {
-		/*
-		 * addItemAction = new Action("Add...") { public void run() { addItem();
-		 * } }; addItemAction.setImageDescriptor(ImageLibrary
-		 * .get("server.view.location.new"));
-		 */
-		/*
-		 * deleteItemAction = new Action("Delete") { public void run() {
-		 * deleteItem(); } };
-		 * deleteItemAction.setImageDescriptor(getImageDescriptor
-		 * ("delete.gif"));
-		 * 
-		 * selectAllAction = new Action("Select All") { public void run() {
-		 * selectAll(); } };
-		 */
-		// Add selection listener.
-		viewer.addSelectionChangedListener(new ISelectionChangedListener() {
-			public void selectionChanged(SelectionChangedEvent event) {
-				updateActionEnablement();
-			}
-		});
-	}
-
-	private void addItem() {
-		System.out.printf("ADD ITEM\n");
-	}
-
-	private void updateActionEnablement() {
-		IStructuredSelection sel = (IStructuredSelection) viewer.getSelection();
-		// deleteItemAction.setEnabled(sel.size() > 0);
-	}
 
 	/**
 	 * Contextual menu
@@ -238,14 +202,15 @@ public class ClusterView extends ViewPart implements ITreeContentProvider, ITabl
 	}
 
 	private void fillContextMenu(IMenuManager mgr) {
-		mgr.add(newLocationAction);
-		mgr.add(editServerAction);
-		mgr.add(deleteAction);
-		/*
-		 * mgr.add(new GroupMarker(IWorkbenchActionConstants.MB_ADDITIONS));
-		 * mgr.add(deleteItemAction); mgr.add(new Separator());
-		 * mgr.add(selectAllAction);
-		 */
+		IStructuredSelection sel = (IStructuredSelection) viewer.getSelection();
+		Object firstElement = sel.getFirstElement();
+		if(firstElement instanceof IHadoopJob){
+			mgr.add(deleteAction);
+		}else{
+			mgr.add(newLocationAction);
+			mgr.add(editServerAction);
+			mgr.add(deleteAction);
+		}
 	}
 
 	/* @inheritDoc */
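
ClusterView's own createContextMenu is not shown in this hunk; the
selection-sensitive behaviour above assumes the conventional JFace wiring,
sketched here (not part of the commit): a menu manager flagged
removeAllWhenShown rebuilds the menu from the live selection each time it
opens, which is what lets fillContextMenu offer delete-only for an
IHadoopJob and the full set for a cluster.

----------------------------------------------------------------------
import org.eclipse.jface.action.IMenuListener;
import org.eclipse.jface.action.IMenuManager;
import org.eclipse.jface.action.MenuManager;

// Sketch only: assumes the surrounding ClusterView with its TreeViewer field.
private void createContextMenu() {
	MenuManager menuMgr = new MenuManager("#PopupMenu");
	menuMgr.setRemoveAllWhenShown(true);
	menuMgr.addMenuListener(new IMenuListener() {
		public void menuAboutToShow(IMenuManager mgr) {
			fillContextMenu(mgr);
		}
	});
	viewer.getControl().setMenu(menuMgr.createContextMenu(viewer.getControl()));
	getViewSite().registerContextMenu(menuMgr, viewer);
}
----------------------------------------------------------------------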


[10/27] git commit: HDT-54: Adding a ClusterView Wizard.

Posted by rs...@apache.org.
HDT-54: Adding a ClusterView Wizard.


Project: http://git-wip-us.apache.org/repos/asf/incubator-hdt/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hdt/commit/092213c1
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hdt/tree/092213c1
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hdt/diff/092213c1

Branch: refs/heads/hadoop-eclipse-merge
Commit: 092213c1981376f3bca05522b4e0f56669d5c606
Parents: ec415a9
Author: Rahul Sharma <rs...@apache.org>
Authored: Thu May 15 15:30:46 2014 +0530
Committer: Rahul Sharma <rs...@apache.org>
Committed: Fri May 23 08:46:47 2014 +0530

----------------------------------------------------------------------
 org.apache.hdt.ui/plugin.xml                    |  8 +++
 .../hdt/ui/internal/mr/NewLocationWizard.java   | 75 ++++++++++++++++++++
 2 files changed, 83 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/092213c1/org.apache.hdt.ui/plugin.xml
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/plugin.xml b/org.apache.hdt.ui/plugin.xml
index af68ffa..7bc1a36 100644
--- a/org.apache.hdt.ui/plugin.xml
+++ b/org.apache.hdt.ui/plugin.xml
@@ -186,6 +186,14 @@
             id="org.apache.hdt.ui.wizard.newZooKeeperServer"
             name="New ZooKeeper Server">
       </wizard>
+      <wizard
+            category="org.apache.hdt.ui.newWizards.category"
+            class="org.apache.hdt.ui.internal.mr.NewLocationWizard"
+            finalPerspective="org.apache.hdt.ui.perspective"
+            icon="icons/location-new-16x16.png"
+            id="org.apache.hdt.ui.wizard.newMRCluster"
+            name="New MR Cluster">
+      </wizard>
       <wizard category="org.apache.hdt.ui.newWizards.category"
             class="org.apache.hdt.ui.internal.mr.NewMapperWizard"
             icon="icons/mapper16.png"

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/092213c1/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/NewLocationWizard.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/NewLocationWizard.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/NewLocationWizard.java
new file mode 100644
index 0000000..ee4e399
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/NewLocationWizard.java
@@ -0,0 +1,75 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hdt.ui.internal.mr;
+
+import org.apache.hdt.core.launch.AbstractHadoopCluster;
+import org.apache.hdt.ui.internal.launch.HadoopLocationWizard;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.core.runtime.IConfigurationElement;
+import org.eclipse.core.runtime.IExecutableExtension;
+import org.eclipse.jface.viewers.IStructuredSelection;
+import org.eclipse.jface.wizard.Wizard;
+import org.eclipse.swt.widgets.Display;
+import org.eclipse.ui.INewWizard;
+import org.eclipse.ui.IWorkbench;
+import org.eclipse.ui.wizards.newresource.BasicNewProjectResourceWizard;
+
+public class NewLocationWizard extends Wizard implements INewWizard,IExecutableExtension{
+	
+	private HadoopLocationWizard serverLocationWizardPage;
+	private IConfigurationElement configElement;
+
+	/* (non-Javadoc)
+	 * @see org.eclipse.ui.IWorkbenchWizard#init(org.eclipse.ui.IWorkbench, org.eclipse.jface.viewers.IStructuredSelection)
+	 */
+	@Override
+	public void init(IWorkbench workbench, IStructuredSelection selection) {
+		// TODO Auto-generated method stub
+		
+	}
+	@Override
+	public void addPages() {
+		super.addPages();
+		if (serverLocationWizardPage == null) {
+			serverLocationWizardPage = new HadoopLocationWizard();
+		}
+		addPage(serverLocationWizardPage);
+	}
+	/* (non-Javadoc)
+	 * @see org.eclipse.core.runtime.IExecutableExtension#setInitializationData(org.eclipse.core.runtime.IConfigurationElement, java.lang.String, java.lang.Object)
+	 */
+	@Override
+	public void setInitializationData(IConfigurationElement config, String propertyName, Object data) throws CoreException {
+		this.configElement=config;
+     }
+
+	/* (non-Javadoc)
+	 * @see org.eclipse.jface.wizard.Wizard#performFinish()
+	 */
+	@Override
+	public boolean performFinish() {
+		Display.getDefault().syncExec(new Runnable() {
+			public void run() {
+				BasicNewProjectResourceWizard.updatePerspective(configElement);
+			}
+		});
+		AbstractHadoopCluster cluster = serverLocationWizardPage.performFinish();
+		return cluster!=null;
+	}
+	
+}
\ No newline at end of file
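
Eclipse instantiates this wizard through the org.eclipse.ui.newWizards
extension registered in plugin.xml above, but it can also be opened directly,
e.g. from a test or an action; a hedged usage sketch (an active workbench
window is assumed to exist):

----------------------------------------------------------------------
import org.eclipse.jface.wizard.WizardDialog;
import org.eclipse.swt.widgets.Shell;
import org.eclipse.ui.PlatformUI;

// Usage sketch only; the extension-point path also calls init() first.
Shell shell = PlatformUI.getWorkbench().getActiveWorkbenchWindow().getShell();
WizardDialog dialog = new WizardDialog(shell, new NewLocationWizard());
dialog.open();
----------------------------------------------------------------------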


[24/27] git commit: - updating release version

Posted by rs...@apache.org.
 - updating release version


Project: http://git-wip-us.apache.org/repos/asf/incubator-hdt/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hdt/commit/2de1a905
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hdt/tree/2de1a905
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hdt/diff/2de1a905

Branch: refs/heads/hadoop-eclipse-merge-development
Commit: 2de1a905cfa73d8e9dc83b9902aa27d1fbe7c4d2
Parents: 4cd0302
Author: Rahul Sharma <rs...@apache.org>
Authored: Thu Jun 26 12:00:17 2014 +0530
Committer: Rahul Sharma <rs...@apache.org>
Committed: Thu Jun 26 12:00:17 2014 +0530

----------------------------------------------------------------------
 org.apache.hdt.core/META-INF/MANIFEST.MF            |  2 +-
 org.apache.hdt.core/pom.xml                         |  2 +-
 org.apache.hdt.dist/pom.xml                         |  2 +-
 org.apache.hdt.feature/feature.xml                  | 10 +++++-----
 org.apache.hdt.feature/pom.xml                      |  2 +-
 org.apache.hdt.hadoop.release/META-INF/MANIFEST.MF  |  2 +-
 org.apache.hdt.hadoop.release/pom.xml               |  2 +-
 org.apache.hdt.hadoop2.release/META-INF/MANIFEST.MF |  2 +-
 org.apache.hdt.hadoop2.release/pom.xml              |  2 +-
 org.apache.hdt.ui.test/META-INF/MANIFEST.MF         |  2 +-
 org.apache.hdt.ui.test/pom.xml                      |  2 +-
 org.apache.hdt.ui/META-INF/MANIFEST.MF              |  2 +-
 org.apache.hdt.ui/pom.xml                           |  2 +-
 org.apache.hdt.updateSite/pom.xml                   |  2 +-
 pom.xml                                             |  2 +-
 15 files changed, 19 insertions(+), 19 deletions(-)
----------------------------------------------------------------------



[19/27] git commit: HDT-51: trivial conversion of String to JobID

Posted by rs...@apache.org.
HDT-51: trivial conversion of String to JobID


Project: http://git-wip-us.apache.org/repos/asf/incubator-hdt/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hdt/commit/b6634e66
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hdt/tree/b6634e66
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hdt/diff/b6634e66

Branch: refs/heads/hadoop-eclipse-merge
Commit: b6634e66d7098b498755634f08bd18c1b49db846
Parents: bf1a494
Author: Rahul Sharma <rs...@apache.org>
Authored: Fri Jun 20 14:59:24 2014 +0530
Committer: Rahul Sharma <rs...@apache.org>
Committed: Fri Jun 20 14:59:24 2014 +0530

----------------------------------------------------------------------
 .../src/org/apache/hdt/hadoop/release/HadoopCluster.java           | 2 +-
 .../src/org/apache/hdt/hadoop2/release/HadoopCluster.java          | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/b6634e66/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopCluster.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopCluster.java b/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopCluster.java
index 54c5500..c64f757 100644
--- a/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopCluster.java
+++ b/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopCluster.java
@@ -274,7 +274,7 @@ public class HadoopCluster extends AbstractHadoopCluster {
 	 *            the job to remove
 	 */
 	public void purgeJob(final IHadoopJob job) {
-		runningJobs.remove(job.getJobID());
+		runningJobs.remove(JobID.forName(job.getJobID()));
 		Display.getDefault().asyncExec(new Runnable() {
 			public void run() {
 				fireJobRemoved(job);

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/b6634e66/org.apache.hdt.hadoop2.release/src/org/apache/hdt/hadoop2/release/HadoopCluster.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop2.release/src/org/apache/hdt/hadoop2/release/HadoopCluster.java b/org.apache.hdt.hadoop2.release/src/org/apache/hdt/hadoop2/release/HadoopCluster.java
index cf4dcd4..d18679a 100644
--- a/org.apache.hdt.hadoop2.release/src/org/apache/hdt/hadoop2/release/HadoopCluster.java
+++ b/org.apache.hdt.hadoop2.release/src/org/apache/hdt/hadoop2/release/HadoopCluster.java
@@ -271,7 +271,7 @@ public class HadoopCluster extends AbstractHadoopCluster {
 	 *            the job to remove
 	 */
 	public void purgeJob(final IHadoopJob job) {
-		runningJobs.remove(job.getJobID());
+		runningJobs.remove(JobID.forName(job.getJobID()));
 		Display.getDefault().asyncExec(new Runnable() {
 			public void run() {
 				fireJobRemoved(job);
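
For context: runningJobs is keyed by JobID objects, so removing by the raw
String id was a silent no-op and purged jobs lingered. A self-contained
sketch of the before/after behaviour (hypothetical job id):

----------------------------------------------------------------------
import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.mapred.JobID;

public class JobIdKeySketch {
	public static void main(String[] args) {
		Map<JobID, String> runningJobs = new HashMap<JobID, String>();
		runningJobs.put(JobID.forName("job_201406201459_0001"), "wordcount");

		// Before the fix: a String never equals a JobID key, nothing is removed.
		runningJobs.remove("job_201406201459_0001");
		System.out.println(runningJobs.size()); // prints 1

		// After the fix: parse the id back into a JobID to get a matching key.
		runningJobs.remove(JobID.forName("job_201406201459_0001"));
		System.out.println(runningJobs.size()); // prints 0
	}
}
----------------------------------------------------------------------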


[06/27] git commit: HDT-49: Job does not launch on cluster - Generated mapred-site.xml so that jobs can run on cluster - using mapred.job.tracker instead of deprecated mapreduce.jobtracker.address

Posted by rs...@apache.org.
HDT-49: Job does not launch on cluster
- Generated mapred-site.xml so that jobs can run on cluster
- using mapred.job.tracker instead of deprecated mapreduce.jobtracker.address


Project: http://git-wip-us.apache.org/repos/asf/incubator-hdt/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hdt/commit/0e9e729a
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hdt/tree/0e9e729a
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hdt/diff/0e9e729a

Branch: refs/heads/hadoop-eclipse-merge
Commit: 0e9e729a283806c6d8772c3958a18ac767c7143d
Parents: 29467b5
Author: Rahul Sharma <rs...@apache.org>
Authored: Thu May 1 15:25:15 2014 +0530
Committer: Rahul Sharma <rs...@apache.org>
Committed: Fri May 9 09:58:17 2014 +0530

----------------------------------------------------------------------
 .../hdt/core/launch/AbstractHadoopCluster.java  |  2 +-
 .../org/apache/hdt/core/launch/ConfProp.java    |  2 +-
 .../hdt/hadoop/release/HadoopCluster.java       | 25 ++++++++++++--------
 3 files changed, 17 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/0e9e729a/org.apache.hdt.core/src/org/apache/hdt/core/launch/AbstractHadoopCluster.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/launch/AbstractHadoopCluster.java b/org.apache.hdt.core/src/org/apache/hdt/core/launch/AbstractHadoopCluster.java
index e5f7dd4..782a89c 100644
--- a/org.apache.hdt.core/src/org/apache/hdt/core/launch/AbstractHadoopCluster.java
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/launch/AbstractHadoopCluster.java
@@ -63,7 +63,7 @@ public abstract class AbstractHadoopCluster {
 	abstract public String getState();
 
 	abstract public boolean loadFromXML(File file) throws IOException;
-
+	
 	public static AbstractHadoopCluster createCluster(File file) throws CoreException, IOException {
 		AbstractHadoopCluster hadoopCluster = createCluster();
 		hadoopCluster.loadFromXML(file);

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/0e9e729a/org.apache.hdt.core/src/org/apache/hdt/core/launch/ConfProp.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/launch/ConfProp.java b/org.apache.hdt.core/src/org/apache/hdt/core/launch/ConfProp.java
index 538eb75..c7c64f9 100644
--- a/org.apache.hdt.core/src/org/apache/hdt/core/launch/ConfProp.java
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/launch/ConfProp.java
@@ -81,7 +81,7 @@ public enum ConfProp {
 	 * Property name for naming the job tracker (URI). This property is related
 	 * to {@link #PI_MASTER_HOST_NAME}
 	 */
-	JOB_TRACKER_URI(false, "mapreduce.jobtracker.address", "localhost:50020"),
+	JOB_TRACKER_URI(false, "mapred.job.tracker", "localhost:50020"),
 
 	/**
 	 * Property name for naming the default file system (URI).

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/0e9e729a/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopCluster.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopCluster.java b/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopCluster.java
index daaf990..466739b 100644
--- a/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopCluster.java
+++ b/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopCluster.java
@@ -18,7 +18,9 @@
 
 package org.apache.hdt.hadoop.release;
 
+import java.io.BufferedInputStream;
 import java.io.File;
+import java.io.FileInputStream;
 import java.io.FileOutputStream;
 import java.io.IOException;
 import java.util.Collection;
@@ -49,6 +51,7 @@ import org.apache.hdt.core.launch.AbstractHadoopCluster;
 import org.apache.hdt.core.launch.IHadoopJob;
 import org.apache.hdt.core.launch.IJarModule;
 import org.apache.hdt.core.launch.IJobListener;
+import org.eclipse.core.internal.utils.FileUtil;
 import org.eclipse.core.runtime.IProgressMonitor;
 import org.eclipse.core.runtime.IStatus;
 import org.eclipse.core.runtime.Status;
@@ -236,9 +239,7 @@ public class HadoopCluster extends AbstractHadoopCluster {
 	 * @throws ParserConfigurationException
 	 */
 	public HadoopCluster(File file) throws ParserConfigurationException, SAXException, IOException {
-
-		this.conf = new Configuration();
-		this.addPluginConfigDefaultProperties();
+		this();
 		this.loadFromXML(file);
 	}
 
@@ -422,8 +423,8 @@ public class HadoopCluster extends AbstractHadoopCluster {
 	 *            the property value
 	 */
 	public void setConfProp(ConfProp prop, String propValue) {
-		assert propValue != null;
-		conf.set(prop.name, propValue);
+		if (propValue != null)
+			conf.set(prop.name, propValue);
 	}
 
 	/**
@@ -472,8 +473,7 @@ public class HadoopCluster extends AbstractHadoopCluster {
 	 */
 	private void addPluginConfigDefaultProperties() {
 		for (ConfProp prop : ConfProp.values()) {
-			if (conf.get(prop.name) == null)
-				conf.set(prop.name, prop.defVal);
+			conf.set(prop.name, prop.defVal);
 		}
 	}
 
@@ -550,14 +550,19 @@ public class HadoopCluster extends AbstractHadoopCluster {
 		JobConf conf = new JobConf(this.conf);
 		conf.setJar(jarFilePath);
 		// Write it to the disk file
-		File confFile = new File(confDir, "core-site.xml");
-		FileOutputStream fos = new FileOutputStream(confFile);
+		File coreSiteFile = new File(confDir, "core-site.xml");
+		File mapredSiteFile = new File(confDir, "mapred-site.xml");
+		FileOutputStream fos = new FileOutputStream(coreSiteFile);
+		FileInputStream fis = null;
 		try {
 			conf.writeXml(fos);
 			fos.close();
-			fos = null;
+			fos = new FileOutputStream(mapredSiteFile);
+			fis = new FileInputStream(coreSiteFile);
+			IOUtils.copyBytes(new BufferedInputStream(fis), fos, 4096);
 		} finally {
 			IOUtils.closeStream(fos);
+			IOUtils.closeStream(fis);
 		}
 
 	}


[25/27] git commit: Merge branch 'release/0.0.2.incubating' into hadoop-eclipse-merge

Posted by rs...@apache.org.
Merge branch 'release/0.0.2.incubating' into hadoop-eclipse-merge


Project: http://git-wip-us.apache.org/repos/asf/incubator-hdt/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hdt/commit/027a9862
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hdt/tree/027a9862
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hdt/diff/027a9862

Branch: refs/heads/hadoop-eclipse-merge
Commit: 027a9862650903333541a39662b6fddaa861289d
Parents: 3672346 2de1a90
Author: Rahul Sharma <rs...@apache.org>
Authored: Thu Jun 26 12:00:46 2014 +0530
Committer: Rahul Sharma <rs...@apache.org>
Committed: Thu Jun 26 12:00:46 2014 +0530

----------------------------------------------------------------------
 NOTICE                                          |   2 +-
 README.txt                                      |  19 +-
 org.apache.hdt.core/.classpath                  |   2 +-
 org.apache.hdt.core/META-INF/MANIFEST.MF        |  10 +-
 org.apache.hdt.core/models/Hadoop.ecore         |   2 +
 org.apache.hdt.core/plugin.xml                  |  13 +
 org.apache.hdt.core/pom.xml                     |   2 +-
 ...org.apache.hadoop.eclipse.hadoopCluster.exsd | 126 +++
 ....apache.hadoop.eclipse.hadoopHomeReader.exsd | 126 +++
 .../hdt/core/AbstractHadoopHomeReader.java      |  46 ++
 .../src/org/apache/hdt/core/HadoopVersion.java  |  34 +
 .../apache/hdt/core/internal/HadoopManager.java |   6 +-
 .../hdt/core/internal/hdfs/HDFSFileStore.java   |   2 +-
 .../hdt/core/internal/hdfs/HDFSManager.java     |  85 +-
 .../core/internal/hdfs/HDFSMoveDeleteHook.java  |   3 +
 .../internal/hdfs/InterruptableHDFSClient.java  |   9 +-
 .../hdt/core/internal/model/HDFSServer.java     |  28 +
 .../hdt/core/internal/model/HadoopPackage.java  |  30 +-
 .../internal/model/impl/HDFSServerImpl.java     |  54 ++
 .../internal/model/impl/HadoopFactoryImpl.java  |   2 +-
 .../internal/model/impl/HadoopPackageImpl.java  |  11 +
 .../core/internal/model/util/HadoopSwitch.java  |  36 +-
 .../zookeeper/InterruptableZooKeeperClient.java |   5 +-
 .../internal/zookeeper/ZooKeeperManager.java    |  47 +-
 .../hdt/core/launch/AbstractHadoopCluster.java  | 189 +++++
 .../org/apache/hdt/core/launch/ConfProp.java    | 145 ++++
 .../hdt/core/launch/ErrorMessageDialog.java     |  43 ++
 .../hdt/core/launch/IHadoopClusterListener.java |  26 +
 .../org/apache/hdt/core/launch/IHadoopJob.java  |  34 +
 .../org/apache/hdt/core/launch/IJarModule.java  |  41 +
 .../apache/hdt/core/launch/IJobListener.java    |  36 +
 .../hdt/core/natures/MapReduceNature.java       | 119 +++
 org.apache.hdt.dist/pom.xml                     |   2 +-
 org.apache.hdt.feature/.classpath               |   3 +-
 org.apache.hdt.feature/.project                 |   1 +
 org.apache.hdt.feature/feature.xml              |  18 +-
 org.apache.hdt.feature/pom.xml                  |   2 +-
 .../META-INF/MANIFEST.MF                        |  66 +-
 org.apache.hdt.hadoop.release/build.properties  |  10 +-
 org.apache.hdt.hadoop.release/fragment.xml      |  36 -
 org.apache.hdt.hadoop.release/plugin.xml        |  51 ++
 org.apache.hdt.hadoop.release/pom.xml           |  42 +-
 .../hdt/hadoop/release/HadoopCluster.java       | 547 +++++++++++++
 .../hdt/hadoop/release/HadoopHomeReader.java    |  77 ++
 .../apache/hdt/hadoop/release/HadoopJob.java    | 344 +++++++++
 .../release/HadoopV1ConfigurationBuilder.java   | 690 +++++++++++++++++
 org.apache.hdt.hadoop2.release/.classpath       |  91 +++
 .../.settings/org.eclipse.core.resources.prefs  |   2 +
 .../.settings/org.eclipse.jdt.core.prefs        |   7 +
 .../.settings/org.eclipse.m2e.core.prefs        |   4 +
 .../META-INF/MANIFEST.MF                        |  98 +++
 org.apache.hdt.hadoop2.release/build.properties |  23 +
 org.apache.hdt.hadoop2.release/plugin.xml       |  42 +
 org.apache.hdt.hadoop2.release/pom.xml          | 120 +++
 .../hdt/hadoop2/release/HDFSClientRelease.java  | 235 ++++++
 .../hdt/hadoop2/release/HadoopCluster.java      | 537 +++++++++++++
 .../hdt/hadoop2/release/HadoopHomeReader.java   | 101 +++
 .../apache/hdt/hadoop2/release/HadoopJob.java   | 338 ++++++++
 .../release/HadoopV2ConfigurationBuilder.java   | 771 +++++++++++++++++++
 org.apache.hdt.ui.test/META-INF/MANIFEST.MF     |   2 +-
 org.apache.hdt.ui.test/pom.xml                  |   3 +-
 org.apache.hdt.ui/META-INF/MANIFEST.MF          |   7 +-
 org.apache.hdt.ui/plugin.xml                    | 122 ++-
 org.apache.hdt.ui/pom.xml                       |   2 +-
 .../src/org/apache/hdt/ui/ImageLibrary.java     | 251 ++++++
 .../hdfs/HDFSLightweightLabelDecorator.java     |   2 +-
 .../internal/hdfs/NewHDFSServerWizardPage.java  |  37 +-
 .../hdt/ui/internal/hdfs/NewHDFSWizard.java     |  40 +-
 .../launch/HadoopApplicationLaunchShortcut.java | 130 ++++
 .../internal/launch/HadoopLocationWizard.java   | 378 +++++++++
 ...adoopServerSelectionListContentProvider.java |  76 ++
 .../hdt/ui/internal/launch/JarModule.java       | 146 ++++
 .../ui/internal/launch/RunOnHadoopWizard.java   | 346 +++++++++
 .../hdt/ui/internal/launch/ServerRegistry.java  | 227 ++++++
 .../apache/hdt/ui/internal/mr/ClusterView.java  | 415 ++++++++++
 .../hdt/ui/internal/mr/EditLocationAction.java  |  72 ++
 .../hdt/ui/internal/mr/NewDriverWizard.java     |  99 +++
 .../hdt/ui/internal/mr/NewDriverWizardPage.java | 264 +++++++
 .../hdt/ui/internal/mr/NewLocationAction.java   |  63 ++
 .../hdt/ui/internal/mr/NewLocationWizard.java   |  75 ++
 .../internal/mr/NewMapReduceProjectWizard.java  | 441 +++++++++++
 .../hdt/ui/internal/mr/NewMapperWizard.java     | 167 ++++
 .../ui/internal/mr/NewPartitionerWizard.java    | 194 +++++
 .../hdt/ui/internal/mr/NewReducerWizard.java    | 175 +++++
 .../hdt/ui/internal/zookeeper/DeleteAction.java |  91 ++-
 .../ui/internal/zookeeper/DisconnectAction.java |  52 +-
 .../internal/zookeeper/NewZooKeeperWizard.java  |  36 +-
 .../ui/internal/zookeeper/ReconnectAction.java  |  54 +-
 .../ZooKeeperCommonContentProvider.java         |  10 +-
 .../ui/preferences/MapReducePreferencePage.java | 130 ++++
 .../hdt/ui/preferences/PreferenceConstants.java |  36 +
 org.apache.hdt.updateSite/.classpath            |  15 +-
 org.apache.hdt.updateSite/.project              |   1 +
 org.apache.hdt.updateSite/pom.xml               |   2 +-
 pom.xml                                         |  12 +-
 95 files changed, 9476 insertions(+), 288 deletions(-)
----------------------------------------------------------------------



[20/27] git commit: HDT-13: Validating the Hadoop home preference for the selected version.

Posted by rs...@apache.org.
HDT-13: Validating the Hadoop home preference for the selected version.


Project: http://git-wip-us.apache.org/repos/asf/incubator-hdt/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hdt/commit/d63110a4
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hdt/tree/d63110a4
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hdt/diff/d63110a4

Branch: refs/heads/hadoop-eclipse-merge
Commit: d63110a4ec9bb5debae25f127bb8085fb0ff48df
Parents: b6634e6
Author: Rahul Sharma <rs...@apache.org>
Authored: Fri Jun 20 16:34:10 2014 +0530
Committer: Rahul Sharma <rs...@apache.org>
Committed: Mon Jun 23 10:03:10 2014 +0530

----------------------------------------------------------------------
 .../ui/preferences/MapReducePreferencePage.java | 119 ++++++++++++++-----
 1 file changed, 87 insertions(+), 32 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/d63110a4/org.apache.hdt.ui/src/org/apache/hdt/ui/preferences/MapReducePreferencePage.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/preferences/MapReducePreferencePage.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/preferences/MapReducePreferencePage.java
index b711f91..240fc64 100644
--- a/org.apache.hdt.ui/src/org/apache/hdt/ui/preferences/MapReducePreferencePage.java
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/preferences/MapReducePreferencePage.java
@@ -17,19 +17,25 @@
  */
 package org.apache.hdt.ui.preferences;
 
+import org.apache.hdt.core.AbstractHadoopHomeReader;
 import org.apache.hdt.core.HadoopVersion;
 import org.apache.hdt.ui.Activator;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.core.runtime.Path;
 import org.eclipse.jface.preference.ComboFieldEditor;
 import org.eclipse.jface.preference.DirectoryFieldEditor;
+import org.eclipse.jface.preference.FieldEditor;
 import org.eclipse.jface.preference.FieldEditorPreferencePage;
+import org.eclipse.jface.preference.StringFieldEditor;
+import org.eclipse.jface.util.PropertyChangeEvent;
 import org.eclipse.ui.IWorkbench;
 import org.eclipse.ui.IWorkbenchPreferencePage;
 
 /**
  * This class represents a preference page that is contributed to the
- * Preferences dialog. By sub-classing <tt>FieldEditorPreferencePage</tt>,
- * we can use the field support built into JFace that allows us to create a
- * page that is small and knows how to save, restore and apply itself.
+ * Preferences dialog. By sub-classing <tt>FieldEditorPreferencePage</tt>, we
+ * can use the field support built into JFace that allows us to create a page
+ * that is small and knows how to save, restore and apply itself.
  * 
  * <p>
  * This page is used to modify preferences only. They are stored in the
@@ -37,39 +43,88 @@ import org.eclipse.ui.IWorkbenchPreferencePage;
  * preferences can be accessed directly via the preference store.
  */
 
-public class MapReducePreferencePage extends FieldEditorPreferencePage
-    implements IWorkbenchPreferencePage {
+public class MapReducePreferencePage extends FieldEditorPreferencePage implements IWorkbenchPreferencePage {
 
-  public MapReducePreferencePage() {
-    super(GRID);
-    setPreferenceStore(Activator.getDefault().getPreferenceStore());
-    setTitle("Hadoop Map/Reduce Tools");
-    // setDescription("Hadoop Map/Reduce Preferences");
-  }
+	private StringFieldEditor hadoopHomeDirEditor;
+	private ComboFieldEditor hadoopVersionEditor;
+	private String hadoopVersionValue;
+	private String hadoopHomeValue;
 
-  /**
-   * Creates the field editors. Field editors are abstractions of the common
-   * GUI blocks needed to manipulate various types of preferences. Each field
-   * editor knows how to save and restore itself.
-   */
-  @Override
-  public void createFieldEditors() {
-    addField(new DirectoryFieldEditor(PreferenceConstants.P_PATH,
-        "&Hadoop installation directory:", getFieldEditorParent()));
-    HadoopVersion[] versions = HadoopVersion.values();
-    String[][] values= new String[versions.length][2];
-    int pos=0;
-	for(HadoopVersion ver:versions){
-		values[pos][0]=values[pos][1]=ver.getDisplayName();
-		pos++;
+	public MapReducePreferencePage() {
+		super(GRID);
+		setPreferenceStore(Activator.getDefault().getPreferenceStore());
+		setTitle("Hadoop Map/Reduce Tools");
+		// setDescription("Hadoop Map/Reduce Preferences");
 	}
-    addField(new ComboFieldEditor(PreferenceConstants.P_VERSION,
-            "&Hadoop Version:",values,getFieldEditorParent()));
 
-  }
+	/**
+	 * Creates the field editors. Field editors are abstractions of the common
+	 * GUI blocks needed to manipulate various types of preferences. Each field
+	 * editor knows how to save and restore itself.
+	 */
+	@Override
+	public void createFieldEditors() {
+		DirectoryFieldEditor editor = new DirectoryFieldEditor(PreferenceConstants.P_PATH, "&Hadoop installation directory:", getFieldEditorParent());
+		addField(editor);
+		HadoopVersion[] versions = HadoopVersion.values();
+		String[][] values = new String[versions.length][2];
+		int pos = 0;
+		for (HadoopVersion ver : versions) {
+			values[pos][0] = values[pos][1] = ver.getDisplayName();
+			pos++;
+		}
+		ComboFieldEditor options = new ComboFieldEditor(PreferenceConstants.P_VERSION, "&Hadoop Version:", values, getFieldEditorParent());
+		addField(options);
+		hadoopVersionEditor = options;
+		hadoopHomeDirEditor = editor;
+		hadoopVersionValue = HadoopVersion.Version1.getDisplayName();
+	}
+
+	public void propertyChange(PropertyChangeEvent event) {
+		super.propertyChange(event);
+		if (event.getSource().equals(hadoopVersionEditor)) {
+			hadoopVersionValue = event.getNewValue().toString();
+		}
+		if (event.getSource().equals(hadoopHomeDirEditor)) {
+			hadoopHomeValue = event.getNewValue().toString();
+		}
+		if (event.getProperty().equals(FieldEditor.VALUE)) {
+			checkState();
+		}
+	}
 
-  /* @inheritDoc */
-  public void init(IWorkbench workbench) {
-  }
+	@Override
+	protected void checkState() {
+		super.checkState();
+		if(hadoopHomeValue==null || hadoopVersionValue==null){
+			setErrorMessage("Please set Hadoop Home/Version.");
+			setValid(false);
+			return;
+		}
+		AbstractHadoopHomeReader homeReader;
+		try {
+			homeReader = AbstractHadoopHomeReader.createReader(hadoopVersionValue);
+			if (!homeReader.validateHadoopHome(new Path(hadoopHomeValue).toFile())) {
+				setErrorMessage("Invalid Hadoop Home.");
+				setValid(false);
+			} else {
+				setErrorMessage(null);
+				setValid(true);
+			}
+		} catch (CoreException e) {
+			e.printStackTrace();
+		}
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.ui.IWorkbenchPreferencePage#init(org.eclipse.ui.IWorkbench)
+	 */
+	@Override
+	public void init(IWorkbench workbench) {
+		// TODO Auto-generated method stub
 
+	}
 }
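
A minimal usage sketch of the validation path added above (hypothetical helper; assumes an Eclipse/OSGi runtime so the extension registry behind createReader is available):

import java.io.File;

import org.apache.hdt.core.AbstractHadoopHomeReader;
import org.apache.hdt.core.HadoopVersion;
import org.eclipse.core.runtime.CoreException;

public class HomeValidationSketch {
	// True only if the directory has the layout expected by the chosen version.
	public static boolean isValidHome(String dir) throws CoreException {
		AbstractHadoopHomeReader reader =
				AbstractHadoopHomeReader.createReader(HadoopVersion.Version1.getDisplayName());
		return reader.validateHadoopHome(new File(dir));
	}
}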


[26/27] git commit: Merge branch 'release/0.0.2.incubating' into hadoop-eclipse-merge-development

Posted by rs...@apache.org.
Merge branch 'release/0.0.2.incubating' into hadoop-eclipse-merge-development


Project: http://git-wip-us.apache.org/repos/asf/incubator-hdt/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hdt/commit/d04238c9
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hdt/tree/d04238c9
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hdt/diff/d04238c9

Branch: refs/heads/hadoop-eclipse-merge-development
Commit: d04238c9d77eefdeb3adb0195b45d1710f972d07
Parents: 4cd0302 2de1a90
Author: Rahul Sharma <rs...@apache.org>
Authored: Thu Jun 26 12:01:19 2014 +0530
Committer: Rahul Sharma <rs...@apache.org>
Committed: Thu Jun 26 12:01:19 2014 +0530

----------------------------------------------------------------------
 org.apache.hdt.core/META-INF/MANIFEST.MF            |  2 +-
 org.apache.hdt.core/pom.xml                         |  2 +-
 org.apache.hdt.dist/pom.xml                         |  2 +-
 org.apache.hdt.feature/feature.xml                  | 10 +++++-----
 org.apache.hdt.feature/pom.xml                      |  2 +-
 org.apache.hdt.hadoop.release/META-INF/MANIFEST.MF  |  2 +-
 org.apache.hdt.hadoop.release/pom.xml               |  2 +-
 org.apache.hdt.hadoop2.release/META-INF/MANIFEST.MF |  2 +-
 org.apache.hdt.hadoop2.release/pom.xml              |  2 +-
 org.apache.hdt.ui.test/META-INF/MANIFEST.MF         |  2 +-
 org.apache.hdt.ui.test/pom.xml                      |  2 +-
 org.apache.hdt.ui/META-INF/MANIFEST.MF              |  2 +-
 org.apache.hdt.ui/pom.xml                           |  2 +-
 org.apache.hdt.updateSite/pom.xml                   |  2 +-
 pom.xml                                             |  2 +-
 15 files changed, 19 insertions(+), 19 deletions(-)
----------------------------------------------------------------------



[22/27] git commit: - Adding version numbers to pom - some readme instructions

Posted by rs...@apache.org.
- Adding version numbers to pom
- some readme instructions


Project: http://git-wip-us.apache.org/repos/asf/incubator-hdt/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hdt/commit/4cd0302f
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hdt/tree/4cd0302f
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hdt/diff/4cd0302f

Branch: refs/heads/hadoop-eclipse-merge
Commit: 4cd0302f826a7accce32337e3735ec689832bbb9
Parents: bbc139a
Author: Rahul Sharma <rs...@apache.org>
Authored: Wed Jun 25 14:59:44 2014 +0530
Committer: Rahul Sharma <rs...@apache.org>
Committed: Wed Jun 25 14:59:44 2014 +0530

----------------------------------------------------------------------
 NOTICE                                 |  2 +-
 README.txt                             | 19 ++++++++++++++++++-
 org.apache.hdt.hadoop.release/pom.xml  |  7 -------
 org.apache.hdt.hadoop2.release/pom.xml |  7 -------
 org.apache.hdt.ui.test/pom.xml         |  1 +
 pom.xml                                |  9 ++++++++-
 6 files changed, 28 insertions(+), 17 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/4cd0302f/NOTICE
----------------------------------------------------------------------
diff --git a/NOTICE b/NOTICE
index 507b766..bea83bd 100644
--- a/NOTICE
+++ b/NOTICE
@@ -1,5 +1,5 @@
 Apache Hadoop Development Tools
-Copyright 2013 The Apache Software Foundation
+Copyright 2014 The Apache Software Foundation
 
 This product includes software developed at
 The Apache Software Foundation (http://www.apache.org/).

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/4cd0302f/README.txt
----------------------------------------------------------------------
diff --git a/README.txt b/README.txt
index 38e8bf9..8366cc0 100644
--- a/README.txt
+++ b/README.txt
@@ -1,3 +1,20 @@
+Welcome to Apache Hadoop Development Tools!
+===========================================
+The Hadoop Development Tools (HDT) is a set of plugins for the Eclipse IDE for developing against the Hadoop platform.
+For more information please see the website:
+
+  http://hdt.incubator.apache.org/
+
 This is the repository for the Apache Hadoop Development Tools project, currently a podling at the Apache incubator.
 
-http://hdt.incubator.apache.org/
\ No newline at end of file
+
+Building the Source Code
+------------------------
+
+We recommend Maven 3 and JDK 6 for building HDT. To build the project, run the following Maven command:
+
+  mvn clean install
+
+The default PermGen size may not be sufficient for the plugin to work. Please increase it to 128M by setting JAVA_OPTS/MAVEN_OPTS.
+
+

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/4cd0302f/org.apache.hdt.hadoop.release/pom.xml
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop.release/pom.xml b/org.apache.hdt.hadoop.release/pom.xml
index fa65ec3..8f10f85 100644
--- a/org.apache.hdt.hadoop.release/pom.xml
+++ b/org.apache.hdt.hadoop.release/pom.xml
@@ -55,13 +55,6 @@ under the License.
               <overWriteIfNewer>true</overWriteIfNewer>
             </configuration>
           </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-dependency-plugin</artifactId>
-        <version>2.8</version>
-        <executions>
           <execution>
             <id>copy</id>
             <phase>initialize</phase>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/4cd0302f/org.apache.hdt.hadoop2.release/pom.xml
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop2.release/pom.xml b/org.apache.hdt.hadoop2.release/pom.xml
index 249ad6e..8625bae 100644
--- a/org.apache.hdt.hadoop2.release/pom.xml
+++ b/org.apache.hdt.hadoop2.release/pom.xml
@@ -72,13 +72,6 @@ under the License.
               <overWriteIfNewer>true</overWriteIfNewer>
             </configuration>
           </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-dependency-plugin</artifactId>
-        <version>2.8</version>
-        <executions>
           <execution>
             <id>copy</id>
             <phase>initialize</phase>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/4cd0302f/org.apache.hdt.ui.test/pom.xml
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui.test/pom.xml b/org.apache.hdt.ui.test/pom.xml
index b029539..210dd79 100644
--- a/org.apache.hdt.ui.test/pom.xml
+++ b/org.apache.hdt.ui.test/pom.xml
@@ -37,6 +37,7 @@ under the License.
       <plugin>
         <groupId>org.eclipse.tycho</groupId>
         <artifactId>tycho-surefire-plugin</artifactId>
+	<version>${tycho-version}</version>
       </plugin>
     </plugins>
   </build>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/4cd0302f/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 4005645..107a28b 100644
--- a/pom.xml
+++ b/pom.xml
@@ -149,6 +149,13 @@ under the License.
         <version>${tycho-version}</version>
         <configuration>
           <pomDependencies>consider</pomDependencies>
+	  <environments>
+            <environment>
+	      <os>linux</os>
+              <ws>gtk</ws>
+              <arch>x86_64</arch>
+            </environment>
+          </environments>
         </configuration>
       </plugin>
      
@@ -207,7 +214,7 @@ under the License.
             </archive>
           </configuration>
         </plugin>
-          <plugin>
+        <plugin>
           <groupId>org.eclipse.tycho</groupId>
           <artifactId>tycho-versions-plugin</artifactId>
           <version>${tycho-version}</version>          


[05/27] git commit: HDT-41: Provide existing MR functionality - ported Mapper/Reducer/Partitioner/Driver wizards - ported image lookup - ported Map/Reduce project wizard - using runtimes from the specified Hadoop location rather than runtime jars packed in the plugin

Posted by rs...@apache.org.
HDT-41: Provide existing MR functionality
- ported Mapper/Reducer/Partitioner/Driver wizards
- ported image lookup
- ported the Map/Reduce project wizard
- using runtimes from the specified Hadoop location rather than runtime jars packed in the plugin
- ported 'Run On Hadoop'
- ported the Hadoop location preference at Window->Preferences->Hadoop
- ported ClusterView
- modified the Hadoop perspective to contain ClusterView


Project: http://git-wip-us.apache.org/repos/asf/incubator-hdt/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hdt/commit/29467b54
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hdt/tree/29467b54
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hdt/diff/29467b54

Branch: refs/heads/hadoop-eclipse-merge
Commit: 29467b54a03846a50d1a80ad7cd362c11da4616c
Parents: 300cf8b
Author: Rahul Sharma <rs...@apache.org>
Authored: Mon Apr 21 13:38:04 2014 +0530
Committer: Rahul Sharma <rs...@apache.org>
Committed: Fri May 9 09:45:25 2014 +0530

----------------------------------------------------------------------
 org.apache.hdt.core/.classpath                  |   2 +-
 org.apache.hdt.core/META-INF/MANIFEST.MF        |  15 +-
 org.apache.hdt.core/plugin.xml                  |  12 +
 ...org.apache.hadoop.eclipse.hadoopCluster.exsd | 126 +++
 .../hdt/core/launch/AbstractHadoopCluster.java  |  84 ++
 .../org/apache/hdt/core/launch/ConfProp.java    | 133 +++
 .../hdt/core/launch/ErrorMessageDialog.java     |  43 +
 .../hdt/core/launch/IHadoopClusterListener.java |  26 +
 .../org/apache/hdt/core/launch/IHadoopJob.java  |  34 +
 .../org/apache/hdt/core/launch/IJarModule.java  |  41 +
 .../apache/hdt/core/launch/IJobListener.java    |  36 +
 .../hdt/core/natures/MapReduceNature.java       | 135 +++
 .../META-INF/MANIFEST.MF                        |  57 +-
 org.apache.hdt.hadoop.release/build.properties  |   8 +-
 org.apache.hdt.hadoop.release/fragment.xml      |   7 +
 org.apache.hdt.hadoop.release/pom.xml           |  47 +-
 .../hdt/hadoop/release/HadoopCluster.java       | 564 +++++++++++
 .../apache/hdt/hadoop/release/HadoopJob.java    | 342 +++++++
 org.apache.hdt.ui/META-INF/MANIFEST.MF          |   5 +
 org.apache.hdt.ui/plugin.xml                    | 104 ++-
 .../src/org/apache/hdt/ui/ImageLibrary.java     | 251 +++++
 .../launch/HadoopApplicationLaunchShortcut.java | 130 +++
 .../internal/launch/HadoopLocationWizard.java   | 925 +++++++++++++++++++
 ...adoopServerSelectionListContentProvider.java |  76 ++
 .../hdt/ui/internal/launch/JarModule.java       | 146 +++
 .../ui/internal/launch/RunOnHadoopWizard.java   | 346 +++++++
 .../hdt/ui/internal/launch/ServerRegistry.java  | 200 ++++
 .../apache/hdt/ui/internal/mr/ClusterView.java  | 450 +++++++++
 .../hdt/ui/internal/mr/EditLocationAction.java  |  72 ++
 .../hdt/ui/internal/mr/NewDriverWizard.java     |  99 ++
 .../hdt/ui/internal/mr/NewDriverWizardPage.java | 264 ++++++
 .../hdt/ui/internal/mr/NewLocationAction.java   |  63 ++
 .../internal/mr/NewMapReduceProjectWizard.java  | 385 ++++++++
 .../hdt/ui/internal/mr/NewMapperWizard.java     | 167 ++++
 .../ui/internal/mr/NewPartitionerWizard.java    | 194 ++++
 .../hdt/ui/internal/mr/NewReducerWizard.java    | 175 ++++
 .../ui/preferences/MapReducePreferencePage.java |  64 ++
 .../hdt/ui/preferences/PreferenceConstants.java |  34 +
 38 files changed, 5814 insertions(+), 48 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/29467b54/org.apache.hdt.core/.classpath
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/.classpath b/org.apache.hdt.core/.classpath
index 4a37a3a..4a91e22 100644
--- a/org.apache.hdt.core/.classpath
+++ b/org.apache.hdt.core/.classpath
@@ -2,7 +2,7 @@
 <classpath>
 	<classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-1.6"/>
 	<classpathentry kind="con" path="org.eclipse.pde.core.requiredPlugins"/>
-	<classpathentry kind="src" path="src/"/>
+	<classpathentry kind="src" path="src"/>
 	<classpathentry exported="true" kind="lib" path="jars/log4j-1.2.15.jar"/>
 	<classpathentry kind="output" path="target/classes"/>
 </classpath>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/29467b54/org.apache.hdt.core/META-INF/MANIFEST.MF
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/META-INF/MANIFEST.MF b/org.apache.hdt.core/META-INF/MANIFEST.MF
index 6234625..1d6b8c4 100644
--- a/org.apache.hdt.core/META-INF/MANIFEST.MF
+++ b/org.apache.hdt.core/META-INF/MANIFEST.MF
@@ -8,12 +8,15 @@ Require-Bundle: org.eclipse.core.runtime,
  org.eclipse.core.filesystem;bundle-version="1.3.0";visibility:=reexport,
  org.eclipse.core.resources;bundle-version="3.6.0",
  org.eclipse.emf.ecore;bundle-version="2.6.1";visibility:=reexport,
- org.eclipse.team.core;bundle-version="3.5.100"
+ org.eclipse.jdt.core,
+ org.eclipse.team.core;bundle-version="3.5.100",
+ org.eclipse.swt,
+ org.eclipse.jface
 Bundle-RequiredExecutionEnvironment: JavaSE-1.6
 Bundle-Vendor: Apache Hadoop
 Bundle-ClassPath: .,
  jars/log4j-1.2.15.jar
-Export-Package: org.apache.hdt.core,
+Export-Package:  org.apache.hdt.core,
  org.apache.hdt.core.hdfs,
  org.apache.hdt.core.internal,
  org.apache.hdt.core.internal.hdfs;x-friends:="org.apache.hdt.ui",
@@ -21,6 +24,8 @@ Export-Package: org.apache.hdt.core,
  org.apache.hdt.core.internal.model.impl,
  org.apache.hdt.core.internal.model.util,
  org.apache.hdt.core.internal.zookeeper,
+ org.apache.hdt.core.launch,
+ org.apache.hdt.core.natures,
  org.apache.hdt.core.zookeeper,
  org.apache.log4j,
  org.apache.log4j.chainsaw,
@@ -40,5 +45,9 @@ Export-Package: org.apache.hdt.core,
  org.apache.log4j.or.sax,
  org.apache.log4j.spi,
  org.apache.log4j.varia,
- org.apache.log4j.xml
+ org.apache.log4j.xml,
+ org.apache.hadoop,
+ org.apache.hadoop.conf,
+ org.apache.hadoop.io,
+ org.apache.hadoop.mapred
 Bundle-ActivationPolicy: lazy

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/29467b54/org.apache.hdt.core/plugin.xml
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/plugin.xml b/org.apache.hdt.core/plugin.xml
index 82dcbec..94f3d49 100644
--- a/org.apache.hdt.core/plugin.xml
+++ b/org.apache.hdt.core/plugin.xml
@@ -19,6 +19,8 @@
 <plugin>
    <extension-point id="org.apache.hdt.core.hdfsClient" name="Apache Hadoop HDFS Client" schema="schema/org.apache.hadoop.eclipse.hdfsclient.exsd"/>
    <extension-point id="org.apache.hdt.core.zookeeperClient" name="Apache Hadoop ZooKeeper Client" schema="schema/org.apache.hadoop.eclipse.zookeeperClient.exsd"/>
+   <extension-point id="org.apache.hdt.core.hadoopCluster" name="Apache Hadoop Cluster" schema="schema/org.apache.hadoop.eclipse.hadoopCluster.exsd"/>
+   
    <extension
          id="org.apache.hadoop.hdfs.filesystem"
          name="Apache Hadoop HDFS"
@@ -39,5 +41,15 @@
             id="org.apache.hadoop.hdfs">
       </repository>
    </extension>
+    <extension
+         id="org.apache.hdt.mrnature"
+         name="MapReduce Nature"
+         point="org.eclipse.core.resources.natures">
+      <runtime>
+         <run
+               class="org.apache.hdt.core.natures.MapReduceNature">
+         </run>
+      </runtime>
+   </extension>
 
 </plugin>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/29467b54/org.apache.hdt.core/schema/org.apache.hadoop.eclipse.hadoopCluster.exsd
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/schema/org.apache.hadoop.eclipse.hadoopCluster.exsd b/org.apache.hdt.core/schema/org.apache.hadoop.eclipse.hadoopCluster.exsd
new file mode 100644
index 0000000..72d3899
--- /dev/null
+++ b/org.apache.hdt.core/schema/org.apache.hadoop.eclipse.hadoopCluster.exsd
@@ -0,0 +1,126 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<schema targetNamespace="org.apache.hdt.core" xmlns="http://www.w3.org/2001/XMLSchema">
+<annotation>
+      <appinfo>
+         <meta.schema plugin="org.apache.hdt.core" id="org.apache.hdt.core.hadoopCluster" name="Apache Hadoop Cluster"/>
+      </appinfo>
+      <documentation>
+         [Enter description of this extension point.]
+      </documentation>
+   </annotation>
+
+   <element name="extension">
+      <annotation>
+         <appinfo>
+            <meta.element />
+         </appinfo>
+      </annotation>
+      <complexType>
+         <choice>
+            <sequence>
+               <element ref="hadoopCluster" minOccurs="0" maxOccurs="unbounded"/>
+            </sequence>
+         </choice>
+         <attribute name="point" type="string" use="required">
+            <annotation>
+               <documentation>
+                  
+               </documentation>
+            </annotation>
+         </attribute>
+         <attribute name="id" type="string">
+            <annotation>
+               <documentation>
+                  
+               </documentation>
+            </annotation>
+         </attribute>
+         <attribute name="name" type="string">
+            <annotation>
+               <documentation>
+                  
+               </documentation>
+               <appinfo>
+                  <meta.attribute translatable="true"/>
+               </appinfo>
+            </annotation>
+         </attribute>
+      </complexType>
+   </element>
+
+   <element name="hadoopCluster">
+      <complexType>
+         <attribute name="class" type="string" use="required">
+            <annotation>
+               <documentation>
+                  
+               </documentation>
+               <appinfo>
+                  <meta.attribute kind="java" basedOn="org.apache.hdt.core.launch.AbstractHadoopCluster:"/>
+               </appinfo>
+            </annotation>
+         </attribute>
+         <attribute name="protocolVersion" type="string" use="required">
+            <annotation>
+               <documentation>
+                  
+               </documentation>
+            </annotation>
+         </attribute>
+      </complexType>
+   </element>
+
+   <annotation>
+      <appinfo>
+         <meta.section type="since"/>
+      </appinfo>
+      <documentation>
+         [Enter the first release in which this extension point appears.]
+      </documentation>
+   </annotation>
+
+   <annotation>
+      <appinfo>
+         <meta.section type="examples"/>
+      </appinfo>
+      <documentation>
+         [Enter extension point usage example here.]
+      </documentation>
+   </annotation>
+
+   <annotation>
+      <appinfo>
+         <meta.section type="apiinfo"/>
+      </appinfo>
+      <documentation>
+         [Enter API information here.]
+      </documentation>
+   </annotation>
+
+   <annotation>
+      <appinfo>
+         <meta.section type="implementation"/>
+      </appinfo>
+      <documentation>
+         [Enter information about supplied implementation of this extension point.]
+      </documentation>
+   </annotation>
+
+
+</schema>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/29467b54/org.apache.hdt.core/src/org/apache/hdt/core/launch/AbstractHadoopCluster.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/launch/AbstractHadoopCluster.java b/org.apache.hdt.core/src/org/apache/hdt/core/launch/AbstractHadoopCluster.java
new file mode 100644
index 0000000..e5f7dd4
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/launch/AbstractHadoopCluster.java
@@ -0,0 +1,84 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.core.launch;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.Map.Entry;
+
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.core.runtime.IConfigurationElement;
+import org.eclipse.core.runtime.Platform;
+
+public abstract class AbstractHadoopCluster {
+
+	abstract public String getLocationName();
+
+	abstract public void dispose();
+
+	abstract public void storeSettingsToFile(File file) throws IOException;
+
+	abstract public void saveConfiguration(File confDir, String jarFilePath) throws IOException;
+
+	abstract public String getMasterHostName();
+
+	abstract public void setLocationName(String string);
+
+	abstract public void load(AbstractHadoopCluster server);
+
+	abstract public String getConfProp(String propName);
+
+	abstract public String getConfProp(ConfProp prop);
+
+	abstract public void setConfProp(ConfProp prop, String propValue);
+
+	abstract public void setConfProp(String propName, String propValue);
+
+	abstract public Iterator<Entry<String, String>> getConfiguration();
+
+	abstract public void purgeJob(IHadoopJob job);
+
+	abstract public void addJobListener(IJobListener jobListener);
+
+	abstract public Collection<? extends IHadoopJob> getJobs();
+
+	abstract public String getState();
+
+	abstract public boolean loadFromXML(File file) throws IOException;
+
+	public static AbstractHadoopCluster createCluster(File file) throws CoreException, IOException {
+		AbstractHadoopCluster hadoopCluster = createCluster();
+		hadoopCluster.loadFromXML(file);
+		return hadoopCluster;
+	}
+
+	public static AbstractHadoopCluster createCluster() throws CoreException {
+		IConfigurationElement[] elementsFor = Platform.getExtensionRegistry().getConfigurationElementsFor("org.apache.hdt.core.hadoopCluster");
+		return (AbstractHadoopCluster) elementsFor[0].createExecutableExtension("class");
+	}
+
+	public static AbstractHadoopCluster createCluster(AbstractHadoopCluster existing) throws CoreException {
+		AbstractHadoopCluster hadoopCluster = createCluster();
+		hadoopCluster.load(existing);
+		return hadoopCluster;
+	}
+
+}
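
A minimal sketch of how a client would obtain a cluster through this class (hypothetical location name; assumes it runs inside the Eclipse/OSGi runtime, since createCluster() goes through Platform.getExtensionRegistry()):

import org.apache.hdt.core.launch.AbstractHadoopCluster;
import org.apache.hdt.core.launch.ConfProp;
import org.eclipse.core.runtime.CoreException;

public class ClusterClientSketch {
	public static AbstractHadoopCluster newLocation() throws CoreException {
		// Instantiates the first contribution to the
		// org.apache.hdt.core.hadoopCluster extension point.
		AbstractHadoopCluster cluster = AbstractHadoopCluster.createCluster();
		cluster.setConfProp(ConfProp.PI_LOCATION_NAME, "my-cluster");
		return cluster;
	}
}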

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/29467b54/org.apache.hdt.core/src/org/apache/hdt/core/launch/ConfProp.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/launch/ConfProp.java b/org.apache.hdt.core/src/org/apache/hdt/core/launch/ConfProp.java
new file mode 100644
index 0000000..538eb75
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/launch/ConfProp.java
@@ -0,0 +1,133 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.core.launch;
+
+import java.util.HashMap;
+import java.util.Map;
+
+public enum ConfProp {
+	/**
+	 * Property name for the Hadoop location name
+	 */
+	PI_LOCATION_NAME(true, "location.name", "New Hadoop location"),
+
+	/**
+	 * Property name for the master host name (the Job tracker)
+	 */
+	PI_JOB_TRACKER_HOST(true, "jobtracker.host", "localhost"),
+
+	/**
+	 * Property name for the DFS master host name (the Name node)
+	 */
+	PI_NAME_NODE_HOST(true, "namenode.host", "localhost"),
+
+	/**
+	 * Property name for the installation directory on the master node
+	 */
+	// PI_INSTALL_DIR(true, "install.dir", "/dir/hadoop-version/"),
+	/**
+	 * User name to use for Hadoop operations
+	 */
+	PI_USER_NAME(true, "user.name", System.getProperty("user.name", "who are you?")),
+
+	/**
+	 * Property name for SOCKS proxy activation
+	 */
+	PI_SOCKS_PROXY_ENABLE(true, "socks.proxy.enable", "no"),
+
+	/**
+	 * Property name for the SOCKS proxy host
+	 */
+	PI_SOCKS_PROXY_HOST(true, "socks.proxy.host", "host"),
+
+	/**
+	 * Property name for the SOCKS proxy port
+	 */
+	PI_SOCKS_PROXY_PORT(true, "socks.proxy.port", "1080"),
+
+	/**
+	 * TCP port number for the name node
+	 */
+	PI_NAME_NODE_PORT(true, "namenode.port", "50040"),
+
+	/**
+	 * TCP port number for the job tracker
+	 */
+	PI_JOB_TRACKER_PORT(true, "jobtracker.port", "50020"),
+
+	/**
+	 * Are the Map/Reduce and the Distributed FS masters hosted on the same
+	 * machine?
+	 */
+	PI_COLOCATE_MASTERS(true, "masters.colocate", "yes"),
+
+	/**
+	 * Property name for naming the job tracker (URI). This property is related
+	 * to {@link #PI_MASTER_HOST_NAME}
+	 */
+	JOB_TRACKER_URI(false, "mapreduce.jobtracker.address", "localhost:50020"),
+
+	/**
+	 * Property name for naming the default file system (URI).
+	 */
+	FS_DEFAULT_URI(false, "fs.default.name", "hdfs://localhost:50040/"),
+
+	/**
+	 * Property name for the default socket factory:
+	 */
+	SOCKET_FACTORY_DEFAULT(false, "hadoop.rpc.socket.factory.class.default", "org.apache.hadoop.net.StandardSocketFactory"),
+
+	/**
+	 * Property name for the SOCKS server URI.
+	 */
+	SOCKS_SERVER(false, "hadoop.socks.server", "host:1080"),
+
+	;
+
+	/**
+	 * Map <property name> -> ConfProp
+	 */
+	private static Map<String, ConfProp> map;
+
+	private static synchronized void registerProperty(String name, ConfProp prop) {
+
+		if (ConfProp.map == null)
+			ConfProp.map = new HashMap<String, ConfProp>();
+
+		ConfProp.map.put(name, prop);
+	}
+
+	public static ConfProp getByName(String propName) {
+		return map.get(propName);
+	}
+
+	public final String name;
+
+	public final String defVal;
+
+	ConfProp(boolean internal, String name, String defVal) {
+		if (internal)
+			name = "eclipse.plug-in." + name;
+		this.name = name;
+		this.defVal = defVal;
+
+		ConfProp.registerProperty(name, this);
+	}
+
+}
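
A small sketch of the naming scheme above, as defined in this revision of the enum:

import org.apache.hdt.core.launch.ConfProp;

public class ConfPropSketch {
	public static void main(String[] args) {
		// Plug-in-internal properties get the "eclipse.plug-in." prefix.
		System.out.println(ConfProp.PI_JOB_TRACKER_HOST.name); // eclipse.plug-in.jobtracker.host
		System.out.println(ConfProp.JOB_TRACKER_URI.defVal);   // localhost:50020
		// Reverse lookup by Hadoop property name.
		System.out.println(ConfProp.getByName("fs.default.name")); // FS_DEFAULT_URI
	}
}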

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/29467b54/org.apache.hdt.core/src/org/apache/hdt/core/launch/ErrorMessageDialog.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/launch/ErrorMessageDialog.java b/org.apache.hdt.core/src/org/apache/hdt/core/launch/ErrorMessageDialog.java
new file mode 100644
index 0000000..82b6d10
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/launch/ErrorMessageDialog.java
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.core.launch;
+
+import org.eclipse.jface.dialogs.MessageDialog;
+import org.eclipse.swt.widgets.Display;
+
+/**
+ * Error dialog helper
+ */
+public class ErrorMessageDialog {
+
+	public static void display(final String title, final String message) {
+		Display.getDefault().syncExec(new Runnable() {
+
+			public void run() {
+				MessageDialog.openError(Display.getDefault().getActiveShell(), title, message);
+			}
+
+		});
+	}
+
+	public static void display(Exception e) {
+		display("An exception has occured!", "Exception description:\n" + e.getLocalizedMessage());
+	}
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/29467b54/org.apache.hdt.core/src/org/apache/hdt/core/launch/IHadoopClusterListener.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/launch/IHadoopClusterListener.java b/org.apache.hdt.core/src/org/apache/hdt/core/launch/IHadoopClusterListener.java
new file mode 100644
index 0000000..e403c57
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/launch/IHadoopClusterListener.java
@@ -0,0 +1,26 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.core.launch;
+
+/**
+ * Interface for monitoring server changes
+ */
+public interface IHadoopClusterListener {
+	void serverChanged(AbstractHadoopCluster location, int type);
+}

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/29467b54/org.apache.hdt.core/src/org/apache/hdt/core/launch/IHadoopJob.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/launch/IHadoopJob.java b/org.apache.hdt.core/src/org/apache/hdt/core/launch/IHadoopJob.java
new file mode 100644
index 0000000..0b58699
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/launch/IHadoopJob.java
@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.core.launch;
+
+public interface IHadoopJob {
+
+	boolean isCompleted();
+
+	AbstractHadoopCluster getLocation();
+
+	String getJobID();
+
+	void kill();
+
+	String getStatus();
+
+	String getState();
+}

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/29467b54/org.apache.hdt.core/src/org/apache/hdt/core/launch/IJarModule.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/launch/IJarModule.java b/org.apache.hdt.core/src/org/apache/hdt/core/launch/IJarModule.java
new file mode 100644
index 0000000..0af6c9f
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/launch/IJarModule.java
@@ -0,0 +1,41 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.core.launch;
+
+import java.io.File;
+
+import org.eclipse.jface.operation.IRunnableWithProgress;
+
+/**
+ * Methods for interacting with the jar file containing the
+ * Mapper/Reducer/Driver classes for a MapReduce job.
+ */
+
+public interface IJarModule extends IRunnableWithProgress {
+
+	String getName();
+
+	/**
+	 * Allow the retrieval of the resulting JAR file
+	 * 
+	 * @return the generated JAR file
+	 */
+	File getJarFile();
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/29467b54/org.apache.hdt.core/src/org/apache/hdt/core/launch/IJobListener.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/launch/IJobListener.java b/org.apache.hdt.core/src/org/apache/hdt/core/launch/IJobListener.java
new file mode 100644
index 0000000..4dc3bc5
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/launch/IJobListener.java
@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.core.launch;
+
+/**
+ * Interface for updating/adding jobs to the MapReduce Server view.
+ */
+public interface IJobListener {
+
+	void jobChanged(IHadoopJob job);
+
+	void jobAdded(IHadoopJob job);
+
+	void jobRemoved(IHadoopJob job);
+
+	void publishStart(IJarModule jar);
+
+	void publishDone(IJarModule jar);
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/29467b54/org.apache.hdt.core/src/org/apache/hdt/core/natures/MapReduceNature.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/natures/MapReduceNature.java b/org.apache.hdt.core/src/org/apache/hdt/core/natures/MapReduceNature.java
new file mode 100644
index 0000000..e93ee9a
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/natures/MapReduceNature.java
@@ -0,0 +1,135 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.core.natures;
+
+import java.io.File;
+import java.io.FilenameFilter;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.apache.hdt.core.Activator;
+import org.eclipse.core.resources.IProject;
+import org.eclipse.core.resources.IProjectNature;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.core.runtime.NullProgressMonitor;
+import org.eclipse.core.runtime.Path;
+import org.eclipse.core.runtime.QualifiedName;
+import org.eclipse.jdt.core.IClasspathEntry;
+import org.eclipse.jdt.core.IJavaProject;
+import org.eclipse.jdt.core.JavaCore;
+
+/**
+ * Class to configure and deconfigure an Eclipse project with the MapReduce
+ * project nature.
+ */
+
+public class MapReduceNature implements IProjectNature {
+
+	public static final String ID = "org.apache.hdt.mrnature";
+
+	private IProject project;
+
+	static Logger log = Logger.getLogger(MapReduceNature.class.getName());
+
+	/**
+	 * Configures an Eclipse project as a Map/Reduce project by adding the
+	 * Hadoop libraries to a project's classpath.
+	 */
+	/*
+	 * TODO Versioning connector needed here
+	 */
+	public void configure() throws CoreException {
+
+		String hadoopHomePath = project.getPersistentProperty(new QualifiedName(Activator.BUNDLE_ID, "hadoop.runtime.path"));
+		File hadoopHome = new Path(hadoopHomePath).toFile();
+		File hadoopLib = new File(hadoopHome, "lib");
+
+		final ArrayList<File> coreJars = new ArrayList<File>();
+		coreJars.addAll(getJarFiles(hadoopHome));
+		coreJars.addAll(getJarFiles(hadoopLib));
+
+		// Add Hadoop libraries onto classpath
+		IJavaProject javaProject = JavaCore.create(getProject());
+		// Bundle bundle = Activator.getDefault().getBundle();
+		try {
+			IClasspathEntry[] currentCp = javaProject.getRawClasspath();
+			IClasspathEntry[] newCp = new IClasspathEntry[currentCp.length + coreJars.size()];
+			System.arraycopy(currentCp, 0, newCp, 0, currentCp.length);
+
+			final Iterator<File> i = coreJars.iterator();
+			int count = 0;
+			while (i.hasNext()) {
+				// for (int i = 0; i < s_coreJarNames.length; i++) {
+
+				final File f = (File) i.next();
+				// URL url = FileLocator.toFileURL(FileLocator.find(bundle, new
+				// Path("lib/" + s_coreJarNames[i]), null));
+				URL url = f.toURI().toURL();
+				log.finer("hadoop library url.getPath() = " + url.getPath());
+
+				newCp[newCp.length - 1 - count] = JavaCore.newLibraryEntry(new Path(url.getPath()), null, null);
+				count++;
+			}
+
+			javaProject.setRawClasspath(newCp, new NullProgressMonitor());
+		} catch (Exception e) {
+			log.log(Level.SEVERE, "IOException generated in " + this.getClass().getCanonicalName(), e);
+		}
+	}
+
+	private ArrayList<File> getJarFiles(File hadoopHome) {
+		FilenameFilter jarFileFilter = new FilenameFilter() {
+			@Override
+			public boolean accept(File dir, String name) {
+				return name.endsWith(".jar");
+			}
+		};
+		final ArrayList<File> jars = new ArrayList<File>();
+		for (String hadoopCoreLibFileName : hadoopHome.list(jarFileFilter)) {
+			jars.add(new File(hadoopHome, hadoopCoreLibFileName));
+		}
+		return jars;
+	}
+
+	/**
+	 * Deconfigure a project from MapReduce status. Currently unimplemented.
+	 */
+	public void deconfigure() throws CoreException {
+		// TODO Auto-generated method stub
+	}
+
+	/**
+	 * Returns the project to which this project nature applies.
+	 */
+	public IProject getProject() {
+		return this.project;
+	}
+
+	/**
+	 * Sets the project to which this nature applies. Used when instantiating
+	 * this project nature runtime.
+	 */
+	public void setProject(IProject project) {
+		this.project = project;
+	}
+
+}
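
For context, configure() is not called directly: it runs when the nature ID
is added to a project's description. A sketch of the standard Eclipse idiom
a project wizard might use (illustrative only; CoreException propagates to
the caller):

    static void addMapReduceNature(IProject project) throws CoreException {
        IProjectDescription description = project.getDescription();
        String[] natures = description.getNatureIds();
        String[] newNatures = new String[natures.length + 1];
        System.arraycopy(natures, 0, newNatures, 0, natures.length);
        newNatures[natures.length] = MapReduceNature.ID; // triggers configure()
        description.setNatureIds(newNatures);
        project.setDescription(description, new NullProgressMonitor());
    }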

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/29467b54/org.apache.hdt.hadoop.release/META-INF/MANIFEST.MF
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop.release/META-INF/MANIFEST.MF b/org.apache.hdt.hadoop.release/META-INF/MANIFEST.MF
index 005ca9b..db5e83c 100644
--- a/org.apache.hdt.hadoop.release/META-INF/MANIFEST.MF
+++ b/org.apache.hdt.hadoop.release/META-INF/MANIFEST.MF
@@ -7,14 +7,55 @@ Bundle-Vendor: Apache Hadoop
 Fragment-Host: org.apache.hdt.core
 Bundle-RequiredExecutionEnvironment: JavaSE-1.6
 Bundle-ClassPath: .,
- jars/log4j-1.2.15.jar,
- jars/slf4j-api-1.6.1.jar,
+ jars/zookeeper-3.4.5.jar,
  jars/slf4j-log4j12-1.6.1.jar,
- jars/commons-configuration-1.6.jar,
- jars/commons-lang-2.4.jar,
- jars/commons-logging-1.1.1.jar,
- jars/hadoop-client-1.1.2.jar,
+ jars/slf4j-api-1.6.1.jar,
+ jars/log4j-1.2.15.jar,
+ jars/xmlenc-0.52.jar,
+ jars/stax-api-1.0-2.jar,
+ jars/stax-api-1.0.1.jar,
+ jars/servlet-api-2.5-6.1.14.jar,
+ jars/servlet-api-2.5-20081211.jar,
+ jars/oro-2.0.8.jar,
+ jars/junit-4.11.jar,
+ jars/jsp-api-2.1-6.1.14.jar,
+ jars/jsp-2.1-6.1.14.jar,
+ jars/jetty-util-6.1.26.jar,
+ jars/jetty-6.1.26.jar,
+ jars/jettison-1.1.jar,
+ jars/jets3t-0.6.1.jar,
+ jars/jersey-server-1.8.jar,
+ jars/jersey-json-1.8.jar,
+ jars/jersey-core-1.8.jar,
+ jars/jaxb-impl-2.2.3-1.jar,
+ jars/jaxb-api-2.2.2.jar,
+ jars/jasper-runtime-5.5.12.jar,
+ jars/jasper-compiler-5.5.12.jar,
+ jars/jackson-xc-1.7.1.jar,
+ jars/jackson-mapper-asl-1.8.8.jar,
+ jars/jackson-jaxrs-1.7.1.jar,
+ jars/jackson-core-asl-1.7.1.jar,
+ jars/hsqldb-1.8.0.10.jar,
+ jars/hamcrest-core-1.3.jar,
  jars/hadoop-core-1.1.2.jar,
- jars/hadoop-test-1.1.2.jar,
+ jars/core-3.1.1.jar,
+ jars/commons-net-1.4.1.jar,
+ jars/commons-math-2.1.jar,
+ jars/commons-logging-1.1.1.jar,
+ jars/commons-lang-2.4.jar,
+ jars/commons-io-2.1.jar,
+ jars/commons-httpclient-3.0.1.jar,
+ jars/commons-el-1.0.jar,
+ jars/commons-digester-1.8.jar,
+ jars/commons-configuration-1.6.jar,
+ jars/commons-collections-3.2.1.jar,
+ jars/commons-codec-1.4.jar,
+ jars/commons-cli-1.2.jar,
+ jars/commons-beanutils-core-1.8.0.jar,
+ jars/commons-beanutils-1.7.0.jar,
+ jars/asm-3.1.jar,
+ jars/ant-1.6.5.jar,
+ jars/activation-1.1.jar,
  jars/hadoop-tools-1.1.2.jar,
- jars/zookeeper-3.4.5.jar
+ jars/hadoop-test-1.1.2.jar,
+ jars/hadoop-client-1.1.2.jar

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/29467b54/org.apache.hdt.hadoop.release/build.properties
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop.release/build.properties b/org.apache.hdt.hadoop.release/build.properties
index 6d99810..4c1d15a 100644
--- a/org.apache.hdt.hadoop.release/build.properties
+++ b/org.apache.hdt.hadoop.release/build.properties
@@ -20,10 +20,4 @@ output.. = bin/
 bin.includes = META-INF/,\
                .,\
                fragment.xml,\
-               jars/,\
-               jars/slf4j-api-1.6.1.jar,\
-               jars/slf4j-log4j12-1.6.1.jar,\
-               jars/commons-configuration-1.6.jar,\
-               jars/commons-lang-2.4.jar,\
-               jars/commons-logging-1.1.1.jar,\
-               jars/log4j-1.2.15.jar
+               jars/

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/29467b54/org.apache.hdt.hadoop.release/fragment.xml
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop.release/fragment.xml b/org.apache.hdt.hadoop.release/fragment.xml
index 1b11581..729d38f 100644
--- a/org.apache.hdt.hadoop.release/fragment.xml
+++ b/org.apache.hdt.hadoop.release/fragment.xml
@@ -32,5 +32,12 @@
             protocolVersion="3.4.5">
       </zookeeperClient>
    </extension>
+   <extension
+         point="org.apache.hdt.core.hadoopCluster">
+      <hadoopCluster
+            class="org.apache.hdt.hadoop.release.HadoopCluster"
+            protocolVersion="1.1">
+      </hadoopCluster>
+   </extension>
 
 </fragment>
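
The new hadoopCluster extension mirrors the existing zookeeperClient one.
On the consuming side, the core plug-in can resolve the contribution through
the platform extension registry; a hedged sketch (the actual lookup code in
org.apache.hdt.core is not part of this hunk):

    // Platform and IConfigurationElement are from org.eclipse.core.runtime.
    IConfigurationElement[] elements = Platform.getExtensionRegistry()
            .getConfigurationElementsFor("org.apache.hdt.core.hadoopCluster");
    for (IConfigurationElement element : elements) {
        if ("1.1".equals(element.getAttribute("protocolVersion"))) {
            // Instantiates org.apache.hdt.hadoop.release.HadoopCluster;
            // createExecutableExtension may throw CoreException.
            Object cluster = element.createExecutableExtension("class");
        }
    }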

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/29467b54/org.apache.hdt.hadoop.release/pom.xml
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop.release/pom.xml b/org.apache.hdt.hadoop.release/pom.xml
index 69c61f2..fa65ec3 100644
--- a/org.apache.hdt.hadoop.release/pom.xml
+++ b/org.apache.hdt.hadoop.release/pom.xml
@@ -27,6 +27,12 @@ under the License.
   <artifactId>org.apache.hdt.hadoop.release</artifactId>
   <packaging>eclipse-plugin</packaging>
  <name>Apache Hadoop Development Tools Assembly</name>
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-core</artifactId>
+    </dependency>
+  </dependencies>
   <build>
     <sourceDirectory>src</sourceDirectory>
     <plugins>
@@ -36,6 +42,27 @@ under the License.
         <version>2.8</version>
         <executions>
           <execution>
+            <id>copy-dependencies</id>
+            <phase>initialize</phase>
+            <goals>
+              <goal>copy-dependencies</goal>
+            </goals>
+            <configuration>
+              <excludeScope>system</excludeScope>
+              <outputDirectory>${basedir}/jars</outputDirectory>
+              <overWriteReleases>false</overWriteReleases>
+              <overWriteSnapshots>false</overWriteSnapshots>
+              <overWriteIfNewer>true</overWriteIfNewer>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-dependency-plugin</artifactId>
+        <version>2.8</version>
+        <executions>
+          <execution>
             <id>copy</id>
             <phase>initialize</phase>
             <goals>
@@ -50,11 +77,6 @@ under the License.
                 </artifactItem>
                 <artifactItem>
                   <groupId>org.apache.hadoop</groupId>
-                  <artifactId>hadoop-core</artifactId>
-                  <overWrite>false</overWrite>
-                </artifactItem>
-                <artifactItem>
-                  <groupId>org.apache.hadoop</groupId>
                   <artifactId>hadoop-test</artifactId>
                   <overWrite>false</overWrite>
                 </artifactItem>
@@ -83,21 +105,6 @@ under the License.
                   <artifactId>slf4j-log4j12</artifactId>
                   <overWrite>false</overWrite>
                 </artifactItem>
-                <artifactItem>
-	              <groupId>commons-configuration</groupId>
-				  <artifactId>commons-configuration</artifactId>
-				  <overWrite>false</overWrite>
-                </artifactItem>
-                <artifactItem>
-	              <groupId>commons-lang</groupId>
-				  <artifactId>commons-lang</artifactId>
-				  <overWrite>false</overWrite>
-                </artifactItem>
-                <artifactItem>
-	              <groupId>commons-logging</groupId>
-				  <artifactId>commons-logging</artifactId>
-				  <overWrite>false</overWrite>
-                </artifactItem>
               </artifactItems>
               <outputDirectory>${basedir}/jars</outputDirectory>
               <overWriteReleases>false</overWriteReleases>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/29467b54/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopCluster.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopCluster.java b/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopCluster.java
new file mode 100644
index 0000000..daaf990
--- /dev/null
+++ b/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopCluster.java
@@ -0,0 +1,564 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.hadoop.release;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.TreeMap;
+import java.util.logging.Logger;
+
+import javax.xml.parsers.DocumentBuilder;
+import javax.xml.parsers.DocumentBuilderFactory;
+import javax.xml.parsers.ParserConfigurationException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.mapred.JobClient;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.JobID;
+import org.apache.hadoop.mapred.JobStatus;
+import org.apache.hadoop.mapred.RunningJob;
+import org.apache.hdt.core.Activator;
+import org.apache.hdt.core.launch.ConfProp;
+import org.apache.hdt.core.launch.AbstractHadoopCluster;
+import org.apache.hdt.core.launch.IHadoopJob;
+import org.apache.hdt.core.launch.IJarModule;
+import org.apache.hdt.core.launch.IJobListener;
+import org.eclipse.core.runtime.IProgressMonitor;
+import org.eclipse.core.runtime.IStatus;
+import org.eclipse.core.runtime.Status;
+import org.eclipse.core.runtime.jobs.Job;
+import org.eclipse.swt.widgets.Display;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+import org.w3c.dom.Node;
+import org.w3c.dom.NodeList;
+import org.w3c.dom.Text;
+import org.xml.sax.SAXException;
+
+/**
+ * Representation of a Hadoop location, i.e. the master node (NameNode,
+ * JobTracker).
+ * 
+ * <p>
+ * This class does not create any SSH connection anymore. Tunneling must be
+ * set up outside of Eclipse for now (using Putty or <tt>ssh -D&lt;port&gt;
+ * &lt;host&gt;</tt>)
+ * 
+ * <p>
+ * <em> TODO </em>
+ * <li>Disable the updater if a location becomes unreachable or fails for too
+ * long
+ * <li>Stop the updater on location's disposal/removal
+ */
+
+public class HadoopCluster extends AbstractHadoopCluster {
+
+	/**
+	 * Frequency of location status observations expressed as the delay in ms
+	 * between each observation
+	 * 
+	 * TODO Add a preference parameter for this
+	 */
+	protected static final long STATUS_OBSERVATION_DELAY = 1500;
+
+	/**
+	 * Background job that periodically polls this location for job status.
+	 */
+	public class LocationStatusUpdater extends Job {
+
+		JobClient client = null;
+
+		/**
+		 * Setup the updater
+		 */
+		public LocationStatusUpdater() {
+			super("Map/Reduce location status updater");
+			this.setSystem(true);
+		}
+
+		/* @inheritDoc */
+		@Override
+		protected IStatus run(IProgressMonitor monitor) {
+			if (client == null) {
+				try {
+					client = HadoopCluster.this.getJobClient();
+
+				} catch (IOException ioe) {
+					client = null;
+					return new Status(Status.ERROR, Activator.BUNDLE_ID, 0, "Cannot connect to the Map/Reduce location: "
+							+ HadoopCluster.this.getLocationName(), ioe);
+				}
+			}
+
+			try {
+				// Set of all known existing Job IDs we want fresh info of
+				Set<JobID> missingJobIds = new HashSet<JobID>(runningJobs.keySet());
+
+				JobStatus[] jstatus = client.jobsToComplete();
+				jstatus = jstatus == null ? new JobStatus[0] : jstatus;
+				for (JobStatus status : jstatus) {
+
+					JobID jobId = status.getJobID();
+					missingJobIds.remove(jobId);
+
+					HadoopJob hJob;
+					synchronized (HadoopCluster.this.runningJobs) {
+						hJob = runningJobs.get(jobId);
+						if (hJob == null) {
+							// Unknown job, create an entry
+							RunningJob running = client.getJob(jobId);
+							hJob = new HadoopJob(HadoopCluster.this, jobId, running, status);
+							newJob(hJob);
+						}
+					}
+
+				// Update HadoopJob with fresh info
+					updateJob(hJob, status);
+				}
+
+				// Ask explicitly for fresh info for these Job IDs
+				for (JobID jobId : missingJobIds) {
+					HadoopJob hJob = runningJobs.get(jobId);
+					if (!hJob.isCompleted())
+						updateJob(hJob, null);
+				}
+
+			} catch (IOException ioe) {
+				client = null;
+				return new Status(Status.ERROR, Activator.BUNDLE_ID, 0, "Cannot retrieve running Jobs on location: " + HadoopCluster.this.getLocationName(),
+						ioe);
+			}
+
+			// Schedule the next observation
+			schedule(STATUS_OBSERVATION_DELAY);
+
+			return Status.OK_STATUS;
+		}
+
+		/**
+		 * Stores and makes the new job available
+		 * 
+		 * @param data
+		 */
+		private void newJob(final HadoopJob data) {
+			runningJobs.put(data.jobId, data);
+
+			Display.getDefault().asyncExec(new Runnable() {
+				public void run() {
+					fireJobAdded(data);
+				}
+			});
+		}
+
+		/**
+		 * Updates the status of a job
+		 * 
+		 * @param job
+		 *            the job to update
+		 */
+		private void updateJob(final HadoopJob job, JobStatus status) {
+			job.update(status);
+
+			Display.getDefault().asyncExec(new Runnable() {
+				public void run() {
+					fireJobChanged(job);
+				}
+			});
+		}
+
+	}
+
+	static Logger log = Logger.getLogger(HadoopCluster.class.getName());
+
+	/**
+	 * Hadoop configuration of the location. Also contains specific parameters
+	 * for the plug-in. These parameters are prefixed with eclipse.plug-in.*
+	 */
+	private Configuration conf;
+
+	/**
+	 * Jobs listeners
+	 */
+	private Set<IJobListener> jobListeners = new HashSet<IJobListener>();
+
+	/**
+	 * Jobs running on this location. The keys of this map are the Job IDs.
+	 */
+	private transient Map<JobID, HadoopJob> runningJobs = Collections.synchronizedMap(new TreeMap<JobID, HadoopJob>());
+
+	/**
+	 * Status updater for this location
+	 */
+	private LocationStatusUpdater statusUpdater;
+
+	// state and status - transient
+	private transient String state = "";
+
+	/**
+	 * Creates a new default Hadoop location
+	 */
+	public HadoopCluster() {
+		this.conf = new Configuration();
+		this.addPluginConfigDefaultProperties();
+	}
+
+	/**
+	 * Creates a location from a file
+	 * 
+	 * @throws IOException
+	 * @throws SAXException
+	 * @throws ParserConfigurationException
+	 */
+	public HadoopCluster(File file) throws ParserConfigurationException, SAXException, IOException {
+
+		this.conf = new Configuration();
+		this.addPluginConfigDefaultProperties();
+		this.loadFromXML(file);
+	}
+
+	/**
+	 * Create a new Hadoop location by copying an already existing one.
+	 * 
+	 * @param existing
+	 *            the location to copy
+	 */
+	public HadoopCluster(HadoopCluster existing) {
+		this();
+		this.load(existing);
+	}
+
+	public void addJobListener(IJobListener l) {
+		jobListeners.add(l);
+	}
+
+	public void dispose() {
+		// TODO close DFS connections?
+	}
+
+	/**
+	 * List all elements that should be present in the Server window (all
+	 * servers and all jobs running on each server)
+	 * 
+	 * @return collection of jobs for this location
+	 */
+	public Collection<? extends IHadoopJob> getJobs() {
+		startStatusUpdater();
+		return this.runningJobs.values();
+	}
+
+	/**
+	 * Remove the given job from the currently running jobs map
+	 * 
+	 * @param job
+	 *            the job to remove
+	 */
+	public void purgeJob(final IHadoopJob job) {
+		runningJobs.remove(job.getJobID());
+		Display.getDefault().asyncExec(new Runnable() {
+			public void run() {
+				fireJobRemoved(job);
+			}
+		});
+	}
+
+	/**
+	 * Returns the {@link Configuration} defining this location.
+	 * 
+	 * @return the location configuration
+	 */
+	public Iterator<Entry<String, String>> getConfiguration() {
+		return this.conf.iterator();
+	}
+
+	/**
+	 * @return the conf
+	 */
+	public Configuration getConf() {
+		return conf;
+	}
+
+	/**
+	 * Gets a Hadoop configuration property value
+	 * 
+	 * @param prop
+	 *            the configuration property
+	 * @return the property value
+	 */
+	public String getConfProp(ConfProp prop) {
+		return conf.get(prop.name);
+	}
+
+	/**
+	 * Gets a Hadoop configuration property value
+	 * 
+	 * @param propName
+	 *            the property name
+	 * @return the property value
+	 */
+	public String getConfProp(String propName) {
+		return this.conf.get(propName);
+	}
+
+	public String getLocationName() {
+		return getConfProp(ConfProp.PI_LOCATION_NAME);
+	}
+
+	/**
+	 * Returns the master host name of the Hadoop location (the Job tracker)
+	 * 
+	 * @return the host name of the Job tracker
+	 */
+	public String getMasterHostName() {
+		return getConfProp(ConfProp.PI_JOB_TRACKER_HOST);
+	}
+
+	public String getState() {
+		return state;
+	}
+
+	/**
+	 * Overwrite this location with the given existing location
+	 * 
+	 * @param existing
+	 *            the existing location
+	 */
+	public void load(AbstractHadoopCluster existing) {
+		this.conf = new Configuration(((HadoopCluster) existing).conf);
+	}
+
+	/**
+	 * Overwrite this location with settings available in the given XML file.
+	 * The existing configuration is preserved if the XML file is invalid.
+	 * 
+	 * @param file
+	 *            the file path of the XML file
+	 * @return validity of the XML file
+	 *         (parse and I/O errors are caught internally; on any failure
+	 *         the method returns false and the existing configuration is
+	 *         kept unchanged)
+	 */
+	public boolean loadFromXML(File file) {
+
+		Configuration newConf = new Configuration(this.conf);
+		DocumentBuilder builder;
+		Document document;
+		try {
+			builder = DocumentBuilderFactory.newInstance().newDocumentBuilder();
+			document = builder.parse(file);
+		} catch (ParserConfigurationException e) {
+			e.printStackTrace();
+			return false;
+		} catch (SAXException e) {
+			e.printStackTrace();
+			return false;
+		} catch (IOException e) {
+			e.printStackTrace();
+			return false;
+		}
+		Element root = document.getDocumentElement();
+		if (!"configuration".equals(root.getTagName()))
+			return false;
+		NodeList props = root.getChildNodes();
+		for (int i = 0; i < props.getLength(); i++) {
+			Node propNode = props.item(i);
+			if (!(propNode instanceof Element))
+				continue;
+			Element prop = (Element) propNode;
+			if (!"property".equals(prop.getTagName()))
+				return false;
+			NodeList fields = prop.getChildNodes();
+			String attr = null;
+			String value = null;
+			for (int j = 0; j < fields.getLength(); j++) {
+				Node fieldNode = fields.item(j);
+				if (!(fieldNode instanceof Element))
+					continue;
+				Element field = (Element) fieldNode;
+				if ("name".equals(field.getTagName()))
+					attr = ((Text) field.getFirstChild()).getData();
+				if ("value".equals(field.getTagName()) && field.hasChildNodes())
+					value = ((Text) field.getFirstChild()).getData();
+			}
+			if (attr != null && value != null)
+				newConf.set(attr, value);
+		}
+
+		this.conf = newConf;
+		return true;
+	}
+
+	/**
+	 * Sets a Hadoop configuration property value
+	 * 
+	 * @param prop
+	 *            the property
+	 * @param propValue
+	 *            the property value
+	 */
+	public void setConfProp(ConfProp prop, String propValue) {
+		assert propValue != null;
+		conf.set(prop.name, propValue);
+	}
+
+	/**
+	 * Sets a Hadoop configuration property value
+	 * 
+	 * @param propName
+	 *            the property name
+	 * @param propValue
+	 *            the property value
+	 */
+	public void setConfProp(String propName, String propValue) {
+		this.conf.set(propName, propValue);
+	}
+
+	public void setLocationName(String newName) {
+		setConfProp(ConfProp.PI_LOCATION_NAME, newName);
+	}
+
+	/**
+	 * Write this location's settings to the given file
+	 * 
+	 * @param file
+	 *            the destination file
+	 * @throws IOException
+	 */
+	public void storeSettingsToFile(File file) throws IOException {
+		FileOutputStream fos = new FileOutputStream(file);
+		try {
+			this.conf.writeXml(fos);
+			fos.close();
+			fos = null;
+		} finally {
+			IOUtils.closeStream(fos);
+		}
+
+	}
+
+	/* @inheritDoc */
+	@Override
+	public String toString() {
+		return this.getLocationName();
+	}
+
+	/**
+	 * Fill the configuration with valid default values
+	 */
+	private void addPluginConfigDefaultProperties() {
+		for (ConfProp prop : ConfProp.values()) {
+			if (conf.get(prop.name) == null)
+				conf.set(prop.name, prop.defVal);
+		}
+	}
+
+	/**
+	 * Starts the location status updater
+	 */
+	private synchronized void startStatusUpdater() {
+		if (statusUpdater == null) {
+			statusUpdater = new LocationStatusUpdater();
+			statusUpdater.schedule();
+		}
+	}
+
+	/*
+	 * Rewrite of the connecting and tunneling to the Hadoop location
+	 */
+
+	/**
+	 * Provides access to the default file system of this location.
+	 * 
+	 * @return a {@link FileSystem}
+	 */
+	public FileSystem getDFS() throws IOException {
+		return FileSystem.get(this.conf);
+	}
+
+	/**
+	 * Provides access to the Job tracking system of this location
+	 * 
+	 * @return a {@link JobClient}
+	 */
+	public JobClient getJobClient() throws IOException {
+		JobConf jconf = new JobConf(this.conf);
+		return new JobClient(jconf);
+	}
+
+	/*
+	 * Listeners handling
+	 */
+
+	protected void fireJarPublishDone(IJarModule jar) {
+		for (IJobListener listener : jobListeners) {
+			listener.publishDone(jar);
+		}
+	}
+
+	protected void fireJarPublishStart(IJarModule jar) {
+		for (IJobListener listener : jobListeners) {
+			listener.publishStart(jar);
+		}
+	}
+
+	protected void fireJobAdded(HadoopJob job) {
+		for (IJobListener listener : jobListeners) {
+			listener.jobAdded(job);
+		}
+	}
+
+	protected void fireJobRemoved(IHadoopJob job) {
+		for (IJobListener listener : jobListeners) {
+			listener.jobRemoved(job);
+		}
+	}
+
+	protected void fireJobChanged(HadoopJob job) {
+		for (IJobListener listener : jobListeners) {
+			listener.jobChanged(job);
+		}
+	}
+
+	@Override
+	public void saveConfiguration(File confDir, String jarFilePath) throws IOException {
+		// Prepare the Hadoop configuration
+		JobConf conf = new JobConf(this.conf);
+		conf.setJar(jarFilePath);
+		// Write it to the disk file
+		File confFile = new File(confDir, "core-site.xml");
+		FileOutputStream fos = new FileOutputStream(confFile);
+		try {
+			conf.writeXml(fos);
+			fos.close();
+			fos = null;
+		} finally {
+			IOUtils.closeStream(fos);
+		}
+
+	}
+}
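
Putting the pieces together, a client configures a location and then starts
observing it; an illustrative fragment (the host and names are examples, and
LoggingJobListener refers to the sketch given earlier):

    HadoopCluster cluster = new HadoopCluster();
    cluster.setLocationName("dev-cluster");
    cluster.setConfProp(ConfProp.PI_JOB_TRACKER_HOST, "localhost");
    cluster.addJobListener(new LoggingJobListener());
    // The first call lazily schedules the LocationStatusUpdater, which then
    // re-schedules itself every STATUS_OBSERVATION_DELAY (1500) ms.
    Collection<? extends IHadoopJob> jobs = cluster.getJobs();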

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/29467b54/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopJob.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopJob.java b/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopJob.java
new file mode 100644
index 0000000..5861967
--- /dev/null
+++ b/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopJob.java
@@ -0,0 +1,342 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.hadoop.release;
+
+import java.io.File;
+import java.io.IOException;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapred.Counters;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.JobID;
+import org.apache.hadoop.mapred.JobStatus;
+import org.apache.hadoop.mapred.RunningJob;
+import org.apache.hdt.core.launch.AbstractHadoopCluster;
+import org.apache.hdt.core.launch.IHadoopJob;
+
+/**
+ * Representation of a Map/Reduce running job on a given location
+ */
+
+public class HadoopJob implements IHadoopJob {
+
+	/**
+	 * Enum representation of a Job state
+	 */
+	public enum JobState {
+		PREPARE(JobStatus.PREP), RUNNING(JobStatus.RUNNING), FAILED(JobStatus.FAILED), SUCCEEDED(JobStatus.SUCCEEDED);
+
+		final int state;
+
+		JobState(int state) {
+			this.state = state;
+		}
+
+		static JobState ofInt(int state) {
+			if (state == JobStatus.PREP) {
+				return PREPARE;
+			} else if (state == JobStatus.RUNNING) {
+				return RUNNING;
+			} else if (state == JobStatus.FAILED) {
+				return FAILED;
+			} else if (state == JobStatus.SUCCEEDED) {
+				return SUCCEEDED;
+			} else {
+				return null;
+			}
+		}
+	}
+
+	/**
+	 * Location this Job runs on
+	 */
+	private final HadoopCluster location;
+
+	/**
+	 * Unique identifier of this Job
+	 */
+	final JobID jobId;
+
+	/**
+	 * Status representation of a running job. This actually contains a
+	 * reference to a JobClient. Its methods might block.
+	 */
+	RunningJob running;
+
+	/**
+	 * Last polled status
+	 * 
+	 * @deprecated should apparently not be used
+	 */
+	JobStatus status;
+
+	/**
+	 * Last polled counters
+	 */
+	Counters counters;
+
+	/**
+	 * Job Configuration
+	 */
+	JobConf jobConf = null;
+
+	boolean completed = false;
+
+	boolean successful = false;
+
+	boolean killed = false;
+
+	int totalMaps;
+
+	int totalReduces;
+
+	int completedMaps;
+
+	int completedReduces;
+
+	float mapProgress;
+
+	float reduceProgress;
+
+	/**
+	 * Constructor for a Hadoop job representation
+	 * 
+	 * @param location
+	 * @param id
+	 * @param running
+	 * @param status
+	 */
+	public HadoopJob(HadoopCluster location, JobID id, RunningJob running, JobStatus status) {
+
+		this.location = location;
+		this.jobId = id;
+		this.running = running;
+
+		loadJobFile();
+
+		update(status);
+	}
+
+	/**
+	 * Try to locate and load the JobConf file for this job so as to get more
+	 * details on the job (number of maps and of reduces)
+	 */
+	private void loadJobFile() {
+		try {
+			String jobFile = getJobFile();
+			FileSystem fs = location.getDFS();
+			File tmp = File.createTempFile(getJobID().toString(), ".xml");
+			if (FileUtil.copy(fs, new Path(jobFile), tmp, false, location.getConf())) {
+				this.jobConf = new JobConf(tmp.toString());
+
+				this.totalMaps = jobConf.getNumMapTasks();
+				this.totalReduces = jobConf.getNumReduceTasks();
+			}
+
+		} catch (IOException ioe) {
+			ioe.printStackTrace();
+		}
+	}
+
+	/* @inheritDoc */
+	@Override
+	public int hashCode() {
+		final int prime = 31;
+		int result = 1;
+		result = prime * result + ((jobId == null) ? 0 : jobId.hashCode());
+		result = prime * result + ((location == null) ? 0 : location.hashCode());
+		return result;
+	}
+
+	/* @inheritDoc */
+	@Override
+	public boolean equals(Object obj) {
+		if (this == obj)
+			return true;
+		if (obj == null)
+			return false;
+		if (!(obj instanceof HadoopJob))
+			return false;
+		final HadoopJob other = (HadoopJob) obj;
+		if (jobId == null) {
+			if (other.jobId != null)
+				return false;
+		} else if (!jobId.equals(other.jobId))
+			return false;
+		if (location == null) {
+			if (other.location != null)
+				return false;
+		} else if (!location.equals(other.location))
+			return false;
+		return true;
+	}
+
+	/**
+	 * Get the running state of the Job (see {@link JobStatus}).
+	 * 
+	 * @return the current job state as a string
+	 */
+	public String getState() {
+		if (this.completed) {
+			if (this.successful) {
+				return JobState.SUCCEEDED.toString();
+			} else {
+				return JobState.FAILED.toString();
+			}
+		} else {
+			return JobState.RUNNING.toString();
+		}
+		// return JobState.ofInt(this.status.getRunState());
+	}
+
+	/**
+	 * @return the job ID as a string
+	 */
+	public String getJobID() {
+		return this.jobId.toString();
+	}
+
+	/**
+	 * @return the location this job runs on
+	 */
+	public AbstractHadoopCluster getLocation() {
+		return this.location;
+	}
+
+	/**
+	 * @return whether this job has completed
+	 */
+	public boolean isCompleted() {
+		return this.completed;
+	}
+
+	/**
+	 * @return the job name
+	 */
+	public String getJobName() {
+		return this.running.getJobName();
+	}
+
+	/**
+	 * @return the path of the job file
+	 */
+	public String getJobFile() {
+		return this.running.getJobFile();
+	}
+
+	/**
+	 * Return the tracking URL for this Job.
+	 * 
+	 * @return string representation of the tracking URL for this Job
+	 */
+	public String getTrackingURL() {
+		return this.running.getTrackingURL();
+	}
+
+	/**
+	 * Returns a string representation of this job status
+	 * 
+	 * @return string representation of this job status
+	 */
+	public String getStatus() {
+
+		StringBuffer s = new StringBuffer();
+
+		s.append("Maps : " + completedMaps + "/" + totalMaps);
+		s.append(" (" + mapProgress + ")");
+		s.append("  Reduces : " + completedReduces + "/" + totalReduces);
+		s.append(" (" + reduceProgress + ")");
+
+		return s.toString();
+	}
+
+	/**
+	 * Update this job status according to the given JobStatus
+	 * 
+	 * @param status
+	 */
+	void update(JobStatus status) {
+		this.status = status;
+		try {
+			this.counters = running.getCounters();
+			this.completed = running.isComplete();
+			this.successful = running.isSuccessful();
+			this.mapProgress = running.mapProgress();
+			this.reduceProgress = running.reduceProgress();
+			// running.getTaskCompletionEvents(fromEvent);
+
+		} catch (IOException ioe) {
+			ioe.printStackTrace();
+		}
+
+		this.completedMaps = (int) (this.totalMaps * this.mapProgress);
+		this.completedReduces = (int) (this.totalReduces * this.reduceProgress);
+	}
+
+	/**
+	 * Print this job counters (for debugging purpose)
+	 */
+	void printCounters() {
+		System.out.printf("New Job:\n", counters);
+		for (String groupName : counters.getGroupNames()) {
+			Counters.Group group = counters.getGroup(groupName);
+			System.out.printf("\t%s[%s]\n", groupName, group.getDisplayName());
+
+			for (Counters.Counter counter : group) {
+				System.out.printf("\t\t%s: %s\n", counter.getDisplayName(), counter.getCounter());
+			}
+		}
+		System.out.printf("\n");
+	}
+
+	/**
+	 * Kill this job
+	 */
+	public void kill() {
+		try {
+			this.running.killJob();
+			this.killed = true;
+
+		} catch (IOException e) {
+			e.printStackTrace();
+		}
+	}
+
+	/**
+	 * Print this job status (for debugging purpose)
+	 */
+	public void display() {
+		System.out.printf("Job id=%s, name=%s\n", getJobID(), getJobName());
+		System.out.printf("Configuration file: %s\n", getJobID());
+		System.out.printf("Tracking URL: %s\n", getTrackingURL());
+
+		System.out.printf("Completion: map: %f reduce %f\n", 100.0 * this.mapProgress, 100.0 * this.reduceProgress);
+
+		System.out.println("Job total maps = " + totalMaps);
+		System.out.println("Job completed maps = " + completedMaps);
+		System.out.println("Map percentage complete = " + mapProgress);
+		System.out.println("Job total reduces = " + totalReduces);
+		System.out.println("Job completed reduces = " + completedReduces);
+		System.out.println("Reduce percentage complete = " + reduceProgress);
+		System.out.flush();
+	}
+
+}
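
The progress arithmetic in update() is a plain proportion: with, say,
totalMaps = 200 and mapProgress = 0.37f, completedMaps becomes
(int) (200 * 0.37f) = 74, which getStatus() renders as "Maps : 74/200 (0.37)".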

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/29467b54/org.apache.hdt.ui/META-INF/MANIFEST.MF
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/META-INF/MANIFEST.MF b/org.apache.hdt.ui/META-INF/MANIFEST.MF
index ac39e07..c34e98a 100644
--- a/org.apache.hdt.ui/META-INF/MANIFEST.MF
+++ b/org.apache.hdt.ui/META-INF/MANIFEST.MF
@@ -8,11 +8,16 @@ Bundle-Vendor: Apache Hadoop
 Require-Bundle: org.eclipse.core.runtime,
  org.eclipse.core.resources,
  org.eclipse.ui,
+ org.eclipse.jdt.core,
+ org.eclipse.jdt.ui,
  org.eclipse.ui.ide;bundle-version="3.6.0",
  org.eclipse.team.ui;bundle-version="3.5.100",
  org.eclipse.ui.navigator;bundle-version="3.5.0",
  org.eclipse.ui.navigator.resources;bundle-version="3.4.200",
  org.eclipse.ui.views.properties.tabbed;bundle-version="3.5.100";resolution:=optional,
+ org.eclipse.jdt.debug.ui,
+ org.eclipse.jdt.launching,
+ org.eclipse.debug.ui,
  org.apache.hdt.core
 Bundle-RequiredExecutionEnvironment: JavaSE-1.6
 Bundle-ActivationPolicy: lazy

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/29467b54/org.apache.hdt.ui/plugin.xml
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/plugin.xml b/org.apache.hdt.ui/plugin.xml
index e6f1e53..7bc5b47 100644
--- a/org.apache.hdt.ui/plugin.xml
+++ b/org.apache.hdt.ui/plugin.xml
@@ -17,6 +17,14 @@
    limitations under the License.
 -->
 <plugin>
+  <extension
+         point="org.eclipse.ui.preferencePages">
+      <page
+            class="org.apache.hdt.ui.preferences.MapReducePreferencePage"
+            id="org.apache.hdt.ui.preferences.MapReducePreferencePage"
+            name="Hadoop">
+      </page>
+   </extension>
    <extension
          point="org.eclipse.ui.perspectives">
       <perspective
@@ -51,6 +59,14 @@
                relationship="bottom"
                relative="org.eclipse.ui.editorss">
          </view>
+          <view
+                id=" org.apache.hdt.ui.ClusterView"
+                minimized="false"
+                relationship="stack"
+                relative="org.apache.hdt.ui.view.servers">
+         </view>
+         <newWizardShortcut
+               id="org.apache.hdt.ui.wizard.newProjectWizard"/>
          <newWizardShortcut
                id="org.apache.hdt.ui.wizard.newHdfsServer">
          </newWizardShortcut>
@@ -146,6 +162,14 @@
    </extension>
    <extension
          point="org.eclipse.ui.newWizards">
+         <wizard
+            category="org.apache.hdt.ui.newWizards.category"
+            class="org.apache.hdt.ui.internal.mr.NewMapReduceProjectWizard"
+            finalPerspective="org.apache.hdt.ui.perspective"
+            icon="icons/hadoop-logo-16x16.png"
+            id="org.apache.hdt.ui.wizard.newProjectWizard"
+            name="Map/Reduce Project"
+            project="true"/>
       <wizard
             category="org.apache.hdt.ui.newWizards.category"
             class="org.apache.hdt.ui.internal.hdfs.NewHDFSWizard"
@@ -154,10 +178,6 @@
             id="org.apache.hdt.ui.wizard.newHdfsServer"
             name="New HDFS Server">
       </wizard>
-      <category
-            id="org.apache.hdt.ui.newWizards.category"
-            name="Hadoop">
-      </category>
       <wizard
             category="org.apache.hdt.ui.newWizards.category"
             class="org.apache.hdt.ui.internal.zookeeper.NewZooKeeperWizard"
@@ -166,6 +186,36 @@
             id="org.apache.hdt.ui.wizard.newZooKeeperServer"
             name="New ZooKeeper Server">
       </wizard>
+      <wizard category="org.apache.hdt.ui.newWizards.category"
+            class="org.apache.hdt.ui.internal.mr.NewMapperWizard"
+            icon="icons/mapper16.png"
+            id="org.apache.hdt.ui.wizard.NewMapperWizard"
+            name="Mapper"
+            project="false"/>
+      <wizard category="org.apache.hdt.ui.newWizards.category"
+            class="org.apache.hdt.ui.internal.mr.NewReducerWizard"
+            icon="icons/reducer16.png"
+            id="org.apache.hdt.ui.wizard.NewReducerWizard"
+            name="Reducer"
+            project="false"/>
+      <wizard
+            category="org.apache.hdt.ui.newWizards.category"
+            class="org.apache.hdt.ui.internal.mr.NewDriverWizard"
+            icon="icons/driver.png"
+            id="org.apache.hdt.ui.wizard.NewDriverWizard"
+            name="MapReduce Driver"
+            project="false"/>
+      <wizard
+            category="org.apache.hdt.ui.newWizards.category"
+            class="org.apache.hdt.ui.internal.mr.NewPartitionerWizard"
+            icon="icons/Elephant16x16.gif"
+            id="org.apache.hdt.ui.wizard.NewPartitionerWizard"
+            name="Partitioner"
+            project="false"/>
+      <category
+            id="org.apache.hdt.ui.newWizards.category"
+            name="Hadoop">
+      </category>      
    </extension>
    <extension
          point="org.eclipse.ui.popupMenus">
@@ -381,6 +431,10 @@
    </extension>
    <extension
          point="org.eclipse.ui.views">
+       <category
+            id="org.apache.hdt.ui.category"
+            name="Hadoop">
+      </category>  
       <view
             allowMultiple="false"
             category="org.apache.hdt.ui.category"
@@ -390,10 +444,15 @@
             name="Hadoop Servers"
             restorable="true">
       </view>
-      <category
-            id="org.apache.hdt.ui.category"
-            name="Hadoop">
-      </category>
+      <view
+            allowMultiple="false"
+            category="org.apache.hdt.ui.category"
+            class="org.apache.hdt.ui.internal.mr.ClusterView"
+            icon="icons/hadoop-logo-16x16.png"
+            id="org.apache.hdt.ui.ClusterView"
+            name="Hadoop Clusters"
+            restorable="true">
+      </view>
    </extension>
    <extension
          point="org.eclipse.ui.actionSets">
@@ -420,5 +479,32 @@
          </action>
       </actionSet>
    </extension>
-
+    <extension
+         point="org.eclipse.debug.ui.launchShortcuts">
+      <shortcut
+            class="org.apache.hdt.ui.internal.launch.HadoopApplicationLaunchShortcut"
+            icon="icons/elephantblue16x16.gif"
+            id="org.apache.hdt.launch.shortcut"
+            label="Run on Hadoop"
+            modes="run">
+         <contextualLaunch>
+            <contextLabel mode="run" label="Run on Hadoop" />
+            <enablement>
+             <with variable="selection">
+               <count value="1"/>
+               <iterate>
+                <or>
+                  <test property="org.eclipse.jdt.launching.hasMain"/>
+                  <and>
+                     <test property="org.eclipse.jdt.launching.isContainer"/>
+                     <test property="org.eclipse.jdt.launching.hasProjectNature" args="org.eclipse.jdt.core.javanature"/>
+                     <test property="org.eclipse.jdt.launching.hasProjectNature" args="org.apache.hdt.mrnature"/>
+                  </and>
+                </or>
+               </iterate>
+               </with>
+           </enablement>
+         </contextualLaunch>
+      </shortcut>
+      </extension>
 </plugin>
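
The launch shortcut's enablement above accepts any selection with a main
method, or a container carrying both the Java and M/R natures. A minimal
driver class that would qualify, written against the old
org.apache.hadoop.mapred API this plug-in bundles (illustrative; the class
name and argument handling are placeholders):

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.mapred.FileInputFormat;
    import org.apache.hadoop.mapred.FileOutputFormat;
    import org.apache.hadoop.mapred.JobClient;
    import org.apache.hadoop.mapred.JobConf;

    public class WordCountDriver {
        public static void main(String[] args) throws Exception {
            JobConf conf = new JobConf(WordCountDriver.class);
            conf.setJobName("word-count");
            FileInputFormat.setInputPaths(conf, new Path(args[0]));
            FileOutputFormat.setOutputPath(conf, new Path(args[1]));
            JobClient.runJob(conf); // blocks until the job completes
        }
    }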

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/29467b54/org.apache.hdt.ui/src/org/apache/hdt/ui/ImageLibrary.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/ImageLibrary.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/ImageLibrary.java
new file mode 100644
index 0000000..b4017cd
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/ImageLibrary.java
@@ -0,0 +1,251 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.ui;
+
+import java.net.URL;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.eclipse.core.runtime.FileLocator;
+import org.eclipse.core.runtime.Path;
+import org.eclipse.jface.resource.ImageDescriptor;
+import org.eclipse.swt.graphics.Image;
+import org.eclipse.ui.ISharedImages;
+import org.eclipse.ui.PlatformUI;
+import org.eclipse.ui.plugin.AbstractUIPlugin;
+import org.osgi.framework.Bundle;
+
+/**
+ * Icons manager
+ */
+public class ImageLibrary {
+
+	private final Bundle bundle = Activator.getDefault().getBundle();
+
+	/**
+	 * Singleton instance
+	 */
+	private static volatile ImageLibrary instance = null;
+
+	private ISharedImages sharedImages = PlatformUI.getWorkbench().getSharedImages();
+
+	/**
+	 * Where resources (icons, images...) are available in the Bundle
+	 */
+	private static final String RESOURCE_DIR = "icons/";
+
+	/**
+	 * Public access to image descriptors
+	 * 
+	 * @param name
+	 * @return the image descriptor
+	 */
+	public static ImageDescriptor get(String name) {
+		return getInstance().getImageDescriptorByName(name);
+	}
+
+	/**
+	 * Public access to images
+	 * 
+	 * @param name
+	 * @return the image
+	 */
+	public static Image getImage(String name) {
+		return getInstance().getImageByName(name);
+	}
+
+	/**
+	 * Singleton access
+	 * 
+	 * @return the Image library
+	 */
+	public static ImageLibrary getInstance() {
+		if (instance == null) {
+			synchronized (ImageLibrary.class) {
+				if (instance == null)
+					instance = new ImageLibrary();
+			}
+		}
+		return instance;
+	}
+
+	/**
+	 * Map of registered resources (ImageDescriptor and Image)
+	 */
+	private Map<String, ImageDescriptor> descMap = new HashMap<String, ImageDescriptor>();
+
+	private Map<String, Image> imageMap = new HashMap<String, Image>();
+
+	/**
+	 * Image library constructor: put image definitions here.
+	 */
+	private ImageLibrary() {
+		/*
+		 * Servers view
+		 */
+		newImage("server.view.location.entry", "Elephant-24x24.png");
+		newImage("server.view.job.entry", "job.gif");
+		newImage("server.view.action.location.new", "location-new-16x16.png");
+		newImage("server.view.action.location.edit", "location-edit-16x16.png");
+		newSharedImage("server.view.action.delete", ISharedImages.IMG_TOOL_DELETE);
+
+		/*
+		 * DFS Browser
+		 */
+		newImage("dfs.browser.root.entry", "files.gif");
+		newImage("dfs.browser.location.entry", "Elephant-16x16.png");
+		newSharedImage("dfs.browser.folder.entry", ISharedImages.IMG_OBJ_FOLDER);
+		newSharedImage("dfs.browser.file.entry", ISharedImages.IMG_OBJ_FILE);
+		// DFS files in editor
+		newSharedImage("dfs.file.editor", ISharedImages.IMG_OBJ_FILE);
+		// Actions
+		newImage("dfs.browser.action.mkdir", "new-folder.png");
+		newImage("dfs.browser.action.download", "download.png");
+		newImage("dfs.browser.action.upload_files", "upload.png");
+		newImage("dfs.browser.action.upload_dir", "upload.png");
+		newSharedImage("dfs.browser.action.delete", ISharedImages.IMG_TOOL_DELETE);
+		newImage("dfs.browser.action.refresh", "refresh.png");
+
+		/*
+		 * Wizards
+		 */
+		newImage("wizard.mapper.new", "mapwiz.png");
+		newImage("wizard.reducer.new", "reducewiz.png");
+		newImage("wizard.driver.new", "driverwiz.png");
+		newImage("wizard.mapreduce.project.new", "projwiz.png");
+	}
+
+	/**
+	 * Accessor to image descriptors
+	 * 
+	 * @param name
+	 * @return the registered image descriptor, or null if unknown
+	 */
+	private ImageDescriptor getImageDescriptorByName(String name) {
+		return this.descMap.get(name);
+	}
+
+	/**
+	 * Accessor to images
+	 * 
+	 * @param name
+	 * @return the registered image, or null if unknown
+	 */
+	private Image getImageByName(String name) {
+		return this.imageMap.get(name);
+	}
+
+	/**
+	 * Access to platform shared images
+	 * 
+	 * @param name
+	 * @return the shared image descriptor
+	 */
+	private ImageDescriptor getSharedByName(String name) {
+		return sharedImages.getImageDescriptor(name);
+	}
+
+	/**
+	 * Load and register a new image. If the image resource does not exist or
+	 * fails to load, a default "error" resource is supplied.
+	 * 
+	 * @param name
+	 *            name of the image
+	 * @param filename
+	 *            name of the file containing the image
+	 * @return whether the image has correctly been loaded
+	 */
+	private boolean newImage(String name, String filename) {
+		ImageDescriptor id;
+		boolean success;
+
+		try {
+			URL fileURL = FileLocator.find(bundle, new Path(RESOURCE_DIR + filename), null);
+			id = ImageDescriptor.createFromURL(FileLocator.toFileURL(fileURL));
+			success = true;
+
+		} catch (Exception e) {
+
+			e.printStackTrace();
+			id = ImageDescriptor.getMissingImageDescriptor();
+			// id = getSharedByName(ISharedImages.IMG_OBJS_ERROR_TSK);
+			success = false;
+		}
+
+		descMap.put(name, id);
+		imageMap.put(name, id.createImage(true));
+
+		return success;
+	}
+
+	/**
+	 * Register an image from the workspace shared image pool. If the image
+	 * resource does not exist or fails to load, a default "error" resource is
+	 * supplied.
+	 * 
+	 * @param name
+	 *            name of the image
+	 * @param sharedName
+	 *            name of the shared image ({@link ISharedImages})
+	 * @return whether the image has correctly been loaded
+	 */
+	private boolean newSharedImage(String name, String sharedName) {
+		boolean success = true;
+		ImageDescriptor id = getSharedByName(sharedName);
+
+		if (id == null) {
+			id = ImageDescriptor.getMissingImageDescriptor();
+			// id = getSharedByName(ISharedImages.IMG_OBJS_ERROR_TSK);
+			success = false;
+		}
+
+		descMap.put(name, id);
+		imageMap.put(name, id.createImage(true));
+
+		return success;
+	}
+
+	/**
+	 * Register an image from another plug-in. If the image resource does
+	 * not exist or fails to load, a default "error" resource is supplied.
+	 * 
+	 * @param name
+	 *            name of the image
+	 * @param pluginId
+	 *            identifier of the plug-in hosting the image
+	 * @param filename name of the file containing the image
+	 * @return whether the image has correctly been loaded
+	 */
+	private boolean newPluginImage(String name, String pluginId, String filename) {
+
+		boolean success = true;
+		ImageDescriptor id = AbstractUIPlugin.imageDescriptorFromPlugin(pluginId, filename);
+
+		if (id == null) {
+			id = ImageDescriptor.getMissingImageDescriptor();
+			// id = getSharedByName(ISharedImages.IMG_OBJS_ERROR_TSK);
+			success = false;
+		}
+
+		descMap.put(name, id);
+		imageMap.put(name, id.createImage(true));
+
+		return success;
+	}
+}
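
Client code refers to icons only by their registered key, for example
setImageDescriptor(ImageLibrary.get("wizard.mapreduce.project.new")) in the
project wizard later in this series. Note that the double-checked locking in
getInstance() is safe here because the instance field is declared volatile.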

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/29467b54/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/HadoopApplicationLaunchShortcut.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/HadoopApplicationLaunchShortcut.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/HadoopApplicationLaunchShortcut.java
new file mode 100644
index 0000000..4cc03d4
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/HadoopApplicationLaunchShortcut.java
@@ -0,0 +1,130 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.ui.internal.launch;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.logging.Logger;
+
+import org.eclipse.core.resources.IFile;
+import org.eclipse.core.resources.IResource;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.debug.core.ILaunchConfiguration;
+import org.eclipse.debug.core.ILaunchConfigurationWorkingCopy;
+import org.eclipse.jdt.core.IJavaProject;
+import org.eclipse.jdt.core.IType;
+import org.eclipse.jdt.core.JavaCore;
+import org.eclipse.jdt.debug.ui.launchConfigurations.JavaApplicationLaunchShortcut;
+import org.eclipse.jdt.launching.IJavaLaunchConfigurationConstants;
+import org.eclipse.jdt.launching.IRuntimeClasspathEntry;
+import org.eclipse.jdt.launching.JavaRuntime;
+import org.eclipse.jface.wizard.IWizard;
+import org.eclipse.jface.wizard.WizardDialog;
+import org.eclipse.swt.widgets.Display;
+import org.eclipse.swt.widgets.Shell;
+
+/**
+ * Add a shortcut "Run on Hadoop" to the Run menu
+ */
+
+public class HadoopApplicationLaunchShortcut extends JavaApplicationLaunchShortcut {
+
+	static Logger log = Logger.getLogger(HadoopApplicationLaunchShortcut.class.getName());
+
+	// private ActionDelegate delegate = new RunOnHadoopActionDelegate();
+
+	public HadoopApplicationLaunchShortcut() {
+	}
+
+	/* @inheritDoc */
+	@Override
+	protected ILaunchConfiguration createConfiguration(IType type) {
+
+		ILaunchConfiguration iConf = super.createConfiguration(type);
+		ILaunchConfigurationWorkingCopy iConfWC;
+		try {
+			/*
+			 * Tune the default launch configuration: setup run-time classpath
+			 * manually
+			 */
+			iConfWC = iConf.getWorkingCopy();
+
+			iConfWC.setAttribute(IJavaLaunchConfigurationConstants.ATTR_DEFAULT_CLASSPATH, false);
+
+			List<String> classPath = new ArrayList<String>();
+			IResource resource = type.getResource();
+			IJavaProject project = (IJavaProject) resource.getProject().getNature(JavaCore.NATURE_ID);
+			IRuntimeClasspathEntry cpEntry = JavaRuntime.newDefaultProjectClasspathEntry(project);
+			classPath.add(0, cpEntry.getMemento());
+
+			iConfWC.setAttribute(IJavaLaunchConfigurationConstants.ATTR_CLASSPATH, classPath);
+
+		} catch (CoreException e) {
+			e.printStackTrace();
+			// FIXME Error dialog
+			return null;
+		}
+
+		/*
+		 * Update the selected configuration with a specific Hadoop location
+		 * target
+		 */
+		IResource resource = type.getResource();
+		if (!(resource instanceof IFile))
+			return null;
+		RunOnHadoopWizard wizard = new RunOnHadoopWizard((IFile) resource, iConfWC);
+		WizardDialog dialog = new WizardDialog(Display.getDefault().getActiveShell(), wizard);
+
+		dialog.create();
+		dialog.setBlockOnOpen(true);
+		if (dialog.open() != WizardDialog.OK)
+			return null;
+
+		try {
+
+			// Only save if some configuration is different.
+			if (!iConfWC.contentsEqual(iConf))
+				iConfWC.doSave();
+
+		} catch (CoreException e) {
+			e.printStackTrace();
+			// FIXME Error dialog
+			return null;
+		}
+
+		return iConfWC;
+	}
+
+	/**
+	 * Formerly used to run the RunOnHadoopWizard and provide it with a
+	 * ProgressMonitor
+	 */
+	static class Dialog extends WizardDialog {
+		public Dialog(Shell parentShell, IWizard newWizard) {
+			super(parentShell, newWizard);
+		}
+
+		@Override
+		public void create() {
+			super.create();
+
+			((RunOnHadoopWizard) getWizard()).setProgressMonitor(getProgressMonitor());
+		}
+	}
+}


[03/27] HDT-41: Provide existing MR functionality - ported Mapper/Reducer/Partitioner/Driver Wizards - ported Image lookup - ported Map-reduce project wizard - using runtimes from specified hadoop location rather than runtime jars packed in plugin - ported '

Posted by rs...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/29467b54/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/NewMapReduceProjectWizard.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/NewMapReduceProjectWizard.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/NewMapReduceProjectWizard.java
new file mode 100644
index 0000000..3963828
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/NewMapReduceProjectWizard.java
@@ -0,0 +1,385 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.ui.internal.mr;
+
+import java.io.File;
+import java.io.FilenameFilter;
+import java.lang.reflect.InvocationTargetException;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.apache.hdt.core.natures.MapReduceNature;
+import org.apache.hdt.ui.Activator;
+import org.apache.hdt.ui.ImageLibrary;
+import org.apache.hdt.ui.preferences.MapReducePreferencePage;
+import org.apache.hdt.ui.preferences.PreferenceConstants;
+import org.eclipse.core.resources.IProject;
+import org.eclipse.core.resources.IProjectDescription;
+import org.eclipse.core.resources.ResourcesPlugin;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.core.runtime.IConfigurationElement;
+import org.eclipse.core.runtime.IExecutableExtension;
+import org.eclipse.core.runtime.IProgressMonitor;
+import org.eclipse.core.runtime.NullProgressMonitor;
+import org.eclipse.core.runtime.Path;
+import org.eclipse.core.runtime.QualifiedName;
+import org.eclipse.core.runtime.SubProgressMonitor;
+import org.eclipse.jdt.ui.wizards.NewJavaProjectWizardPage;
+import org.eclipse.jface.dialogs.IDialogConstants;
+import org.eclipse.jface.operation.IRunnableWithProgress;
+import org.eclipse.jface.preference.PreferenceDialog;
+import org.eclipse.jface.preference.PreferenceManager;
+import org.eclipse.jface.preference.PreferenceNode;
+import org.eclipse.jface.viewers.IStructuredSelection;
+import org.eclipse.jface.wizard.IWizardPage;
+import org.eclipse.jface.wizard.Wizard;
+import org.eclipse.swt.SWT;
+import org.eclipse.swt.events.SelectionEvent;
+import org.eclipse.swt.events.SelectionListener;
+import org.eclipse.swt.layout.GridData;
+import org.eclipse.swt.layout.GridLayout;
+import org.eclipse.swt.widgets.Button;
+import org.eclipse.swt.widgets.Composite;
+import org.eclipse.swt.widgets.DirectoryDialog;
+import org.eclipse.swt.widgets.Group;
+import org.eclipse.swt.widgets.Link;
+import org.eclipse.swt.widgets.Text;
+import org.eclipse.ui.INewWizard;
+import org.eclipse.ui.IWorkbench;
+import org.eclipse.ui.PlatformUI;
+import org.eclipse.ui.dialogs.WizardNewProjectCreationPage;
+import org.eclipse.ui.wizards.newresource.BasicNewProjectResourceWizard;
+
+/**
+ * Wizard for creating a new MapReduce Project
+ * 
+ */
+
+public class NewMapReduceProjectWizard extends Wizard implements INewWizard, IExecutableExtension {
+	static Logger log = Logger.getLogger(NewMapReduceProjectWizard.class.getName());
+
+	private HadoopFirstPage firstPage;
+
+	private NewJavaProjectWizardPage javaPage;
+
+	public NewDriverWizardPage newDriverPage;
+
+	private IConfigurationElement config;
+
+	public NewMapReduceProjectWizard() {
+		setWindowTitle("New MapReduce Project Wizard");
+	}
+
+	public void init(IWorkbench workbench, IStructuredSelection selection) {
+
+	}
+
+	@Override
+	public boolean canFinish() {
+		return firstPage.isPageComplete() && javaPage.isPageComplete()
+		// && ((!firstPage.generateDriver.getSelection())
+		// || newDriverPage.isPageComplete()
+		;
+	}
+
+	@Override
+	public IWizardPage getNextPage(IWizardPage page) {
+		// if (page == firstPage
+		// && firstPage.generateDriver.getSelection()
+		// )
+		// {
+		// return newDriverPage; // if "generate mapper" checked, second page is
+		// new driver page
+		// }
+		// else
+		// {
+		IWizardPage answer = super.getNextPage(page);
+		if (answer == newDriverPage) {
+			return null; // don't flip to the new driver page unless "generate
+			// driver" is checked
+		} else if (answer == javaPage) {
+			return answer;
+		} else {
+			return answer;
+		}
+		// }
+	}
+
+	@Override
+	public IWizardPage getPreviousPage(IWizardPage page) {
+		if (page == newDriverPage) {
+			return firstPage; // newDriverPage, if it appears, is the second
+			// page
+		} else {
+			return super.getPreviousPage(page);
+		}
+	}
+
+	static class HadoopFirstPage extends WizardNewProjectCreationPage implements SelectionListener {
+		public HadoopFirstPage() {
+			super("New Hadoop Project");
+			setImageDescriptor(ImageLibrary.get("wizard.mapreduce.project.new"));
+		}
+
+		private Link openPreferences;
+
+		private Button workspaceHadoop;
+
+		private Button projectHadoop;
+
+		private Text location;
+
+		private Button browse;
+
+		private String path;
+
+		public String currentPath;
+
+		// private Button generateDriver;
+
+		@Override
+		public void createControl(Composite parent) {
+			super.createControl(parent);
+
+			setTitle("MapReduce Project");
+			setDescription("Create a MapReduce project.");
+
+			Group group = new Group((Composite) getControl(), SWT.NONE);
+			group.setLayoutData(new GridData(GridData.FILL_HORIZONTAL));
+			group.setText("Hadoop MapReduce Library Installation Path");
+			GridLayout layout = new GridLayout(3, true);
+			layout.marginLeft = convertHorizontalDLUsToPixels(IDialogConstants.HORIZONTAL_MARGIN);
+			layout.marginRight = convertHorizontalDLUsToPixels(IDialogConstants.HORIZONTAL_MARGIN);
+			layout.marginTop = convertHorizontalDLUsToPixels(IDialogConstants.VERTICAL_MARGIN);
+			layout.marginBottom = convertHorizontalDLUsToPixels(IDialogConstants.VERTICAL_MARGIN);
+			group.setLayout(layout);
+
+			workspaceHadoop = new Button(group, SWT.RADIO);
+			GridData d = new GridData(GridData.BEGINNING, GridData.BEGINNING, false, false);
+			d.horizontalSpan = 2;
+			workspaceHadoop.setLayoutData(d);
+			// workspaceHadoop.setText("Use default workbench Hadoop library
+			// location");
+			workspaceHadoop.setSelection(true);
+
+			updateHadoopDirLabelFromPreferences();
+
+			openPreferences = new Link(group, SWT.NONE);
+			openPreferences.setText("<a>Configure Hadoop install directory...</a>");
+			openPreferences.setLayoutData(new GridData(GridData.END, GridData.CENTER, false, false));
+			openPreferences.addSelectionListener(this);
+
+			projectHadoop = new Button(group, SWT.RADIO);
+			projectHadoop.setLayoutData(new GridData(GridData.BEGINNING, GridData.CENTER, false, false));
+			projectHadoop.setText("Specify Hadoop library location");
+
+			location = new Text(group, SWT.SINGLE | SWT.BORDER);
+			location.setText("");
+			d = new GridData(GridData.END, GridData.CENTER, true, false);
+			d.horizontalSpan = 1;
+			d.widthHint = 250;
+			d.grabExcessHorizontalSpace = true;
+			location.setLayoutData(d);
+			location.setEnabled(false);
+
+			browse = new Button(group, SWT.NONE);
+			browse.setText("Browse...");
+			browse.setLayoutData(new GridData(GridData.BEGINNING, GridData.CENTER, false, false));
+			browse.setEnabled(false);
+			browse.addSelectionListener(this);
+
+			projectHadoop.addSelectionListener(this);
+			workspaceHadoop.addSelectionListener(this);
+
+			// generateDriver = new Button((Composite) getControl(), SWT.CHECK);
+			// generateDriver.setText("Generate a MapReduce driver");
+			// generateDriver.addListener(SWT.Selection, new Listener()
+			// {
+			// public void handleEvent(Event event) {
+			// getContainer().updateButtons(); }
+			// });
+		}
+
+		@Override
+		public boolean isPageComplete() {
+			boolean validHadoop = validateHadoopLocation();
+
+			if (!validHadoop && isCurrentPage()) {
+				setErrorMessage("Invalid Hadoop Runtime specified; please click 'Configure Hadoop install directory' or fill in library location input field");
+			} else {
+				setErrorMessage(null);
+			}
+
+			return super.isPageComplete() && validHadoop;
+		}
+
+		private boolean validateHadoopLocation() {
+			FilenameFilter gotHadoopJar = new FilenameFilter() {
+				public boolean accept(File dir, String name) {
+					return (name.startsWith("hadoop") && name.endsWith(".jar") && (name.indexOf("test") == -1) && (name.indexOf("examples") == -1));
+				}
+			};
+
+			if (workspaceHadoop.getSelection()) {
+				this.currentPath = path;
+				return new Path(path).toFile().exists() && (new Path(path).toFile().list(gotHadoopJar).length > 0);
+			} else {
+				this.currentPath = location.getText();
+				File file = new Path(location.getText()).toFile();
+				return file.exists() && (new Path(location.getText()).toFile().list(gotHadoopJar).length > 0);
+			}
+		}
+
+		private void updateHadoopDirLabelFromPreferences() {
+			path = Activator.getDefault().getPreferenceStore().getString(PreferenceConstants.P_PATH);
+
+			if ((path != null) && (path.length() > 0)) {
+				workspaceHadoop.setText("Use default Hadoop");
+			} else {
+				workspaceHadoop.setText("Use default Hadoop (currently not set)");
+			}
+		}
+
+		public void widgetDefaultSelected(SelectionEvent e) {
+		}
+
+		public void widgetSelected(SelectionEvent e) {
+			if (e.getSource() == openPreferences) {
+				PreferenceManager manager = new PreferenceManager();
+				manager.addToRoot(new PreferenceNode("Hadoop Installation Directory", new MapReducePreferencePage()));
+				PreferenceDialog dialog = new PreferenceDialog(this.getShell(), manager);
+				dialog.create();
+				dialog.setMessage("Select Hadoop Installation Directory");
+				dialog.setBlockOnOpen(true);
+				dialog.open();
+
+				updateHadoopDirLabelFromPreferences();
+			} else if (e.getSource() == browse) {
+				DirectoryDialog dialog = new DirectoryDialog(this.getShell());
+				dialog.setMessage("Select a hadoop installation, containing hadoop-X-core.jar");
+				dialog.setText("Select Hadoop Installation Directory");
+				String directory = dialog.open();
+
+				if (directory != null) {
+					location.setText(directory);
+
+					if (!validateHadoopLocation()) {
+						setErrorMessage("No Hadoop jar found in specified directory");
+					} else {
+						setErrorMessage(null);
+					}
+				}
+			} else if (projectHadoop.getSelection()) {
+				location.setEnabled(true);
+				browse.setEnabled(true);
+			} else {
+				location.setEnabled(false);
+				browse.setEnabled(false);
+			}
+
+			getContainer().updateButtons();
+		}
+	}
+
+	@Override
+	public void addPages() {
+		/*
+		 * firstPage = new HadoopFirstPage(); addPage(firstPage ); addPage( new
+		 * JavaProjectWizardSecondPage(firstPage) );
+		 */
+
+		firstPage = new HadoopFirstPage();
+		javaPage = new NewJavaProjectWizardPage(ResourcesPlugin.getWorkspace().getRoot(), firstPage);
+		// newDriverPage = new NewDriverWizardPage(false);
+		// newDriverPage.setPageComplete(false); // ensure finish button
+		// initially disabled
+		addPage(firstPage);
+		addPage(javaPage);
+
+		// addPage(newDriverPage);
+	}
+
+	@Override
+	public boolean performFinish() {
+		try {
+			PlatformUI.getWorkbench().getProgressService().runInUI(this.getContainer(), new IRunnableWithProgress() {
+				public void run(IProgressMonitor monitor) {
+					try {
+						monitor.beginTask("Create Hadoop Project", 300);
+
+						javaPage.getRunnable().run(new SubProgressMonitor(monitor, 100));
+
+						// if( firstPage.generateDriver.getSelection())
+						// {
+						// newDriverPage.setPackageFragmentRoot(javaPage.getNewJavaProject().getAllPackageFragmentRoots()[0],
+						// false);
+						// newDriverPage.getRunnable().run(new
+						// SubProgressMonitor(monitor,100));
+						// }
+
+						IProject project = javaPage.getNewJavaProject().getResource().getProject();
+						IProjectDescription description = project.getDescription();
+						String[] existingNatures = description.getNatureIds();
+						String[] natures = new String[existingNatures.length + 1];
+						for (int i = 0; i < existingNatures.length; i++) {
+							natures[i + 1] = existingNatures[i];
+						}
+
+						natures[0] = MapReduceNature.ID;
+						description.setNatureIds(natures);
+
+						project.setPersistentProperty(new QualifiedName(Activator.PLUGIN_ID, "hadoop.runtime.path"), firstPage.currentPath);
+						project.setDescription(description, new NullProgressMonitor());
+
+						String[] natureIds = project.getDescription().getNatureIds();
+						for (int i = 0; i < natureIds.length; i++) {
+							log.fine("Nature id # " + i + " > " + natureIds[i]);
+						}
+
+						monitor.worked(100);
+						monitor.done();
+
+						BasicNewProjectResourceWizard.updatePerspective(config);
+					} catch (CoreException e) {
+						// TODO Auto-generated catch block
+						log.log(Level.SEVERE, "CoreException thrown.", e);
+					} catch (InvocationTargetException e) {
+						// TODO Auto-generated catch block
+						e.printStackTrace();
+					} catch (InterruptedException e) {
+						// TODO Auto-generated catch block
+						e.printStackTrace();
+					}
+				}
+			}, null);
+		} catch (InvocationTargetException e) {
+			// TODO Auto-generated catch block
+			e.printStackTrace();
+		} catch (InterruptedException e) {
+			// TODO Auto-generated catch block
+			e.printStackTrace();
+		}
+
+		return true;
+	}
+
+	public void setInitializationData(IConfigurationElement config, String propertyName, Object data) throws CoreException {
+		this.config = config;
+	}
+}

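A note on performFinish() above: it prepends the MapReduce nature to whatever natures the new Java project already carries. The same operation as a self-contained helper (a sketch only; MapReduceNature.ID is the constant used by the wizard, everything else is stock Eclipse resources API, and NatureUtil is an illustrative name):

    import org.eclipse.core.resources.IProject;
    import org.eclipse.core.resources.IProjectDescription;
    import org.eclipse.core.runtime.CoreException;
    import org.eclipse.core.runtime.NullProgressMonitor;

    public final class NatureUtil {
        /** Prepends natureId to the project's nature list, as the wizard does. */
        public static void addNatureFirst(IProject project, String natureId) throws CoreException {
            IProjectDescription description = project.getDescription();
            String[] existing = description.getNatureIds();
            String[] natures = new String[existing.length + 1];
            natures[0] = natureId; // the new nature goes first
            System.arraycopy(existing, 0, natures, 1, existing.length);
            description.setNatureIds(natures);
            project.setDescription(description, new NullProgressMonitor());
        }
    }
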
http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/29467b54/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/NewMapperWizard.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/NewMapperWizard.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/NewMapperWizard.java
new file mode 100644
index 0000000..b15bfda
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/NewMapperWizard.java
@@ -0,0 +1,167 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.ui.internal.mr;
+
+import org.apache.hdt.ui.ImageLibrary;
+import org.eclipse.core.resources.IFile;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.core.runtime.IProgressMonitor;
+import org.eclipse.core.runtime.IStatus;
+import org.eclipse.jdt.core.IJavaElement;
+import org.eclipse.jdt.core.IType;
+import org.eclipse.jdt.internal.ui.wizards.NewElementWizard;
+import org.eclipse.jdt.ui.wizards.NewTypeWizardPage;
+import org.eclipse.jface.operation.IRunnableWithProgress;
+import org.eclipse.jface.viewers.IStructuredSelection;
+import org.eclipse.swt.SWT;
+import org.eclipse.swt.layout.GridLayout;
+import org.eclipse.swt.widgets.Button;
+import org.eclipse.swt.widgets.Composite;
+import org.eclipse.ui.INewWizard;
+import org.eclipse.ui.IWorkbench;
+
+/**
+ * Wizard for creating a new Mapper class (a class that runs the Map portion of
+ * a MapReduce job). The class is pre-filled with a template.
+ * 
+ */
+
+public class NewMapperWizard extends NewElementWizard implements INewWizard, IRunnableWithProgress {
+	private Page page;
+
+	public NewMapperWizard() {
+		setWindowTitle("New Mapper");
+	}
+
+	public void run(IProgressMonitor monitor) {
+		try {
+			page.createType(monitor);
+		} catch (CoreException e) {
+			// TODO Auto-generated catch block
+			e.printStackTrace();
+		} catch (InterruptedException e) {
+			// TODO Auto-generated catch block
+			e.printStackTrace();
+		}
+	}
+
+	@Override
+	public void init(IWorkbench workbench, IStructuredSelection selection) {
+		super.init(workbench, selection);
+
+		page = new Page();
+		addPage(page);
+		page.setSelection(selection);
+	}
+
+	public static class Page extends NewTypeWizardPage {
+		private Button isCreateMapMethod;
+
+		public Page() {
+			super(true, "Mapper");
+
+			setTitle("Mapper");
+			setDescription("Create a new Mapper implementation.");
+			setImageDescriptor(ImageLibrary.get("wizard.mapper.new"));
+		}
+
+		public void setSelection(IStructuredSelection selection) {
+			initContainerPage(getInitialJavaElement(selection));
+			initTypePage(getInitialJavaElement(selection));
+		}
+
+		@Override
+		public void createType(IProgressMonitor monitor) throws CoreException, InterruptedException {
+			super.createType(monitor);
+		}
+
+		@Override
+		protected void createTypeMembers(IType newType, ImportsManager imports, IProgressMonitor monitor) throws CoreException {
+			super.createTypeMembers(newType, imports, monitor);
+			imports.addImport("java.io.IOException");
+			imports.addImport("org.apache.hadoop.io.Text");
+			imports.addImport("org.apache.hadoop.io.IntWritable");
+			imports.addImport("org.apache.hadoop.io.LongWritable");
+			imports.addImport("org.apache.hadoop.mapreduce.Mapper");
+			newType.createMethod("public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException \n{\n}\n", null,
+					false, monitor);
+		}
+
+		public void createControl(Composite parent) {
+			// super.createControl(parent);
+
+			initializeDialogUnits(parent);
+			Composite composite = new Composite(parent, SWT.NONE);
+			GridLayout layout = new GridLayout();
+			layout.numColumns = 4;
+			composite.setLayout(layout);
+
+			createContainerControls(composite, 4);
+			createPackageControls(composite, 4);
+			createSeparator(composite, 4);
+			createTypeNameControls(composite, 4);
+			createSuperClassControls(composite, 4);
+			createSuperInterfacesControls(composite, 4);
+			// createSeparator(composite, 4);
+
+			setControl(composite);
+
+			setSuperClass("org.apache.hadoop.mapreduce.Mapper<LongWritable, Text, Text, IntWritable>", true);
+
+			setFocus();
+			validate();
+		}
+
+		@Override
+		protected void handleFieldChanged(String fieldName) {
+			super.handleFieldChanged(fieldName);
+
+			validate();
+		}
+
+		private void validate() {
+			updateStatus(new IStatus[] { fContainerStatus, fPackageStatus, fTypeNameStatus, fSuperClassStatus, fSuperInterfacesStatus });
+		}
+	}
+
+	@Override
+	public boolean performFinish() {
+		if (super.performFinish()) {
+			if (getCreatedElement() != null) {
+				openResource((IFile) page.getModifiedResource());
+				selectAndReveal(page.getModifiedResource());
+			}
+
+			return true;
+		} else {
+			return false;
+		}
+	}
+
+	@Override
+	protected void finishPage(IProgressMonitor monitor) throws InterruptedException, CoreException {
+		this.run(monitor);
+	}
+
+	@Override
+	public IJavaElement getCreatedElement() {
+		return page.getCreatedType().getPrimaryElement();
+	}
+
+}

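Assembled from the imports and the createTypeMembers(...) body above, the Mapper skeleton this wizard generates looks roughly like the following (a reconstruction for illustration; MyMapper stands for whatever type name the user enters):

    import java.io.IOException;

    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Mapper;

    public class MyMapper extends Mapper<LongWritable, Text, Text, IntWritable> {

        public void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            // generated body is empty; typical implementations call context.write(...)
        }
    }
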
http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/29467b54/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/NewPartitionerWizard.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/NewPartitionerWizard.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/NewPartitionerWizard.java
new file mode 100644
index 0000000..c09e142
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/NewPartitionerWizard.java
@@ -0,0 +1,194 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.ui.internal.mr;
+
+import java.util.ArrayList;
+
+import org.apache.hdt.ui.ImageLibrary;
+import org.eclipse.core.resources.IFile;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.core.runtime.IProgressMonitor;
+import org.eclipse.core.runtime.IStatus;
+import org.eclipse.jdt.core.IJavaElement;
+import org.eclipse.jdt.core.IType;
+import org.eclipse.jdt.internal.ui.wizards.NewElementWizard;
+import org.eclipse.jdt.ui.wizards.NewTypeWizardPage;
+import org.eclipse.jface.operation.IRunnableWithProgress;
+import org.eclipse.jface.viewers.IStructuredSelection;
+import org.eclipse.swt.SWT;
+import org.eclipse.swt.layout.GridLayout;
+import org.eclipse.swt.widgets.Button;
+import org.eclipse.swt.widgets.Composite;
+import org.eclipse.ui.INewWizard;
+import org.eclipse.ui.IWorkbench;
+
+/**
+ * Wizard for creating a new Partitioner class (a class that decides which
+ * reduce task each intermediate key/value pair is sent to). The class is
+ * pre-filled with a template.
+ * 
+ */
+
+public class NewPartitionerWizard extends NewElementWizard implements INewWizard,
+    IRunnableWithProgress {
+  private Page page;
+
+  public NewPartitionerWizard() {
+    setWindowTitle("New Partitioner");
+  }
+
+  public void run(IProgressMonitor monitor) {
+    try {
+      page.createType(monitor);
+    } catch (CoreException e) {
+      // TODO Auto-generated catch block
+      e.printStackTrace();
+    } catch (InterruptedException e) {
+      // TODO Auto-generated catch block
+      e.printStackTrace();
+    }
+  }
+
+  @Override
+  public void init(IWorkbench workbench, IStructuredSelection selection) {
+    super.init(workbench, selection);
+
+    page = new Page();
+    addPage(page);
+    page.setSelection(selection);
+  }
+
+  public static class Page extends NewTypeWizardPage {
+    private Button isCreateMapMethod;
+
+    public Page() {
+      super(true, "Partitioner");
+
+      setTitle("Partitioner");
+      setDescription("Create a new Partitioner implementation.");
+      setImageDescriptor(ImageLibrary.get("wizard.partitioner.new"));
+    }
+
+    public void setSelection(IStructuredSelection selection) {
+      initContainerPage(getInitialJavaElement(selection));
+      initTypePage(getInitialJavaElement(selection));
+    }
+
+    @Override
+    public void createType(IProgressMonitor monitor) throws CoreException,
+        InterruptedException {
+      super.createType(monitor);
+    }
+
+    @Override
+    protected void createTypeMembers(IType newType, ImportsManager imports,
+        IProgressMonitor monitor) throws CoreException {
+      super.createTypeMembers(newType, imports, monitor);
+      imports.addImport("java.util.HashMap");
+      imports.addImport("org.apache.hadoop.io.Text");
+      imports.addImport("org.apache.hadoop.conf.Configurable");
+      imports.addImport("org.apache.hadoop.conf.Configuration");
+      imports.addImport("org.apache.hadoop.mapreduce.Partitioner");
+      
+ 
+      newType
+      .createMethod(
+          "	  @Override\n" +
+	      "   public Configuration getConf() { \n" +
+		  "     // TODO Auto-generated method stub \n" +
+		  "     return null;\n" +
+	      "   }\n\n" +
+	      "	  @Override\n" +
+	      "	  public void setConf(Configuration conf) {\n" +
+	      "	    // TODO Auto-generated method stub\n" +
+	      "	  }\n\n" +
+		  "	  @Override\n" +
+		  "	  public int getPartition(Text key, Text value, int nr) { \n" +
+          "	  // TODO Auto-generated method stub \n" +
+          "	  return 0; \n" +
+		  "	  }\n", null, false,
+          monitor);
+    }
+
+    public void createControl(Composite parent) {
+      // super.createControl(parent);
+
+      initializeDialogUnits(parent);
+      Composite composite = new Composite(parent, SWT.NONE);
+      GridLayout layout = new GridLayout();
+      layout.numColumns = 4;
+      composite.setLayout(layout);
+
+      createContainerControls(composite, 4);
+      createPackageControls(composite, 4);
+      createSeparator(composite, 4);
+      createTypeNameControls(composite, 4);
+      createSuperClassControls(composite, 4);
+      createSuperInterfacesControls(composite, 4);
+      // createSeparator(composite, 4);
+
+      setControl(composite);
+
+      setSuperClass("org.apache.hadoop.mapreduce.Partitioner<Text, Text>", true);
+      ArrayList al = new ArrayList();
+      al.add("org.apache.hadoop.conf.Configurable");
+      setSuperInterfaces(al, true);
+
+      setFocus();
+      validate();
+    }
+
+    @Override
+    protected void handleFieldChanged(String fieldName) {
+      super.handleFieldChanged(fieldName);
+
+      validate();
+    }
+
+    private void validate() {
+      updateStatus(new IStatus[] { fContainerStatus, fPackageStatus,
+          fTypeNameStatus, fSuperClassStatus, fSuperInterfacesStatus });
+    }
+  }
+
+  @Override
+  public boolean performFinish() {
+    if (super.performFinish()) {
+      if (getCreatedElement() != null) {
+        openResource((IFile) page.getModifiedResource());
+        selectAndReveal(page.getModifiedResource());
+      }
+
+      return true;
+    } else {
+      return false;
+    }
+  }
+
+  @Override
+  protected void finishPage(IProgressMonitor monitor)
+      throws InterruptedException, CoreException {
+    this.run(monitor);
+  }
+
+  @Override
+  public IJavaElement getCreatedElement() {
+    return page.getCreatedType().getPrimaryElement();
+  }
+
+}

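For comparison, the Partitioner skeleton produced by createTypeMembers(...) above comes out roughly as below (a reconstruction; the generated java.util.HashMap import is unused and omitted here, and MyPartitioner is a placeholder name):

    import org.apache.hadoop.conf.Configurable;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Partitioner;

    public class MyPartitioner extends Partitioner<Text, Text> implements Configurable {

        @Override
        public Configuration getConf() {
            return null; // generated stub
        }

        @Override
        public void setConf(Configuration conf) {
            // generated stub
        }

        @Override
        public int getPartition(Text key, Text value, int nr) {
            return 0; // generated stub: routes every key to partition 0
        }
    }
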
http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/29467b54/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/NewReducerWizard.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/NewReducerWizard.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/NewReducerWizard.java
new file mode 100644
index 0000000..da514e4
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/NewReducerWizard.java
@@ -0,0 +1,175 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.ui.internal.mr;
+
+import org.apache.hdt.ui.ImageLibrary;
+import org.eclipse.core.resources.IFile;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.core.runtime.IProgressMonitor;
+import org.eclipse.core.runtime.IStatus;
+import org.eclipse.jdt.core.IJavaElement;
+import org.eclipse.jdt.core.IType;
+import org.eclipse.jdt.internal.ui.wizards.NewElementWizard;
+import org.eclipse.jdt.ui.wizards.NewTypeWizardPage;
+import org.eclipse.jface.operation.IRunnableWithProgress;
+import org.eclipse.jface.viewers.IStructuredSelection;
+import org.eclipse.swt.SWT;
+import org.eclipse.swt.layout.GridLayout;
+import org.eclipse.swt.widgets.Composite;
+import org.eclipse.ui.INewWizard;
+import org.eclipse.ui.IWorkbench;
+
+/**
+ * Wizard for creating a new Reducer class (a class that runs the Reduce
+ * portion of a MapReduce job). The class is pre-filled with a template.
+ * 
+ */
+
+public class NewReducerWizard extends NewElementWizard implements
+    INewWizard, IRunnableWithProgress {
+  private Page page;
+
+  public NewReducerWizard() {
+    setWindowTitle("New Reducer");
+  }
+
+  public void run(IProgressMonitor monitor) {
+    try {
+      page.createType(monitor);
+    } catch (CoreException e) {
+      // TODO Auto-generated catch block
+      e.printStackTrace();
+    } catch (InterruptedException e) {
+      // TODO Auto-generated catch block
+      e.printStackTrace();
+    }
+  }
+
+  @Override
+  public void init(IWorkbench workbench, IStructuredSelection selection) {
+    super.init(workbench, selection);
+
+    page = new Page();
+    addPage(page);
+    page.setSelection(selection);
+  }
+
+  public static class Page extends NewTypeWizardPage {
+    public Page() {
+      super(true, "Reducer");
+
+      setTitle("Reducer");
+      setDescription("Create a new Reducer implementation.");
+      setImageDescriptor(ImageLibrary.get("wizard.reducer.new"));
+    }
+
+    public void setSelection(IStructuredSelection selection) {
+      initContainerPage(getInitialJavaElement(selection));
+      initTypePage(getInitialJavaElement(selection));
+    }
+
+    @Override
+    public void createType(IProgressMonitor monitor) throws CoreException,
+        InterruptedException {
+      super.createType(monitor);
+    }
+
+    @Override
+    protected void createTypeMembers(IType newType, ImportsManager imports,
+        IProgressMonitor monitor) throws CoreException {
+      super.createTypeMembers(newType, imports, monitor);
+      imports.addImport("java.io.IOException");
+      imports.addImport("org.apache.hadoop.mapreduce.Reducer");
+      imports.addImport("org.apache.hadoop.io.Text");
+      imports.addImport("org.apache.hadoop.io.IntWritable");
+      newType
+          .createMethod(
+              "public void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException \n{\n"
+
+                  + "\twhile (values.iterator().hasNext()) {\n"
+                  + "\t\t// replace ValueType with the real type of your value\n"
+
+                  + "\t\t// process value\n" + "\t}\n" + "}\n", null, false,
+              monitor);
+    }
+
+    public void createControl(Composite parent) {
+      // super.createControl(parent);
+
+      initializeDialogUnits(parent);
+      Composite composite = new Composite(parent, SWT.NONE);
+      GridLayout layout = new GridLayout();
+      layout.numColumns = 4;
+      composite.setLayout(layout);
+
+      createContainerControls(composite, 4);
+      createPackageControls(composite, 4);
+      createSeparator(composite, 4);
+      createTypeNameControls(composite, 4);
+      createSuperClassControls(composite, 4);
+      createSuperInterfacesControls(composite, 4);
+      // createSeparator(composite, 4);
+
+      setControl(composite);
+
+      setSuperClass("org.apache.hadoop.mapreduce.Reducer<Text, IntWritable, Text, IntWritable>", true);
+ 
+      setFocus();
+      validate();
+    }
+
+    @Override
+    protected void handleFieldChanged(String fieldName) {
+      super.handleFieldChanged(fieldName);
+
+      validate();
+    }
+
+    private void validate() {
+      updateStatus(new IStatus[] { fContainerStatus, fPackageStatus,
+          fTypeNameStatus, fSuperClassStatus, fSuperInterfacesStatus });
+    }
+  }
+
+  @Override
+  public boolean performFinish() {
+    if (super.performFinish()) {
+      if (getCreatedElement() != null) {
+        selectAndReveal(page.getModifiedResource());
+        openResource((IFile) page.getModifiedResource());
+      }
+
+      return true;
+    } else {
+      return false;
+    }
+  }
+
+  @Override
+  protected void finishPage(IProgressMonitor monitor)
+      throws InterruptedException, CoreException {
+    this.run(monitor);
+  }
+
+  @Override
+  public IJavaElement getCreatedElement() {
+    return (page.getCreatedType() == null) ? null : page.getCreatedType()
+        .getPrimaryElement();
+  }
+}

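And the matching Reducer skeleton, assembled from the template string above (a reconstruction; the generated while loop is a placeholder the user is expected to replace with real value processing):

    import java.io.IOException;

    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Reducer;

    public class MyReducer extends Reducer<Text, IntWritable, Text, IntWritable> {

        public void reduce(Text key, Iterable<IntWritable> values, Context context)
                throws IOException, InterruptedException {
            while (values.iterator().hasNext()) {
                // replace ValueType with the real type of your value
                // process value
            }
        }
    }
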
http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/29467b54/org.apache.hdt.ui/src/org/apache/hdt/ui/preferences/MapReducePreferencePage.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/preferences/MapReducePreferencePage.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/preferences/MapReducePreferencePage.java
new file mode 100644
index 0000000..b653b10
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/preferences/MapReducePreferencePage.java
@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hdt.ui.preferences;
+
+import org.apache.hdt.ui.Activator;
+import org.eclipse.jface.preference.DirectoryFieldEditor;
+import org.eclipse.jface.preference.FieldEditorPreferencePage;
+import org.eclipse.ui.IWorkbench;
+import org.eclipse.ui.IWorkbenchPreferencePage;
+
+/**
+ * This class represents a preference page that is contributed to the
+ * Preferences dialog. By sub-classing <tt>FieldEditorPreferencePage</tt>,
+ * we can use the field support built into JFace that allows us to create a
+ * page that is small and knows how to save, restore and apply itself.
+ * 
+ * <p>
+ * This page is used to modify preferences only. They are stored in the
+ * preference store that belongs to the main plug-in class. That way,
+ * preferences can be accessed directly via the preference store.
+ */
+
+public class MapReducePreferencePage extends FieldEditorPreferencePage
+    implements IWorkbenchPreferencePage {
+
+  public MapReducePreferencePage() {
+    super(GRID);
+    setPreferenceStore(Activator.getDefault().getPreferenceStore());
+    setTitle("Hadoop Map/Reduce Tools");
+    // setDescription("Hadoop Map/Reduce Preferences");
+  }
+
+  /**
+   * Creates the field editors. Field editors are abstractions of the common
+   * GUI blocks needed to manipulate various types of preferences. Each field
+   * editor knows how to save and restore itself.
+   */
+  @Override
+  public void createFieldEditors() {
+    addField(new DirectoryFieldEditor(PreferenceConstants.P_PATH,
+        "&Hadoop installation directory:", getFieldEditorParent()));
+
+  }
+
+  /* @inheritDoc */
+  public void init(IWorkbench workbench) {
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/29467b54/org.apache.hdt.ui/src/org/apache/hdt/ui/preferences/PreferenceConstants.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/preferences/PreferenceConstants.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/preferences/PreferenceConstants.java
new file mode 100644
index 0000000..4efcbdd
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/preferences/PreferenceConstants.java
@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.ui.preferences;
+
+/**
+ * Constant definitions for plug-in preferences
+ */
+public class PreferenceConstants {
+
+  public static final String P_PATH = "pathPreference";
+
+  // public static final String P_BOOLEAN = "booleanPreference";
+  //
+  // public static final String P_CHOICE = "choicePreference";
+  //
+  // public static final String P_STRING = "stringPreference";
+  //	
+}

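P_PATH is the key that ties MapReducePreferencePage above to its consumers; HadoopFirstPage in this same commit reads it back via getPreferenceStore().getString(PreferenceConstants.P_PATH). A minimal sketch of the read side (the helper class name is illustrative, not from the commit):

    import org.apache.hdt.ui.Activator;
    import org.apache.hdt.ui.preferences.PreferenceConstants;

    public final class HadoopPathLookup {
        /** Returns the workspace-wide Hadoop install directory, or null if not set. */
        public static String hadoopInstallDir() {
            String path = Activator.getDefault().getPreferenceStore()
                    .getString(PreferenceConstants.P_PATH);
            return (path == null || path.length() == 0) ? null : path;
        }
    }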

[07/27] git commit: HDT-50: Adding HDFS site if found - Minor tweaks to the Hadoop perspective - Modified create/open of HDFS project in WorkspaceJob - Modified RunOnHadoop to add HDFS site. Show error in dialog if found. - Adding timeout check to MR clus

Posted by rs...@apache.org.
HDT-50: Adding HDFS site if found - Minor tweaks to the Hadoop perspective - Modified create/open of HDFS project in WorkspaceJob - Modified RunOnHadoop to add HDFS site. Show error in dialog if found. - Adding timeout check to MR cluster


Project: http://git-wip-us.apache.org/repos/asf/incubator-hdt/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hdt/commit/00646ae0
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hdt/tree/00646ae0
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hdt/diff/00646ae0

Branch: refs/heads/hadoop-eclipse-merge
Commit: 00646ae0b3e502dda9ec3a4ddb5484412cf709ef
Parents: 0e9e729
Author: Rahul Sharma <rs...@apache.org>
Authored: Tue May 13 17:00:53 2014 +0530
Committer: Rahul Sharma <rs...@apache.org>
Committed: Fri May 23 08:44:02 2014 +0530

----------------------------------------------------------------------
 .../hdt/core/internal/hdfs/HDFSManager.java     | 61 +++++++++++++++++---
 .../hdt/core/launch/AbstractHadoopCluster.java  |  4 ++
 .../hdt/hadoop/release/HadoopCluster.java       | 33 +++++++++++
 org.apache.hdt.ui/plugin.xml                    | 14 ++---
 .../hdt/ui/internal/hdfs/NewHDFSWizard.java     | 21 ++-----
 .../internal/launch/HadoopLocationWizard.java   | 43 +++++++++++++-
 .../hdt/ui/internal/launch/ServerRegistry.java  | 53 ++++++++++++-----
 7 files changed, 184 insertions(+), 45 deletions(-)
----------------------------------------------------------------------

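The timeout check mentioned above lands in HadoopCluster.isAvailable() (diff below): the blocking getJobClient() call is pushed onto a single-thread executor and the wait is bounded to five seconds. The same pattern in isolation, as a generic sketch (class and method names here are illustrative, not from the commit):

    import java.util.concurrent.Callable;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.TimeoutException;

    public final class BoundedCall {
        private static final ExecutorService POOL = Executors.newSingleThreadExecutor();

        /** Runs task on a helper thread; fails fast if it exceeds the timeout. */
        public static <T> T callWithTimeout(Callable<T> task, long seconds) throws Exception {
            Future<T> future = POOL.submit(task);
            try {
                return future.get(seconds, TimeUnit.SECONDS);
            } catch (TimeoutException e) {
                future.cancel(true); // interrupt the stuck call
                throw e;
            }
        }
    }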

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/00646ae0/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSManager.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSManager.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSManager.java
index 93f0696..5897cea 100644
--- a/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSManager.java
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSManager.java
@@ -18,6 +18,7 @@
 
 package org.apache.hdt.core.internal.hdfs;
 
+import java.net.URI;
 import java.net.URISyntaxException;
 import java.util.HashMap;
 import java.util.LinkedHashMap;
@@ -37,13 +38,19 @@ import org.eclipse.core.resources.IResource;
 import org.eclipse.core.resources.IWorkspace;
 import org.eclipse.core.resources.IWorkspaceRoot;
 import org.eclipse.core.resources.ResourcesPlugin;
+import org.eclipse.core.resources.WorkspaceJob;
 import org.eclipse.core.runtime.CoreException;
 import org.eclipse.core.runtime.IConfigurationElement;
+import org.eclipse.core.runtime.IProgressMonitor;
 import org.eclipse.core.runtime.IStatus;
 import org.eclipse.core.runtime.NullProgressMonitor;
 import org.eclipse.core.runtime.Platform;
 import org.eclipse.core.runtime.Status;
+import org.eclipse.core.runtime.SubProgressMonitor;
+import org.eclipse.core.runtime.jobs.Job;
 import org.eclipse.emf.common.util.EList;
+import org.eclipse.jface.dialogs.MessageDialog;
+import org.eclipse.swt.widgets.Display;
 import org.eclipse.team.core.RepositoryProvider;
 
 /**
@@ -176,14 +183,39 @@ public class HDFSManager {
 	 * @return
 	 * @throws CoreException
 	 */
-	private IProject createIProject(String name, java.net.URI hdfsURI) throws CoreException {
+	private IProject createIProject(String name, final java.net.URI hdfsURI) {
 		final IWorkspace workspace = ResourcesPlugin.getWorkspace();
-		IProject project = workspace.getRoot().getProject(name);
-		IProjectDescription pd = workspace.newProjectDescription(name);
-		pd.setLocationURI(hdfsURI);
-		project.create(pd, new NullProgressMonitor());
-		project.open(new NullProgressMonitor());
-		RepositoryProvider.map(project, HDFSTeamRepositoryProvider.ID);
+		final IProject project = workspace.getRoot().getProject(name);
+		final IProjectDescription pd = workspace.newProjectDescription(name);
+	    WorkspaceJob operation = new WorkspaceJob("Adding HDFS Location") {
+
+			@Override
+			public IStatus runInWorkspace(IProgressMonitor monitor) throws CoreException {
+				monitor.beginTask("Creating Project", 100);
+				try {
+					pd.setLocationURI(hdfsURI);
+					project.create(pd, new SubProgressMonitor(monitor, 70));
+					project.open(IResource.BACKGROUND_REFRESH, new SubProgressMonitor(monitor, 30));
+					RepositoryProvider.map(project, HDFSTeamRepositoryProvider.ID);
+					return Status.OK_STATUS;
+				} catch (final CoreException e) {
+					logger.error("error found in creating HDFS site", e);
+					Display.getDefault().syncExec(new Runnable(){
+						public void run(){
+							MessageDialog.openError(Display.getDefault().getActiveShell(), 
+									"HDFS Error", "Unable to create HDFS site :"+e.getMessage());
+						}
+					});
+					return e.getStatus();
+				} finally {
+					monitor.done();
+				}
+			}
+		   };
+		operation.setPriority(Job.LONG);
+		operation.setUser(true);
+		operation.setRule(project);
+		operation.schedule();
 		return project;
 	}
 
@@ -204,6 +236,8 @@ public class HDFSManager {
 		}
 		return uriToServerCacheMap.get(uri);
 	}
+	
+	
 
 	public String getProjectName(HDFSServer server) {
 		return serverToProjectMap.get(server);
@@ -282,4 +316,17 @@ public class HDFSManager {
 			return hdfsClientsMap.get(serverURI);
 		}
 	}
+	
+	public static org.eclipse.core.runtime.IStatus addServer(String serverName, String location, String userId, List<String> groupId) {
+		try {
+			HDFSManager.INSTANCE.createServer(serverName, new URI(location), userId, groupId);
+		} catch (CoreException e) {
+			logger.warn(e.getMessage(), e);
+			return e.getStatus();
+		} catch (URISyntaxException e) {
+			logger.warn(e.getMessage(), e);
+			return new Status(Status.ERROR,"unknown",e.getMessage(),e);
+		}
+		return Status.OK_STATUS;
+	}
 }

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/00646ae0/org.apache.hdt.core/src/org/apache/hdt/core/launch/AbstractHadoopCluster.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/launch/AbstractHadoopCluster.java b/org.apache.hdt.core/src/org/apache/hdt/core/launch/AbstractHadoopCluster.java
index 782a89c..cd06f0e 100644
--- a/org.apache.hdt.core/src/org/apache/hdt/core/launch/AbstractHadoopCluster.java
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/launch/AbstractHadoopCluster.java
@@ -64,6 +64,8 @@ public abstract class AbstractHadoopCluster {
 
 	abstract public boolean loadFromXML(File file) throws IOException;
 	
+	abstract public boolean isAvailable() throws CoreException;
+	
 	public static AbstractHadoopCluster createCluster(File file) throws CoreException, IOException {
 		AbstractHadoopCluster hadoopCluster = createCluster();
 		hadoopCluster.loadFromXML(file);
@@ -81,4 +83,6 @@ public abstract class AbstractHadoopCluster {
 		return hadoopCluster;
 	}
 
+	
+
 }

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/00646ae0/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopCluster.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopCluster.java b/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopCluster.java
index 466739b..67fcb75 100644
--- a/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopCluster.java
+++ b/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopCluster.java
@@ -31,6 +31,13 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
 import java.util.TreeMap;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
 import java.util.logging.Logger;
 
 import javax.xml.parsers.DocumentBuilder;
@@ -52,6 +59,8 @@ import org.apache.hdt.core.launch.IHadoopJob;
 import org.apache.hdt.core.launch.IJarModule;
 import org.apache.hdt.core.launch.IJobListener;
 import org.eclipse.core.internal.utils.FileUtil;
+import org.eclipse.core.resources.WorkspaceJob;
+import org.eclipse.core.runtime.CoreException;
 import org.eclipse.core.runtime.IProgressMonitor;
 import org.eclipse.core.runtime.IStatus;
 import org.eclipse.core.runtime.Status;
@@ -81,6 +90,7 @@ import org.xml.sax.SAXException;
  */
 
 public class HadoopCluster extends AbstractHadoopCluster {
+	private ExecutorService service= Executors.newSingleThreadExecutor();
 
 	/**
 	 * Frequency of location status observations expressed as the delay in ms
@@ -566,4 +576,27 @@ public class HadoopCluster extends AbstractHadoopCluster {
 		}
 
 	}
+
+	/* (non-Javadoc)
+	 * @see org.apache.hdt.core.launch.AbstractHadoopCluster#isAvailable()
+	 */
+	@Override
+	public boolean isAvailable() throws CoreException {
+		Callable<JobClient> task= new Callable<JobClient>() {
+			
+			@Override
+			public JobClient call() throws Exception {
+				return getJobClient();
+			}
+		}; 
+		Future<JobClient> jobClientFuture = service.submit(task);
+		try{
+			JobClient jobClient = jobClientFuture.get(5, TimeUnit.SECONDS);
+			return jobClient!=null;
+		}catch(Exception e){
+			e.printStackTrace();
+			throw new CoreException(new Status(Status.ERROR, 
+					Activator.BUNDLE_ID, "unable to connect to server", e));
+		}
+	}
 }

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/00646ae0/org.apache.hdt.ui/plugin.xml
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/plugin.xml b/org.apache.hdt.ui/plugin.xml
index 7bc5b47..af68ffa 100644
--- a/org.apache.hdt.ui/plugin.xml
+++ b/org.apache.hdt.ui/plugin.xml
@@ -39,31 +39,31 @@
       <perspectiveExtension
             targetID="org.apache.hdt.ui.perspective">
          <view
-               id="org.apache.hdt.ui.view.servers"
+               id="org.eclipse.ui.navigator.ProjectExplorer"
                minimized="false"
                ratio="0.25"
                relationship="left"
                relative="org.eclipse.ui.editorss">
          </view>
          <view
-               id="org.eclipse.ui.navigator.ProjectExplorer"
+               id="org.apache.hdt.ui.view.servers"
                minimized="false"
                ratio="0.5"
                relationship="bottom"
-               relative="org.apache.hdt.ui.view.servers">
+               relative="org.eclipse.ui.navigator.ProjectExplorer">
          </view>
          <view
-               id="org.eclipse.ui.views.PropertySheet"
+               id="org.apache.hdt.ui.ClusterView"
                minimized="false"
                ratio="0.66"
                relationship="bottom"
                relative="org.eclipse.ui.editorss">
          </view>
           <view
-                id=" org.apache.hdt.ui.ClusterView"
+                id="org.eclipse.ui.views.PropertySheet"
                 minimized="false"
                 relationship="stack"
-                relative="org.apache.hdt.ui.view.servers">
+                relative="org.apache.hdt.ui.ClusterView">
          </view>
          <newWizardShortcut
                id="org.apache.hdt.ui.wizard.newProjectWizard"/>
@@ -441,7 +441,7 @@
             class="org.eclipse.ui.navigator.CommonNavigator"
             icon="icons/hadoop-logo-16x16.png"
             id="org.apache.hdt.ui.view.servers"
-            name="Hadoop Servers"
+            name="ZooKeeper Servers"
             restorable="true">
       </view>
       <view

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/00646ae0/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/NewHDFSWizard.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/NewHDFSWizard.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/NewHDFSWizard.java
index 545ea3a..4cd40de 100644
--- a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/NewHDFSWizard.java
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/NewHDFSWizard.java
@@ -17,14 +17,9 @@
  */
 package org.apache.hdt.ui.internal.hdfs;
 
-import java.net.URI;
-import java.net.URISyntaxException;
-
 import org.apache.hdt.core.internal.hdfs.HDFSManager;
 import org.apache.hdt.ui.Activator;
 import org.apache.log4j.Logger;
-import org.eclipse.core.runtime.CoreException;
-import org.eclipse.core.runtime.Status;
 import org.eclipse.core.runtime.jobs.Job;
 import org.eclipse.jface.preference.IPreferenceStore;
 import org.eclipse.jface.viewers.IStructuredSelection;
@@ -73,17 +68,9 @@ public class NewHDFSWizard extends Wizard implements INewWizard {
 
 				Job j = new Job("Creating HDFS project [" + serverLocationWizardPage.getHdfsServerName() + "]") {
 					protected org.eclipse.core.runtime.IStatus run(org.eclipse.core.runtime.IProgressMonitor monitor) {
-						try {
-							HDFSManager.INSTANCE.createServer(serverLocationWizardPage.getHdfsServerName(), new URI(serverLocationWizardPage
-									.getHdfsServerLocation()), serverLocationWizardPage.isOverrideDefaultSecurity() ? serverLocationWizardPage.getUserId()
-									: null, serverLocationWizardPage.isOverrideDefaultSecurity() ? serverLocationWizardPage.getGroupIds() : null);
-						} catch (CoreException e) {
-							logger.warn(e.getMessage(), e);
-							return e.getStatus();
-						} catch (URISyntaxException e) {
-							logger.warn(e.getMessage(), e);
-						}
-						return Status.OK_STATUS;
+						return HDFSManager.addServer(serverLocationWizardPage.getHdfsServerName(),serverLocationWizardPage.getHdfsServerLocation(),
+								serverLocationWizardPage.isOverrideDefaultSecurity() ? serverLocationWizardPage.getUserId() : null,
+								serverLocationWizardPage.isOverrideDefaultSecurity() ? serverLocationWizardPage.getGroupIds() : null);
 					};
 				};
 				j.schedule();
@@ -93,4 +80,6 @@ public class NewHDFSWizard extends Wizard implements INewWizard {
 		return false;
 	}
 
+	
+
 }

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/00646ae0/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/HadoopLocationWizard.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/HadoopLocationWizard.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/HadoopLocationWizard.java
index 4f11128..3757c05 100644
--- a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/HadoopLocationWizard.java
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/HadoopLocationWizard.java
@@ -28,8 +28,12 @@ import java.util.Set;
 import java.util.SortedMap;
 import java.util.TreeMap;
 
-import org.apache.hdt.core.launch.ConfProp;
+import org.apache.hdt.core.internal.hdfs.HDFSManager;
 import org.apache.hdt.core.launch.AbstractHadoopCluster;
+import org.apache.hdt.core.launch.ConfProp;
+import org.eclipse.core.resources.IProject;
+import org.eclipse.core.resources.IWorkspaceRoot;
+import org.eclipse.core.resources.ResourcesPlugin;
 import org.eclipse.core.runtime.CoreException;
 import org.eclipse.jface.dialogs.IMessageProvider;
 import org.eclipse.jface.wizard.WizardPage;
@@ -118,17 +122,49 @@ public class HadoopLocationWizard extends WizardPage {
 	public AbstractHadoopCluster performFinish() {
 		try {
 			if (this.original == null) {
+				Display.getDefault().syncExec(new Runnable() {
+					public void run() {
+						HDFSManager.addServer(location.getLocationName(),
+								location.getConfProp(ConfProp.FS_DEFAULT_URI), location
+								.getConfProp(ConfProp.PI_USER_NAME), null);
+					}
+				});
 				// New location
 				Display.getDefault().syncExec(new Runnable() {
 					public void run() {
-						ServerRegistry.getInstance().addServer(HadoopLocationWizard.this.location);
+							ServerRegistry.getInstance().addServer(HadoopLocationWizard.this.location);
 					}
 				});
 				return this.location;
 
 			} else {
+				
 				// Update location
 				final String originalName = this.original.getLocationName();
+				final String originalLoc = this.original.getConfProp(ConfProp.FS_DEFAULT_URI);
+				final String newName = this.location.getLocationName();
+				final String newLoc = this.location.getConfProp(ConfProp.FS_DEFAULT_URI);
+				
+				if (!originalName.equals(newName) || !originalLoc.equals(newLoc)){
+					IWorkspaceRoot root = ResourcesPlugin.getWorkspace().getRoot();
+					final IProject project = root.getProject(originalName);
+					
+					Display.getDefault().syncExec(new Runnable() {
+						public void run() {
+							if(project.exists()){
+								try {
+									project.close(null);
+									project.delete(true, null);
+								} catch (CoreException e) {
+									e.printStackTrace();
+								}
+							}
+							HDFSManager.addServer(location.getLocationName(),
+									location.getConfProp(ConfProp.FS_DEFAULT_URI), location
+									.getConfProp(ConfProp.PI_USER_NAME), null);
+						}
+					});
+				}
 				this.original.load(this.location);
 
 				Display.getDefault().syncExec(new Runnable() {
@@ -139,6 +175,9 @@ public class HadoopLocationWizard extends WizardPage {
 				return this.original;
 
 			}
+			
+			
+			
 		} catch (Exception e) {
 			e.printStackTrace();
 			setMessage("Invalid server location values", IMessageProvider.ERROR);

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/00646ae0/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/ServerRegistry.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/ServerRegistry.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/ServerRegistry.java
index 785286c..f03fb50 100644
--- a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/ServerRegistry.java
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/ServerRegistry.java
@@ -33,6 +33,12 @@ import javax.security.auth.login.Configuration;
 import org.apache.hdt.core.launch.AbstractHadoopCluster;
 import org.apache.hdt.core.launch.IHadoopClusterListener;
 import org.apache.hdt.ui.Activator;
+import org.eclipse.core.resources.ResourcesPlugin;
+import org.eclipse.core.resources.WorkspaceJob;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.core.runtime.IProgressMonitor;
+import org.eclipse.core.runtime.IStatus;
+import org.eclipse.core.runtime.jobs.Job;
 import org.eclipse.jface.dialogs.MessageDialog;
 
 /**
@@ -173,10 +179,22 @@ public class ServerRegistry {
 		fireListeners(server, SERVER_REMOVED);
 	}
 
-	public synchronized void addServer(AbstractHadoopCluster server) {
-		this.servers.put(server.getLocationName(), server);
-		store();
-		fireListeners(server, SERVER_ADDED);
+	public synchronized void addServer(final AbstractHadoopCluster server) {
+		WorkspaceJob job= new WorkspaceJob("Adding Hadoop Server") {
+			@Override
+			public IStatus runInWorkspace(IProgressMonitor monitor) throws CoreException {
+				if(server.isAvailable()){
+					servers.put(server.getLocationName(), server);
+					store();
+					fireListeners(server, SERVER_ADDED);
+				}
+			return org.eclipse.core.runtime.Status.OK_STATUS;
+		}};
+		
+		job.setPriority(Job.LONG);
+		job.setRule(ResourcesPlugin.getWorkspace().getRoot());
+		job.setUser(true);
+		job.schedule();
 	}
 
 	/**
@@ -187,14 +205,23 @@ public class ServerRegistry {
 	 * @param server
 	 *            the location
 	 */
-	public synchronized void updateServer(String originalName, AbstractHadoopCluster server) {
-
-		// Update the map if the location name has changed
-		if (!server.getLocationName().equals(originalName)) {
-			servers.remove(originalName);
-			servers.put(server.getLocationName(), server);
-		}
-		store();
-		fireListeners(server, SERVER_STATE_CHANGED);
+	public synchronized void updateServer(final String originalName, final AbstractHadoopCluster server) {
+		WorkspaceJob job= new WorkspaceJob("Updating  Hadoop Server") {
+			@Override
+			public IStatus runInWorkspace(IProgressMonitor monitor) throws CoreException {
+				// Update the map if the location name has changed
+				if (!server.getLocationName().equals(originalName) && server.isAvailable()) {
+					servers.remove(originalName);
+					servers.put(server.getLocationName(), server);
+					store();
+					fireListeners(server, SERVER_STATE_CHANGED);
+				}
+				
+				return org.eclipse.core.runtime.Status.OK_STATUS;
+			}};
+		
+		job.setPriority(Job.LONG);
+		job.setUser(true);
+		job.schedule();
 	}
 }


[04/27] HDT-41: Provide existing MR functionality - ported Mapper/Reducer/Partitioner/Driver Wizards - ported Image lookup - ported Map-reduce project wizard - using runtimes from the specified Hadoop location rather than runtime jars packed in the plugin - ported '

Posted by rs...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/29467b54/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/HadoopLocationWizard.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/HadoopLocationWizard.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/HadoopLocationWizard.java
new file mode 100644
index 0000000..4f11128
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/HadoopLocationWizard.java
@@ -0,0 +1,925 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.ui.internal.launch;
+
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.SortedMap;
+import java.util.TreeMap;
+
+import org.apache.hdt.core.launch.ConfProp;
+import org.apache.hdt.core.launch.AbstractHadoopCluster;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.jface.dialogs.IMessageProvider;
+import org.eclipse.jface.wizard.WizardPage;
+import org.eclipse.swt.SWT;
+import org.eclipse.swt.custom.ScrolledComposite;
+import org.eclipse.swt.events.ModifyEvent;
+import org.eclipse.swt.events.ModifyListener;
+import org.eclipse.swt.events.SelectionEvent;
+import org.eclipse.swt.events.SelectionListener;
+import org.eclipse.swt.graphics.Image;
+import org.eclipse.swt.layout.GridData;
+import org.eclipse.swt.layout.GridLayout;
+import org.eclipse.swt.widgets.Button;
+import org.eclipse.swt.widgets.Composite;
+import org.eclipse.swt.widgets.Control;
+import org.eclipse.swt.widgets.Display;
+import org.eclipse.swt.widgets.Event;
+import org.eclipse.swt.widgets.Group;
+import org.eclipse.swt.widgets.Label;
+import org.eclipse.swt.widgets.Listener;
+import org.eclipse.swt.widgets.TabFolder;
+import org.eclipse.swt.widgets.TabItem;
+import org.eclipse.swt.widgets.Text;
+
+/**
+ * Wizard for editing the settings of a Hadoop location
+ * 
+ * The wizard contains two tabs: General and Advanced. It edits the
+ * parameters of the location member, which is either a new location or a
+ * copy of an existing registered location.
+ */
+
+public class HadoopLocationWizard extends WizardPage {
+
+	Image circle;
+
+	/**
+	 * The location effectively edited by the wizard. This location is a copy or
+	 * a new one.
+	 */
+	private AbstractHadoopCluster location;
+
+	/**
+	 * The original location being edited by the wizard (null if we create a new
+	 * instance).
+	 */
+	private AbstractHadoopCluster original;
+
+	/**
+	 * New Hadoop location wizard
+	 */
+	public HadoopLocationWizard() {
+		super("Hadoop Server", "New Hadoop Location", null);
+
+		this.original = null;
+		try {
+			this.location = AbstractHadoopCluster.createCluster();
+		} catch (CoreException e) {
+			e.printStackTrace();
+		}
+		this.location.setLocationName("");
+	}
+
+	/**
+	 * Constructor to edit the parameters of an existing Hadoop server
+	 * 
+	 * @param server
+	 */
+	public HadoopLocationWizard(AbstractHadoopCluster server) {
+		super("Create a new Hadoop location", "Edit Hadoop Location", null);
+		this.original = server;
+		try {
+			this.location = AbstractHadoopCluster.createCluster(server);
+		} catch (CoreException e) {
+			e.printStackTrace();
+		}
+	}
+
+	/**
+	 * Performs any actions appropriate in response to the user having pressed
+	 * the Finish button, or refuses if finishing now is not permitted.
+	 * 
+	 * @return the created or updated Hadoop location
+	 */
+
+	public AbstractHadoopCluster performFinish() {
+		try {
+			if (this.original == null) {
+				// New location
+				Display.getDefault().syncExec(new Runnable() {
+					public void run() {
+						ServerRegistry.getInstance().addServer(HadoopLocationWizard.this.location);
+					}
+				});
+				return this.location;
+
+			} else {
+				// Update location
+				final String originalName = this.original.getLocationName();
+				this.original.load(this.location);
+
+				Display.getDefault().syncExec(new Runnable() {
+					public void run() {
+						ServerRegistry.getInstance().updateServer(originalName, HadoopLocationWizard.this.location);
+					}
+				});
+				return this.original;
+
+			}
+		} catch (Exception e) {
+			e.printStackTrace();
+			setMessage("Invalid server location values", IMessageProvider.ERROR);
+			return null;
+		}
+	}
+
+	/**
+	 * Validates the current Hadoop location settings (look for Hadoop
+	 * installation directory).
+	 * 
+	 */
+	private void testLocation() {
+		setMessage("Not implemented yet", IMessageProvider.WARNING);
+	}
+
+	/**
+	 * Location is not complete (and finish button not available) until a host
+	 * name is specified.
+	 * 
+	 * @inheritDoc
+	 */
+	@Override
+	public boolean isPageComplete() {
+
+		{
+			String locName = location.getConfProp(ConfProp.PI_LOCATION_NAME);
+			if ((locName == null) || (locName.length() == 0) || locName.contains("/")) {
+
+				setMessage("Bad location name: " + "the location name should not contain " + "any character prohibited in a file name.", WARNING);
+
+				return false;
+			}
+		}
+
+		{
+			String master = location.getConfProp(ConfProp.PI_JOB_TRACKER_HOST);
+			if ((master == null) || (master.length() == 0)) {
+
+				setMessage("Bad master host name: " + "the master host name refers to the machine " + "that runs the Job tracker.", WARNING);
+
+				return false;
+			}
+		}
+
+		{
+			String jobTracker = location.getConfProp(ConfProp.JOB_TRACKER_URI);
+			String[] strs = jobTracker.split(":");
+			boolean ok = (strs.length == 2);
+			if (ok) {
+				try {
+					int port = Integer.parseInt(strs[1]);
+					ok = (port >= 0) && (port < 65536);
+				} catch (NumberFormatException nfe) {
+					ok = false;
+				}
+			}
+			if (!ok) {
+				setMessage("The job tracker information (" + ConfProp.JOB_TRACKER_URI.name + ") is invalid. " + "This usually looks like \"host:port\"",
+						WARNING);
+				return false;
+			}
+		}
+
+		{
+			String fsDefaultURI = location.getConfProp(ConfProp.FS_DEFAULT_URI);
+			try {
+				URI uri = new URI(fsDefaultURI);
+			} catch (URISyntaxException e) {
+				setMessage("The default file system URI is invalid. " + "This usually looks like \"hdfs://host:port/\" " + "or \"file:///dir/\"", WARNING);
+				return false;
+			}
+		}
+
+		setMessage("Define the location of a Hadoop infrastructure " + "for running MapReduce applications.");
+		return true;
+	}
+
+	/**
+	 * Create the wizard
+	 */
+	/* @inheritDoc */
+	public void createControl(Composite parent) {
+		setTitle("Define Hadoop location");
+		setDescription("Define the location of a Hadoop infrastructure " + "for running MapReduce applications.");
+
+		Composite panel = new Composite(parent, SWT.FILL);
+		GridLayout glayout = new GridLayout(2, false);
+		panel.setLayout(glayout);
+
+		TabMediator mediator = new TabMediator(panel);
+		{
+			GridData gdata = new GridData(GridData.FILL_BOTH);
+			gdata.horizontalSpan = 2;
+			mediator.folder.setLayoutData(gdata);
+		}
+		this.setControl(panel /* mediator.folder */);
+		{
+			final Button btn = new Button(panel, SWT.NONE);
+			btn.setText("&Load from file");
+			btn.setEnabled(false);
+			btn.setToolTipText("Not yet implemented");
+			btn.addListener(SWT.Selection, new Listener() {
+				public void handleEvent(Event e) {
+					// TODO
+				}
+			});
+		}
+		{
+			final Button validate = new Button(panel, SWT.NONE);
+			validate.setText("&Validate location");
+			validate.setEnabled(false);
+			validate.setToolTipText("Not yet implemented");
+			validate.addListener(SWT.Selection, new Listener() {
+				public void handleEvent(Event e) {
+					testLocation();
+				}
+			});
+		}
+	}
+
+	private interface TabListener {
+		void notifyChange(ConfProp prop, String propValue);
+	}
+
+	/*
+	 * Mediator pattern to keep tabs synchronized with each other and with the
+	 * location state.
+	 */
+
+	private class TabMediator {
+		TabFolder folder;
+
+		private Set<TabListener> tabs = new HashSet<TabListener>();
+
+		TabMediator(Composite parent) {
+			folder = new TabFolder(parent, SWT.NONE);
+			tabs.add(new TabMain(this));
+			tabs.add(new TabAdvanced(this));
+		}
+
+		/**
+		 * Access to current configuration settings
+		 * 
+		 * @param propName
+		 *            the property name
+		 * @return the current property value
+		 */
+		String get(String propName) {
+			return location.getConfProp(propName);
+		}
+
+		String get(ConfProp prop) {
+			return location.getConfProp(prop);
+		}
+
+		/**
+		 * Implements change notifications from any tab: update the location
+		 * state and other tabs
+		 * 
+		 * @param source
+	 *            origin of the notification (one of the tabs)
+		 * @param propName
+		 *            modified property
+		 * @param propValue
+		 *            new value
+		 */
+		void notifyChange(TabListener source, final ConfProp prop, final String propValue) {
+			// Ignore notification when no change
+			String oldValue = location.getConfProp(prop);
+			if ((oldValue != null) && oldValue.equals(propValue))
+				return;
+
+			location.setConfProp(prop, propValue);
+			Display.getDefault().syncExec(new Runnable() {
+				public void run() {
+					getContainer().updateButtons();
+				}
+			});
+
+			this.fireChange(source, prop, propValue);
+
+			/*
+			 * Now we deal with dependencies between settings
+			 */
+			final String jobTrackerHost = location.getConfProp(ConfProp.PI_JOB_TRACKER_HOST);
+			final String jobTrackerPort = location.getConfProp(ConfProp.PI_JOB_TRACKER_PORT);
+			final String nameNodeHost = location.getConfProp(ConfProp.PI_NAME_NODE_HOST);
+			final String nameNodePort = location.getConfProp(ConfProp.PI_NAME_NODE_PORT);
+			final boolean colocate = location.getConfProp(ConfProp.PI_COLOCATE_MASTERS).equalsIgnoreCase("yes");
+			final String jobTrackerURI = location.getConfProp(ConfProp.JOB_TRACKER_URI);
+			final String fsDefaultURI = location.getConfProp(ConfProp.FS_DEFAULT_URI);
+			final String socksServerURI = location.getConfProp(ConfProp.SOCKS_SERVER);
+			final boolean socksProxyEnable = location.getConfProp(ConfProp.PI_SOCKS_PROXY_ENABLE).equalsIgnoreCase("yes");
+			final String socksProxyHost = location.getConfProp(ConfProp.PI_SOCKS_PROXY_HOST);
+			final String socksProxyPort = location.getConfProp(ConfProp.PI_SOCKS_PROXY_PORT);
+
+			Display.getDefault().syncExec(new Runnable() {
+				public void run() {
+					switch (prop) {
+					case PI_JOB_TRACKER_HOST: {
+						if (colocate)
+							notifyChange(null, ConfProp.PI_NAME_NODE_HOST, jobTrackerHost);
+						String newJobTrackerURI = String.format("%s:%s", jobTrackerHost, jobTrackerPort);
+						notifyChange(null, ConfProp.JOB_TRACKER_URI, newJobTrackerURI);
+						break;
+					}
+					case PI_JOB_TRACKER_PORT: {
+						String newJobTrackerURI = String.format("%s:%s", jobTrackerHost, jobTrackerPort);
+						notifyChange(null, ConfProp.JOB_TRACKER_URI, newJobTrackerURI);
+						break;
+					}
+					case PI_NAME_NODE_HOST: {
+						String newHDFSURI = String.format("hdfs://%s:%s/", nameNodeHost, nameNodePort);
+						notifyChange(null, ConfProp.FS_DEFAULT_URI, newHDFSURI);
+
+						// Break colocation if someone forces the DFS Master host
+						if (!colocate && !nameNodeHost.equals(jobTrackerHost))
+							notifyChange(null, ConfProp.PI_COLOCATE_MASTERS, "no");
+						break;
+					}
+					case PI_NAME_NODE_PORT: {
+						String newHDFSURI = String.format("hdfs://%s:%s/", nameNodeHost, nameNodePort);
+						notifyChange(null, ConfProp.FS_DEFAULT_URI, newHDFSURI);
+						break;
+					}
+					case PI_SOCKS_PROXY_HOST: {
+						String newSocksProxyURI = String.format("%s:%s", socksProxyHost, socksProxyPort);
+						notifyChange(null, ConfProp.SOCKS_SERVER, newSocksProxyURI);
+						break;
+					}
+					case PI_SOCKS_PROXY_PORT: {
+						String newSocksProxyURI = String.format("%s:%s", socksProxyHost, socksProxyPort);
+						notifyChange(null, ConfProp.SOCKS_SERVER, newSocksProxyURI);
+						break;
+					}
+					case JOB_TRACKER_URI: {
+						String[] strs = jobTrackerURI.split(":", 2);
+						String host = strs[0];
+						String port = (strs.length == 2) ? strs[1] : "";
+						notifyChange(null, ConfProp.PI_JOB_TRACKER_HOST, host);
+						notifyChange(null, ConfProp.PI_JOB_TRACKER_PORT, port);
+						break;
+					}
+					case FS_DEFAULT_URI: {
+						try {
+							URI uri = new URI(fsDefaultURI);
+							if (uri.getScheme().equals("hdfs")) {
+								String host = uri.getHost();
+								String port = Integer.toString(uri.getPort());
+								notifyChange(null, ConfProp.PI_NAME_NODE_HOST, host);
+								notifyChange(null, ConfProp.PI_NAME_NODE_PORT, port);
+							}
+						} catch (URISyntaxException use) {
+							// Ignore the update!
+						}
+						break;
+					}
+					case SOCKS_SERVER: {
+						String[] strs = socksServerURI.split(":", 2);
+						String host = strs[0];
+						String port = (strs.length == 2) ? strs[1] : "";
+						notifyChange(null, ConfProp.PI_SOCKS_PROXY_HOST, host);
+						notifyChange(null, ConfProp.PI_SOCKS_PROXY_PORT, port);
+						break;
+					}
+					case PI_COLOCATE_MASTERS: {
+						if (colocate)
+							notifyChange(null, ConfProp.PI_NAME_NODE_HOST, jobTrackerHost);
+						break;
+					}
+					case PI_SOCKS_PROXY_ENABLE: {
+						if (socksProxyEnable) {
+							notifyChange(null, ConfProp.SOCKET_FACTORY_DEFAULT, "org.apache.hadoop.net.SocksSocketFactory");
+						} else {
+							notifyChange(null, ConfProp.SOCKET_FACTORY_DEFAULT, "org.apache.hadoop.net.StandardSocketFactory");
+						}
+						break;
+					}
+					}
+				}
+			});
+
+		}
+
+		/**
+		 * Change notifications on properties (by name). A property might not be
+		 * reflected as a ConfProp enum. If it is, the notification is forwarded
+		 * to the ConfProp notifyChange method. If not, it is processed here.
+		 * 
+		 * @param source
+		 * @param propName
+		 * @param propValue
+		 */
+		void notifyChange(TabListener source, String propName, String propValue) {
+
+			ConfProp prop = ConfProp.getByName(propName);
+			if (prop != null)
+				notifyChange(source, prop, propValue);
+
+			location.setConfProp(propName, propValue);
+		}
+
+		/**
+		 * Broadcast a property change to all registered tabs. If a tab is
+		 * identified as the source of the change, this tab will not be
+		 * notified.
+		 * 
+		 * @param source
+		 *            TODO
+		 * @param prop
+		 * @param value
+		 */
+		private void fireChange(TabListener source, ConfProp prop, String value) {
+			for (TabListener tab : tabs) {
+				if (tab != source)
+					tab.notifyChange(prop, value);
+			}
+		}
+
+	}
+
+	/**
+	 * Create an SWT Text component for the given {@link ConfProp} text
+	 * configuration property.
+	 * 
+	 * @param listener
+	 * @param parent
+	 * @param prop
+	 * @return
+	 */
+	private Text createConfText(ModifyListener listener, Composite parent, ConfProp prop) {
+
+		Text text = new Text(parent, SWT.SINGLE | SWT.BORDER);
+		GridData data = new GridData(GridData.FILL_HORIZONTAL);
+		text.setLayoutData(data);
+		text.setData("hProp", prop);
+		text.setText(location.getConfProp(prop));
+		text.addModifyListener(listener);
+
+		return text;
+	}
+
+	/**
+	 * Create an SWT check-style Button component for the given {@link ConfProp}
+	 * boolean configuration property.
+	 * 
+	 * @param listener
+	 * @param parent
+	 * @param prop
+	 * @return
+	 */
+	private Button createConfCheckButton(SelectionListener listener, Composite parent, ConfProp prop, String text) {
+
+		Button button = new Button(parent, SWT.CHECK);
+		button.setText(text);
+		button.setData("hProp", prop);
+		button.setSelection(location.getConfProp(prop).equalsIgnoreCase("yes"));
+		button.addSelectionListener(listener);
+
+		return button;
+	}
+
+	/**
+	 * Create an editor entry for the given configuration property. The editor
+	 * is a (Label, Text) pair.
+	 * 
+	 * @param listener
+	 *            the listener to trigger on property change
+	 * @param parent
+	 *            the SWT parent container
+	 * @param prop
+	 *            the property to create an editor for
+	 * @param labelText
+	 *            a label (null defaults to the property name)
+	 * 
+	 * @return a SWT Text field
+	 */
+	private Text createConfLabelText(ModifyListener listener, Composite parent, ConfProp prop, String labelText) {
+
+		Label label = new Label(parent, SWT.NONE);
+		if (labelText == null)
+			labelText = prop.name;
+		label.setText(labelText);
+
+		return createConfText(listener, parent, prop);
+	}
+
+	/**
+	 * Create an editor entry for the given configuration name
+	 * 
+	 * @param listener
+	 *            the listener to trigger on property change
+	 * @param parent
+	 *            the SWT parent container
+	 * @param propName
+	 *            the name of the property to create an editor for
+	 * @param labelText
+	 *            a label (null defaults to the property name)
+	 * 
+	 * @return a SWT Text field
+	 */
+	private Text createConfNameEditor(ModifyListener listener, Composite parent, String propName, String labelText) {
+
+		{
+			ConfProp prop = ConfProp.getByName(propName);
+			if (prop != null)
+				return createConfLabelText(listener, parent, prop, labelText);
+		}
+
+		Label label = new Label(parent, SWT.NONE);
+		if (labelText == null)
+			labelText = propName;
+		label.setText(labelText);
+
+		Text text = new Text(parent, SWT.SINGLE | SWT.BORDER);
+		GridData data = new GridData(GridData.FILL_HORIZONTAL);
+		text.setLayoutData(data);
+		text.setData("hPropName", propName);
+		text.setText(location.getConfProp(propName));
+		text.addModifyListener(listener);
+
+		return text;
+	}
+
+	/**
+	 * Main parameters of the Hadoop location: <li>host and port of the
+	 * Map/Reduce master (Job tracker) <li>host and port of the DFS master (Name
+	 * node) <li>SOCKS proxy
+	 */
+	private class TabMain implements TabListener, ModifyListener, SelectionListener {
+
+		TabMediator mediator;
+
+		Text locationName;
+
+		Text textJTHost;
+
+		Text textNNHost;
+
+		Button colocateMasters;
+
+		Text textJTPort;
+
+		Text textNNPort;
+
+		Text userName;
+
+		Button useSocksProxy;
+
+		Text socksProxyHost;
+
+		Text socksProxyPort;
+
+		TabMain(TabMediator mediator) {
+			this.mediator = mediator;
+			TabItem tab = new TabItem(mediator.folder, SWT.NONE);
+			tab.setText("General");
+			tab.setToolTipText("General location parameters");
+			tab.setImage(circle);
+			tab.setControl(createControl(mediator.folder));
+		}
+
+		private Control createControl(Composite parent) {
+
+			Composite panel = new Composite(parent, SWT.FILL);
+			panel.setLayout(new GridLayout(2, false));
+
+			GridData data;
+
+			/*
+			 * Location name
+			 */
+			{
+				Composite subpanel = new Composite(panel, SWT.FILL);
+				subpanel.setLayout(new GridLayout(2, false));
+				data = new GridData();
+				data.horizontalSpan = 2;
+				data.horizontalAlignment = SWT.FILL;
+				subpanel.setLayoutData(data);
+
+				locationName = createConfLabelText(this, subpanel, ConfProp.PI_LOCATION_NAME, "&Location name:");
+			}
+
+			/*
+			 * Map/Reduce group
+			 */
+			{
+				Group groupMR = new Group(panel, SWT.SHADOW_NONE);
+				groupMR.setText("Map/Reduce Master");
+				groupMR.setToolTipText("Address of the Map/Reduce master node " + "(the Job Tracker).");
+				GridLayout layout = new GridLayout(2, false);
+				groupMR.setLayout(layout);
+				data = new GridData();
+				data.verticalAlignment = SWT.FILL;
+				data.horizontalAlignment = SWT.CENTER;
+				data.widthHint = 250;
+				groupMR.setLayoutData(data);
+
+				// Job Tracker host
+				Label label = new Label(groupMR, SWT.NONE);
+				label.setText("Host:");
+				data = new GridData(GridData.BEGINNING, GridData.CENTER, false, true);
+				label.setLayoutData(data);
+
+				textJTHost = createConfText(this, groupMR, ConfProp.PI_JOB_TRACKER_HOST);
+				data = new GridData(GridData.FILL, GridData.CENTER, true, true);
+				textJTHost.setLayoutData(data);
+
+				// Job Tracker port
+				label = new Label(groupMR, SWT.NONE);
+				label.setText("Port:");
+				data = new GridData(GridData.BEGINNING, GridData.CENTER, false, true);
+				label.setLayoutData(data);
+
+				textJTPort = createConfText(this, groupMR, ConfProp.PI_JOB_TRACKER_PORT);
+				data = new GridData(GridData.FILL, GridData.CENTER, true, true);
+				textJTPort.setLayoutData(data);
+			}
+
+			/*
+			 * DFS group
+			 */
+			{
+				Group groupDFS = new Group(panel, SWT.SHADOW_NONE);
+				groupDFS.setText("DFS Master");
+				groupDFS.setToolTipText("Address of the Distributed FileSystem " + "master node (the Name Node).");
+				GridLayout layout = new GridLayout(2, false);
+				groupDFS.setLayout(layout);
+				data = new GridData();
+				data.horizontalAlignment = SWT.CENTER;
+				data.widthHint = 250;
+				groupDFS.setLayoutData(data);
+
+				colocateMasters = createConfCheckButton(this, groupDFS, ConfProp.PI_COLOCATE_MASTERS, "Use M/R Master host");
+				data = new GridData();
+				data.horizontalSpan = 2;
+				colocateMasters.setLayoutData(data);
+
+				// Name Node host
+				Label label = new Label(groupDFS, SWT.NONE);
+				data = new GridData();
+				label.setText("Host:");
+				label.setLayoutData(data);
+
+				textNNHost = createConfText(this, groupDFS, ConfProp.PI_NAME_NODE_HOST);
+
+				// Name Node port
+				label = new Label(groupDFS, SWT.NONE);
+				data = new GridData();
+				label.setText("Port:");
+				label.setLayoutData(data);
+
+				textNNPort = createConfText(this, groupDFS, ConfProp.PI_NAME_NODE_PORT);
+			}
+
+			{
+				Composite subpanel = new Composite(panel, SWT.FILL);
+				subpanel.setLayout(new GridLayout(2, false));
+				data = new GridData();
+				data.horizontalSpan = 2;
+				data.horizontalAlignment = SWT.FILL;
+				subpanel.setLayoutData(data);
+
+				userName = createConfLabelText(this, subpanel, ConfProp.PI_USER_NAME, "&User name:");
+			}
+
+			// SOCKS proxy group
+			{
+				Group groupSOCKS = new Group(panel, SWT.SHADOW_NONE);
+				groupSOCKS.setText("SOCKS proxy");
+				groupSOCKS.setToolTipText("Address of the SOCKS proxy to use " + "to connect to the infrastructure.");
+				GridLayout layout = new GridLayout(2, false);
+				groupSOCKS.setLayout(layout);
+				data = new GridData();
+				data.horizontalAlignment = SWT.CENTER;
+				data.horizontalSpan = 2;
+				data.widthHint = 250;
+				groupSOCKS.setLayoutData(data);
+
+				useSocksProxy = createConfCheckButton(this, groupSOCKS, ConfProp.PI_SOCKS_PROXY_ENABLE, "Enable SOCKS proxy");
+				data = new GridData();
+				data.horizontalSpan = 2;
+				useSocksProxy.setLayoutData(data);
+
+				// SOCKS proxy host
+				Label label = new Label(groupSOCKS, SWT.NONE);
+				data = new GridData();
+				label.setText("Host:");
+				label.setLayoutData(data);
+
+				socksProxyHost = createConfText(this, groupSOCKS, ConfProp.PI_SOCKS_PROXY_HOST);
+
+				// SOCKS proxy port
+				label = new Label(groupSOCKS, SWT.NONE);
+				data = new GridData();
+				label.setText("Port:");
+				label.setLayoutData(data);
+
+				socksProxyPort = createConfText(this, groupSOCKS, ConfProp.PI_SOCKS_PROXY_PORT);
+			}
+
+			// Update the state of all widgets according to the current values!
+			reloadConfProp(ConfProp.PI_COLOCATE_MASTERS);
+			reloadConfProp(ConfProp.PI_SOCKS_PROXY_ENABLE);
+			reloadConfProp(ConfProp.PI_JOB_TRACKER_HOST);
+
+			return panel;
+		}
+
+		/**
+		 * Reload the given configuration property value
+		 * 
+		 * @param prop
+		 */
+		private void reloadConfProp(ConfProp prop) {
+			this.notifyChange(prop, location.getConfProp(prop));
+		}
+
+		public void notifyChange(ConfProp prop, String propValue) {
+			switch (prop) {
+			case PI_JOB_TRACKER_HOST: {
+				textJTHost.setText(propValue);
+				break;
+			}
+			case PI_JOB_TRACKER_PORT: {
+				textJTPort.setText(propValue);
+				break;
+			}
+			case PI_LOCATION_NAME: {
+				locationName.setText(propValue);
+				break;
+			}
+			case PI_USER_NAME: {
+				userName.setText(propValue);
+				break;
+			}
+			case PI_COLOCATE_MASTERS: {
+				if (colocateMasters != null) {
+					boolean colocate = propValue.equalsIgnoreCase("yes");
+					colocateMasters.setSelection(colocate);
+					if (textNNHost != null) {
+						textNNHost.setEnabled(!colocate);
+					}
+				}
+				break;
+			}
+			case PI_NAME_NODE_HOST: {
+				textNNHost.setText(propValue);
+				break;
+			}
+			case PI_NAME_NODE_PORT: {
+				textNNPort.setText(propValue);
+				break;
+			}
+			case PI_SOCKS_PROXY_ENABLE: {
+				if (useSocksProxy != null) {
+					boolean useProxy = propValue.equalsIgnoreCase("yes");
+					useSocksProxy.setSelection(useProxy);
+					if (socksProxyHost != null)
+						socksProxyHost.setEnabled(useProxy);
+					if (socksProxyPort != null)
+						socksProxyPort.setEnabled(useProxy);
+				}
+				break;
+			}
+			case PI_SOCKS_PROXY_HOST: {
+				socksProxyHost.setText(propValue);
+				break;
+			}
+			case PI_SOCKS_PROXY_PORT: {
+				socksProxyPort.setText(propValue);
+				break;
+			}
+			}
+		}
+
+		/* @inheritDoc */
+		public void modifyText(ModifyEvent e) {
+			final Text text = (Text) e.widget;
+			final ConfProp prop = (ConfProp) text.getData("hProp");
+			Display.getDefault().syncExec(new Runnable() {
+				public void run() {
+					mediator.notifyChange(TabMain.this, prop, text.getText());
+				}
+			});
+		}
+
+		/* @inheritDoc */
+		public void widgetDefaultSelected(SelectionEvent e) {
+			this.widgetSelected(e);
+		}
+
+		/* @inheritDoc */
+		public void widgetSelected(SelectionEvent e) {
+			final Button button = (Button) e.widget;
+			final ConfProp prop = (ConfProp) button.getData("hProp");
+
+			Display.getDefault().syncExec(new Runnable() {
+				public void run() {
+					// We want to receive the update also!
+					mediator.notifyChange(null, prop, button.getSelection() ? "yes" : "no");
+				}
+			});
+		}
+
+	}
+
+	private class TabAdvanced implements TabListener, ModifyListener {
+		TabMediator mediator;
+
+		private Composite panel;
+
+		private Map<String, Text> textMap = new TreeMap<String, Text>();
+
+		TabAdvanced(TabMediator mediator) {
+			this.mediator = mediator;
+			TabItem tab = new TabItem(mediator.folder, SWT.NONE);
+			tab.setText("Advanced parameters");
+			tab.setToolTipText("Access to advanced Hadoop parameters");
+			tab.setImage(circle);
+			tab.setControl(createControl(mediator.folder));
+
+		}
+
+		private Control createControl(Composite parent) {
+			ScrolledComposite sc = new ScrolledComposite(parent, SWT.BORDER | SWT.H_SCROLL | SWT.V_SCROLL);
+
+			panel = new Composite(sc, SWT.NONE);
+			sc.setContent(panel);
+
+			sc.setExpandHorizontal(true);
+			sc.setExpandVertical(true);
+
+			sc.setMinSize(640, 480);
+
+			GridLayout layout = new GridLayout();
+			layout.numColumns = 2;
+			layout.makeColumnsEqualWidth = false;
+			panel.setLayout(layout);
+			panel.setLayoutData(new GridData(GridData.FILL, GridData.FILL, true, true, 1, 1));
+
+			// Sort by property name
+			SortedMap<String, String> map = new TreeMap<String, String>();
+			Iterator<Entry<String, String>> it = location.getConfiguration();
+			while (it.hasNext()) {
+				Entry<String, String> entry = it.next();
+				map.put(entry.getKey(), entry.getValue());
+			}
+
+			for (Entry<String, String> entry : map.entrySet()) {
+				Text text = createConfNameEditor(this, panel, entry.getKey(), null);
+				textMap.put(entry.getKey(), text);
+			}
+
+			sc.setMinSize(panel.computeSize(SWT.DEFAULT, SWT.DEFAULT));
+
+			return sc;
+		}
+
+		public void notifyChange(ConfProp prop, final String propValue) {
+			Text text = textMap.get(prop.name);
+			text.setText(propValue);
+		}
+
+		public void modifyText(ModifyEvent e) {
+			final Text text = (Text) e.widget;
+			Object hProp = text.getData("hProp");
+			final ConfProp prop = (hProp != null) ? (ConfProp) hProp : null;
+			Object hPropName = text.getData("hPropName");
+			final String propName = (hPropName != null) ? (String) hPropName : null;
+
+			Display.getDefault().syncExec(new Runnable() {
+				public void run() {
+					if (prop != null)
+						mediator.notifyChange(TabAdvanced.this, prop, text.getText());
+					else
+						mediator.notifyChange(TabAdvanced.this, propName, text.getText());
+				}
+			});
+		}
+	}
+
+}

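A note on the TabMediator above: its notifyChange method implements a small dependency cascade, where editing a host or port field recomputes derived composite properties such as JOB_TRACKER_URI and FS_DEFAULT_URI, and editing a composite URI splits it back into host and port. The no-op short-circuit at the top is what keeps the mutual updates from recursing forever. A reduced, self-contained sketch of that cascade, using illustrative string keys rather than the real ConfProp enum:

    import java.util.HashMap;
    import java.util.Map;

    public class PropertyCascadeSketch {

        private final Map<String, String> props = new HashMap<String, String>();

        /** Sets a property, ignoring no-op updates and recomputing derived values. */
        public void set(String key, String value) {
            if (value.equals(props.get(key)))
                return; // same short-circuit as the wizard's notifyChange
            props.put(key, value);
            if (key.equals("jobtracker.host") || key.equals("jobtracker.port")) {
                String host = props.get("jobtracker.host");
                String port = props.get("jobtracker.port");
                if (host != null && port != null)
                    set("jobtracker.uri", host + ":" + port); // derived property
            }
        }

        public static void main(String[] args) {
            PropertyCascadeSketch mediator = new PropertyCascadeSketch();
            mediator.set("jobtracker.host", "master");
            mediator.set("jobtracker.port", "9001");
            System.out.println(mediator.props.get("jobtracker.uri")); // master:9001
        }
    }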
http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/29467b54/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/HadoopServerSelectionListContentProvider.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/HadoopServerSelectionListContentProvider.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/HadoopServerSelectionListContentProvider.java
new file mode 100644
index 0000000..1f854d0
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/HadoopServerSelectionListContentProvider.java
@@ -0,0 +1,76 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.ui.internal.launch;
+
+import org.apache.hdt.core.launch.AbstractHadoopCluster;
+import org.eclipse.jface.viewers.IContentProvider;
+import org.eclipse.jface.viewers.ILabelProviderListener;
+import org.eclipse.jface.viewers.IStructuredContentProvider;
+import org.eclipse.jface.viewers.ITableLabelProvider;
+import org.eclipse.jface.viewers.Viewer;
+import org.eclipse.swt.graphics.Image;
+
+/**
+ * Provider that enables selection of a predefined Hadoop server.
+ */
+
+public class HadoopServerSelectionListContentProvider implements
+    IContentProvider, ITableLabelProvider, IStructuredContentProvider {
+  public void dispose() {
+
+  }
+
+  public void inputChanged(Viewer viewer, Object oldInput, Object newInput) {
+
+  }
+
+  public Image getColumnImage(Object element, int columnIndex) {
+    return null;
+  }
+
+  public String getColumnText(Object element, int columnIndex) {
+    if (element instanceof AbstractHadoopCluster) {
+      AbstractHadoopCluster location = (AbstractHadoopCluster) element;
+      if (columnIndex == 0) {
+        return location.getLocationName();
+
+      } else if (columnIndex == 1) {
+        return location.getMasterHostName();
+      }
+    }
+
+    return element.toString();
+  }
+
+  public void addListener(ILabelProviderListener listener) {
+
+  }
+
+  public boolean isLabelProperty(Object element, String property) {
+    return false;
+  }
+
+  public void removeListener(ILabelProviderListener listener) {
+
+  }
+
+  public Object[] getElements(Object inputElement) {
+    return ServerRegistry.getInstance().getServers().toArray();
+  }
+}

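Because this class implements both IStructuredContentProvider and ITableLabelProvider, a single instance can be installed as both the content and the label provider of a JFace TableViewer, which is how RunOnHadoopWizard (below) consumes it. A hedged wiring sketch, assuming it runs inside the same org.apache.hdt.ui.internal.launch package:

    package org.apache.hdt.ui.internal.launch;

    import org.eclipse.jface.viewers.TableViewer;
    import org.eclipse.swt.SWT;
    import org.eclipse.swt.widgets.Display;
    import org.eclipse.swt.widgets.Shell;
    import org.eclipse.swt.widgets.Table;

    public class ProviderWiringSketch {
        public static void main(String[] args) {
            Display display = new Display();
            Shell shell = new Shell(display);
            Table table = new Table(shell, SWT.BORDER | SWT.FULL_SELECTION);

            // One instance serves as both content and label provider.
            TableViewer viewer = new TableViewer(table);
            HadoopServerSelectionListContentProvider provider = new HadoopServerSelectionListContentProvider();
            viewer.setContentProvider(provider);
            viewer.setLabelProvider(provider);
            viewer.setInput(new Object()); // input is ignored; rows come from ServerRegistry

            shell.open();
            while (!shell.isDisposed())
                if (!display.readAndDispatch())
                    display.sleep();
            display.dispose();
        }
    }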
http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/29467b54/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/JarModule.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/JarModule.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/JarModule.java
new file mode 100644
index 0000000..a494baa
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/JarModule.java
@@ -0,0 +1,146 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.ui.internal.launch;
+
+import java.io.File;
+import java.util.logging.Logger;
+
+import org.apache.hdt.core.launch.ErrorMessageDialog;
+import org.apache.hdt.core.launch.IJarModule;
+import org.apache.hdt.ui.Activator;
+import org.eclipse.core.resources.IResource;
+import org.eclipse.core.runtime.IProgressMonitor;
+import org.eclipse.core.runtime.Path;
+import org.eclipse.jdt.core.ICompilationUnit;
+import org.eclipse.jdt.core.IJavaElement;
+import org.eclipse.jdt.core.IType;
+import org.eclipse.jdt.ui.jarpackager.IJarExportRunnable;
+import org.eclipse.jdt.ui.jarpackager.JarPackageData;
+import org.eclipse.swt.widgets.Display;
+import org.eclipse.ui.PlatformUI;
+
+/**
+ * Methods for interacting with the jar file containing the
+ * Mapper/Reducer/Driver classes for a MapReduce job.
+ */
+
+public class JarModule implements IJarModule {
+
+  static Logger log = Logger.getLogger(JarModule.class.getName());
+
+  private IResource resource;
+
+  private File jarFile;
+
+  public JarModule(IResource resource) {
+    this.resource = resource;
+  }
+
+  public String getName() {
+    return resource.getProject().getName() + "/" + resource.getName();
+  }
+
+  /**
+   * Creates a JAR file containing the given resource (Java class with
+   * main()) and all associated resources
+   * 
+   * @param monitor the progress monitor; the resulting JAR file can be
+   * retrieved afterwards via getJarFile()
+   */
+  public void run(IProgressMonitor monitor) {
+
+    log.fine("Build jar");
+    JarPackageData jarrer = new JarPackageData();
+
+    jarrer.setExportJavaFiles(true);
+    jarrer.setExportClassFiles(true);
+    jarrer.setExportOutputFolders(true);
+    jarrer.setOverwrite(true);
+
+    try {
+      // IJavaProject project =
+      // (IJavaProject) resource.getProject().getNature(JavaCore.NATURE_ID);
+
+      // check this is the case before letting this method get called
+      Object element = resource.getAdapter(IJavaElement.class);
+      IType type = ((ICompilationUnit) element).findPrimaryType();
+      jarrer.setManifestMainClass(type);
+
+      // Create a temporary JAR file name
+      File baseDir = Activator.getDefault().getStateLocation().toFile();
+
+      String prefix =
+          String.format("%s_%s-", resource.getProject().getName(), resource
+              .getName());
+      File jarFile = File.createTempFile(prefix, ".jar", baseDir);
+      jarrer.setJarLocation(new Path(jarFile.getAbsolutePath()));
+
+      jarrer.setElements(resource.getProject().members(IResource.FILE));
+      IJarExportRunnable runnable =
+          jarrer.createJarExportRunnable(Display.getDefault()
+              .getActiveShell());
+      runnable.run(monitor);
+
+      this.jarFile = jarFile;
+
+    } catch (Exception e) {
+      e.printStackTrace();
+      throw new RuntimeException(e);
+    }
+  }
+
+  /**
+   * Allow the retrieval of the resulting JAR file
+   * 
+   * @return the generated JAR file
+   */
+  public File getJarFile() {
+    return this.jarFile;
+  }
+
+  /**
+   * Static helper that creates a JAR package for the given resource while
+   * showing a progress bar
+   * 
+   * @param resource the resource to package
+   * @return the created JAR file, or null if the packaging failed
+   */
+  public static File createJarPackage(IResource resource) {
+
+    JarModule jarModule = new JarModule(resource);
+    try {
+      PlatformUI.getWorkbench().getProgressService().run(false, true,
+          jarModule);
+
+    } catch (Exception e) {
+      e.printStackTrace();
+      return null;
+    }
+
+    File jarFile = jarModule.getJarFile();
+    if (jarFile == null) {
+      ErrorMessageDialog.display("Run on Hadoop",
+          "Unable to create or locate the JAR file for the Job");
+      return null;
+    }
+
+    return jarFile;
+  }
+
+}

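JarModule.run() derives a unique JAR name from the project and resource names through File.createTempFile, which both reserves the name and creates the file on disk. That naming step, isolated as a sketch (baseDir stands in for the plug-in state location used in the real code, and the names in main are purely illustrative):

    import java.io.File;
    import java.io.IOException;

    public class TempJarNameSketch {

        /** Creates a uniquely named JAR file such as "myproject_WordCount.java-1234.jar". */
        public static File newJarFile(File baseDir, String project, String resource) throws IOException {
            String prefix = String.format("%s_%s-", project, resource);
            return File.createTempFile(prefix, ".jar", baseDir);
        }

        public static void main(String[] args) throws IOException {
            File f = newJarFile(new File(System.getProperty("java.io.tmpdir")), "myproject", "WordCount.java");
            System.out.println(f.getAbsolutePath());
            f.delete(); // createTempFile actually creates the file; clean up the demo
        }
    }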
http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/29467b54/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/RunOnHadoopWizard.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/RunOnHadoopWizard.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/RunOnHadoopWizard.java
new file mode 100644
index 0000000..fd9f465
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/RunOnHadoopWizard.java
@@ -0,0 +1,346 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.ui.internal.launch;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hdt.core.launch.ErrorMessageDialog;
+import org.apache.hdt.core.launch.AbstractHadoopCluster;
+import org.apache.hdt.ui.Activator;
+import org.eclipse.core.resources.IFile;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.core.runtime.IPath;
+import org.eclipse.core.runtime.IProgressMonitor;
+import org.eclipse.core.runtime.Path;
+import org.eclipse.debug.core.ILaunchConfigurationWorkingCopy;
+import org.eclipse.jdt.launching.IJavaLaunchConfigurationConstants;
+import org.eclipse.jdt.launching.IRuntimeClasspathEntry;
+import org.eclipse.jdt.launching.JavaRuntime;
+import org.eclipse.jface.viewers.TableViewer;
+import org.eclipse.jface.wizard.Wizard;
+import org.eclipse.jface.wizard.WizardPage;
+import org.eclipse.swt.SWT;
+import org.eclipse.swt.events.SelectionEvent;
+import org.eclipse.swt.events.SelectionListener;
+import org.eclipse.swt.layout.FillLayout;
+import org.eclipse.swt.layout.GridData;
+import org.eclipse.swt.layout.GridLayout;
+import org.eclipse.swt.widgets.Button;
+import org.eclipse.swt.widgets.Composite;
+import org.eclipse.swt.widgets.Label;
+import org.eclipse.swt.widgets.Table;
+import org.eclipse.swt.widgets.TableColumn;
+import org.eclipse.swt.widgets.Text;
+
+/**
+ * Wizard for publishing a job to a Hadoop server.
+ */
+
+public class RunOnHadoopWizard extends Wizard {
+
+	private MainWizardPage mainPage;
+
+	private HadoopLocationWizard createNewPage;
+
+	/**
+	 * The file resource (containing a main()) to run on the Hadoop location
+	 */
+	private IFile resource;
+
+	/**
+	 * The launch configuration to update
+	 */
+	private ILaunchConfigurationWorkingCopy iConf;
+
+	private IProgressMonitor progressMonitor;
+
+	public RunOnHadoopWizard(IFile resource, ILaunchConfigurationWorkingCopy iConf) {
+		this.resource = resource;
+		this.iConf = iConf;
+		setForcePreviousAndNextButtons(true);
+		setNeedsProgressMonitor(true);
+		setWindowTitle("Run on Hadoop");
+	}
+
+	/**
+	 * This wizard contains 2 pages: <li>the first one lets the user choose an
+	 * already existing location <li>the second one allows the user to create a
+	 * new location, in case it does not already exist
+	 */
+	/* @inheritDoc */
+	@Override
+	public void addPages() {
+		addPage(this.mainPage = new MainWizardPage());
+		addPage(this.createNewPage = new HadoopLocationWizard());
+	}
+
+	/**
+	 * Performs any actions appropriate in response to the user having pressed
+	 * the Finish button, or refuses if finishing now is not permitted.
+	 */
+	/* @inheritDoc */
+	@Override
+	public boolean performFinish() {
+
+		/*
+		 * Create a new location or get an existing one
+		 */
+		AbstractHadoopCluster location = null;
+		if (mainPage.createNew.getSelection()) {
+			location = createNewPage.performFinish();
+
+		} else if (mainPage.table.getSelection().length == 1) {
+			location = (AbstractHadoopCluster) mainPage.table.getSelection()[0].getData();
+		}
+
+		if (location == null)
+			return false;
+
+		/*
+		 * Get the base directory of the plug-in for storing configurations and
+		 * JARs
+		 */
+		File baseDir = Activator.getDefault().getStateLocation().toFile();
+
+		// Package the Job into a JAR
+		File jarFile = JarModule.createJarPackage(resource);
+		if (jarFile == null) {
+			ErrorMessageDialog.display("Run on Hadoop", "Unable to create or locate the JAR file for the Job");
+			return false;
+		}
+
+		/*
+		 * Generate a temporary Hadoop configuration directory and add it to the
+		 * classpath of the launch configuration
+		 */
+
+		File confDir;
+		try {
+			confDir = File.createTempFile("hadoop-conf-", "", baseDir);
+			confDir.delete();
+			confDir.mkdirs();
+			if (!confDir.isDirectory()) {
+				ErrorMessageDialog.display("Run on Hadoop", "Cannot create temporary directory: " + confDir);
+				return false;
+			}
+		} catch (IOException ioe) {
+			ioe.printStackTrace();
+			return false;
+		}
+		try {
+			location.saveConfiguration(confDir, jarFile.getAbsolutePath());
+		} catch (IOException ioe) {
+			ioe.printStackTrace();
+			return false;
+		}
+		// Set up the launch classpath
+		List<String> classPath;
+		try {
+			classPath = iConf.getAttribute(IJavaLaunchConfigurationConstants.ATTR_CLASSPATH, new ArrayList());
+			IPath confIPath = new Path(confDir.getAbsolutePath());
+			IRuntimeClasspathEntry cpEntry = JavaRuntime.newArchiveRuntimeClasspathEntry(confIPath);
+			classPath.add(0, cpEntry.getMemento());
+			iConf.setAttribute(IJavaLaunchConfigurationConstants.ATTR_CLASSPATH, classPath);
+			iConf.setAttribute(IJavaLaunchConfigurationConstants.ATTR_PROGRAM_ARGUMENTS, mainPage.argumentsText.getText());
+
+		} catch (CoreException e) {
+			e.printStackTrace();
+			return false;
+		}
+
+		// location.runResource(resource, progressMonitor);
+		return true;
+	}
+
+	private void refreshButtons() {
+		getContainer().updateButtons();
+	}
+
+	/**
+	 * Allows finish when an existing server is selected or when a new server
+	 * location is defined
+	 */
+	/* @inheritDoc */
+	@Override
+	public boolean canFinish() {
+		if (mainPage != null)
+			return mainPage.canFinish();
+		return false;
+	}
+
+	/**
+	 * This is the main page of the wizard. It allows the user either to choose
+	 * an already existing location or to indicate that a new location should
+	 * be created.
+	 */
+	public class MainWizardPage extends WizardPage {
+
+		private Button createNew;
+
+		private Table table;
+		private Text argumentsText;
+
+		private Button chooseExisting;
+
+		public MainWizardPage() {
+			super("Select or define server to run on");
+			setTitle("Select Hadoop location");
+			setDescription("Select a Hadoop location to run on.");
+		}
+
+		/* @inheritDoc */
+		@Override
+		public boolean canFlipToNextPage() {
+			return createNew.getSelection();
+		}
+
+		/* @inheritDoc */
+		public void createControl(Composite parent) {
+			Composite panel = new Composite(parent, SWT.NONE);
+			panel.setLayout(new GridLayout(1, false));
+
+			// Label
+			Label label = new Label(panel, SWT.NONE);
+			label.setText("Select a Hadoop Server to run on.");
+			GridData gData = new GridData(GridData.FILL_BOTH);
+			gData.grabExcessVerticalSpace = false;
+			label.setLayoutData(gData);
+
+			// Create location button
+			createNew = new Button(panel, SWT.RADIO);
+			createNew.setText("Define a new Hadoop server location");
+			createNew.setLayoutData(gData);
+			createNew.addSelectionListener(new SelectionListener() {
+				public void widgetDefaultSelected(SelectionEvent e) {
+				}
+
+				public void widgetSelected(SelectionEvent e) {
+					setPageComplete(true);
+					RunOnHadoopWizard.this.refreshButtons();
+				}
+			});
+			createNew.setSelection(true);
+
+			// Select existing location button
+			chooseExisting = new Button(panel, SWT.RADIO);
+			chooseExisting.setText("Choose an existing server from the list below");
+			chooseExisting.setLayoutData(gData);
+			chooseExisting.addSelectionListener(new SelectionListener() {
+				public void widgetDefaultSelected(SelectionEvent e) {
+				}
+
+				public void widgetSelected(SelectionEvent e) {
+					if (chooseExisting.getSelection() && (table.getSelectionCount() == 0)) {
+						if (table.getItems().length > 0) {
+							table.setSelection(0);
+						}
+					}
+					RunOnHadoopWizard.this.refreshButtons();
+				}
+			});
+
+			// Table of existing locations
+			Composite serverListPanel = new Composite(panel, SWT.FILL);
+			gData = new GridData(GridData.FILL_BOTH);
+			gData.horizontalSpan = 1;
+			serverListPanel.setLayoutData(gData);
+
+			FillLayout layout = new FillLayout();
+			layout.marginHeight = layout.marginWidth = 12;
+			serverListPanel.setLayout(layout);
+
+			table = new Table(serverListPanel, SWT.BORDER | SWT.H_SCROLL | SWT.V_SCROLL | SWT.FULL_SELECTION);
+			table.setHeaderVisible(true);
+			table.setLinesVisible(true);
+
+			TableColumn nameColumn = new TableColumn(table, SWT.LEFT);
+			nameColumn.setText("Location");
+			nameColumn.setWidth(450);
+
+			TableColumn hostColumn = new TableColumn(table, SWT.LEFT);
+			hostColumn.setText("Master host name");
+			hostColumn.setWidth(250);
+
+			// If the user select one entry, switch to "chooseExisting"
+			table.addSelectionListener(new SelectionListener() {
+				public void widgetDefaultSelected(SelectionEvent e) {
+				}
+
+				public void widgetSelected(SelectionEvent e) {
+					chooseExisting.setSelection(true);
+					createNew.setSelection(false);
+					setPageComplete(table.getSelectionCount() == 1);
+					RunOnHadoopWizard.this.refreshButtons();
+				}
+			});
+
+			// Label
+			Label argumentsLabel = new Label(panel, SWT.NONE);
+			argumentsLabel.setText("Arguments:");
+			GridData gDataArgumentsLabel = new GridData(GridData.FILL_BOTH);
+			gDataArgumentsLabel.grabExcessVerticalSpace = false;
+			argumentsLabel.setLayoutData(gDataArgumentsLabel);
+
+			// Textbox
+			argumentsText = new Text(panel, SWT.NONE);
+			try {
+				argumentsText.setText(iConf.getAttribute(IJavaLaunchConfigurationConstants.ATTR_PROGRAM_ARGUMENTS, ""));
+			} catch (CoreException e1) {
+				e1.printStackTrace();
+			}
+			GridData gDataArgumentsText = new GridData(GridData.FILL_BOTH);
+			gDataArgumentsText.grabExcessVerticalSpace = false;
+			argumentsText.setLayoutData(gDataArgumentsText);
+
+			TableViewer viewer = new TableViewer(table);
+			HadoopServerSelectionListContentProvider provider = new HadoopServerSelectionListContentProvider();
+			viewer.setContentProvider(provider);
+			viewer.setLabelProvider(provider);
+			viewer.setInput(new Object());
+			// don't care, get from singleton server registry
+
+			this.setControl(panel);
+		}
+
+		/**
+		 * Returns whether this page state allows the Wizard to finish or not
+		 * 
+		 * @return can the wizard finish or not?
+		 */
+		public boolean canFinish() {
+			if (!isControlCreated())
+				return false;
+
+			if (this.createNew.getSelection())
+				return getNextPage().isPageComplete();
+
+			return this.chooseExisting.getSelection();
+		}
+	}
+
+	/**
+	 * @param progressMonitor
+	 */
+	public void setProgressMonitor(IProgressMonitor progressMonitor) {
+		this.progressMonitor = progressMonitor;
+	}
+}

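performFinish() above builds the temporary configuration directory with a create-delete-mkdirs sequence: createTempFile reserves a unique path, the placeholder file is removed, and a directory is created in its place. The directory is then prepended to the launch classpath (classPath.add(0, ...)) so the saved configuration is found first. The directory idiom, isolated as a sketch with illustrative paths:

    import java.io.File;
    import java.io.IOException;

    public class TempConfDirSketch {

        /** Creates a uniquely named temporary directory under baseDir. */
        public static File createConfDir(File baseDir) throws IOException {
            File confDir = File.createTempFile("hadoop-conf-", "", baseDir);
            confDir.delete(); // drop the placeholder file...
            confDir.mkdirs(); // ...and recreate the same path as a directory
            if (!confDir.isDirectory())
                throw new IOException("Cannot create temporary directory: " + confDir);
            return confDir;
        }

        public static void main(String[] args) throws IOException {
            System.out.println(createConfDir(new File(System.getProperty("java.io.tmpdir"))));
        }
    }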
http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/29467b54/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/ServerRegistry.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/ServerRegistry.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/ServerRegistry.java
new file mode 100644
index 0000000..785286c
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/ServerRegistry.java
@@ -0,0 +1,200 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.ui.internal.launch;
+
+import java.io.File;
+import java.io.FilenameFilter;
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+
+import javax.security.auth.login.Configuration;
+
+import org.apache.hdt.core.launch.AbstractHadoopCluster;
+import org.apache.hdt.core.launch.IHadoopClusterListener;
+import org.apache.hdt.ui.Activator;
+import org.eclipse.jface.dialogs.MessageDialog;
+
+/**
+ * Register of Hadoop locations.
+ * 
+ * Each location corresponds to a Hadoop {@link Configuration} stored as an XML
+ * file in the workspace plug-in configuration directory:
+ * <p>
+ * <tt>
+ * &lt;workspace-dir&gt;/.metadata/.plugins/org.apache.hadoop.eclipse/locations/*.xml
+ * </tt>
+ * 
+ */
+public class ServerRegistry {
+
+	private static final ServerRegistry INSTANCE = new ServerRegistry();
+
+	public static final int SERVER_ADDED = 0;
+
+	public static final int SERVER_REMOVED = 1;
+
+	public static final int SERVER_STATE_CHANGED = 2;
+
+	private final File baseDir = Activator.getDefault().getStateLocation().toFile();
+
+	private final File saveDir = new File(baseDir, "locations");
+
+	private ServerRegistry() {
+		if (saveDir.exists() && !saveDir.isDirectory())
+			saveDir.delete();
+		if (!saveDir.exists())
+			saveDir.mkdirs();
+
+		load();
+	}
+
+	private Map<String, AbstractHadoopCluster> servers;
+
+	private Set<IHadoopClusterListener> listeners = new HashSet<IHadoopClusterListener>();
+
+	public static ServerRegistry getInstance() {
+		return INSTANCE;
+	}
+
+	public synchronized Collection<AbstractHadoopCluster> getServers() {
+		return Collections.unmodifiableCollection(servers.values());
+	}
+
+	/**
+	 * Load all available locations from the workspace configuration directory.
+	 */
+	private synchronized void load() {
+		Map<String, AbstractHadoopCluster> map = new TreeMap<String, AbstractHadoopCluster>();
+		for (File file : saveDir.listFiles()) {
+			try {
+				AbstractHadoopCluster server = AbstractHadoopCluster.createCluster(file);
+				map.put(server.getLocationName(), server);
+
+			} catch (Exception exn) {
+				System.err.println(exn);
+			}
+		}
+		this.servers = map;
+	}
+
+	private synchronized void store() {
+		try {
+			File dir = File.createTempFile("locations", "new", baseDir);
+			dir.delete();
+			dir.mkdirs();
+
+			for (AbstractHadoopCluster server : servers.values()) {
+				server.storeSettingsToFile(new File(dir, server.getLocationName() + ".xml"));
+			}
+
+			FilenameFilter XMLFilter = new FilenameFilter() {
+				public boolean accept(File dir, String name) {
+					String lower = name.toLowerCase();
+					return lower.endsWith(".xml");
+				}
+			};
+
+			File backup = new File(baseDir, "locations.backup");
+			if (backup.exists()) {
+				for (File file : backup.listFiles(XMLFilter))
+					if (!file.delete())
+						throw new IOException("Unable to delete backup location file: " + file);
+				if (!backup.delete())
+					throw new IOException("Unable to delete backup location directory: " + backup);
+			}
+
+			saveDir.renameTo(backup);
+			dir.renameTo(saveDir);
+
+		} catch (IOException ioe) {
+			ioe.printStackTrace();
+			MessageDialog.openError(null, "Saving configuration of Hadoop locations failed", ioe.toString());
+		}
+	}
+
+	public void dispose() {
+		for (AbstractHadoopCluster server : getServers()) {
+			server.dispose();
+		}
+	}
+
+	public synchronized AbstractHadoopCluster getServer(String location) {
+		return servers.get(location);
+	}
+
+	/*
+	 * HadoopServer map listeners
+	 */
+
+	public void addListener(IHadoopClusterListener l) {
+		synchronized (listeners) {
+			listeners.add(l);
+		}
+	}
+
+	public void removeListener(IHadoopClusterListener l) {
+		synchronized (listeners) {
+			listeners.remove(l);
+		}
+	}
+
+	private void fireListeners(AbstractHadoopCluster location, int kind) {
+		synchronized (listeners) {
+			for (IHadoopClusterListener listener : listeners) {
+				listener.serverChanged(location, kind);
+			}
+		}
+	}
+
+	public synchronized void removeServer(AbstractHadoopCluster server) {
+		this.servers.remove(server.getLocationName());
+		store();
+		fireListeners(server, SERVER_REMOVED);
+	}
+
+	public synchronized void addServer(AbstractHadoopCluster server) {
+		this.servers.put(server.getLocationName(), server);
+		store();
+		fireListeners(server, SERVER_ADDED);
+	}
+
+	/**
+	 * Update one Hadoop location
+	 * 
+	 * @param originalName
+	 *            the original location name (might have changed)
+	 * @param server
+	 *            the location
+	 */
+	public synchronized void updateServer(String originalName, AbstractHadoopCluster server) {
+
+		// Update the map if the location name has changed
+		if (!server.getLocationName().equals(originalName)) {
+			servers.remove(originalName);
+			servers.put(server.getLocationName(), server);
+		}
+		store();
+		fireListeners(server, SERVER_STATE_CHANGED);
+	}
+}

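The store() method above uses a write-new-then-swap discipline: all locations are serialized into a freshly created directory, the live "locations" directory is renamed to "locations.backup", and the fresh directory is renamed into place, so a failure mid-save never leaves a half-written live directory. A reduced sketch of the swap step (names illustrative; note the real code deletes only the *.xml files inside the old backup):

    import java.io.File;
    import java.io.IOException;

    public class AtomicSaveSketch {

        /** Promotes freshDir to saveDir, keeping the previous saveDir as a backup. */
        public static void promote(File baseDir, File saveDir, File freshDir) throws IOException {
            File backup = new File(baseDir, "locations.backup");
            if (backup.exists() && !deleteRecursively(backup))
                throw new IOException("Unable to delete old backup: " + backup);
            if (!saveDir.renameTo(backup))
                throw new IOException("Unable to back up: " + saveDir);
            if (!freshDir.renameTo(saveDir))
                throw new IOException("Unable to promote: " + freshDir);
        }

        private static boolean deleteRecursively(File file) {
            File[] children = file.listFiles(); // null for plain files
            if (children != null)
                for (File child : children)
                    if (!deleteRecursively(child))
                        return false;
            return file.delete();
        }
    }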
http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/29467b54/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/ClusterView.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/ClusterView.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/ClusterView.java
new file mode 100644
index 0000000..9952904
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/ClusterView.java
@@ -0,0 +1,450 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.ui.internal.mr;
+
+import java.util.Collection;
+
+import org.apache.hdt.core.launch.AbstractHadoopCluster;
+import org.apache.hdt.core.launch.IHadoopClusterListener;
+import org.apache.hdt.core.launch.IHadoopJob;
+import org.apache.hdt.core.launch.IJarModule;
+import org.apache.hdt.core.launch.IJobListener;
+import org.apache.hdt.ui.ImageLibrary;
+import org.apache.hdt.ui.internal.launch.JarModule;
+import org.apache.hdt.ui.internal.launch.ServerRegistry;
+import org.eclipse.jface.action.Action;
+import org.eclipse.jface.action.IAction;
+import org.eclipse.jface.action.IMenuListener;
+import org.eclipse.jface.action.IMenuManager;
+import org.eclipse.jface.action.MenuManager;
+import org.eclipse.jface.dialogs.MessageDialog;
+import org.eclipse.jface.viewers.ILabelProviderListener;
+import org.eclipse.jface.viewers.ISelection;
+import org.eclipse.jface.viewers.ISelectionChangedListener;
+import org.eclipse.jface.viewers.IStructuredSelection;
+import org.eclipse.jface.viewers.ITableLabelProvider;
+import org.eclipse.jface.viewers.ITreeContentProvider;
+import org.eclipse.jface.viewers.ITreeSelection;
+import org.eclipse.jface.viewers.SelectionChangedEvent;
+import org.eclipse.jface.viewers.TreeViewer;
+import org.eclipse.jface.viewers.Viewer;
+import org.eclipse.swt.SWT;
+import org.eclipse.swt.graphics.Image;
+import org.eclipse.swt.layout.GridData;
+import org.eclipse.swt.widgets.Composite;
+import org.eclipse.swt.widgets.Display;
+import org.eclipse.swt.widgets.Menu;
+import org.eclipse.swt.widgets.Tree;
+import org.eclipse.swt.widgets.TreeColumn;
+import org.eclipse.ui.IViewSite;
+import org.eclipse.ui.PartInitException;
+import org.eclipse.ui.actions.ActionFactory;
+import org.eclipse.ui.part.ViewPart;
+
+/**
+ * Map/Reduce locations view: displays all available Hadoop locations and the
+ * jobs running on or finished on those locations.
+ */
+public class ClusterView extends ViewPart implements ITreeContentProvider, ITableLabelProvider, IJobListener, IHadoopClusterListener {
+
+	/**
+	 * Deletion action: delete a Hadoop location, kill a running job or remove a
+	 * finished job entry
+	 */
+	class DeleteAction extends Action {
+
+		DeleteAction() {
+			setText("Delete");
+			setImageDescriptor(ImageLibrary.get("server.view.action.delete"));
+		}
+
+		/* @inheritDoc */
+		@Override
+		public void run() {
+			ISelection selection = getViewSite().getSelectionProvider().getSelection();
+			if ((selection != null) && (selection instanceof IStructuredSelection)) {
+				Object selItem = ((IStructuredSelection) selection).getFirstElement();
+
+				if (selItem instanceof AbstractHadoopCluster) {
+					AbstractHadoopCluster location = (AbstractHadoopCluster) selItem;
+					if (MessageDialog.openConfirm(Display.getDefault().getActiveShell(), "Confirm delete Hadoop location",
+							"Do you really want to remove the Hadoop location: " + location.getLocationName())) {
+						ServerRegistry.getInstance().removeServer(location);
+					}
+
+				} else if (selItem instanceof IHadoopJob) {
+
+					// kill the job
+					IHadoopJob job = (IHadoopJob) selItem;
+					if (job.isCompleted()) {
+						// Job already finished, remove the entry
+						job.getLocation().purgeJob(job);
+
+					} else {
+						// Job is running, kill the job?
+						if (MessageDialog.openConfirm(Display.getDefault().getActiveShell(), "Confirm kill running Job",
+								"Do you really want to kill running Job: " + job.getJobID())) {
+							job.kill();
+						}
+					}
+				}
+			}
+		}
+	}
+
+	/**
+	 * This object is the root content for this content provider
+	 */
+	private static final Object CONTENT_ROOT = new Object();
+
+	private final IAction deleteAction = new DeleteAction();
+
+	private final IAction editServerAction = new EditLocationAction(this);
+
+	private final IAction newLocationAction = new NewLocationAction();
+
+	private TreeViewer viewer;
+
+	public ClusterView() {
+	}
+
+	/* @inheritDoc */
+	@Override
+	public void init(IViewSite site) throws PartInitException {
+		super.init(site);
+	}
+
+	/* @inheritDoc */
+	@Override
+	public void dispose() {
+		ServerRegistry.getInstance().removeListener(this);
+	}
+
+	/**
+	 * Creates the columns for the view
+	 */
+	@Override
+	public void createPartControl(Composite parent) {
+		Tree main = new Tree(parent, SWT.SINGLE | SWT.FULL_SELECTION | SWT.H_SCROLL | SWT.V_SCROLL);
+		main.setHeaderVisible(true);
+		main.setLinesVisible(false);
+		main.setLayoutData(new GridData(GridData.FILL_BOTH));
+
+		TreeColumn serverCol = new TreeColumn(main, SWT.SINGLE);
+		serverCol.setText("Location");
+		serverCol.setWidth(300);
+		serverCol.setResizable(true);
+
+		TreeColumn locationCol = new TreeColumn(main, SWT.SINGLE);
+		locationCol.setText("Master node");
+		locationCol.setWidth(185);
+		locationCol.setResizable(true);
+
+		TreeColumn stateCol = new TreeColumn(main, SWT.SINGLE);
+		stateCol.setText("State");
+		stateCol.setWidth(95);
+		stateCol.setResizable(true);
+
+		TreeColumn statusCol = new TreeColumn(main, SWT.SINGLE);
+		statusCol.setText("Status");
+		statusCol.setWidth(300);
+		statusCol.setResizable(true);
+
+		viewer = new TreeViewer(main);
+		viewer.setContentProvider(this);
+		viewer.setLabelProvider(this);
+		viewer.setInput(CONTENT_ROOT); // placeholder root; the content comes from ServerRegistry
+
+		getViewSite().setSelectionProvider(viewer);
+
+		getViewSite().getActionBars().setGlobalActionHandler(ActionFactory.DELETE.getId(), deleteAction);
+		getViewSite().getActionBars().getToolBarManager().add(editServerAction);
+		getViewSite().getActionBars().getToolBarManager().add(newLocationAction);
+
+		createActions();
+		createContextMenu();
+	}
+
+	/**
+	 * Actions
+	 */
+	private void createActions() {
+		/*
+		 * addItemAction = new Action("Add...") { public void run() { addItem();
+		 * } }; addItemAction.setImageDescriptor(ImageLibrary
+		 * .get("server.view.location.new"));
+		 */
+		/*
+		 * deleteItemAction = new Action("Delete") { public void run() {
+		 * deleteItem(); } };
+		 * deleteItemAction.setImageDescriptor(getImageDescriptor
+		 * ("delete.gif"));
+		 * 
+		 * selectAllAction = new Action("Select All") { public void run() {
+		 * selectAll(); } };
+		 */
+		// Add selection listener.
+		viewer.addSelectionChangedListener(new ISelectionChangedListener() {
+			public void selectionChanged(SelectionChangedEvent event) {
+				updateActionEnablement();
+			}
+		});
+	}
+
+	private void addItem() {
+		System.out.printf("ADD ITEM\n");
+	}
+
+	private void updateActionEnablement() {
+		IStructuredSelection sel = (IStructuredSelection) viewer.getSelection();
+		// deleteItemAction.setEnabled(sel.size() > 0);
+	}
+
+	/**
+	 * Contextual menu
+	 */
+	private void createContextMenu() {
+		// Create menu manager.
+		MenuManager menuMgr = new MenuManager();
+		menuMgr.setRemoveAllWhenShown(true);
+		menuMgr.addMenuListener(new IMenuListener() {
+			public void menuAboutToShow(IMenuManager mgr) {
+				fillContextMenu(mgr);
+			}
+		});
+
+		// Create menu.
+		Menu menu = menuMgr.createContextMenu(viewer.getControl());
+		viewer.getControl().setMenu(menu);
+
+		// Register menu for extension.
+		getSite().registerContextMenu(menuMgr, viewer);
+	}
+
+	private void fillContextMenu(IMenuManager mgr) {
+		mgr.add(newLocationAction);
+		mgr.add(editServerAction);
+		mgr.add(deleteAction);
+		/*
+		 * mgr.add(new GroupMarker(IWorkbenchActionConstants.MB_ADDITIONS));
+		 * mgr.add(deleteItemAction); mgr.add(new Separator());
+		 * mgr.add(selectAllAction);
+		 */
+	}
+
+	/* @inheritDoc */
+	@Override
+	public void setFocus() {
+		viewer.getControl().setFocus();
+	}
+
+	/*
+	 * IHadoopServerListener implementation
+	 */
+
+	/* @inheritDoc */
+	public void serverChanged(AbstractHadoopCluster location, int type) {
+		Display.getDefault().syncExec(new Runnable() {
+			public void run() {
+				ClusterView.this.viewer.refresh();
+			}
+		});
+	}
+
+	/*
+	 * IStructuredContentProvider implementation
+	 */
+
+	/* @inheritDoc */
+	public void inputChanged(final Viewer viewer, Object oldInput, Object newInput) {
+		if (oldInput == CONTENT_ROOT)
+			ServerRegistry.getInstance().removeListener(this);
+		if (newInput == CONTENT_ROOT)
+			ServerRegistry.getInstance().addListener(this);
+	}
+
+	/**
+	 * The root elements displayed by this view are the existing Hadoop
+	 * locations
+	 */
+	/* @inheritDoc */
+	public Object[] getElements(Object inputElement) {
+		return ServerRegistry.getInstance().getServers().toArray();
+	}
+
+	/*
+	 * ITreeStructuredContentProvider implementation
+	 */
+
+	/**
+	 * Each location contains a child entry for each job it runs.
+	 */
+	/* @inheritDoc */
+	public Object[] getChildren(Object parent) {
+
+		if (parent instanceof AbstractHadoopCluster) {
+			AbstractHadoopCluster location = (AbstractHadoopCluster) parent;
+			location.addJobListener(this);
+			Collection<? extends IHadoopJob> jobs = location.getJobs();
+			return jobs.toArray();
+		}
+
+		return null;
+	}
+
+	/* @inheritDoc */
+	public Object getParent(Object element) {
+		if (element instanceof AbstractHadoopCluster) {
+			return CONTENT_ROOT;
+
+		} else if (element instanceof IHadoopJob) {
+			return ((IHadoopJob) element).getLocation();
+		}
+
+		return null;
+	}
+
+	/* @inheritDoc */
+	public boolean hasChildren(Object element) {
+		/* Only server entries have children */
+		return (element instanceof AbstractHadoopCluster);
+	}
+
+	/*
+	 * ITableLabelProvider implementation
+	 */
+
+	/* @inheritDoc */
+	public void addListener(ILabelProviderListener listener) {
+		// no listener handling
+	}
+
+	public boolean isLabelProperty(Object element, String property) {
+		return false;
+	}
+
+	/* @inheritDoc */
+	public void removeListener(ILabelProviderListener listener) {
+		// no listener handling
+	}
+
+	/* @inheritDoc */
+	public Image getColumnImage(Object element, int columnIndex) {
+		if ((columnIndex == 0) && (element instanceof AbstractHadoopCluster)) {
+			return ImageLibrary.getImage("server.view.location.entry");
+
+		} else if ((columnIndex == 0) && (element instanceof IHadoopJob)) {
+			return ImageLibrary.getImage("server.view.job.entry");
+		}
+		return null;
+	}
+
+	/* @inheritDoc */
+	public String getColumnText(Object element, int columnIndex) {
+		if (element instanceof AbstractHadoopCluster) {
+			AbstractHadoopCluster server = (AbstractHadoopCluster) element;
+
+			switch (columnIndex) {
+			case 0:
+				return server.getLocationName();
+			case 1:
+				return server.getMasterHostName().toString();
+			case 2:
+				return server.getState();
+			case 3:
+				return "";
+			}
+		} else if (element instanceof IHadoopJob) {
+			IHadoopJob job = (IHadoopJob) element;
+
+			switch (columnIndex) {
+			case 0:
+				return "" + job.getJobID();
+			case 1:
+				return "";
+			case 2:
+				return job.getState();
+			case 3:
+				return job.getStatus();
+			}
+		} else if (element instanceof JarModule) {
+			JarModule jar = (JarModule) element;
+
+			switch (columnIndex) {
+			case 0:
+				return jar.toString();
+			case 1:
+				return "Publishing jar to server...";
+			case 2:
+				return "";
+			}
+		}
+
+		return null;
+	}
+
+	/*
+	 * IJobListener (Map/Reduce Jobs listener) implementation
+	 */
+
+	/* @inheritDoc */
+	public void jobAdded(IHadoopJob job) {
+		viewer.refresh();
+	}
+
+	/* @inheritDoc */
+	public void jobRemoved(IHadoopJob job) {
+		viewer.refresh();
+	}
+
+	/* @inheritDoc */
+	public void jobChanged(IHadoopJob job) {
+		viewer.refresh(job);
+	}
+
+	/* @inheritDoc */
+	public void publishDone(IJarModule jar) {
+		viewer.refresh();
+	}
+
+	/* @inheritDoc */
+	public void publishStart(IJarModule jar) {
+		viewer.refresh();
+	}
+
+	/*
+	 * Miscellaneous
+	 */
+
+	/**
+	 * Return the currently selected server (null if there is no selection or if
+	 * the selection is not a server)
+	 * 
+	 * @return the currently selected server entry
+	 */
+	public AbstractHadoopCluster getSelectedServer() {
+		ITreeSelection selection = (ITreeSelection) viewer.getSelection();
+		Object first = selection.getFirstElement();
+		if (first instanceof AbstractHadoopCluster) {
+			return (AbstractHadoopCluster) first;
+		}
+		return null;
+	}
+
+}
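
Aside: ClusterView registers itself as both content provider and label provider
for its TreeViewer. The same wiring pattern, reduced to a self-contained sketch
with a hard-coded model standing in for ServerRegistry (all names below are
illustrative, not part of HDT):

    import org.eclipse.jface.viewers.ITreeContentProvider;
    import org.eclipse.jface.viewers.LabelProvider;
    import org.eclipse.jface.viewers.TreeViewer;
    import org.eclipse.jface.viewers.Viewer;
    import org.eclipse.swt.SWT;
    import org.eclipse.swt.layout.FillLayout;
    import org.eclipse.swt.widgets.Display;
    import org.eclipse.swt.widgets.Shell;

    public class MiniLocationsView {
        public static void main(String[] args) {
            Display display = new Display();
            Shell shell = new Shell(display);
            shell.setLayout(new FillLayout());

            TreeViewer viewer = new TreeViewer(shell, SWT.SINGLE);
            viewer.setContentProvider(new ITreeContentProvider() {
                // root elements: the "locations"
                public Object[] getElements(Object input) {
                    return new String[] { "location-1", "location-2" };
                }
                public Object[] getChildren(Object parent) { return new Object[0]; }
                public Object getParent(Object element) { return null; }
                public boolean hasChildren(Object element) { return false; }
                public void dispose() {}
                public void inputChanged(Viewer v, Object oldIn, Object newIn) {}
            });
            viewer.setLabelProvider(new LabelProvider()); // toString() labels
            viewer.setInput(new Object()); // placeholder, like CONTENT_ROOT above

            shell.open();
            while (!shell.isDisposed())
                if (!display.readAndDispatch()) display.sleep();
            display.dispose();
        }
    }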

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/29467b54/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/EditLocationAction.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/EditLocationAction.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/EditLocationAction.java
new file mode 100644
index 0000000..416241a
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/EditLocationAction.java
@@ -0,0 +1,72 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.ui.internal.mr;
+
+import org.apache.hdt.core.launch.AbstractHadoopCluster;
+import org.apache.hdt.ui.ImageLibrary;
+import org.apache.hdt.ui.internal.launch.HadoopLocationWizard;
+import org.eclipse.jface.action.Action;
+import org.eclipse.jface.wizard.Wizard;
+import org.eclipse.jface.wizard.WizardDialog;
+
+/**
+ * Editing server properties action
+ */
+public class EditLocationAction extends Action {
+
+	private ClusterView serverView;
+
+	public EditLocationAction(ClusterView serverView) {
+		this.serverView = serverView;
+
+		setText("Edit Hadoop location...");
+		setImageDescriptor(ImageLibrary.get("server.view.action.location.edit"));
+	}
+
+	@Override
+	public void run() {
+
+		final AbstractHadoopCluster server = serverView.getSelectedServer();
+		if (server == null)
+			return;
+
+		WizardDialog dialog = new WizardDialog(null, new Wizard() {
+			private HadoopLocationWizard page = new HadoopLocationWizard(server);
+
+			@Override
+			public void addPages() {
+				super.addPages();
+				setWindowTitle("Edit Hadoop location...");
+				addPage(page);
+			}
+
+			@Override
+			public boolean performFinish() {
+				page.performFinish();
+				return true;
+			}
+		});
+
+		dialog.create();
+		dialog.setBlockOnOpen(true);
+		dialog.open();
+
+		super.run();
+	}
+}
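
Note: EditLocationAction (and NewLocationAction below) wrap a single
HadoopLocationWizard page in an anonymous one-page Wizard. That pattern can be
factored out; a sketch of such a helper (illustrative, not part of HDT):

    import org.eclipse.jface.wizard.Wizard;
    import org.eclipse.jface.wizard.WizardDialog;
    import org.eclipse.jface.wizard.WizardPage;
    import org.eclipse.swt.widgets.Shell;

    public class SinglePageWizards {
        /** Opens the page in a blocking dialog; returns Window.OK or Window.CANCEL. */
        public static int open(Shell shell, final WizardPage page, final Runnable onFinish) {
            Wizard wizard = new Wizard() {
                @Override
                public void addPages() {
                    addPage(page);
                }
                @Override
                public boolean performFinish() {
                    onFinish.run();
                    return true;
                }
            };
            WizardDialog dialog = new WizardDialog(shell, wizard);
            dialog.create();
            dialog.setBlockOnOpen(true); // open() returns only after the dialog closes
            return dialog.open();
        }
    }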

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/29467b54/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/NewDriverWizard.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/NewDriverWizard.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/NewDriverWizard.java
new file mode 100644
index 0000000..14dcb49
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/NewDriverWizard.java
@@ -0,0 +1,99 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.ui.internal.mr;
+
+import org.eclipse.core.resources.IFile;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.core.runtime.IProgressMonitor;
+import org.eclipse.jdt.core.IJavaElement;
+import org.eclipse.jdt.internal.ui.wizards.NewElementWizard;
+import org.eclipse.jface.operation.IRunnableWithProgress;
+import org.eclipse.jface.viewers.IStructuredSelection;
+import org.eclipse.ui.INewWizard;
+import org.eclipse.ui.IWorkbench;
+
+/**
+ * Wizard for creating a new Driver class (a class that runs a MapReduce job).
+ * 
+ */
+
+public class NewDriverWizard extends NewElementWizard implements INewWizard,
+    IRunnableWithProgress {
+  private NewDriverWizardPage page;
+
+  /*
+   * @Override public boolean performFinish() { }
+   */
+  public void run(IProgressMonitor monitor) {
+    try {
+      page.createType(monitor);
+    } catch (CoreException e) {
+      // TODO Auto-generated catch block
+      e.printStackTrace();
+    } catch (InterruptedException e) {
+      // TODO Auto-generated catch block
+      e.printStackTrace();
+    }
+  }
+
+  public NewDriverWizard() {
+    setWindowTitle("New MapReduce Driver");
+  }
+
+  @Override
+  public void init(IWorkbench workbench, IStructuredSelection selection) {
+    super.init(workbench, selection);
+
+    page = new NewDriverWizardPage();
+    addPage(page);
+    page.setSelection(selection);
+  }
+
+  @Override
+  /**
+   * Performs any actions appropriate in response to the user having pressed the
+   * Finish button, or refuses if finishing now is not permitted.
+   */
+  public boolean performFinish() {
+    if (super.performFinish()) {
+      if (getCreatedElement() != null) {
+        selectAndReveal(page.getModifiedResource());
+        openResource((IFile) page.getModifiedResource());
+      }
+
+      return true;
+    } else {
+      return false;
+    }
+  }
+
+  @Override
+  /**
+   * Delegates to {@link #run(IProgressMonitor)}, which creates the driver type.
+   */
+  protected void finishPage(IProgressMonitor monitor)
+      throws InterruptedException, CoreException {
+    this.run(monitor);
+  }
+
+  @Override
+  public IJavaElement getCreatedElement() {
+    return page.getCreatedType().getPrimaryElement();
+  }
+}
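
For completeness, the wizard can also be launched programmatically, e.g. from a
command handler or a test harness; a sketch under the usual workbench
assumptions (the Shell would come from the active workbench window):

    import org.eclipse.jface.viewers.StructuredSelection;
    import org.eclipse.jface.wizard.WizardDialog;
    import org.eclipse.swt.widgets.Shell;
    import org.eclipse.ui.PlatformUI;

    public class OpenDriverWizard {
        public static void open(Shell shell) {
            NewDriverWizard wizard = new NewDriverWizard();
            wizard.init(PlatformUI.getWorkbench(), StructuredSelection.EMPTY);
            new WizardDialog(shell, wizard).open();
        }
    }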

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/29467b54/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/NewDriverWizardPage.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/NewDriverWizardPage.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/NewDriverWizardPage.java
new file mode 100644
index 0000000..4857529
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/NewDriverWizardPage.java
@@ -0,0 +1,264 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hdt.ui.internal.mr;
+
+import java.io.IOException;
+import java.util.ArrayList;
+
+import org.apache.hdt.ui.ImageLibrary;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.core.runtime.FileLocator;
+import org.eclipse.core.runtime.IProgressMonitor;
+import org.eclipse.core.runtime.IStatus;
+import org.eclipse.core.runtime.Path;
+import org.eclipse.jdt.core.IType;
+import org.eclipse.jdt.core.JavaModelException;
+import org.eclipse.jdt.core.search.SearchEngine;
+import org.eclipse.jdt.ui.IJavaElementSearchConstants;
+import org.eclipse.jdt.ui.JavaUI;
+import org.eclipse.jdt.ui.wizards.NewTypeWizardPage;
+import org.eclipse.jface.dialogs.ProgressMonitorDialog;
+import org.eclipse.jface.resource.ImageDescriptor;
+import org.eclipse.jface.viewers.IStructuredSelection;
+import org.eclipse.jface.window.Window;
+import org.eclipse.swt.SWT;
+import org.eclipse.swt.layout.GridData;
+import org.eclipse.swt.layout.GridLayout;
+import org.eclipse.swt.widgets.Button;
+import org.eclipse.swt.widgets.Composite;
+import org.eclipse.swt.widgets.Event;
+import org.eclipse.swt.widgets.Label;
+import org.eclipse.swt.widgets.Listener;
+import org.eclipse.swt.widgets.Text;
+import org.eclipse.ui.dialogs.SelectionDialog;
+
+/**
+ * Pre-fills the new MapReduce driver class with a template.
+ * 
+ */
+
+public class NewDriverWizardPage extends NewTypeWizardPage {
+  private Button isCreateMapMethod;
+
+  private Text reducerText;
+
+  private Text mapperText;
+
+  private final boolean showContainerSelector;
+
+  public NewDriverWizardPage() {
+    this(true);
+  }
+
+  public NewDriverWizardPage(boolean showContainerSelector) {
+    super(true, "MapReduce Driver");
+
+    this.showContainerSelector = showContainerSelector;
+    setTitle("MapReduce Driver");
+    setDescription("Create a new MapReduce driver");
+    setImageDescriptor(ImageLibrary.get("wizard.driver.new"));
+  }
+
+  public void setSelection(IStructuredSelection selection) {
+    initContainerPage(getInitialJavaElement(selection));
+    initTypePage(getInitialJavaElement(selection));
+  }
+
+  @Override
+  /**
+   * Creates the new type using the entered field values.
+   */
+  public void createType(IProgressMonitor monitor) throws CoreException,
+      InterruptedException {
+    super.createType(monitor);
+  }
+
+  @Override
+  protected void createTypeMembers(final IType newType, ImportsManager imports,
+      final IProgressMonitor monitor) throws CoreException {
+    super.createTypeMembers(newType, imports, monitor);
+    imports.addImport("org.apache.hadoop.fs.Path");
+    imports.addImport("org.apache.hadoop.io.Text");
+    imports.addImport("org.apache.hadoop.io.IntWritable");
+    imports.addImport("org.apache.hadoop.mapreduce.Job");
+    imports.addImport("org.apache.hadoop.mapreduce.lib.input.FileInputFormat");
+    imports.addImport("org.apache.hadoop.mapreduce.lib.output.FileOutputFormat");
+
+    /**
+     * TODO(jz) - move most code out of the runnable
+     */
+    getContainer().getShell().getDisplay().syncExec(new Runnable() {
+      public void run() {
+
+        String method = "public static void main(String[] args) throws IOException, InterruptedException, ClassNotFoundException {\n";
+        method += "  Job job = new Job();\n\n";
+        method += "  job.setJarByClass( ... );\n\n";
+        method += "  job.setJobName(\"a nice name\");\n\n";
+
+        method += "  FileInputFormat.setInputPaths(job, new Path(args[0]));\n";
+        method += "  FileOutputFormat.setOutputPath(job, new Path(args[1]));\n\n";
+        
+        if (mapperText.getText().length() > 0) {
+          method += "  job.setMapperClass(" + mapperText.getText()
+              + ".class);\n\n";
+        } else {
+          method += "  // TODO: specify a mapper\n  job.setMapperClass( ... );\n\n";
+        }
+        if (reducerText.getText().length() > 0) {
+          method += "  job.setReducerClass(" + reducerText.getText()
+              + ".class);\n\n";
+        } else {
+          method += "  // TODO: specify a reducer\n  job.setReducerClass( ... );\n\n";
+        }
+
+        method += "  job.setOutputKeyClass(Text.class);\n";
+        method += "  job.setOutputValueClass(IntWritable.class);\n\n";
+        
+        method += "  boolean success = job.waitForCompletion(true);\n";
+        method += "  System.exit(success ? 0 : 1);\n}";
+
+        try {
+          newType.createMethod(method, null, false, monitor);
+        } catch (JavaModelException e) {
+          // TODO Auto-generated catch block
+          e.printStackTrace();
+        }
+      }
+    });
+  }
+
+  public void createControl(Composite parent) {
+    // super.createControl(parent);
+
+    initializeDialogUnits(parent);
+    Composite composite = new Composite(parent, SWT.NONE);
+    GridLayout layout = new GridLayout();
+    layout.numColumns = 4;
+    composite.setLayout(layout);
+
+    createContainerControls(composite, 4);
+
+    createPackageControls(composite, 4);
+    createSeparator(composite, 4);
+    createTypeNameControls(composite, 4);
+
+    createSuperClassControls(composite, 4);
+    createSuperInterfacesControls(composite, 4);
+    createSeparator(composite, 4);
+
+    createMapperControls(composite);
+    createReducerControls(composite);
+
+    if (!showContainerSelector) {
+      setPackageFragmentRoot(null, false);
+      setSuperClass("java.lang.Object", false);
+      setSuperInterfaces(new ArrayList<String>(), false);
+    }
+
+    setControl(composite);
+
+    setFocus();
+    handleFieldChanged(CONTAINER);
+
+    // setSuperClass("org.apache.hadoop.mapred.MapReduceBase", true);
+    // setSuperInterfaces(Arrays.asList(new String[]{
+    // "org.apache.hadoop.mapred.Mapper" }), true);
+  }
+
+  @Override
+  protected void handleFieldChanged(String fieldName) {
+    super.handleFieldChanged(fieldName);
+
+    validate();
+  }
+
+  private void validate() {
+    if (showContainerSelector) {
+      updateStatus(new IStatus[] { fContainerStatus, fPackageStatus,
+          fTypeNameStatus, fSuperClassStatus, fSuperInterfacesStatus });
+    } else {
+      updateStatus(new IStatus[] { fTypeNameStatus, });
+    }
+  }
+
+  private void createMapperControls(Composite composite) {
+    this.mapperText = createBrowseClassControl(composite, "Ma&pper:",
+        "&Browse...", "org.apache.hadoop.mapreduce.Mapper", "Mapper Selection");
+  }
+
+  private void createReducerControls(Composite composite) {
+    this.reducerText = createBrowseClassControl(composite, "&Reducer:",
+        "Browse&...", "org.apache.hadoop.mapreduce.Reducer", "Reducer Selection");
+  }
+
+  private Text createBrowseClassControl(final Composite composite,
+      final String string, String browseButtonLabel,
+      final String baseClassName, final String dialogTitle) {
+    Label label = new Label(composite, SWT.NONE);
+    GridData data = new GridData(GridData.FILL_HORIZONTAL);
+    label.setText(string);
+    label.setLayoutData(data);
+
+    final Text text = new Text(composite, SWT.SINGLE | SWT.BORDER);
+    GridData data2 = new GridData(GridData.FILL_HORIZONTAL);
+    data2.horizontalSpan = 2;
+    text.setLayoutData(data2);
+
+    Button browse = new Button(composite, SWT.NONE);
+    browse.setText(browseButtonLabel);
+    GridData data3 = new GridData(GridData.FILL_HORIZONTAL);
+    browse.setLayoutData(data3);
+    browse.addListener(SWT.Selection, new Listener() {
+      public void handleEvent(Event event) {
+        IType baseType;
+        try {
+          baseType = getPackageFragmentRoot().getJavaProject().findType(
+              baseClassName);
+
+          // edit this to limit the scope
+          SelectionDialog dialog = JavaUI.createTypeDialog(
+              composite.getShell(), new ProgressMonitorDialog(composite
+                  .getShell()), SearchEngine.createHierarchyScope(baseType),
+              IJavaElementSearchConstants.CONSIDER_CLASSES, false);
+
+          dialog.setMessage("&Choose a type:");
+          dialog.setBlockOnOpen(true);
+          dialog.setTitle(dialogTitle);
+          dialog.open();
+
+          if ((dialog.getReturnCode() == Window.OK)
+              && (dialog.getResult().length > 0)) {
+            IType type = (IType) dialog.getResult()[0];
+            text.setText(type.getFullyQualifiedName());
+          }
+        } catch (JavaModelException e) {
+          // TODO Auto-generated catch block
+          e.printStackTrace();
+        }
+      }
+    });
+
+    if (!showContainerSelector) {
+      label.setEnabled(false);
+      text.setEnabled(false);
+      browse.setEnabled(false);
+    }
+
+    return text;
+  }
+}
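
Assembled, the template above produces a driver along these lines (the class
name and the TokenMapper/SumReducer types are hypothetical placeholders for
whatever the user enters in the wizard):

    import java.io.IOException;

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
    import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

    public class WordCountDriver { // hypothetical generated class name
        public static void main(String[] args) throws IOException,
                InterruptedException, ClassNotFoundException {
            Job job = new Job(); // as in the template; Job.getInstance() is the modern form
            job.setJarByClass(WordCountDriver.class);
            job.setJobName("a nice name");

            FileInputFormat.setInputPaths(job, new Path(args[0]));
            FileOutputFormat.setOutputPath(job, new Path(args[1]));

            job.setMapperClass(TokenMapper.class); // hypothetical Mapper subclass
            job.setReducerClass(SumReducer.class); // hypothetical Reducer subclass

            job.setOutputKeyClass(Text.class);
            job.setOutputValueClass(IntWritable.class);

            boolean success = job.waitForCompletion(true);
            System.exit(success ? 0 : 1);
        }
    }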

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/29467b54/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/NewLocationAction.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/NewLocationAction.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/NewLocationAction.java
new file mode 100644
index 0000000..20e269e
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/NewLocationAction.java
@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.ui.internal.mr;
+
+import org.apache.hdt.ui.ImageLibrary;
+import org.apache.hdt.ui.internal.launch.HadoopLocationWizard;
+import org.eclipse.jface.action.Action;
+import org.eclipse.jface.wizard.Wizard;
+import org.eclipse.jface.wizard.WizardDialog;
+
+/**
+ * Action corresponding to creating a new Hadoop location.
+ */
+
+public class NewLocationAction extends Action {
+	public NewLocationAction() {
+		setText("New Hadoop location...");
+		setImageDescriptor(ImageLibrary.get("server.view.action.location.new"));
+	}
+
+	@Override
+	public void run() {
+		WizardDialog dialog = new WizardDialog(null, new Wizard() {
+			private HadoopLocationWizard page = new HadoopLocationWizard();
+
+			@Override
+			public void addPages() {
+				super.addPages();
+				setWindowTitle("New Hadoop location...");
+				addPage(page);
+			}
+
+			@Override
+			public boolean performFinish() {
+				page.performFinish();
+				return true;
+			}
+
+		});
+
+		dialog.create();
+		dialog.setBlockOnOpen(true);
+		dialog.open();
+
+		super.run();
+	}
+}


[16/27] HDT 7 : - extraction of UI for each Version to individual plugins. - cleaning up existing UI

Posted by rs...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/c308e976/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/HadoopLocationWizard.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/HadoopLocationWizard.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/HadoopLocationWizard.java
index bcf5944..c21ce79 100644
--- a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/HadoopLocationWizard.java
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/HadoopLocationWizard.java
@@ -20,16 +20,14 @@ package org.apache.hdt.ui.internal.launch;
 
 import java.net.URI;
 import java.net.URISyntaxException;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.SortedMap;
-import java.util.TreeMap;
 
+import javax.swing.event.ChangeEvent;
+
+import org.apache.hdt.core.HadoopVersion;
 import org.apache.hdt.core.internal.hdfs.HDFSManager;
 import org.apache.hdt.core.launch.AbstractHadoopCluster;
+import org.apache.hdt.core.launch.AbstractHadoopCluster.ChangeListener;
+import org.apache.hdt.core.launch.AbstractHadoopCluster.HadoopConfigurationBuilder;
 import org.apache.hdt.core.launch.ConfProp;
 import org.eclipse.core.resources.IProject;
 import org.eclipse.core.resources.IWorkspaceRoot;
@@ -39,25 +37,18 @@ import org.eclipse.jface.dialogs.IMessageProvider;
 import org.eclipse.jface.dialogs.MessageDialog;
 import org.eclipse.jface.wizard.WizardPage;
 import org.eclipse.swt.SWT;
-import org.eclipse.swt.custom.ScrolledComposite;
 import org.eclipse.swt.events.ModifyEvent;
 import org.eclipse.swt.events.ModifyListener;
-import org.eclipse.swt.events.SelectionEvent;
-import org.eclipse.swt.events.SelectionListener;
 import org.eclipse.swt.graphics.Image;
 import org.eclipse.swt.layout.GridData;
 import org.eclipse.swt.layout.GridLayout;
 import org.eclipse.swt.widgets.Button;
 import org.eclipse.swt.widgets.Combo;
 import org.eclipse.swt.widgets.Composite;
-import org.eclipse.swt.widgets.Control;
 import org.eclipse.swt.widgets.Display;
 import org.eclipse.swt.widgets.Event;
-import org.eclipse.swt.widgets.Group;
 import org.eclipse.swt.widgets.Label;
 import org.eclipse.swt.widgets.Listener;
-import org.eclipse.swt.widgets.TabFolder;
-import org.eclipse.swt.widgets.TabItem;
 import org.eclipse.swt.widgets.Text;
 
 /**
@@ -69,9 +60,7 @@ import org.eclipse.swt.widgets.Text;
  */
 
 public class HadoopLocationWizard extends WizardPage {
-
-	public  static final String HADOOP_1 = "1.1";
-	public  static final String HADOOP_2 = "2.2";
+	
 	Image circle;
 
 	/**
@@ -85,6 +74,8 @@ public class HadoopLocationWizard extends WizardPage {
 	 * instance).
 	 */
 	private AbstractHadoopCluster original;
+	private Text locationName;
+	private Combo hadoopVersion;
 
 	/**
 	 * New Hadoop location wizard
@@ -94,7 +85,7 @@ public class HadoopLocationWizard extends WizardPage {
 
 		this.original = null;
 		try {
-			this.location = AbstractHadoopCluster.createCluster(ConfProp.PI_HADOOP_VERSION.defVal);
+			this.location = AbstractHadoopCluster.createCluster(HadoopVersion.Version1.getDisplayName());
 		} catch (CoreException e) {
 			e.printStackTrace();
 		}
@@ -110,7 +101,7 @@ public class HadoopLocationWizard extends WizardPage {
 		super("Create a new Hadoop location", "Edit Hadoop Location", null);
 		this.original = server;
 		try {
-			this.location = AbstractHadoopCluster.createCluster(server);
+			this.location = AbstractHadoopCluster.createCluster(server.getVersion().getDisplayName());
 		} catch (CoreException e) {
 			e.printStackTrace();
 		}
@@ -130,7 +121,7 @@ public class HadoopLocationWizard extends WizardPage {
 					public void run() {
 						HDFSManager.addServer(location.getLocationName(),
 								location.getConfPropValue(ConfProp.FS_DEFAULT_URI), location
-								.getConfPropValue(ConfProp.PI_USER_NAME), null,location.getVersion());
+								.getConfPropValue(ConfProp.PI_USER_NAME), null,location.getVersion().getDisplayName());
 					}
 				});
 				// New location
@@ -165,7 +156,7 @@ public class HadoopLocationWizard extends WizardPage {
 							}
 							HDFSManager.addServer(location.getLocationName(),
 									location.getConfPropValue(ConfProp.FS_DEFAULT_URI), location
-									.getConfPropValue(ConfProp.PI_USER_NAME), null,location.getVersion());
+									.getConfPropValue(ConfProp.PI_USER_NAME), null,location.getVersion().getDisplayName());
 						}
 					});
 				}
@@ -264,21 +255,100 @@ public class HadoopLocationWizard extends WizardPage {
 	 * Create the wizard
 	 */
 	/* @inheritDoc */
-	public void createControl(Composite parent) {
+	public void createControl(final Composite parent) {
 		setTitle("Define Hadoop location");
 		setDescription("Define the location of a Hadoop infrastructure " + "for running MapReduce applications.");
 
-		Composite panel = new Composite(parent, SWT.FILL);
+		final Composite panel = new Composite(parent, SWT.FILL);
 		GridLayout glayout = new GridLayout(2, false);
 		panel.setLayout(glayout);
+		final HadoopConfigurationBuilder uiConfigurationBuilder = location.getUIConfigurationBuilder();
+		uiConfigurationBuilder.setChangeListener(new ChangeListener() {
+			
+			@Override
+			public void notifyChange(ConfProp prop, String propValue) {
+				Display.getDefault().syncExec(new Runnable() {
+					public void run() {
+						 getContainer().updateButtons();
+					}});
+			}
+		});
+		/*
+		 * Location name
+		 */
+		{
+			Label label = new Label(panel, SWT.NONE);
+			label.setText( "&Location name:");
+			Text text = new Text(panel, SWT.SINGLE | SWT.BORDER);
+			GridData data = new GridData(GridData.FILL_HORIZONTAL);
+			text.setLayoutData(data);
+			text.setText(location.getConfPropValue(ConfProp.PI_LOCATION_NAME));
+			text.addModifyListener(new ModifyListener() {
+				@Override
+				public void modifyText(ModifyEvent e) {
+					final Text text = (Text) e.widget;
+					final ConfProp prop = (ConfProp) text.getData("hProp");
+					Display.getDefault().syncExec(new Runnable() {
+						public void run() {
+							uiConfigurationBuilder.notifyChange(ConfProp.PI_LOCATION_NAME,text.getText());
+						}
+					});
+				}
+			});
+			locationName=text;
+		}
+		/*
+		 * Hadoop version
+		 */
+		{
+			Label label = new Label(panel, SWT.NONE);
+			label.setText("&Hadoop Version:");
+			Combo options = new Combo(panel, SWT.BORDER | SWT.READ_ONLY);
+			for(HadoopVersion ver:HadoopVersion.values()){
+				options.add(ver.getDisplayName());
+			}
+			int pos=0;
+			for(String item:options.getItems()){
+				if(item.equalsIgnoreCase(location.getVersion().getDisplayName())){
+					options.select(pos);
+					break;
+				}
+				pos++;
+			}
+			options.setLayoutData(new GridData(GridData.FILL_HORIZONTAL));
+			options.addListener(SWT.Selection, new Listener() {
+				@Override
+				public void handleEvent(Event event) {
+					final String selection = hadoopVersion.getText();
+					if (location == null || !selection.equals(location.getVersion().getDisplayName())) {
+						Display.getDefault().syncExec(new Runnable() {
+							@Override
+							public void run() {
+								try {
+									location = AbstractHadoopCluster.createCluster(selection);
+									location.setConfPropValue(ConfProp.PI_HADOOP_VERSION, selection);
+									location.setConfPropValue(ConfProp.PI_LOCATION_NAME, locationName.getText());
+									panel.dispose();
+									createControl(parent);
+									parent.pack();
+									parent.getParent().layout(true);
+								} catch (CoreException e) {
+									MessageDialog.openError(Display.getDefault().getActiveShell(), "HDFS Error", "Unable to create HDFS site :"
+											+ e.getMessage());
+								}
+							}
+						});
+					}
 
-		TabMediator mediator = new TabMediator(panel);
+				}
+			});
+			hadoopVersion=options;
+		}
 		{
-			GridData gdata = new GridData(GridData.FILL_BOTH);
-			gdata.horizontalSpan = 2;
-			mediator.folder.setLayoutData(gdata);
+			uiConfigurationBuilder.buildControl(panel);
+			this.setControl(panel);
 		}
-		this.setControl(panel /* mediator.folder */);
+		
 		{
 			final Button btn = new Button(panel, SWT.NONE);
 			btn.setText("&Load from file");
@@ -301,739 +371,8 @@ public class HadoopLocationWizard extends WizardPage {
 				}
 			});
 		}
-	}
-
-	private interface TabListener {
-		void notifyChange(ConfProp prop, String propValue);
-		void reloadData();
-	}
-
-	/*
-	 * Mediator pattern to keep tabs synchronized with each other and with the
-	 * location state.
-	 */
-
-	private class TabMediator {
-		TabFolder folder;
-
-		private Set<TabListener> tabs = new HashSet<TabListener>();
-
-		TabMediator(Composite parent) {
-			folder = new TabFolder(parent, SWT.NONE);
-			tabs.add(new TabMain(this));
-			tabs.add(new TabAdvanced(this));
-		}
-
-		/**
-		 * Implements change notifications from any tab: update the location
-		 * state and other tabs
-		 * 
-		 * @param source
-		 *            origin of the notification (one of the tree tabs)
-		 * @param propName
-		 *            modified property
-		 * @param propValue
-		 *            new value
-		 */
-		void notifyChange(TabListener source, final ConfProp prop, final String propValue) {
-			// Ignore notification when no change
-			String oldValue = location.getConfPropValue(prop);
-			if ((oldValue != null) && oldValue.equals(propValue))
-				return;
-
-			location.setConfPropValue(prop, propValue);
-			Display.getDefault().syncExec(new Runnable() {
-				public void run() {
-					getContainer().updateButtons();
-				}
-			});
-
-			this.fireChange(source, prop, propValue);
-
-			/*
-			 * Now we deal with dependencies between settings
-			 */
-			final String jobTrackerHost = location.getConfPropValue(ConfProp.PI_JOB_TRACKER_HOST);
-			final String jobTrackerPort = location.getConfPropValue(ConfProp.PI_JOB_TRACKER_PORT);
-			final String nameNodeHost = location.getConfPropValue(ConfProp.PI_NAME_NODE_HOST);
-			final String nameNodePort = location.getConfPropValue(ConfProp.PI_NAME_NODE_PORT);
-			final boolean colocate = location.getConfPropValue(ConfProp.PI_COLOCATE_MASTERS).equalsIgnoreCase("yes");
-			final String jobTrackerURI = location.getConfPropValue(ConfProp.JOB_TRACKER_URI) ;
-			final String fsDefaultURI = location.getConfPropValue(ConfProp.FS_DEFAULT_URI);
-			final String socksServerURI = location.getConfPropValue(ConfProp.SOCKS_SERVER);
-			final boolean socksProxyEnable = location.getConfPropValue(ConfProp.PI_SOCKS_PROXY_ENABLE).equalsIgnoreCase("yes");
-			final String socksProxyHost = location.getConfPropValue(ConfProp.PI_SOCKS_PROXY_HOST);
-			final String socksProxyPort = location.getConfPropValue(ConfProp.PI_SOCKS_PROXY_PORT);
-
-			Display.getDefault().syncExec(new Runnable() {
-				public void run() {
-					switch (prop) {
-					case PI_JOB_TRACKER_HOST: {
-						if (colocate)
-							notifyChange(null, ConfProp.PI_NAME_NODE_HOST, jobTrackerHost);
-						String newJobTrackerURI = String.format("%s:%s", jobTrackerHost, jobTrackerPort);
-						notifyChange(null, ConfProp.JOB_TRACKER_URI, newJobTrackerURI);
-						break;
-					}
-					case PI_JOB_TRACKER_PORT: {
-						String newJobTrackerURI = String.format("%s:%s", jobTrackerHost, jobTrackerPort);
-						notifyChange(null, ConfProp.JOB_TRACKER_URI, newJobTrackerURI);
-						break;
-					}
-					case PI_NAME_NODE_HOST: {
-						String newHDFSURI = String.format("hdfs://%s:%s/", nameNodeHost, nameNodePort);
-						notifyChange(null, ConfProp.FS_DEFAULT_URI, newHDFSURI);
-
-						// Break colocation if someone force the DFS Master
-						if (!colocate && !nameNodeHost.equals(jobTrackerHost))
-							notifyChange(null, ConfProp.PI_COLOCATE_MASTERS, "no");
-						break;
-					}
-					case PI_NAME_NODE_PORT: {
-						String newHDFSURI = String.format("hdfs://%s:%s/", nameNodeHost, nameNodePort);
-						notifyChange(null, ConfProp.FS_DEFAULT_URI, newHDFSURI);
-						break;
-					}
-					case PI_SOCKS_PROXY_HOST: {
-						String newSocksProxyURI = String.format("%s:%s", socksProxyHost, socksProxyPort);
-						notifyChange(null, ConfProp.SOCKS_SERVER, newSocksProxyURI);
-						break;
-					}
-					case PI_SOCKS_PROXY_PORT: {
-						String newSocksProxyURI = String.format("%s:%s", socksProxyHost, socksProxyPort);
-						notifyChange(null, ConfProp.SOCKS_SERVER, newSocksProxyURI);
-						break;
-					}
-					case JOB_TRACKER_URI: {
-						String[] strs = jobTrackerURI.split(":", 2);
-						String host = strs[0];
-						String port = (strs.length == 2) ? strs[1] : "";
-						notifyChange(null, ConfProp.PI_JOB_TRACKER_HOST, host);
-						notifyChange(null, ConfProp.PI_JOB_TRACKER_PORT, port);
-						break;
-					}
-					case FS_DEFAULT_URI: {
-						try {
-							URI uri = new URI(fsDefaultURI);
-							if (uri.getScheme().equals("hdfs")) {
-								String host = uri.getHost();
-								String port = Integer.toString(uri.getPort());
-								notifyChange(null, ConfProp.PI_NAME_NODE_HOST, host);
-								notifyChange(null, ConfProp.PI_NAME_NODE_PORT, port);
-							}
-						} catch (URISyntaxException use) {
-							// Ignore the update!
-						}
-						break;
-					}
-					case SOCKS_SERVER: {
-						String[] strs = socksServerURI.split(":", 2);
-						String host = strs[0];
-						String port = (strs.length == 2) ? strs[1] : "";
-						notifyChange(null, ConfProp.PI_SOCKS_PROXY_HOST, host);
-						notifyChange(null, ConfProp.PI_SOCKS_PROXY_PORT, port);
-						break;
-					}
-					case PI_COLOCATE_MASTERS: {
-						if (colocate)
-							notifyChange(null, ConfProp.PI_NAME_NODE_HOST, jobTrackerHost);
-						break;
-					}
-					case PI_SOCKS_PROXY_ENABLE: {
-						if (socksProxyEnable) {
-							notifyChange(null, ConfProp.SOCKET_FACTORY_DEFAULT, "org.apache.hadoop.net.SocksSocketFactory");
-						} else {
-							notifyChange(null, ConfProp.SOCKET_FACTORY_DEFAULT, "org.apache.hadoop.net.StandardSocketFactory");
-						}
-						break;
-					}					
-					}
-				}
-			});
-
-		}
-
-		/**
-		 * Change notifications on properties (by name). A property might not be
-		 * reflected as a ConfProp enum. If it is, the notification is forwarded
-		 * to the ConfProp notifyChange method. If not, it is processed here.
-		 * 
-		 * @param source
-		 * @param propName
-		 * @param propValue
-		 */
-		void notifyChange(TabListener source, String propName, String propValue) {
-			ConfProp prop = location.getConfPropForName(propName);
-			if (prop != null)
-				notifyChange(source, prop, propValue);
-			else
-				location.setConfPropValue(propName, propValue);
-		}
-
-		/**
-		 * Broadcast a property change to all registered tabs. If a tab is
-		 * identified as the source of the change, this tab will not be
-		 * notified.
-		 * 
-		 * @param source
-		 *            TODO
-		 * @param prop
-		 * @param value
-		 */
-		private void fireChange(TabListener source, ConfProp prop, String value) {
-			for (TabListener tab : tabs) {
-				if (tab != source)
-					tab.notifyChange(prop, value);
-			}
-		}
-
-	}
-
-	/**
-	 * Create a SWT Text component for the given {@link ConfProp} text
-	 * configuration property.
-	 * 
-	 * @param listener
-	 * @param parent
-	 * @param prop
-	 * @return
-	 */
-	private Text createConfText(ModifyListener listener, Composite parent, ConfProp prop) {
-		Text text = new Text(parent, SWT.SINGLE | SWT.BORDER);
-		GridData data = new GridData(GridData.FILL_HORIZONTAL);
-		text.setLayoutData(data);
-		text.setData("hProp",prop);
-		text.setText(location.getConfPropValue(prop));
-		text.addModifyListener(listener);
-
-		return text;
-	}
-
-	/**
-	 * Create a SWT Checked Button component for the given {@link ConfProp}
-	 * boolean configuration property.
-	 * 
-	 * @param listener
-	 * @param parent
-	 * @param prop
-	 * @return
-	 */
-	private Button createConfCheckButton(SelectionListener listener, Composite parent, ConfProp prop, String text) {
-		Button button = new Button(parent, SWT.CHECK);
-		button.setText(text);
-		button.setData("hProp", prop);
-		button.setSelection(location.getConfPropValue(prop).equalsIgnoreCase("yes"));
-		button.addSelectionListener(listener);
-		return button;
-	}
-
-	/**
-	 * Create editor entry for the given configuration property. The editor is a
-	 * couple (Label, Text).
-	 * 
-	 * @param listener
-	 *            the listener to trigger on property change
-	 * @param parent
-	 *            the SWT parent container
-	 * @param prop
-	 *            the property to create an editor for
-	 * @param labelText
-	 *            a label (null will defaults to the property name)
-	 * 
-	 * @return a SWT Text field
-	 */
-	private Text createConfLabelText(ModifyListener listener, Composite parent, ConfProp prop, String labelText) {
-		Label label = new Label(parent, SWT.NONE);
-		if (labelText == null)
-			labelText = location.getConfPropName(prop);
-		label.setText(labelText);
-		return createConfText(listener, parent, prop);
-	}
-
-	/**
-	 * Create an editor entry for the given configuration name
-	 * 
-	 * @param listener
-	 *            the listener to trigger on property change
-	 * @param parent
-	 *            the SWT parent container
-	 * @param propName
-	 *            the name of the property to create an editor for
-	 * @param labelText
-	 *            a label (null will defaults to the property name)
-	 * 
-	 * @return a SWT Text field
-	 */
-	private Text createConfNameEditor(ModifyListener listener, Composite parent, String propName, String labelText) {
-
-		{
-			ConfProp prop = location.getConfPropForName(propName);
-			if (prop != null)
-				return createConfLabelText(listener, parent, prop, labelText);
-		}
-
-		Label label = new Label(parent, SWT.NONE);
-		if (labelText == null)
-			labelText = propName;
-		label.setText(labelText);
-
-		Text text = new Text(parent, SWT.SINGLE | SWT.BORDER);
-		GridData data = new GridData(GridData.FILL_HORIZONTAL);
-		text.setLayoutData(data);
-		text.setData("hPropName", propName);
-		text.setText(location.getConfPropValue(propName));
-		text.addModifyListener(listener);
-
-		return text;
-	}
-
-	/**
-	 * Main parameters of the Hadoop location: <li>host and port of the
-	 * Map/Reduce master (Job tracker) <li>host and port of the DFS master (Name
-	 * node) <li>SOCKS proxy
-	 */
-	private class TabMain implements TabListener, ModifyListener, SelectionListener {
-
-		/**
-		 * 
-		 */
 		
-
-		TabMediator mediator;
-
-		Text locationName;
-		
-		Combo hadoopVersion;
-
-		Text textJTHost;
-		
-
-		Text textNNHost;
-
-		Button colocateMasters;
-
-		Text textJTPort;
-
-		Text textNNPort;
-
-		Text userName;
-
-		Button useSocksProxy;
-
-		Text socksProxyHost;
-
-		Text socksProxyPort;
-
-		private Group groupMR;
-
-		TabMain(TabMediator mediator) {
-			this.mediator = mediator;
-			TabItem tab = new TabItem(mediator.folder, SWT.NONE);
-			tab.setText("General");
-			tab.setToolTipText("General location parameters");
-			tab.setImage(circle);
-			tab.setControl(createControl(mediator.folder));
-		}
-
-		private Control createControl(Composite parent) {
-
-			Composite panel = new Composite(parent, SWT.FILL);
-			panel.setLayout(new GridLayout(2, false));
-
-			GridData data;
-
-			/*
-			 * Location name
-			 */
-			{
-				Composite subpanel = new Composite(panel, SWT.FILL);
-				subpanel.setLayout(new GridLayout(2, false));
-				data = new GridData();
-				data.horizontalSpan = 2;
-				data.horizontalAlignment = SWT.FILL;
-				subpanel.setLayoutData(data);
-
-				locationName = createConfLabelText(this, subpanel, ConfProp.PI_LOCATION_NAME, "&Location name:");
-			}
-			/*
-			 * Hadoop version
-			 */
-			{
-				Composite subpanel = new Composite(panel, SWT.FILL);
-				subpanel.setLayout(new GridLayout(2, false));
-				data = new GridData();
-				data.horizontalSpan = 2;
-				data.horizontalAlignment = SWT.FILL;
-				subpanel.setLayoutData(data);
-				
-				Label label = new Label(subpanel, SWT.NONE);
-				label.setText("&Hadoop Version:");
-				Combo options =  new Combo (subpanel, SWT.BORDER | SWT.READ_ONLY);
-				options.add (HADOOP_1);
-				options.add (HADOOP_2);
-				options.select(0);
-				options.setLayoutData(new GridData(GridData.FILL_HORIZONTAL));
-				options.addListener (SWT.Selection, new Listener () {
-					@Override
-					public void handleEvent(Event event) {
-						final String selection = hadoopVersion.getText();
-						if (location == null || !selection.equals(location.getVersion())) {
-							Display.getDefault().syncExec(new Runnable() {
-
-								@Override
-								public void run() {
-									try {
-										location = AbstractHadoopCluster.createCluster(selection);
-										location.setConfPropValue(ConfProp.PI_HADOOP_VERSION, selection);
-										location.setConfPropValue(ConfProp.PI_LOCATION_NAME, "");
-										for (TabListener tab : mediator.tabs) {
-											tab.reloadData();
-										}
-									} catch (CoreException e) {
-										MessageDialog.openError(Display.getDefault().getActiveShell(), "HDFS Error", "Unable to create HDFS site :"
-												+ e.getMessage());
-									}
-								}
-							});
-						}
-
-					}
-				});
-				hadoopVersion = options;
-			}
-			
-			/*
-			 * Map/Reduce group
-			 */
-			{
-				groupMR = new Group(panel, SWT.SHADOW_NONE);
-				groupMR.setText("Map/Reduce Master");
-				groupMR.setToolTipText("Address of the Map/Reduce master node " + "(the Job Tracker).");
-				GridLayout layout = new GridLayout(2, false);
-				groupMR.setLayout(layout);
-				data = new GridData();
-				data.verticalAlignment = SWT.FILL;
-				data.horizontalAlignment = SWT.CENTER;
-				data.widthHint = 250;
-				groupMR.setLayoutData(data);
-
-				// Job Tracker host
-				Label label = new Label(groupMR, SWT.NONE);
-				label.setText("Host:");
-				data = new GridData(GridData.BEGINNING, GridData.CENTER, false, true);
-				label.setLayoutData(data);
-
-				textJTHost = createConfText(this, groupMR, ConfProp.PI_JOB_TRACKER_HOST);
-				data = new GridData(GridData.FILL, GridData.CENTER, true, true);
-				textJTHost.setLayoutData(data);
-
-				// Job Tracker port
-				label = new Label(groupMR, SWT.NONE);
-				label.setText("Port:");
-				data = new GridData(GridData.BEGINNING, GridData.CENTER, false, true);
-				label.setLayoutData(data);
-
-				textJTPort = createConfText(this, groupMR, ConfProp.PI_JOB_TRACKER_PORT);
-				data = new GridData(GridData.FILL, GridData.CENTER, true, true);
-				textJTPort.setLayoutData(data);
-			}
-
-			/*
-			 * DFS group
-			 */
-			{
-				Group groupDFS = new Group(panel, SWT.SHADOW_NONE);
-				groupDFS.setText("DFS Master");
-				groupDFS.setToolTipText("Address of the Distributed FileSystem " + "master node (the Name Node).");
-				GridLayout layout = new GridLayout(2, false);
-				groupDFS.setLayout(layout);
-				data = new GridData();
-				data.horizontalAlignment = SWT.CENTER;
-				data.widthHint = 250;
-				groupDFS.setLayoutData(data);
-
-				colocateMasters = createConfCheckButton(this, groupDFS, ConfProp.PI_COLOCATE_MASTERS, "Use M/R Master host");
-				data = new GridData();
-				data.horizontalSpan = 2;
-				colocateMasters.setLayoutData(data);
-
-				// Job Tracker host
-				Label label = new Label(groupDFS, SWT.NONE);
-				data = new GridData();
-				label.setText("Host:");
-				label.setLayoutData(data);
-
-				textNNHost = createConfText(this, groupDFS, ConfProp.PI_NAME_NODE_HOST);
-
-				// Job Tracker port
-				label = new Label(groupDFS, SWT.NONE);
-				data = new GridData();
-				label.setText("Port:");
-				label.setLayoutData(data);
-
-				textNNPort = createConfText(this, groupDFS, ConfProp.PI_NAME_NODE_PORT);
-			}
-
-			{
-				Composite subpanel = new Composite(panel, SWT.FILL);
-				subpanel.setLayout(new GridLayout(2, false));
-				data = new GridData();
-				data.horizontalSpan = 2;
-				data.horizontalAlignment = SWT.FILL;
-				subpanel.setLayoutData(data);
-
-				userName = createConfLabelText(this, subpanel, ConfProp.PI_USER_NAME, "&User name:");
-			}
-
-			// SOCKS proxy group
-			{
-				Group groupSOCKS = new Group(panel, SWT.SHADOW_NONE);
-				groupSOCKS.setText("SOCKS proxy");
-				groupSOCKS.setToolTipText("Address of the SOCKS proxy to use " + "to connect to the infrastructure.");
-				GridLayout layout = new GridLayout(2, false);
-				groupSOCKS.setLayout(layout);
-				data = new GridData();
-				data.horizontalAlignment = SWT.CENTER;
-				data.horizontalSpan = 2;
-				data.widthHint = 250;
-				groupSOCKS.setLayoutData(data);
-
-				useSocksProxy = createConfCheckButton(this, groupSOCKS, ConfProp.PI_SOCKS_PROXY_ENABLE, "Enable SOCKS proxy");
-				data = new GridData();
-				data.horizontalSpan = 2;
-				useSocksProxy.setLayoutData(data);
-
-				// SOCKS proxy host
-				Label label = new Label(groupSOCKS, SWT.NONE);
-				data = new GridData();
-				label.setText("Host:");
-				label.setLayoutData(data);
-
-				socksProxyHost = createConfText(this, groupSOCKS, ConfProp.PI_SOCKS_PROXY_HOST);
-
-				// SOCKS proxy port
-				label = new Label(groupSOCKS, SWT.NONE);
-				data = new GridData();
-				label.setText("Port:");
-				label.setLayoutData(data);
-
-				socksProxyPort = createConfText(this, groupSOCKS, ConfProp.PI_SOCKS_PROXY_PORT);
-			}
-
-			// Update the state of all widgets according to the current values!
-			reloadConfProp(ConfProp.PI_COLOCATE_MASTERS);
-			reloadConfProp(ConfProp.PI_SOCKS_PROXY_ENABLE);
-			reloadConfProp(ConfProp.PI_HADOOP_VERSION);
-
-			return panel;
-		}
-
-		/**
-		 * Reload the given configuration property value
-		 * 
-		 * @param prop
-		 */
-		private void reloadConfProp(ConfProp prop) {
-			this.notifyChange(prop, location.getConfPropValue(prop));
-		}
-		
-		@Override
-		public void reloadData() {
-			if (HADOOP_2.equals(hadoopVersion.getText())) {
-				groupMR.setText("Resource Manager Master");
-				groupMR.setToolTipText("Address of the Resouce manager node ");
-			} else {
-				groupMR.setText("Map/Reduce Master");
-				groupMR.setToolTipText("Address of the Map/Reduce master node " + "(the Job Tracker).");
-			}
-			groupMR.layout(true);
-			notifyChange(ConfProp.PI_JOB_TRACKER_HOST,location.getConfPropValue(ConfProp.PI_JOB_TRACKER_HOST));
-			notifyChange(ConfProp.PI_JOB_TRACKER_PORT,location.getConfPropValue(ConfProp.PI_JOB_TRACKER_PORT));
-			notifyChange(ConfProp.PI_USER_NAME,location.getConfPropValue(ConfProp.PI_USER_NAME));
-			notifyChange(ConfProp.PI_NAME_NODE_HOST,location.getConfPropValue(ConfProp.PI_NAME_NODE_HOST));
-			notifyChange(ConfProp.PI_USER_NAME,location.getConfPropValue(ConfProp.PI_USER_NAME));
-			notifyChange(ConfProp.PI_COLOCATE_MASTERS,location.getConfPropValue(ConfProp.PI_COLOCATE_MASTERS));
-			notifyChange(ConfProp.PI_SOCKS_PROXY_ENABLE,location.getConfPropValue(ConfProp.PI_SOCKS_PROXY_ENABLE));
-			notifyChange(ConfProp.PI_SOCKS_PROXY_HOST,location.getConfPropValue(ConfProp.PI_SOCKS_PROXY_HOST));
-			notifyChange(ConfProp.PI_SOCKS_PROXY_PORT,location.getConfPropValue(ConfProp.PI_SOCKS_PROXY_PORT));
-			notifyChange(ConfProp.PI_LOCATION_NAME,location.getConfPropValue(ConfProp.PI_LOCATION_NAME));
-		}
-
-		public void notifyChange(ConfProp prop, String propValue) {
-			switch (prop) {
-			case PI_JOB_TRACKER_HOST: {
-				textJTHost.setText(propValue);
-				break;
-			}
-			case PI_JOB_TRACKER_PORT: {
-				textJTPort.setText(propValue);
-				break;
-			}
-			case PI_LOCATION_NAME: {
-				locationName.setText(propValue);
-				break;
-			}
-			case PI_USER_NAME: {
-				userName.setText(propValue);
-				break;
-			}
-			case PI_COLOCATE_MASTERS: {
-				if (colocateMasters != null) {
-					boolean colocate = propValue.equalsIgnoreCase("yes");
-					colocateMasters.setSelection(colocate);
-					if (textNNHost != null) {
-						textNNHost.setEnabled(!colocate);
-					}
-				}
-				break;
-			}
-			case PI_NAME_NODE_HOST: {
-				textNNHost.setText(propValue);
-				break;
-			}
-			case PI_NAME_NODE_PORT: {
-				textNNPort.setText(propValue);
-				break;
-			}
-			case PI_SOCKS_PROXY_ENABLE: {
-				if (useSocksProxy != null) {
-					boolean useProxy = propValue.equalsIgnoreCase("yes");
-					useSocksProxy.setSelection(useProxy);
-					if (socksProxyHost != null)
-						socksProxyHost.setEnabled(useProxy);
-					if (socksProxyPort != null)
-						socksProxyPort.setEnabled(useProxy);
-				}
-				break;
-			}
-			case PI_SOCKS_PROXY_HOST: {
-				socksProxyHost.setText(propValue);
-				break;
-			}
-			case PI_SOCKS_PROXY_PORT: {
-				socksProxyPort.setText(propValue);
-				break;
-			}			
-			}
-		}
-
-		
-		/* @inheritDoc */
-		public void modifyText(ModifyEvent e) {
-			final Text text = (Text) e.widget;
-			final ConfProp prop = (ConfProp) text.getData("hProp");
-			Display.getDefault().syncExec(new Runnable() {
-				public void run() {
-					mediator.notifyChange(TabMain.this, prop, text.getText());
-				}
-			});
-		}
-
-		/* @inheritDoc */
-		public void widgetDefaultSelected(SelectionEvent e) {
-			this.widgetSelected(e);
-		}
-
-		/* @inheritDoc */
-		public void widgetSelected(SelectionEvent e) {
-			final Button button = (Button) e.widget;
-			final ConfProp prop = (ConfProp) button.getData("hProp");
-
-			Display.getDefault().syncExec(new Runnable() {
-				public void run() {
-					// We want to receive the update also!
-					mediator.notifyChange(null, prop, button.getSelection() ? "yes" : "no");
-				}
-			});
-		}
-
-	}
-
-	private class TabAdvanced implements TabListener, ModifyListener {
-		TabMediator mediator;
-		private Composite panel;
-		private Map<String, Text> textMap = new TreeMap<String, Text>();
-
-		TabAdvanced(TabMediator mediator) {
-			this.mediator = mediator;
-			TabItem tab = new TabItem(mediator.folder, SWT.NONE);
-			tab.setText("Advanced parameters");
-			tab.setToolTipText("Access to advanced Hadoop parameters");
-			tab.setImage(circle);
-			tab.setControl(createControl(mediator.folder));
-
-		}
-
-		private Control createControl(Composite parent) {
-			ScrolledComposite sc = new ScrolledComposite(parent, SWT.BORDER | SWT.H_SCROLL | SWT.V_SCROLL);
-			panel=buildPanel(sc);
-			sc.setContent(panel);
-			sc.setExpandHorizontal(true);
-			sc.setExpandVertical(true);
-			sc.setMinSize(640, 480);
-			sc.setMinSize(panel.computeSize(SWT.DEFAULT, SWT.DEFAULT));
-			return sc;
-		}
-		
-		@Override
-		public void reloadData() {
-			ScrolledComposite parent = (ScrolledComposite)panel.getParent();
-			panel.dispose();
-			Composite panel = buildPanel(parent);
-			parent.setContent(panel);
-			parent.setMinSize(panel.computeSize(SWT.DEFAULT, SWT.DEFAULT));
-			parent.pack();
-			parent.layout(true);
-			this.panel=panel;
-		}
-
-		private Composite buildPanel(Composite parent) {
-			Composite panel = new Composite(parent, SWT.NONE);
-			GridLayout layout = new GridLayout();
-			layout.numColumns = 2;
-			layout.makeColumnsEqualWidth = false;
-			panel.setLayout(layout);
-			panel.setLayoutData(new GridData(GridData.FILL, GridData.FILL, true, true, 1, 1));
-
-			// Sort by property name
-			SortedMap<String, String> map = new TreeMap<String, String>();
-			Iterator<Entry<String, String>> it = location.getConfiguration();
-			while (it.hasNext()) {
-				Entry<String, String> entry = it.next();
-				map.put(entry.getKey(), entry.getValue());
-			}
-
-			for (Entry<String, String> entry : map.entrySet()) {
-				Text text = createConfNameEditor(this, panel, entry.getKey(), null);
-				textMap.put(entry.getKey(), text);
-			}
-			return panel;
-		}
-		
-
-		public void notifyChange(ConfProp prop, final String propValue) {
-			Text text = textMap.get(location.getConfPropName(prop));
-			text.setText(propValue);
-		}
-
-		public void modifyText(ModifyEvent e) {
-			final Text text = (Text) e.widget;
-			Object hProp = text.getData("hProp");
-			final ConfProp prop = (hProp != null) ? (ConfProp) hProp : null;
-			Object hPropName = text.getData("hPropName");
-			final String propName = (hPropName != null) ? (String) hPropName : null;
-
-			Display.getDefault().syncExec(new Runnable() {
-				public void run() {
-					if (prop != null)
-						mediator.notifyChange(TabAdvanced.this, prop, text.getText());
-					else
-						mediator.notifyChange(TabAdvanced.this, propName, text.getText());
-				}
-			});
-		}
-
-	
+		this.setControl(panel);
 	}
 
 }


[11/27] git commit: HDT-55 - Adding Error Handling to ZooKeeper Actions - Modified actions to log errors and show an error dialog - Do not add a ZooKeeper server if the connection fails

Posted by rs...@apache.org.
HDT-55 - Adding Error Handling to ZooKeeper Actions - Modified actions to log errors and show an error dialog - Do not add a ZooKeeper server if the connection fails


Project: http://git-wip-us.apache.org/repos/asf/incubator-hdt/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hdt/commit/08355408
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hdt/tree/08355408
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hdt/diff/08355408

Branch: refs/heads/hadoop-eclipse-merge
Commit: 08355408b16dd99f5bbba84a750b4f29b4f497e5
Parents: 092213c
Author: Rahul Sharma <rs...@apache.org>
Authored: Fri May 16 12:02:08 2014 +0530
Committer: Rahul Sharma <rs...@apache.org>
Committed: Fri May 23 08:47:33 2014 +0530

----------------------------------------------------------------------
 .../zookeeper/InterruptableZooKeeperClient.java |  5 +-
 .../internal/zookeeper/ZooKeeperManager.java    | 47 +++++-----
 .../hdt/ui/internal/zookeeper/DeleteAction.java | 91 +++++++++++---------
 .../ui/internal/zookeeper/DisconnectAction.java | 52 +++++++----
 .../internal/zookeeper/NewZooKeeperWizard.java  | 12 ++-
 .../ui/internal/zookeeper/ReconnectAction.java  | 54 ++++++++----
 .../ZooKeeperCommonContentProvider.java         | 10 +--
 7 files changed, 164 insertions(+), 107 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/08355408/org.apache.hdt.core/src/org/apache/hdt/core/internal/zookeeper/InterruptableZooKeeperClient.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/zookeeper/InterruptableZooKeeperClient.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/zookeeper/InterruptableZooKeeperClient.java
index 133b9dd..38c5664 100644
--- a/org.apache.hdt.core/src/org/apache/hdt/core/internal/zookeeper/InterruptableZooKeeperClient.java
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/zookeeper/InterruptableZooKeeperClient.java
@@ -93,7 +93,10 @@ public class InterruptableZooKeeperClient extends ZooKeeperClient {
 			// Tell HDFS manager that the server timed out
 			if (logger.isDebugEnabled())
 				logger.debug("executeWithTimeout(): Server timed out: " + server);
-			ZooKeeperManager.INSTANCE.disconnect(server);
+			try {
+			  ZooKeeperManager.INSTANCE.disconnect(server);
+			} catch (Throwable t) {
+			}
 			throw new InterruptedException();
 		}
 		if (data.size() > 0)

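The new catch block above deliberately swallows any failure from disconnect(): the code is already on a timeout path, and the InterruptedException thrown next is the error that must reach the caller, so a secondary failure must not mask it. The same idiom in a generic, self-contained form (the helper class is a sketch for illustration, not part of the commit):

import java.io.Closeable;

// Best-effort cleanup on an error path: a secondary failure must never
// mask the primary error the caller is about to receive.
public final class Cleanup {
	private Cleanup() {
	}

	public static void closeQuietly(Closeable resource) {
		try {
			if (resource != null) {
				resource.close();
			}
		} catch (Throwable t) {
			// deliberately ignored; we are already handling another error
		}
	}
}
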
http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/08355408/org.apache.hdt.core/src/org/apache/hdt/core/internal/zookeeper/ZooKeeperManager.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/zookeeper/ZooKeeperManager.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/zookeeper/ZooKeeperManager.java
index 4c36259..87b5cd5 100644
--- a/org.apache.hdt.core/src/org/apache/hdt/core/internal/zookeeper/ZooKeeperManager.java
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/zookeeper/ZooKeeperManager.java
@@ -35,6 +35,8 @@ import org.eclipse.core.runtime.IStatus;
 import org.eclipse.core.runtime.Platform;
 import org.eclipse.core.runtime.Status;
 import org.eclipse.emf.common.util.EList;
+import org.eclipse.jface.dialogs.MessageDialog;
+import org.eclipse.swt.widgets.Display;
 
 /**
  * @author Srimanth Gunturi
@@ -62,11 +64,20 @@ public class ZooKeeperManager {
 	/**
 	 * @param zkServerName
 	 * @param uri
+	 * @throws CoreException 
+	 * @throws InterruptedException 
+	 * @throws IOException 
 	 */
-	public ZooKeeperServer createServer(String zkServerName, String zkServerLocation) {
+	public ZooKeeperServer createServer(String zkServerName, String zkServerLocation) throws  CoreException {
 		ZooKeeperServer zkServer = HadoopFactory.eINSTANCE.createZooKeeperServer();
 		zkServer.setName(zkServerName);
 		zkServer.setUri(zkServerLocation);
+		try {
+			ZooKeeperManager.INSTANCE.getClient(zkServer).connect();
+		} catch (Exception e) {
+			logger.error("Error getting children of node", e);
+			throw new CoreException(new Status(Status.ERROR, Activator.BUNDLE_ID, "Error in creating server",e));
+		}
 		getServers().add(zkServer);
 		HadoopManager.INSTANCE.saveServers();
 		return zkServer;
@@ -74,22 +85,18 @@ public class ZooKeeperManager {
 
 	/**
 	 * @param r
+	 * @throws CoreException 
 	 */
-	public void disconnect(ZooKeeperServer server) {
+	public void disconnect(ZooKeeperServer server) throws CoreException {
 		try {
 			if (ServerStatus.DISCONNECTED_VALUE != server.getStatusCode()) {
 				getClient(server).disconnect();
 				server.setStatusCode(ServerStatus.DISCONNECTED_VALUE);
 			}
-		} catch (IOException e) {
-			// TODO Auto-generated catch block
-			e.printStackTrace();
-		} catch (InterruptedException e) {
-			// TODO Auto-generated catch block
-			e.printStackTrace();
-		} catch (CoreException e) {
-			// TODO Auto-generated catch block
-			e.printStackTrace();
+		} catch (Exception e) {
+			logger.error("Error in disconnet", e);
+			throw new CoreException(new Status(IStatus.ERROR, Activator.BUNDLE_ID,
+					"Unable to disconnect.",e));
 		}
 	}
 
@@ -97,8 +104,9 @@ public class ZooKeeperManager {
 	 * Provides a ZooKeeper instance using plugin extensions.
 	 * 
 	 * @param r
+	 * @throws CoreException 
 	 */
-	public void reconnect(ZooKeeperServer server) {
+	public void reconnect(ZooKeeperServer server) throws CoreException {
 		try {
 			if (logger.isDebugEnabled())
 				logger.debug("reconnect(): Reconnecting: " + server);
@@ -111,18 +119,11 @@ public class ZooKeeperManager {
 			}
 			if (logger.isDebugEnabled())
 				logger.debug("reconnect(): Reconnected: " + server);
-		} catch (IOException e) {
-			server.setStatusCode(ServerStatus.DISCONNECTED_VALUE);
-			// TODO Auto-generated catch block
-			e.printStackTrace();
-		} catch (InterruptedException e) {
-			server.setStatusCode(ServerStatus.DISCONNECTED_VALUE);
-			// TODO Auto-generated catch block
-			e.printStackTrace();
-		} catch (CoreException e) {
+		} catch (Exception e) {
 			server.setStatusCode(ServerStatus.DISCONNECTED_VALUE);
-			// TODO Auto-generated catch block
-			e.printStackTrace();
+			logger.error("Error in disconnet", e);
+			throw new CoreException(new Status(IStatus.ERROR, Activator.BUNDLE_ID,
+					"Unable to reconnect.",e));
 		}
 	}
 

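createServer(), disconnect() and reconnect() now funnel every failure into a CoreException whose IStatus wraps the root cause, so callers decide how to surface it; the reworked UI actions below unwrap the status and show a dialog. A minimal caller-side sketch of that contract (the dialog title is illustrative):

import org.apache.hdt.core.internal.model.ZooKeeperServer;
import org.apache.hdt.core.internal.zookeeper.ZooKeeperManager;
import org.eclipse.core.runtime.CoreException;
import org.eclipse.core.runtime.IStatus;
import org.eclipse.jface.dialogs.MessageDialog;
import org.eclipse.swt.widgets.Display;

public class DisconnectExample {
	public void disconnectAndReport(ZooKeeperServer server) {
		try {
			ZooKeeperManager.INSTANCE.disconnect(server);
		} catch (CoreException e) {
			// Unwrap the status set by ZooKeeperManager and show the root cause.
			IStatus status = e.getStatus();
			Throwable cause = status.getException();
			String detail = (cause != null) ? cause.getMessage() : status.getMessage();
			MessageDialog.openError(Display.getDefault().getActiveShell(), "ZooKeeper Error", detail);
		}
	}
}
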
http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/08355408/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/DeleteAction.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/DeleteAction.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/DeleteAction.java
index 599c011..0147b6b 100644
--- a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/DeleteAction.java
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/DeleteAction.java
@@ -18,7 +18,6 @@
  */
 package org.apache.hdt.ui.internal.zookeeper;
 
-import java.io.IOException;
 import java.util.Iterator;
 
 import org.apache.hdt.core.internal.model.ZNode;
@@ -27,9 +26,12 @@ import org.apache.hdt.core.internal.zookeeper.ZooKeeperManager;
 import org.apache.hdt.core.zookeeper.ZooKeeperClient;
 import org.apache.log4j.Logger;
 import org.eclipse.core.runtime.CoreException;
+import org.eclipse.core.runtime.IStatus;
 import org.eclipse.jface.action.IAction;
+import org.eclipse.jface.dialogs.MessageDialog;
 import org.eclipse.jface.viewers.ISelection;
 import org.eclipse.jface.viewers.IStructuredSelection;
+import org.eclipse.swt.widgets.Display;
 import org.eclipse.ui.IObjectActionDelegate;
 import org.eclipse.ui.IWorkbenchPart;
 import org.eclipse.ui.navigator.resources.ProjectExplorer;
@@ -39,7 +41,12 @@ public class DeleteAction implements IObjectActionDelegate {
 	private final static Logger logger = Logger.getLogger(DeleteAction.class);
 	private ISelection selection;
 	private IWorkbenchPart targetPart;
-
+	
+	
+	private void showError(String message) {
+		MessageDialog.openError(Display.getDefault().getActiveShell(), 
+				"ZooKeeper Delete Error", message);
+	}
 	/*
 	 * (non-Javadoc)
 	 * 
@@ -47,48 +54,54 @@ public class DeleteAction implements IObjectActionDelegate {
 	 */
 	@Override
 	public void run(IAction action) {
-		if (this.selection != null && !this.selection.isEmpty()) {
-			IStructuredSelection sSelection = (IStructuredSelection) this.selection;
-			@SuppressWarnings("rawtypes")
-			Iterator itr = sSelection.iterator();
-			while (itr.hasNext()) {
-				Object object = itr.next();
-				if (object instanceof ZooKeeperServer) {
-					ZooKeeperServer r = (ZooKeeperServer) object;
-					if (logger.isDebugEnabled())
-						logger.debug("Deleting: " + r);
-					try {
-						ZooKeeperManager.INSTANCE.disconnect(r);
-					} finally {
+		Display.getDefault().syncExec(new Runnable() {
+			@Override
+			public void run() {
+				if (selection != null && !selection.isEmpty()) {
+				IStructuredSelection sSelection = (IStructuredSelection) selection;
+				@SuppressWarnings("rawtypes")
+				Iterator itr = sSelection.iterator();
+				while (itr.hasNext()) {
+					Object object = itr.next();
+					if (object instanceof ZooKeeperServer) {
+						ZooKeeperServer r = (ZooKeeperServer) object;
+						if (logger.isDebugEnabled())
+							logger.debug("Deleting: " + r);
 						try {
-							ZooKeeperManager.INSTANCE.delete(r);
+							ZooKeeperManager.INSTANCE.disconnect(r);
 						} catch (CoreException e) {
-							logger.error(e.getMessage());
+							logger.error("Error occurred ", e);
+						} finally {
+							 try {
+								ZooKeeperManager.INSTANCE.delete(r);
+							} catch (CoreException e) {
+								logger.error("Error occurred ", e);
+								IStatus status = e.getStatus();
+								showError(status.getException().getMessage());
+							}
+						}
+						if (logger.isDebugEnabled())
+							logger.debug("Deleted: " + r);
+						if (targetPart instanceof ProjectExplorer) {
+							ProjectExplorer pe = (ProjectExplorer) targetPart;
+							pe.getCommonViewer().refresh();
+						}
+					} else if (object instanceof ZNode) {
+						ZNode zkn = (ZNode) object;
+						if (logger.isDebugEnabled())
+							logger.debug("Deleting: " + zkn);
+						try {
+							ZooKeeperClient client = ZooKeeperManager.INSTANCE.getClient(zkn.getServer());
+							client.delete(zkn);
+						} catch (Exception e) {
+							logger.error("Error occurred ", e);
+							showError(e.getMessage());
 						}
-					}
-					if (logger.isDebugEnabled())
-						logger.debug("Deleted: " + r);
-					if (targetPart instanceof ProjectExplorer) {
-						ProjectExplorer pe = (ProjectExplorer) targetPart;
-						pe.getCommonViewer().refresh();
-					}
-				} else if (object instanceof ZNode) {
-					ZNode zkn = (ZNode) object;
-					if (logger.isDebugEnabled())
-						logger.debug("Deleting: " + zkn);
-					try {
-						ZooKeeperClient client = ZooKeeperManager.INSTANCE.getClient(zkn.getServer());
-						client.delete(zkn);
-					} catch (CoreException e) {
-						logger.error(e.getMessage(), e);
-					} catch (IOException e) {
-						logger.error(e.getMessage(), e);
-					} catch (InterruptedException e) {
-						logger.error(e.getMessage(), e);
 					}
 				}
-			}
-		}
+			}}
+		});
+		
 	}
 
 	/*

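The action bodies are now wrapped in Display.syncExec so that the MessageDialogs and viewer refreshes are guaranteed to run on the SWT UI thread. Distilled into a reusable helper (the class and method names here are for illustration only):

import org.eclipse.jface.dialogs.MessageDialog;
import org.eclipse.swt.widgets.Display;

public final class UiErrors {
	private UiErrors() {
	}

	// syncExec marshals the Runnable onto the SWT UI thread and blocks
	// until it has run; when already on the UI thread it runs inline.
	public static void showError(final String title, final String message) {
		Display.getDefault().syncExec(new Runnable() {
			public void run() {
				MessageDialog.openError(Display.getDefault().getActiveShell(), title, message);
			}
		});
	}
}
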
http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/08355408/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/DisconnectAction.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/DisconnectAction.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/DisconnectAction.java
index d335c79..af293c5 100644
--- a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/DisconnectAction.java
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/DisconnectAction.java
@@ -24,9 +24,12 @@ import org.apache.hdt.core.internal.model.ServerStatus;
 import org.apache.hdt.core.internal.model.ZooKeeperServer;
 import org.apache.hdt.core.internal.zookeeper.ZooKeeperManager;
 import org.apache.log4j.Logger;
+import org.eclipse.core.runtime.CoreException;
 import org.eclipse.jface.action.IAction;
+import org.eclipse.jface.dialogs.MessageDialog;
 import org.eclipse.jface.viewers.ISelection;
 import org.eclipse.jface.viewers.IStructuredSelection;
+import org.eclipse.swt.widgets.Display;
 import org.eclipse.ui.IObjectActionDelegate;
 import org.eclipse.ui.IWorkbenchPart;
 import org.eclipse.ui.navigator.resources.ProjectExplorer;
@@ -37,6 +40,10 @@ public class DisconnectAction implements IObjectActionDelegate {
 	private ISelection selection;
 	private IWorkbenchPart targetPart;
 
+	private void showError(String message) {
+		MessageDialog.openError(Display.getDefault().getActiveShell(), 
+				"ZooKeeper Disconnect Error",message);
+	}
 	/*
 	 * (non-Javadoc)
 	 * 
@@ -44,26 +51,33 @@ public class DisconnectAction implements IObjectActionDelegate {
 	 */
 	@Override
 	public void run(IAction action) {
-		if (this.selection != null && !this.selection.isEmpty()) {
-			IStructuredSelection sSelection = (IStructuredSelection) this.selection;
-			@SuppressWarnings("rawtypes")
-			Iterator itr = sSelection.iterator();
-			while (itr.hasNext()) {
-				Object object = itr.next();
-				if (object instanceof ZooKeeperServer) {
-					ZooKeeperServer r = (ZooKeeperServer) object;
-					if(logger.isDebugEnabled())
-						logger.debug("Disconnecting: "+r);
-					ZooKeeperManager.INSTANCE.disconnect(r);
-					if(logger.isDebugEnabled())
-						logger.debug("Disconnected: "+r);
-					if (targetPart instanceof ProjectExplorer) {
-						ProjectExplorer pe = (ProjectExplorer) targetPart;
-						pe.getCommonViewer().refresh(r, true);
+		Display.getDefault().syncExec(new Runnable() {
+			@Override
+			public void run() {
+			if (selection != null && !selection.isEmpty()) {
+				IStructuredSelection sSelection = (IStructuredSelection) selection;
+				@SuppressWarnings("rawtypes")
+				Iterator itr = sSelection.iterator();
+				while (itr.hasNext()) {
+					Object object = itr.next();
+					if (object instanceof ZooKeeperServer) {
+						ZooKeeperServer r = (ZooKeeperServer) object;
+						if(logger.isDebugEnabled())
+							logger.debug("Disconnecting: "+r);
+						try {
+							ZooKeeperManager.INSTANCE.disconnect(r);
+						} catch (CoreException e) {
+							logger.error("Error occurred ", e);
+							showError(e.getStatus().getException().getMessage());
+						}
+						if(logger.isDebugEnabled())
+							logger.debug("Disconnected: "+r);
+						if (targetPart instanceof ProjectExplorer) {
+							ProjectExplorer pe = (ProjectExplorer) targetPart;
+							pe.getCommonViewer().refresh(r, true);
+						}
 					}
-				}
-			}
-		}
+				}}}});
 	}
 
 	/*

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/08355408/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/NewZooKeeperWizard.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/NewZooKeeperWizard.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/NewZooKeeperWizard.java
index 60e740b..9a8e7c0 100644
--- a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/NewZooKeeperWizard.java
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/NewZooKeeperWizard.java
@@ -24,8 +24,10 @@ import org.apache.hdt.ui.internal.launch.ServerRegistry;
 import org.eclipse.core.runtime.CoreException;
 import org.eclipse.core.runtime.IConfigurationElement;
 import org.eclipse.core.runtime.IExecutableExtension;
+import org.eclipse.core.runtime.IStatus;
 import org.eclipse.core.runtime.Status;
 import org.eclipse.core.runtime.jobs.Job;
+import org.eclipse.jface.dialogs.MessageDialog;
 import org.eclipse.jface.preference.IPreferenceStore;
 import org.eclipse.jface.viewers.IStructuredSelection;
 import org.eclipse.jface.wizard.Wizard;
@@ -80,7 +82,15 @@ public class NewZooKeeperWizard extends Wizard implements INewWizard,IExecutable
 
 				Job j = new Job("Creating ZooKeeper project [" + serverLocationWizardPage.getZkServerName() + "]") {
 					protected org.eclipse.core.runtime.IStatus run(org.eclipse.core.runtime.IProgressMonitor monitor) {
-						ZooKeeperManager.INSTANCE.createServer(serverLocationWizardPage.getZkServerName(), serverLocationWizardPage.getZkServerLocation());
+						try {
+							ZooKeeperManager.INSTANCE.createServer(serverLocationWizardPage.getZkServerName(), serverLocationWizardPage.getZkServerLocation());
+						} catch (final CoreException e) {
+							Display.getDefault().syncExec(new Runnable(){
+								public void run(){
+								IStatus status = e.getStatus();
+								MessageDialog.openError(Display.getDefault().getActiveShell(), 
+									"ZooKeeper Error", status.getMessage()+" "+status.getException().getMessage());}});
+						}
 						return Status.OK_STATUS;
 					};
 				};

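The Job above reports the failure itself via a dialog and still returns OK_STATUS. An alternative worth noting (a sketch, not what this commit does) is to return the CoreException's status, letting the Jobs framework mark the job as failed and report it:

import org.apache.hdt.core.internal.zookeeper.ZooKeeperManager;
import org.eclipse.core.runtime.CoreException;
import org.eclipse.core.runtime.IProgressMonitor;
import org.eclipse.core.runtime.IStatus;
import org.eclipse.core.runtime.Status;
import org.eclipse.core.runtime.jobs.Job;

public class CreateServerJobSketch {
	public static void schedule(final String name, final String location) {
		Job job = new Job("Creating ZooKeeper project [" + name + "]") {
			protected IStatus run(IProgressMonitor monitor) {
				try {
					ZooKeeperManager.INSTANCE.createServer(name, location);
					return Status.OK_STATUS;
				} catch (CoreException e) {
					// Propagate the status; the platform reports the failure.
					return e.getStatus();
				}
			}
		};
		job.schedule();
	}
}
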
http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/08355408/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/ReconnectAction.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/ReconnectAction.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/ReconnectAction.java
index 17d228c..e5905bc 100644
--- a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/ReconnectAction.java
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/ReconnectAction.java
@@ -24,9 +24,13 @@ import org.apache.hdt.core.internal.model.ServerStatus;
 import org.apache.hdt.core.internal.model.ZooKeeperServer;
 import org.apache.hdt.core.internal.zookeeper.ZooKeeperManager;
 import org.apache.log4j.Logger;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.core.runtime.IStatus;
 import org.eclipse.jface.action.IAction;
+import org.eclipse.jface.dialogs.MessageDialog;
 import org.eclipse.jface.viewers.ISelection;
 import org.eclipse.jface.viewers.IStructuredSelection;
+import org.eclipse.swt.widgets.Display;
 import org.eclipse.ui.IObjectActionDelegate;
 import org.eclipse.ui.IWorkbenchPart;
 import org.eclipse.ui.navigator.resources.ProjectExplorer;
@@ -37,6 +41,10 @@ public class ReconnectAction implements IObjectActionDelegate {
 	private ISelection selection;
 	private IWorkbenchPart targetPart;
 
+	private void showError(String message) {
+		MessageDialog.openError(Display.getDefault().getActiveShell(), 
+				"ZooKeeper Re-connect Error", message);
+	}
 	/*
 	 * (non-Javadoc)
 	 * 
@@ -44,26 +52,34 @@ public class ReconnectAction implements IObjectActionDelegate {
 	 */
 	@Override
 	public void run(IAction action) {
-		if (this.selection != null && !this.selection.isEmpty()) {
-			IStructuredSelection sSelection = (IStructuredSelection) this.selection;
-			@SuppressWarnings("rawtypes")
-			Iterator itr = sSelection.iterator();
-			while (itr.hasNext()) {
-				Object object = itr.next();
-				if (object instanceof ZooKeeperServer) {
-					ZooKeeperServer r = (ZooKeeperServer) object;
-					if(logger.isDebugEnabled())
-						logger.debug("Reconnecting: "+r);
-					ZooKeeperManager.INSTANCE.reconnect(r);
-					if(logger.isDebugEnabled())
-						logger.debug("Reconnected: "+r);
-					if (targetPart instanceof ProjectExplorer) {
-						ProjectExplorer pe = (ProjectExplorer) targetPart;
-						pe.getCommonViewer().refresh(r, true);
+		Display.getDefault().syncExec(new Runnable() {
+			@Override
+			public void run() {
+			if (selection != null && !selection.isEmpty()) {
+				IStructuredSelection sSelection = (IStructuredSelection) selection;
+				@SuppressWarnings("rawtypes")
+				Iterator itr = sSelection.iterator();
+				while (itr.hasNext()) {
+					Object object = itr.next();
+					if (object instanceof ZooKeeperServer) {
+						ZooKeeperServer r = (ZooKeeperServer) object;
+						if(logger.isDebugEnabled())
+							logger.debug("Reconnecting: "+r);
+						try {
+							ZooKeeperManager.INSTANCE.reconnect(r);
+						} catch (CoreException e) {
+							logger.error("Error occurred ", e);
+							IStatus status = e.getStatus();
+							showError(status.getException().getMessage());
+						}
+						if(logger.isDebugEnabled())
+							logger.debug("Reconnected: "+r);
+						if (targetPart instanceof ProjectExplorer) {
+							ProjectExplorer pe = (ProjectExplorer) targetPart;
+							pe.getCommonViewer().refresh(r, true);
+						}
 					}
-				}
-			}
-		}
+				}}}});
 	}
 
 	/*

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/08355408/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/ZooKeeperCommonContentProvider.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/ZooKeeperCommonContentProvider.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/ZooKeeperCommonContentProvider.java
index 1579846..0c816e3 100644
--- a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/ZooKeeperCommonContentProvider.java
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/ZooKeeperCommonContentProvider.java
@@ -32,6 +32,7 @@ import org.apache.log4j.Logger;
 import org.eclipse.core.runtime.CoreException;
 import org.eclipse.emf.common.notify.Notification;
 import org.eclipse.emf.ecore.util.EContentAdapter;
+import org.eclipse.jface.dialogs.MessageDialog;
 import org.eclipse.jface.viewers.Viewer;
 import org.eclipse.swt.widgets.Display;
 import org.eclipse.ui.IMemento;
@@ -91,12 +92,11 @@ public class ZooKeeperCommonContentProvider implements ICommonContentProvider {
 				ZooKeeperClient client = ZooKeeperManager.INSTANCE.getClient(zkn.getServer());
 				List<ZNode> zkChildren = client.getChildren(zkn);
 				return zkChildren.toArray();
-			} catch (CoreException e) {
-				logger.error("Error getting children of node", e);
-			} catch (IOException e) {
-				logger.error("Error getting children of node", e);
-			} catch (InterruptedException e) {
+			} catch (Exception e) {
 				logger.error("Error getting children of node", e);
+				MessageDialog.openError(Display.getDefault().getActiveShell(), 
+						"ZooKeeper Error",e.getMessage());
+			
 			}
 		}
 		return null;


[18/27] git commit: HDT-61: - Extracting hadoop home validation to a plugin extension point - using the hadoop home validator in NewProjectWizard and MapReduceNature - setting the version as part of the preference

Posted by rs...@apache.org.
 HDT-61:
  - Extracting hadoop home validation to a plugin extension point
  - using the hadoop home validator in NewProjectWizard and MapReduceNature
  - setting the version as part of the preference


Project: http://git-wip-us.apache.org/repos/asf/incubator-hdt/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hdt/commit/bf1a4949
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hdt/tree/bf1a4949
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hdt/diff/bf1a4949

Branch: refs/heads/hadoop-eclipse-merge
Commit: bf1a4949564d7f78556d941dde1ba971fba02204
Parents: c308e97
Author: Rahul Sharma <rs...@apache.org>
Authored: Mon Jun 16 14:50:05 2014 +0530
Committer: Rahul Sharma <rs...@apache.org>
Committed: Tue Jun 17 10:11:07 2014 +0530

----------------------------------------------------------------------
 org.apache.hdt.core/plugin.xml                  |   1 +
 ....apache.hadoop.eclipse.hadoopHomeReader.exsd | 126 +++++++++++++++++++
 .../hdt/core/AbstractHadoopHomeReader.java      |  46 +++++++
 .../hdt/core/natures/MapReduceNature.java       |  28 +----
 org.apache.hdt.hadoop.release/plugin.xml        |   8 ++
 .../hdt/hadoop/release/HadoopHomeReader.java    |  77 ++++++++++++
 org.apache.hdt.hadoop2.release/plugin.xml       |   7 ++
 .../hdt/hadoop2/release/HadoopHomeReader.java   | 101 +++++++++++++++
 .../internal/mr/NewMapReduceProjectWizard.java  |  82 ++++++++++--
 .../ui/preferences/MapReducePreferencePage.java |  11 ++
 .../hdt/ui/preferences/PreferenceConstants.java |   2 +
 11 files changed, 454 insertions(+), 35 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/bf1a4949/org.apache.hdt.core/plugin.xml
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/plugin.xml b/org.apache.hdt.core/plugin.xml
index 94f3d49..86ca57b 100644
--- a/org.apache.hdt.core/plugin.xml
+++ b/org.apache.hdt.core/plugin.xml
@@ -20,6 +20,7 @@
    <extension-point id="org.apache.hdt.core.hdfsClient" name="Apache Hadoop HDFS Client" schema="schema/org.apache.hadoop.eclipse.hdfsclient.exsd"/>
    <extension-point id="org.apache.hdt.core.zookeeperClient" name="Apache Hadoop ZooKeeper Client" schema="schema/org.apache.hadoop.eclipse.zookeeperClient.exsd"/>
    <extension-point id="org.apache.hdt.core.hadoopCluster" name="Apache Hadoop Cluster" schema="schema/org.apache.hadoop.eclipse.hadoopCluster.exsd"/>
+   <extension-point id="org.apache.hdt.core.hadoopHomeReader" name="Apache Hadoop Home Location Reader" schema="schema/org.apache.hadoop.eclipse.hadoopHomeReader.exsd"/>
    
    <extension
          id="org.apache.hadoop.hdfs.filesystem"

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/bf1a4949/org.apache.hdt.core/schema/org.apache.hadoop.eclipse.hadoopHomeReader.exsd
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/schema/org.apache.hadoop.eclipse.hadoopHomeReader.exsd b/org.apache.hdt.core/schema/org.apache.hadoop.eclipse.hadoopHomeReader.exsd
new file mode 100644
index 0000000..bfd8941
--- /dev/null
+++ b/org.apache.hdt.core/schema/org.apache.hadoop.eclipse.hadoopHomeReader.exsd
@@ -0,0 +1,126 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<schema targetNamespace="org.apache.hdt.core" xmlns="http://www.w3.org/2001/XMLSchema">
+<annotation>
+      <appinfo>
+         <meta.schema plugin="org.apache.hdt.core" id="org.apache.hdt.core.hadoopHomeReader" name="Apache Hadoop Home Location Reader"/>
+      </appinfo>
+      <documentation>
+         Allows plug-ins to contribute readers that locate and validate a Hadoop installation (home directory and library jars) for a specific Hadoop version.
+      </documentation>
+   </annotation>
+
+   <element name="extension">
+      <annotation>
+         <appinfo>
+            <meta.element />
+         </appinfo>
+      </annotation>
+      <complexType>
+         <choice>
+            <sequence>
+               <element ref="hadoopHomeReader" minOccurs="0" maxOccurs="unbounded"/>
+            </sequence>
+         </choice>
+         <attribute name="point" type="string" use="required">
+            <annotation>
+               <documentation>
+                  
+               </documentation>
+            </annotation>
+         </attribute>
+         <attribute name="id" type="string">
+            <annotation>
+               <documentation>
+                  
+               </documentation>
+            </annotation>
+         </attribute>
+         <attribute name="name" type="string">
+            <annotation>
+               <documentation>
+                  
+               </documentation>
+               <appinfo>
+                  <meta.attribute translatable="true"/>
+               </appinfo>
+            </annotation>
+         </attribute>
+      </complexType>
+   </element>
+
+   <element name="hadoopHomeReader">
+      <complexType>
+         <attribute name="class" type="string" use="required">
+            <annotation>
+               <documentation>
+                  
+               </documentation>
+               <appinfo>
+                  <meta.attribute kind="java" basedOn="org.apache.hdt.core.AbstractHadoopHomeReader:"/>
+               </appinfo>
+            </annotation>
+         </attribute>
+         <attribute name="protocolVersion" type="string" use="required">
+            <annotation>
+               <documentation>
+                  
+               </documentation>
+            </annotation>
+         </attribute>
+      </complexType>
+   </element>
+
+   <annotation>
+      <appinfo>
+         <meta.section type="since"/>
+      </appinfo>
+      <documentation>
+         [Enter the first release in which this extension point appears.]
+      </documentation>
+   </annotation>
+
+   <annotation>
+      <appinfo>
+         <meta.section type="examples"/>
+      </appinfo>
+      <documentation>
+         See the hadoopHomeReader contributions in the org.apache.hdt.hadoop.release and org.apache.hdt.hadoop2.release plugin.xml files.
+      </documentation>
+   </annotation>
+
+   <annotation>
+      <appinfo>
+         <meta.section type="apiinfo"/>
+      </appinfo>
+      <documentation>
+         Implementations must extend org.apache.hdt.core.AbstractHadoopHomeReader and are selected at runtime by matching the protocolVersion attribute against the requested Hadoop version.
+      </documentation>
+   </annotation>
+
+   <annotation>
+      <appinfo>
+         <meta.section type="implementation"/>
+      </appinfo>
+      <documentation>
+         The org.apache.hdt.hadoop.release and org.apache.hdt.hadoop2.release bundles supply readers for protocol versions 1.1 and 2.2 respectively.
+      </documentation>
+   </annotation>
+
+
+</schema>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/bf1a4949/org.apache.hdt.core/src/org/apache/hdt/core/AbstractHadoopHomeReader.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/AbstractHadoopHomeReader.java b/org.apache.hdt.core/src/org/apache/hdt/core/AbstractHadoopHomeReader.java
new file mode 100644
index 0000000..aa61296
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/AbstractHadoopHomeReader.java
@@ -0,0 +1,46 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.core;
+
+import java.io.File;
+import java.util.List;
+
+import org.apache.log4j.Logger;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.core.runtime.IConfigurationElement;
+import org.eclipse.core.runtime.Platform;
+import org.eclipse.core.runtime.Status;
+
+public abstract class AbstractHadoopHomeReader {
+	private static final Logger logger = Logger.getLogger(AbstractHadoopHomeReader.class);
+	public abstract boolean validateHadoopHome(File location);
+	public abstract List<File> getHadoopJars(File location);
+	
+	public static AbstractHadoopHomeReader createReader(String hadoopVersion) throws CoreException {
+		logger.debug("Creating  hadoop home reader"); 
+		IConfigurationElement[] elementsFor = Platform.getExtensionRegistry().getConfigurationElementsFor("org.apache.hdt.core.hadoopHomeReader");
+		for (IConfigurationElement configElement : elementsFor) {
+			String version = configElement.getAttribute("protocolVersion");
+			if (version.equalsIgnoreCase(hadoopVersion)) {
+				return (AbstractHadoopHomeReader)configElement.createExecutableExtension("class");
+			}
+		}
+		throw new CoreException(new Status(Status.ERROR,Activator.BUNDLE_ID,"No Reader found for hadoop version"+hadoopVersion));
+	}
+}

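Any bundle can now contribute support for another Hadoop line by subclassing AbstractHadoopHomeReader and registering it against the new extension point, exactly as the hadoop.release and hadoop2.release plugins do below. A hypothetical third-party contribution might look like this (the class name, package, and "3.0" protocolVersion are invented for illustration):

package com.example.hdt;

import java.io.File;
import java.util.ArrayList;
import java.util.List;

import org.apache.hdt.core.AbstractHadoopHomeReader;

// Registered via plugin.xml against point "org.apache.hdt.core.hadoopHomeReader"
// with class="com.example.hdt.ExampleHomeReader" and protocolVersion="3.0".
public class ExampleHomeReader extends AbstractHadoopHomeReader {

	@Override
	public boolean validateHadoopHome(File location) {
		// Accept any install that has a bin/ directory.
		return new File(location, "bin").isDirectory();
	}

	@Override
	public List<File> getHadoopJars(File location) {
		List<File> jars = new ArrayList<File>();
		File[] children = new File(location, "lib").listFiles();
		if (children != null) {
			for (File f : children) {
				if (f.getName().endsWith(".jar")) {
					jars.add(f);
				}
			}
		}
		return jars;
	}
}
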
http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/bf1a4949/org.apache.hdt.core/src/org/apache/hdt/core/natures/MapReduceNature.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/natures/MapReduceNature.java b/org.apache.hdt.core/src/org/apache/hdt/core/natures/MapReduceNature.java
index e93ee9a..d350def 100644
--- a/org.apache.hdt.core/src/org/apache/hdt/core/natures/MapReduceNature.java
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/natures/MapReduceNature.java
@@ -19,13 +19,13 @@
 package org.apache.hdt.core.natures;
 
 import java.io.File;
-import java.io.FilenameFilter;
 import java.net.URL;
-import java.util.ArrayList;
 import java.util.Iterator;
+import java.util.List;
 import java.util.logging.Level;
 import java.util.logging.Logger;
 
+import org.apache.hdt.core.AbstractHadoopHomeReader;
 import org.apache.hdt.core.Activator;
 import org.eclipse.core.resources.IProject;
 import org.eclipse.core.resources.IProjectNature;
@@ -60,12 +60,10 @@ public class MapReduceNature implements IProjectNature {
 	public void configure() throws CoreException {
 
 		String hadoopHomePath = project.getPersistentProperty(new QualifiedName(Activator.BUNDLE_ID, "hadoop.runtime.path"));
-		File hadoopHome = new Path(hadoopHomePath).toFile();
-		File hadoopLib = new File(hadoopHome, "lib");
-
-		final ArrayList<File> coreJars = new ArrayList<File>();
-		coreJars.addAll(getJarFiles(hadoopHome));
-		coreJars.addAll(getJarFiles(hadoopLib));
+		String hadoopVersion = project.getPersistentProperty(new QualifiedName(Activator.BUNDLE_ID, "hadoop.version"));
+		
+		AbstractHadoopHomeReader homeReader = AbstractHadoopHomeReader.createReader(hadoopVersion);
+		final List<File> coreJars = homeReader.getHadoopJars(new Path(hadoopHomePath).toFile());
 
 		// Add Hadoop libraries onto classpath
 		IJavaProject javaProject = JavaCore.create(getProject());
@@ -96,20 +94,6 @@ public class MapReduceNature implements IProjectNature {
 		}
 	}
 
-	private ArrayList<File> getJarFiles(File hadoopHome) {
-		FilenameFilter jarFileFilter = new FilenameFilter() {
-			@Override
-			public boolean accept(File dir, String name) {
-				return name.endsWith(".jar");
-			}
-		};
-		final ArrayList<File> jars = new ArrayList<File>();
-		for (String hadopCoreLibFileName : hadoopHome.list(jarFileFilter)) {
-			jars.add(new File(hadoopHome, hadopCoreLibFileName));
-		}
-		return jars;
-	}
-
 	/**
 	 * Deconfigure a project from MapReduce status. Currently unimplemented.
 	 */

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/bf1a4949/org.apache.hdt.hadoop.release/plugin.xml
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop.release/plugin.xml b/org.apache.hdt.hadoop.release/plugin.xml
index 476bdcd..62cb794 100644
--- a/org.apache.hdt.hadoop.release/plugin.xml
+++ b/org.apache.hdt.hadoop.release/plugin.xml
@@ -39,5 +39,13 @@
             protocolVersion="1.1">
       </hadoopCluster>
    </extension>
+   <extension
+         point="org.apache.hdt.core.hadoopHomeReader">
+      <hadoopHomeReader
+            class="org.apache.hdt.hadoop.release.HadoopHomeReader"
+            protocolVersion="1.1">
+      </hadoopHomeReader>
+   </extension>
+   
 
 </plugin>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/bf1a4949/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopHomeReader.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopHomeReader.java b/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopHomeReader.java
new file mode 100644
index 0000000..ef0952d
--- /dev/null
+++ b/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopHomeReader.java
@@ -0,0 +1,77 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hdt.hadoop.release;
+
+import java.io.File;
+import java.io.FilenameFilter;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hdt.core.AbstractHadoopHomeReader;
+import org.eclipse.core.runtime.Path;
+
+public class HadoopHomeReader extends AbstractHadoopHomeReader {
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.apache.hdt.core.AbstractHadoopHomeReader#validateHadoopHome(java.
+	 * io.File)
+	 */
+	@Override
+	public boolean validateHadoopHome(File location) {
+		FilenameFilter gotHadoopJar = new FilenameFilter() {
+			public boolean accept(File dir, String name) {
+				return (name.startsWith("hadoop") && name.endsWith(".jar") && (name.indexOf("test") == -1) && (name.indexOf("examples") == -1));
+			}
+		};
+		return location.exists() && (location.list(gotHadoopJar).length > 0);
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.apache.hdt.core.AbstractHadoopHomeReader#getHadoopJars(java.io.File)
+	 */
+	@Override
+	public List<File> getHadoopJars(File hadoopHome) {
+		File hadoopLib = new File(hadoopHome, "lib");
+
+		final ArrayList<File> coreJars = new ArrayList<File>();
+		coreJars.addAll(getJarFiles(hadoopHome));
+		coreJars.addAll(getJarFiles(hadoopLib));
+		return coreJars;
+	}
+	
+	private ArrayList<File> getJarFiles(File hadoopHome) {
+		FilenameFilter jarFileFilter = new FilenameFilter() {
+			@Override
+			public boolean accept(File dir, String name) {
+				return name.endsWith(".jar");
+			}
+		};
+		final ArrayList<File> jars = new ArrayList<File>();
+		for (String hadoopCoreLibFileName : hadoopHome.list(jarFileFilter)) {
+			jars.add(new File(hadoopHome, hadoopCoreLibFileName));
+		}
+		return jars;
+	}
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/bf1a4949/org.apache.hdt.hadoop2.release/plugin.xml
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop2.release/plugin.xml b/org.apache.hdt.hadoop2.release/plugin.xml
index b200aca..2b14915 100644
--- a/org.apache.hdt.hadoop2.release/plugin.xml
+++ b/org.apache.hdt.hadoop2.release/plugin.xml
@@ -32,4 +32,11 @@
             protocolVersion="2.2">
       </hdfsClient>
    </extension>
+   <extension
+         point="org.apache.hdt.core.hadoopHomeReader">
+      <hadoopHomeReader
+            class="org.apache.hdt.hadoop2.release.HadoopHomeReader"
+            protocolVersion="2.2">
+      </hadoopHomeReader>
+   </extension>
 </plugin>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/bf1a4949/org.apache.hdt.hadoop2.release/src/org/apache/hdt/hadoop2/release/HadoopHomeReader.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop2.release/src/org/apache/hdt/hadoop2/release/HadoopHomeReader.java b/org.apache.hdt.hadoop2.release/src/org/apache/hdt/hadoop2/release/HadoopHomeReader.java
new file mode 100644
index 0000000..a45086c
--- /dev/null
+++ b/org.apache.hdt.hadoop2.release/src/org/apache/hdt/hadoop2/release/HadoopHomeReader.java
@@ -0,0 +1,101 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hdt.hadoop2.release;
+
+import java.io.File;
+import java.io.FilenameFilter;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.hdt.core.AbstractHadoopHomeReader;
+
+public class HadoopHomeReader extends AbstractHadoopHomeReader {
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.apache.hdt.core.AbstractHadoopHomeReader#validateHadoopHome(java.
+	 * io.File)
+	 */
+	@Override
+	public boolean validateHadoopHome(File location) {
+	        File hadoopBin = new File(location, "bin");
+	        File hadoopSBIn = new File(location, "sbin");
+		FilenameFilter gotHadoopYarn = new FilenameFilter() {
+			public boolean accept(File dir, String name) {
+				return (name.indexOf("yarn") != -1);
+			}
+		};
+		return hadoopBin.exists() && (hadoopBin.list(gotHadoopYarn).length > 0) 
+		        && hadoopSBIn.exists() && (hadoopSBIn.list(gotHadoopYarn).length > 0);
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.apache.hdt.core.AbstractHadoopHomeReader#getHadoopJars(java.io.File)
+	 */
+	@Override
+	public List<File> getHadoopJars(File hadoopHome) {
+	        File mrCommonHome =  FileUtils.getFile(hadoopHome, "share","hadoop","common");
+                File mrCommonLib =  FileUtils.getFile(mrCommonHome,"lib");
+                File hdfsHome =  FileUtils.getFile(hadoopHome, "share","hadoop","hdfs");
+                File hdfsLib =  FileUtils.getFile(hdfsHome,"lib");
+                File yarnHome =  FileUtils.getFile(hadoopHome, "share","hadoop","yarn");
+                File yarnLib =  FileUtils.getFile(yarnHome,"lib");
+		File mrHome =  FileUtils.getFile(hadoopHome, "share","hadoop","mapreduce");
+		File mrLib =  FileUtils.getFile(mrHome,"lib");
+		
+		FilenameFilter jarFileFilter = new FilenameFilter() {
+		    Set<String> selectedFileName= new HashSet<String>();
+                    @Override
+                    public boolean accept(File dir, String name) {
+                            boolean accept = name.endsWith(".jar") 
+                                    && !selectedFileName.contains(name);
+                            if(accept){
+                                selectedFileName.add(name);
+                            }
+                            return accept;
+                    }
+            };
+		final ArrayList<File> coreJars = new ArrayList<File>();
+		coreJars.addAll(getJarFiles(mrCommonHome,jarFileFilter));
+		coreJars.addAll(getJarFiles(mrCommonLib,jarFileFilter));
+		coreJars.addAll(getJarFiles(hdfsHome,jarFileFilter));
+                coreJars.addAll(getJarFiles(hdfsLib,jarFileFilter));
+                coreJars.addAll(getJarFiles(yarnHome,jarFileFilter));
+                coreJars.addAll(getJarFiles(yarnLib,jarFileFilter));
+		coreJars.addAll(getJarFiles(mrHome,jarFileFilter));
+		coreJars.addAll(getJarFiles(mrLib,jarFileFilter));
+		return coreJars;
+	}
+	
+	private ArrayList<File> getJarFiles(File hadoopHome, FilenameFilter jarFileFilter) {
+		final ArrayList<File> jars = new ArrayList<File>();
+		for (String hadoopCoreLibFileName : hadoopHome.list(jarFileFilter)) {
+			jars.add(new File(hadoopHome, hadoopCoreLibFileName));
+		}
+		return jars;
+	}
+
+}

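Note the stateful FilenameFilter above: the same jar name can show up under several share/hadoop/* directories in a Hadoop 2 layout, so the filter remembers every name it has already accepted and each jar reaches the classpath only once. The trick in isolation (a standalone illustration, not project code):

import java.io.File;
import java.io.FilenameFilter;
import java.util.HashSet;
import java.util.Set;

// Accepts each jar file name at most once across however many
// directories the filter is applied to.
public class FirstOccurrenceJarFilter implements FilenameFilter {
	private final Set<String> seen = new HashSet<String>();

	public boolean accept(File dir, String name) {
		// Set.add() returns false when the name was seen before.
		return name.endsWith(".jar") && seen.add(name);
	}
}
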
http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/bf1a4949/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/NewMapReduceProjectWizard.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/NewMapReduceProjectWizard.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/NewMapReduceProjectWizard.java
index 3963828..4b88403 100644
--- a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/NewMapReduceProjectWizard.java
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/NewMapReduceProjectWizard.java
@@ -18,12 +18,12 @@
 
 package org.apache.hdt.ui.internal.mr;
 
-import java.io.File;
-import java.io.FilenameFilter;
 import java.lang.reflect.InvocationTargetException;
 import java.util.logging.Level;
 import java.util.logging.Logger;
 
+import org.apache.hdt.core.AbstractHadoopHomeReader;
+import org.apache.hdt.core.HadoopVersion;
 import org.apache.hdt.core.natures.MapReduceNature;
 import org.apache.hdt.ui.Activator;
 import org.apache.hdt.ui.ImageLibrary;
@@ -55,10 +55,14 @@ import org.eclipse.swt.events.SelectionListener;
 import org.eclipse.swt.layout.GridData;
 import org.eclipse.swt.layout.GridLayout;
 import org.eclipse.swt.widgets.Button;
+import org.eclipse.swt.widgets.Combo;
 import org.eclipse.swt.widgets.Composite;
 import org.eclipse.swt.widgets.DirectoryDialog;
+import org.eclipse.swt.widgets.Event;
 import org.eclipse.swt.widgets.Group;
+import org.eclipse.swt.widgets.Label;
 import org.eclipse.swt.widgets.Link;
+import org.eclipse.swt.widgets.Listener;
 import org.eclipse.swt.widgets.Text;
 import org.eclipse.ui.INewWizard;
 import org.eclipse.ui.IWorkbench;
@@ -132,9 +136,13 @@ public class NewMapReduceProjectWizard extends Wizard implements INewWizard, IEx
 	}
 
 	static class HadoopFirstPage extends WizardNewProjectCreationPage implements SelectionListener {
-		public HadoopFirstPage() {
+		public HadoopFirstPage() throws CoreException {
 			super("New Hadoop Project");
 			setImageDescriptor(ImageLibrary.get("wizard.mapreduce.project.new"));
+			String prefVersion = Activator.getDefault().getPreferenceStore().getString(PreferenceConstants.P_VERSION);
+			prefVersion = prefVersion != null && !prefVersion.isEmpty() ? prefVersion :
+				HadoopVersion.Version1.getDisplayName();
+			homeReader = AbstractHadoopHomeReader.createReader(prefVersion);
 		}
 
 		private Link openPreferences;
@@ -151,6 +159,12 @@ public class NewMapReduceProjectWizard extends Wizard implements INewWizard, IEx
 
 		public String currentPath;
 
+		AbstractHadoopHomeReader homeReader;
+
+		private Combo hadoopVersion;
+
+		private String hadoopVersionText;
+
 		// private Button generateDriver;
 
 		@Override
@@ -204,6 +218,47 @@ public class NewMapReduceProjectWizard extends Wizard implements INewWizard, IEx
 			browse.setEnabled(false);
 			browse.addSelectionListener(this);
 
+			/*
+			 * HDFS version
+			 */
+			{
+				Label label = new Label(group, SWT.NONE);
+				label.setText("&Hadoop Version:");
+				Combo options = new Combo(group, SWT.SINGLE | SWT.BORDER | SWT.READ_ONLY);
+				options.setLayoutData(new GridData(GridData.FILL_HORIZONTAL));
+				for (HadoopVersion ver : HadoopVersion.values()) {
+					options.add(ver.getDisplayName());
+				}
+				options.addListener(SWT.Selection, new Listener() {
+					public void handleEvent(Event e) {
+						try {
+							if (!hadoopVersionText.equalsIgnoreCase(hadoopVersion.getText())) {
+								homeReader = AbstractHadoopHomeReader.createReader(hadoopVersion.getText());
+								hadoopVersionText = hadoopVersion.getText();
+								getContainer().updateButtons();
+							}
+						} catch (CoreException e1) {
+							e1.printStackTrace();
+						}
+					}
+
+				});
+
+				hadoopVersion = options;
+				if (hadoopVersionText == null || hadoopVersionText.isEmpty())
+					hadoopVersionText = HadoopVersion.Version1.getDisplayName();
+
+				int pos = 0;
+				for (String item : options.getItems()) {
+					if (item.equalsIgnoreCase(hadoopVersionText)) {
+						options.select(pos);
+						break;
+					}
+					pos++;
+				}
+				options.setEnabled(false);
+			}
+
 			projectHadoop.addSelectionListener(this);
 			workspaceHadoop.addSelectionListener(this);
 
@@ -230,24 +285,18 @@ public class NewMapReduceProjectWizard extends Wizard implements INewWizard, IEx
 		}
 
 		private boolean validateHadoopLocation() {
-			FilenameFilter gotHadoopJar = new FilenameFilter() {
-				public boolean accept(File dir, String name) {
-					return (name.startsWith("hadoop") && name.endsWith(".jar") && (name.indexOf("test") == -1) && (name.indexOf("examples") == -1));
-				}
-			};
-
 			if (workspaceHadoop.getSelection()) {
 				this.currentPath = path;
-				return new Path(path).toFile().exists() && (new Path(path).toFile().list(gotHadoopJar).length > 0);
+				return homeReader.validateHadoopHome(new Path(path).toFile());
 			} else {
 				this.currentPath = location.getText();
-				File file = new Path(location.getText()).toFile();
-				return file.exists() && (new Path(location.getText()).toFile().list(gotHadoopJar).length > 0);
+				return homeReader.validateHadoopHome(new Path(location.getText()).toFile());
 			}
 		}
 
 		private void updateHadoopDirLabelFromPreferences() {
 			path = Activator.getDefault().getPreferenceStore().getString(PreferenceConstants.P_PATH);
+			hadoopVersionText = Activator.getDefault().getPreferenceStore().getString(PreferenceConstants.P_VERSION);
 
 			if ((path != null) && (path.length() > 0)) {
 				workspaceHadoop.setText("Use default Hadoop");
@@ -288,9 +337,11 @@ public class NewMapReduceProjectWizard extends Wizard implements INewWizard, IEx
 			} else if (projectHadoop.getSelection()) {
 				location.setEnabled(true);
 				browse.setEnabled(true);
+				hadoopVersion.setEnabled(true);
 			} else {
 				location.setEnabled(false);
 				browse.setEnabled(false);
+				hadoopVersion.setEnabled(false);
 			}
 
 			getContainer().updateButtons();
@@ -304,7 +355,11 @@ public class NewMapReduceProjectWizard extends Wizard implements INewWizard, IEx
 		 * JavaProjectWizardSecondPage(firstPage) );
 		 */
 
-		firstPage = new HadoopFirstPage();
+		try {
+			firstPage = new HadoopFirstPage();
+		} catch (CoreException e) {
+			e.printStackTrace();
+		}
 		javaPage = new NewJavaProjectWizardPage(ResourcesPlugin.getWorkspace().getRoot(), firstPage);
 		// newDriverPage = new NewDriverWizardPage(false);
 		// newDriverPage.setPageComplete(false); // ensure finish button
@@ -345,6 +400,7 @@ public class NewMapReduceProjectWizard extends Wizard implements INewWizard, IEx
 						description.setNatureIds(natures);
 
 						project.setPersistentProperty(new QualifiedName(Activator.PLUGIN_ID, "hadoop.runtime.path"), firstPage.currentPath);
+						project.setPersistentProperty(new QualifiedName(Activator.PLUGIN_ID, "hadoop.version"), firstPage.hadoopVersionText);
 						project.setDescription(description, new NullProgressMonitor());
 
 						String[] natureIds = project.getDescription().getNatureIds();
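
The wizard now stamps each new project with both the runtime path and the chosen Hadoop version. A minimal sketch of reading the pair back later (names illustrative; getPersistentProperty declares CoreException):

	// Given some Map/Reduce project created by this wizard:
	String runtimePath = project.getPersistentProperty(
			new QualifiedName(Activator.PLUGIN_ID, "hadoop.runtime.path"));
	String hadoopVersion = project.getPersistentProperty(
			new QualifiedName(Activator.PLUGIN_ID, "hadoop.version"));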

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/bf1a4949/org.apache.hdt.ui/src/org/apache/hdt/ui/preferences/MapReducePreferencePage.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/preferences/MapReducePreferencePage.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/preferences/MapReducePreferencePage.java
index b653b10..b711f91 100644
--- a/org.apache.hdt.ui/src/org/apache/hdt/ui/preferences/MapReducePreferencePage.java
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/preferences/MapReducePreferencePage.java
@@ -17,7 +17,9 @@
  */
 package org.apache.hdt.ui.preferences;
 
+import org.apache.hdt.core.HadoopVersion;
 import org.apache.hdt.ui.Activator;
+import org.eclipse.jface.preference.ComboFieldEditor;
 import org.eclipse.jface.preference.DirectoryFieldEditor;
 import org.eclipse.jface.preference.FieldEditorPreferencePage;
 import org.eclipse.ui.IWorkbench;
@@ -54,6 +56,15 @@ public class MapReducePreferencePage extends FieldEditorPreferencePage
   public void createFieldEditors() {
     addField(new DirectoryFieldEditor(PreferenceConstants.P_PATH,
         "&Hadoop installation directory:", getFieldEditorParent()));
+    HadoopVersion[] versions = HadoopVersion.values();
+    String[][] values= new String[versions.length][2];
+    int pos=0;
+	for(HadoopVersion ver:versions){
+		values[pos][0]=values[pos][1]=ver.getDisplayName();
+		pos++;
+	}
+    addField(new ComboFieldEditor(PreferenceConstants.P_VERSION,
+            "&Hadoop Version:",values,getFieldEditorParent()));
 
   }
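
For context: JFace's ComboFieldEditor takes an N-by-2 array of {label, stored value} pairs, which is why the loop assigns the display name to both columns: the combo shows "1.1"/"2.2" and the very same string is stored under P_VERSION. Written out literally, the array built above amounts to:

	String[][] values = {
			{ "1.1", "1.1" },  // column 0: combo label, column 1: stored value
			{ "2.2", "2.2" } };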
 

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/bf1a4949/org.apache.hdt.ui/src/org/apache/hdt/ui/preferences/PreferenceConstants.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/preferences/PreferenceConstants.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/preferences/PreferenceConstants.java
index 4efcbdd..b0bfa48 100644
--- a/org.apache.hdt.ui/src/org/apache/hdt/ui/preferences/PreferenceConstants.java
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/preferences/PreferenceConstants.java
@@ -24,6 +24,8 @@ package org.apache.hdt.ui.preferences;
 public class PreferenceConstants {
 
   public static final String P_PATH = "pathPreference";
+  
+  public static final String P_VERSION = "versionPreference";
 
   // public static final String P_BOOLEAN = "booleanPreference";
   //


[15/27] git commit: HDT 7: - fixing bug: Cluster view loads only Hadoop 1.1-based clusters

Posted by rs...@apache.org.
 HDT 7:
  - fixing bug: Cluster view loads only Hadoop 1.1-based clusters


Project: http://git-wip-us.apache.org/repos/asf/incubator-hdt/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hdt/commit/9d78cbdb
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hdt/tree/9d78cbdb
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hdt/diff/9d78cbdb

Branch: refs/heads/hadoop-eclipse-merge
Commit: 9d78cbdb45294608954cf82863f869ab48104095
Parents: 34799ce
Author: Rahul Sharma <rs...@apache.org>
Authored: Wed Jun 11 10:54:11 2014 +0530
Committer: Rahul Sharma <rs...@apache.org>
Committed: Wed Jun 11 10:54:11 2014 +0530

----------------------------------------------------------------------
 .../hdt/core/launch/AbstractHadoopCluster.java  | 69 +++++++++++++++-
 .../hdt/hadoop/release/HadoopCluster.java       | 70 ++--------------
 .../hdt/hadoop2/release/HadoopCluster.java      | 84 +++-----------------
 .../internal/launch/HadoopLocationWizard.java   |  3 +
 4 files changed, 85 insertions(+), 141 deletions(-)
----------------------------------------------------------------------
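
The gist of the fix: the saved location file is now parsed generically first, the persisted hadoop-version property is read from the resulting map, and only then is the matching version-specific client instantiated (previously the default-version client was always created and handed the XML). A hedged sketch of the new entry point, with an illustrative path and the checked exceptions left to the call site:

	// Restores a saved location of either Hadoop version.
	File saved = new File(locationsDir, "MyCluster.xml"); // illustrative
	AbstractHadoopCluster cluster = AbstractHadoopCluster.createCluster(saved);
	System.out.println(cluster.getVersion()); // e.g. "1.1" or "2.2"

The file itself uses the standard Hadoop configuration shape that loadXML below expects: a <configuration> root whose <property> children carry <name>/<value> pairs.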


http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/9d78cbdb/org.apache.hdt.core/src/org/apache/hdt/core/launch/AbstractHadoopCluster.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/launch/AbstractHadoopCluster.java b/org.apache.hdt.core/src/org/apache/hdt/core/launch/AbstractHadoopCluster.java
index 47d00f4..e2fa064 100644
--- a/org.apache.hdt.core/src/org/apache/hdt/core/launch/AbstractHadoopCluster.java
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/launch/AbstractHadoopCluster.java
@@ -21,16 +21,27 @@ package org.apache.hdt.core.launch;
 import java.io.File;
 import java.io.IOException;
 import java.util.Collection;
+import java.util.HashMap;
 import java.util.Iterator;
+import java.util.Map;
 import java.util.Map.Entry;
 
+import javax.xml.parsers.DocumentBuilder;
+import javax.xml.parsers.DocumentBuilderFactory;
+import javax.xml.parsers.ParserConfigurationException;
+
 import org.apache.hdt.core.Activator;
-import org.apache.hdt.core.internal.HadoopManager;
 import org.apache.log4j.Logger;
 import org.eclipse.core.runtime.CoreException;
 import org.eclipse.core.runtime.IConfigurationElement;
 import org.eclipse.core.runtime.Platform;
 import org.eclipse.core.runtime.Status;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+import org.w3c.dom.Node;
+import org.w3c.dom.NodeList;
+import org.w3c.dom.Text;
+import org.xml.sax.SAXException;
 
 public abstract class AbstractHadoopCluster {
 	
@@ -68,15 +79,17 @@ public abstract class AbstractHadoopCluster {
 
 	abstract public String getState();
 
-	abstract public boolean loadFromXML(File file) throws IOException;
+	abstract protected boolean loadConfiguration(Map<String, String> configuration);
 	
 	abstract public boolean isAvailable() throws CoreException;
 	
 	abstract public String getVersion();
 	
 	public static AbstractHadoopCluster createCluster(File file) throws CoreException, IOException {
-		AbstractHadoopCluster hadoopCluster = createCluster(ConfProp.PI_HADOOP_VERSION.defVal);
-		hadoopCluster.loadFromXML(file);
+		Map<String, String> configuration = loadXML(file);
+		String version = configuration.get(ConfProp.PI_HADOOP_VERSION.name);
+		AbstractHadoopCluster hadoopCluster = createCluster(version!=null?version:ConfProp.PI_HADOOP_VERSION.defVal);
+		hadoopCluster.loadConfiguration(configuration);
 		return hadoopCluster;
 	}
 
@@ -97,6 +110,54 @@ public abstract class AbstractHadoopCluster {
 		hadoopCluster.load(existing);
 		return hadoopCluster;
 	}
+	
+	
+	protected static Map<String,String> loadXML(File file) {
+		DocumentBuilder builder;
+		Document document;
+		try {
+			builder = DocumentBuilderFactory.newInstance().newDocumentBuilder();
+			document = builder.parse(file);
+		} catch (ParserConfigurationException e) {
+			e.printStackTrace();
+			return null;
+		} catch (SAXException e) {
+			e.printStackTrace();
+			return null;
+		} catch (IOException e) {
+			e.printStackTrace();
+			return null;
+		}
+		Element root = document.getDocumentElement();
+		if (!"configuration".equals(root.getTagName()))
+			return null;
+		NodeList props = root.getChildNodes();
+		Map<String,String> configuration= new HashMap<String, String>();
+		for (int i = 0; i < props.getLength(); i++) {
+			Node propNode = props.item(i);
+			if (!(propNode instanceof Element))
+				continue;
+			Element prop = (Element) propNode;
+			if (!"property".equals(prop.getTagName()))
+				return null;
+			NodeList fields = prop.getChildNodes();
+			String attr = null;
+			String value = null;
+			for (int j = 0; j < fields.getLength(); j++) {
+				Node fieldNode = fields.item(j);
+				if (!(fieldNode instanceof Element))
+					continue;
+				Element field = (Element) fieldNode;
+				if ("name".equals(field.getTagName()))
+					attr = ((Text) field.getFirstChild()).getData();
+				if ("value".equals(field.getTagName()) && field.hasChildNodes())
+					value = ((Text) field.getFirstChild()).getData();
+			}
+			if (attr != null && value != null)
+				configuration.put(attr, value);
+		}
+		return configuration;
+	}
 
 	/**
 	 * @param propName

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/9d78cbdb/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopCluster.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopCluster.java b/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopCluster.java
index 0014bb6..167ae29 100644
--- a/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopCluster.java
+++ b/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopCluster.java
@@ -248,18 +248,6 @@ public class HadoopCluster extends AbstractHadoopCluster {
 	}
 
 	/**
-	 * Creates a location from a file
-	 * 
-	 * @throws IOException
-	 * @throws SAXException
-	 * @throws ParserConfigurationException
-	 */
-	public HadoopCluster(File file) throws ParserConfigurationException, SAXException, IOException {
-		this();
-		this.loadFromXML(file);
-	}
-
-	/**
 	 * Create a new Hadoop location by copying an already existing one.
 	 * 
 	 * @param source
@@ -369,61 +357,13 @@ public class HadoopCluster extends AbstractHadoopCluster {
 		this.conf = new Configuration(((HadoopCluster) existing).conf);
 	}
 
-	/**
-	 * Overwrite this location with settings available in the given XML file.
-	 * The existing configuration is preserved if the XML file is invalid.
-	 * 
-	 * @param file
-	 *            the file path of the XML file
-	 * @return validity of the XML file
-	 * @throws ParserConfigurationException
-	 * @throws IOException
-	 * @throws SAXException
-	 */
-	public boolean loadFromXML(File file) {
-
+	protected boolean loadConfiguration(Map<String, String> configuration) {
 		Configuration newConf = new Configuration(this.conf);
-		DocumentBuilder builder;
-		Document document;
-		try {
-			builder = DocumentBuilderFactory.newInstance().newDocumentBuilder();
-			document = builder.parse(file);
-		} catch (ParserConfigurationException e) {
-			e.printStackTrace();
-			return false;
-		} catch (SAXException e) {
-			e.printStackTrace();
+		if (configuration == null)
 			return false;
-		} catch (IOException e) {
-			e.printStackTrace();
-			return false;
-		}
-		Element root = document.getDocumentElement();
-		if (!"configuration".equals(root.getTagName()))
-			return false;
-		NodeList props = root.getChildNodes();
-		for (int i = 0; i < props.getLength(); i++) {
-			Node propNode = props.item(i);
-			if (!(propNode instanceof Element))
-				continue;
-			Element prop = (Element) propNode;
-			if (!"property".equals(prop.getTagName()))
-				return false;
-			NodeList fields = prop.getChildNodes();
-			String attr = null;
-			String value = null;
-			for (int j = 0; j < fields.getLength(); j++) {
-				Node fieldNode = fields.item(j);
-				if (!(fieldNode instanceof Element))
-					continue;
-				Element field = (Element) fieldNode;
-				if ("name".equals(field.getTagName()))
-					attr = ((Text) field.getFirstChild()).getData();
-				if ("value".equals(field.getTagName()) && field.hasChildNodes())
-					value = ((Text) field.getFirstChild()).getData();
-			}
-			if (attr != null && value != null)
-				newConf.set(attr, value);
+		
+		for (Entry<String, String> entry : configuration.entrySet()) {
+			newConf.set(entry.getKey(), entry.getValue());
 		}
 
 		this.conf = newConf;

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/9d78cbdb/org.apache.hdt.hadoop2.release/src/org/apache/hdt/hadoop2/release/HadoopCluster.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop2.release/src/org/apache/hdt/hadoop2/release/HadoopCluster.java b/org.apache.hdt.hadoop2.release/src/org/apache/hdt/hadoop2/release/HadoopCluster.java
index b200a9f..56f1880 100644
--- a/org.apache.hdt.hadoop2.release/src/org/apache/hdt/hadoop2/release/HadoopCluster.java
+++ b/org.apache.hdt.hadoop2.release/src/org/apache/hdt/hadoop2/release/HadoopCluster.java
@@ -248,18 +248,6 @@ public class HadoopCluster extends AbstractHadoopCluster {
         }
    
 	/**
-	 * Creates a location from a file
-	 * 
-	 * @throws IOException
-	 * @throws SAXException
-	 * @throws ParserConfigurationException
-	 */
-	public HadoopCluster(File file) throws ParserConfigurationException, SAXException, IOException {
-		this();
-		this.loadFromXML(file);
-	}
-
-	/**
 	 * Create a new Hadoop location by copying an already existing one.
 	 * 
 	 * @param source
@@ -370,66 +358,18 @@ public class HadoopCluster extends AbstractHadoopCluster {
 		this.conf = new Configuration(((HadoopCluster) existing).conf);
 	}
 
-	/**
-	 * Overwrite this location with settings available in the given XML file.
-	 * The existing configuration is preserved if the XML file is invalid.
-	 * 
-	 * @param file
-	 *            the file path of the XML file
-	 * @return validity of the XML file
-	 * @throws ParserConfigurationException
-	 * @throws IOException
-	 * @throws SAXException
-	 */
-	public boolean loadFromXML(File file) {
-
-		Configuration newConf = new Configuration(this.conf);
-		DocumentBuilder builder;
-		Document document;
-		try {
-			builder = DocumentBuilderFactory.newInstance().newDocumentBuilder();
-			document = builder.parse(file);
-		} catch (ParserConfigurationException e) {
-			e.printStackTrace();
-			return false;
-		} catch (SAXException e) {
-			e.printStackTrace();
-			return false;
-		} catch (IOException e) {
-			e.printStackTrace();
-			return false;
-		}
-		Element root = document.getDocumentElement();
-		if (!"configuration".equals(root.getTagName()))
-			return false;
-		NodeList props = root.getChildNodes();
-		for (int i = 0; i < props.getLength(); i++) {
-			Node propNode = props.item(i);
-			if (!(propNode instanceof Element))
-				continue;
-			Element prop = (Element) propNode;
-			if (!"property".equals(prop.getTagName()))
-				return false;
-			NodeList fields = prop.getChildNodes();
-			String attr = null;
-			String value = null;
-			for (int j = 0; j < fields.getLength(); j++) {
-				Node fieldNode = fields.item(j);
-				if (!(fieldNode instanceof Element))
-					continue;
-				Element field = (Element) fieldNode;
-				if ("name".equals(field.getTagName()))
-					attr = ((Text) field.getFirstChild()).getData();
-				if ("value".equals(field.getTagName()) && field.hasChildNodes())
-					value = ((Text) field.getFirstChild()).getData();
-			}
-			if (attr != null && value != null)
-				newConf.set(attr, value);
-		}
-
-		this.conf = newConf;
-		return true;
-	}
+	protected boolean loadConfiguration(Map<String, String> configuration) {
+		Configuration newConf = new Configuration(this.conf);
+		if (configuration == null)
+			return false;
+		for (Entry<String, String> entry : configuration.entrySet()) {
+			newConf.set(entry.getKey(), entry.getValue());
+		}
+
+		this.conf = newConf;
+		return true;
+	}
 
 	/**
 	 * Sets a Hadoop configuration property value

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/9d78cbdb/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/HadoopLocationWizard.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/HadoopLocationWizard.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/HadoopLocationWizard.java
index 7f3cbfb..bcf5944 100644
--- a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/HadoopLocationWizard.java
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/HadoopLocationWizard.java
@@ -684,6 +684,8 @@ public class HadoopLocationWizard extends WizardPage {
 								public void run() {
 									try {
 										location = AbstractHadoopCluster.createCluster(selection);
+										location.setConfPropValue(ConfProp.PI_HADOOP_VERSION, selection);
+										location.setConfPropValue(ConfProp.PI_LOCATION_NAME, "");
 										for (TabListener tab : mediator.tabs) {
 											tab.reloadData();
 										}
@@ -854,6 +856,7 @@ public class HadoopLocationWizard extends WizardPage {
 			notifyChange(ConfProp.PI_SOCKS_PROXY_ENABLE,location.getConfPropValue(ConfProp.PI_SOCKS_PROXY_ENABLE));
 			notifyChange(ConfProp.PI_SOCKS_PROXY_HOST,location.getConfPropValue(ConfProp.PI_SOCKS_PROXY_HOST));
 			notifyChange(ConfProp.PI_SOCKS_PROXY_PORT,location.getConfPropValue(ConfProp.PI_SOCKS_PROXY_PORT));
+			notifyChange(ConfProp.PI_LOCATION_NAME,location.getConfPropValue(ConfProp.PI_LOCATION_NAME));
 		}
 
 		public void notifyChange(ConfProp prop, String propValue) {


[27/27] git commit: - Incrementing version for the next development cycle

Posted by rs...@apache.org.
- Incrementing version for the next development cycle


Project: http://git-wip-us.apache.org/repos/asf/incubator-hdt/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hdt/commit/66900960
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hdt/tree/66900960
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hdt/diff/66900960

Branch: refs/heads/hadoop-eclipse-merge-development
Commit: 669009605aa3d36128fbbbe69c28afffbf375e7d
Parents: d04238c
Author: Rahul Sharma <rs...@apache.org>
Authored: Thu Jun 26 12:09:52 2014 +0530
Committer: Rahul Sharma <rs...@apache.org>
Committed: Thu Jun 26 12:09:52 2014 +0530

----------------------------------------------------------------------
 org.apache.hdt.core/META-INF/MANIFEST.MF            |  2 +-
 org.apache.hdt.core/pom.xml                         |  2 +-
 org.apache.hdt.dist/pom.xml                         |  2 +-
 org.apache.hdt.feature/feature.xml                  | 10 +++++-----
 org.apache.hdt.feature/pom.xml                      |  2 +-
 org.apache.hdt.hadoop.release/META-INF/MANIFEST.MF  |  2 +-
 org.apache.hdt.hadoop.release/pom.xml               |  2 +-
 org.apache.hdt.hadoop2.release/META-INF/MANIFEST.MF |  2 +-
 org.apache.hdt.hadoop2.release/pom.xml              |  2 +-
 org.apache.hdt.ui.test/META-INF/MANIFEST.MF         |  2 +-
 org.apache.hdt.ui.test/pom.xml                      |  2 +-
 org.apache.hdt.ui/META-INF/MANIFEST.MF              |  2 +-
 org.apache.hdt.ui/pom.xml                           |  2 +-
 org.apache.hdt.updateSite/pom.xml                   |  2 +-
 pom.xml                                             |  2 +-
 15 files changed, 19 insertions(+), 19 deletions(-)
----------------------------------------------------------------------
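
One detail worth calling out in the diffs below: the OSGi manifests move to 0.0.3.qualifier while the poms move to 0.0.3-SNAPSHOT. Under Tycho these denote the same development version: .qualifier is the OSGi counterpart of Maven's -SNAPSHOT, so the two suffixes must advance in lockstep, which is exactly what this commit does across all fifteen files.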


http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/66900960/org.apache.hdt.core/META-INF/MANIFEST.MF
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/META-INF/MANIFEST.MF b/org.apache.hdt.core/META-INF/MANIFEST.MF
index bbe6c84..0de895c 100644
--- a/org.apache.hdt.core/META-INF/MANIFEST.MF
+++ b/org.apache.hdt.core/META-INF/MANIFEST.MF
@@ -2,7 +2,7 @@ Manifest-Version: 1.0
 Bundle-ManifestVersion: 2
 Bundle-Name: Apache Hadoop Eclipse Plugin
 Bundle-SymbolicName: org.apache.hdt.core;singleton:=true
-Bundle-Version: 0.0.2.incubating
+Bundle-Version: 0.0.3.qualifier
 Bundle-Activator: org.apache.hdt.core.Activator
 Require-Bundle: org.eclipse.core.runtime,
  org.eclipse.core.filesystem;bundle-version="1.3.0";visibility:=reexport,

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/66900960/org.apache.hdt.core/pom.xml
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/pom.xml b/org.apache.hdt.core/pom.xml
index 095e1dc..82b2247 100644
--- a/org.apache.hdt.core/pom.xml
+++ b/org.apache.hdt.core/pom.xml
@@ -23,7 +23,7 @@ under the License.
     <relativePath>../pom.xml</relativePath>
     <groupId>org.apache.hdt</groupId>
     <artifactId>hdt.master</artifactId>
-    <version>0.0.2.incubating</version>
+    <version>0.0.3-SNAPSHOT</version>
   </parent>
 
   <artifactId>org.apache.hdt.core</artifactId>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/66900960/org.apache.hdt.dist/pom.xml
----------------------------------------------------------------------
diff --git a/org.apache.hdt.dist/pom.xml b/org.apache.hdt.dist/pom.xml
index ef1af8e..72d9465 100644
--- a/org.apache.hdt.dist/pom.xml
+++ b/org.apache.hdt.dist/pom.xml
@@ -22,7 +22,7 @@
   <parent>
     <groupId>org.apache.hdt</groupId>
     <artifactId>hdt.master</artifactId>
-    <version>0.0.2.incubating</version>
+    <version>0.0.3-SNAPSHOT</version>
   </parent>
   <artifactId>org.apache.hdt.dist</artifactId>
   <name>Apache Hadoop Development Tools Distribution</name>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/66900960/org.apache.hdt.feature/feature.xml
----------------------------------------------------------------------
diff --git a/org.apache.hdt.feature/feature.xml b/org.apache.hdt.feature/feature.xml
index 9a1253a..dc059b7 100644
--- a/org.apache.hdt.feature/feature.xml
+++ b/org.apache.hdt.feature/feature.xml
@@ -2,7 +2,7 @@
 <feature
       id="org.apache.hdt.feature"
       label="Hadoop Development Tools"
-      version="0.0.2.incubating"
+      version="0.0.3.qualifier"
       provider-name="Apache Software Foundation">
 
    <description url="http://hdt.incubator.apache.org/">
@@ -40,7 +40,7 @@ permissions and limitations under the License.
          id="org.apache.hdt.hadoop.release"
          download-size="0"
          install-size="0"
-         version="0.0.2.incubating"
+         version="0.0.3.qualifier"
          fragment="true"
          unpack="false"/>
 
@@ -48,7 +48,7 @@ permissions and limitations under the License.
          id="org.apache.hdt.hadoop2.release"
          download-size="0"
          install-size="0"
-         version="0.0.2.incubating"
+         version="0.0.3.qualifier"
          fragment="true"
          unpack="false"/>
 
@@ -56,14 +56,14 @@ permissions and limitations under the License.
          id="org.apache.hdt.ui"
          download-size="0"
          install-size="0"
-         version="0.0.2.incubating"
+         version="0.0.3.qualifier"
          unpack="false"/>
          
    <plugin
          id="org.apache.hdt.core"
          download-size="0"
          install-size="0"
-         version="0.0.2.incubating"
+         version="0.0.3.qualifier"
          unpack="false"/>
 
 </feature>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/66900960/org.apache.hdt.feature/pom.xml
----------------------------------------------------------------------
diff --git a/org.apache.hdt.feature/pom.xml b/org.apache.hdt.feature/pom.xml
index 19a5a18..0598dfa 100644
--- a/org.apache.hdt.feature/pom.xml
+++ b/org.apache.hdt.feature/pom.xml
@@ -22,7 +22,7 @@ under the License.
     <relativePath>../pom.xml</relativePath>
     <groupId>org.apache.hdt</groupId>
     <artifactId>hdt.master</artifactId>
-    <version>0.0.2.incubating</version>
+    <version>0.0.3-SNAPSHOT</version>
   </parent>
   
   <artifactId>org.apache.hdt.feature</artifactId>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/66900960/org.apache.hdt.hadoop.release/META-INF/MANIFEST.MF
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop.release/META-INF/MANIFEST.MF b/org.apache.hdt.hadoop.release/META-INF/MANIFEST.MF
index 0f56f4b..3f081f5 100644
--- a/org.apache.hdt.hadoop.release/META-INF/MANIFEST.MF
+++ b/org.apache.hdt.hadoop.release/META-INF/MANIFEST.MF
@@ -2,7 +2,7 @@ Manifest-Version: 1.0
 Bundle-ManifestVersion: 2
 Bundle-Name: Apache Hadoop 0.0.1.qualifier Release Eclipse Plugin
 Bundle-SymbolicName: org.apache.hdt.hadoop.release;singleton:=true
-Bundle-Version: 0.0.2.incubating
+Bundle-Version: 0.0.3.qualifier
 Bundle-Vendor: Apache Hadoop
 Bundle-RequiredExecutionEnvironment: JavaSE-1.6
 Require-Bundle: org.apache.hdt.core,

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/66900960/org.apache.hdt.hadoop.release/pom.xml
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop.release/pom.xml b/org.apache.hdt.hadoop.release/pom.xml
index e08e28c..e3322a8 100644
--- a/org.apache.hdt.hadoop.release/pom.xml
+++ b/org.apache.hdt.hadoop.release/pom.xml
@@ -22,7 +22,7 @@ under the License.
     <relativePath>../pom.xml</relativePath>
     <groupId>org.apache.hdt</groupId>
     <artifactId>hdt.master</artifactId>
-    <version>0.0.2.incubating</version>
+    <version>0.0.3-SNAPSHOT</version>
   </parent>
   <artifactId>org.apache.hdt.hadoop.release</artifactId>
   <packaging>eclipse-plugin</packaging>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/66900960/org.apache.hdt.hadoop2.release/META-INF/MANIFEST.MF
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop2.release/META-INF/MANIFEST.MF b/org.apache.hdt.hadoop2.release/META-INF/MANIFEST.MF
index 3a6e69f..37a1143 100644
--- a/org.apache.hdt.hadoop2.release/META-INF/MANIFEST.MF
+++ b/org.apache.hdt.hadoop2.release/META-INF/MANIFEST.MF
@@ -2,7 +2,7 @@ Manifest-Version: 1.0
 Bundle-ManifestVersion: 2
 Bundle-Name: Apache Hadoop2 Release Eclipse Plugin
 Bundle-SymbolicName: org.apache.hdt.hadoop2.release;singleton:=true
-Bundle-Version: 0.0.2.incubating
+Bundle-Version: 0.0.3.qualifier
 Bundle-Vendor: Apache Hadoop
 Bundle-RequiredExecutionEnvironment: JavaSE-1.6
 Require-Bundle: org.apache.hdt.core,

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/66900960/org.apache.hdt.hadoop2.release/pom.xml
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop2.release/pom.xml b/org.apache.hdt.hadoop2.release/pom.xml
index 74db93b..16bda11 100644
--- a/org.apache.hdt.hadoop2.release/pom.xml
+++ b/org.apache.hdt.hadoop2.release/pom.xml
@@ -22,7 +22,7 @@ under the License.
     <relativePath>../pom.xml</relativePath>
     <groupId>org.apache.hdt</groupId>
     <artifactId>hdt.master</artifactId>
-    <version>0.0.2.incubating</version>
+    <version>0.0.3-SNAPSHOT</version>
   </parent>
   <artifactId>org.apache.hdt.hadoop2.release</artifactId>
   <packaging>eclipse-plugin</packaging>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/66900960/org.apache.hdt.ui.test/META-INF/MANIFEST.MF
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui.test/META-INF/MANIFEST.MF b/org.apache.hdt.ui.test/META-INF/MANIFEST.MF
index 01bbaee..74c41fb 100644
--- a/org.apache.hdt.ui.test/META-INF/MANIFEST.MF
+++ b/org.apache.hdt.ui.test/META-INF/MANIFEST.MF
@@ -2,7 +2,7 @@ Manifest-Version: 1.0
 Bundle-ManifestVersion: 2
 Bundle-Name: Apache Hadoop UI Test Eclipse Plugin
 Bundle-SymbolicName: org.apache.hdt.ui.test;singleton:=true
-Bundle-Version: 0.0.2.incubating
+Bundle-Version: 0.0.3.qualifier
 Bundle-Activator: org.apache.hdt.ui.test.Activator
 Bundle-Vendor: Apache Hadoop
 Require-Bundle: org.eclipse.ui,

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/66900960/org.apache.hdt.ui.test/pom.xml
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui.test/pom.xml b/org.apache.hdt.ui.test/pom.xml
index 5b10a29..275be1f 100644
--- a/org.apache.hdt.ui.test/pom.xml
+++ b/org.apache.hdt.ui.test/pom.xml
@@ -24,7 +24,7 @@ under the License.
     <relativePath>../pom.xml</relativePath>
     <groupId>org.apache.hdt</groupId>
     <artifactId>hdt.master</artifactId>
-    <version>0.0.2.incubating</version>
+    <version>0.0.3-SNAPSHOT</version>
   </parent>
 
   <artifactId>org.apache.hdt.ui.test</artifactId>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/66900960/org.apache.hdt.ui/META-INF/MANIFEST.MF
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/META-INF/MANIFEST.MF b/org.apache.hdt.ui/META-INF/MANIFEST.MF
index 86b811a..3c17c86 100644
--- a/org.apache.hdt.ui/META-INF/MANIFEST.MF
+++ b/org.apache.hdt.ui/META-INF/MANIFEST.MF
@@ -2,7 +2,7 @@ Manifest-Version: 1.0
 Bundle-ManifestVersion: 2
 Bundle-Name: Apache Hadoop UI Eclipse Plugin
 Bundle-SymbolicName: org.apache.hdt.ui;singleton:=true
-Bundle-Version: 0.0.2.incubating
+Bundle-Version: 0.0.3.qualifier
 Bundle-Activator: org.apache.hdt.ui.Activator
 Bundle-Vendor: Apache Hadoop
 Require-Bundle: org.eclipse.core.runtime,

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/66900960/org.apache.hdt.ui/pom.xml
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/pom.xml b/org.apache.hdt.ui/pom.xml
index 0df6a60..8c96c8c 100644
--- a/org.apache.hdt.ui/pom.xml
+++ b/org.apache.hdt.ui/pom.xml
@@ -22,7 +22,7 @@ under the License.
     <relativePath>../pom.xml</relativePath>
     <groupId>org.apache.hdt</groupId>
     <artifactId>hdt.master</artifactId>
-    <version>0.0.2.incubating</version>
+    <version>0.0.3-SNAPSHOT</version>
   </parent>
   <artifactId>org.apache.hdt.ui</artifactId>
   <packaging>eclipse-plugin</packaging>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/66900960/org.apache.hdt.updateSite/pom.xml
----------------------------------------------------------------------
diff --git a/org.apache.hdt.updateSite/pom.xml b/org.apache.hdt.updateSite/pom.xml
index f3a33e8..8f8ee26 100644
--- a/org.apache.hdt.updateSite/pom.xml
+++ b/org.apache.hdt.updateSite/pom.xml
@@ -23,7 +23,7 @@ under the License.
     <relativePath>../pom.xml</relativePath>
     <groupId>org.apache.hdt</groupId>
     <artifactId>hdt.master</artifactId>
-    <version>0.0.2.incubating</version>
+    <version>0.0.3-SNAPSHOT</version>
   </parent>
   
   <artifactId>org.apache.hdt.updateSite</artifactId>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/66900960/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 3c1ce87..cd8bf5c 100644
--- a/pom.xml
+++ b/pom.xml
@@ -25,7 +25,7 @@ under the License.
   </parent>
   <groupId>org.apache.hdt</groupId>
   <artifactId>hdt.master</artifactId>
-  <version>0.0.2.incubating</version>
+  <version>0.0.3-SNAPSHOT</version>
   <packaging>pom</packaging>
   <name>Apache Hadoop Development Tools</name>
   <description>Eclipse tools for developing against the Hadoop platform</description>


[17/27] git commit: HDT 7: - extraction of the UI for each version into individual plugins. - cleaning up the existing UI

Posted by rs...@apache.org.
 HDT 7:
  - extraction of the UI for each version into individual plugins.
  - cleaning up the existing UI


Project: http://git-wip-us.apache.org/repos/asf/incubator-hdt/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hdt/commit/c308e976
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hdt/tree/c308e976
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hdt/diff/c308e976

Branch: refs/heads/hadoop-eclipse-merge
Commit: c308e976b39c4a78fe991f4652bc40e3dcf7e4a4
Parents: 9d78cbd
Author: Rahul Sharma <rs...@apache.org>
Authored: Sat Jun 14 20:08:19 2014 +0530
Committer: Rahul Sharma <rs...@apache.org>
Committed: Mon Jun 16 11:07:28 2014 +0530

----------------------------------------------------------------------
 .../src/org/apache/hdt/core/HadoopVersion.java  |  34 +
 .../hdt/core/launch/AbstractHadoopCluster.java  |  49 +-
 .../org/apache/hdt/core/launch/ConfProp.java    |  16 +-
 .../hdt/hadoop/release/HadoopCluster.java       |  26 +-
 .../release/HadoopV1ConfigurationBuilder.java   | 690 +++++++++++++++
 .../hdt/hadoop2/release/HadoopCluster.java      |  42 +-
 .../release/HadoopV2ConfigurationBuilder.java   | 771 +++++++++++++++++
 .../internal/hdfs/NewHDFSServerWizardPage.java  |   7 +-
 .../internal/launch/HadoopLocationWizard.java   | 859 +++----------------
 9 files changed, 1664 insertions(+), 830 deletions(-)
----------------------------------------------------------------------
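
The key seam introduced here is the HadoopConfigurationBuilder interface: HadoopLocationWizard no longer hard-codes Hadoop 1.x controls, it asks the cluster object for a version-specific builder. A hedged sketch of the intended call pattern (panel and listener come from the hosting wizard; names illustrative):

	AbstractHadoopCluster location = AbstractHadoopCluster.createCluster("2.2");
	AbstractHadoopCluster.HadoopConfigurationBuilder ui = location.getUIConfigurationBuilder();
	ui.setChangeListener(listener); // a wizard-side ChangeListener
	ui.buildControl(panel);         // panel: an SWT Composite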


http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/c308e976/org.apache.hdt.core/src/org/apache/hdt/core/HadoopVersion.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/HadoopVersion.java b/org.apache.hdt.core/src/org/apache/hdt/core/HadoopVersion.java
new file mode 100644
index 0000000..020b7d9
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/HadoopVersion.java
@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hdt.core;
+
+
+public enum HadoopVersion {
+	Version1("1.1"), Version2("2.2");
+
+	private String displayName;
+
+	private HadoopVersion(String displayName) {
+		this.displayName = displayName;
+	}
+	
+	public String getDisplayName() {
+		return displayName;
+	}
+}
+
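
The display names above do double duty: besides labeling the UI, "1.1" and "2.2" are the strings matched (case-insensitively) against the protocolVersion attribute of org.apache.hdt.core.hadoopCluster extensions in AbstractHadoopCluster.createCluster(String), as the next diff shows.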

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/c308e976/org.apache.hdt.core/src/org/apache/hdt/core/launch/AbstractHadoopCluster.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/launch/AbstractHadoopCluster.java b/org.apache.hdt.core/src/org/apache/hdt/core/launch/AbstractHadoopCluster.java
index e2fa064..57862ef 100644
--- a/org.apache.hdt.core/src/org/apache/hdt/core/launch/AbstractHadoopCluster.java
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/launch/AbstractHadoopCluster.java
@@ -31,11 +31,13 @@ import javax.xml.parsers.DocumentBuilderFactory;
 import javax.xml.parsers.ParserConfigurationException;
 
 import org.apache.hdt.core.Activator;
+import org.apache.hdt.core.HadoopVersion;
 import org.apache.log4j.Logger;
 import org.eclipse.core.runtime.CoreException;
 import org.eclipse.core.runtime.IConfigurationElement;
 import org.eclipse.core.runtime.Platform;
 import org.eclipse.core.runtime.Status;
+import org.eclipse.swt.widgets.Composite;
 import org.w3c.dom.Document;
 import org.w3c.dom.Element;
 import org.w3c.dom.Node;
@@ -44,7 +46,19 @@ import org.w3c.dom.Text;
 import org.xml.sax.SAXException;
 
 public abstract class AbstractHadoopCluster {
-	
+
+	public interface ChangeListener {
+		void notifyChange(ConfProp prop, String propValue);
+	}
+
+	public interface HadoopConfigurationBuilder {
+		void buildControl(Composite panel);
+
+		void notifyChange(ConfProp confProp, String text);
+
+		void setChangeListener(ChangeListener l);
+	}
+
 	private static final Logger logger = Logger.getLogger(AbstractHadoopCluster.class);
 
 	abstract public String getLocationName();
@@ -68,7 +82,7 @@ public abstract class AbstractHadoopCluster {
 	abstract public void setConfPropValue(ConfProp prop, String propValue);
 
 	abstract public void setConfPropValue(String propName, String propValue);
-	
+
 	abstract public Iterator<Entry<String, String>> getConfiguration();
 
 	abstract public void purgeJob(IHadoopJob job);
@@ -80,39 +94,40 @@ public abstract class AbstractHadoopCluster {
 	abstract public String getState();
 
 	abstract protected boolean loadConfiguration(Map<String, String> configuration);
-	
+
 	abstract public boolean isAvailable() throws CoreException;
-	
-	abstract public String getVersion();
-	
+
+	abstract public HadoopVersion getVersion();
+
+	abstract public HadoopConfigurationBuilder getUIConfigurationBuilder();
+
 	public static AbstractHadoopCluster createCluster(File file) throws CoreException, IOException {
 		Map<String, String> configuration = loadXML(file);
 		String version = configuration.get(ConfProp.PI_HADOOP_VERSION.name);
-		AbstractHadoopCluster hadoopCluster = createCluster(version!=null?version:ConfProp.PI_HADOOP_VERSION.defVal);
+		AbstractHadoopCluster hadoopCluster = createCluster(version != null ? version : ConfProp.PI_HADOOP_VERSION.defVal);
 		hadoopCluster.loadConfiguration(configuration);
 		return hadoopCluster;
 	}
 
 	public static AbstractHadoopCluster createCluster(String hadoopVersion) throws CoreException {
-		logger.debug("Creating client for version "+hadoopVersion); 
+		logger.debug("Creating client for version " + hadoopVersion);
 		IConfigurationElement[] elementsFor = Platform.getExtensionRegistry().getConfigurationElementsFor("org.apache.hdt.core.hadoopCluster");
 		for (IConfigurationElement configElement : elementsFor) {
 			String version = configElement.getAttribute("protocolVersion");
-			if(version.equalsIgnoreCase(hadoopVersion)){
-				return (AbstractHadoopCluster)configElement.createExecutableExtension("class");
+			if (version.equalsIgnoreCase(hadoopVersion)) {
+				return (AbstractHadoopCluster) configElement.createExecutableExtension("class");
 			}
 		}
-		throw new CoreException(new Status(Status.ERROR,Activator.BUNDLE_ID,"No clinet found for hadoop version "+hadoopVersion));
+		throw new CoreException(new Status(Status.ERROR, Activator.BUNDLE_ID, "No client found for Hadoop version " + hadoopVersion));
 	}
 
 	public static AbstractHadoopCluster createCluster(AbstractHadoopCluster existing) throws CoreException {
-		AbstractHadoopCluster hadoopCluster = createCluster(existing.getVersion());
+		AbstractHadoopCluster hadoopCluster = createCluster(existing.getVersion().getDisplayName());
 		hadoopCluster.load(existing);
 		return hadoopCluster;
 	}
-	
-	
-	protected static Map<String,String> loadXML(File file) {
+
+	protected static Map<String, String> loadXML(File file) {
 		DocumentBuilder builder;
 		Document document;
 		try {
@@ -132,7 +147,7 @@ public abstract class AbstractHadoopCluster {
 		if (!"configuration".equals(root.getTagName()))
 			return null;
 		NodeList props = root.getChildNodes();
-		Map<String,String> configuration= new HashMap<String, String>();
+		Map<String, String> configuration = new HashMap<String, String>();
 		for (int i = 0; i < props.getLength(); i++) {
 			Node propNode = props.item(i);
 			if (!(propNode instanceof Element))
@@ -166,7 +181,7 @@ public abstract class AbstractHadoopCluster {
 	public ConfProp getConfPropForName(String propName) {
 		return ConfProp.getByName(propName);
 	}
-	
+
 	public String getConfPropName(ConfProp prop) {
 		return prop.name;
 	}

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/c308e976/org.apache.hdt.core/src/org/apache/hdt/core/launch/ConfProp.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/launch/ConfProp.java b/org.apache.hdt.core/src/org/apache/hdt/core/launch/ConfProp.java
index 8b91dbe..b23adf9 100644
--- a/org.apache.hdt.core/src/org/apache/hdt/core/launch/ConfProp.java
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/launch/ConfProp.java
@@ -36,6 +36,10 @@ public enum ConfProp {
 	 * Property name for the master host name (the Job tracker)
 	 */
 	PI_JOB_TRACKER_HOST(true, "jobtracker.host", "localhost"),
+	
+	PI_RESOURCE_MGR_HOST(true, "rm.host", "localhost"),
+	
+	PI_JOB_HISTORY_HOST(true, "jobhistory.host", "localhost"),
 
 	/**
 	 * Property name for the DFS master host name (the Name node)
@@ -43,10 +47,6 @@ public enum ConfProp {
 	PI_NAME_NODE_HOST(true, "namenode.host", "localhost"),
 
 	/**
-	 * Property name for the installation directory on the master node
-	 */
-	// PI_INSTALL_DIR(true, "install.dir", "/dir/hadoop-version/"),
-	/**
 	 * User name to use for Hadoop operations
 	 */
 	PI_USER_NAME(true, "user.name", System.getProperty("user.name", "who are you?")),
@@ -75,6 +75,10 @@ public enum ConfProp {
 	 * TCP port number for the job tracker
 	 */
 	PI_JOB_TRACKER_PORT(true, "jobtracker.port", "50020"),
+	
+	PI_RESOURCE_MGR_PORT(true, "rm.port", "8032"),
+	
+	PI_JOB_HISTORY_PORT(true, "jobhistory.port", "10020"),
 
 	/**
 	 * Are the Map/Reduce and the Distributed FS masters hosted on the same
@@ -92,6 +96,10 @@ public enum ConfProp {
 	 * Property name for naming the default file system (URI).
 	 */
 	FS_DEFAULT_URI(false, "fs.default.name", "hdfs://localhost:50040/"),
+	
+	RM_DEFAULT_URI(false, "yarn.resourcemanager.address", "localhost:8032"),
+	
+	JOB_HISTORY_DEFAULT_URI(false, "mapreduce.jobhistory.address", "localhost:10020"),
 
 	/**
 	 * Property name for the default socket factory:

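The new wizard-side properties pair with real Hadoop 2 keys further down the enum: rm.host/rm.port back yarn.resourcemanager.address (default localhost:8032) and jobhistory.host/jobhistory.port back mapreduce.jobhistory.address (default localhost:10020), mirroring how the jobtracker.* properties back the classic JobTracker URI.
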
http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/c308e976/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopCluster.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopCluster.java b/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopCluster.java
index 167ae29..54c5500 100644
--- a/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopCluster.java
+++ b/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopCluster.java
@@ -28,22 +28,17 @@ import java.util.Collections;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.Map;
-import java.util.ServiceLoader;
 import java.util.Map.Entry;
+import java.util.ServiceLoader;
 import java.util.Set;
 import java.util.TreeMap;
 import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
 import java.util.logging.Logger;
 
-import javax.xml.parsers.DocumentBuilder;
-import javax.xml.parsers.DocumentBuilderFactory;
-import javax.xml.parsers.ParserConfigurationException;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.io.IOUtils;
@@ -53,6 +48,7 @@ import org.apache.hadoop.mapred.JobID;
 import org.apache.hadoop.mapred.JobStatus;
 import org.apache.hadoop.mapred.RunningJob;
 import org.apache.hdt.core.Activator;
+import org.apache.hdt.core.HadoopVersion;
 import org.apache.hdt.core.launch.AbstractHadoopCluster;
 import org.apache.hdt.core.launch.ConfProp;
 import org.apache.hdt.core.launch.IHadoopJob;
@@ -64,12 +60,6 @@ import org.eclipse.core.runtime.IStatus;
 import org.eclipse.core.runtime.Status;
 import org.eclipse.core.runtime.jobs.Job;
 import org.eclipse.swt.widgets.Display;
-import org.w3c.dom.Document;
-import org.w3c.dom.Element;
-import org.w3c.dom.Node;
-import org.w3c.dom.NodeList;
-import org.w3c.dom.Text;
-import org.xml.sax.SAXException;
 
 /**
  * Representation of a Hadoop location, meaning of the master node (NameNode,
@@ -543,7 +533,15 @@ public class HadoopCluster extends AbstractHadoopCluster {
 	 * @see org.apache.hdt.core.launch.AbstractHadoopCluster#getVersion()
 	 */
 	@Override
-	public String getVersion() {
-		return "1.1";
+	public HadoopVersion getVersion() {
+		return HadoopVersion.Version1;
+	}
+
+	/* (non-Javadoc)
+	 * @see org.apache.hdt.core.launch.AbstractHadoopCluster#getUIConfigurationBuilder()
+	 */
+	@Override
+	public HadoopConfigurationBuilder getUIConfigurationBuilder() {
+		return new HadoopV1ConfigurationBuilder(this);
 	}
 }

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/c308e976/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopV1ConfigurationBuilder.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopV1ConfigurationBuilder.java b/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopV1ConfigurationBuilder.java
new file mode 100644
index 0000000..fb5eace
--- /dev/null
+++ b/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopV1ConfigurationBuilder.java
@@ -0,0 +1,690 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.hadoop.release;
+
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.SortedMap;
+import java.util.TreeMap;
+
+import org.apache.hdt.core.launch.AbstractHadoopCluster;
+import org.apache.hdt.core.launch.AbstractHadoopCluster.ChangeListener;
+import org.apache.hdt.core.launch.AbstractHadoopCluster.HadoopConfigurationBuilder;
+import org.apache.hdt.core.launch.ConfProp;
+import org.eclipse.swt.SWT;
+import org.eclipse.swt.custom.ScrolledComposite;
+import org.eclipse.swt.events.ModifyEvent;
+import org.eclipse.swt.events.ModifyListener;
+import org.eclipse.swt.events.SelectionEvent;
+import org.eclipse.swt.events.SelectionListener;
+import org.eclipse.swt.layout.GridData;
+import org.eclipse.swt.layout.GridLayout;
+import org.eclipse.swt.widgets.Button;
+import org.eclipse.swt.widgets.Composite;
+import org.eclipse.swt.widgets.Control;
+import org.eclipse.swt.widgets.Display;
+import org.eclipse.swt.widgets.Group;
+import org.eclipse.swt.widgets.Label;
+import org.eclipse.swt.widgets.TabFolder;
+import org.eclipse.swt.widgets.TabItem;
+import org.eclipse.swt.widgets.Text;
+
+class HadoopV1ConfigurationBuilder implements HadoopConfigurationBuilder {
+
+	private AbstractHadoopCluster location;
+	private TabMediator mediator;
+	private ChangeListener changelistener;
+
+	public HadoopV1ConfigurationBuilder(AbstractHadoopCluster location) {
+		this.location = location;
+	}
+	@Override
+	public void buildControl(Composite panel) {
+		mediator = new TabMediator(panel);
+		GridData gdata = new GridData(GridData.FILL_BOTH);
+		gdata.horizontalSpan = 2;
+		mediator.folder.setLayoutData(gdata);
+	}
+
+
+	private class TabMediator {
+		TabFolder folder;
+		private Set<ChangeListener> tabs = new HashSet<ChangeListener>();
+
+		TabMediator(Composite parent) {
+			folder = new TabFolder(parent, SWT.NONE);
+			tabs.add(new TabMain(this));
+			tabs.add(new TabAdvanced(this));
+		}
+
+		/**
+		 * Implements change notifications from any tab: update the location
+		 * state and other tabs
+		 * 
+		 * @param source
+		 *            origin of the notification (one of the registered tabs)
+		 * @param propName
+		 *            modified property
+		 * @param propValue
+		 *            new value
+		 */
+		void notifyChange(ChangeListener source, final ConfProp prop, final String propValue) {
+			// Ignore notification when no change
+			String oldValue = location.getConfPropValue(prop);
+			if ((oldValue != null) && oldValue.equals(propValue))
+				return;
+
+			location.setConfPropValue(prop, propValue);
+			changelistener.notifyChange(prop, propValue);
+			this.fireChange(source, prop, propValue);
+
+			/*
+			 * Now we deal with dependencies between settings
+			 */
+			final String jobTrackerHost = location.getConfPropValue(ConfProp.PI_JOB_TRACKER_HOST);
+			final String jobTrackerPort = location.getConfPropValue(ConfProp.PI_JOB_TRACKER_PORT);
+			final String nameNodeHost = location.getConfPropValue(ConfProp.PI_NAME_NODE_HOST);
+			final String nameNodePort = location.getConfPropValue(ConfProp.PI_NAME_NODE_PORT);
+			final boolean colocate = location.getConfPropValue(ConfProp.PI_COLOCATE_MASTERS).equalsIgnoreCase("yes");
+			final String jobTrackerURI = location.getConfPropValue(ConfProp.JOB_TRACKER_URI);
+			final String fsDefaultURI = location.getConfPropValue(ConfProp.FS_DEFAULT_URI);
+			final String socksServerURI = location.getConfPropValue(ConfProp.SOCKS_SERVER);
+			final boolean socksProxyEnable = location.getConfPropValue(ConfProp.PI_SOCKS_PROXY_ENABLE).equalsIgnoreCase("yes");
+			final String socksProxyHost = location.getConfPropValue(ConfProp.PI_SOCKS_PROXY_HOST);
+			final String socksProxyPort = location.getConfPropValue(ConfProp.PI_SOCKS_PROXY_PORT);
+
+			Display.getDefault().syncExec(new Runnable() {
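+			// Dependent settings are reconciled below: host/port edits are
+			// folded into their URI form (e.g. job tracker host and port into
+			// JOB_TRACKER_URI), URI edits are split back into host/port, and
+			// enabling colocation copies the M/R master host to the DFS master.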
+				public void run() {
+					switch (prop) {
+					case PI_JOB_TRACKER_HOST: {
+						if (colocate)
+							notifyChange(null, ConfProp.PI_NAME_NODE_HOST, jobTrackerHost);
+						String newJobTrackerURI = String.format("%s:%s", jobTrackerHost, jobTrackerPort);
+						notifyChange(null, ConfProp.JOB_TRACKER_URI, newJobTrackerURI);
+						break;
+					}
+					case PI_JOB_TRACKER_PORT: {
+						String newJobTrackerURI = String.format("%s:%s", jobTrackerHost, jobTrackerPort);
+						notifyChange(null, ConfProp.JOB_TRACKER_URI, newJobTrackerURI);
+						break;
+					}
+					case PI_NAME_NODE_HOST: {
+						String newHDFSURI = String.format("hdfs://%s:%s/", nameNodeHost, nameNodePort);
+						notifyChange(null, ConfProp.FS_DEFAULT_URI, newHDFSURI);
+
+						// Break colocation if someone force the DFS Master
+						if (!colocate && !nameNodeHost.equals(jobTrackerHost))
+							notifyChange(null, ConfProp.PI_COLOCATE_MASTERS, "no");
+						break;
+					}
+					case PI_NAME_NODE_PORT: {
+						String newHDFSURI = String.format("hdfs://%s:%s/", nameNodeHost, nameNodePort);
+						notifyChange(null, ConfProp.FS_DEFAULT_URI, newHDFSURI);
+						break;
+					}
+					case PI_SOCKS_PROXY_HOST: {
+						String newSocksProxyURI = String.format("%s:%s", socksProxyHost, socksProxyPort);
+						notifyChange(null, ConfProp.SOCKS_SERVER, newSocksProxyURI);
+						break;
+					}
+					case PI_SOCKS_PROXY_PORT: {
+						String newSocksProxyURI = String.format("%s:%s", socksProxyHost, socksProxyPort);
+						notifyChange(null, ConfProp.SOCKS_SERVER, newSocksProxyURI);
+						break;
+					}
+					case JOB_TRACKER_URI: {
+						String[] strs = jobTrackerURI.split(":", 2);
+						String host = strs[0];
+						String port = (strs.length == 2) ? strs[1] : "";
+						notifyChange(null, ConfProp.PI_JOB_TRACKER_HOST, host);
+						notifyChange(null, ConfProp.PI_JOB_TRACKER_PORT, port);
+						break;
+					}
+					case FS_DEFAULT_URI: {
+						try {
+							URI uri = new URI(fsDefaultURI);
+							if (uri.getScheme().equals("hdfs")) {
+								String host = uri.getHost();
+								String port = Integer.toString(uri.getPort());
+								notifyChange(null, ConfProp.PI_NAME_NODE_HOST, host);
+								notifyChange(null, ConfProp.PI_NAME_NODE_PORT, port);
+							}
+						} catch (URISyntaxException use) {
+							// Ignore the update!
+						}
+						break;
+					}
+					case SOCKS_SERVER: {
+						String[] strs = socksServerURI.split(":", 2);
+						String host = strs[0];
+						String port = (strs.length == 2) ? strs[1] : "";
+						notifyChange(null, ConfProp.PI_SOCKS_PROXY_HOST, host);
+						notifyChange(null, ConfProp.PI_SOCKS_PROXY_PORT, port);
+						break;
+					}
+					case PI_COLOCATE_MASTERS: {
+						if (colocate)
+							notifyChange(null, ConfProp.PI_NAME_NODE_HOST, jobTrackerHost);
+						break;
+					}
+					case PI_SOCKS_PROXY_ENABLE: {
+						if (socksProxyEnable) {
+							notifyChange(null, ConfProp.SOCKET_FACTORY_DEFAULT, "org.apache.hadoop.net.SocksSocketFactory");
+						} else {
+							notifyChange(null, ConfProp.SOCKET_FACTORY_DEFAULT, "org.apache.hadoop.net.StandardSocketFactory");
+						}
+						break;
+					}
+					}
+				}
+			});
+
+		}
+
+		/**
+		 * Change notifications on properties (by name). A property might not be
+		 * reflected as a ConfProp enum. If it is, the notification is forwarded
+		 * to the ConfProp notifyChange method. If not, it is processed here.
+		 * 
+		 * @param source
+		 * @param propName
+		 * @param propValue
+		 */
+		void notifyChange(ChangeListener source, String propName, String propValue) {
+			ConfProp prop = location.getConfPropForName(propName);
+			if (prop != null)
+				notifyChange(source, prop, propValue);
+			else
+				location.setConfPropValue(propName, propValue);
+		}
+
+		/**
+		 * Broadcast a property change to all registered tabs. If a tab is
+		 * identified as the source of the change, this tab will not be
+		 * notified.
+		 * 
+		 * @param source
+		 *            TODO
+		 * @param prop
+		 * @param value
+		 */
+		private void fireChange(ChangeListener source, ConfProp prop, String value) {
+			for (ChangeListener tab : tabs) {
+				if (tab != source)
+					tab.notifyChange(prop, value);
+			}
+		}
+
+	}
+
+	/**
+	 * Create a SWT Text component for the given {@link ConfProp} text
+	 * configuration property.
+	 * 
+	 * @param listener
+	 * @param parent
+	 * @param prop
+	 * @return
+	 */
+	private Text createConfText(ModifyListener listener, Composite parent, ConfProp prop) {
+		Text text = new Text(parent, SWT.SINGLE | SWT.BORDER);
+		GridData data = new GridData(GridData.FILL_HORIZONTAL);
+		text.setLayoutData(data);
+		text.setData("hProp", prop);
+		text.setText(location.getConfPropValue(prop));
+		text.addModifyListener(listener);
+		return text;
+	}
+
+	/**
+	 * Create a SWT Checked Button component for the given {@link ConfProp}
+	 * boolean configuration property.
+	 * 
+	 * @param listener
+	 * @param parent
+	 * @param prop
+	 * @return
+	 */
+	private Button createConfCheckButton(SelectionListener listener, Composite parent, ConfProp prop, String text) {
+		Button button = new Button(parent, SWT.CHECK);
+		button.setText(text);
+		button.setData("hProp", prop);
+		button.setSelection(location.getConfPropValue(prop).equalsIgnoreCase("yes"));
+		button.addSelectionListener(listener);
+		return button;
+	}
+
+	/**
+	 * Create editor entry for the given configuration property. The editor is a
+	 * couple (Label, Text).
+	 * 
+	 * @param listener
+	 *            the listener to trigger on property change
+	 * @param parent
+	 *            the SWT parent container
+	 * @param prop
+	 *            the property to create an editor for
+	 * @param labelText
+	 *            a label (null will defaults to the property name)
+	 * 
+	 * @return a SWT Text field
+	 */
+	private Text createConfLabelText(ModifyListener listener, Composite parent, ConfProp prop, String labelText) {
+		Label label = new Label(parent, SWT.NONE);
+		if (labelText == null)
+			labelText = location.getConfPropName(prop);
+		label.setText(labelText);
+		return createConfText(listener, parent, prop);
+	}
+
+	/**
+	 * Create an editor entry for the given configuration name
+	 * 
+	 * @param listener
+	 *            the listener to trigger on property change
+	 * @param parent
+	 *            the SWT parent container
+	 * @param propName
+	 *            the name of the property to create an editor for
+	 * @param labelText
+	 *            a label (null will defaults to the property name)
+	 * 
+	 * @return a SWT Text field
+	 */
+	private Text createConfNameEditor(ModifyListener listener, Composite parent, String propName, String labelText) {
+
+		{
+			ConfProp prop = location.getConfPropForName(propName);
+			if (prop != null)
+				return createConfLabelText(listener, parent, prop, labelText);
+		}
+
+		Label label = new Label(parent, SWT.NONE);
+		if (labelText == null)
+			labelText = propName;
+		label.setText(labelText);
+
+		Text text = new Text(parent, SWT.SINGLE | SWT.BORDER);
+		GridData data = new GridData(GridData.FILL_HORIZONTAL);
+		text.setLayoutData(data);
+		text.setData("hPropName", propName);
+		text.setText(location.getConfPropValue(propName));
+		text.addModifyListener(listener);
+
+		return text;
+	}
+
+	/**
+	 * Main parameters of the Hadoop location: <li>host and port of the
+	 * Map/Reduce master (Job tracker) <li>host and port of the DFS master (Name
+	 * node) <li>SOCKS proxy
+	 */
+	private class TabMain implements ChangeListener, ModifyListener, SelectionListener {
+
+		TabMediator mediator;
+
+		Text textJTHost;
+
+		Text textNNHost;
+
+		Button colocateMasters;
+
+		Text textJTPort;
+
+		Text textNNPort;
+
+		Text userName;
+
+		Button useSocksProxy;
+
+		Text socksProxyHost;
+
+		Text socksProxyPort;
+
+		private Group groupMR;
+
+		TabMain(TabMediator mediator) {
+			this.mediator = mediator;
+			TabItem tab = new TabItem(mediator.folder, SWT.NONE);
+			tab.setText("General");
+			tab.setToolTipText("General location parameters");
+			tab.setControl(createControl(mediator.folder));
+		}
+
+		private Control createControl(Composite parent) {
+
+			Composite panel = new Composite(parent, SWT.FILL);
+			panel.setLayout(new GridLayout(2, false));
+
+			GridData data;
+
+			/*
+			 * Map/Reduce group
+			 */
+			{
+				groupMR = new Group(panel, SWT.SHADOW_NONE);
+				groupMR.setText("Map/Reduce Master Node");
+				groupMR.setToolTipText("Address of the Map/Reduce Master node.");
+				GridLayout layout = new GridLayout(2, false);
+				groupMR.setLayout(layout);
+				data = new GridData();
+				data.verticalAlignment = SWT.FILL;
+				data.horizontalAlignment = SWT.CENTER;
+				data.widthHint = 250;
+				groupMR.setLayoutData(data);
+
+				// Job Tracker host
+				Label label = new Label(groupMR, SWT.NONE);
+				label.setText("Host:");
+				data = new GridData(GridData.BEGINNING, GridData.CENTER, false, true);
+				label.setLayoutData(data);
+
+				textJTHost = createConfText(this, groupMR, ConfProp.PI_JOB_TRACKER_HOST);
+				data = new GridData(GridData.FILL, GridData.CENTER, true, true);
+				textJTHost.setLayoutData(data);
+
+				// Job Tracker port
+				label = new Label(groupMR, SWT.NONE);
+				label.setText("Port:");
+				data = new GridData(GridData.BEGINNING, GridData.CENTER, false, true);
+				label.setLayoutData(data);
+
+				textJTPort = createConfText(this, groupMR, ConfProp.PI_JOB_TRACKER_PORT);
+				data = new GridData(GridData.FILL, GridData.CENTER, true, true);
+				textJTPort.setLayoutData(data);
+			}
+
+			/*
+			 * DFS group
+			 */
+			{
+				Group groupDFS = new Group(panel, SWT.SHADOW_NONE);
+				groupDFS.setText("DFS Master");
+				groupDFS.setToolTipText("Address of the Distributed FileSystem " + "master node (the Name Node).");
+				GridLayout layout = new GridLayout(2, false);
+				groupDFS.setLayout(layout);
+				data = new GridData();
+				data.horizontalAlignment = SWT.CENTER;
+				data.widthHint = 250;
+				groupDFS.setLayoutData(data);
+
+				colocateMasters = createConfCheckButton(this, groupDFS, ConfProp.PI_COLOCATE_MASTERS, "Use M/R Master host");
+				data = new GridData();
+				data.horizontalSpan = 2;
+				colocateMasters.setLayoutData(data);
+
+				// Name Node host
+				Label label = new Label(groupDFS, SWT.NONE);
+				data = new GridData();
+				label.setText("Host:");
+				label.setLayoutData(data);
+
+				textNNHost = createConfText(this, groupDFS, ConfProp.PI_NAME_NODE_HOST);
+
+				// Name Node port
+				label = new Label(groupDFS, SWT.NONE);
+				data = new GridData();
+				label.setText("Port:");
+				label.setLayoutData(data);
+
+				textNNPort = createConfText(this, groupDFS, ConfProp.PI_NAME_NODE_PORT);
+			}
+
+			{
+				Composite subpanel = new Composite(panel, SWT.FILL);
+				subpanel.setLayout(new GridLayout(2, false));
+				data = new GridData();
+				data.horizontalSpan = 2;
+				data.horizontalAlignment = SWT.FILL;
+				subpanel.setLayoutData(data);
+
+				userName = createConfLabelText(this, subpanel, ConfProp.PI_USER_NAME, "&User name:");
+			}
+
+			// SOCKS proxy group
+			{
+				Group groupSOCKS = new Group(panel, SWT.SHADOW_NONE);
+				groupSOCKS.setText("SOCKS proxy");
+				groupSOCKS.setToolTipText("Address of the SOCKS proxy to use " + "to connect to the infrastructure.");
+				GridLayout layout = new GridLayout(2, false);
+				groupSOCKS.setLayout(layout);
+				data = new GridData();
+				data.horizontalAlignment = SWT.CENTER;
+				data.horizontalSpan = 2;
+				data.widthHint = 250;
+				groupSOCKS.setLayoutData(data);
+
+				useSocksProxy = createConfCheckButton(this, groupSOCKS, ConfProp.PI_SOCKS_PROXY_ENABLE, "Enable SOCKS proxy");
+				data = new GridData();
+				data.horizontalSpan = 2;
+				useSocksProxy.setLayoutData(data);
+
+				// SOCKS proxy host
+				Label label = new Label(groupSOCKS, SWT.NONE);
+				data = new GridData();
+				label.setText("Host:");
+				label.setLayoutData(data);
+
+				socksProxyHost = createConfText(this, groupSOCKS, ConfProp.PI_SOCKS_PROXY_HOST);
+
+				// SOCKS proxy port
+				label = new Label(groupSOCKS, SWT.NONE);
+				data = new GridData();
+				label.setText("Port:");
+				label.setLayoutData(data);
+
+				socksProxyPort = createConfText(this, groupSOCKS, ConfProp.PI_SOCKS_PROXY_PORT);
+			}
+
+			// Update the state of all widgets according to the current values!
+			reloadConfProp(ConfProp.PI_COLOCATE_MASTERS);
+			reloadConfProp(ConfProp.PI_SOCKS_PROXY_ENABLE);
+			reloadConfProp(ConfProp.PI_HADOOP_VERSION);
+
+			return panel;
+		}
+
+		/**
+		 * Reload the given configuration property value
+		 * 
+		 * @param prop
+		 */
+		private void reloadConfProp(ConfProp prop) {
+			this.notifyChange(prop, location.getConfPropValue(prop));
+		}
+
+		public void notifyChange(ConfProp prop, String propValue) {
+			switch (prop) {
+			case PI_JOB_TRACKER_HOST: {
+				textJTHost.setText(propValue);
+				break;
+			}
+			case PI_JOB_TRACKER_PORT: {
+				textJTPort.setText(propValue);
+				break;
+			}
+			case PI_USER_NAME: {
+				userName.setText(propValue);
+				break;
+			}
+			case PI_COLOCATE_MASTERS: {
+				if (colocateMasters != null) {
+					boolean colocate = propValue.equalsIgnoreCase("yes");
+					colocateMasters.setSelection(colocate);
+					if (textNNHost != null) {
+						textNNHost.setEnabled(!colocate);
+					}
+				}
+				break;
+			}
+			case PI_NAME_NODE_HOST: {
+				textNNHost.setText(propValue);
+				break;
+			}
+			case PI_NAME_NODE_PORT: {
+				textNNPort.setText(propValue);
+				break;
+			}
+			case PI_SOCKS_PROXY_ENABLE: {
+				if (useSocksProxy != null) {
+					boolean useProxy = propValue.equalsIgnoreCase("yes");
+					useSocksProxy.setSelection(useProxy);
+					if (socksProxyHost != null)
+						socksProxyHost.setEnabled(useProxy);
+					if (socksProxyPort != null)
+						socksProxyPort.setEnabled(useProxy);
+				}
+				break;
+			}
+			case PI_SOCKS_PROXY_HOST: {
+				socksProxyHost.setText(propValue);
+				break;
+			}
+			case PI_SOCKS_PROXY_PORT: {
+				socksProxyPort.setText(propValue);
+				break;
+			}
+			}
+		}
+
+		/* @inheritDoc */
+		public void modifyText(ModifyEvent e) {
+			final Text text = (Text) e.widget;
+			final ConfProp prop = (ConfProp) text.getData("hProp");
+			Display.getDefault().syncExec(new Runnable() {
+				public void run() {
+					mediator.notifyChange(TabMain.this, prop, text.getText());
+				}
+			});
+		}
+
+		/* @inheritDoc */
+		public void widgetDefaultSelected(SelectionEvent e) {
+			this.widgetSelected(e);
+		}
+
+		/* @inheritDoc */
+		public void widgetSelected(SelectionEvent e) {
+			final Button button = (Button) e.widget;
+			final ConfProp prop = (ConfProp) button.getData("hProp");
+
+			Display.getDefault().syncExec(new Runnable() {
+				public void run() {
+					// We want to receive the update also!
+					mediator.notifyChange(null, prop, button.getSelection() ? "yes" : "no");
+				}
+			});
+		}
+
+	}
+
+	private class TabAdvanced implements ChangeListener, ModifyListener {
+		TabMediator mediator;
+		private Composite panel;
+		private Map<String, Text> textMap = new TreeMap<String, Text>();
+
+		TabAdvanced(TabMediator mediator) {
+			this.mediator = mediator;
+			TabItem tab = new TabItem(mediator.folder, SWT.NONE);
+			tab.setText("Advanced parameters");
+			tab.setToolTipText("Access to advanced Hadoop parameters");
+			tab.setControl(createControl(mediator.folder));
+
+		}
+
+		private Control createControl(Composite parent) {
+			ScrolledComposite sc = new ScrolledComposite(parent, SWT.BORDER | SWT.H_SCROLL | SWT.V_SCROLL);
+			panel = buildPanel(sc);
+			sc.setContent(panel);
+			sc.setExpandHorizontal(true);
+			sc.setExpandVertical(true);
+			sc.setMinSize(640, 480);
+			sc.setMinSize(panel.computeSize(SWT.DEFAULT, SWT.DEFAULT));
+			return sc;
+		}
+
+		private Composite buildPanel(Composite parent) {
+			Composite panel = new Composite(parent, SWT.NONE);
+			GridLayout layout = new GridLayout();
+			layout.numColumns = 2;
+			layout.makeColumnsEqualWidth = false;
+			panel.setLayout(layout);
+			panel.setLayoutData(new GridData(GridData.FILL, GridData.FILL, true, true, 1, 1));
+
+			// Sort by property name
+			SortedMap<String, String> map = new TreeMap<String, String>();
+			Iterator<Entry<String, String>> it = location.getConfiguration();
+			while (it.hasNext()) {
+				Entry<String, String> entry = it.next();
+				map.put(entry.getKey(), entry.getValue());
+			}
+
+			for (Entry<String, String> entry : map.entrySet()) {
+				Text text = createConfNameEditor(this, panel, entry.getKey(), null);
+				textMap.put(entry.getKey(), text);
+			}
+			return panel;
+		}
+
+		public void notifyChange(ConfProp prop, final String propValue) {
+			Text text = textMap.get(location.getConfPropName(prop));
+			text.setText(propValue);
+		}
+
+		public void modifyText(ModifyEvent e) {
+			final Text text = (Text) e.widget;
+			Object hProp = text.getData("hProp");
+			final ConfProp prop = (hProp != null) ? (ConfProp) hProp : null;
+			Object hPropName = text.getData("hPropName");
+			final String propName = (hPropName != null) ? (String) hPropName : null;
+
+			Display.getDefault().syncExec(new Runnable() {
+				public void run() {
+					if (prop != null)
+						mediator.notifyChange(TabAdvanced.this, prop, text.getText());
+					else
+						mediator.notifyChange(TabAdvanced.this, propName, text.getText());
+				}
+			});
+		}
+
+	}
+
+	/* (non-Javadoc)
+	 * @see org.apache.hdt.core.launch.AbstractHadoopCluster.HadoopConfigurationBuilder#update(org.apache.hdt.core.launch.ConfProp, java.lang.String)
+	 */
+	@Override
+	public void notifyChange(ConfProp confProp, String text) {
+		mediator.notifyChange(null, ConfProp.PI_LOCATION_NAME, text);
+		
+	}
+	/* (non-Javadoc)
+	 * @see org.apache.hdt.core.launch.AbstractHadoopCluster.HadoopConfigurationBuilder#setChangeListener(org.apache.hdt.core.launch.AbstractHadoopCluster.ChangeListener)
+	 */
+	@Override
+	public void setChangeListener(ChangeListener l) {
+		changelistener=l;
+	}
+
+}
\ No newline at end of file

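A note on the pattern above: the tabs coordinate strictly through the mediator: an edit in one tab is stored on the location and echoed to the other tabs, while no-op updates are dropped so the cross-notifications cannot loop. A minimal standalone sketch of the same pattern (illustrative names, not the plugin's actual classes):

class PropMediator {
	interface Listener {
		void notifyChange(String prop, String value);
	}

	private final java.util.Set<Listener> tabs = new java.util.HashSet<Listener>();
	private final java.util.Map<String, String> state = new java.util.HashMap<String, String>();

	void register(Listener tab) {
		tabs.add(tab);
	}

	// Called by a tab when the user edits a value.
	void notifyChange(Listener source, String prop, String value) {
		String old = state.get(prop);
		if (old != null && old.equals(value))
			return; // ignore no-op updates so notifications cannot loop
		state.put(prop, value);
		for (Listener tab : tabs)
			if (tab != source) // the source already shows the new value
				tab.notifyChange(prop, value);
	}
}
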
http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/c308e976/org.apache.hdt.hadoop2.release/src/org/apache/hdt/hadoop2/release/HadoopCluster.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop2.release/src/org/apache/hdt/hadoop2/release/HadoopCluster.java b/org.apache.hdt.hadoop2.release/src/org/apache/hdt/hadoop2/release/HadoopCluster.java
index 56f1880..cf4dcd4 100644
--- a/org.apache.hdt.hadoop2.release/src/org/apache/hdt/hadoop2/release/HadoopCluster.java
+++ b/org.apache.hdt.hadoop2.release/src/org/apache/hdt/hadoop2/release/HadoopCluster.java
@@ -38,10 +38,6 @@ import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
 import java.util.logging.Logger;
 
-import javax.xml.parsers.DocumentBuilder;
-import javax.xml.parsers.DocumentBuilderFactory;
-import javax.xml.parsers.ParserConfigurationException;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.io.IOUtils;
@@ -50,8 +46,8 @@ import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.JobID;
 import org.apache.hadoop.mapred.JobStatus;
 import org.apache.hadoop.mapred.RunningJob;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hdt.core.Activator;
+import org.apache.hdt.core.HadoopVersion;
 import org.apache.hdt.core.launch.AbstractHadoopCluster;
 import org.apache.hdt.core.launch.ConfProp;
 import org.apache.hdt.core.launch.IHadoopJob;
@@ -63,12 +59,6 @@ import org.eclipse.core.runtime.IStatus;
 import org.eclipse.core.runtime.Status;
 import org.eclipse.core.runtime.jobs.Job;
 import org.eclipse.swt.widgets.Display;
-import org.w3c.dom.Document;
-import org.w3c.dom.Element;
-import org.w3c.dom.Node;
-import org.w3c.dom.NodeList;
-import org.w3c.dom.Text;
-import org.xml.sax.SAXException;
 
 /**
  * Representation of a Hadoop location, meaning of the master node (NameNode,
@@ -242,9 +232,6 @@ public class HadoopCluster extends AbstractHadoopCluster {
             this.conf = new Configuration();
             this.addPluginConfigDefaultProperties();
             conf.set("mapreduce.framework.name", "yarn");
-            conf.set(YarnConfiguration.RM_ADDRESS, "localhost:8032");
-            conf.set(getConfPropName(ConfProp.PI_JOB_TRACKER_PORT), "8032");
-            conf.set("mapreduce.jobhistory.address", "localhost:10020");
         }
    
 	/**
@@ -341,7 +328,7 @@ public class HadoopCluster extends AbstractHadoopCluster {
 	 * @return the host name of the Job tracker
 	 */
 	public String getMasterHostName() {
-		return getConfPropValue(ConfProp.PI_JOB_TRACKER_HOST);
+		return getConfPropValue(ConfProp.PI_RESOURCE_MGR_HOST);
 	}
 
 	public String getState() {
@@ -393,21 +380,6 @@ public class HadoopCluster extends AbstractHadoopCluster {
 		setConfPropValue(ConfProp.PI_LOCATION_NAME, newName);
 	}
     
-	@Override
-	public String getConfPropName(ConfProp prop) {
-	    if(ConfProp.JOB_TRACKER_URI.equals(prop))
-	        return YarnConfiguration.RM_ADDRESS;
-	    return super.getConfPropName(prop);
-	}
-        @Override
-        public ConfProp getConfPropForName(String propName) {
-            if(YarnConfiguration.RM_ADDRESS.equals(propName))
-                return ConfProp.JOB_TRACKER_URI;
-            if("mapred.job.tracker".equals(propName))
-                return null;
-            return super.getConfPropForName(propName);
-        }
-    
 	/**
 	 * Write this location settings to the given output stream
 	 * 
@@ -553,7 +525,13 @@ public class HadoopCluster extends AbstractHadoopCluster {
 	}
 
     @Override
-    public String getVersion() {
-        return "2.2";
+    public HadoopVersion getVersion() {
+            return HadoopVersion.Version2;
+    }
+
+
+    @Override
+    public HadoopConfigurationBuilder getUIConfigurationBuilder() {
+        return new HadoopV2ConfigurationBuilder(this);
     }
 }

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/c308e976/org.apache.hdt.hadoop2.release/src/org/apache/hdt/hadoop2/release/HadoopV2ConfigurationBuilder.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop2.release/src/org/apache/hdt/hadoop2/release/HadoopV2ConfigurationBuilder.java b/org.apache.hdt.hadoop2.release/src/org/apache/hdt/hadoop2/release/HadoopV2ConfigurationBuilder.java
new file mode 100644
index 0000000..02f05c4
--- /dev/null
+++ b/org.apache.hdt.hadoop2.release/src/org/apache/hdt/hadoop2/release/HadoopV2ConfigurationBuilder.java
@@ -0,0 +1,771 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.hadoop2.release;
+
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.SortedMap;
+import java.util.TreeMap;
+
+import org.apache.hdt.core.launch.AbstractHadoopCluster;
+import org.apache.hdt.core.launch.AbstractHadoopCluster.ChangeListener;
+import org.apache.hdt.core.launch.AbstractHadoopCluster.HadoopConfigurationBuilder;
+import org.apache.hdt.core.launch.ConfProp;
+import org.eclipse.swt.SWT;
+import org.eclipse.swt.custom.ScrolledComposite;
+import org.eclipse.swt.events.ModifyEvent;
+import org.eclipse.swt.events.ModifyListener;
+import org.eclipse.swt.events.SelectionEvent;
+import org.eclipse.swt.events.SelectionListener;
+import org.eclipse.swt.layout.GridData;
+import org.eclipse.swt.layout.GridLayout;
+import org.eclipse.swt.widgets.Button;
+import org.eclipse.swt.widgets.Composite;
+import org.eclipse.swt.widgets.Control;
+import org.eclipse.swt.widgets.Display;
+import org.eclipse.swt.widgets.Group;
+import org.eclipse.swt.widgets.Label;
+import org.eclipse.swt.widgets.TabFolder;
+import org.eclipse.swt.widgets.TabItem;
+import org.eclipse.swt.widgets.Text;
+
+class HadoopV2ConfigurationBuilder implements HadoopConfigurationBuilder {
+
+    private AbstractHadoopCluster location;
+    private TabMediator mediator;
+    private ChangeListener changelistener;
+
+    public HadoopV2ConfigurationBuilder(AbstractHadoopCluster location) {
+        this.location = location;
+    }
+
+    @Override
+    public void buildControl(Composite panel) {
+        mediator = new TabMediator(panel);
+        GridData gdata = new GridData(GridData.FILL_BOTH);
+        gdata.horizontalSpan = 2;
+        mediator.folder.setLayoutData(gdata);
+    }
+
+    private interface TabListener {
+        void notifyChange(ConfProp prop, String propValue);
+    }
+
+    private class TabMediator {
+        TabFolder folder;
+        private Set<TabListener> tabs = new HashSet<TabListener>();
+
+        TabMediator(Composite parent) {
+            folder = new TabFolder(parent, SWT.NONE);
+            tabs.add(new TabMain(this));
+            tabs.add(new TabAdvanced(this));
+        }
+
+        /**
+         * Implements change notifications from any tab: updates the
+         * location state and notifies the other tabs.
+         * 
+         * @param source
+         *            origin of the notification (one of the tabs)
+         * @param prop
+         *            modified property
+         * @param propValue
+         *            new value
+         */
+        void notifyChange(TabListener source, final ConfProp prop, final String propValue) {
+            // Ignore notification when no change
+            String oldValue = location.getConfPropValue(prop);
+            if ((oldValue != null) && oldValue.equals(propValue))
+                return;
+
+            location.setConfPropValue(prop, propValue);
+            changelistener.notifyChange(prop, propValue);
+
+            this.fireChange(source, prop, propValue);
+
+            /*
+             * Now we deal with dependencies between settings
+             */
+            final String rmHost = location.getConfPropValue(ConfProp.PI_RESOURCE_MGR_HOST);
+            final String rmPort = location.getConfPropValue(ConfProp.PI_RESOURCE_MGR_PORT);
+            final String jhHost = location.getConfPropValue(ConfProp.PI_JOB_HISTORY_HOST);
+            final String jhPort = location.getConfPropValue(ConfProp.PI_JOB_HISTORY_PORT);
+            final String nameNodeHost = location.getConfPropValue(ConfProp.PI_NAME_NODE_HOST);
+            final String nameNodePort = location.getConfPropValue(ConfProp.PI_NAME_NODE_PORT);
+            final boolean colocate = location.getConfPropValue(ConfProp.PI_COLOCATE_MASTERS).equalsIgnoreCase("yes");
+            final String rmDefaultURI = location.getConfPropValue(ConfProp.RM_DEFAULT_URI);
+            final String jhDefaultURI = location.getConfPropValue(ConfProp.JOB_HISTORY_DEFAULT_URI);
+            final String fsDefaultURI = location.getConfPropValue(ConfProp.FS_DEFAULT_URI);
+            final String socksServerURI = location.getConfPropValue(ConfProp.SOCKS_SERVER);
+            final boolean socksProxyEnable = location.getConfPropValue(ConfProp.PI_SOCKS_PROXY_ENABLE).equalsIgnoreCase("yes");
+            final String socksProxyHost = location.getConfPropValue(ConfProp.PI_SOCKS_PROXY_HOST);
+            final String socksProxyPort = location.getConfPropValue(ConfProp.PI_SOCKS_PROXY_PORT);
+
+            Display.getDefault().syncExec(new Runnable() {
+                public void run() {
+                    switch (prop) {
+                    case PI_RESOURCE_MGR_HOST: {
+                        if (colocate) {
+                            notifyChange(null, ConfProp.PI_NAME_NODE_HOST, rmHost);
+                            notifyChange(null, ConfProp.PI_JOB_HISTORY_HOST, rmHost);
+                        }
+                        String newJobTrackerURI = String.format("%s:%s", rmHost, rmPort);
+                        notifyChange(null, ConfProp.RM_DEFAULT_URI, newJobTrackerURI);
+                        break;
+                    }
+                    case PI_RESOURCE_MGR_PORT: {
+                        String newJobTrackerURI = String.format("%s:%s", rmHost, rmPort);
+                        notifyChange(null, ConfProp.RM_DEFAULT_URI, newJobTrackerURI);
+                        break;
+                    }
+                    case PI_NAME_NODE_HOST: {
+                        String newHDFSURI = String.format("hdfs://%s:%s/", nameNodeHost, nameNodePort);
+                        notifyChange(null, ConfProp.FS_DEFAULT_URI, newHDFSURI);
+
+                        // Break colocation if someone forces the DFS Master host
+                        if (!colocate && !nameNodeHost.equals(rmHost))
+                            notifyChange(null, ConfProp.PI_COLOCATE_MASTERS, "no");
+                        break;
+                    }
+                    case PI_NAME_NODE_PORT: {
+                        String newHDFSURI = String.format("hdfs://%s:%s/", nameNodeHost, nameNodePort);
+                        notifyChange(null, ConfProp.FS_DEFAULT_URI, newHDFSURI);
+                        break;
+                    }
+
+                    case PI_JOB_HISTORY_HOST: {
+                        String newJobHistoryURI = String.format("%s:%s", jhHost, jhPort);
+                        notifyChange(null, ConfProp.JOB_HISTORY_DEFAULT_URI, newJobHistoryURI);
+
+                        // Break colocation if someone forces the Job History host
+                        if (!colocate && !jhHost.equals(rmHost))
+                            notifyChange(null, ConfProp.PI_COLOCATE_MASTERS, "no");
+                        break;
+                    }
+                    case PI_JOB_HISTORY_PORT: {
+                        String newJobHistoryURI = String.format("%s:%s", jhHost, jhPort);
+                        notifyChange(null, ConfProp.JOB_HISTORY_DEFAULT_URI, newJobHistoryURI);
+                        break;
+                    }
+
+                    case PI_SOCKS_PROXY_HOST: {
+                        String newSocksProxyURI = String.format("%s:%s", socksProxyHost, socksProxyPort);
+                        notifyChange(null, ConfProp.SOCKS_SERVER, newSocksProxyURI);
+                        break;
+                    }
+                    case PI_SOCKS_PROXY_PORT: {
+                        String newSocksProxyURI = String.format("%s:%s", socksProxyHost, socksProxyPort);
+                        notifyChange(null, ConfProp.SOCKS_SERVER, newSocksProxyURI);
+                        break;
+                    }
+                    case RM_DEFAULT_URI: {
+                        String[] strs = rmDefaultURI.split(":", 2);
+                        String host = strs[0];
+                        String port = (strs.length == 2) ? strs[1] : "";
+                        notifyChange(null, ConfProp.PI_RESOURCE_MGR_HOST, host);
+                        notifyChange(null, ConfProp.PI_RESOURCE_MGR_PORT, port);
+                        break;
+                    }
+                    case JOB_HISTORY_DEFAULT_URI: {
+                        String[] strs = jhDefaultURI.split(":", 2);
+                        String host = strs[0];
+                        String port = (strs.length == 2) ? strs[1] : "";
+                        notifyChange(null, ConfProp.PI_JOB_HISTORY_HOST, host);
+                        notifyChange(null, ConfProp.PI_JOB_HISTORY_PORT, port);
+                        break;
+                    }
+                    case FS_DEFAULT_URI: {
+                        try {
+                            URI uri = new URI(fsDefaultURI);
+                            if (uri.getScheme().equals("hdfs")) {
+                                String host = uri.getHost();
+                                String port = Integer.toString(uri.getPort());
+                                notifyChange(null, ConfProp.PI_NAME_NODE_HOST, host);
+                                notifyChange(null, ConfProp.PI_NAME_NODE_PORT, port);
+                            }
+                        } catch (URISyntaxException use) {
+                            // Ignore the update!
+                        }
+                        break;
+                    }
+                    case SOCKS_SERVER: {
+                        String[] strs = socksServerURI.split(":", 2);
+                        String host = strs[0];
+                        String port = (strs.length == 2) ? strs[1] : "";
+                        notifyChange(null, ConfProp.PI_SOCKS_PROXY_HOST, host);
+                        notifyChange(null, ConfProp.PI_SOCKS_PROXY_PORT, port);
+                        break;
+                    }
+                    case PI_COLOCATE_MASTERS: {
+                        if (colocate) {
+                            notifyChange(null, ConfProp.PI_NAME_NODE_HOST, rmHost);
+                            notifyChange(null, ConfProp.PI_JOB_HISTORY_HOST, rmHost);
+                        }
+                        break;
+                    }
+                    case PI_SOCKS_PROXY_ENABLE: {
+                        if (socksProxyEnable) {
+                            notifyChange(null, ConfProp.SOCKET_FACTORY_DEFAULT, "org.apache.hadoop.net.SocksSocketFactory");
+                        } else {
+                            notifyChange(null, ConfProp.SOCKET_FACTORY_DEFAULT, "org.apache.hadoop.net.StandardSocketFactory");
+                        }
+                        break;
+                    }
+                    }
+                }
+            });
+
+        }
+
+        /**
+         * Change notifications on properties (by name). A property might
+         * not be reflected as a ConfProp enum. If it is, the notification
+         * is forwarded to the ConfProp notifyChange method. If not, it is
+         * processed here.
+         * 
+         * @param source
+         * @param propName
+         * @param propValue
+         */
+        void notifyChange(TabListener source, String propName, String propValue) {
+            ConfProp prop = location.getConfPropForName(propName);
+            if (prop != null)
+                notifyChange(source, prop, propValue);
+            else
+                location.setConfPropValue(propName, propValue);
+        }
+
+        /**
+         * Broadcast a property change to all registered tabs. If a tab is
+         * identified as the source of the change, this tab will not be
+         * notified.
+         * 
+         * @param source
+         *            origin of the change (will not be notified)
+         * @param prop
+         * @param value
+         */
+        private void fireChange(TabListener source, ConfProp prop, String value) {
+            for (TabListener tab : tabs) {
+                if (tab != source)
+                    tab.notifyChange(prop, value);
+            }
+        }
+
+    }
+
+    /**
+     * Create a SWT Text component for the given {@link ConfProp} text
+     * configuration property.
+     * 
+     * @param listener
+     * @param parent
+     * @param prop
+     * @return
+     */
+    private Text createConfText(ModifyListener listener, Composite parent, ConfProp prop) {
+        Text text = new Text(parent, SWT.SINGLE | SWT.BORDER);
+        GridData data = new GridData(GridData.FILL_HORIZONTAL);
+        text.setLayoutData(data);
+        text.setData("hProp", prop);
+        text.setText(location.getConfPropValue(prop));
+        text.addModifyListener(listener);
+        return text;
+    }
+
+    /**
+     * Create a SWT Checked Button component for the given {@link ConfProp}
+     * boolean configuration property.
+     * 
+     * @param listener
+     * @param parent
+     * @param prop
+     * @return
+     */
+    private Button createConfCheckButton(SelectionListener listener, Composite parent, ConfProp prop, String text) {
+        Button button = new Button(parent, SWT.CHECK);
+        button.setText(text);
+        button.setData("hProp", prop);
+        button.setSelection(location.getConfPropValue(prop).equalsIgnoreCase("yes"));
+        button.addSelectionListener(listener);
+        return button;
+    }
+
+    /**
+     * Create an editor entry for the given configuration property. The
+     * editor is a (Label, Text) pair.
+     * 
+     * @param listener
+     *            the listener to trigger on property change
+     * @param parent
+     *            the SWT parent container
+     * @param prop
+     *            the property to create an editor for
+     * @param labelText
+     *            a label (null defaults to the property name)
+     * 
+     * @return a SWT Text field
+     */
+    private Text createConfLabelText(ModifyListener listener, Composite parent, ConfProp prop, String labelText) {
+        Label label = new Label(parent, SWT.NONE);
+        if (labelText == null)
+            labelText = location.getConfPropName(prop);
+        label.setText(labelText);
+        return createConfText(listener, parent, prop);
+    }
+
+    /**
+     * Create an editor entry for the given configuration name
+     * 
+     * @param listener
+     *            the listener to trigger on property change
+     * @param parent
+     *            the SWT parent container
+     * @param propName
+     *            the name of the property to create an editor for
+     * @param labelText
+     *            a label (null defaults to the property name)
+     * 
+     * @return a SWT Text field
+     */
+    private Text createConfNameEditor(ModifyListener listener, Composite parent, String propName, String labelText) {
+
+        {
+            ConfProp prop = location.getConfPropForName(propName);
+            if (prop != null)
+                return createConfLabelText(listener, parent, prop, labelText);
+        }
+
+        Label label = new Label(parent, SWT.NONE);
+        if (labelText == null)
+            labelText = propName;
+        label.setText(labelText);
+
+        Text text = new Text(parent, SWT.SINGLE | SWT.BORDER);
+        GridData data = new GridData(GridData.FILL_HORIZONTAL);
+        text.setLayoutData(data);
+        text.setData("hPropName", propName);
+        text.setText(location.getConfPropValue(propName));
+        text.addModifyListener(listener);
+
+        return text;
+    }
+
+    /**
+     * Main parameters of the Hadoop location: <li>host and port of the
+     * Resource Manager <li>host and port of the Job History server <li>host
+     * and port of the DFS master (Name node) <li>SOCKS proxy
+     */
+    private class TabMain implements TabListener, ModifyListener, SelectionListener {
+
+        TabMediator mediator;
+
+        Text textRMHost;
+
+        Text textNNHost;
+
+        Button colocateMasters;
+
+        Text textRMPort;
+
+        Text textNNPort;
+
+        Text userName;
+
+        Button useSocksProxy;
+
+        Text socksProxyHost;
+
+        Text socksProxyPort;
+
+        private Group groupMR;
+
+        private Text textJHHost;
+
+        private Text textJHPort;
+
+        TabMain(TabMediator mediator) {
+            this.mediator = mediator;
+            TabItem tab = new TabItem(mediator.folder, SWT.NONE);
+            tab.setText("General");
+            tab.setToolTipText("General location parameters");
+            tab.setControl(createControl(mediator.folder));
+        }
+
+        private Control createControl(Composite parent) {
+
+            Composite panel = new Composite(parent, SWT.FILL);
+            panel.setLayout(new GridLayout(2, false));
+
+            GridData data;
+
+            /*
+             * Map/Reduce group
+             */
+            {
+                groupMR = new Group(panel, SWT.SHADOW_NONE);
+                groupMR.setText("Resource Manager Node");
+                groupMR.setToolTipText("Address of the Resource Manager node.");
+                GridLayout layout = new GridLayout(2, false);
+                groupMR.setLayout(layout);
+                data = new GridData();
+                data.verticalAlignment = SWT.FILL;
+                data.horizontalAlignment = SWT.CENTER;
+                data.widthHint = 250;
+                groupMR.setLayoutData(data);
+
+                // Resource Manager host
+                Label label = new Label(groupMR, SWT.NONE);
+                label.setText("Host:");
+                data = new GridData(GridData.BEGINNING, GridData.CENTER, false, true);
+                label.setLayoutData(data);
+
+                textRMHost = createConfText(this, groupMR, ConfProp.PI_RESOURCE_MGR_HOST);
+                data = new GridData(GridData.FILL, GridData.CENTER, true, true);
+                textRMHost.setLayoutData(data);
+
+                colocateMasters = createConfCheckButton(this, groupMR, ConfProp.PI_COLOCATE_MASTERS, "Use RM host for other services.");
+                data = new GridData();
+                data.horizontalSpan = 2;
+                colocateMasters.setLayoutData(data);
+
+                // Resource Manager port
+                label = new Label(groupMR, SWT.NONE);
+                label.setText("Port:");
+                data = new GridData(GridData.BEGINNING, GridData.CENTER, false, true);
+                label.setLayoutData(data);
+
+                textRMPort = createConfText(this, groupMR, ConfProp.PI_RESOURCE_MGR_PORT);
+                data = new GridData(GridData.FILL, GridData.CENTER, true, true);
+                textRMPort.setLayoutData(data);
+            }
+
+            /*
+             * Job history Server
+             */
+            {
+                Group groupJH = new Group(panel, SWT.SHADOW_NONE);
+                groupJH.setText("Job History Node");
+                groupJH.setToolTipText("Address of the Job History Node.");
+                GridLayout layout = new GridLayout(2, false);
+                groupJH.setLayout(layout);
+                data = new GridData();
+                data.horizontalAlignment = SWT.CENTER;
+                data.verticalAlignment = SWT.FILL;
+                data.widthHint = 250;
+                groupJH.setLayoutData(data);
+
+                // Job History host
+                Label label = new Label(groupJH, SWT.NONE);
+                data = new GridData();
+                label.setText("Host:");
+                label.setLayoutData(data);
+
+                textJHHost = createConfText(this, groupJH, ConfProp.PI_JOB_HISTORY_HOST);
+
+                // Job History port
+                label = new Label(groupJH, SWT.NONE);
+                data = new GridData();
+                label.setText("Port:");
+                label.setLayoutData(data);
+
+                textJHPort = createConfText(this, groupJH, ConfProp.PI_JOB_HISTORY_PORT);
+            }
+
+            {
+                Composite subpanel = new Composite(panel, SWT.FILL);
+                subpanel.setLayout(new GridLayout(2, false));
+                data = new GridData();
+                data.horizontalSpan = 2;
+                data.horizontalAlignment = SWT.FILL;
+                subpanel.setLayoutData(data);
+
+                userName = createConfLabelText(this, subpanel, ConfProp.PI_USER_NAME, "&User name:");
+            }
+
+
+            /*
+             * DFS group
+             */
+            {
+                Group groupDFS = new Group(panel, SWT.SHADOW_NONE);
+                groupDFS.setText("DFS Master");
+                groupDFS.setToolTipText("Address of the Distributed FileSystem " + "master node (the Name Node).");
+                GridLayout layout = new GridLayout(2, false);
+                groupDFS.setLayout(layout);
+                data = new GridData();
+                data.horizontalAlignment = SWT.CENTER;
+                data.verticalAlignment = SWT.FILL;
+                data.widthHint = 250;
+                groupDFS.setLayoutData(data);
+
+                // Name Node host
+                Label label = new Label(groupDFS, SWT.NONE);
+                data = new GridData();
+                label.setText("Host:");
+                label.setLayoutData(data);
+
+                textNNHost = createConfText(this, groupDFS, ConfProp.PI_NAME_NODE_HOST);
+
+                // Name Node port
+                label = new Label(groupDFS, SWT.NONE);
+                data = new GridData();
+                label.setText("Port:");
+                label.setLayoutData(data);
+
+                textNNPort = createConfText(this, groupDFS, ConfProp.PI_NAME_NODE_PORT);
+            }
+
+            // SOCKS proxy group
+            {
+                Group groupSOCKS = new Group(panel, SWT.SHADOW_NONE);
+                groupSOCKS.setText("SOCKS proxy");
+                groupSOCKS.setToolTipText("Address of the SOCKS proxy to use " + "to connect to the infrastructure.");
+                GridLayout layout = new GridLayout(2, false);
+                groupSOCKS.setLayout(layout);
+                data = new GridData();
+                data.horizontalAlignment = SWT.CENTER;
+                data.widthHint = 250;
+                groupSOCKS.setLayoutData(data);
+
+                useSocksProxy = createConfCheckButton(this, groupSOCKS, ConfProp.PI_SOCKS_PROXY_ENABLE, "Enable SOCKS proxy");
+                data = new GridData();
+                data.horizontalSpan = 2;
+                useSocksProxy.setLayoutData(data);
+
+                // SOCKS proxy host
+                Label label = new Label(groupSOCKS, SWT.NONE);
+                data = new GridData();
+                label.setText("Host:");
+                label.setLayoutData(data);
+
+                socksProxyHost = createConfText(this, groupSOCKS, ConfProp.PI_SOCKS_PROXY_HOST);
+
+                // SOCKS proxy port
+                label = new Label(groupSOCKS, SWT.NONE);
+                data = new GridData();
+                label.setText("Port:");
+                label.setLayoutData(data);
+
+                socksProxyPort = createConfText(this, groupSOCKS, ConfProp.PI_SOCKS_PROXY_PORT);
+            }
+
+            // Update the state of all widgets according to the current
+            // values!
+            reloadConfProp(ConfProp.PI_COLOCATE_MASTERS);
+            reloadConfProp(ConfProp.PI_SOCKS_PROXY_ENABLE);
+            reloadConfProp(ConfProp.PI_HADOOP_VERSION);
+
+            return panel;
+        }
+
+        /**
+         * Reload the given configuration property value
+         * 
+         * @param prop
+         */
+        private void reloadConfProp(ConfProp prop) {
+            this.notifyChange(prop, location.getConfPropValue(prop));
+        }
+
+        public void notifyChange(ConfProp prop, String propValue) {
+            switch (prop) {
+            case PI_RESOURCE_MGR_HOST: {
+                textRMHost.setText(propValue);
+                break;
+            }
+            case PI_RESOURCE_MGR_PORT: {
+                textRMPort.setText(propValue);
+                break;
+            }
+            case PI_USER_NAME: {
+                userName.setText(propValue);
+                break;
+            }
+            case PI_COLOCATE_MASTERS: {
+                if (colocateMasters != null) {
+                    boolean colocate = propValue.equalsIgnoreCase("yes");
+                    colocateMasters.setSelection(colocate);
+                    if (textNNHost != null) {
+                        textNNHost.setEnabled(!colocate);
+                    }
+                    if (textJHHost != null) {
+                        textJHHost.setEnabled(!colocate);
+                    }
+                }
+                break;
+            }
+            case PI_NAME_NODE_HOST: {
+                textNNHost.setText(propValue);
+                break;
+            }
+            case PI_NAME_NODE_PORT: {
+                textNNPort.setText(propValue);
+                break;
+            }
+            case PI_JOB_HISTORY_HOST: {
+                textJHHost.setText(propValue);
+                break;
+            }
+            case PI_JOB_HISTORY_PORT: {
+                textJHPort.setText(propValue);
+                break;
+            }
+
+            case PI_SOCKS_PROXY_ENABLE: {
+                if (useSocksProxy != null) {
+                    boolean useProxy = propValue.equalsIgnoreCase("yes");
+                    useSocksProxy.setSelection(useProxy);
+                    if (socksProxyHost != null)
+                        socksProxyHost.setEnabled(useProxy);
+                    if (socksProxyPort != null)
+                        socksProxyPort.setEnabled(useProxy);
+                }
+                break;
+            }
+            case PI_SOCKS_PROXY_HOST: {
+                socksProxyHost.setText(propValue);
+                break;
+            }
+            case PI_SOCKS_PROXY_PORT: {
+                socksProxyPort.setText(propValue);
+                break;
+            }
+            }
+        }
+
+        /* @inheritDoc */
+        public void modifyText(ModifyEvent e) {
+            final Text text = (Text) e.widget;
+            final ConfProp prop = (ConfProp) text.getData("hProp");
+            Display.getDefault().syncExec(new Runnable() {
+                public void run() {
+                    mediator.notifyChange(TabMain.this, prop, text.getText());
+                }
+            });
+        }
+
+        /* @inheritDoc */
+        public void widgetDefaultSelected(SelectionEvent e) {
+            this.widgetSelected(e);
+        }
+
+        /* @inheritDoc */
+        public void widgetSelected(SelectionEvent e) {
+            final Button button = (Button) e.widget;
+            final ConfProp prop = (ConfProp) button.getData("hProp");
+
+            Display.getDefault().syncExec(new Runnable() {
+                public void run() {
+                    // We want to receive the update also!
+                    mediator.notifyChange(null, prop, button.getSelection() ? "yes" : "no");
+                }
+            });
+        }
+
+    }
+
+    private class TabAdvanced implements TabListener, ModifyListener {
+        TabMediator mediator;
+        private Composite panel;
+        private Map<String, Text> textMap = new TreeMap<String, Text>();
+
+        TabAdvanced(TabMediator mediator) {
+            this.mediator = mediator;
+            TabItem tab = new TabItem(mediator.folder, SWT.NONE);
+            tab.setText("Advanced parameters");
+            tab.setToolTipText("Access to advanced Hadoop parameters");
+            tab.setControl(createControl(mediator.folder));
+
+        }
+
+        private Control createControl(Composite parent) {
+            ScrolledComposite sc = new ScrolledComposite(parent, SWT.BORDER | SWT.H_SCROLL | SWT.V_SCROLL);
+            panel = buildPanel(sc);
+            sc.setContent(panel);
+            sc.setExpandHorizontal(true);
+            sc.setExpandVertical(true);
+            sc.setMinSize(640, 480);
+            sc.setMinSize(panel.computeSize(SWT.DEFAULT, SWT.DEFAULT));
+            return sc;
+        }
+
+        private Composite buildPanel(Composite parent) {
+            Composite panel = new Composite(parent, SWT.NONE);
+            GridLayout layout = new GridLayout();
+            layout.numColumns = 2;
+            layout.makeColumnsEqualWidth = false;
+            panel.setLayout(layout);
+            panel.setLayoutData(new GridData(GridData.FILL, GridData.FILL, true, true, 1, 1));
+
+            // Sort by property name
+            SortedMap<String, String> map = new TreeMap<String, String>();
+            Iterator<Entry<String, String>> it = location.getConfiguration();
+            while (it.hasNext()) {
+                Entry<String, String> entry = it.next();
+                map.put(entry.getKey(), entry.getValue());
+            }
+
+            for (Entry<String, String> entry : map.entrySet()) {
+                Text text = createConfNameEditor(this, panel, entry.getKey(), null);
+                textMap.put(entry.getKey(), text);
+            }
+            return panel;
+        }
+
+        public void notifyChange(ConfProp prop, final String propValue) {
+            Text text = textMap.get(location.getConfPropName(prop));
+            text.setText(propValue);
+        }
+
+        public void modifyText(ModifyEvent e) {
+            final Text text = (Text) e.widget;
+            Object hProp = text.getData("hProp");
+            final ConfProp prop = (hProp != null) ? (ConfProp) hProp : null;
+            Object hPropName = text.getData("hPropName");
+            final String propName = (hPropName != null) ? (String) hPropName : null;
+
+            Display.getDefault().syncExec(new Runnable() {
+                public void run() {
+                    if (prop != null)
+                        mediator.notifyChange(TabAdvanced.this, prop, text.getText());
+                    else
+                        mediator.notifyChange(TabAdvanced.this, propName, text.getText());
+                }
+            });
+        }
+
+    }
+
+    @Override
+    public void notifyChange(ConfProp confProp, String text) {
+        mediator.notifyChange(null, ConfProp.PI_LOCATION_NAME, text);
+    }
+
+    @Override
+    public void setChangeListener(ChangeListener l) {
+        changelistener=l;
+    }
+
+}
\ No newline at end of file

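Several cases in the switch above keep a composite "host:port" property in sync with its host and port halves, splitting with split(":", 2) in one direction and joining with String.format in the other. A small self-contained sketch of that round-trip (the helper class is illustrative):

final class HostPort {

    // Join host and port the same way the builder builds RM_DEFAULT_URI.
    static String join(String host, String port) {
        return String.format("%s:%s", host, port);
    }

    // Split "host:port"; a missing port yields an empty string, as above.
    static String[] split(String uri) {
        String[] strs = uri.split(":", 2);
        return new String[] { strs[0], (strs.length == 2) ? strs[1] : "" };
    }

    public static void main(String[] args) {
        String[] hp = split("resourcemanager:8032");
        System.out.println(join(hp[0], hp[1])); // prints resourcemanager:8032
    }
}
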
http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/c308e976/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/NewHDFSServerWizardPage.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/NewHDFSServerWizardPage.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/NewHDFSServerWizardPage.java
index f0a68af..cf49d40 100644
--- a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/NewHDFSServerWizardPage.java
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/NewHDFSServerWizardPage.java
@@ -24,11 +24,11 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.StringTokenizer;
 
+import org.apache.hdt.core.HadoopVersion;
 import org.apache.hdt.core.hdfs.HDFSClient;
 import org.apache.hdt.core.internal.hdfs.HDFSManager;
 import org.apache.hdt.core.launch.ConfProp;
 import org.apache.hdt.ui.Activator;
-import org.apache.hdt.ui.internal.launch.HadoopLocationWizard;
 import org.apache.log4j.Logger;
 import org.eclipse.core.runtime.CoreException;
 import org.eclipse.jface.wizard.WizardPage;
@@ -130,8 +130,9 @@ public class NewHDFSServerWizardPage extends WizardPage {
 			label.setText("&HDFS Version:");
 			Combo options =  new Combo (c, SWT.SINGLE | SWT.BORDER|SWT.READ_ONLY);
 			options.setLayoutData(new GridData(GridData.FILL_HORIZONTAL));
-			options.add (HadoopLocationWizard.HADOOP_1);
-			options.add (HadoopLocationWizard.HADOOP_2);
+			for(HadoopVersion ver:HadoopVersion.values()){
+				options.add(ver.getDisplayName());
+			}
 			options.addListener (SWT.Selection, new Listener () {
 
 				@Override


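With the hunk above, the wizard derives its version choices from the HadoopVersion enum instead of two hard-coded constants, so new versions show up automatically. Reading the selection back is then a matter of matching display names; a hedged sketch that assumes only the getDisplayName() accessor used above:

import org.apache.hdt.core.HadoopVersion;

final class VersionLookup {

    // Map the Combo's selected display name back to a HadoopVersion.
    static HadoopVersion forDisplayName(String displayName) {
        for (HadoopVersion ver : HadoopVersion.values()) {
            if (ver.getDisplayName().equals(displayName))
                return ver;
        }
        return null; // unknown selection
    }
}
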
[09/27] git commit: HDT-53: prompt user for hadoop perspective on adding zookeeper server/hadoop server

Posted by rs...@apache.org.
HDT-53: prompt user for hadoop perspective on adding zookeeper server/hadoop server


Project: http://git-wip-us.apache.org/repos/asf/incubator-hdt/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hdt/commit/ec415a98
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hdt/tree/ec415a98
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hdt/diff/ec415a98

Branch: refs/heads/hadoop-eclipse-merge
Commit: ec415a9809821a077ec0ed48d0dca45a7817d48a
Parents: 4514771
Author: Rahul Sharma <rs...@apache.org>
Authored: Thu May 15 14:51:02 2014 +0530
Committer: Rahul Sharma <rs...@apache.org>
Committed: Fri May 23 08:46:13 2014 +0530

----------------------------------------------------------------------
 .../hdt/ui/internal/hdfs/NewHDFSWizard.java     | 22 ++++++++++++++++--
 .../internal/zookeeper/NewZooKeeperWizard.java  | 24 +++++++++++++++++++-
 2 files changed, 43 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/ec415a98/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/NewHDFSWizard.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/NewHDFSWizard.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/NewHDFSWizard.java
index 4cd40de..9b0706c 100644
--- a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/NewHDFSWizard.java
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/NewHDFSWizard.java
@@ -20,17 +20,23 @@ package org.apache.hdt.ui.internal.hdfs;
 import org.apache.hdt.core.internal.hdfs.HDFSManager;
 import org.apache.hdt.ui.Activator;
 import org.apache.log4j.Logger;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.core.runtime.IConfigurationElement;
+import org.eclipse.core.runtime.IExecutableExtension;
 import org.eclipse.core.runtime.jobs.Job;
 import org.eclipse.jface.preference.IPreferenceStore;
 import org.eclipse.jface.viewers.IStructuredSelection;
 import org.eclipse.jface.wizard.Wizard;
+import org.eclipse.swt.widgets.Display;
 import org.eclipse.ui.INewWizard;
 import org.eclipse.ui.IWorkbench;
+import org.eclipse.ui.wizards.newresource.BasicNewProjectResourceWizard;
 
-public class NewHDFSWizard extends Wizard implements INewWizard {
+public class NewHDFSWizard extends Wizard implements INewWizard,IExecutableExtension {
 
 	private static Logger logger = Logger.getLogger(NewHDFSWizard.class);
 	private NewHDFSServerWizardPage serverLocationWizardPage = null;
+	private IConfigurationElement configElement;
 
 	public NewHDFSWizard() {
 		// TODO Auto-generated constructor stub
@@ -56,6 +62,11 @@ public class NewHDFSWizard extends Wizard implements INewWizard {
 
 	@Override
 	public boolean performFinish() {
+		Display.getDefault().syncExec(new Runnable() {
+			public void run() {
+				BasicNewProjectResourceWizard.updatePerspective(configElement);
+			}
+		});
 		if (serverLocationWizardPage != null) {
 			String ambariUrl = serverLocationWizardPage.getHdfsServerLocation();
 			if (ambariUrl != null) {
@@ -79,7 +90,14 @@ public class NewHDFSWizard extends Wizard implements INewWizard {
 		}
 		return false;
 	}
-
+	/* (non-Javadoc)
+	 * @see org.eclipse.core.runtime.IExecutableExtension#setInitializationData
+	 * (org.eclipse.core.runtime.IConfigurationElement, java.lang.String, java.lang.Object)
+	 */
+	@Override
+	public void setInitializationData(IConfigurationElement config, String propertyName, Object data) throws CoreException {
+		this.configElement=config;
+	}
 	
 
 }

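The changes above follow the stock Eclipse recipe for switching perspectives from a New wizard: implementing IExecutableExtension makes the workbench hand the wizard its plugin.xml configuration element, and BasicNewProjectResourceWizard.updatePerspective(...) then applies whatever perspective that declaration names. A stripped-down sketch of the skeleton (assumes the wizard is registered in plugin.xml with a target perspective, e.g. a finalPerspective attribute):

import org.eclipse.core.runtime.CoreException;
import org.eclipse.core.runtime.IConfigurationElement;
import org.eclipse.core.runtime.IExecutableExtension;
import org.eclipse.jface.wizard.Wizard;
import org.eclipse.ui.wizards.newresource.BasicNewProjectResourceWizard;

public class PerspectiveSwitchingWizard extends Wizard implements IExecutableExtension {

	private IConfigurationElement configElement;

	// The workbench calls this with the wizard's plugin.xml declaration.
	@Override
	public void setInitializationData(IConfigurationElement config, String propertyName, Object data) throws CoreException {
		this.configElement = config; // stashed for performFinish()
	}

	@Override
	public boolean performFinish() {
		// Prompt for / switch to the perspective declared for this wizard.
		BasicNewProjectResourceWizard.updatePerspective(configElement);
		return true;
	}
}
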
http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/ec415a98/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/NewZooKeeperWizard.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/NewZooKeeperWizard.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/NewZooKeeperWizard.java
index 405773a..60e740b 100644
--- a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/NewZooKeeperWizard.java
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/NewZooKeeperWizard.java
@@ -19,18 +19,26 @@ package org.apache.hdt.ui.internal.zookeeper;
 
 import org.apache.hdt.core.internal.zookeeper.ZooKeeperManager;
 import org.apache.hdt.ui.Activator;
+import org.apache.hdt.ui.internal.launch.HadoopLocationWizard;
+import org.apache.hdt.ui.internal.launch.ServerRegistry;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.core.runtime.IConfigurationElement;
+import org.eclipse.core.runtime.IExecutableExtension;
 import org.eclipse.core.runtime.Status;
 import org.eclipse.core.runtime.jobs.Job;
 import org.eclipse.jface.preference.IPreferenceStore;
 import org.eclipse.jface.viewers.IStructuredSelection;
 import org.eclipse.jface.wizard.Wizard;
+import org.eclipse.swt.widgets.Display;
 import org.eclipse.ui.INewWizard;
 import org.eclipse.ui.IWorkbench;
+import org.eclipse.ui.wizards.newresource.BasicNewProjectResourceWizard;
 
-public class NewZooKeeperWizard extends Wizard implements INewWizard {
+public class NewZooKeeperWizard extends Wizard implements INewWizard,IExecutableExtension {
 
 	//private static Logger logger = Logger.getLogger(NewZooKeeperWizard.class);
 	private NewZooKeeperServerWizardPage serverLocationWizardPage = null;
+	private IConfigurationElement configElement;
 
 	public NewZooKeeperWizard() {
 	}
@@ -55,6 +63,11 @@ public class NewZooKeeperWizard extends Wizard implements INewWizard {
 
 	@Override
 	public boolean performFinish() {
+		Display.getDefault().syncExec(new Runnable() {
+			public void run() {
+				BasicNewProjectResourceWizard.updatePerspective(configElement);
+			}
+		});
 		if (serverLocationWizardPage != null) {
 			String ambariUrl = serverLocationWizardPage.getZkServerLocation();
 			if (ambariUrl != null) {
@@ -78,4 +91,13 @@ public class NewZooKeeperWizard extends Wizard implements INewWizard {
 		return false;
 	}
 
+	/* (non-Javadoc)
+	 * @see org.eclipse.core.runtime.IExecutableExtension#setInitializationData
+	 * (org.eclipse.core.runtime.IConfigurationElement, java.lang.String, java.lang.Object)
+	 */
+	@Override
+	public void setInitializationData(IConfigurationElement config, String propertyName, Object data) throws CoreException {
+		this.configElement=config;
+	}
+
 }


[13/27] - Adding hadoop2 based on 2.2 version - Changing fragments to plugins (fragment classpath is appended to the host classpath, which causes issues, thus making it a plugin) - Loading classes in different context loaders (http://wiki.eclipse.org/FAQ_How_

Posted by rs...@apache.org.
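The diff below also shows why the subject line mentions context classloaders: since the Hadoop 2 jars ship in their own plugin, calls into the Hadoop client APIs temporarily install the plugin's classloader as the thread context loader and restore the old one afterwards. The usual shape of that guard, as a sketch (the helper name is illustrative):

// Run work with this plugin's classloader as the context loader.
static <T> T withPluginClassLoader(java.util.concurrent.Callable<T> work) throws Exception {
	Thread current = Thread.currentThread();
	ClassLoader oldLoader = current.getContextClassLoader();
	try {
		current.setContextClassLoader(HadoopCluster.class.getClassLoader());
		return work.call(); // Hadoop resolves classes via the context loader
	} finally {
		current.setContextClassLoader(oldLoader); // always restore
	}
}
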
http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/34799cec/org.apache.hdt.hadoop2.release/src/org/apache/hdt/hadoop2/release/HadoopCluster.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop2.release/src/org/apache/hdt/hadoop2/release/HadoopCluster.java b/org.apache.hdt.hadoop2.release/src/org/apache/hdt/hadoop2/release/HadoopCluster.java
new file mode 100644
index 0000000..b200a9f
--- /dev/null
+++ b/org.apache.hdt.hadoop2.release/src/org/apache/hdt/hadoop2/release/HadoopCluster.java
@@ -0,0 +1,619 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.hadoop2.release;
+
+import java.io.BufferedInputStream;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.TreeMap;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.logging.Logger;
+
+import javax.xml.parsers.DocumentBuilder;
+import javax.xml.parsers.DocumentBuilderFactory;
+import javax.xml.parsers.ParserConfigurationException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.mapred.JobClient;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.JobID;
+import org.apache.hadoop.mapred.JobStatus;
+import org.apache.hadoop.mapred.RunningJob;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hdt.core.Activator;
+import org.apache.hdt.core.launch.AbstractHadoopCluster;
+import org.apache.hdt.core.launch.ConfProp;
+import org.apache.hdt.core.launch.IHadoopJob;
+import org.apache.hdt.core.launch.IJarModule;
+import org.apache.hdt.core.launch.IJobListener;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.core.runtime.IProgressMonitor;
+import org.eclipse.core.runtime.IStatus;
+import org.eclipse.core.runtime.Status;
+import org.eclipse.core.runtime.jobs.Job;
+import org.eclipse.swt.widgets.Display;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+import org.w3c.dom.Node;
+import org.w3c.dom.NodeList;
+import org.w3c.dom.Text;
+import org.xml.sax.SAXException;
+
+/**
+ * Representation of a Hadoop location, meaning the master node (NameNode,
+ * JobTracker).
+ * 
+ * <p>
+ * This class does not create any SSH connection anymore. Tunneling must be
+ * set up outside of Eclipse for now (using Putty or <tt>ssh -D&lt;port&gt;
+ * &lt;host&gt;</tt>)
+ * 
+ * <p>
+ * <em> TODO </em>
+ * <li>Disable the updater if a location becomes unreachable or fails for too
+ * long
+ * <li>Stop the updater on the location's disposal/removal
+ */
+
+public class HadoopCluster extends AbstractHadoopCluster {
+	private ExecutorService service= Executors.newSingleThreadExecutor();
+
+	/**
+	 * Frequency of location status observations expressed as the delay in ms
+	 * between each observation
+	 * 
+	 * TODO Add a preference parameter for this
+	 */
+	protected static final long STATUS_OBSERVATION_DELAY = 1500;
+
+	/**
+	 * Background job that periodically polls this location for job status.
+	 */
+	public class LocationStatusUpdater extends Job {
+
+		JobClient client = null;
+
+		/**
+		 * Setup the updater
+		 */
+		public LocationStatusUpdater() {
+			super("Map/Reduce location status updater");
+			this.setSystem(true);
+		}
+
+		/* @inheritDoc */
+		@Override
+		protected IStatus run(IProgressMonitor monitor) {
+			if (client == null) {
+				try {
+					client = HadoopCluster.this.getJobClient();
+
+				} catch (IOException ioe) {
+					client = null;
+					return new Status(Status.ERROR, Activator.BUNDLE_ID, 0, "Cannot connect to the Map/Reduce location: "
+							+ HadoopCluster.this.getLocationName(), ioe);
+				}
+			}
+			Thread current = Thread.currentThread();
+			ClassLoader oldLoader = current.getContextClassLoader();
+			try {
+				current.setContextClassLoader(HadoopCluster.class.getClassLoader());
+				// Set of all known existing Job IDs we want fresh info of
+				Set<JobID> missingJobIds = new HashSet<JobID>(runningJobs.keySet());
+
+				JobStatus[] jstatus = client.jobsToComplete();
+				jstatus = jstatus == null ? new JobStatus[0] : jstatus;
+				for (JobStatus status : jstatus) {
+
+					JobID jobId = status.getJobID();
+					missingJobIds.remove(jobId);
+
+					HadoopJob hJob;
+					synchronized (HadoopCluster.this.runningJobs) {
+						hJob = runningJobs.get(jobId);
+						if (hJob == null) {
+							// Unknown job, create an entry
+							RunningJob running = client.getJob(jobId);
+							hJob = new HadoopJob(HadoopCluster.this, jobId, running, status);
+							newJob(hJob);
+						}
+					}
+
+					// Update HadoopJob with fresh info
+					updateJob(hJob, status);
+				}
+
+				// Ask explicitly for fresh info for these Job IDs
+				for (JobID jobId : missingJobIds) {
+					HadoopJob hJob = runningJobs.get(jobId);
+					if (!hJob.isCompleted())
+						updateJob(hJob, null);
+				}
+
+			} catch (IOException ioe) {
+				client = null;
+				return new Status(Status.ERROR, Activator.BUNDLE_ID, 0, "Cannot retrieve running Jobs on location: " + HadoopCluster.this.getLocationName(),
+						ioe);
+			} finally {
+				current.setContextClassLoader(oldLoader);
+			}
+
+
+			// Schedule the next observation
+			schedule(STATUS_OBSERVATION_DELAY);
+
+			return Status.OK_STATUS;
+		}
+
+		/**
+		 * Stores the new job and makes it available
+		 * 
+		 * @param data the new job
+		 */
+		private void newJob(final HadoopJob data) {
+			runningJobs.put(data.jobId, data);
+
+			Display.getDefault().asyncExec(new Runnable() {
+				public void run() {
+					fireJobAdded(data);
+				}
+			});
+		}
+
+		/**
+		 * Updates the status of a job
+		 * 
+		 * @param job
+		 *            the job to update
+		 */
+		private void updateJob(final HadoopJob job, JobStatus status) {
+			job.update(status);
+
+			Display.getDefault().asyncExec(new Runnable() {
+				public void run() {
+					fireJobChanged(job);
+				}
+			});
+		}
+
+	}
+
+	static Logger log = Logger.getLogger(HadoopCluster.class.getName());
+
+	/**
+	 * Hadoop configuration of the location. Also contains specific parameters
+	 * for the plug-in. These parameters are prefixed with eclipse.plug-in.*
+	 */
+	private Configuration conf;
+
+	/**
+	 * Jobs listeners
+	 */
+	private Set<IJobListener> jobListeners = new HashSet<IJobListener>();
+
+	/**
+	 * Jobs running on this location. The keys of this map are the Job IDs.
+	 */
+	private transient Map<JobID, HadoopJob> runningJobs = Collections.synchronizedMap(new TreeMap<JobID, HadoopJob>());
+
+	/**
+	 * Status updater for this location
+	 */
+	private LocationStatusUpdater statusUpdater;
+
+	// state and status - transient
+	private transient String state = "";
+
+	/**
+	 * Creates a new default Hadoop location
+	 */
+	public HadoopCluster() {
+		this.conf = new Configuration();
+		this.addPluginConfigDefaultProperties();
+		conf.set("mapreduce.framework.name", "yarn");
+		conf.set(YarnConfiguration.RM_ADDRESS, "localhost:8032");
+		conf.set(getConfPropName(ConfProp.PI_JOB_TRACKER_PORT), "8032");
+		conf.set("mapreduce.jobhistory.address", "localhost:10020");
+	}
+
+	/**
+	 * Creates a location from a file
+	 * 
+	 * @throws IOException
+	 * @throws SAXException
+	 * @throws ParserConfigurationException
+	 */
+	public HadoopCluster(File file) throws ParserConfigurationException, SAXException, IOException {
+		this();
+		this.loadFromXML(file);
+	}
+
+	/**
+	 * Create a new Hadoop location by copying an already existing one.
+	 * 
+	 * @param existing
+	 *            the location to copy
+	 */
+	public HadoopCluster(HadoopCluster existing) {
+		this();
+		this.load(existing);
+	}
+
+	public void addJobListener(IJobListener l) {
+		jobListeners.add(l);
+	}
+
+	public void dispose() {
+		// TODO close DFS connections?
+	}
+
+	/**
+	 * List all elements that should be present in the Server window (all
+	 * servers and all jobs running on each server)
+	 * 
+	 * @return collection of jobs for this location
+	 */
+	public Collection<? extends IHadoopJob> getJobs() {
+		startStatusUpdater();
+		return this.runningJobs.values();
+	}
+
+	/**
+	 * Remove the given job from the currently running jobs map
+	 * 
+	 * @param job
+	 *            the job to remove
+	 */
+	public void purgeJob(final IHadoopJob job) {
+		runningJobs.remove(job.getJobID());
+		Display.getDefault().asyncExec(new Runnable() {
+			public void run() {
+				fireJobRemoved(job);
+			}
+		});
+	}
+
+	/**
+	 * Returns an iterator over the {@link Configuration} entries defining this location.
+	 * 
+	 * @return an iterator over the location's configuration entries
+	 */
+	public Iterator<Entry<String, String>> getConfiguration() {
+		return this.conf.iterator();
+	}
+
+	/**
+	 * @return the conf
+	 */
+	public Configuration getConf() {
+		return conf;
+	}
+
+	/**
+	 * Gets a Hadoop configuration property value
+	 * 
+	 * @param prop
+	 *            the configuration property
+	 * @return the property value
+	 */
+	public String getConfPropValue(ConfProp prop) {
+		String confPropName = getConfPropName(prop);
+		return conf.get(confPropName);
+	}
+
+	/**
+	 * Gets a Hadoop configuration property value
+	 * 
+	 * @param propName
+	 *            the property name
+	 * @return the property value
+	 */
+	public String getConfPropValue(String propName) {
+		return this.conf.get(propName);
+	}
+
+	public String getLocationName() {
+		return getConfPropValue(ConfProp.PI_LOCATION_NAME);
+	}
+
+	/**
+	 * Returns the master host name of the Hadoop location (the Job tracker)
+	 * 
+	 * @return the host name of the Job tracker
+	 */
+	public String getMasterHostName() {
+		return getConfPropValue(ConfProp.PI_JOB_TRACKER_HOST);
+	}
+
+	public String getState() {
+		return state;
+	}
+
+	/**
+	 * Overwrite this location with the given existing location
+	 * 
+	 * @param existing
+	 *            the existing location
+	 */
+	public void load(AbstractHadoopCluster existing) {
+		this.conf = new Configuration(((HadoopCluster) existing).conf);
+	}
+
+	/**
+	 * Overwrite this location with settings available in the given XML file.
+	 * The existing configuration is preserved if the XML file is invalid.
+	 * 
+	 * @param file
+	 *            the XML file to load
+	 * @return true if the file was valid and loaded, false otherwise
+	 */
+	public boolean loadFromXML(File file) {
+
+		Configuration newConf = new Configuration(this.conf);
+		DocumentBuilder builder;
+		Document document;
+		try {
+			builder = DocumentBuilderFactory.newInstance().newDocumentBuilder();
+			document = builder.parse(file);
+		} catch (ParserConfigurationException e) {
+			e.printStackTrace();
+			return false;
+		} catch (SAXException e) {
+			e.printStackTrace();
+			return false;
+		} catch (IOException e) {
+			e.printStackTrace();
+			return false;
+		}
+		Element root = document.getDocumentElement();
+		if (!"configuration".equals(root.getTagName()))
+			return false;
+		NodeList props = root.getChildNodes();
+		for (int i = 0; i < props.getLength(); i++) {
+			Node propNode = props.item(i);
+			if (!(propNode instanceof Element))
+				continue;
+			Element prop = (Element) propNode;
+			if (!"property".equals(prop.getTagName()))
+				return false;
+			NodeList fields = prop.getChildNodes();
+			String attr = null;
+			String value = null;
+			for (int j = 0; j < fields.getLength(); j++) {
+				Node fieldNode = fields.item(j);
+				if (!(fieldNode instanceof Element))
+					continue;
+				Element field = (Element) fieldNode;
+				if ("name".equals(field.getTagName()))
+					attr = ((Text) field.getFirstChild()).getData();
+				if ("value".equals(field.getTagName()) && field.hasChildNodes())
+					value = ((Text) field.getFirstChild()).getData();
+			}
+			if (attr != null && value != null)
+				newConf.set(attr, value);
+		}
+
+		this.conf = newConf;
+		return true;
+	}
+
+	/**
+	 * Sets a Hadoop configuration property value
+	 * 
+	 * @param prop
+	 *            the property
+	 * @param propValue
+	 *            the property value
+	 */
+	public void setConfPropValue(ConfProp prop, String propValue) {
+		if (propValue != null)
+			setConfPropValue(getConfPropName(prop), propValue);
+	}
+
+	@Override
+	public void setConfPropValue(String propName, String propValue) {
+		conf.set(propName, propValue);
+	}
+
+	public void setLocationName(String newName) {
+		setConfPropValue(ConfProp.PI_LOCATION_NAME, newName);
+	}
+
+	@Override
+	public String getConfPropName(ConfProp prop) {
+		if (ConfProp.JOB_TRACKER_URI.equals(prop))
+			return YarnConfiguration.RM_ADDRESS;
+		return super.getConfPropName(prop);
+	}
+
+	@Override
+	public ConfProp getConfPropForName(String propName) {
+		if (YarnConfiguration.RM_ADDRESS.equals(propName))
+			return ConfProp.JOB_TRACKER_URI;
+		if ("mapred.job.tracker".equals(propName))
+			return null;
+		return super.getConfPropForName(propName);
+	}
+	/**
+	 * Writes this location's settings to the given file
+	 * 
+	 * @param file
+	 *            the destination file
+	 * @throws IOException
+	 */
+	public void storeSettingsToFile(File file) throws IOException {
+		FileOutputStream fos = new FileOutputStream(file);
+		try {
+			this.conf.writeXml(fos);
+			fos.close();
+			fos = null;
+		} finally {
+			IOUtils.closeStream(fos);
+		}
+
+	}
+
+	/* @inheritDoc */
+	@Override
+	public String toString() {
+		return this.getLocationName();
+	}
+
+	/**
+	 * Fill the configuration with valid default values
+	 */
+	private void addPluginConfigDefaultProperties() {
+		for (ConfProp prop : ConfProp.values()) {
+			conf.set(getConfPropName(prop), prop.defVal);
+		}
+	}
+
+	/**
+	 * Starts the location status updater
+	 */
+	private synchronized void startStatusUpdater() {
+		if (statusUpdater == null) {
+			statusUpdater = new LocationStatusUpdater();
+			statusUpdater.schedule();
+		}
+	}
+
+	/*
+	 * Rewrite of the connecting and tunneling to the Hadoop location
+	 */
+
+	/**
+	 * Provides access to the default file system of this location.
+	 * 
+	 * @return a {@link FileSystem}
+	 */
+	public FileSystem getDFS() throws IOException {
+		return FileSystem.get(this.conf);
+	}
+
+	/**
+	 * Provides access to the Job tracking system of this location
+	 * 
+	 * @return a {@link JobClient}
+	 */
+	public JobClient getJobClient() throws IOException {
+		JobConf jconf = new JobConf(this.conf);
+		return new JobClient(jconf);
+	}
+
+	/*
+	 * Listeners handling
+	 */
+
+	protected void fireJarPublishDone(IJarModule jar) {
+		for (IJobListener listener : jobListeners) {
+			listener.publishDone(jar);
+		}
+	}
+
+	protected void fireJarPublishStart(IJarModule jar) {
+		for (IJobListener listener : jobListeners) {
+			listener.publishStart(jar);
+		}
+	}
+
+	protected void fireJobAdded(HadoopJob job) {
+		for (IJobListener listener : jobListeners) {
+			listener.jobAdded(job);
+		}
+	}
+
+	protected void fireJobRemoved(IHadoopJob job) {
+		for (IJobListener listener : jobListeners) {
+			listener.jobRemoved(job);
+		}
+	}
+
+	protected void fireJobChanged(HadoopJob job) {
+		for (IJobListener listener : jobListeners) {
+			listener.jobChanged(job);
+		}
+	}
+
+	@Override
+	public void saveConfiguration(File confDir, String jarFilePath) throws IOException {
+		// Prepare the Hadoop configuration
+		JobConf conf = new JobConf(this.conf);
+		conf.setJar(jarFilePath);
+		// Write it to the disk file
+		File coreSiteFile = new File(confDir, "core-site.xml");
+		File yarnSiteFile = new File(confDir, "yarn-site.xml");
+		FileOutputStream fos = new FileOutputStream(coreSiteFile);
+		FileInputStream fis = null;
+		try {
+			conf.writeXml(fos);
+			fos.close();
+			fos = new FileOutputStream(yarnSiteFile);
+			fis = new FileInputStream(coreSiteFile);
+			IOUtils.copyBytes(new BufferedInputStream(fis), fos, 4096);
+		} finally {
+			IOUtils.closeStream(fos);
+			IOUtils.closeStream(fis);
+		}
+
+	}
+
+	/* (non-Javadoc)
+	 * @see org.apache.hdt.core.launch.AbstractHadoopCluster#isAvailable()
+	 */
+	@Override
+	public boolean isAvailable() throws CoreException {
+		Callable<JobClient> task = new Callable<JobClient>() {
+			@Override
+			public JobClient call() throws Exception {
+				return getJobClient();
+			}
+		};
+		Future<JobClient> jobClientFuture = service.submit(task);
+		try {
+			jobClientFuture.get(500, TimeUnit.SECONDS);
+			return true;
+		} catch (Exception e) {
+			e.printStackTrace();
+			throw new CoreException(new Status(Status.ERROR,
+					Activator.BUNDLE_ID, "unable to connect to server", e));
+		}
+	}
+
+	@Override
+	public String getVersion() {
+		return "2.2";
+	}
+}
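
The isAvailable() implementation above bounds a connection attempt that may
hang (getJobClient() can block on an unreachable ResourceManager) by running it
on a single-thread executor and waiting on the Future with a timeout. A minimal
standalone sketch of that pattern, with a hypothetical connect() standing in
for getJobClient():

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

public class BoundedConnectSketch {

	private final ExecutorService service = Executors.newSingleThreadExecutor();

	// Hypothetical stand-in for a blocking connection call.
	private Object connect() throws Exception {
		Thread.sleep(100); // simulate slow connection setup
		return new Object();
	}

	public boolean isAvailable(long timeout, TimeUnit unit) {
		Future<Object> future = service.submit(new Callable<Object>() {
			@Override
			public Object call() throws Exception {
				return connect();
			}
		});
		try {
			future.get(timeout, unit); // gives up once the timeout elapses
			return true;
		} catch (Exception e) {
			future.cancel(true); // interrupt the hung attempt
			return false;
		}
	}

	public static void main(String[] args) {
		BoundedConnectSketch sketch = new BoundedConnectSketch();
		System.out.println(sketch.isAvailable(5, TimeUnit.SECONDS));
		sketch.service.shutdown(); // the real class keeps its executor alive
	}
}

Note that Future.get() by itself leaves the worker thread running after a
timeout, which is why the sketch cancels the Future with interruption.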

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/34799cec/org.apache.hdt.hadoop2.release/src/org/apache/hdt/hadoop2/release/HadoopJob.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop2.release/src/org/apache/hdt/hadoop2/release/HadoopJob.java b/org.apache.hdt.hadoop2.release/src/org/apache/hdt/hadoop2/release/HadoopJob.java
new file mode 100644
index 0000000..a648cae
--- /dev/null
+++ b/org.apache.hdt.hadoop2.release/src/org/apache/hdt/hadoop2/release/HadoopJob.java
@@ -0,0 +1,338 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.hadoop2.release;
+
+import java.io.File;
+import java.io.IOException;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapred.Counters;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.JobID;
+import org.apache.hadoop.mapred.JobStatus;
+import org.apache.hadoop.mapred.RunningJob;
+import org.apache.hdt.core.launch.AbstractHadoopCluster;
+import org.apache.hdt.core.launch.IHadoopJob;
+
+/**
+ * Representation of a Map/Reduce running job on a given location
+ */
+
+public class HadoopJob implements IHadoopJob {
+
+	/**
+	 * Enum representation of a Job state
+	 */
+	public enum JobState {
+		PREPARE(JobStatus.PREP), RUNNING(JobStatus.RUNNING), FAILED(JobStatus.FAILED), SUCCEEDED(JobStatus.SUCCEEDED);
+
+		final int state;
+
+		JobState(int state) {
+			this.state = state;
+		}
+
+		static JobState ofInt(int state) {
+			if (state == JobStatus.PREP) {
+				return PREPARE;
+			} else if (state == JobStatus.RUNNING) {
+				return RUNNING;
+			} else if (state == JobStatus.FAILED) {
+				return FAILED;
+			} else if (state == JobStatus.SUCCEEDED) {
+				return SUCCEEDED;
+			} else {
+				return null;
+			}
+		}
+	}
+
+	/**
+	 * Location this Job runs on
+	 */
+	private final HadoopCluster location;
+
+	/**
+	 * Unique identifier of this Job
+	 */
+	final JobID jobId;
+
+	/**
+	 * Status representation of a running job. This actually contains a
+	 * reference to a JobClient. Its methods might block.
+	 */
+	RunningJob running;
+
+	/**
+	 * Last polled status
+	 * 
+	 * @deprecated should apparently not be used
+	 */
+	JobStatus status;
+
+	/**
+	 * Last polled counters
+	 */
+	Counters counters;
+
+	/**
+	 * Job Configuration
+	 */
+	JobConf jobConf = null;
+
+	boolean completed = false;
+
+	boolean successful = false;
+
+	boolean killed = false;
+
+	int totalMaps;
+
+	int totalReduces;
+
+	int completedMaps;
+
+	int completedReduces;
+
+	float mapProgress;
+
+	float reduceProgress;
+
+	/**
+	 * Constructor for a Hadoop job representation
+	 * 
+	 * @param location
+	 * @param id
+	 * @param running
+	 * @param status
+	 */
+	public HadoopJob(HadoopCluster location, JobID id, RunningJob running, JobStatus status) {
+		this.location = location;
+		this.jobId = id;
+		this.running = running;
+		loadJobFile();
+		update(status);
+	}
+
+	/**
+	 * Tries to locate and load the JobConf file for this job so as to get
+	 * more details on the job (number of maps and reduces)
+	 */
+	private void loadJobFile() {
+		try {
+			String jobFile = getJobFile();
+			FileSystem fs = location.getDFS();
+			File tmp = File.createTempFile(getJobID().toString(), ".xml");
+			if (FileUtil.copy(fs, new Path(jobFile), tmp, false, location.getConf())) {
+				this.jobConf = new JobConf(tmp.toString());
+
+				this.totalMaps = jobConf.getNumMapTasks();
+				this.totalReduces = jobConf.getNumReduceTasks();
+			}
+
+		} catch (IOException ioe) {
+			ioe.printStackTrace();
+		}
+	}
+
+	/* @inheritDoc */
+	@Override
+	public int hashCode() {
+		final int prime = 31;
+		int result = 1;
+		result = prime * result + ((jobId == null) ? 0 : jobId.hashCode());
+		result = prime * result + ((location == null) ? 0 : location.hashCode());
+		return result;
+	}
+
+	/* @inheritDoc */
+	@Override
+	public boolean equals(Object obj) {
+		if (this == obj)
+			return true;
+		if (obj == null)
+			return false;
+		if (!(obj instanceof HadoopJob))
+			return false;
+		final HadoopJob other = (HadoopJob) obj;
+		if (jobId == null) {
+			if (other.jobId != null)
+				return false;
+		} else if (!jobId.equals(other.jobId))
+			return false;
+		if (location == null) {
+			if (other.location != null)
+				return false;
+		} else if (!location.equals(other.location))
+			return false;
+		return true;
+	}
+
+	/**
+	 * Gets the running state of the job (see {@link JobStatus}).
+	 * 
+	 * @return the job state as a string
+	 */
+	public String getState() {
+		if (this.completed) {
+			if (this.successful) {
+				return JobState.SUCCEEDED.toString();
+			} else {
+				return JobState.FAILED.toString();
+			}
+		} else {
+			return JobState.RUNNING.toString();
+		}
+	}
+
+	/**
+	 * @return the job ID as a string
+	 */
+	public String getJobID() {
+		return this.jobId.toString();
+	}
+
+	/**
+	 * @return the location this job runs on
+	 */
+	public AbstractHadoopCluster getLocation() {
+		return this.location;
+	}
+
+	/**
+	 * @return true if this job has completed
+	 */
+	public boolean isCompleted() {
+		return this.completed;
+	}
+
+	/**
+	 * @return the job name
+	 */
+	public String getJobName() {
+		return this.running.getJobName();
+	}
+
+	/**
+	 * @return the path of the job configuration file
+	 */
+	public String getJobFile() {
+		return this.running.getJobFile();
+	}
+
+	/**
+	 * Return the tracking URL for this Job.
+	 * 
+	 * @return string representation of the tracking URL for this Job
+	 */
+	public String getTrackingURL() {
+		return this.running.getTrackingURL();
+	}
+
+	/**
+	 * Returns a string representation of this job status
+	 * 
+	 * @return string representation of this job status
+	 */
+	public String getStatus() {
+
+		StringBuffer s = new StringBuffer();
+
+		s.append("Maps : " + completedMaps + "/" + totalMaps);
+		s.append(" (" + mapProgress + ")");
+		s.append("  Reduces : " + completedReduces + "/" + totalReduces);
+		s.append(" (" + reduceProgress + ")");
+
+		return s.toString();
+	}
+
+	/**
+	 * Update this job status according to the given JobStatus
+	 * 
+	 * @param status
+	 */
+	void update(JobStatus status) {
+		this.status = status;
+		try {
+			this.counters = running.getCounters();
+			this.completed = running.isComplete();
+			this.successful = running.isSuccessful();
+			this.mapProgress = running.mapProgress();
+			this.reduceProgress = running.reduceProgress();
+			// running.getTaskCompletionEvents(fromEvent);
+
+		} catch (IOException ioe) {
+			ioe.printStackTrace();
+		}
+
+		this.completedMaps = (int) (this.totalMaps * this.mapProgress);
+		this.completedReduces = (int) (this.totalReduces * this.reduceProgress);
+	}
+
+	/**
+	 * Prints this job's counters (for debugging purposes)
+	 */
+	void printCounters() {
+		System.out.printf("New Job: %s\n", counters);
+		for (String groupName : counters.getGroupNames()) {
+			Counters.Group group = counters.getGroup(groupName);
+			System.out.printf("\t%s[%s]\n", groupName, group.getDisplayName());
+
+			for (Counters.Counter counter : group) {
+				System.out.printf("\t\t%s: %s\n", counter.getDisplayName(), counter.getCounter());
+			}
+		}
+		System.out.printf("\n");
+	}
+
+	/**
+	 * Kill this job
+	 */
+	public void kill() {
+		try {
+			this.running.killJob();
+			this.killed = true;
+
+		} catch (IOException e) {
+			e.printStackTrace();
+		}
+	}
+
+	/**
+	 * Prints this job's status (for debugging purposes)
+	 */
+	public void display() {
+		System.out.printf("Job id=%s, name=%s\n", getJobID(), getJobName());
+		System.out.printf("Configuration file: %s\n", getJobFile());
+		System.out.printf("Tracking URL: %s\n", getTrackingURL());
+
+		System.out.printf("Completion: map: %f reduce %f\n", 100.0 * this.mapProgress, 100.0 * this.reduceProgress);
+
+		System.out.println("Job total maps = " + totalMaps);
+		System.out.println("Job completed maps = " + completedMaps);
+		System.out.println("Map percentage complete = " + mapProgress);
+		System.out.println("Job total reduces = " + totalReduces);
+		System.out.println("Job completed reduces = " + completedReduces);
+		System.out.println("Reduce percentage complete = " + reduceProgress);
+		System.out.flush();
+	}
+
+}
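
For reference, a short usage sketch of the org.apache.hadoop.mapred polling
calls that LocationStatusUpdater and HadoopJob.update() build on: list the
incomplete jobs, resolve each to a RunningJob handle, and read its progress.
The ResourceManager address below is an assumed local default, not taken from
this commit:

import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobID;
import org.apache.hadoop.mapred.JobStatus;
import org.apache.hadoop.mapred.RunningJob;

public class JobPollSketch {

	public static void main(String[] args) throws Exception {
		JobConf conf = new JobConf();
		conf.set("mapreduce.framework.name", "yarn");
		conf.set("yarn.resourcemanager.address", "localhost:8032"); // assumption
		JobClient client = new JobClient(conf);
		try {
			JobStatus[] pending = client.jobsToComplete();
			pending = pending == null ? new JobStatus[0] : pending;
			for (JobStatus status : pending) {
				JobID id = status.getJobID();
				RunningJob running = client.getJob(id); // may be null if the job is gone
				if (running == null)
					continue;
				System.out.printf("%s %s map=%.0f%% reduce=%.0f%%%n",
						id, running.getJobName(),
						100.0 * running.mapProgress(),
						100.0 * running.reduceProgress());
			}
		} finally {
			client.close();
		}
	}
}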

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/34799cec/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/HDFSLightweightLabelDecorator.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/HDFSLightweightLabelDecorator.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/HDFSLightweightLabelDecorator.java
index 9424a45..f0d01f8 100644
--- a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/HDFSLightweightLabelDecorator.java
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/HDFSLightweightLabelDecorator.java
@@ -111,7 +111,7 @@ public class HDFSLightweightLabelDecorator implements ILightweightLabelDecorator
 							String userId = server.getUserId();
 							if (userId == null) {
 								try {
-									userId = hdfsManager.getClient(serverUrl).getDefaultUserAndGroupIds().get(0);
+									userId = hdfsManager.getClient(serverUrl,server.getVersion()).getDefaultUserAndGroupIds().get(0);
 								} catch (Throwable e) {
 									userId = null;
 								}

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/34799cec/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/NewHDFSServerWizardPage.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/NewHDFSServerWizardPage.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/NewHDFSServerWizardPage.java
index f5eca4d..f0a68af 100644
--- a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/NewHDFSServerWizardPage.java
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/NewHDFSServerWizardPage.java
@@ -26,7 +26,9 @@ import java.util.StringTokenizer;
 
 import org.apache.hdt.core.hdfs.HDFSClient;
 import org.apache.hdt.core.internal.hdfs.HDFSManager;
+import org.apache.hdt.core.launch.ConfProp;
 import org.apache.hdt.ui.Activator;
+import org.apache.hdt.ui.internal.launch.HadoopLocationWizard;
 import org.apache.log4j.Logger;
 import org.eclipse.core.runtime.CoreException;
 import org.eclipse.jface.wizard.WizardPage;
@@ -41,8 +43,10 @@ import org.eclipse.swt.widgets.Button;
 import org.eclipse.swt.widgets.Combo;
 import org.eclipse.swt.widgets.Composite;
 import org.eclipse.swt.widgets.Display;
+import org.eclipse.swt.widgets.Event;
 import org.eclipse.swt.widgets.Group;
 import org.eclipse.swt.widgets.Label;
+import org.eclipse.swt.widgets.Listener;
 import org.eclipse.swt.widgets.Text;
 
 public class NewHDFSServerWizardPage extends WizardPage {
@@ -55,6 +59,8 @@ public class NewHDFSServerWizardPage extends WizardPage {
 	private String hdfsServerName = null;
 	private boolean overrideDefaultSecurity = false;
 	private String userId = null;
+	private Combo hdfsVersionOptions;
+	private String hdfsVersion;
 	private List<String> groupIds = new ArrayList<String>();
 
 	protected NewHDFSServerWizardPage() {
@@ -115,6 +121,30 @@ public class NewHDFSServerWizardPage extends WizardPage {
 		Label exampleLabel = new Label(c, SWT.NONE);
 		exampleLabel.setText("Example: hdfs://hdfs.server.hostname:8020");
 		exampleLabel.setForeground(Display.getCurrent().getSystemColor(SWT.COLOR_DARK_GRAY));
+		
+		/*
+		 * HDFS version
+		 */
+		{
+			Label label = new Label(c, SWT.NONE);
+			label.setText("&HDFS Version:");
+			Combo options = new Combo(c, SWT.SINGLE | SWT.BORDER | SWT.READ_ONLY);
+			options.setLayoutData(new GridData(GridData.FILL_HORIZONTAL));
+			options.add(HadoopLocationWizard.HADOOP_1);
+			options.add(HadoopLocationWizard.HADOOP_2);
+			options.addListener(SWT.Selection, new Listener() {
+				@Override
+				public void handleEvent(Event event) {
+					hdfsVersion = hdfsVersionOptions.getText();
+				}
+			});
+			options.select(0);
+			hdfsVersion = options.getItem(0);
+			hdfsVersionOptions = options;
+		}
+		
 		// Security
 		Group securityGroup = new Group(c, SWT.SHADOW_ETCHED_IN);
 		GridData gd = new GridData(GridData.FILL_HORIZONTAL);
@@ -191,7 +221,7 @@ public class NewHDFSServerWizardPage extends WizardPage {
 	private List<String> getUserAndGroupIds() {
 		List<String> list = new ArrayList<String>();
 		try {
-			HDFSClient client = HDFSManager.INSTANCE.getClient(hdfsServerLocation);
+			HDFSClient client = HDFSManager.INSTANCE.getClient(hdfsServerLocation,ConfProp.PI_HADOOP_VERSION.defVal);
 			List<String> defaultUserAndGroupIds = client.getDefaultUserAndGroupIds();
 			if (defaultUserAndGroupIds != null)
 				list.addAll(defaultUserAndGroupIds);
@@ -239,4 +269,8 @@ public class NewHDFSServerWizardPage extends WizardPage {
 	public List<String> getGroupIds() {
 		return groupIds;
 	}
+
+	public String getHDFSVersion() {
+		return hdfsVersion;
+	}
 }

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/34799cec/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/NewHDFSWizard.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/NewHDFSWizard.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/NewHDFSWizard.java
index 9b0706c..e66c9c4 100644
--- a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/NewHDFSWizard.java
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/NewHDFSWizard.java
@@ -81,7 +81,8 @@ public class NewHDFSWizard extends Wizard implements INewWizard,IExecutableExten
 					protected org.eclipse.core.runtime.IStatus run(org.eclipse.core.runtime.IProgressMonitor monitor) {
 						return HDFSManager.addServer(serverLocationWizardPage.getHdfsServerName(),serverLocationWizardPage.getHdfsServerLocation(),
 								serverLocationWizardPage.isOverrideDefaultSecurity() ? serverLocationWizardPage.getUserId() : null,
-								serverLocationWizardPage.isOverrideDefaultSecurity() ? serverLocationWizardPage.getGroupIds() : null);
+								serverLocationWizardPage.isOverrideDefaultSecurity() ? serverLocationWizardPage.getGroupIds() : null,
+										serverLocationWizardPage.getHDFSVersion());
 					};
 				};
 				j.schedule();

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/34799cec/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/HadoopLocationWizard.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/HadoopLocationWizard.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/HadoopLocationWizard.java
index 3757c05..7f3cbfb 100644
--- a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/HadoopLocationWizard.java
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/HadoopLocationWizard.java
@@ -36,6 +36,7 @@ import org.eclipse.core.resources.IWorkspaceRoot;
 import org.eclipse.core.resources.ResourcesPlugin;
 import org.eclipse.core.runtime.CoreException;
 import org.eclipse.jface.dialogs.IMessageProvider;
+import org.eclipse.jface.dialogs.MessageDialog;
 import org.eclipse.jface.wizard.WizardPage;
 import org.eclipse.swt.SWT;
 import org.eclipse.swt.custom.ScrolledComposite;
@@ -47,6 +48,7 @@ import org.eclipse.swt.graphics.Image;
 import org.eclipse.swt.layout.GridData;
 import org.eclipse.swt.layout.GridLayout;
 import org.eclipse.swt.widgets.Button;
+import org.eclipse.swt.widgets.Combo;
 import org.eclipse.swt.widgets.Composite;
 import org.eclipse.swt.widgets.Control;
 import org.eclipse.swt.widgets.Display;
@@ -68,6 +70,8 @@ import org.eclipse.swt.widgets.Text;
 
 public class HadoopLocationWizard extends WizardPage {
 
+	public  static final String HADOOP_1 = "1.1";
+	public  static final String HADOOP_2 = "2.2";
 	Image circle;
 
 	/**
@@ -90,7 +94,7 @@ public class HadoopLocationWizard extends WizardPage {
 
 		this.original = null;
 		try {
-			this.location = AbstractHadoopCluster.createCluster();
+			this.location = AbstractHadoopCluster.createCluster(ConfProp.PI_HADOOP_VERSION.defVal);
 		} catch (CoreException e) {
 			e.printStackTrace();
 		}
@@ -125,8 +129,8 @@ public class HadoopLocationWizard extends WizardPage {
 				Display.getDefault().syncExec(new Runnable() {
 					public void run() {
 						HDFSManager.addServer(location.getLocationName(),
-								location.getConfProp(ConfProp.FS_DEFAULT_URI), location
-								.getConfProp(ConfProp.PI_USER_NAME), null);
+								location.getConfPropValue(ConfProp.FS_DEFAULT_URI), location
+								.getConfPropValue(ConfProp.PI_USER_NAME), null,location.getVersion());
 					}
 				});
 				// New location
@@ -141,9 +145,9 @@ public class HadoopLocationWizard extends WizardPage {
 				
 				// Update location
 				final String originalName = this.original.getLocationName();
-				final String originalLoc = this.original.getConfProp(ConfProp.FS_DEFAULT_URI);
+				final String originalLoc = this.original.getConfPropValue(ConfProp.FS_DEFAULT_URI);
 				final String newName = this.location.getLocationName();
-				final String newLoc = this.location.getConfProp(ConfProp.FS_DEFAULT_URI);
+				final String newLoc = this.location.getConfPropValue(ConfProp.FS_DEFAULT_URI);
 				
 				if (!originalName.equals(newName) || !originalLoc.equals(newLoc)){
 					IWorkspaceRoot root = ResourcesPlugin.getWorkspace().getRoot();
@@ -160,8 +164,8 @@ public class HadoopLocationWizard extends WizardPage {
 								}
 							}
 							HDFSManager.addServer(location.getLocationName(),
-									location.getConfProp(ConfProp.FS_DEFAULT_URI), location
-									.getConfProp(ConfProp.PI_USER_NAME), null);
+									location.getConfPropValue(ConfProp.FS_DEFAULT_URI), location
+									.getConfPropValue(ConfProp.PI_USER_NAME), null,location.getVersion());
 						}
 					});
 				}
@@ -204,7 +208,7 @@ public class HadoopLocationWizard extends WizardPage {
 	public boolean isPageComplete() {
 
 		{
-			String locName = location.getConfProp(ConfProp.PI_LOCATION_NAME);
+			String locName = location.getConfPropValue(ConfProp.PI_LOCATION_NAME);
 			if ((locName == null) || (locName.length() == 0) || locName.contains("/")) {
 
 				setMessage("Bad location name: " + "the location name should not contain " + "any character prohibited in a file name.", WARNING);
@@ -214,7 +218,7 @@ public class HadoopLocationWizard extends WizardPage {
 		}
 
 		{
-			String master = location.getConfProp(ConfProp.PI_JOB_TRACKER_HOST);
+			String master = location.getConfPropValue(ConfProp.PI_JOB_TRACKER_HOST);
 			if ((master == null) || (master.length() == 0)) {
 
 				setMessage("Bad master host name: " + "the master host name refers to the machine " + "that runs the Job tracker.", WARNING);
@@ -224,7 +228,7 @@ public class HadoopLocationWizard extends WizardPage {
 		}
 
 		{
-			String jobTracker = location.getConfProp(ConfProp.JOB_TRACKER_URI);
+			String jobTracker = location.getConfPropValue(ConfProp.JOB_TRACKER_URI);
 			String[] strs = jobTracker.split(":");
 			boolean ok = (strs.length == 2);
 			if (ok) {
@@ -236,14 +240,14 @@ public class HadoopLocationWizard extends WizardPage {
 				}
 			}
 			if (!ok) {
-				setMessage("The job tracker information (" + ConfProp.JOB_TRACKER_URI.name + ") is invalid. " + "This usually looks like \"host:port\"",
+				setMessage("The job tracker information is invalid. " + "This usually looks like \"host:port\"",
 						WARNING);
 				return false;
 			}
 		}
 
 		{
-			String fsDefaultURI = location.getConfProp(ConfProp.FS_DEFAULT_URI);
+			String fsDefaultURI = location.getConfPropValue(ConfProp.FS_DEFAULT_URI);
 			try {
 				URI uri = new URI(fsDefaultURI);
 			} catch (URISyntaxException e) {
@@ -301,6 +305,7 @@ public class HadoopLocationWizard extends WizardPage {
 
 	private interface TabListener {
 		void notifyChange(ConfProp prop, String propValue);
+		void reloadData();
 	}
 
 	/*
@@ -320,21 +325,6 @@ public class HadoopLocationWizard extends WizardPage {
 		}
 
 		/**
-		 * Access to current configuration settings
-		 * 
-		 * @param propName
-		 *            the property name
-		 * @return the current property value
-		 */
-		String get(String propName) {
-			return location.getConfProp(propName);
-		}
-
-		String get(ConfProp prop) {
-			return location.getConfProp(prop);
-		}
-
-		/**
 		 * Implements change notifications from any tab: update the location
 		 * state and other tabs
 		 * 
@@ -347,11 +337,11 @@ public class HadoopLocationWizard extends WizardPage {
 		 */
 		void notifyChange(TabListener source, final ConfProp prop, final String propValue) {
 			// Ignore notification when no change
-			String oldValue = location.getConfProp(prop);
+			String oldValue = location.getConfPropValue(prop);
 			if ((oldValue != null) && oldValue.equals(propValue))
 				return;
 
-			location.setConfProp(prop, propValue);
+			location.setConfPropValue(prop, propValue);
 			Display.getDefault().syncExec(new Runnable() {
 				public void run() {
 					getContainer().updateButtons();
@@ -363,17 +353,17 @@ public class HadoopLocationWizard extends WizardPage {
 			/*
 			 * Now we deal with dependencies between settings
 			 */
-			final String jobTrackerHost = location.getConfProp(ConfProp.PI_JOB_TRACKER_HOST);
-			final String jobTrackerPort = location.getConfProp(ConfProp.PI_JOB_TRACKER_PORT);
-			final String nameNodeHost = location.getConfProp(ConfProp.PI_NAME_NODE_HOST);
-			final String nameNodePort = location.getConfProp(ConfProp.PI_NAME_NODE_PORT);
-			final boolean colocate = location.getConfProp(ConfProp.PI_COLOCATE_MASTERS).equalsIgnoreCase("yes");
-			final String jobTrackerURI = location.getConfProp(ConfProp.JOB_TRACKER_URI);
-			final String fsDefaultURI = location.getConfProp(ConfProp.FS_DEFAULT_URI);
-			final String socksServerURI = location.getConfProp(ConfProp.SOCKS_SERVER);
-			final boolean socksProxyEnable = location.getConfProp(ConfProp.PI_SOCKS_PROXY_ENABLE).equalsIgnoreCase("yes");
-			final String socksProxyHost = location.getConfProp(ConfProp.PI_SOCKS_PROXY_HOST);
-			final String socksProxyPort = location.getConfProp(ConfProp.PI_SOCKS_PROXY_PORT);
+			final String jobTrackerHost = location.getConfPropValue(ConfProp.PI_JOB_TRACKER_HOST);
+			final String jobTrackerPort = location.getConfPropValue(ConfProp.PI_JOB_TRACKER_PORT);
+			final String nameNodeHost = location.getConfPropValue(ConfProp.PI_NAME_NODE_HOST);
+			final String nameNodePort = location.getConfPropValue(ConfProp.PI_NAME_NODE_PORT);
+			final boolean colocate = location.getConfPropValue(ConfProp.PI_COLOCATE_MASTERS).equalsIgnoreCase("yes");
+			final String jobTrackerURI = location.getConfPropValue(ConfProp.JOB_TRACKER_URI);
+			final String fsDefaultURI = location.getConfPropValue(ConfProp.FS_DEFAULT_URI);
+			final String socksServerURI = location.getConfPropValue(ConfProp.SOCKS_SERVER);
+			final boolean socksProxyEnable = location.getConfPropValue(ConfProp.PI_SOCKS_PROXY_ENABLE).equalsIgnoreCase("yes");
+			final String socksProxyHost = location.getConfPropValue(ConfProp.PI_SOCKS_PROXY_HOST);
+			final String socksProxyPort = location.getConfPropValue(ConfProp.PI_SOCKS_PROXY_PORT);
 
 			Display.getDefault().syncExec(new Runnable() {
 				public void run() {
@@ -456,7 +446,7 @@ public class HadoopLocationWizard extends WizardPage {
 							notifyChange(null, ConfProp.SOCKET_FACTORY_DEFAULT, "org.apache.hadoop.net.StandardSocketFactory");
 						}
 						break;
-					}
+					}					
 					}
 				}
 			});
@@ -473,12 +463,11 @@ public class HadoopLocationWizard extends WizardPage {
 		 * @param propValue
 		 */
 		void notifyChange(TabListener source, String propName, String propValue) {
-
-			ConfProp prop = ConfProp.getByName(propName);
+			ConfProp prop = location.getConfPropForName(propName);
 			if (prop != null)
 				notifyChange(source, prop, propValue);
-
-			location.setConfProp(propName, propValue);
+			else
+				location.setConfPropValue(propName, propValue);
 		}
 
 		/**
@@ -510,12 +499,11 @@ public class HadoopLocationWizard extends WizardPage {
 	 * @return
 	 */
 	private Text createConfText(ModifyListener listener, Composite parent, ConfProp prop) {
-
 		Text text = new Text(parent, SWT.SINGLE | SWT.BORDER);
 		GridData data = new GridData(GridData.FILL_HORIZONTAL);
 		text.setLayoutData(data);
-		text.setData("hProp", prop);
-		text.setText(location.getConfProp(prop));
+		text.setData("hProp",prop);
+		text.setText(location.getConfPropValue(prop));
 		text.addModifyListener(listener);
 
 		return text;
@@ -531,13 +519,11 @@ public class HadoopLocationWizard extends WizardPage {
 	 * @return
 	 */
 	private Button createConfCheckButton(SelectionListener listener, Composite parent, ConfProp prop, String text) {
-
 		Button button = new Button(parent, SWT.CHECK);
 		button.setText(text);
 		button.setData("hProp", prop);
-		button.setSelection(location.getConfProp(prop).equalsIgnoreCase("yes"));
+		button.setSelection(location.getConfPropValue(prop).equalsIgnoreCase("yes"));
 		button.addSelectionListener(listener);
-
 		return button;
 	}
 
@@ -557,12 +543,10 @@ public class HadoopLocationWizard extends WizardPage {
 	 * @return a SWT Text field
 	 */
 	private Text createConfLabelText(ModifyListener listener, Composite parent, ConfProp prop, String labelText) {
-
 		Label label = new Label(parent, SWT.NONE);
 		if (labelText == null)
-			labelText = prop.name;
+			labelText = location.getConfPropName(prop);
 		label.setText(labelText);
-
 		return createConfText(listener, parent, prop);
 	}
 
@@ -583,7 +567,7 @@ public class HadoopLocationWizard extends WizardPage {
 	private Text createConfNameEditor(ModifyListener listener, Composite parent, String propName, String labelText) {
 
 		{
-			ConfProp prop = ConfProp.getByName(propName);
+			ConfProp prop = location.getConfPropForName(propName);
 			if (prop != null)
 				return createConfLabelText(listener, parent, prop, labelText);
 		}
@@ -597,7 +581,7 @@ public class HadoopLocationWizard extends WizardPage {
 		GridData data = new GridData(GridData.FILL_HORIZONTAL);
 		text.setLayoutData(data);
 		text.setData("hPropName", propName);
-		text.setText(location.getConfProp(propName));
+		text.setText(location.getConfPropValue(propName));
 		text.addModifyListener(listener);
 
 		return text;
@@ -610,11 +594,19 @@ public class HadoopLocationWizard extends WizardPage {
 	 */
 	private class TabMain implements TabListener, ModifyListener, SelectionListener {
 
 		TabMediator mediator;
 
 		Text locationName;
+		
+		Combo hadoopVersion;
 
 		Text textJTHost;
+		
 
 		Text textNNHost;
 
@@ -632,6 +624,8 @@ public class HadoopLocationWizard extends WizardPage {
 
 		Text socksProxyPort;
 
+		private Group groupMR;
+
 		TabMain(TabMediator mediator) {
 			this.mediator = mediator;
 			TabItem tab = new TabItem(mediator.folder, SWT.NONE);
@@ -661,12 +655,56 @@ public class HadoopLocationWizard extends WizardPage {
 
 				locationName = createConfLabelText(this, subpanel, ConfProp.PI_LOCATION_NAME, "&Location name:");
 			}
+			/*
+			 * Hadoop version
+			 */
+			{
+				Composite subpanel = new Composite(panel, SWT.FILL);
+				subpanel.setLayout(new GridLayout(2, false));
+				data = new GridData();
+				data.horizontalSpan = 2;
+				data.horizontalAlignment = SWT.FILL;
+				subpanel.setLayoutData(data);
+				
+				Label label = new Label(subpanel, SWT.NONE);
+				label.setText("&Hadoop Version:");
+				Combo options = new Combo(subpanel, SWT.BORDER | SWT.READ_ONLY);
+				options.add(HADOOP_1);
+				options.add(HADOOP_2);
+				options.select(0);
+				options.setLayoutData(new GridData(GridData.FILL_HORIZONTAL));
+				options.addListener(SWT.Selection, new Listener() {
+					@Override
+					public void handleEvent(Event event) {
+						final String selection = hadoopVersion.getText();
+						if (location == null || !selection.equals(location.getVersion())) {
+							Display.getDefault().syncExec(new Runnable() {
+
+								@Override
+								public void run() {
+									try {
+										location = AbstractHadoopCluster.createCluster(selection);
+										for (TabListener tab : mediator.tabs) {
+											tab.reloadData();
+										}
+									} catch (CoreException e) {
+										MessageDialog.openError(Display.getDefault().getActiveShell(), "HDFS Error", "Unable to create HDFS site: "
+												+ e.getMessage());
+									}
+								}
+							});
+						}
 
+					}
+				});
+				hadoopVersion = options;
+			}
+			
 			/*
 			 * Map/Reduce group
 			 */
 			{
-				Group groupMR = new Group(panel, SWT.SHADOW_NONE);
+				groupMR = new Group(panel, SWT.SHADOW_NONE);
 				groupMR.setText("Map/Reduce Master");
 				groupMR.setToolTipText("Address of the Map/Reduce master node " + "(the Job Tracker).");
 				GridLayout layout = new GridLayout(2, false);
@@ -783,7 +821,7 @@ public class HadoopLocationWizard extends WizardPage {
 			// Update the state of all widgets according to the current values!
 			reloadConfProp(ConfProp.PI_COLOCATE_MASTERS);
 			reloadConfProp(ConfProp.PI_SOCKS_PROXY_ENABLE);
-			reloadConfProp(ConfProp.PI_JOB_TRACKER_HOST);
+			reloadConfProp(ConfProp.PI_HADOOP_VERSION);
 
 			return panel;
 		}
@@ -794,7 +832,28 @@ public class HadoopLocationWizard extends WizardPage {
 		 * @param prop
 		 */
 		private void reloadConfProp(ConfProp prop) {
-			this.notifyChange(prop, location.getConfProp(prop));
+			this.notifyChange(prop, location.getConfPropValue(prop));
+		}
+		
+		@Override
+		public void reloadData() {
+			if (HADOOP_2.equals(hadoopVersion.getText())) {
+				groupMR.setText("Resource Manager Master");
+				groupMR.setToolTipText("Address of the Resouce manager node ");
+			} else {
+				groupMR.setText("Map/Reduce Master");
+				groupMR.setToolTipText("Address of the Map/Reduce master node " + "(the Job Tracker).");
+			}
+			groupMR.layout(true);
+			notifyChange(ConfProp.PI_JOB_TRACKER_HOST, location.getConfPropValue(ConfProp.PI_JOB_TRACKER_HOST));
+			notifyChange(ConfProp.PI_JOB_TRACKER_PORT, location.getConfPropValue(ConfProp.PI_JOB_TRACKER_PORT));
+			notifyChange(ConfProp.PI_USER_NAME, location.getConfPropValue(ConfProp.PI_USER_NAME));
+			notifyChange(ConfProp.PI_NAME_NODE_HOST, location.getConfPropValue(ConfProp.PI_NAME_NODE_HOST));
+			notifyChange(ConfProp.PI_COLOCATE_MASTERS, location.getConfPropValue(ConfProp.PI_COLOCATE_MASTERS));
+			notifyChange(ConfProp.PI_SOCKS_PROXY_ENABLE, location.getConfPropValue(ConfProp.PI_SOCKS_PROXY_ENABLE));
+			notifyChange(ConfProp.PI_SOCKS_PROXY_HOST, location.getConfPropValue(ConfProp.PI_SOCKS_PROXY_HOST));
+			notifyChange(ConfProp.PI_SOCKS_PROXY_PORT, location.getConfPropValue(ConfProp.PI_SOCKS_PROXY_PORT));
 		}
 
 		public void notifyChange(ConfProp prop, String propValue) {
@@ -851,10 +910,11 @@ public class HadoopLocationWizard extends WizardPage {
 			case PI_SOCKS_PROXY_PORT: {
 				socksProxyPort.setText(propValue);
 				break;
-			}
+			}			
 			}
 		}
 
+		
 		/* @inheritDoc */
 		public void modifyText(ModifyEvent e) {
 			final Text text = (Text) e.widget;
@@ -888,9 +948,7 @@ public class HadoopLocationWizard extends WizardPage {
 
 	private class TabAdvanced implements TabListener, ModifyListener {
 		TabMediator mediator;
-
 		private Composite panel;
-
 		private Map<String, Text> textMap = new TreeMap<String, Text>();
 
 		TabAdvanced(TabMediator mediator) {
@@ -905,15 +963,29 @@ public class HadoopLocationWizard extends WizardPage {
 
 		private Control createControl(Composite parent) {
 			ScrolledComposite sc = new ScrolledComposite(parent, SWT.BORDER | SWT.H_SCROLL | SWT.V_SCROLL);
-
-			panel = new Composite(sc, SWT.NONE);
+			panel = buildPanel(sc);
 			sc.setContent(panel);
-
 			sc.setExpandHorizontal(true);
 			sc.setExpandVertical(true);
-
 			sc.setMinSize(640, 480);
+			sc.setMinSize(panel.computeSize(SWT.DEFAULT, SWT.DEFAULT));
+			return sc;
+		}
+		
+		@Override
+		public void reloadData() {
+			ScrolledComposite parent = (ScrolledComposite) panel.getParent();
+			panel.dispose();
+			Composite panel = buildPanel(parent);
+			parent.setContent(panel);
+			parent.setMinSize(panel.computeSize(SWT.DEFAULT, SWT.DEFAULT));
+			parent.pack();
+			parent.layout(true);
+			this.panel = panel;
+		}
 
+		private Composite buildPanel(Composite parent) {
+			Composite panel = new Composite(parent, SWT.NONE);
 			GridLayout layout = new GridLayout();
 			layout.numColumns = 2;
 			layout.makeColumnsEqualWidth = false;
@@ -932,14 +1004,12 @@ public class HadoopLocationWizard extends WizardPage {
 				Text text = createConfNameEditor(this, panel, entry.getKey(), null);
 				textMap.put(entry.getKey(), text);
 			}
-
-			sc.setMinSize(panel.computeSize(SWT.DEFAULT, SWT.DEFAULT));
-
-			return sc;
+			return panel;
 		}
+		
 
 		public void notifyChange(ConfProp prop, final String propValue) {
-			Text text = textMap.get(prop.name);
+			Text text = textMap.get(location.getConfPropName(prop));
 			text.setText(propValue);
 		}
 
@@ -959,6 +1029,8 @@ public class HadoopLocationWizard extends WizardPage {
 				}
 			});
 		}
+
+	
 	}
 
 }
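
The reloadData() override added to TabAdvanced above rebuilds the whole
advanced tab when the user picks a different Hadoop version. A minimal sketch
of that dispose-and-rebuild pattern for a ScrolledComposite; the panel contents
are hypothetical placeholders:

import org.eclipse.swt.SWT;
import org.eclipse.swt.custom.ScrolledComposite;
import org.eclipse.swt.layout.FillLayout;
import org.eclipse.swt.layout.GridLayout;
import org.eclipse.swt.widgets.Composite;
import org.eclipse.swt.widgets.Display;
import org.eclipse.swt.widgets.Label;
import org.eclipse.swt.widgets.Shell;

public class ReloadPanelSketch {

	static Composite buildPanel(Composite parent) {
		Composite panel = new Composite(parent, SWT.NONE);
		panel.setLayout(new GridLayout(2, false));
		new Label(panel, SWT.NONE).setText("rebuilt for the selected version");
		return panel;
	}

	static Composite reload(ScrolledComposite sc, Composite oldPanel) {
		oldPanel.dispose(); // drop the stale editors
		Composite fresh = buildPanel(sc); // recreate from current state
		sc.setContent(fresh);
		sc.setMinSize(fresh.computeSize(SWT.DEFAULT, SWT.DEFAULT));
		sc.layout(true);
		return fresh; // the caller keeps the new reference
	}

	public static void main(String[] args) {
		Display display = new Display();
		Shell shell = new Shell(display);
		shell.setLayout(new FillLayout());
		ScrolledComposite sc = new ScrolledComposite(shell, SWT.V_SCROLL | SWT.H_SCROLL);
		sc.setExpandHorizontal(true);
		sc.setExpandVertical(true);
		Composite panel = buildPanel(sc);
		sc.setContent(panel);
		reload(sc, panel); // simulate a version change
		shell.open();
		shell.dispose(); // a real application would run the event loop instead
		display.dispose();
	}
}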

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/34799cec/org.apache.hdt.updateSite/.classpath
----------------------------------------------------------------------
diff --git a/org.apache.hdt.updateSite/.classpath b/org.apache.hdt.updateSite/.classpath
index 4c2b7c4..36851f4 100644
--- a/org.apache.hdt.updateSite/.classpath
+++ b/org.apache.hdt.updateSite/.classpath
@@ -1,9 +1,10 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <classpath>
-  <classpathentry kind="src" path="target/maven-shared-archive-resources" excluding="**/*.java"/>
-  <classpathentry kind="output" path="target/classes"/>
-  <classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER"/>
-  <classpathentry kind="src" path="/org.apache.hdt.core"/>
-  <classpathentry kind="src" path="/org.apache.hdt.hadoop.release"/>
-  <classpathentry kind="src" path="/org.apache.hdt.ui"/>
-</classpath>
\ No newline at end of file
+	<classpathentry excluding="**/*.java" kind="src" path="target/maven-shared-archive-resources"/>
+	<classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER"/>
+	<classpathentry kind="src" path="/org.apache.hdt.core"/>
+	<classpathentry kind="src" path="/org.apache.hdt.hadoop.release"/>
+	<classpathentry kind="src" path="/org.apache.hdt.ui"/>
+	<classpathentry combineaccessrules="false" kind="src" path="/org.apache.hdt.hadoop2.release"/>
+	<classpathentry kind="output" path="target/classes"/>
+</classpath>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/34799cec/org.apache.hdt.updateSite/.project
----------------------------------------------------------------------
diff --git a/org.apache.hdt.updateSite/.project b/org.apache.hdt.updateSite/.project
index 99c4771..b94eb36 100644
--- a/org.apache.hdt.updateSite/.project
+++ b/org.apache.hdt.updateSite/.project
@@ -6,6 +6,7 @@
 		<project>org.apache.hdt.core</project>
 		<project>org.apache.hdt.feature</project>
 		<project>org.apache.hdt.hadoop.release</project>
+		<project>org.apache.hdt.hadoop2.release</project>
 		<project>org.apache.hdt.ui</project>
 	</projects>
 	<buildSpec>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/34799cec/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 19b9ec5..4005645 100644
--- a/pom.xml
+++ b/pom.xml
@@ -127,6 +127,7 @@ under the License.
     <module>org.apache.hdt.core</module>
     <module>org.apache.hdt.ui</module>
     <module>org.apache.hdt.hadoop.release</module>
+    <module>org.apache.hdt.hadoop2.release</module>
     <module>org.apache.hdt.feature</module>
     <module>org.apache.hdt.updateSite</module>
     <module>org.apache.hdt.ui.test</module>


[14/27] git commit: - Adding hadoop2 based on 2.2 version - Changing fragments to plugins (Fragment classpath is appended to the host classpath, which causes issues; thus making it a plugin) - Loading classes in different context loaders(http://wiki.eclipse.

Posted by rs...@apache.org.
 - Adding hadoop2 support based on the 2.2 version
 - Changing fragments to plugins (a fragment's classpath is appended to the host classpath, which causes issues; hence each is now a plugin)
 - Loading classes in different context class loaders (http://wiki.eclipse.org/FAQ_How_do_I_use_the_context_class_loader_in_Eclipse%3F)
 - Adding a version attribute to HDFSServer, which makes it possible to determine the client version.
 - Updating the Hadoop location wizard to show the ResourceManager address and select the Hadoop version
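
The version attribute is what lets the UI pick a matching client at runtime. A
rough sketch of that dispatch (illustrative only: the real resolution happens
in AbstractHadoopCluster.createCluster(String), presumably through the Eclipse
plugin registry, and the stub interface below is invented for the example):

public class VersionDispatchSketch {

	interface HdfsClientStub {
		String describe();
	}

	static HdfsClientStub clientFor(String version) {
		if ("2.2".equals(version)) { // HadoopLocationWizard.HADOOP_2
			return new HdfsClientStub() {
				public String describe() { return "hadoop2 client (YARN)"; }
			};
		}
		return new HdfsClientStub() { // default: HadoopLocationWizard.HADOOP_1
			public String describe() { return "hadoop1 client (JobTracker)"; }
		};
	}

	public static void main(String[] args) {
		System.out.println(clientFor("1.1").describe());
		System.out.println(clientFor("2.2").describe());
	}
}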


Project: http://git-wip-us.apache.org/repos/asf/incubator-hdt/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hdt/commit/34799cec
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hdt/tree/34799cec
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hdt/diff/34799cec

Branch: refs/heads/hadoop-eclipse-merge
Commit: 34799cece189198189fb8e8c64a1d8b2ff397f52
Parents: a7a89f4
Author: Rahul Sharma <rs...@apache.org>
Authored: Fri May 23 16:35:30 2014 +0530
Committer: Rahul Sharma <rs...@apache.org>
Committed: Tue Jun 10 10:17:30 2014 +0530

----------------------------------------------------------------------
 org.apache.hdt.core/META-INF/MANIFEST.MF        |   9 +-
 org.apache.hdt.core/models/Hadoop.ecore         |   2 +
 .../hdt/core/internal/hdfs/HDFSFileStore.java   |   2 +-
 .../hdt/core/internal/hdfs/HDFSManager.java     |  17 +-
 .../internal/hdfs/InterruptableHDFSClient.java  |   9 +-
 .../hdt/core/internal/model/HDFSServer.java     |  28 +
 .../hdt/core/internal/model/HadoopPackage.java  |  30 +-
 .../internal/model/impl/HDFSServerImpl.java     |  54 ++
 .../internal/model/impl/HadoopFactoryImpl.java  |   2 +-
 .../internal/model/impl/HadoopPackageImpl.java  |  11 +
 .../core/internal/model/util/HadoopSwitch.java  |  36 +-
 .../hdt/core/launch/AbstractHadoopCluster.java  |  43 +-
 .../org/apache/hdt/core/launch/ConfProp.java    |   8 +-
 org.apache.hdt.feature/.classpath               |   3 +-
 org.apache.hdt.feature/.project                 |   1 +
 org.apache.hdt.feature/feature.xml              |  10 +-
 .../META-INF/MANIFEST.MF                        |   7 +-
 org.apache.hdt.hadoop.release/build.properties  |   2 +-
 org.apache.hdt.hadoop.release/fragment.xml      |  43 --
 org.apache.hdt.hadoop.release/plugin.xml        |  43 ++
 .../hdt/hadoop/release/HadoopCluster.java       |  65 +-
 .../apache/hdt/hadoop/release/HadoopJob.java    |   2 +
 org.apache.hdt.hadoop2.release/.classpath       |  91 +++
 .../.settings/org.eclipse.core.resources.prefs  |   2 +
 .../.settings/org.eclipse.jdt.core.prefs        |   7 +
 .../.settings/org.eclipse.m2e.core.prefs        |   4 +
 .../META-INF/MANIFEST.MF                        |  98 +++
 org.apache.hdt.hadoop2.release/build.properties |  23 +
 org.apache.hdt.hadoop2.release/plugin.xml       |  35 ++
 org.apache.hdt.hadoop2.release/pom.xml          | 127 ++++
 .../hdt/hadoop2/release/HDFSClientRelease.java  | 235 +++++++
 .../hdt/hadoop2/release/HadoopCluster.java      | 619 +++++++++++++++++++
 .../apache/hdt/hadoop2/release/HadoopJob.java   | 338 ++++++++++
 .../hdfs/HDFSLightweightLabelDecorator.java     |   2 +-
 .../internal/hdfs/NewHDFSServerWizardPage.java  |  36 +-
 .../hdt/ui/internal/hdfs/NewHDFSWizard.java     |   3 +-
 .../internal/launch/HadoopLocationWizard.java   | 214 ++++---
 org.apache.hdt.updateSite/.classpath            |  15 +-
 org.apache.hdt.updateSite/.project              |   1 +
 pom.xml                                         |   1 +
 40 files changed, 2066 insertions(+), 212 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/34799cec/org.apache.hdt.core/META-INF/MANIFEST.MF
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/META-INF/MANIFEST.MF b/org.apache.hdt.core/META-INF/MANIFEST.MF
index 1d6b8c4..e50301c 100644
--- a/org.apache.hdt.core/META-INF/MANIFEST.MF
+++ b/org.apache.hdt.core/META-INF/MANIFEST.MF
@@ -16,7 +16,7 @@ Bundle-RequiredExecutionEnvironment: JavaSE-1.6
 Bundle-Vendor: Apache Hadoop
 Bundle-ClassPath: .,
  jars/log4j-1.2.15.jar
-Export-Package:  org.apache.hdt.core,
+Export-Package: org.apache.hdt.core,
  org.apache.hdt.core.hdfs,
  org.apache.hdt.core.internal,
  org.apache.hdt.core.internal.hdfs;x-friends:="org.apache.hdt.ui",
@@ -45,9 +45,6 @@ Export-Package:  org.apache.hdt.core,
  org.apache.log4j.or.sax,
  org.apache.log4j.spi,
  org.apache.log4j.varia,
- org.apache.log4j.xml,
- org.apache.hadoop,
- org.apache.hadoop.conf,
- org.apache.hadoop.io,
- org.apache.hadoop.mapred
+ org.apache.log4j.xml
 Bundle-ActivationPolicy: lazy
+

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/34799cec/org.apache.hdt.core/models/Hadoop.ecore
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/models/Hadoop.ecore b/org.apache.hdt.core/models/Hadoop.ecore
index 2b3e8ea..70207c0 100644
--- a/org.apache.hdt.core/models/Hadoop.ecore
+++ b/org.apache.hdt.core/models/Hadoop.ecore
@@ -30,6 +30,8 @@
     <eStructuralFeatures xsi:type="ecore:EAttribute" name="userId" eType="ecore:EDataType http://www.eclipse.org/emf/2002/Ecore#//EString"/>
     <eStructuralFeatures xsi:type="ecore:EAttribute" name="groupIds" upperBound="-1"
         eType="ecore:EDataType http://www.eclipse.org/emf/2002/Ecore#//EString"/>
+    <eStructuralFeatures xsi:type="ecore:EAttribute" name="version" eType="ecore:EDataType http://www.eclipse.org/emf/2002/Ecore#//EString"
+         defaultValueLiteral="1.0.0.0"/>
   </eClassifiers>
   <eClassifiers xsi:type="ecore:EClass" name="Servers">
     <eStructuralFeatures xsi:type="ecore:EReference" name="hdfsServers" upperBound="-1"

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/34799cec/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSFileStore.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSFileStore.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSFileStore.java
index ffd68ec..2809e55 100644
--- a/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSFileStore.java
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSFileStore.java
@@ -115,7 +115,7 @@ public class HDFSFileStore extends FileStore {
 	 * @throws CoreException
 	 */
 	private HDFSClient getClient() throws CoreException {
-		return HDFSManager.INSTANCE.getClient(getServer().getUri());
+		return HDFSManager.INSTANCE.getClient(getServer().getUri(),getServer().getVersion());
 	}
 
 	/**

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/34799cec/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSManager.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSManager.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSManager.java
index 5897cea..43ebf1f 100644
--- a/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSManager.java
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSManager.java
@@ -150,7 +150,7 @@ public class HDFSManager {
 	 * @return
 	 * @throws CoreException
 	 */
-	public HDFSServer createServer(String name, java.net.URI hdfsURI, String userId, List<String> groupIds) throws CoreException {
+	public HDFSServer createServer(String name, java.net.URI hdfsURI, String userId, List<String> groupIds,String version) throws CoreException {
 		if (hdfsURI.getPath() == null || hdfsURI.getPath().length() < 1) {
 			try {
 				hdfsURI = new java.net.URI(hdfsURI.toString() + "/");
@@ -163,6 +163,7 @@ public class HDFSManager {
 		hdfsServer.setName(name);
 		hdfsServer.setUri(hdfsURI.toString());
 		hdfsServer.setLoaded(true);
+		hdfsServer.setVersion(version);
 		if (userId != null)
 			hdfsServer.setUserId(userId);
 		if (groupIds != null)
@@ -289,7 +290,7 @@ public class HDFSManager {
 	 * @return
 	 * @throws CoreException
 	 */
-	public HDFSClient getClient(String serverURI) throws CoreException {
+	public HDFSClient getClient(String serverURI,String hdfsVersion) throws CoreException {
 		if (logger.isDebugEnabled())
 			logger.debug("getClient(" + serverURI + "): Server=" + serverURI);
 		HDFSServer server = getServer(serverURI);
@@ -306,8 +307,11 @@ public class HDFSManager {
 				IConfigurationElement[] elementsFor = Platform.getExtensionRegistry().getConfigurationElementsFor("org.apache.hdt.core.hdfsClient");
 				for (IConfigurationElement element : elementsFor) {
 					if (sUri.getScheme().equals(element.getAttribute("protocol"))) {
-						HDFSClient client = (HDFSClient) element.createExecutableExtension("class");
-						hdfsClientsMap.put(serverURI, new InterruptableHDFSClient(serverURI, client));
+						String version = element.getAttribute("protocolVersion");
+						if(hdfsVersion.equalsIgnoreCase(version)){
+							HDFSClient client = (HDFSClient) element.createExecutableExtension("class");
+							hdfsClientsMap.put(serverURI, new InterruptableHDFSClient(serverURI, client));						
+						}
 					}
 				}
 			} catch (URISyntaxException e) {
@@ -317,9 +321,10 @@ public class HDFSManager {
 		}
 	}
 	
-	public static org.eclipse.core.runtime.IStatus addServer(String serverName, String location, String userId, List<String> groupId) {
+	public static org.eclipse.core.runtime.IStatus addServer(String serverName, String location,
+			String userId, List<String> groupId,String version) {
 		try {
-			HDFSManager.INSTANCE.createServer(serverName, new URI(location), userId, groupId);
+			HDFSManager.INSTANCE.createServer(serverName, new URI(location), userId, groupId,version);
 		} catch (CoreException e) {
 			logger.warn(e.getMessage(), e);
 			return e.getStatus();
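
A minimal usage sketch of the version-aware API above (not part of the commit;
the URI, user, and "2.2" version string are illustrative, and it assumes a
running Eclipse instance so the extension registry is populated):

    import java.net.URI;
    import java.util.Arrays;

    import org.apache.hdt.core.hdfs.HDFSClient;
    import org.apache.hdt.core.internal.hdfs.HDFSManager;
    import org.apache.hdt.core.internal.model.HDFSServer;

    public class VersionedClientSketch {
        public static void main(String[] args) throws Exception {
            // createServer(...) now records which Hadoop version the server speaks.
            HDFSServer server = HDFSManager.INSTANCE.createServer("demo",
                    new URI("hdfs://namenode:8020/"), "hdfs",
                    Arrays.asList("hadoop"), "2.2");
            // getClient(...) binds only an hdfsClient extension whose
            // protocolVersion attribute matches the stored version.
            HDFSClient client = HDFSManager.INSTANCE.getClient(
                    server.getUri(), server.getVersion());
        }
    }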

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/34799cec/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/InterruptableHDFSClient.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/InterruptableHDFSClient.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/InterruptableHDFSClient.java
index 0301d5f..b6e9c46 100644
--- a/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/InterruptableHDFSClient.java
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/InterruptableHDFSClient.java
@@ -28,7 +28,6 @@ import java.util.List;
 import org.apache.hdt.core.hdfs.HDFSClient;
 import org.apache.hdt.core.hdfs.ResourceInformation;
 import org.apache.hdt.core.internal.model.HDFSServer;
-import org.apache.hdt.core.internal.model.ServerStatus;
 import org.apache.log4j.Logger;
 import org.eclipse.core.resources.IProject;
 import org.eclipse.core.resources.ResourcesPlugin;
@@ -41,9 +40,6 @@ import org.eclipse.core.resources.ResourcesPlugin;
 public class InterruptableHDFSClient extends HDFSClient {
 	private static final int DEFAULT_TIMEOUT = 5000;
 	private static final Logger logger = Logger.getLogger(InterruptableHDFSClient.class);
-	// private static ExecutorService threadPool =
-	// Executors.newFixedThreadPool(10);
-
 	private final HDFSClient client;
 	private final int timeoutMillis = DEFAULT_TIMEOUT;
 	private final String serverURI;
@@ -67,12 +63,17 @@ public class InterruptableHDFSClient extends HDFSClient {
 		final InterruptedException[] inE = new InterruptedException[1];
 		Thread runnerThread = new Thread(new Runnable() {
 			public void run() {
+				Thread current = Thread.currentThread();
+				ClassLoader oldLoader = current.getContextClassLoader();
 				try {
+					current.setContextClassLoader(client.getClass().getClassLoader());
 					data.add(runnable.run());
 				} catch (IOException e) {
 					ioE[0] = e;
 				} catch (InterruptedException e) {
 					inE[0] = e;
+				} finally {
+					current.setContextClassLoader(oldLoader);
 				}
 			}
 		});
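
The classloader swap added above is a general OSGi idiom: Hadoop resolves
FileSystem implementations and configuration resources through the thread
context classloader, which inside Eclipse is not the client bundle's loader.
A standalone sketch of the pattern (plain Java, no HDT types assumed):

    import java.util.concurrent.Callable;

    public final class TcclSwap {
        // Runs body with the given loader installed as the thread-context
        // classloader, restoring the previous loader even if body throws.
        public static <T> T callWith(ClassLoader loader, Callable<T> body)
                throws Exception {
            Thread current = Thread.currentThread();
            ClassLoader old = current.getContextClassLoader();
            try {
                current.setContextClassLoader(loader);
                return body.call();
            } finally {
                current.setContextClassLoader(old);
            }
        }
    }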

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/34799cec/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/HDFSServer.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/HDFSServer.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/HDFSServer.java
index be04f74..0419f2a 100644
--- a/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/HDFSServer.java
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/HDFSServer.java
@@ -33,6 +33,7 @@ import org.eclipse.emf.ecore.EObject;
  *   <li>{@link org.apache.hdt.core.internal.model.HDFSServer#getOperationURIs <em>Operation UR Is</em>}</li>
  *   <li>{@link org.apache.hdt.core.internal.model.HDFSServer#getUserId <em>User Id</em>}</li>
  *   <li>{@link org.apache.hdt.core.internal.model.HDFSServer#getGroupIds <em>Group Ids</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.HDFSServer#getVersion <em>Version</em>}</li>
  * </ul>
  * </p>
  *
@@ -124,4 +125,31 @@ public interface HDFSServer extends Server {
 	 */
 	EList<String> getGroupIds();
 
+	/**
+	 * Returns the value of the '<em><b>Version</b></em>' attribute.
+	 * The default value is <code>"1.0.0.0"</code>.
+	 * <!-- begin-user-doc -->
+	 * <p>
+	 * If the meaning of the '<em>Version</em>' attribute isn't clear,
+	 * there really should be more of a description here...
+	 * </p>
+	 * <!-- end-user-doc -->
+	 * @return the value of the '<em>Version</em>' attribute.
+	 * @see #setVersion(String)
+	 * @see org.apache.hdt.core.internal.model.HadoopPackage#getHDFSServer_Version()
+	 * @model default="1.0.0.0"
+	 * @generated
+	 */
+	String getVersion();
+
+	/**
+	 * Sets the value of the '{@link org.apache.hdt.core.internal.model.HDFSServer#getVersion <em>Version</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @param value the new value of the '<em>Version</em>' attribute.
+	 * @see #getVersion()
+	 * @generated
+	 */
+	void setVersion(String value);
+
 } // HDFSServer

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/34799cec/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/HadoopPackage.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/HadoopPackage.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/HadoopPackage.java
index 8332b4e..f2fd035 100644
--- a/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/HadoopPackage.java
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/HadoopPackage.java
@@ -228,13 +228,22 @@ public interface HadoopPackage extends EPackage {
 	int HDFS_SERVER__GROUP_IDS = SERVER_FEATURE_COUNT + 3;
 
 	/**
+	 * The feature id for the '<em><b>Version</b></em>' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 * @ordered
+	 */
+	int HDFS_SERVER__VERSION = SERVER_FEATURE_COUNT + 4;
+
+	/**
 	 * The number of structural features of the '<em>HDFS Server</em>' class.
 	 * <!-- begin-user-doc -->
 	 * <!-- end-user-doc -->
 	 * @generated
 	 * @ordered
 	 */
-	int HDFS_SERVER_FEATURE_COUNT = SERVER_FEATURE_COUNT + 4;
+	int HDFS_SERVER_FEATURE_COUNT = SERVER_FEATURE_COUNT + 5;
 
 	/**
 	 * The meta object id for the '{@link org.apache.hdt.core.internal.model.impl.ServersImpl <em>Servers</em>}' class.
@@ -737,6 +746,17 @@ public interface HadoopPackage extends EPackage {
 	EAttribute getHDFSServer_GroupIds();
 
 	/**
+	 * Returns the meta object for the attribute '{@link org.apache.hdt.core.internal.model.HDFSServer#getVersion <em>Version</em>}'.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @return the meta object for the attribute '<em>Version</em>'.
+	 * @see org.apache.hdt.core.internal.model.HDFSServer#getVersion()
+	 * @see #getHDFSServer()
+	 * @generated
+	 */
+	EAttribute getHDFSServer_Version();
+
+	/**
 	 * Returns the meta object for class '{@link org.apache.hdt.core.internal.model.Servers <em>Servers</em>}'.
 	 * <!-- begin-user-doc -->
 	 * <!-- end-user-doc -->
@@ -1126,6 +1146,14 @@ public interface HadoopPackage extends EPackage {
 		EAttribute HDFS_SERVER__GROUP_IDS = eINSTANCE.getHDFSServer_GroupIds();
 
 		/**
+		 * The meta object literal for the '<em><b>Version</b></em>' attribute feature.
+		 * <!-- begin-user-doc -->
+		 * <!-- end-user-doc -->
+		 * @generated
+		 */
+		EAttribute HDFS_SERVER__VERSION = eINSTANCE.getHDFSServer_Version();
+
+		/**
 		 * The meta object literal for the '{@link org.apache.hdt.core.internal.model.impl.ServersImpl <em>Servers</em>}' class.
 		 * <!-- begin-user-doc -->
 		 * <!-- end-user-doc -->

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/34799cec/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/impl/HDFSServerImpl.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/impl/HDFSServerImpl.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/impl/HDFSServerImpl.java
index ed25f07..5cc260c 100644
--- a/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/impl/HDFSServerImpl.java
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/impl/HDFSServerImpl.java
@@ -43,6 +43,7 @@ import org.eclipse.emf.ecore.util.EDataTypeUniqueEList;
  *   <li>{@link org.apache.hdt.core.internal.model.impl.HDFSServerImpl#getOperationURIs <em>Operation UR Is</em>}</li>
  *   <li>{@link org.apache.hdt.core.internal.model.impl.HDFSServerImpl#getUserId <em>User Id</em>}</li>
  *   <li>{@link org.apache.hdt.core.internal.model.impl.HDFSServerImpl#getGroupIds <em>Group Ids</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.impl.HDFSServerImpl#getVersion <em>Version</em>}</li>
  * </ul>
  * </p>
  *
@@ -110,6 +111,26 @@ public class HDFSServerImpl extends ServerImpl implements HDFSServer {
 	protected EList<String> groupIds;
 
 	/**
+	 * The default value of the '{@link #getVersion() <em>Version</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getVersion()
+	 * @generated
+	 * @ordered
+	 */
+	protected static final String VERSION_EDEFAULT = "1.0.0.0";
+
+	/**
+	 * The cached value of the '{@link #getVersion() <em>Version</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getVersion()
+	 * @generated
+	 * @ordered
+	 */
+	protected String version = VERSION_EDEFAULT;
+
+	/**
 	 * <!-- begin-user-doc -->
 	 * <!-- end-user-doc -->
 	 * @generated
@@ -199,6 +220,27 @@ public class HDFSServerImpl extends ServerImpl implements HDFSServer {
 	 * <!-- end-user-doc -->
 	 * @generated
 	 */
+	public String getVersion() {
+		return version;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public void setVersion(String newVersion) {
+		String oldVersion = version;
+		version = newVersion;
+		if (eNotificationRequired())
+			eNotify(new ENotificationImpl(this, Notification.SET, HadoopPackage.HDFS_SERVER__VERSION, oldVersion, version));
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
 	@Override
 	public Object eGet(int featureID, boolean resolve, boolean coreType) {
 		switch (featureID) {
@@ -210,6 +252,8 @@ public class HDFSServerImpl extends ServerImpl implements HDFSServer {
 				return getUserId();
 			case HadoopPackage.HDFS_SERVER__GROUP_IDS:
 				return getGroupIds();
+			case HadoopPackage.HDFS_SERVER__VERSION:
+				return getVersion();
 		}
 		return super.eGet(featureID, resolve, coreType);
 	}
@@ -237,6 +281,9 @@ public class HDFSServerImpl extends ServerImpl implements HDFSServer {
 				getGroupIds().clear();
 				getGroupIds().addAll((Collection<? extends String>)newValue);
 				return;
+			case HadoopPackage.HDFS_SERVER__VERSION:
+				setVersion((String)newValue);
+				return;
 		}
 		super.eSet(featureID, newValue);
 	}
@@ -261,6 +308,9 @@ public class HDFSServerImpl extends ServerImpl implements HDFSServer {
 			case HadoopPackage.HDFS_SERVER__GROUP_IDS:
 				getGroupIds().clear();
 				return;
+			case HadoopPackage.HDFS_SERVER__VERSION:
+				setVersion(VERSION_EDEFAULT);
+				return;
 		}
 		super.eUnset(featureID);
 	}
@@ -281,6 +331,8 @@ public class HDFSServerImpl extends ServerImpl implements HDFSServer {
 				return USER_ID_EDEFAULT == null ? userId != null : !USER_ID_EDEFAULT.equals(userId);
 			case HadoopPackage.HDFS_SERVER__GROUP_IDS:
 				return groupIds != null && !groupIds.isEmpty();
+			case HadoopPackage.HDFS_SERVER__VERSION:
+				return VERSION_EDEFAULT == null ? version != null : !VERSION_EDEFAULT.equals(version);
 		}
 		return super.eIsSet(featureID);
 	}
@@ -303,6 +355,8 @@ public class HDFSServerImpl extends ServerImpl implements HDFSServer {
 		result.append(userId);
 		result.append(", groupIds: ");
 		result.append(groupIds);
+		result.append(", version: ");
+		result.append(version);
 		result.append(')');
 		return result.toString();
 	}
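
A short sketch of the new attribute in use, assuming the usual EMF factory
convention (HadoopFactory.eINSTANCE with a createHDFSServer() method, as the
generated HadoopFactoryImpl below implies):

    import org.apache.hdt.core.internal.model.HDFSServer;
    import org.apache.hdt.core.internal.model.HadoopFactory;

    public class VersionAttributeSketch {
        public static void main(String[] args) {
            HDFSServer server = HadoopFactory.eINSTANCE.createHDFSServer();
            System.out.println(server.getVersion()); // "1.0.0.0" (VERSION_EDEFAULT)
            server.setVersion("2.2");                // notifies adapters via eNotify
        }
    }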

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/34799cec/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/impl/HadoopFactoryImpl.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/impl/HadoopFactoryImpl.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/impl/HadoopFactoryImpl.java
index c3e5c2b..ac640c8 100644
--- a/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/impl/HadoopFactoryImpl.java
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/impl/HadoopFactoryImpl.java
@@ -44,7 +44,7 @@ public class HadoopFactoryImpl extends EFactoryImpl implements HadoopFactory {
 	 */
 	public static HadoopFactory init() {
 		try {
-			HadoopFactory theHadoopFactory = (HadoopFactory)EPackage.Registry.INSTANCE.getEFactory("http://hadoop/1.0"); 
+			HadoopFactory theHadoopFactory = (HadoopFactory)EPackage.Registry.INSTANCE.getEFactory(HadoopPackage.eNS_URI);
 			if (theHadoopFactory != null) {
 				return theHadoopFactory;
 			}

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/34799cec/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/impl/HadoopPackageImpl.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/impl/HadoopPackageImpl.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/impl/HadoopPackageImpl.java
index a698d56..c436729 100644
--- a/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/impl/HadoopPackageImpl.java
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/impl/HadoopPackageImpl.java
@@ -196,6 +196,15 @@ public class HadoopPackageImpl extends EPackageImpl implements HadoopPackage {
 	 * <!-- end-user-doc -->
 	 * @generated
 	 */
+	public EAttribute getHDFSServer_Version() {
+		return (EAttribute)hdfsServerEClass.getEStructuralFeatures().get(4);
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
 	public EClass getServers() {
 		return serversEClass;
 	}
@@ -494,6 +503,7 @@ public class HadoopPackageImpl extends EPackageImpl implements HadoopPackage {
 		createEAttribute(hdfsServerEClass, HDFS_SERVER__OPERATION_UR_IS);
 		createEAttribute(hdfsServerEClass, HDFS_SERVER__USER_ID);
 		createEAttribute(hdfsServerEClass, HDFS_SERVER__GROUP_IDS);
+		createEAttribute(hdfsServerEClass, HDFS_SERVER__VERSION);
 
 		serversEClass = createEClass(SERVERS);
 		createEReference(serversEClass, SERVERS__HDFS_SERVERS);
@@ -570,6 +580,7 @@ public class HadoopPackageImpl extends EPackageImpl implements HadoopPackage {
 		initEAttribute(getHDFSServer_OperationURIs(), ecorePackage.getEString(), "operationURIs", null, 0, -1, HDFSServer.class, IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
 		initEAttribute(getHDFSServer_UserId(), ecorePackage.getEString(), "userId", null, 0, 1, HDFSServer.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
 		initEAttribute(getHDFSServer_GroupIds(), ecorePackage.getEString(), "groupIds", null, 0, -1, HDFSServer.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
+		initEAttribute(getHDFSServer_Version(), ecorePackage.getEString(), "version", "1.0.0.0", 0, 1, HDFSServer.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
 
 		initEClass(serversEClass, Servers.class, "Servers", !IS_ABSTRACT, !IS_INTERFACE, IS_GENERATED_INSTANCE_CLASS);
 		initEReference(getServers_HdfsServers(), this.getHDFSServer(), null, "hdfsServers", null, 0, -1, Servers.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, IS_COMPOSITE, !IS_RESOLVE_PROXIES, !IS_UNSETTABLE, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/34799cec/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/util/HadoopSwitch.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/util/HadoopSwitch.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/util/HadoopSwitch.java
index 6f0b337..c56f87e 100644
--- a/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/util/HadoopSwitch.java
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/util/HadoopSwitch.java
@@ -24,6 +24,8 @@ import org.apache.hdt.core.internal.model.*;
 
 import org.eclipse.emf.ecore.EClass;
 import org.eclipse.emf.ecore.EObject;
+import org.eclipse.emf.ecore.EPackage;
+import org.eclipse.emf.ecore.util.Switch;
 
 /**
  * <!-- begin-user-doc -->
@@ -38,7 +40,7 @@ import org.eclipse.emf.ecore.EObject;
  * @see org.apache.hdt.core.internal.model.HadoopPackage
  * @generated
  */
-public class HadoopSwitch<T> {
+public class HadoopSwitch<T> extends Switch<T> {
 	/**
 	 * The cached model package
 	 * <!-- begin-user-doc -->
@@ -60,34 +62,16 @@ public class HadoopSwitch<T> {
 	}
 
 	/**
-	 * Calls <code>caseXXX</code> for each class of the model until one returns a non null result; it yields that result.
+	 * Checks whether this is a switch for the given package.
 	 * <!-- begin-user-doc -->
 	 * <!-- end-user-doc -->
-	 * @return the first non-null result returned by a <code>caseXXX</code> call.
+	 * @param ePackage the package in question.
+	 * @return whether this is a switch for the given package.
 	 * @generated
 	 */
-	public T doSwitch(EObject theEObject) {
-		return doSwitch(theEObject.eClass(), theEObject);
-	}
-
-	/**
-	 * Calls <code>caseXXX</code> for each class of the model until one returns a non null result; it yields that result.
-	 * <!-- begin-user-doc -->
-	 * <!-- end-user-doc -->
-	 * @return the first non-null result returned by a <code>caseXXX</code> call.
-	 * @generated
-	 */
-	protected T doSwitch(EClass theEClass, EObject theEObject) {
-		if (theEClass.eContainer() == modelPackage) {
-			return doSwitch(theEClass.getClassifierID(), theEObject);
-		}
-		else {
-			List<EClass> eSuperTypes = theEClass.getESuperTypes();
-			return
-				eSuperTypes.isEmpty() ?
-					defaultCase(theEObject) :
-					doSwitch(eSuperTypes.get(0), theEObject);
-		}
+	@Override
+	protected boolean isSwitchFor(EPackage ePackage) {
+		return ePackage == modelPackage;
 	}
 
 	/**
@@ -97,6 +81,7 @@ public class HadoopSwitch<T> {
 	 * @return the first non-null result returned by a <code>caseXXX</code> call.
 	 * @generated
 	 */
+	@Override
 	protected T doSwitch(int classifierID, EObject theEObject) {
 		switch (classifierID) {
 			case HadoopPackage.HDFS_SERVER: {
@@ -222,6 +207,7 @@ public class HadoopSwitch<T> {
 	 * @see #doSwitch(org.eclipse.emf.ecore.EObject)
 	 * @generated
 	 */
+	@Override
 	public T defaultCase(EObject object) {
 		return null;
 	}
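
With HadoopSwitch now extending org.eclipse.emf.ecore.util.Switch<T>, the
hand-rolled dispatch is gone and the class is used like any generated EMF
switch. A sketch, assuming the generated caseHDFSServer(...) hook that the
classifier table above dispatches to:

    import org.apache.hdt.core.internal.model.HDFSServer;
    import org.apache.hdt.core.internal.model.util.HadoopSwitch;
    import org.eclipse.emf.ecore.EObject;

    public class ModelLabeler {
        private final HadoopSwitch<String> describe = new HadoopSwitch<String>() {
            @Override
            public String caseHDFSServer(HDFSServer server) {
                return server.getUri() + " (v" + server.getVersion() + ")";
            }
            @Override
            public String defaultCase(EObject object) {
                return object.eClass().getName();
            }
        };

        public String label(EObject any) {
            return describe.doSwitch(any); // doSwitch now inherited from Switch<T>
        }
    }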

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/34799cec/org.apache.hdt.core/src/org/apache/hdt/core/launch/AbstractHadoopCluster.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/launch/AbstractHadoopCluster.java b/org.apache.hdt.core/src/org/apache/hdt/core/launch/AbstractHadoopCluster.java
index cd06f0e..47d00f4 100644
--- a/org.apache.hdt.core/src/org/apache/hdt/core/launch/AbstractHadoopCluster.java
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/launch/AbstractHadoopCluster.java
@@ -24,11 +24,17 @@ import java.util.Collection;
 import java.util.Iterator;
 import java.util.Map.Entry;
 
+import org.apache.hdt.core.Activator;
+import org.apache.hdt.core.internal.HadoopManager;
+import org.apache.log4j.Logger;
 import org.eclipse.core.runtime.CoreException;
 import org.eclipse.core.runtime.IConfigurationElement;
 import org.eclipse.core.runtime.Platform;
+import org.eclipse.core.runtime.Status;
 
 public abstract class AbstractHadoopCluster {
+	
+	private static final Logger logger = Logger.getLogger(AbstractHadoopCluster.class);
 
 	abstract public String getLocationName();
 
@@ -44,14 +50,14 @@ public abstract class AbstractHadoopCluster {
 
 	abstract public void load(AbstractHadoopCluster server);
 
-	abstract public String getConfProp(String propName);
-
-	abstract public String getConfProp(ConfProp prop);
+	abstract public String getConfPropValue(String propName);
 
-	abstract public void setConfProp(ConfProp prop, String propValue);
+	abstract public String getConfPropValue(ConfProp prop);
 
-	abstract public void setConfProp(String propName, String propValue);
+	abstract public void setConfPropValue(ConfProp prop, String propValue);
 
+	abstract public void setConfPropValue(String propName, String propValue);
+	
 	abstract public Iterator<Entry<String, String>> getConfiguration();
 
 	abstract public void purgeJob(IHadoopJob job);
@@ -66,23 +72,42 @@ public abstract class AbstractHadoopCluster {
 	
 	abstract public boolean isAvailable() throws CoreException;
 	
+	abstract public String getVersion();
+	
 	public static AbstractHadoopCluster createCluster(File file) throws CoreException, IOException {
-		AbstractHadoopCluster hadoopCluster = createCluster();
+		AbstractHadoopCluster hadoopCluster = createCluster(ConfProp.PI_HADOOP_VERSION.defVal);
 		hadoopCluster.loadFromXML(file);
 		return hadoopCluster;
 	}
 
-	public static AbstractHadoopCluster createCluster() throws CoreException {
+	public static AbstractHadoopCluster createCluster(String hadoopVersion) throws CoreException {
+		logger.debug("Creating cluster for version " + hadoopVersion);
 		IConfigurationElement[] elementsFor = Platform.getExtensionRegistry().getConfigurationElementsFor("org.apache.hdt.core.hadoopCluster");
-		return (AbstractHadoopCluster) elementsFor[0].createExecutableExtension("class");
+		for (IConfigurationElement configElement : elementsFor) {
+			String version = configElement.getAttribute("protocolVersion");
+			if(version.equalsIgnoreCase(hadoopVersion)){
+				return (AbstractHadoopCluster)configElement.createExecutableExtension("class");
+			}
+		}
+		throw new CoreException(new Status(Status.ERROR, Activator.BUNDLE_ID, "No client found for Hadoop version " + hadoopVersion));
 	}
 
 	public static AbstractHadoopCluster createCluster(AbstractHadoopCluster existing) throws CoreException {
-		AbstractHadoopCluster hadoopCluster = createCluster();
+		AbstractHadoopCluster hadoopCluster = createCluster(existing.getVersion());
 		hadoopCluster.load(existing);
 		return hadoopCluster;
 	}
 
+	/**
+	 * @param propName the Hadoop configuration property name
+	 * @return the matching ConfProp, or null if none is registered
+	 */
+	public ConfProp getConfPropForName(String propName) {
+		return ConfProp.getByName(propName);
+	}
 	
+	public String getConfPropName(ConfProp prop) {
+		return prop.name;
+	}
 
 }
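
Usage sketch for the version-driven factory (illustrative; "1.1" matches the
protocolVersion that org.apache.hdt.hadoop.release declares below, and an
unregistered version now fails fast with the CoreException above):

    import org.apache.hdt.core.launch.AbstractHadoopCluster;
    import org.eclipse.core.runtime.CoreException;

    public class ClusterFactorySketch {
        public static AbstractHadoopCluster copyOfDefault() throws CoreException {
            AbstractHadoopCluster v1 = AbstractHadoopCluster.createCluster("1.1");
            // Copying an existing location now preserves its version:
            return AbstractHadoopCluster.createCluster(v1);
        }
    }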

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/34799cec/org.apache.hdt.core/src/org/apache/hdt/core/launch/ConfProp.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/launch/ConfProp.java b/org.apache.hdt.core/src/org/apache/hdt/core/launch/ConfProp.java
index c7c64f9..8b91dbe 100644
--- a/org.apache.hdt.core/src/org/apache/hdt/core/launch/ConfProp.java
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/launch/ConfProp.java
@@ -26,6 +26,11 @@ public enum ConfProp {
 	 * Property name for the Hadoop location name
 	 */
 	PI_LOCATION_NAME(true, "location.name", "New Hadoop location"),
+	
+	/**
+	 * Property name for the Hadoop Version
+	 */
+	PI_HADOOP_VERSION(true, "hadoop.version", "1.1"),
 
 	/**
 	 * Property name for the master host name (the Job tracker)
@@ -116,8 +121,7 @@ public enum ConfProp {
 	public static ConfProp getByName(String propName) {
 		return map.get(propName);
 	}
-
-	public final String name;
+	protected final String name;
 
 	public final String defVal;
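
With ConfProp.name no longer public, callers resolve property names through
the cluster instead of reading the field directly. A sketch of the intended
round trip (assumes an Eclipse runtime so createCluster can consult the
extension registry):

    import org.apache.hdt.core.launch.AbstractHadoopCluster;
    import org.apache.hdt.core.launch.ConfProp;
    import org.eclipse.core.runtime.CoreException;

    public class ConfPropSketch {
        public static String readHadoopVersion() throws CoreException {
            AbstractHadoopCluster cluster = AbstractHadoopCluster
                    .createCluster(ConfProp.PI_HADOOP_VERSION.defVal); // "1.1"
            ConfProp prop = cluster.getConfPropForName("hadoop.version");
            cluster.setConfPropValue(prop, "1.1");
            return cluster.getConfPropValue(prop);
        }
    }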
 

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/34799cec/org.apache.hdt.feature/.classpath
----------------------------------------------------------------------
diff --git a/org.apache.hdt.feature/.classpath b/org.apache.hdt.feature/.classpath
index 4c2b7c4..39b5586 100644
--- a/org.apache.hdt.feature/.classpath
+++ b/org.apache.hdt.feature/.classpath
@@ -5,5 +5,6 @@
   <classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER"/>
   <classpathentry kind="src" path="/org.apache.hdt.core"/>
   <classpathentry kind="src" path="/org.apache.hdt.hadoop.release"/>
+  <classpathentry kind="src" path="/org.apache.hdt.hadoop2.release"/>
   <classpathentry kind="src" path="/org.apache.hdt.ui"/>
-</classpath>
\ No newline at end of file
+</classpath>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/34799cec/org.apache.hdt.feature/.project
----------------------------------------------------------------------
diff --git a/org.apache.hdt.feature/.project b/org.apache.hdt.feature/.project
index 017e5f9..6aff5d5 100644
--- a/org.apache.hdt.feature/.project
+++ b/org.apache.hdt.feature/.project
@@ -5,6 +5,7 @@
 	<projects>
 		<project>org.apache.hdt.core</project>
 		<project>org.apache.hdt.hadoop.release</project>
+		<project>org.apache.hdt.hadoop2.release</project>
 		<project>org.apache.hdt.ui</project>
 	</projects>
 	<buildSpec>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/34799cec/org.apache.hdt.feature/feature.xml
----------------------------------------------------------------------
diff --git a/org.apache.hdt.feature/feature.xml b/org.apache.hdt.feature/feature.xml
index 0f13637..120c73f 100644
--- a/org.apache.hdt.feature/feature.xml
+++ b/org.apache.hdt.feature/feature.xml
@@ -35,7 +35,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
 or implied. See the License for the specific language governing
 permissions and limitations under the License.
    </license>
-
+   
    <plugin
          id="org.apache.hdt.hadoop.release"
          download-size="0"
@@ -45,6 +45,14 @@ permissions and limitations under the License.
          unpack="false"/>
 
    <plugin
+         id="org.apache.hdt.hadoop2.release"
+         download-size="0"
+         install-size="0"
+         version="0.0.2.qualifier"
+         fragment="true"
+         unpack="false"/>
+
+   <plugin
          id="org.apache.hdt.ui"
          download-size="0"
          install-size="0"

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/34799cec/org.apache.hdt.hadoop.release/META-INF/MANIFEST.MF
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop.release/META-INF/MANIFEST.MF b/org.apache.hdt.hadoop.release/META-INF/MANIFEST.MF
index db5e83c..ec6c80c 100644
--- a/org.apache.hdt.hadoop.release/META-INF/MANIFEST.MF
+++ b/org.apache.hdt.hadoop.release/META-INF/MANIFEST.MF
@@ -4,8 +4,13 @@ Bundle-Name: Apache Hadoop 0.0.1.qualifier Release Eclipse Plugin
 Bundle-SymbolicName: org.apache.hdt.hadoop.release;singleton:=true
 Bundle-Version: 0.0.2.qualifier
 Bundle-Vendor: Apache Hadoop
-Fragment-Host: org.apache.hdt.core
 Bundle-RequiredExecutionEnvironment: JavaSE-1.6
+Require-Bundle: org.apache.hdt.core,
+ org.eclipse.core.runtime,
+ org.eclipse.core.filesystem;bundle-version="1.3.0";visibility:=reexport,
+ org.eclipse.core.resources;bundle-version="3.6.0",
+ org.eclipse.swt,
+ org.eclipse.jface
 Bundle-ClassPath: .,
  jars/zookeeper-3.4.5.jar,
  jars/slf4j-log4j12-1.6.1.jar,

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/34799cec/org.apache.hdt.hadoop.release/build.properties
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop.release/build.properties b/org.apache.hdt.hadoop.release/build.properties
index 4c1d15a..848ab4a 100644
--- a/org.apache.hdt.hadoop.release/build.properties
+++ b/org.apache.hdt.hadoop.release/build.properties
@@ -19,5 +19,5 @@ source.. = src/
 output.. = bin/
 bin.includes = META-INF/,\
                .,\
-               fragment.xml,\
+               plugin.xml,\
                jars/

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/34799cec/org.apache.hdt.hadoop.release/fragment.xml
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop.release/fragment.xml b/org.apache.hdt.hadoop.release/fragment.xml
deleted file mode 100644
index 729d38f..0000000
--- a/org.apache.hdt.hadoop.release/fragment.xml
+++ /dev/null
@@ -1,43 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<?eclipse version="3.4"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<fragment>
-   <extension
-         point="org.apache.hdt.core.hdfsClient">
-      <hdfsClient
-            class="org.apache.hdt.hadoop.release.HDFSClientRelease"
-            protocol="hdfs"
-            protocolVersion="1.1.2.21">
-      </hdfsClient>
-   </extension>
-   <extension
-         point="org.apache.hdt.core.zookeeperClient">
-      <zookeeperClient
-            class="org.apache.hdt.hadoop.release.ZooKeeperClientRelease"
-            protocolVersion="3.4.5">
-      </zookeeperClient>
-   </extension>
-   <extension
-         point="org.apache.hdt.core.hadoopCluster">
-      <hadoopCluster
-            class="org.apache.hdt.hadoop.release.HadoopCluster"
-            protocolVersion="1.1">
-      </hadoopCluster>
-   </extension>
-
-</fragment>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/34799cec/org.apache.hdt.hadoop.release/plugin.xml
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop.release/plugin.xml b/org.apache.hdt.hadoop.release/plugin.xml
new file mode 100644
index 0000000..476bdcd
--- /dev/null
+++ b/org.apache.hdt.hadoop.release/plugin.xml
@@ -0,0 +1,43 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?eclipse version="3.4"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<plugin>
+   <extension
+         point="org.apache.hdt.core.hdfsClient">
+      <hdfsClient
+            class="org.apache.hdt.hadoop.release.HDFSClientRelease"
+            protocol="hdfs"
+            protocolVersion="1.1">
+      </hdfsClient>
+   </extension>
+   <extension
+         point="org.apache.hdt.core.zookeeperClient">
+      <zookeeperClient
+            class="org.apache.hdt.hadoop.release.ZooKeeperClientRelease"
+            protocolVersion="3.4.5">
+      </zookeeperClient>
+   </extension>
+   <extension
+         point="org.apache.hdt.core.hadoopCluster">
+      <hadoopCluster
+            class="org.apache.hdt.hadoop.release.HadoopCluster"
+            protocolVersion="1.1">
+      </hadoopCluster>
+   </extension>
+
+</plugin>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/34799cec/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopCluster.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopCluster.java b/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopCluster.java
index 67fcb75..0014bb6 100644
--- a/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopCluster.java
+++ b/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopCluster.java
@@ -28,6 +28,7 @@ import java.util.Collections;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.Map;
+import java.util.ServiceLoader;
 import java.util.Map.Entry;
 import java.util.Set;
 import java.util.TreeMap;
@@ -37,7 +38,6 @@ import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
 import java.util.logging.Logger;
 
 import javax.xml.parsers.DocumentBuilder;
@@ -53,13 +53,11 @@ import org.apache.hadoop.mapred.JobID;
 import org.apache.hadoop.mapred.JobStatus;
 import org.apache.hadoop.mapred.RunningJob;
 import org.apache.hdt.core.Activator;
-import org.apache.hdt.core.launch.ConfProp;
 import org.apache.hdt.core.launch.AbstractHadoopCluster;
+import org.apache.hdt.core.launch.ConfProp;
 import org.apache.hdt.core.launch.IHadoopJob;
 import org.apache.hdt.core.launch.IJarModule;
 import org.apache.hdt.core.launch.IJobListener;
-import org.eclipse.core.internal.utils.FileUtil;
-import org.eclipse.core.resources.WorkspaceJob;
 import org.eclipse.core.runtime.CoreException;
 import org.eclipse.core.runtime.IProgressMonitor;
 import org.eclipse.core.runtime.IStatus;
@@ -128,16 +126,18 @@ public class HadoopCluster extends AbstractHadoopCluster {
 							+ HadoopCluster.this.getLocationName(), ioe);
 				}
 			}
-
+			Thread current = Thread.currentThread();
+			ClassLoader oldLoader = current.getContextClassLoader();
 			try {
+				current.setContextClassLoader(HadoopCluster.class.getClassLoader());
 				// Set of all known existing Job IDs we want fresh info of
 				Set<JobID> missingJobIds = new HashSet<JobID>(runningJobs.keySet());
 
 				JobStatus[] jstatus = client.jobsToComplete();
 				jstatus = jstatus == null ? new JobStatus[0] : jstatus;
-				for (JobStatus status : jstatus) {
+				for (final JobStatus status : jstatus) {
 
-					JobID jobId = status.getJobID();
+					final JobID jobId = status.getJobID();
 					missingJobIds.remove(jobId);
 
 					HadoopJob hJob;
@@ -145,7 +145,11 @@ public class HadoopCluster extends AbstractHadoopCluster {
 						hJob = runningJobs.get(jobId);
 						if (hJob == null) {
 							// Unknown job, create an entry
-							RunningJob running = client.getJob(jobId);
+							final RunningJob running = client.getJob(jobId);
+							ServiceLoader<FileSystem> serviceLoader = ServiceLoader.load(FileSystem.class);
+							for (FileSystem fs : serviceLoader) {
+								System.out.println(fs.getClass().getProtectionDomain().getCodeSource().getLocation());
+							}
 							hJob = new HadoopJob(HadoopCluster.this, jobId, running, status);
 							newJob(hJob);
 						}
@@ -166,7 +170,9 @@ public class HadoopCluster extends AbstractHadoopCluster {
 				client = null;
 				return new Status(Status.ERROR, Activator.BUNDLE_ID, 0, "Cannot retrieve running Jobs on location: " + HadoopCluster.this.getLocationName(),
 						ioe);
-			}
+			} finally {
+				current.setContextClassLoader(oldLoader);
+			}
 
 			// Schedule the next observation
 			schedule(STATUS_OBSERVATION_DELAY);
@@ -321,8 +327,8 @@ public class HadoopCluster extends AbstractHadoopCluster {
 	 *            the configuration property
 	 * @return the property value
 	 */
-	public String getConfProp(ConfProp prop) {
-		return conf.get(prop.name);
+	public String getConfPropValue(ConfProp prop) {
+		return conf.get(getConfPropName(prop));
 	}
 
 	/**
@@ -332,12 +338,12 @@ public class HadoopCluster extends AbstractHadoopCluster {
 	 *            the property name
 	 * @return the property value
 	 */
-	public String getConfProp(String propName) {
+	public String getConfPropValue(String propName) {
 		return this.conf.get(propName);
 	}
 
 	public String getLocationName() {
-		return getConfProp(ConfProp.PI_LOCATION_NAME);
+		return getConfPropValue(ConfProp.PI_LOCATION_NAME);
 	}
 
 	/**
@@ -346,7 +352,7 @@ public class HadoopCluster extends AbstractHadoopCluster {
 	 * @return the host name of the Job tracker
 	 */
 	public String getMasterHostName() {
-		return getConfProp(ConfProp.PI_JOB_TRACKER_HOST);
+		return getConfPropValue(ConfProp.PI_JOB_TRACKER_HOST);
 	}
 
 	public String getState() {
@@ -432,25 +438,18 @@ public class HadoopCluster extends AbstractHadoopCluster {
 	 * @param propvalue
 	 *            the property value
 	 */
-	public void setConfProp(ConfProp prop, String propValue) {
+	public void setConfPropValue(ConfProp prop, String propValue) {
 		if (propValue != null)
-			conf.set(prop.name, propValue);
+			setConfPropValue(getConfPropName(prop), propValue);
 	}
 
-	/**
-	 * Sets a Hadoop configuration property value
-	 * 
-	 * @param propName
-	 *            the property name
-	 * @param propValue
-	 *            the property value
-	 */
-	public void setConfProp(String propName, String propValue) {
-		this.conf.set(propName, propValue);
+	@Override
+	public void setConfPropValue(String propName, String propValue) {
+		conf.set(propName, propValue);
 	}
-
+	
 	public void setLocationName(String newName) {
-		setConfProp(ConfProp.PI_LOCATION_NAME, newName);
+		setConfPropValue(ConfProp.PI_LOCATION_NAME, newName);
 	}
 
 	/**
@@ -483,7 +482,7 @@ public class HadoopCluster extends AbstractHadoopCluster {
 	 */
 	private void addPluginConfigDefaultProperties() {
 		for (ConfProp prop : ConfProp.values()) {
-			conf.set(prop.name, prop.defVal);
+			conf.set(getConfPropName(prop), prop.defVal);
 		}
 	}
 
@@ -599,4 +598,12 @@ public class HadoopCluster extends AbstractHadoopCluster {
 					Activator.BUNDLE_ID, "unable to connect to server", e));
 		}
 	}
+
+	/* (non-Javadoc)
+	 * @see org.apache.hdt.core.launch.AbstractHadoopCluster#getVersion()
+	 */
+	@Override
+	public String getVersion() {
+		return "1.1";
+	}
 }
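
The getConfProp*/setConfProp* renames funnel every read and write through
getConfPropName(ConfProp), so a version-specific cluster (such as the new
hadoop2 implementation) can remap a logical property to that version's key
name by overriding it. A brief sketch of the resulting call pattern
(illustrative only; getVersion() is hard-coded to "1.1" in this plugin):

    import org.apache.hdt.core.launch.AbstractHadoopCluster;
    import org.apache.hdt.core.launch.ConfProp;

    public class LocationNamingSketch {
        public static void rename(AbstractHadoopCluster cluster) {
            cluster.setConfPropValue(ConfProp.PI_LOCATION_NAME, "staging");
            assert "staging".equals(cluster.getLocationName());
            assert "1.1".equals(cluster.getVersion());
        }
    }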

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/34799cec/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopJob.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopJob.java b/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopJob.java
index 5861967..9200674 100644
--- a/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopJob.java
+++ b/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopJob.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.mapred.JobStatus;
 import org.apache.hadoop.mapred.RunningJob;
 import org.apache.hdt.core.launch.AbstractHadoopCluster;
 import org.apache.hdt.core.launch.IHadoopJob;
+import org.eclipse.core.runtime.internal.adaptor.ContextFinder;
 
 /**
  * Representation of a Map/Reduce running job on a given location
@@ -125,6 +126,7 @@ public class HadoopJob implements IHadoopJob {
 	 * @param status
 	 */
 	public HadoopJob(HadoopCluster location, JobID id, RunningJob running, JobStatus status) {
+		//HadoopCluster.updateCurrentClassLoader();
 
 		this.location = location;
 		this.jobId = id;

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/34799cec/org.apache.hdt.hadoop2.release/.classpath
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop2.release/.classpath b/org.apache.hdt.hadoop2.release/.classpath
new file mode 100644
index 0000000..d59ac75
--- /dev/null
+++ b/org.apache.hdt.hadoop2.release/.classpath
@@ -0,0 +1,91 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<classpath>
+	<classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-1.6"/>
+	<classpathentry kind="con" path="org.eclipse.pde.core.requiredPlugins"/>
+	<classpathentry kind="src" path="src/"/>
+	<classpathentry exported="true" kind="lib" path="jars/activation-1.1.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/aopalliance-1.0.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/asm-3.1.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/avro-1.7.4.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/commons-beanutils-1.7.0.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/commons-beanutils-core-1.8.0.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/commons-cli-1.2.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/commons-codec-1.4.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/commons-collections-3.2.1.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/commons-compress-1.4.1.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/commons-configuration-1.6.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/commons-digester-1.8.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/commons-el-1.0.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/commons-httpclient-3.1.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/commons-io-2.1.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/commons-lang-2.4.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/commons-logging-1.1.1.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/commons-math-2.1.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/commons-net-3.1.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/gmbal-api-only-3.0.0-b023.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/grizzly-framework-2.1.2.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/grizzly-http-2.1.2.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/grizzly-http-server-2.1.2.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/grizzly-http-servlet-2.1.2.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/grizzly-rcm-2.1.2.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/guava-11.0.2.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/guice-3.0.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/guice-servlet-3.0.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/hadoop-annotations-2.2.0.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/hadoop-auth-2.2.0.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/hadoop-client-2.2.0.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/hadoop-common-2.2.0.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/hadoop-hdfs-2.2.0.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/hadoop-mapreduce-client-app-2.2.0.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/hadoop-mapreduce-client-common-2.2.0.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/hadoop-mapreduce-client-core-2.2.0.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/hadoop-mapreduce-client-jobclient-2.2.0.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/hadoop-mapreduce-client-shuffle-2.2.0.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/hadoop-yarn-api-2.2.0.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/hadoop-yarn-client-2.2.0.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/hadoop-yarn-common-2.2.0.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/hadoop-yarn-server-common-2.2.0.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/hadoop-yarn-server-tests-2.2.0.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/hamcrest-core-1.3.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/jackson-core-asl-1.8.8.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/jackson-jaxrs-1.8.3.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/jackson-mapper-asl-1.8.8.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/jackson-xc-1.8.3.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/jasper-compiler-5.5.23.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/jasper-runtime-5.5.23.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/javax.inject-1.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/javax.servlet-3.1.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/javax.servlet-api-3.0.1.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/jaxb-api-2.2.2.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/jaxb-impl-2.2.3-1.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/jersey-client-1.9.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/jersey-core-1.9.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/jersey-grizzly2-1.9.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/jersey-guice-1.9.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/jersey-json-1.9.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/jersey-server-1.9.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/jersey-test-framework-core-1.9.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/jersey-test-framework-grizzly2-1.9.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/jets3t-0.6.1.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/jettison-1.1.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/jetty-6.1.26.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/jetty-util-6.1.26.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/jsch-0.1.42.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/jsp-api-2.1.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/jsr305-1.3.9.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/junit-4.11.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/log4j-1.2.15.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/management-api-3.0.0-b012.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/netty-3.6.2.Final.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/paranamer-2.3.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/protobuf-java-2.5.0.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/servlet-api-2.5.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/slf4j-api-1.6.1.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/slf4j-log4j12-1.6.1.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/snappy-java-1.0.4.1.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/stax-api-1.0.1.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/xmlenc-0.52.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/xz-1.0.jar"/>
+	<classpathentry exported="true" kind="lib" path="jars/zookeeper-3.4.5.jar"/>
+	<classpathentry kind="output" path="target/classes"/>
+</classpath>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/34799cec/org.apache.hdt.hadoop2.release/.settings/org.eclipse.core.resources.prefs
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop2.release/.settings/org.eclipse.core.resources.prefs b/org.apache.hdt.hadoop2.release/.settings/org.eclipse.core.resources.prefs
new file mode 100644
index 0000000..99f26c0
--- /dev/null
+++ b/org.apache.hdt.hadoop2.release/.settings/org.eclipse.core.resources.prefs
@@ -0,0 +1,2 @@
+eclipse.preferences.version=1
+encoding/<project>=UTF-8

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/34799cec/org.apache.hdt.hadoop2.release/.settings/org.eclipse.jdt.core.prefs
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop2.release/.settings/org.eclipse.jdt.core.prefs b/org.apache.hdt.hadoop2.release/.settings/org.eclipse.jdt.core.prefs
new file mode 100644
index 0000000..c537b63
--- /dev/null
+++ b/org.apache.hdt.hadoop2.release/.settings/org.eclipse.jdt.core.prefs
@@ -0,0 +1,7 @@
+eclipse.preferences.version=1
+org.eclipse.jdt.core.compiler.codegen.inlineJsrBytecode=enabled
+org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.6
+org.eclipse.jdt.core.compiler.compliance=1.6
+org.eclipse.jdt.core.compiler.problem.assertIdentifier=error
+org.eclipse.jdt.core.compiler.problem.enumIdentifier=error
+org.eclipse.jdt.core.compiler.source=1.6

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/34799cec/org.apache.hdt.hadoop2.release/.settings/org.eclipse.m2e.core.prefs
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop2.release/.settings/org.eclipse.m2e.core.prefs b/org.apache.hdt.hadoop2.release/.settings/org.eclipse.m2e.core.prefs
new file mode 100644
index 0000000..f897a7f
--- /dev/null
+++ b/org.apache.hdt.hadoop2.release/.settings/org.eclipse.m2e.core.prefs
@@ -0,0 +1,4 @@
+activeProfiles=
+eclipse.preferences.version=1
+resolveWorkspaceProjects=true
+version=1

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/34799cec/org.apache.hdt.hadoop2.release/META-INF/MANIFEST.MF
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop2.release/META-INF/MANIFEST.MF b/org.apache.hdt.hadoop2.release/META-INF/MANIFEST.MF
new file mode 100644
index 0000000..eb51451
--- /dev/null
+++ b/org.apache.hdt.hadoop2.release/META-INF/MANIFEST.MF
@@ -0,0 +1,98 @@
+Manifest-Version: 1.0
+Bundle-ManifestVersion: 2
+Bundle-Name: Apache Hadoop2 Release Eclipse Plugin
+Bundle-SymbolicName: org.apache.hdt.hadoop2.release;singleton:=true
+Bundle-Version: 0.0.2.qualifier
+Bundle-Vendor: Apache Hadoop
+Bundle-RequiredExecutionEnvironment: JavaSE-1.6
+Require-Bundle: org.apache.hdt.core,
+ org.eclipse.core.runtime,
+ org.eclipse.core.filesystem;bundle-version="1.3.0";visibility:=reexport,
+ org.eclipse.core.resources;bundle-version="3.6.0",
+ org.eclipse.swt,
+ org.eclipse.jface
+Bundle-ClassPath: .,
+ jars/activation-1.1.jar,
+ jars/aopalliance-1.0.jar,
+ jars/asm-3.1.jar,
+ jars/avro-1.7.4.jar,
+ jars/commons-beanutils-1.7.0.jar,
+ jars/commons-beanutils-core-1.8.0.jar,
+ jars/commons-cli-1.2.jar,
+ jars/commons-codec-1.4.jar,
+ jars/commons-collections-3.2.1.jar,
+ jars/commons-compress-1.4.1.jar,
+ jars/commons-configuration-1.6.jar,
+ jars/commons-digester-1.8.jar,
+ jars/commons-el-1.0.jar,
+ jars/commons-httpclient-3.1.jar,
+ jars/commons-io-2.1.jar,
+ jars/commons-lang-2.4.jar,
+ jars/commons-logging-1.1.1.jar,
+ jars/commons-math-2.1.jar,
+ jars/commons-net-3.1.jar,
+ jars/gmbal-api-only-3.0.0-b023.jar,
+ jars/grizzly-framework-2.1.2.jar,
+ jars/grizzly-http-2.1.2.jar,
+ jars/grizzly-http-server-2.1.2.jar,
+ jars/grizzly-http-servlet-2.1.2.jar,
+ jars/grizzly-rcm-2.1.2.jar,
+ jars/guava-11.0.2.jar,
+ jars/guice-3.0.jar,
+ jars/guice-servlet-3.0.jar,
+ jars/hadoop-annotations-2.2.0.jar,
+ jars/hadoop-auth-2.2.0.jar,
+ jars/hadoop-client-2.2.0.jar,
+ jars/hadoop-common-2.2.0.jar,
+ jars/hadoop-hdfs-2.2.0.jar,
+ jars/hadoop-mapreduce-client-app-2.2.0.jar,
+ jars/hadoop-mapreduce-client-common-2.2.0.jar,
+ jars/hadoop-mapreduce-client-core-2.2.0.jar,
+ jars/hadoop-mapreduce-client-jobclient-2.2.0.jar,
+ jars/hadoop-mapreduce-client-shuffle-2.2.0.jar,
+ jars/hadoop-yarn-api-2.2.0.jar,
+ jars/hadoop-yarn-client-2.2.0.jar,
+ jars/hadoop-yarn-common-2.2.0.jar,
+ jars/hadoop-yarn-server-common-2.2.0.jar,
+ jars/hadoop-yarn-server-tests-2.2.0.jar,
+ jars/hamcrest-core-1.3.jar,
+ jars/jackson-core-asl-1.8.8.jar,
+ jars/jackson-jaxrs-1.8.3.jar,
+ jars/jackson-mapper-asl-1.8.8.jar,
+ jars/jackson-xc-1.8.3.jar,
+ jars/jasper-compiler-5.5.23.jar,
+ jars/jasper-runtime-5.5.23.jar,
+ jars/javax.inject-1.jar,
+ jars/javax.servlet-3.1.jar,
+ jars/javax.servlet-api-3.0.1.jar,
+ jars/jaxb-api-2.2.2.jar,
+ jars/jaxb-impl-2.2.3-1.jar,
+ jars/jersey-client-1.9.jar,
+ jars/jersey-core-1.9.jar,
+ jars/jersey-grizzly2-1.9.jar,
+ jars/jersey-guice-1.9.jar,
+ jars/jersey-json-1.9.jar,
+ jars/jersey-server-1.9.jar,
+ jars/jersey-test-framework-core-1.9.jar,
+ jars/jersey-test-framework-grizzly2-1.9.jar,
+ jars/jets3t-0.6.1.jar,
+ jars/jettison-1.1.jar,
+ jars/jetty-6.1.26.jar,
+ jars/jetty-util-6.1.26.jar,
+ jars/jsch-0.1.42.jar,
+ jars/jsp-api-2.1.jar,
+ jars/jsr305-1.3.9.jar,
+ jars/junit-4.11.jar,
+ jars/log4j-1.2.15.jar,
+ jars/management-api-3.0.0-b012.jar,
+ jars/netty-3.6.2.Final.jar,
+ jars/paranamer-2.3.jar,
+ jars/protobuf-java-2.5.0.jar,
+ jars/servlet-api-2.5.jar,
+ jars/slf4j-api-1.6.1.jar,
+ jars/slf4j-log4j12-1.6.1.jar,
+ jars/snappy-java-1.0.4.1.jar,
+ jars/stax-api-1.0.1.jar,
+ jars/xmlenc-0.52.jar,
+ jars/xz-1.0.jar,
+ jars/zookeeper-3.4.5.jar

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/34799cec/org.apache.hdt.hadoop2.release/build.properties
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop2.release/build.properties b/org.apache.hdt.hadoop2.release/build.properties
new file mode 100644
index 0000000..848ab4a
--- /dev/null
+++ b/org.apache.hdt.hadoop2.release/build.properties
@@ -0,0 +1,23 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+source.. = src/
+output.. = bin/
+bin.includes = META-INF/,\
+               .,\
+               plugin.xml,\
+               jars/

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/34799cec/org.apache.hdt.hadoop2.release/plugin.xml
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop2.release/plugin.xml b/org.apache.hdt.hadoop2.release/plugin.xml
new file mode 100644
index 0000000..b200aca
--- /dev/null
+++ b/org.apache.hdt.hadoop2.release/plugin.xml
@@ -0,0 +1,35 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?eclipse version="3.4"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<plugin>
+<extension
+         point="org.apache.hdt.core.hadoopCluster" >
+      <hadoopCluster
+            class="org.apache.hdt.hadoop2.release.HadoopCluster"
+            protocolVersion="2.2">
+      </hadoopCluster>
+   </extension>
+   <extension
+         point="org.apache.hdt.core.hdfsClient">
+      <hdfsClient
+            class="org.apache.hdt.hadoop2.release.HDFSClientRelease"
+            protocol="hdfs"
+            protocolVersion="2.2">
+      </hdfsClient>
+   </extension>
+</plugin>
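
The two extensions above register the Hadoop 2.2 implementations with the
extension points that org.apache.hdt.core defines. As a minimal sketch of how
such a contribution can be resolved at runtime: the registry calls are
standard Eclipse Platform API, but the lookup class and method here are
illustrative, not part of HDT.

    import org.eclipse.core.runtime.CoreException;
    import org.eclipse.core.runtime.IConfigurationElement;
    import org.eclipse.core.runtime.Platform;

    public class HdfsClientLookup {
        /** Finds the hdfsClient contribution matching the given protocol version, if any. */
        public static Object findClient(String protocolVersion) throws CoreException {
            IConfigurationElement[] elements = Platform.getExtensionRegistry()
                    .getConfigurationElementsFor("org.apache.hdt.core.hdfsClient");
            for (IConfigurationElement element : elements) {
                if (protocolVersion.equals(element.getAttribute("protocolVersion"))) {
                    // For "2.2" this would instantiate the
                    // org.apache.hdt.hadoop2.release.HDFSClientRelease declared above.
                    return element.createExecutableExtension("class");
                }
            }
            return null;
        }
    }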

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/34799cec/org.apache.hdt.hadoop2.release/pom.xml
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop2.release/pom.xml b/org.apache.hdt.hadoop2.release/pom.xml
new file mode 100644
index 0000000..249ad6e
--- /dev/null
+++ b/org.apache.hdt.hadoop2.release/pom.xml
@@ -0,0 +1,127 @@
+<!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied.  See the License for the
+specific language governing permissions and limitations
+under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <relativePath>../pom.xml</relativePath>
+    <groupId>org.apache.hdt</groupId>
+    <artifactId>hdt.master</artifactId>
+    <version>0.0.2-SNAPSHOT</version>
+  </parent>
+  <artifactId>org.apache.hdt.hadoop2.release</artifactId>
+  <packaging>eclipse-plugin</packaging>
+  <name>Apache Hadoop2 Development Tools Assembly</name>
+  
+  <properties>
+    <hadoop2.version>2.2.0</hadoop2.version>
+  </properties>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <version>${hadoop2.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-mapreduce-client-core</artifactId>
+      <version>${hadoop2.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-client</artifactId>
+      <version>${hadoop2.version}</version>
+    </dependency>
+  </dependencies>
+  
+  <build>
+    <sourceDirectory>src</sourceDirectory>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-dependency-plugin</artifactId>
+        <version>2.8</version>
+        <executions>
+          <execution>
+            <id>copy-dependencies</id>
+            <phase>initialize</phase>
+            <goals>
+              <goal>copy-dependencies</goal>
+            </goals>
+            <configuration>
+              <excludeScope>system</excludeScope>
+              <outputDirectory>${basedir}/jars</outputDirectory>
+              <overWriteReleases>false</overWriteReleases>
+              <overWriteSnapshots>false</overWriteSnapshots>
+              <overWriteIfNewer>true</overWriteIfNewer>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-dependency-plugin</artifactId>
+        <version>2.8</version>
+        <executions>
+          <execution>
+            <id>copy</id>
+            <phase>initialize</phase>
+            <goals>
+              <goal>copy</goal>
+            </goals>
+            <configuration>
+              <artifactItems>
+                <artifactItem>
+                  <groupId>log4j</groupId>
+                  <artifactId>log4j</artifactId>
+                  <overWrite>false</overWrite>
+                </artifactItem>
+                <artifactItem>
+                  <groupId>org.apache.hadoop</groupId>
+                  <artifactId>hadoop-yarn-server-tests</artifactId>
+                  <version>${hadoop2.version}</version>
+                  <overWrite>false</overWrite>
+                </artifactItem>
+                <artifactItem>
+                  <groupId>org.apache.zookeeper</groupId>
+                  <artifactId>zookeeper</artifactId>
+                  <overWrite>false</overWrite>
+                </artifactItem>
+                <artifactItem>
+                  <groupId>org.slf4j</groupId>
+                  <artifactId>slf4j-api</artifactId>
+                  <overWrite>false</overWrite>
+                </artifactItem>
+                <artifactItem>
+                  <groupId>org.slf4j</groupId>
+                  <artifactId>slf4j-log4j12</artifactId>
+                  <overWrite>false</overWrite>
+                </artifactItem>
+              </artifactItems>
+              <outputDirectory>${basedir}/jars</outputDirectory>
+              <overWriteReleases>false</overWriteReleases>
+              <overWriteSnapshots>false</overWriteSnapshots>
+              <overWriteIfNewer>true</overWriteIfNewer>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
+</project>
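
Note how the two maven-dependency-plugin executions above populate
${basedir}/jars during the initialize phase: copy-dependencies pulls in the
transitive Hadoop 2.2.0 dependency closure, while the copy execution pins a
few extra artifacts (log4j, zookeeper, slf4j, hadoop-yarn-server-tests) that
the module does not declare as direct dependencies. The resulting jars/
directory is what the Bundle-ClassPath entries in MANIFEST.MF and the
bin.includes line in build.properties refer to. Any build that reaches the
initialize phase fills it; from the reactor root, for example (illustrative
invocation):

    mvn initialize -pl org.apache.hdt.hadoop2.release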

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/34799cec/org.apache.hdt.hadoop2.release/src/org/apache/hdt/hadoop2/release/HDFSClientRelease.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop2.release/src/org/apache/hdt/hadoop2/release/HDFSClientRelease.java b/org.apache.hdt.hadoop2.release/src/org/apache/hdt/hadoop2/release/HDFSClientRelease.java
new file mode 100644
index 0000000..72874da
--- /dev/null
+++ b/org.apache.hdt.hadoop2.release/src/org/apache/hdt/hadoop2/release/HDFSClientRelease.java
@@ -0,0 +1,235 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hdt.hadoop2.release;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hdt.core.hdfs.ResourceInformation;
+import org.apache.log4j.Logger;
+
+/**
+ * HDFS Client for Hadoop 2.2.0.
+ * 
+ * @author Srimanth Gunturi
+ */
+public class HDFSClientRelease extends org.apache.hdt.core.hdfs.HDFSClient {
+
+	private static Logger logger = Logger.getLogger(HDFSClientRelease.class);
+	private Configuration config;
+
+	public HDFSClientRelease() {
+		config = new Configuration();
+	}
+
+	private ResourceInformation getResourceInformation(FileStatus fileStatus) {
+		ResourceInformation fi = new ResourceInformation();
+		fi.setFolder(fileStatus.isDir());
+		fi.setGroup(fileStatus.getGroup());
+		fi.setLastAccessedTime(fileStatus.getAccessTime());
+		fi.setLastModifiedTime(fileStatus.getModificationTime());
+		fi.setName(fileStatus.getPath().getName());
+		fi.setOwner(fileStatus.getOwner());
+		fi.setPath(fileStatus.getPath().getParent() == null ? "/" : fileStatus.getPath().getParent().toString());
+		fi.setReplicationFactor(fileStatus.getReplication());
+		fi.setSize(fileStatus.getLen());
+		FsPermission fsPermission = fileStatus.getPermission();
+		updatePermissions(fi.getUserPermissions(), fsPermission.getUserAction());
+		updatePermissions(fi.getGroupPermissions(), fsPermission.getGroupAction());
+		updatePermissions(fi.getOtherPermissions(), fsPermission.getOtherAction());
+		return fi;
+	}
+
+	private void updatePermissions(ResourceInformation.Permissions permissions, FsAction action) {
+		permissions.read = action.implies(FsAction.READ);
+		permissions.write = action.implies(FsAction.WRITE);
+		permissions.execute = action.implies(FsAction.EXECUTE);
+	}
+	
+	protected FileSystem createFS(URI uri, String user) throws IOException, InterruptedException{
+		if(user==null)
+			return FileSystem.get(uri, config);
+		return FileSystem.get(uri, config, user);
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.apache.hdt.core.hdfs.HDFSClient#getResourceInformation(java.net.URI, java.lang.String)
+	 */
+	@Override
+	public ResourceInformation getResourceInformation(URI uri, String user) throws IOException, InterruptedException {
+		FileSystem fs = createFS(uri, user);
+		Path path = new Path(uri.getPath());
+		FileStatus fileStatus = null;
+		ResourceInformation fi = null;
+		try {
+			fileStatus = fs.getFileStatus(path);
+			fi = getResourceInformation(fileStatus);
+		} catch (FileNotFoundException fne) {
+			logger.info(fne.getMessage());
+			logger.debug(fne.getMessage(), fne);
+		}
+		return fi;
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.apache.hdt.core.hdfs.HDFSClient#setResourceInformation(java.net.URI,
+	 * org.apache.hdt.core.hdfs.ResourceInformation, java.lang.String)
+	 */
+	@Override
+	public void setResourceInformation(URI uri, ResourceInformation information, String user) throws IOException, InterruptedException {
+		FileSystem fs = createFS(uri, user);
+		Path path = new Path(uri.getPath());
+		if (!information.isFolder()) {
+			fs.setTimes(path, information.getLastModifiedTime(), information.getLastAccessedTime());
+		}
+		if (information.getOwner() != null || information.getGroup() != null)
+			fs.setOwner(path, information.getOwner(), information.getGroup());
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.apache.hdt.core.hdfs.HDFSClient#listResources(java.net.URI)
+	 */
+	@Override
+	public List<ResourceInformation> listResources(URI uri, String user) throws IOException, InterruptedException {
+		List<ResourceInformation> ris = null;
+		FileSystem fs = createFS(uri, user);
+		Path path = new Path(uri.getPath());
+		FileStatus[] listStatus = fs.listStatus(path);
+		if (listStatus != null) {
+			ris = new ArrayList<ResourceInformation>();
+			for (FileStatus ls : listStatus) {
+				ris.add(getResourceInformation(ls));
+			}
+		}
+		return ris;
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.apache.hdt.core.hdfs.HDFSClient#openInputStream(java.net.URI,
+	 * java.lang.String)
+	 */
+	@Override
+	public InputStream openInputStream(URI uri, String user) throws IOException, InterruptedException {
+		FileSystem fs = createFS(uri, user);
+		Path path = new Path(uri.getPath());
+		FSDataInputStream open = fs.open(path);
+		return open;
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.apache.hdt.core.hdfs.HDFSClient#createOutputStream(java.net.URI,
+	 * java.lang.String)
+	 */
+	@Override
+	public OutputStream createOutputStream(URI uri, String user) throws IOException, InterruptedException {
+		FileSystem fs = createFS(uri, user);
+		Path path = new Path(uri.getPath());
+		FSDataOutputStream outputStream = fs.create(path);
+		return outputStream;
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.apache.hdt.core.hdfs.HDFSClient#openOutputStream(java.net.URI,
+	 * java.lang.String)
+	 */
+	@Override
+	public OutputStream openOutputStream(URI uri, String user) throws IOException, InterruptedException {
+		FileSystem fs = createFS(uri, user);
+		Path path = new Path(uri.getPath());
+		// TODO. Temporary fix till Issue#3 is fixed.
+		FSDataOutputStream outputStream = fs.create(path);
+		return outputStream;
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.apache.hdt.core.hdfs.HDFSClient#mkdirs(java.net.URI,
+	 * java.lang.String)
+	 */
+	@Override
+	public boolean mkdirs(URI uri, String user) throws IOException, InterruptedException {
+		FileSystem fs = createFS(uri, user);
+		Path path = new Path(uri.getPath());
+		return fs.mkdirs(path);
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.apache.hdt.core.hdfs.HDFSClient#delete(java.net.URI,
+	 * java.lang.String)
+	 */
+	@Override
+	public void delete(URI uri, String user) throws IOException, InterruptedException {
+		FileSystem fs = createFS(uri, user);
+		Path path = new Path(uri.getPath());
+		fs.delete(path, true);
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.apache.hdt.core.hdfs.HDFSClient#getDefaultUserAndGroupIds()
+	 */
+	@Override
+	public List<String> getDefaultUserAndGroupIds() throws IOException {
+		List<String> idList = new ArrayList<String>();
+		UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
+		idList.add(currentUser.getShortUserName());
+		String[] groupIds = currentUser.getGroupNames();
+		if (groupIds != null) {
+			for (String groupId : groupIds) {
+				idList.add(groupId);
+			}
+		}
+		return idList;
+	}
+
+}
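
As a quick usage sketch of the client above: the NameNode address, port, and
user below are placeholders, and the ResourceInformation getters are assumed
to mirror the setters used in getResourceInformation().

    import java.net.URI;
    import java.util.List;

    import org.apache.hdt.core.hdfs.ResourceInformation;
    import org.apache.hdt.hadoop2.release.HDFSClientRelease;

    public class ListHdfsDirectory {
        public static void main(String[] args) throws Exception {
            HDFSClientRelease client = new HDFSClientRelease();
            URI uri = URI.create("hdfs://localhost:8020/tmp"); // placeholder cluster
            List<ResourceInformation> entries = client.listResources(uri, "hdfs");
            if (entries != null) { // listResources() returns null when listStatus() does
                for (ResourceInformation entry : entries) {
                    System.out.println((entry.isFolder() ? "d " : "- ") + entry.getName()
                            + " owner=" + entry.getOwner()
                            + " readable=" + entry.getUserPermissions().read);
                }
            }
        }
    }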


[12/27] git commit: HDT-56 : server.xmi is shared - Creating server.xmi in the workspace location rather than the bundle location - Using the HDFS manager's delete when an HDFS server is deleted

Posted by rs...@apache.org.
HDT-56 : server.xmi is shared - Creating server.xmi in the workspace location rather than the bundle location - Using the HDFS manager's delete when an HDFS server is deleted


Project: http://git-wip-us.apache.org/repos/asf/incubator-hdt/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hdt/commit/a7a89f49
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hdt/tree/a7a89f49
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hdt/diff/a7a89f49

Branch: refs/heads/hadoop-eclipse-merge
Commit: a7a89f4961f477b1c4a519e1e9741954368b6f15
Parents: 0835540
Author: Rahul Sharma <rs...@apache.org>
Authored: Fri May 16 09:56:07 2014 +0530
Committer: Rahul Sharma <rs...@apache.org>
Committed: Fri May 23 08:48:28 2014 +0530

----------------------------------------------------------------------
 .../src/org/apache/hdt/core/internal/HadoopManager.java        | 6 ++++--
 .../org/apache/hdt/core/internal/hdfs/HDFSMoveDeleteHook.java  | 3 +++
 2 files changed, 7 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/a7a89f49/org.apache.hdt.core/src/org/apache/hdt/core/internal/HadoopManager.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/HadoopManager.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/HadoopManager.java
index 937b171..125c9a2 100644
--- a/org.apache.hdt.core/src/org/apache/hdt/core/internal/HadoopManager.java
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/HadoopManager.java
@@ -53,7 +53,8 @@ public class HadoopManager {
 			loadServers();
 			if (servers == null) {
 				Bundle bundle = Platform.getBundle(Activator.BUNDLE_ID);
-				File serversFile = bundle.getBundleContext().getDataFile(MODEL_FILE_NAME);
+				File stateLocation = Platform.getStateLocation(bundle).toFile();
+				File serversFile = new File(stateLocation,MODEL_FILE_NAME);
 				Resource resource = new ResourceSetImpl().createResource(URI.createFileURI(serversFile.getPath()));
 				servers = HadoopFactory.eINSTANCE.createServers();
 				resource.getContents().add(servers);
@@ -64,7 +65,8 @@ public class HadoopManager {
 
 	private void loadServers() {
 		Bundle bundle = Platform.getBundle(Activator.BUNDLE_ID);
-		File serversFile = bundle.getBundleContext().getDataFile(MODEL_FILE_NAME);
+		File stateLocation = Platform.getStateLocation(bundle).toFile();
+		File serversFile = new File(stateLocation,MODEL_FILE_NAME);
 		if (serversFile.exists()) {
 			Resource resource = new ResourceSetImpl().getResource(URI.createFileURI(serversFile.getPath()), true);
 			servers = (Servers) resource.getContents().get(0);
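
The practical effect of the change, assuming the default Equinox layout:
BundleContext.getDataFile() resolves into the bundle's data area under the
Eclipse configuration directory, which every workspace opened with that
installation shares, while Platform.getStateLocation() resolves under
<workspace>/.metadata/.plugins/org.apache.hdt.core/, so each workspace keeps
its own server model. Side by side:

    // per-install (old): one servers file shared across workspaces
    File shared = bundle.getBundleContext().getDataFile(MODEL_FILE_NAME);
    // per-workspace (new): stored in the workspace metadata area
    File perWorkspace = new File(Platform.getStateLocation(bundle).toFile(), MODEL_FILE_NAME);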

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/a7a89f49/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSMoveDeleteHook.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSMoveDeleteHook.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSMoveDeleteHook.java
index 0ca0df4..f4fb099 100644
--- a/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSMoveDeleteHook.java
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSMoveDeleteHook.java
@@ -18,6 +18,7 @@
 
 package org.apache.hdt.core.internal.hdfs;
 
+import org.apache.hdt.core.internal.model.HDFSServer;
 import org.eclipse.core.resources.IFile;
 import org.eclipse.core.resources.IFolder;
 import org.eclipse.core.resources.IProject;
@@ -77,6 +78,8 @@ public class HDFSMoveDeleteHook implements IMoveDeleteHook {
 				throw new RuntimeException(
 						"Deletion of HDFS project root folder is not supported. To remove project uncheck the \'Delete project contents on disk\' checkbox");
 			}
+			HDFSServer server = HDFSManager.INSTANCE.getServer(project.getLocationURI().toString());
+			HDFSManager.INSTANCE.deleteServer(server);
 		}
 		return false;
 	}
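
With this hook in place, deleting an HDFS project from the workspace also
removes the corresponding HDFSServer entry through HDFSManager, so the servers
model persisted in server.xmi (see the HadoopManager change above) no longer
accumulates stale entries for projects that have been deleted.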