Posted to commits@hdt.apache.org by rs...@apache.org on 2013/07/25 06:29:17 UTC

[1/8] HDT-32: Merge the code base of Hadoop-Eclipse project into HDT. Contributed by Srimanth Gunturi

Updated Branches:
  refs/heads/hadoop-eclipse-merge [created] 63bec2607


http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/HDFSPropertySection.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/HDFSPropertySection.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/HDFSPropertySection.java
new file mode 100644
index 0000000..26b2567
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/HDFSPropertySection.java
@@ -0,0 +1,182 @@
+package org.apache.hdt.ui.internal.hdfs;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hdt.core.internal.hdfs.HDFSFileStore;
+import org.apache.hdt.core.internal.hdfs.HDFSFileSystem;
+import org.apache.log4j.Logger;
+import org.eclipse.core.filesystem.EFS;
+import org.eclipse.core.resources.IResource;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.jface.viewers.ISelection;
+import org.eclipse.jface.viewers.IStructuredSelection;
+import org.eclipse.jface.viewers.StructuredSelection;
+import org.eclipse.swt.custom.StackLayout;
+import org.eclipse.swt.events.ControlAdapter;
+import org.eclipse.swt.events.ControlEvent;
+import org.eclipse.swt.layout.FillLayout;
+import org.eclipse.swt.layout.FormAttachment;
+import org.eclipse.swt.layout.FormData;
+import org.eclipse.swt.layout.GridData;
+import org.eclipse.swt.layout.GridLayout;
+import org.eclipse.swt.widgets.Composite;
+import org.eclipse.ui.IWorkbenchPart;
+import org.eclipse.ui.views.properties.IPropertySheetEntry;
+import org.eclipse.ui.views.properties.PropertySheetPage;
+import org.eclipse.ui.views.properties.PropertySheetSorter;
+import org.eclipse.ui.views.properties.tabbed.AbstractPropertySection;
+import org.eclipse.ui.views.properties.tabbed.TabbedPropertySheetPage;
+
+public class HDFSPropertySection extends AbstractPropertySection {
+
+	private static final Logger logger = Logger.getLogger(HDFSPropertySection.class);
+
+	private static class HDFSPropertySheetPage extends PropertySheetPage {
+		public HDFSPropertySheetPage() {
+			final List<String> namesList = new ArrayList<String>();
+			namesList.add("User");
+			namesList.add("Group");
+			namesList.add("Is downloaded");
+			namesList.add("Effective Permissions");
+			namesList.add("User Permissions");
+			namesList.add("Group Permissions");
+			namesList.add("Other Permissions");
+			setSorter(new PropertySheetSorter() {
+				@Override
+				public int compare(IPropertySheetEntry entryA, IPropertySheetEntry entryB) {
+					int indexA = namesList.indexOf(entryA.getCategory());
+					int indexB = namesList.indexOf(entryB.getCategory());
+					return indexA - indexB;
+				}
+				@Override
+				public int compareCategories(String categoryA, String categoryB) {
+					int indexA = namesList.indexOf(categoryA);
+					int indexB = namesList.indexOf(categoryB);
+					return indexA - indexB;
+				}
+			});
+		}
+	}
+
+	private Composite topComposite;
+	private Composite nonHDFSComposite;
+	private StackLayout stackLayout;
+	private Composite hdfsResourceTopComposite;
+	private HDFSFileStore store;
+	private Composite hdfsResourceComposite;
+	private Composite hdfsPropertiesComposite;
+	private PropertySheetPage propertySheetPage;
+
+	public HDFSPropertySection() {
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.ui.views.properties.tabbed.AbstractPropertySection#createControls
+	 * (org.eclipse.swt.widgets.Composite,
+	 * org.eclipse.ui.views.properties.tabbed.TabbedPropertySheetPage)
+	 */
+	@Override
+	public void createControls(Composite parent, TabbedPropertySheetPage page) {
+		topComposite = page.getWidgetFactory().createComposite(parent);
+		stackLayout = new StackLayout();
+		topComposite.setLayout(stackLayout);
+		// no HDFS
+		nonHDFSComposite = page.getWidgetFactory().createComposite(topComposite);
+		nonHDFSComposite.setLayout(new GridLayout());
+		page.getWidgetFactory().createLabel(nonHDFSComposite, "Selection is not an HDFS resource").setLayoutData(new GridData());
+		// Project
+		hdfsResourceTopComposite = page.getWidgetFactory().createComposite(topComposite);
+		final GridLayout hdfsResourceLayout = new GridLayout(2, false);
+		hdfsResourceLayout.marginLeft = hdfsResourceLayout.marginBottom = hdfsResourceLayout.marginWidth = hdfsResourceLayout.marginHeight = hdfsResourceLayout.marginTop = hdfsResourceLayout.marginRight = 0;
+		hdfsResourceTopComposite.setLayout(hdfsResourceLayout);
+		hdfsResourceComposite = page.getWidgetFactory().createComposite(hdfsResourceTopComposite);
+		GridData gd = new GridData(GridData.FILL_VERTICAL | GridData.GRAB_VERTICAL);
+		gd.widthHint = 0; // TODO increase width when graphs available.
+		hdfsResourceComposite.setLayoutData(gd);
+		hdfsPropertiesComposite = page.getWidgetFactory().createComposite(hdfsResourceTopComposite);
+		hdfsPropertiesComposite.setLayoutData(new GridData(GridData.FILL_BOTH));
+		hdfsPropertiesComposite.setLayout(new FillLayout());
+		createPropertiesPage(hdfsPropertiesComposite, page);
+	}
+
+	/**
+	 * @param parent
+	 * @param atabbedPropertySheetPage
+	 */
+	private void createPropertiesPage(Composite parent, final TabbedPropertySheetPage atabbedPropertySheetPage) {
+		super.createControls(parent, atabbedPropertySheetPage);
+		Composite composite = getWidgetFactory().createFlatFormComposite(parent);
+		propertySheetPage = new HDFSPropertySheetPage();
+		propertySheetPage.createControl(composite);
+		FormData data = new FormData();
+		data.left = new FormAttachment(0, 0);
+		data.right = new FormAttachment(100, 0);
+		data.top = new FormAttachment(0, 0);
+		data.bottom = new FormAttachment(100, 0);
+		propertySheetPage.getControl().setLayoutData(data);
+		propertySheetPage.getControl().addControlListener(new ControlAdapter() {
+			public void controlResized(ControlEvent e) {
+				atabbedPropertySheetPage.resizeScrolledComposite();
+			}
+		});
+		propertySheetPage.selectionChanged(getPart(), getSelection());
+	}
+
+	@Override
+	public void setInput(IWorkbenchPart part, ISelection selection) {
+		this.store = null;
+		Composite c = nonHDFSComposite;
+		if (!selection.isEmpty()) {
+			Object firstElement = ((IStructuredSelection) selection).getFirstElement();
+			if (firstElement instanceof IResource) {
+				IResource resource = (IResource) firstElement;
+				try {
+					if (resource.getLocationURI() != null && HDFSFileSystem.SCHEME.equals(resource.getLocationURI().getScheme())) {
+						c = hdfsResourceTopComposite;
+						store = (HDFSFileStore) EFS.getStore(resource.getLocationURI());
+					}
+				} catch (CoreException e) {
+					logger.warn(e.getMessage(), e);
+				}
+			}
+		}
+		selection = this.store == null ? StructuredSelection.EMPTY : new StructuredSelection(this.store); // StructuredSelection rejects a null element
+		if (this.propertySheetPage != null)
+			this.propertySheetPage.selectionChanged(part, selection);
+		super.setInput(part, selection);
+		stackLayout.topControl = c;
+		topComposite.layout();
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.ui.views.properties.tabbed.AbstractPropertySection#dispose()
+	 */
+	@Override
+	public void dispose() {
+		super.dispose();
+		if (this.propertySheetPage != null) {
+			this.propertySheetPage.dispose();
+			this.propertySheetPage = null;
+		}
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.ui.views.properties.tabbed.AbstractPropertySection#refresh()
+	 */
+	@Override
+	public void refresh() {
+		super.refresh();
+		if (this.propertySheetPage != null)
+			this.propertySheetPage.refresh();
+	}
+}

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/NewHDFSServerAction.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/NewHDFSServerAction.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/NewHDFSServerAction.java
new file mode 100644
index 0000000..327acce
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/NewHDFSServerAction.java
@@ -0,0 +1,62 @@
+package org.apache.hdt.ui.internal.hdfs;
+
+import org.eclipse.jface.action.Action;
+import org.eclipse.jface.action.IAction;
+import org.eclipse.jface.viewers.ISelection;
+import org.eclipse.jface.wizard.WizardDialog;
+import org.eclipse.swt.widgets.Shell;
+import org.eclipse.ui.IWorkbenchWindow;
+import org.eclipse.ui.IWorkbenchWindowActionDelegate;
+
+public class NewHDFSServerAction extends Action implements IWorkbenchWindowActionDelegate {
+
+	private Shell shell;
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.eclipse.ui.IActionDelegate#run(org.eclipse.jface.action.IAction)
+	 */
+	@Override
+	public void run(IAction action) {
+		NewHDFSWizard wizard = new NewHDFSWizard();
+		WizardDialog dialog = new WizardDialog(shell, wizard);
+		dialog.open();
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.ui.IActionDelegate#selectionChanged(org.eclipse.jface.action
+	 * .IAction, org.eclipse.jface.viewers.ISelection)
+	 */
+	@Override
+	public void selectionChanged(IAction action, ISelection selection) {
+		// TODO Auto-generated method stub
+
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.eclipse.ui.IWorkbenchWindowActionDelegate#dispose()
+	 */
+	@Override
+	public void dispose() {
+		// TODO Auto-generated method stub
+
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.eclipse.ui.IWorkbenchWindowActionDelegate#init(org.eclipse.ui.
+	 * IWorkbenchWindow)
+	 */
+	@Override
+	public void init(IWorkbenchWindow window) {
+		shell = window.getShell();
+	}
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/NewHDFSServerWizardPage.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/NewHDFSServerWizardPage.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/NewHDFSServerWizardPage.java
new file mode 100644
index 0000000..f5eca4d
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/NewHDFSServerWizardPage.java
@@ -0,0 +1,242 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hdt.ui.internal.hdfs;
+
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.StringTokenizer;
+
+import org.apache.hdt.core.hdfs.HDFSClient;
+import org.apache.hdt.core.internal.hdfs.HDFSManager;
+import org.apache.hdt.ui.Activator;
+import org.apache.log4j.Logger;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.jface.wizard.WizardPage;
+import org.eclipse.swt.SWT;
+import org.eclipse.swt.events.ModifyEvent;
+import org.eclipse.swt.events.ModifyListener;
+import org.eclipse.swt.events.SelectionAdapter;
+import org.eclipse.swt.events.SelectionEvent;
+import org.eclipse.swt.layout.GridData;
+import org.eclipse.swt.layout.GridLayout;
+import org.eclipse.swt.widgets.Button;
+import org.eclipse.swt.widgets.Combo;
+import org.eclipse.swt.widgets.Composite;
+import org.eclipse.swt.widgets.Display;
+import org.eclipse.swt.widgets.Group;
+import org.eclipse.swt.widgets.Label;
+import org.eclipse.swt.widgets.Text;
+
+public class NewHDFSServerWizardPage extends WizardPage {
+
+	private static final Logger logger = Logger.getLogger(NewHDFSServerWizardPage.class);
+	private Combo serverCombo;
+	private Text serverNameText;
+
+	private String hdfsServerLocation = null;
+	private String hdfsServerName = null;
+	private boolean overrideDefaultSecurity = false;
+	private String userId = null;
+	private List<String> groupIds = new ArrayList<String>();
+
+	protected NewHDFSServerWizardPage() {
+		super("HDFS Server Location");
+		setTitle("HDFS Server Location");
+		setDescription("Enter the name and location of the HDFS server");
+	}
+
+	@Override
+	public void createControl(Composite parent) {
+		Composite c = new Composite(parent, SWT.NONE);
+		final GridLayout layout = new GridLayout(2, false);
+		layout.marginLeft = 5;
+		layout.marginRight = 20;
+		c.setLayout(layout);
+
+		// Add Name
+		Label nameLabel = new Label(c, SWT.NONE);
+		nameLabel.setText("Name:");
+		serverNameText = new Text(c, SWT.BORDER | SWT.SINGLE);
+		serverNameText.setLayoutData(new GridData(GridData.FILL_HORIZONTAL));
+		serverNameText.addModifyListener(new ModifyListener() {
+			@Override
+			public void modifyText(ModifyEvent e) {
+				String nameText = serverNameText.getText();
+				if (nameText != null && nameText.trim().length() > 0) {
+					setHdfsServerName(nameText);
+				} else {
+					setHdfsServerName(null);
+				}
+				NewHDFSServerWizardPage.this.validate();
+			}
+		});
+
+		// Add Combo
+		Label comboLabel = new Label(c, SWT.NONE);
+		comboLabel.setText("URL:");
+		serverCombo = new Combo(c, SWT.BORDER);
+		serverCombo.setLayoutData(new GridData(GridData.FILL_HORIZONTAL));
+		serverCombo.addModifyListener(new ModifyListener() {
+			@Override
+			public void modifyText(ModifyEvent e) {
+				String urlText = serverCombo.getText();
+				if (urlText != null && urlText.trim().length() > 0) {
+					try {
+						new URI(urlText);
+						setHdfsServerLocation(urlText);
+					} catch (URISyntaxException e1) {
+						setHdfsServerLocation(null);
+					}
+				} else {
+					// Clearing the field should also clear any previously accepted location
+					setHdfsServerLocation(null);
+				}
+				NewHDFSServerWizardPage.this.validate();
+			}
+		});
+
+		// Add example
+		new Label(c, SWT.NONE);
+		Label exampleLabel = new Label(c, SWT.NONE);
+		exampleLabel.setText("Example: hdfs://hdfs.server.hostname:8020");
+		exampleLabel.setForeground(Display.getCurrent().getSystemColor(SWT.COLOR_DARK_GRAY));
+		// Security
+		Group securityGroup = new Group(c, SWT.SHADOW_ETCHED_IN);
+		GridData gd = new GridData(GridData.FILL_HORIZONTAL);
+		gd.horizontalSpan = 2;
+		securityGroup.setLayoutData(gd);
+		securityGroup.setText("Security");
+		securityGroup.setLayout(new GridLayout(2, false));
+		// Override security checkbox
+		List<String> userAndGroupIds = getUserAndGroupIds();
+		final Button overrideSecurityCheckbox = new Button(securityGroup, SWT.CHECK);
+		gd = new GridData();
+		gd.horizontalSpan = 2;
+		overrideSecurityCheckbox.setText("Override default security");
+		overrideSecurityCheckbox.setLayoutData(gd);
+		overrideSecurityCheckbox.setSelection(overrideDefaultSecurity);
+		// User ID
+		new Label(securityGroup, SWT.NONE).setText("User ID:");
+		final Text userIdText = new Text(securityGroup, SWT.BORDER | SWT.SINGLE);
+		userIdText.setLayoutData(new GridData(GridData.FILL_HORIZONTAL));
+		userIdText.setEnabled(overrideDefaultSecurity);
+		userIdText.addModifyListener(new ModifyListener() {
+			@Override
+			public void modifyText(ModifyEvent e) {
+				userId = userIdText.getText();
+				if (userId != null && userId.trim().length() < 1)
+					userId = null;
+				validate();
+			}
+		});
+		if (userAndGroupIds != null && userAndGroupIds.size() > 0)
+			userIdText.setText(userAndGroupIds.get(0));
+		// Group IDs
+		Label groupIdsLabel = new Label(securityGroup, SWT.NONE);
+		groupIdsLabel.setText("Group IDs:");
+		groupIdsLabel.setLayoutData(new GridData(GridData.VERTICAL_ALIGN_BEGINNING));
+		final org.eclipse.swt.widgets.List groupsList = new org.eclipse.swt.widgets.List(securityGroup, SWT.BORDER);
+		groupsList.setLayoutData(new GridData(GridData.FILL_BOTH));
+		groupsList.setEnabled(overrideDefaultSecurity);
+		if (userAndGroupIds != null && userAndGroupIds.size() > 1)
+			for (String groupId : userAndGroupIds.subList(1, userAndGroupIds.size()))
+				groupsList.add(groupId);
+		overrideSecurityCheckbox.addSelectionListener(new SelectionAdapter() {
+			@Override
+			public void widgetSelected(SelectionEvent e) {
+				overrideDefaultSecurity = overrideSecurityCheckbox.getSelection();
+				if (overrideDefaultSecurity) {
+					userId = userIdText.getText();
+					String[] gids = groupsList.getItems();
+					if (gids != null) {
+						for (String gid : gids) {
+							groupIds.add(gid);
+						}
+					}
+				} else {
+					userId = null;
+					groupIds.clear();
+				}
+				userIdText.setEnabled(overrideDefaultSecurity);
+				// We do not support selection/add/remove of groups
+				// groupsList.setEnabled(overrideDefaultSecurity);
+			}
+		});
+
+		// Populate
+		String currentUrls = Activator.getDefault().getPreferenceStore().getString(Activator.PREFERENCE_HDFS_URLS);
+		StringTokenizer st = new StringTokenizer(currentUrls, "\r\n", false);
+		while (st.hasMoreTokens()) {
+			serverCombo.add(st.nextToken());
+		}
+		setPageComplete(false);
+		this.setControl(c);
+	}
+
+	private List<String> getUserAndGroupIds() {
+		List<String> list = new ArrayList<String>();
+		try {
+			HDFSClient client = HDFSManager.INSTANCE.getClient(hdfsServerLocation);
+			List<String> defaultUserAndGroupIds = client.getDefaultUserAndGroupIds();
+			if (defaultUserAndGroupIds != null)
+				list.addAll(defaultUserAndGroupIds);
+		} catch (CoreException e) {
+			logger.warn("Unable to determine default user and groups", e);
+		} catch (IOException e) {
+			logger.warn("Unable to determine default user and groups", e);
+		} catch (InterruptedException e) {
+			logger.warn("Unable to determine default user and groups", e);
+		}
+		return list;
+	}
+
+	/**
+	 * Marks the page complete when a name, a valid location and, if security is overridden, a user ID are set.
+	 */
+	protected void validate() {
+		setPageComplete(getHdfsServerName() != null && getHdfsServerLocation() != null && (!overrideDefaultSecurity || userId != null));
+	}
+
+	public String getHdfsServerLocation() {
+		return hdfsServerLocation;
+	}
+
+	public void setHdfsServerLocation(String hdfsServerLocation) {
+		this.hdfsServerLocation = hdfsServerLocation;
+	}
+
+	public void setHdfsServerName(String hdfsServerName) {
+		this.hdfsServerName = hdfsServerName;
+	}
+
+	public String getHdfsServerName() {
+		return hdfsServerName;
+	}
+
+	public boolean isOverrideDefaultSecurity() {
+		return overrideDefaultSecurity;
+	}
+
+	public String getUserId() {
+		return userId;
+	}
+
+	public List<String> getGroupIds() {
+		return groupIds;
+	}
+}

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/NewHDFSWizard.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/NewHDFSWizard.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/NewHDFSWizard.java
new file mode 100644
index 0000000..545ea3a
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/NewHDFSWizard.java
@@ -0,0 +1,96 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hdt.ui.internal.hdfs;
+
+import java.net.URI;
+import java.net.URISyntaxException;
+
+import org.apache.hdt.core.internal.hdfs.HDFSManager;
+import org.apache.hdt.ui.Activator;
+import org.apache.log4j.Logger;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.core.runtime.Status;
+import org.eclipse.core.runtime.jobs.Job;
+import org.eclipse.jface.preference.IPreferenceStore;
+import org.eclipse.jface.viewers.IStructuredSelection;
+import org.eclipse.jface.wizard.Wizard;
+import org.eclipse.ui.INewWizard;
+import org.eclipse.ui.IWorkbench;
+
+public class NewHDFSWizard extends Wizard implements INewWizard {
+
+	private static Logger logger = Logger.getLogger(NewHDFSWizard.class);
+	private NewHDFSServerWizardPage serverLocationWizardPage = null;
+
+	public NewHDFSWizard() {
+		// TODO Auto-generated constructor stub
+	}
+
+	@Override
+	public void init(IWorkbench workbench, IStructuredSelection selection) {
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.eclipse.jface.wizard.Wizard#addPages()
+	 */
+	@Override
+	public void addPages() {
+		super.addPages();
+		if (serverLocationWizardPage == null) {
+			serverLocationWizardPage = new NewHDFSServerWizardPage();
+		}
+		addPage(serverLocationWizardPage);
+	}
+
+	@Override
+	public boolean performFinish() {
+		if (serverLocationWizardPage != null) {
+			String hdfsUrl = serverLocationWizardPage.getHdfsServerLocation();
+			if (hdfsUrl != null) {
+				IPreferenceStore ps = Activator.getDefault().getPreferenceStore();
+				String currentUrls = ps.getString(Activator.PREFERENCE_HDFS_URLS);
+				if (currentUrls.indexOf(hdfsUrl + "\r\n") < 0) {
+					currentUrls = hdfsUrl + "\r\n" + currentUrls;
+					ps.setValue(Activator.PREFERENCE_HDFS_URLS, currentUrls);
+				}
+
+				Job j = new Job("Creating HDFS project [" + serverLocationWizardPage.getHdfsServerName() + "]") {
+					protected org.eclipse.core.runtime.IStatus run(org.eclipse.core.runtime.IProgressMonitor monitor) {
+						try {
+							HDFSManager.INSTANCE.createServer(serverLocationWizardPage.getHdfsServerName(), new URI(serverLocationWizardPage
+									.getHdfsServerLocation()), serverLocationWizardPage.isOverrideDefaultSecurity() ? serverLocationWizardPage.getUserId()
+									: null, serverLocationWizardPage.isOverrideDefaultSecurity() ? serverLocationWizardPage.getGroupIds() : null);
+						} catch (CoreException e) {
+							logger.warn(e.getMessage(), e);
+							return e.getStatus();
+						} catch (URISyntaxException e) {
+							logger.warn(e.getMessage(), e);
+						}
+						return Status.OK_STATUS;
+					};
+				};
+				j.schedule();
+				return true;
+			}
+		}
+		return false;
+	}
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/PropertyTypeMapper.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/PropertyTypeMapper.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/PropertyTypeMapper.java
new file mode 100644
index 0000000..6263b7a
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/PropertyTypeMapper.java
@@ -0,0 +1,29 @@
+package org.apache.hdt.ui.internal.hdfs;
+
+import org.apache.hdt.core.internal.hdfs.HDFSFileSystem;
+import org.apache.log4j.Logger;
+import org.eclipse.core.filesystem.EFS;
+import org.eclipse.core.resources.IResource;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.ui.views.properties.tabbed.ITypeMapper;
+
+public class PropertyTypeMapper implements ITypeMapper {
+	private static final Logger logger = Logger.getLogger(PropertyTypeMapper.class);
+
+	@Override
+	public Class mapType(Object object) {
+		if (object instanceof IResource) {
+			IResource resource = (IResource) object;
+			if (resource.getLocationURI() != null && HDFSFileSystem.SCHEME.equals(resource.getLocationURI().getScheme())) {
+				// This is a HDFS resource - only show the HDFS tab
+				try {
+					return EFS.getStore(resource.getLocationURI()).getClass();
+				} catch (CoreException e) {
+					logger.warn(e.getMessage(), e);
+				}
+			}
+		}
+		return object.getClass();
+	}
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/ReconnectAction.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/ReconnectAction.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/ReconnectAction.java
new file mode 100644
index 0000000..947637c
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/ReconnectAction.java
@@ -0,0 +1,88 @@
+package org.apache.hdt.ui.internal.hdfs;
+
+import java.util.Iterator;
+
+import org.apache.hdt.core.internal.hdfs.HDFSManager;
+import org.apache.hdt.core.internal.model.HDFSServer;
+import org.apache.hdt.core.internal.model.ServerStatus;
+import org.apache.log4j.Logger;
+import org.eclipse.core.resources.IProject;
+import org.eclipse.jface.action.IAction;
+import org.eclipse.jface.viewers.ISelection;
+import org.eclipse.jface.viewers.IStructuredSelection;
+import org.eclipse.ui.IObjectActionDelegate;
+import org.eclipse.ui.IWorkbenchPart;
+
+public class ReconnectAction implements IObjectActionDelegate {
+
+	private final static Logger logger = Logger.getLogger(ReconnectAction.class);
+	private ISelection selection;
+	private IWorkbenchPart targetPart;
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.eclipse.ui.IActionDelegate#run(org.eclipse.jface.action.IAction)
+	 */
+	@Override
+	public void run(IAction action) {
+		if (this.selection != null && !this.selection.isEmpty()) {
+			IStructuredSelection sSelection = (IStructuredSelection) this.selection;
+			@SuppressWarnings("rawtypes")
+			Iterator itr = sSelection.iterator();
+			while (itr.hasNext()) {
+				Object object = itr.next();
+				if (object instanceof IProject) {
+					IProject project = (IProject) object;
+					HDFSManager.reconnectProject(project);
+				}
+			}
+		}
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.ui.IActionDelegate#selectionChanged(org.eclipse.jface.action
+	 * .IAction, org.eclipse.jface.viewers.ISelection)
+	 */
+	@Override
+	public void selectionChanged(IAction action, ISelection selection) {
+		this.selection = selection;
+		boolean enabled = true;
+		if (this.selection != null && !this.selection.isEmpty()) {
+			IStructuredSelection sSelection = (IStructuredSelection) this.selection;
+			@SuppressWarnings("rawtypes")
+			Iterator itr = sSelection.iterator();
+			while (itr.hasNext()) {
+				Object object = itr.next();
+				if (object instanceof IProject) {
+					IProject r = (IProject) object;
+					try {
+						HDFSServer server = HDFSManager.INSTANCE.getServer(r.getLocationURI().toString());
+						enabled = server == null ? false : server.getStatusCode() == ServerStatus.DISCONNECTED_VALUE;
+					} catch (Throwable t) {
+						enabled = false;
+					}
+				} else
+					enabled = false;
+			}
+		} else
+			enabled = false;
+		action.setEnabled(enabled);
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.ui.IObjectActionDelegate#setActivePart(org.eclipse.jface.
+	 * action.IAction, org.eclipse.ui.IWorkbenchPart)
+	 */
+	@Override
+	public void setActivePart(IAction action, IWorkbenchPart targetPart) {
+		this.targetPart = targetPart;
+	}
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/UploadResourceAction.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/UploadResourceAction.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/UploadResourceAction.java
new file mode 100644
index 0000000..b788063
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/UploadResourceAction.java
@@ -0,0 +1,123 @@
+package org.apache.hdt.ui.internal.hdfs;
+
+import java.util.Iterator;
+
+import org.apache.hdt.core.hdfs.ResourceInformation.Permissions;
+import org.apache.hdt.core.internal.hdfs.HDFSFileStore;
+import org.apache.hdt.core.internal.hdfs.UploadFileJob;
+import org.apache.log4j.Logger;
+import org.eclipse.core.filesystem.EFS;
+import org.eclipse.core.resources.IFolder;
+import org.eclipse.core.resources.IResource;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.jface.action.IAction;
+import org.eclipse.jface.dialogs.MessageDialog;
+import org.eclipse.jface.viewers.ISelection;
+import org.eclipse.jface.viewers.IStructuredSelection;
+import org.eclipse.ui.IObjectActionDelegate;
+import org.eclipse.ui.IWorkbenchPart;
+
+public class UploadResourceAction implements IObjectActionDelegate {
+
+	private final static Logger logger = Logger.getLogger(UploadResourceAction.class);
+	private ISelection selection;
+	private IWorkbenchPart targetPart;
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.eclipse.ui.IActionDelegate#run(org.eclipse.jface.action.IAction)
+	 */
+	@Override
+	public void run(IAction action) {
+		if (this.selection != null && !this.selection.isEmpty()) {
+			IStructuredSelection sSelection = (IStructuredSelection) this.selection;
+			@SuppressWarnings("rawtypes")
+			Iterator itr = sSelection.iterator();
+			while (itr.hasNext()) {
+				Object object = itr.next();
+				if (object instanceof IResource) {
+					IResource r = (IResource) object;
+					uploadResource(r);
+				}
+			}
+		}
+	}
+
+	/**
+	 * @param r
+	 */
+	private void uploadResource(IResource r) {
+		try {
+			switch (r.getType()) {
+			case IResource.FILE:
+				UploadFileJob ufj = new UploadFileJob(r);
+				ufj.schedule();
+				break;
+			case IResource.FOLDER:
+				IFolder folder = (IFolder) r;
+				IResource[] members = folder.members();
+				if (members != null) {
+					for (int mc = 0; mc < members.length; mc++) {
+						uploadResource(members[mc]);
+					}
+				}
+			}
+		} catch (CoreException e) {
+			MessageDialog.openError(targetPart.getSite().getShell(), "Upload HDFS Resources", "Error uploading resource to " + r.getLocationURI() + ": "
+					+ e.getMessage());
+			logger.warn(e.getMessage(), e);
+		}
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.ui.IActionDelegate#selectionChanged(org.eclipse.jface.action
+	 * .IAction, org.eclipse.jface.viewers.ISelection)
+	 */
+	@Override
+	public void selectionChanged(IAction action, ISelection selection) {
+		this.selection = selection;
+		boolean enabled = true;
+		if (this.selection != null && !this.selection.isEmpty()) {
+			IStructuredSelection sSelection = (IStructuredSelection) this.selection;
+			@SuppressWarnings("rawtypes")
+			Iterator itr = sSelection.iterator();
+			while (itr.hasNext()) {
+				Object object = itr.next();
+				if (object instanceof IResource) {
+					IResource r = (IResource) object;
+					try {
+						HDFSFileStore store = (HDFSFileStore) EFS.getStore(r.getLocationURI());
+						Permissions effectivePermissions = store.getEffectivePermissions();
+						if (enabled && effectivePermissions != null && !effectivePermissions.write)
+							enabled = false;
+						if (enabled)
+							enabled = store.isLocalFile();
+					} catch (Throwable t) {
+						enabled = false;
+					}
+				} else
+					enabled = false;
+			}
+		} else
+			enabled = false;
+		action.setEnabled(enabled);
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.ui.IObjectActionDelegate#setActivePart(org.eclipse.jface.
+	 * action.IAction, org.eclipse.ui.IWorkbenchPart)
+	 */
+	@Override
+	public void setActivePart(IAction action, IWorkbenchPart targetPart) {
+		this.targetPart = targetPart;
+
+	}
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/DeleteAction.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/DeleteAction.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/DeleteAction.java
new file mode 100644
index 0000000..02897e0
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/DeleteAction.java
@@ -0,0 +1,119 @@
+package org.apache.hdt.ui.internal.zookeeper;
+
+import java.io.IOException;
+import java.util.Iterator;
+
+import org.apache.hdt.core.internal.model.ZNode;
+import org.apache.hdt.core.internal.model.ZooKeeperServer;
+import org.apache.hdt.core.internal.zookeeper.ZooKeeperManager;
+import org.apache.hdt.core.zookeeper.ZooKeeperClient;
+import org.apache.log4j.Logger;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.jface.action.IAction;
+import org.eclipse.jface.viewers.ISelection;
+import org.eclipse.jface.viewers.IStructuredSelection;
+import org.eclipse.ui.IObjectActionDelegate;
+import org.eclipse.ui.IWorkbenchPart;
+import org.eclipse.ui.navigator.resources.ProjectExplorer;
+
+public class DeleteAction implements IObjectActionDelegate {
+
+	private final static Logger logger = Logger.getLogger(DeleteAction.class);
+	private ISelection selection;
+	private IWorkbenchPart targetPart;
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.eclipse.ui.IActionDelegate#run(org.eclipse.jface.action.IAction)
+	 */
+	@Override
+	public void run(IAction action) {
+		if (this.selection != null && !this.selection.isEmpty()) {
+			IStructuredSelection sSelection = (IStructuredSelection) this.selection;
+			@SuppressWarnings("rawtypes")
+			Iterator itr = sSelection.iterator();
+			while (itr.hasNext()) {
+				Object object = itr.next();
+				if (object instanceof ZooKeeperServer) {
+					ZooKeeperServer r = (ZooKeeperServer) object;
+					if (logger.isDebugEnabled())
+						logger.debug("Deleting: " + r);
+					try {
+						ZooKeeperManager.INSTANCE.disconnect(r);
+					} finally {
+						try {
+							ZooKeeperManager.INSTANCE.delete(r);
+						} catch (CoreException e) {
+							logger.error(e.getMessage());
+						}
+					}
+					if (logger.isDebugEnabled())
+						logger.debug("Deleted: " + r);
+					if (targetPart instanceof ProjectExplorer) {
+						ProjectExplorer pe = (ProjectExplorer) targetPart;
+						pe.getCommonViewer().refresh();
+					}
+				} else if (object instanceof ZNode) {
+					ZNode zkn = (ZNode) object;
+					if (logger.isDebugEnabled())
+						logger.debug("Deleting: " + zkn);
+					try {
+						ZooKeeperClient client = ZooKeeperManager.INSTANCE.getClient(zkn.getServer());
+						client.delete(zkn);
+					} catch (CoreException e) {
+						logger.error(e.getMessage(), e);
+					} catch (IOException e) {
+						logger.error(e.getMessage(), e);
+					} catch (InterruptedException e) {
+						logger.error(e.getMessage(), e);
+					}
+				}
+			}
+		}
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.ui.IActionDelegate#selectionChanged(org.eclipse.jface.action
+	 * .IAction, org.eclipse.jface.viewers.ISelection)
+	 */
+	@Override
+	public void selectionChanged(IAction action, ISelection selection) {
+		this.selection = selection;
+		boolean enabled = true;
+		if (this.selection != null && !this.selection.isEmpty()) {
+			IStructuredSelection sSelection = (IStructuredSelection) this.selection;
+			@SuppressWarnings("rawtypes")
+			Iterator itr = sSelection.iterator();
+			while (itr.hasNext()) {
+				Object object = itr.next();
+				enabled = false;
+				if (object instanceof ZooKeeperServer) {
+					ZooKeeperServer server = (ZooKeeperServer) object;
+					enabled = server != null;
+				} else if (object instanceof ZNode) {
+					ZNode zkn = (ZNode) object;
+					enabled = zkn != null;
+				}
+			}
+		} else
+			enabled = false;
+		action.setEnabled(enabled);
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.ui.IObjectActionDelegate#setActivePart(org.eclipse.jface.
+	 * action.IAction, org.eclipse.ui.IWorkbenchPart)
+	 */
+	@Override
+	public void setActivePart(IAction action, IWorkbenchPart targetPart) {
+		this.targetPart = targetPart;
+	}
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/DisconnectAction.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/DisconnectAction.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/DisconnectAction.java
new file mode 100644
index 0000000..c77bf16
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/DisconnectAction.java
@@ -0,0 +1,95 @@
+package org.apache.hdt.ui.internal.zookeeper;
+
+import java.util.Iterator;
+
+import org.apache.hdt.core.internal.model.ServerStatus;
+import org.apache.hdt.core.internal.model.ZooKeeperServer;
+import org.apache.hdt.core.internal.zookeeper.ZooKeeperManager;
+import org.apache.log4j.Logger;
+import org.eclipse.jface.action.IAction;
+import org.eclipse.jface.viewers.ISelection;
+import org.eclipse.jface.viewers.IStructuredSelection;
+import org.eclipse.ui.IObjectActionDelegate;
+import org.eclipse.ui.IWorkbenchPart;
+import org.eclipse.ui.navigator.resources.ProjectExplorer;
+
+public class DisconnectAction implements IObjectActionDelegate {
+
+	private final static Logger logger = Logger.getLogger(DisconnectAction.class);
+	private ISelection selection;
+	private IWorkbenchPart targetPart;
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.eclipse.ui.IActionDelegate#run(org.eclipse.jface.action.IAction)
+	 */
+	@Override
+	public void run(IAction action) {
+		if (this.selection != null && !this.selection.isEmpty()) {
+			IStructuredSelection sSelection = (IStructuredSelection) this.selection;
+			@SuppressWarnings("rawtypes")
+			Iterator itr = sSelection.iterator();
+			while (itr.hasNext()) {
+				Object object = itr.next();
+				if (object instanceof ZooKeeperServer) {
+					ZooKeeperServer r = (ZooKeeperServer) object;
+					if (logger.isDebugEnabled())
+						logger.debug("Disconnecting: " + r);
+					ZooKeeperManager.INSTANCE.disconnect(r);
+					if (logger.isDebugEnabled())
+						logger.debug("Disconnected: " + r);
+					if (targetPart instanceof ProjectExplorer) {
+						ProjectExplorer pe = (ProjectExplorer) targetPart;
+						pe.getCommonViewer().refresh(r, true);
+					}
+				}
+			}
+		}
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.ui.IActionDelegate#selectionChanged(org.eclipse.jface.action
+	 * .IAction, org.eclipse.jface.viewers.ISelection)
+	 */
+	@Override
+	public void selectionChanged(IAction action, ISelection selection) {
+		this.selection = selection;
+		boolean enabled = true;
+		if (this.selection != null && !this.selection.isEmpty()) {
+			IStructuredSelection sSelection = (IStructuredSelection) this.selection;
+			@SuppressWarnings("rawtypes")
+			Iterator itr = sSelection.iterator();
+			while (itr.hasNext()) {
+				Object object = itr.next();
+				if (object instanceof ZooKeeperServer) {
+					ZooKeeperServer server = (ZooKeeperServer) object;
+					try {
+						enabled = server == null ? false : server.getStatusCode() != ServerStatus.DISCONNECTED_VALUE;
+					} catch (Throwable t) {
+						enabled = false;
+					}
+				} else
+					enabled = false;
+			}
+		} else
+			enabled = false;
+		action.setEnabled(enabled);
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.ui.IObjectActionDelegate#setActivePart(org.eclipse.jface.
+	 * action.IAction, org.eclipse.ui.IWorkbenchPart)
+	 */
+	@Override
+	public void setActivePart(IAction action, IWorkbenchPart targetPart) {
+		this.targetPart = targetPart;
+	}
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/NewZooKeeperServerAction.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/NewZooKeeperServerAction.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/NewZooKeeperServerAction.java
new file mode 100644
index 0000000..66e9ed3
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/NewZooKeeperServerAction.java
@@ -0,0 +1,62 @@
+package org.apache.hdt.ui.internal.zookeeper;
+
+import org.eclipse.jface.action.Action;
+import org.eclipse.jface.action.IAction;
+import org.eclipse.jface.viewers.ISelection;
+import org.eclipse.jface.wizard.WizardDialog;
+import org.eclipse.swt.widgets.Shell;
+import org.eclipse.ui.IWorkbenchWindow;
+import org.eclipse.ui.IWorkbenchWindowActionDelegate;
+
+public class NewZooKeeperServerAction extends Action implements IWorkbenchWindowActionDelegate {
+
+	private Shell shell;
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.eclipse.ui.IActionDelegate#run(org.eclipse.jface.action.IAction)
+	 */
+	@Override
+	public void run(IAction action) {
+		NewZooKeeperWizard wizard = new NewZooKeeperWizard();
+		WizardDialog dialog = new WizardDialog(shell, wizard);
+		dialog.open();
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.ui.IActionDelegate#selectionChanged(org.eclipse.jface.action
+	 * .IAction, org.eclipse.jface.viewers.ISelection)
+	 */
+	@Override
+	public void selectionChanged(IAction action, ISelection selection) {
+		// TODO Auto-generated method stub
+
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.eclipse.ui.IWorkbenchWindowActionDelegate#dispose()
+	 */
+	@Override
+	public void dispose() {
+		// TODO Auto-generated method stub
+
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.eclipse.ui.IWorkbenchWindowActionDelegate#init(org.eclipse.ui.
+	 * IWorkbenchWindow)
+	 */
+	@Override
+	public void init(IWorkbenchWindow window) {
+		shell = window.getShell();
+	}
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/NewZooKeeperServerWizardPage.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/NewZooKeeperServerWizardPage.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/NewZooKeeperServerWizardPage.java
new file mode 100644
index 0000000..f7a5bf5
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/NewZooKeeperServerWizardPage.java
@@ -0,0 +1,129 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hdt.ui.internal.zookeeper;
+
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.StringTokenizer;
+
+import org.apache.hdt.ui.Activator;
+import org.eclipse.jface.wizard.WizardPage;
+import org.eclipse.swt.SWT;
+import org.eclipse.swt.events.ModifyEvent;
+import org.eclipse.swt.events.ModifyListener;
+import org.eclipse.swt.layout.GridData;
+import org.eclipse.swt.layout.GridLayout;
+import org.eclipse.swt.widgets.Combo;
+import org.eclipse.swt.widgets.Composite;
+import org.eclipse.swt.widgets.Display;
+import org.eclipse.swt.widgets.Label;
+import org.eclipse.swt.widgets.Text;
+
+public class NewZooKeeperServerWizardPage extends WizardPage {
+
+	//private static final Logger logger = Logger.getLogger(NewZooKeeperServerWizardPage.class);
+	private Combo serverCombo;
+	private Text serverNameText;
+
+	private String zkServerLocation = null;
+	private String zkServerName = null;
+	protected NewZooKeeperServerWizardPage() {
+		super("ZooKeeper Server Location");
+		setTitle("ZooKeeper Server Location");
+		setDescription("Enter the name and location of the ZooKeeper server");
+	}
+
+	@Override
+	public void createControl(Composite parent) {
+		Composite c = new Composite(parent, SWT.NONE);
+		final GridLayout layout = new GridLayout(2, false);
+		layout.marginLeft = 5;
+		layout.marginRight = 20;
+		c.setLayout(layout);
+
+		// Add Name
+		Label nameLabel = new Label(c, SWT.NONE);
+		nameLabel.setText("Name:");
+		serverNameText = new Text(c, SWT.BORDER | SWT.SINGLE);
+		serverNameText.setLayoutData(new GridData(GridData.FILL_HORIZONTAL));
+		serverNameText.addModifyListener(new ModifyListener() {
+			@Override
+			public void modifyText(ModifyEvent e) {
+				String nameText = serverNameText.getText();
+				if (nameText != null && nameText.trim().length() > 0) {
+					setZkServerName(nameText);
+				} else {
+					setZkServerName(null);
+				}
+				NewZooKeeperServerWizardPage.this.validate();
+			}
+		});
+
+		// Add Combo
+		Label comboLabel = new Label(c, SWT.NONE);
+		comboLabel.setText("Location:");
+		serverCombo = new Combo(c, SWT.BORDER);
+		serverCombo.setLayoutData(new GridData(GridData.FILL_HORIZONTAL));
+		serverCombo.addModifyListener(new ModifyListener() {
+			@Override
+			public void modifyText(ModifyEvent e) {
+				String urlText = serverCombo.getText();
+				setZkServerLocation(urlText.trim().length() > 0 ? urlText.trim() : null);
+				NewZooKeeperServerWizardPage.this.validate();
+			}
+		});
+
+		// Add example
+		new Label(c, SWT.NONE);
+		Label exampleLabel = new Label(c, SWT.NONE);
+		exampleLabel.setText("Example: zookeeper.server.hostname:2181, zookeeper.server.hostname:2181/path");
+		exampleLabel.setForeground(Display.getCurrent().getSystemColor(SWT.COLOR_DARK_GRAY));
+
+		// Populate
+		String currentUrls = Activator.getDefault().getPreferenceStore().getString(Activator.PREFERENCE_ZOOKEEPER_URLS);
+		StringTokenizer st = new StringTokenizer(currentUrls, "\r\n", false);
+		while (st.hasMoreTokens()) {
+			serverCombo.add(st.nextToken());
+		}
+		setPageComplete(false);
+		this.setControl(c);
+	}
+
+	/**
+	 * Marks the page complete when both a server name and a location have been entered.
+	 */
+	protected void validate() {
+		setPageComplete(getZkServerName() != null && getZkServerLocation() != null);
+	}
+
+	public String getZkServerLocation() {
+		return zkServerLocation;
+	}
+
+	public void setZkServerLocation(String zkServerLocation) {
+		this.zkServerLocation = zkServerLocation;
+	}
+
+	public void setZkServerName(String zkServerName) {
+		this.zkServerName = zkServerName;
+	}
+
+	public String getZkServerName() {
+		return zkServerName;
+	}
+}

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/NewZooKeeperWizard.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/NewZooKeeperWizard.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/NewZooKeeperWizard.java
new file mode 100644
index 0000000..405773a
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/NewZooKeeperWizard.java
@@ -0,0 +1,81 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hdt.ui.internal.zookeeper;
+
+import org.apache.hdt.core.internal.zookeeper.ZooKeeperManager;
+import org.apache.hdt.ui.Activator;
+import org.eclipse.core.runtime.Status;
+import org.eclipse.core.runtime.jobs.Job;
+import org.eclipse.jface.preference.IPreferenceStore;
+import org.eclipse.jface.viewers.IStructuredSelection;
+import org.eclipse.jface.wizard.Wizard;
+import org.eclipse.ui.INewWizard;
+import org.eclipse.ui.IWorkbench;
+
+public class NewZooKeeperWizard extends Wizard implements INewWizard {
+
+	//private static Logger logger = Logger.getLogger(NewZooKeeperWizard.class);
+	private NewZooKeeperServerWizardPage serverLocationWizardPage = null;
+
+	public NewZooKeeperWizard() {
+	}
+
+	@Override
+	public void init(IWorkbench workbench, IStructuredSelection selection) {
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.eclipse.jface.wizard.Wizard#addPages()
+	 */
+	@Override
+	public void addPages() {
+		super.addPages();
+		if (serverLocationWizardPage == null) {
+			serverLocationWizardPage = new NewZooKeeperServerWizardPage();
+		}
+		addPage(serverLocationWizardPage);
+	}
+
+	@Override
+	public boolean performFinish() {
+		if (serverLocationWizardPage != null) {
+			String zkUrl = serverLocationWizardPage.getZkServerLocation();
+			if (zkUrl != null) {
+				IPreferenceStore ps = Activator.getDefault().getPreferenceStore();
+				String currentUrls = ps.getString(Activator.PREFERENCE_ZOOKEEPER_URLS);
+				if (currentUrls.indexOf(zkUrl + "\r\n") < 0) {
+					currentUrls = zkUrl + "\r\n" + currentUrls;
+					ps.setValue(Activator.PREFERENCE_ZOOKEEPER_URLS, currentUrls);
+				}
+
+				Job j = new Job("Creating ZooKeeper project [" + serverLocationWizardPage.getZkServerName() + "]") {
+					protected org.eclipse.core.runtime.IStatus run(org.eclipse.core.runtime.IProgressMonitor monitor) {
+						ZooKeeperManager.INSTANCE.createServer(serverLocationWizardPage.getZkServerName(), serverLocationWizardPage.getZkServerLocation());
+						return Status.OK_STATUS;
+					};
+				};
+				j.schedule();
+				return true;
+			}
+		}
+		return false;
+	}
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/OpenAction.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/OpenAction.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/OpenAction.java
new file mode 100644
index 0000000..18538f7
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/OpenAction.java
@@ -0,0 +1,102 @@
+package org.apache.hdt.ui.internal.zookeeper;
+
+import java.io.IOException;
+import java.util.Iterator;
+
+import org.apache.hdt.core.internal.model.ZNode;
+import org.apache.hdt.core.internal.zookeeper.ZooKeeperManager;
+import org.apache.hdt.core.zookeeper.ZooKeeperClient;
+import org.apache.log4j.Logger;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.jface.action.IAction;
+import org.eclipse.jface.viewers.ISelection;
+import org.eclipse.jface.viewers.IStructuredSelection;
+import org.eclipse.ui.IEditorDescriptor;
+import org.eclipse.ui.IObjectActionDelegate;
+import org.eclipse.ui.IWorkbenchPage;
+import org.eclipse.ui.IWorkbenchPart;
+import org.eclipse.ui.PlatformUI;
+
+public class OpenAction implements IObjectActionDelegate {
+
+	private final static Logger logger = Logger.getLogger(OpenAction.class);
+	private ISelection selection;
+	private IWorkbenchPart targetPart;
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.eclipse.ui.IActionDelegate#run(org.eclipse.jface.action.IAction)
+	 */
+	@Override
+	public void run(IAction action) {
+		if (this.selection != null && !this.selection.isEmpty()) {
+			IStructuredSelection sSelection = (IStructuredSelection) this.selection;
+			@SuppressWarnings("rawtypes")
+			Iterator itr = sSelection.iterator();
+			while (itr.hasNext()) {
+				Object object = itr.next();
+				if (object instanceof ZNode) {
+					ZNode zkn = (ZNode) object;
+					if (logger.isDebugEnabled())
+						logger.debug("Opening: " + zkn);
+					try {
+						ZooKeeperClient client = ZooKeeperManager.INSTANCE.getClient(zkn.getServer());
+						byte[] open = client.open(zkn);
+						IWorkbenchPage activePage = PlatformUI.getWorkbench().getActiveWorkbenchWindow().getActivePage();
+						IEditorDescriptor defaultEditor = PlatformUI.getWorkbench().getEditorRegistry().getDefaultEditor(zkn.getNodeName());
+						activePage.openEditor(new ZooKeeperNodeEditorInput(zkn, open), defaultEditor == null ? "org.eclipse.ui.DefaultTextEditor"
+								: defaultEditor.getId(), true);
+					} catch (CoreException e) {
+						logger.error(e.getMessage(), e);
+					} catch (IOException e) {
+						logger.error(e.getMessage(), e);
+					} catch (InterruptedException e) {
+						logger.error(e.getMessage(), e);
+					}
+				}
+			}
+		}
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.ui.IActionDelegate#selectionChanged(org.eclipse.jface.action
+	 * .IAction, org.eclipse.jface.viewers.ISelection)
+	 */
+	@Override
+	public void selectionChanged(IAction action, ISelection selection) {
+		this.selection = selection;
+		boolean enabled = true;
+		if (this.selection != null && !this.selection.isEmpty()) {
+			IStructuredSelection sSelection = (IStructuredSelection) this.selection;
+			@SuppressWarnings("rawtypes")
+			Iterator itr = sSelection.iterator();
+			while (itr.hasNext()) {
+				Object object = itr.next();
+				enabled = false;
+				if (object instanceof ZNode) {
+					ZNode zkn = (ZNode) object;
+					enabled = zkn != null;
+				}
+			}
+		} else
+			enabled = false;
+		action.setEnabled(enabled);
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.ui.IObjectActionDelegate#setActivePart(org.eclipse.jface.
+	 * action.IAction, org.eclipse.ui.IWorkbenchPart)
+	 */
+	@Override
+	public void setActivePart(IAction action, IWorkbenchPart targetPart) {
+		this.targetPart = targetPart;
+	}
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/ReconnectAction.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/ReconnectAction.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/ReconnectAction.java
new file mode 100644
index 0000000..0cd4607
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/ReconnectAction.java
@@ -0,0 +1,95 @@
+package org.apache.hdt.ui.internal.zookeeper;
+
+import java.util.Iterator;
+
+import org.apache.hdt.core.internal.model.ServerStatus;
+import org.apache.hdt.core.internal.model.ZooKeeperServer;
+import org.apache.hdt.core.internal.zookeeper.ZooKeeperManager;
+import org.apache.log4j.Logger;
+import org.eclipse.jface.action.IAction;
+import org.eclipse.jface.viewers.ISelection;
+import org.eclipse.jface.viewers.IStructuredSelection;
+import org.eclipse.ui.IObjectActionDelegate;
+import org.eclipse.ui.IWorkbenchPart;
+import org.eclipse.ui.navigator.resources.ProjectExplorer;
+
+public class ReconnectAction implements IObjectActionDelegate {
+
+	private final static Logger logger = Logger.getLogger(ReconnectAction.class);
+	private ISelection selection;
+	private IWorkbenchPart targetPart;
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.eclipse.ui.IActionDelegate#run(org.eclipse.jface.action.IAction)
+	 */
+	@Override
+	public void run(IAction action) {
+		if (this.selection != null && !this.selection.isEmpty()) {
+			IStructuredSelection sSelection = (IStructuredSelection) this.selection;
+			@SuppressWarnings("rawtypes")
+			Iterator itr = sSelection.iterator();
+			while (itr.hasNext()) {
+				Object object = itr.next();
+				if (object instanceof ZooKeeperServer) {
+					ZooKeeperServer r = (ZooKeeperServer) object;
+					if (logger.isDebugEnabled())
+						logger.debug("Reconnecting: " + r);
+					ZooKeeperManager.INSTANCE.reconnect(r);
+					if (logger.isDebugEnabled())
+						logger.debug("Reconnected: " + r);
+					if (targetPart instanceof ProjectExplorer) {
+						ProjectExplorer pe = (ProjectExplorer) targetPart;
+						pe.getCommonViewer().refresh(r, true);
+					}
+				}
+			}
+		}
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.ui.IActionDelegate#selectionChanged(org.eclipse.jface.action
+	 * .IAction, org.eclipse.jface.viewers.ISelection)
+	 */
+	@Override
+	public void selectionChanged(IAction action, ISelection selection) {
+		this.selection = selection;
+		boolean enabled = true;
+		if (this.selection != null && !this.selection.isEmpty()) {
+			IStructuredSelection sSelection = (IStructuredSelection) this.selection;
+			@SuppressWarnings("rawtypes")
+			Iterator itr = sSelection.iterator();
+			while (itr.hasNext()) {
+				Object object = itr.next();
+				if (object instanceof ZooKeeperServer) {
+					ZooKeeperServer server = (ZooKeeperServer) object;
+					try {
+						enabled = server == null ? false : server.getStatusCode() == ServerStatus.DISCONNECTED_VALUE;
+					} catch (Throwable t) {
+						enabled = false;
+					}
+				} else
+					enabled = false;
+			}
+		} else
+			enabled = false;
+		action.setEnabled(enabled);
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.ui.IObjectActionDelegate#setActivePart(org.eclipse.jface.
+	 * action.IAction, org.eclipse.ui.IWorkbenchPart)
+	 */
+	@Override
+	public void setActivePart(IAction action, IWorkbenchPart targetPart) {
+		this.targetPart = targetPart;
+	}
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/RefreshAction.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/RefreshAction.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/RefreshAction.java
new file mode 100644
index 0000000..d4e16d1
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/RefreshAction.java
@@ -0,0 +1,85 @@
+package org.apache.hdt.ui.internal.zookeeper;
+
+import java.util.Iterator;
+
+import org.apache.hdt.core.internal.model.ServerStatus;
+import org.apache.hdt.core.internal.model.ZNode;
+import org.apache.hdt.core.internal.model.ZooKeeperServer;
+import org.apache.log4j.Logger;
+import org.eclipse.jface.action.IAction;
+import org.eclipse.jface.viewers.ISelection;
+import org.eclipse.jface.viewers.IStructuredSelection;
+import org.eclipse.ui.IObjectActionDelegate;
+import org.eclipse.ui.IWorkbenchPart;
+import org.eclipse.ui.navigator.resources.ProjectExplorer;
+
+public class RefreshAction implements IObjectActionDelegate {
+
+	private final static Logger logger = Logger.getLogger(RefreshAction.class);
+	private ISelection selection;
+	private IWorkbenchPart targetPart;
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.eclipse.ui.IActionDelegate#run(org.eclipse.jface.action.IAction)
+	 */
+	@Override
+	public void run(IAction action) {
+		if (this.selection != null && !this.selection.isEmpty()) {
+			IStructuredSelection sSelection = (IStructuredSelection) this.selection;
+			if (targetPart instanceof ProjectExplorer) {
+				ProjectExplorer pe = (ProjectExplorer) targetPart;
+				@SuppressWarnings("rawtypes")
+				Iterator itr = sSelection.iterator();
+				while (itr.hasNext()) {
+					Object object = itr.next();
+					if (logger.isDebugEnabled())
+						logger.debug("Refreshing: " + object);
+					pe.getCommonViewer().refresh(object, true);
+				}
+			}
+		}
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.ui.IActionDelegate#selectionChanged(org.eclipse.jface.action
+	 * .IAction, org.eclipse.jface.viewers.ISelection)
+	 */
+	@Override
+	public void selectionChanged(IAction action, ISelection selection) {
+		this.selection = selection;
+		boolean enabled = true;
+		if (this.selection != null && !this.selection.isEmpty()) {
+			IStructuredSelection sSelection = (IStructuredSelection) this.selection;
+			@SuppressWarnings("rawtypes")
+			Iterator itr = sSelection.iterator();
+			while (itr.hasNext()) {
+				Object object = itr.next();
+				if (object instanceof ZooKeeperServer) {
+					ZooKeeperServer zks = (ZooKeeperServer) object;
+					enabled = zks.getStatusCode() == ServerStatus.CONNECTED_VALUE;
+				} else
+					enabled = object instanceof ZNode;
+			}
+		} else
+			enabled = false;
+		action.setEnabled(enabled);
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.ui.IObjectActionDelegate#setActivePart(org.eclipse.jface.
+	 * action.IAction, org.eclipse.ui.IWorkbenchPart)
+	 */
+	@Override
+	public void setActivePart(IAction action, IWorkbenchPart targetPart) {
+		this.targetPart = targetPart;
+	}
+
+}
\ No newline at end of file
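
ReconnectAction and RefreshAction (like the other object-action delegates in this commit) each walk the IStructuredSelection with a raw Iterator and an instanceof check. A typed helper along the following lines could factor that loop out. This is only a sketch and not part of the commit; the class name and placement are hypothetical.

    import java.util.ArrayList;
    import java.util.Iterator;
    import java.util.List;

    import org.eclipse.jface.viewers.ISelection;
    import org.eclipse.jface.viewers.IStructuredSelection;

    // Hypothetical helper: collects selected elements of a given type so action
    // delegates do not have to repeat the raw-Iterator loop.
    public final class SelectedElements {

        private SelectedElements() {
        }

        public static <T> List<T> of(ISelection selection, Class<T> type) {
            List<T> result = new ArrayList<T>();
            if (selection instanceof IStructuredSelection) {
                Iterator<?> itr = ((IStructuredSelection) selection).iterator();
                while (itr.hasNext()) {
                    Object object = itr.next();
                    if (type.isInstance(object))
                        result.add(type.cast(object));
                }
            }
            return result;
        }
    }

With such a helper, ReconnectAction.run could iterate SelectedElements.of(selection, ZooKeeperServer.class) directly instead of casting inside the loop.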

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/ZNodePropertySource.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/ZNodePropertySource.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/ZNodePropertySource.java
new file mode 100644
index 0000000..4c7e549
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/ZNodePropertySource.java
@@ -0,0 +1,226 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hdt.ui.internal.zookeeper;
+
+import java.text.DateFormat;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.List;
+
+import org.apache.hdt.core.internal.model.ZNode;
+import org.apache.hdt.ui.Activator;
+import org.eclipse.ui.views.properties.IPropertyDescriptor;
+import org.eclipse.ui.views.properties.IPropertySource;
+import org.eclipse.ui.views.properties.PropertyDescriptor;
+
+/**
+ * @author Srimanth Gunturi
+ * 
+ */
+public class ZNodePropertySource implements IPropertySource {
+
+	/**
+	 * 
+	 */
+	private static final String PROP_DATA_SIZE = Activator.PLUGIN_ID + ".znode.dsize";
+	/**
+	 * 
+	 */
+	private static final String PROP_CHILD_COUNT = Activator.PLUGIN_ID + ".znode.ccount";
+	/**
+	 * 
+	 */
+	private static final String PROP_TIME_REFRESH = Activator.PLUGIN_ID + ".znode.rtime";
+	/**
+	 * 
+	 */
+	private static final String PROP_TIME_MODIFICATION = Activator.PLUGIN_ID + ".znode.mtime";
+	/**
+	 * 
+	 */
+	private static final String PROP_TIME_CREATION = Activator.PLUGIN_ID + ".znode.ctime";
+	/**
+	 * 
+	 */
+	private static final String PROP_ID_MODIFICATION = Activator.PLUGIN_ID + ".znode.mxid";
+	/**
+	 * 
+	 */
+	private static final String PROP_ID_CREATION = Activator.PLUGIN_ID + ".znode.cxid";
+	/**
+	 * 
+	 */
+	private static final String PROP_VERSION_ACL = Activator.PLUGIN_ID + ".znode.aversion";
+	/**
+	 * 
+	 */
+	private static final String PROP_VERSION_CHILDREN = Activator.PLUGIN_ID + ".znode.cversion";
+	/**
+	 * 
+	 */
+	private static final String PROP_VERSION = Activator.PLUGIN_ID + ".znode.version";
+	/**
+	 * 
+	 */
+	private static final String PROP_EPHERMERAL = Activator.PLUGIN_ID + ".znode.ephermeral";
+	/**
+	 * 
+	 */
+	private static final String PROP_EPHERMERAL_SESSION_ID = Activator.PLUGIN_ID + ".znode.ephermeral.sessionid";
+	/**
+	 * 
+	 */
+	private static final String PROP_PATH = Activator.PLUGIN_ID + ".znode.path";
+
+	private final ZNode zNode;
+
+	/**
+	 * @param zNode
+	 */
+	public ZNodePropertySource(ZNode zNode) {
+		this.zNode = zNode;
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.eclipse.ui.views.properties.IPropertySource#getEditableValue()
+	 */
+	@Override
+	public Object getEditableValue() {
+		// TODO Auto-generated method stub
+		return null;
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.ui.views.properties.IPropertySource#getPropertyDescriptors()
+	 */
+	@Override
+	public IPropertyDescriptor[] getPropertyDescriptors() {
+		if (zNode != null) {
+			List<IPropertyDescriptor> props = new ArrayList<IPropertyDescriptor>();
+			props.add(new PropertyDescriptor(PROP_PATH, "Path"));
+			// Versions
+			props.add(new PropertyDescriptor(PROP_VERSION, "Version (Node)"));
+			props.add(new PropertyDescriptor(PROP_VERSION_CHILDREN, "Version (Children)"));
+			props.add(new PropertyDescriptor(PROP_VERSION_ACL, "Version (ACL)"));
+			// IDs
+			props.add(new PropertyDescriptor(PROP_ID_CREATION, "ID (Creation)"));
+			props.add(new PropertyDescriptor(PROP_ID_MODIFICATION, "ID (Modification)"));
+			// Time
+			props.add(new PropertyDescriptor(PROP_TIME_CREATION, "Created"));
+			props.add(new PropertyDescriptor(PROP_TIME_MODIFICATION, "Modified"));
+			props.add(new PropertyDescriptor(PROP_TIME_REFRESH, "Refreshed"));
+			// Misc
+			props.add(new PropertyDescriptor(PROP_CHILD_COUNT, "Children Count"));
+			props.add(new PropertyDescriptor(PROP_DATA_SIZE, "Data Size"));
+			props.add(new PropertyDescriptor(PROP_EPHERMERAL, "Is Ephemeral Node"));
+			if (zNode.isEphermeral())
+				props.add(new PropertyDescriptor(PROP_EPHERMERAL_SESSION_ID, "Ephemeral Session Id"));
+			return props.toArray(new IPropertyDescriptor[props.size()]);
+		}
+		return new IPropertyDescriptor[0];
+	}
+
+	protected String getTimeDisplay(long time) {
+		if (time > 0)
+			return DateFormat.getDateTimeInstance(DateFormat.LONG, DateFormat.LONG).format(new Date(time));
+		return null;
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.ui.views.properties.IPropertySource#getPropertyValue(java
+	 * .lang.Object)
+	 */
+	@Override
+	public Object getPropertyValue(Object id) {
+		if (PROP_PATH.equals(id))
+			return zNode.getPath();
+		if (PROP_CHILD_COUNT.equals(id))
+			return zNode.getChildrenCount();
+		if (PROP_DATA_SIZE.equals(id))
+			return zNode.getDataLength();
+		if (PROP_TIME_CREATION.equals(id))
+			return getTimeDisplay(zNode.getCreationTime());
+		if (PROP_TIME_MODIFICATION.equals(id))
+			return getTimeDisplay(zNode.getModifiedTime());
+		if (PROP_TIME_REFRESH.equals(id))
+			return getTimeDisplay(zNode.getLastRefresh());
+		if (PROP_ID_CREATION.equals(id))
+			return zNode.getCreationId();
+		if (PROP_ID_MODIFICATION.equals(id))
+			return zNode.getModifiedId();
+		if (PROP_VERSION.equals(id))
+			return zNode.getVersion();
+		if (PROP_VERSION_ACL.equals(id))
+			return zNode.getAclVersion();
+		if (PROP_VERSION_CHILDREN.equals(id))
+			return zNode.getChildrenVersion();
+		if (PROP_EPHERMERAL.equals(id))
+			return zNode.isEphermeral();
+		if (PROP_EPHERMERAL_SESSION_ID.equals(id))
+			return zNode.getEphermalOwnerSessionId();
+		return null;
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.ui.views.properties.IPropertySource#isPropertySet(java.lang
+	 * .Object)
+	 */
+	@Override
+	public boolean isPropertySet(Object id) {
+		// TODO Auto-generated method stub
+		return false;
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.ui.views.properties.IPropertySource#resetPropertyValue(java
+	 * .lang.Object)
+	 */
+	@Override
+	public void resetPropertyValue(Object id) {
+		// TODO Auto-generated method stub
+
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.ui.views.properties.IPropertySource#setPropertyValue(java
+	 * .lang.Object, java.lang.Object)
+	 */
+	@Override
+	public void setPropertyValue(Object id, Object value) {
+		// TODO Auto-generated method stub
+
+	}
+
+}
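
The Properties view only consults ZNodePropertySource if something adapts a selected ZNode to IPropertySource. This commit also adds a HadoopAdapterFactory (see the file list in the [8/8] summary below), which presumably provides that wiring; the following is only a sketch of the standard Eclipse adapter pattern, not the committed class, and it assumes it lives in the same package as ZNodePropertySource.

    import org.apache.hdt.core.internal.model.ZNode;
    import org.eclipse.core.runtime.IAdapterFactory;
    import org.eclipse.ui.views.properties.IPropertySource;

    // Illustrative only: adapts a ZNode to IPropertySource so the Properties view
    // can display the descriptors defined above. The real HadoopAdapterFactory may differ.
    public class ZNodeAdapterFactorySketch implements IAdapterFactory {

        public Object getAdapter(Object adaptableObject, Class adapterType) {
            if (adapterType == IPropertySource.class && adaptableObject instanceof ZNode)
                return new ZNodePropertySource((ZNode) adaptableObject);
            return null;
        }

        public Class[] getAdapterList() {
            return new Class[] { IPropertySource.class };
        }
    }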

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/ZooKeeperCommonContentProvider.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/ZooKeeperCommonContentProvider.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/ZooKeeperCommonContentProvider.java
new file mode 100644
index 0000000..77c1c20
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/ZooKeeperCommonContentProvider.java
@@ -0,0 +1,187 @@
+package org.apache.hdt.ui.internal.zookeeper;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.hdt.core.internal.HadoopManager;
+import org.apache.hdt.core.internal.model.HadoopPackage;
+import org.apache.hdt.core.internal.model.Servers;
+import org.apache.hdt.core.internal.model.ZNode;
+import org.apache.hdt.core.internal.model.ZooKeeperServer;
+import org.apache.hdt.core.internal.zookeeper.ZooKeeperManager;
+import org.apache.hdt.core.zookeeper.ZooKeeperClient;
+import org.apache.log4j.Logger;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.emf.common.notify.Notification;
+import org.eclipse.emf.ecore.util.EContentAdapter;
+import org.eclipse.jface.viewers.Viewer;
+import org.eclipse.swt.widgets.Display;
+import org.eclipse.ui.IMemento;
+import org.eclipse.ui.IViewPart;
+import org.eclipse.ui.PartInitException;
+import org.eclipse.ui.PlatformUI;
+import org.eclipse.ui.navigator.CommonNavigator;
+import org.eclipse.ui.navigator.CommonViewer;
+import org.eclipse.ui.navigator.ICommonContentExtensionSite;
+import org.eclipse.ui.navigator.ICommonContentProvider;
+import org.eclipse.ui.navigator.INavigatorContentService;
+
+public class ZooKeeperCommonContentProvider implements ICommonContentProvider {
+
+	private static final Logger logger = Logger.getLogger(ZooKeeperCommonContentProvider.class);
+	private EContentAdapter serversListener;
+	private String viewerId;
+	private Display display;
+
+	@Override
+	public void dispose() {
+		if (serversListener != null) {
+			HadoopManager.INSTANCE.getServers().eAdapters().remove(serversListener);
+			serversListener = null;
+		}
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.ui.navigator.ICommonContentProvider#init(org.eclipse.ui.navigator
+	 * .ICommonContentExtensionSite)
+	 */
+	@Override
+	public void init(ICommonContentExtensionSite aConfig) {
+		INavigatorContentService cs = aConfig.getService();
+		viewerId = cs.getViewerId();
+		this.display = PlatformUI.getWorkbench().getActiveWorkbenchWindow().getShell().getDisplay();
+		hookRefreshResources();
+	}
+
+	@Override
+	public void inputChanged(Viewer viewer, Object oldInput, Object newInput) {
+	}
+
+	@Override
+	public Object[] getElements(Object inputElement) {
+		return ZooKeeperManager.INSTANCE.getServers().toArray();
+	}
+
+	@Override
+	public Object[] getChildren(Object parentElement) {
+		if (parentElement instanceof ZNode) {
+			ZNode zkn = (ZNode) parentElement;
+			try {
+				ZooKeeperClient client = ZooKeeperManager.INSTANCE.getClient(zkn.getServer());
+				List<ZNode> zkChildren = client.getChildren(zkn);
+				return zkChildren.toArray();
+			} catch (CoreException e) {
+				logger.error("Error getting children of node", e);
+			} catch (IOException e) {
+				logger.error("Error getting children of node", e);
+			} catch (InterruptedException e) {
+				logger.error("Error getting children of node", e);
+			}
+		}
+		return null;
+	}
+
+	@Override
+	public Object getParent(Object element) {
+		if (element instanceof ZNode) {
+			ZNode zkn = (ZNode) element;
+			return zkn.getParent();
+		}
+		return null;
+	}
+
+	@Override
+	public boolean hasChildren(Object element) {
+		if (element instanceof ZooKeeperServer)
+			return true;
+		if (element instanceof ZNode)
+			return true;
+		return false;
+	}
+
+	protected void hookRefreshResources() {
+		serversListener = new EContentAdapter() {
+			public boolean isAdapterForType(Object type) {
+				return HadoopPackage.eINSTANCE.getZooKeeperServer().isInstance(type) || HadoopPackage.eINSTANCE.getZNode().isInstance(type);
+			}
+
+			public void notifyChanged(final org.eclipse.emf.common.notify.Notification notification) {
+				super.notifyChanged(notification);
+				if (logger.isDebugEnabled())
+					logger.debug(notification);
+				if (notification.getNotifier() instanceof Servers) {
+					if (notification.getEventType() == Notification.ADD || notification.getEventType() == Notification.REMOVE) {
+						display.asyncExec(new Runnable() {
+							@Override
+							public void run() {
+								CommonViewer viewer = null;
+								try {
+									IViewPart view = PlatformUI.getWorkbench().getActiveWorkbenchWindow().getActivePage().showView(viewerId);
+									if (view instanceof CommonNavigator) {
+										CommonNavigator navigator = (CommonNavigator) view;
+										viewer = navigator.getCommonViewer();
+									}
+								} catch (PartInitException e) {
+								}
+
+								if (viewer != null) {
+									viewer.refresh(true);
+								}
+							}
+						});
+					}
+				} else if (notification.getNotifier() instanceof ZooKeeperServer) {
+					int featureID = notification.getFeatureID(ZooKeeperServer.class);
+					if (featureID == HadoopPackage.ZOO_KEEPER_SERVER__STATUS_CODE) {
+						if (notification.getEventType() == Notification.SET) {
+							display.asyncExec(new Runnable() {
+								@Override
+								public void run() {
+									CommonViewer viewer = null;
+									try {
+										IViewPart view = PlatformUI.getWorkbench().getActiveWorkbenchWindow().getActivePage().showView(viewerId);
+										if (view instanceof CommonNavigator) {
+											CommonNavigator navigator = (CommonNavigator) view;
+											viewer = navigator.getCommonViewer();
+										}
+									} catch (PartInitException e) {
+									}
+
+									if (viewer != null) {
+										viewer.refresh(notification.getNotifier(), true);
+									}
+								}
+							});
+						}
+					}
+				}
+			}
+		};
+		HadoopManager.INSTANCE.getServers().eAdapters().add(serversListener);
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.ui.navigator.IMementoAware#restoreState(org.eclipse.ui.IMemento
+	 * )
+	 */
+	@Override
+	public void restoreState(IMemento aMemento) {
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.ui.navigator.IMementoAware#saveState(org.eclipse.ui.IMemento)
+	 */
+	@Override
+	public void saveState(IMemento aMemento) {
+	}
+
+}
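
Both asyncExec runnables in hookRefreshResources locate the CommonViewer the same way and silently swallow PartInitException. A shared lookup such as the sketch below (hypothetical, not in the commit) would keep that logic in one place and make the ignored exception explicit; like the original code, it must be called on the UI thread.

    import org.eclipse.ui.IViewPart;
    import org.eclipse.ui.PartInitException;
    import org.eclipse.ui.PlatformUI;
    import org.eclipse.ui.navigator.CommonNavigator;
    import org.eclipse.ui.navigator.CommonViewer;

    // Hypothetical helper: returns the CommonViewer of the given navigator view,
    // or null if the view cannot be shown. Call from the UI thread (e.g. asyncExec).
    public final class CommonViewerLookup {

        private CommonViewerLookup() {
        }

        public static CommonViewer find(String viewerId) {
            try {
                IViewPart view = PlatformUI.getWorkbench().getActiveWorkbenchWindow().getActivePage().showView(viewerId);
                if (view instanceof CommonNavigator)
                    return ((CommonNavigator) view).getCommonViewer();
            } catch (PartInitException e) {
                // navigator view unavailable; callers treat null as "nothing to refresh"
            }
            return null;
        }
    }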

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/ZooKeeperLabelProvider.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/ZooKeeperLabelProvider.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/ZooKeeperLabelProvider.java
new file mode 100644
index 0000000..4ee04ea
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/ZooKeeperLabelProvider.java
@@ -0,0 +1,87 @@
+package org.apache.hdt.ui.internal.zookeeper;
+
+import org.apache.hdt.core.internal.model.ZNode;
+import org.apache.hdt.core.internal.model.ZooKeeperServer;
+import org.apache.hdt.ui.Activator;
+import org.eclipse.jface.viewers.ILabelProviderListener;
+import org.eclipse.swt.graphics.Image;
+import org.eclipse.ui.IMemento;
+import org.eclipse.ui.navigator.ICommonContentExtensionSite;
+import org.eclipse.ui.navigator.ICommonLabelProvider;
+
+public class ZooKeeperLabelProvider implements ICommonLabelProvider {
+
+	@Override
+	public void addListener(ILabelProviderListener listener) {
+	}
+
+	@Override
+	public void dispose() {
+	}
+
+	@Override
+	public boolean isLabelProperty(Object element, String property) {
+		return false;
+	}
+
+	@Override
+	public void removeListener(ILabelProviderListener listener) {
+	}
+
+	@Override
+	public Image getImage(Object element) {
+		if (element instanceof ZooKeeperServer)
+			return Activator.IMAGE_ZOOKEEPER;
+		if (element instanceof ZNode)
+			return Activator.IMAGE_ZOOKEEPER_NODE;
+		return null;
+	}
+
+	@Override
+	public String getText(Object element) {
+		if (element instanceof ZooKeeperServer) {
+			ZooKeeperServer zks = (ZooKeeperServer) element;
+			return zks.getName();
+		}
+		if (element instanceof ZNode)
+			return ((ZNode) element).getNodeName();
+		return null;
+	}
+
+	/* (non-Javadoc)
+	 * @see org.eclipse.ui.navigator.IMementoAware#restoreState(org.eclipse.ui.IMemento)
+	 */
+	@Override
+	public void restoreState(IMemento aMemento) {
+		// TODO Auto-generated method stub
+		
+	}
+
+	/* (non-Javadoc)
+	 * @see org.eclipse.ui.navigator.IMementoAware#saveState(org.eclipse.ui.IMemento)
+	 */
+	@Override
+	public void saveState(IMemento aMemento) {
+		// TODO Auto-generated method stub
+		
+	}
+
+	/* (non-Javadoc)
+	 * @see org.eclipse.ui.navigator.IDescriptionProvider#getDescription(java.lang.Object)
+	 */
+	@Override
+	public String getDescription(Object anElement) {
+		// TODO Auto-generated method stub
+		return null;
+	}
+
+	/* (non-Javadoc)
+	 * @see org.eclipse.ui.navigator.ICommonLabelProvider#init(org.eclipse.ui.navigator.ICommonContentExtensionSite)
+	 */
+	@Override
+	public void init(ICommonContentExtensionSite aConfig) {
+		// TODO Auto-generated method stub
+		
+	}
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/ZooKeeperLightweightLabelDecorator.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/ZooKeeperLightweightLabelDecorator.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/ZooKeeperLightweightLabelDecorator.java
new file mode 100644
index 0000000..a067c10
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/ZooKeeperLightweightLabelDecorator.java
@@ -0,0 +1,67 @@
+package org.apache.hdt.ui.internal.zookeeper;
+
+import org.apache.hdt.core.internal.model.ServerStatus;
+import org.apache.hdt.core.internal.model.ZNode;
+import org.apache.hdt.core.internal.model.ZooKeeperServer;
+import org.apache.hdt.ui.Activator;
+import org.eclipse.jface.viewers.IDecoration;
+import org.eclipse.jface.viewers.ILabelProviderListener;
+import org.eclipse.jface.viewers.ILightweightLabelDecorator;
+
+public class ZooKeeperLightweightLabelDecorator implements ILightweightLabelDecorator {
+
+	@Override
+	public void addListener(ILabelProviderListener listener) {
+		// TODO Auto-generated method stub
+
+	}
+
+	@Override
+	public void dispose() {
+		// TODO Auto-generated method stub
+
+	}
+
+	@Override
+	public boolean isLabelProperty(Object element, String property) {
+		// TODO Auto-generated method stub
+		return false;
+	}
+
+	@Override
+	public void removeListener(ILabelProviderListener listener) {
+		// TODO Auto-generated method stub
+
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.jface.viewers.ILightweightLabelDecorator#decorate(java.lang
+	 * .Object, org.eclipse.jface.viewers.IDecoration)
+	 */
+	@Override
+	public void decorate(Object element, IDecoration decoration) {
+		if (element instanceof ZooKeeperServer) {
+			ZooKeeperServer zks = (ZooKeeperServer) element;
+
+			// Image decorations
+			if (zks.getStatusCode() == ServerStatus.DISCONNECTED_VALUE)
+				decoration.addOverlay(org.apache.hdt.ui.Activator.IMAGE_OFFLINE_OVR);
+			else
+				decoration.addOverlay(org.apache.hdt.ui.Activator.IMAGE_ONLINE_OVR);
+
+			// Text decorations
+			decoration.addSuffix("  " + zks.getUri());
+		} else if (element instanceof ZNode) {
+			ZNode zkn = (ZNode) element;
+			if (zkn.getVersion() > -1) {
+				decoration.addSuffix("  [v=" + zkn.getVersion() + "]");
+			}
+			if (zkn.isEphermeral())
+				decoration.addOverlay(Activator.IMAGE_ZOOKEEPER_EPHERMERAL, IDecoration.BOTTOM_RIGHT);
+		}
+	}
+
+}
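
Lightweight decorators run only when the platform recomputes labels. If a server's status changes outside the model-notification path handled by ZooKeeperCommonContentProvider, the decorations can be re-run explicitly. The sketch below is illustrative; the decorator id is an assumption, since the real id is whatever plugin.xml declares for ZooKeeperLightweightLabelDecorator.

    import org.eclipse.ui.PlatformUI;

    // Illustrative: asks the workbench decorator manager to re-run one decorator.
    public final class DecoratorRefreshSketch {

        public static void refresh(String decoratorId) {
            PlatformUI.getWorkbench().getDecoratorManager().update(decoratorId);
        }
    }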

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/ZooKeeperNodeEditorInput.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/ZooKeeperNodeEditorInput.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/ZooKeeperNodeEditorInput.java
new file mode 100644
index 0000000..ddacaab
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/ZooKeeperNodeEditorInput.java
@@ -0,0 +1,126 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hdt.ui.internal.zookeeper;
+
+import org.apache.hdt.core.internal.model.ZNode;
+import org.apache.hdt.ui.Activator;
+import org.eclipse.core.resources.IStorage;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.jface.resource.ImageDescriptor;
+import org.eclipse.ui.IPersistableElement;
+import org.eclipse.ui.IStorageEditorInput;
+
+/**
+ * @author Srimanth Gunturi
+ * 
+ */
+public class ZooKeeperNodeEditorInput implements IStorageEditorInput {
+
+	private final byte[] nodeData;
+	private final ZNode node;
+
+	/**
+	 * 
+	 */
+	public ZooKeeperNodeEditorInput(ZNode node, byte[] nodeData) {
+		this.node = node;
+		this.nodeData = nodeData;
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.eclipse.ui.IEditorInput#exists()
+	 */
+	@Override
+	public boolean exists() {
+		// TODO
+		return true;
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.eclipse.ui.IEditorInput#getImageDescriptor()
+	 */
+	@Override
+	public ImageDescriptor getImageDescriptor() {
+		return Activator.IMAGE_HADOOP;
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.eclipse.ui.IEditorInput#getName()
+	 */
+	@Override
+	public String getName() {
+		return getNode().getNodeName();
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.eclipse.ui.IEditorInput#getPersistable()
+	 */
+	@Override
+	public IPersistableElement getPersistable() {
+		// TODO Auto-generated method stub
+		return null;
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.eclipse.ui.IEditorInput#getToolTipText()
+	 */
+	@Override
+	public String getToolTipText() {
+		return getNode().getPath();
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.eclipse.core.runtime.IAdaptable#getAdapter(java.lang.Class)
+	 */
+	@Override
+	public Object getAdapter(Class adapter) {
+		// TODO Auto-generated method stub
+		return null;
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.eclipse.ui.IStorageEditorInput#getStorage()
+	 */
+	@Override
+	public IStorage getStorage() throws CoreException {
+		return new ZooKeeperNodeStorage(this);
+	}
+
+	public byte[] getNodeData() {
+		return nodeData;
+	}
+
+	public ZNode getNode() {
+		return node;
+	}
+
+}
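
An IStorageEditorInput like this is typically handed straight to IWorkbenchPage.openEditor; the OpenAction added by this commit presumably does something similar. In the sketch below the editor id and the way the node data is obtained are assumptions, not the committed behavior.

    import org.apache.hdt.core.internal.model.ZNode;
    import org.eclipse.ui.IWorkbenchPage;
    import org.eclipse.ui.PartInitException;
    import org.eclipse.ui.PlatformUI;

    // Illustrative only: opens the node data in the platform's default text editor.
    public class OpenZNodeSketch {

        public void open(ZNode node, byte[] data) {
            ZooKeeperNodeEditorInput input = new ZooKeeperNodeEditorInput(node, data);
            try {
                IWorkbenchPage page = PlatformUI.getWorkbench().getActiveWorkbenchWindow().getActivePage();
                page.openEditor(input, "org.eclipse.ui.DefaultTextEditor");
            } catch (PartInitException e) {
                // editor could not be opened; a real action would log or report this
            }
        }
    }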

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/ZooKeeperNodeStorage.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/ZooKeeperNodeStorage.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/ZooKeeperNodeStorage.java
new file mode 100644
index 0000000..0861a3a
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/ZooKeeperNodeStorage.java
@@ -0,0 +1,94 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hdt.ui.internal.zookeeper;
+
+import java.io.ByteArrayInputStream;
+import java.io.InputStream;
+
+import org.eclipse.core.resources.IStorage;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.core.runtime.IPath;
+import org.eclipse.core.runtime.Path;
+
+/**
+ * @author Srimanth Gunturi
+ * 
+ */
+public class ZooKeeperNodeStorage implements IStorage {
+
+	private final ZooKeeperNodeEditorInput editorInput;
+
+	/**
+	 * 
+	 */
+	public ZooKeeperNodeStorage(ZooKeeperNodeEditorInput editorInput) {
+		this.editorInput = editorInput;
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.eclipse.core.runtime.IAdaptable#getAdapter(java.lang.Class)
+	 */
+	@Override
+	public Object getAdapter(Class adapter) {
+		// TODO Auto-generated method stub
+		return null;
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.eclipse.core.resources.IStorage#getContents()
+	 */
+	@Override
+	public InputStream getContents() throws CoreException {
+		return new ByteArrayInputStream(editorInput.getNodeData());
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.eclipse.core.resources.IStorage#getFullPath()
+	 */
+	@Override
+	public IPath getFullPath() {
+		return new Path(editorInput.getNode().getPath());
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.eclipse.core.resources.IStorage#getName()
+	 */
+	@Override
+	public String getName() {
+		return editorInput.getName();
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.eclipse.core.resources.IStorage#isReadOnly()
+	 */
+	@Override
+	public boolean isReadOnly() {
+		return false;
+	}
+
+}
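
For completeness, a consumer of this storage: getContents() returns a fresh stream over the byte array captured in the editor input, so reading it back is a plain stream copy. The helper below is illustrative only and not part of the commit.

    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.io.InputStream;

    import org.eclipse.core.resources.IStorage;
    import org.eclipse.core.runtime.CoreException;

    // Illustrative: drains an IStorage back into a byte array, e.g. for display.
    public final class StorageReaderSketch {

        public static byte[] read(IStorage storage) throws CoreException, IOException {
            InputStream in = storage.getContents();
            try {
                ByteArrayOutputStream out = new ByteArrayOutputStream();
                byte[] buffer = new byte[4096];
                int n;
                while ((n = in.read(buffer)) != -1)
                    out.write(buffer, 0, n);
                return out.toByteArray();
            } finally {
                in.close();
            }
        }
    }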

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.updateSite/.project
----------------------------------------------------------------------
diff --git a/org.apache.hdt.updateSite/.project b/org.apache.hdt.updateSite/.project
new file mode 100644
index 0000000..aed0b59
--- /dev/null
+++ b/org.apache.hdt.updateSite/.project
@@ -0,0 +1,17 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<projectDescription>
+	<name>org.apache.hdt.updateSite</name>
+	<comment></comment>
+	<projects>
+	</projects>
+	<buildSpec>
+		<buildCommand>
+			<name>org.eclipse.pde.UpdateSiteBuilder</name>
+			<arguments>
+			</arguments>
+		</buildCommand>
+	</buildSpec>
+	<natures>
+		<nature>org.eclipse.pde.UpdateSiteNature</nature>
+	</natures>
+</projectDescription>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.updateSite/site.xml
----------------------------------------------------------------------
diff --git a/org.apache.hdt.updateSite/site.xml b/org.apache.hdt.updateSite/site.xml
new file mode 100644
index 0000000..10bbb8b
--- /dev/null
+++ b/org.apache.hdt.updateSite/site.xml
@@ -0,0 +1,7 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<site>
+   <feature url="features/org.apache.hadoop.eclipse.feature_1.0.0.201306090223.jar" id="org.apache.hadoop.eclipse.feature" version="1.0.0.201306090223">
+      <category name="org.apache.hadoop.eclipse"/>
+   </feature>
+   <category-def name="org.apache.hadoop.eclipse" label="Apache Hadoop Eclipse"/>
+</site>


[8/8] git commit: HDT-32: Merge the code base of Hadoop-Eclipse project into HDT. Contributed by Srimanth Gunturi

Posted by rs...@apache.org.
HDT-32: Merge the code base of Hadoop-Eclipse project into HDT.
        Contributed by Srimanth Gunturi


Project: http://git-wip-us.apache.org/repos/asf/incubator-hdt/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hdt/commit/63bec260
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hdt/tree/63bec260
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hdt/diff/63bec260

Branch: refs/heads/hadoop-eclipse-merge
Commit: 63bec2607fdb96f57cc5c8b71561c03bbc986502
Parents: 0a0744f
Author: Rahul Sharma <rs...@apache.org>
Authored: Thu Jul 25 09:08:50 2013 +0530
Committer: Rahul Sharma <rs...@apache.org>
Committed: Thu Jul 25 09:11:50 2013 +0530

----------------------------------------------------------------------
 org.apache.hdt.core/.classpath                  |    8 +
 org.apache.hdt.core/.project                    |   28 +
 .../.settings/org.eclipse.core.resources.prefs  |    3 +
 .../.settings/org.eclipse.jdt.core.prefs        |  276 ++++
 .../.settings/org.eclipse.jdt.ui.prefs          |    6 +
 org.apache.hdt.core/META-INF/MANIFEST.MF        |   27 +
 org.apache.hdt.core/build.properties            |    9 +
 org.apache.hdt.core/models/Hadoop.ecore         |   92 ++
 org.apache.hdt.core/models/Hadoop.genmodel      |   28 +
 org.apache.hdt.core/plugin.properties           |    8 +
 org.apache.hdt.core/plugin.xml                  |   43 +
 .../org.apache.hadoop.eclipse.hdfsclient.exsd   |  118 ++
 ...g.apache.hadoop.eclipse.zookeeperClient.exsd |  111 ++
 org.apache.hdt.core/src/log4j.properties        |   17 +
 .../src/org/apache/hdt/core/Activator.java      |   63 +
 .../org/apache/hdt/core/hdfs/HDFSClient.java    |  114 ++
 .../hdt/core/hdfs/ResourceInformation.java      |  213 +++
 .../apache/hdt/core/internal/HadoopManager.java |   83 ++
 .../hdt/core/internal/hdfs/DownloadFileJob.java |  137 ++
 .../hdt/core/internal/hdfs/HDFSFileStore.java   |  594 ++++++++
 .../hdt/core/internal/hdfs/HDFSFileSystem.java  |   57 +
 .../hdt/core/internal/hdfs/HDFSManager.java     |  285 ++++
 .../core/internal/hdfs/HDFSMoveDeleteHook.java  |  130 ++
 .../hdfs/HDFSTeamRepositoryProvider.java        |   41 +
 .../apache/hdt/core/internal/hdfs/HDFSURI.java  |   73 +
 .../hdt/core/internal/hdfs/HDFSUtilites.java    |   86 ++
 .../internal/hdfs/InterruptableHDFSClient.java  |  260 ++++
 .../hdt/core/internal/hdfs/UploadFileJob.java   |  150 ++
 .../hdt/core/internal/model/HDFSServer.java     |  127 ++
 .../hdt/core/internal/model/HadoopFactory.java  |   85 ++
 .../hdt/core/internal/model/HadoopPackage.java  | 1380 ++++++++++++++++++
 .../apache/hdt/core/internal/model/Server.java  |  175 +++
 .../hdt/core/internal/model/ServerStatus.java   |  247 ++++
 .../apache/hdt/core/internal/model/Servers.java |  103 ++
 .../apache/hdt/core/internal/model/ZNode.java   |  527 +++++++
 .../hdt/core/internal/model/ZNodeType.java      |  251 ++++
 .../core/internal/model/ZooKeeperServer.java    |   35 +
 .../internal/model/impl/HDFSServerImpl.java     |  310 ++++
 .../internal/model/impl/HadoopFactoryImpl.java  |  195 +++
 .../internal/model/impl/HadoopPackageImpl.java  |  621 ++++++++
 .../core/internal/model/impl/ServerImpl.java    |  395 +++++
 .../core/internal/model/impl/ServersImpl.java   |  271 ++++
 .../hdt/core/internal/model/impl/ZNodeImpl.java | 1017 +++++++++++++
 .../model/impl/ZooKeeperServerImpl.java         | 1109 ++++++++++++++
 .../model/util/HadoopAdapterFactory.java        |  208 +++
 .../core/internal/model/util/HadoopSwitch.java  |  229 +++
 .../zookeeper/InterruptableZooKeeperClient.java |  233 +++
 .../internal/zookeeper/ZooKeeperManager.java    |  162 ++
 .../hdt/core/zookeeper/ZooKeeperClient.java     |   45 +
 org.apache.hdt.feature/.project                 |   17 +
 org.apache.hdt.feature/build.properties         |    1 +
 org.apache.hdt.feature/feature.xml              |   68 +
 org.apache.hdt.hadoop.release/.classpath        |   25 +
 org.apache.hdt.hadoop.release/.project          |   28 +
 .../.settings/org.eclipse.jdt.core.prefs        |  276 ++++
 .../.settings/org.eclipse.jdt.ui.prefs          |    6 +
 .../META-INF/MANIFEST.MF                        |   27 +
 org.apache.hdt.hadoop.release/build.properties  |   23 +
 org.apache.hdt.hadoop.release/fragment.xml      |   36 +
 .../hdt/hadoop/release/HDFSClientRelease.java   |  235 +++
 .../hadoop/release/ZooKeeperClientRelease.java  |  215 +++
 org.apache.hdt.ui.test/.classpath               |    7 +
 org.apache.hdt.ui.test/.project                 |   28 +
 .../.settings/org.eclipse.jdt.core.prefs        |    7 +
 org.apache.hdt.ui.test/META-INF/MANIFEST.MF     |   14 +
 org.apache.hdt.ui.test/build.properties         |    5 +
 org.apache.hdt.ui.test/plugin.xml               |   21 +
 .../src/org/apache/hdt/ui/test/Activator.java   |   67 +
 .../src/org/apache/hdt/ui/test/AllTests.java    |   21 +
 .../org/apache/hdt/ui/test/hdfs/HDFSTests.java  |   20 +
 .../org/apache/hdt/ui/test/hdfs/ModelTests.java |   22 +
 org.apache.hdt.ui/.classpath                    |    7 +
 org.apache.hdt.ui/.project                      |   28 +
 .../.settings/org.eclipse.jdt.core.prefs        |  276 ++++
 .../.settings/org.eclipse.jdt.ui.prefs          |    6 +
 org.apache.hdt.ui/META-INF/MANIFEST.MF          |   19 +
 org.apache.hdt.ui/NOTICE.txt                    |    2 +
 org.apache.hdt.ui/build.properties              |    9 +
 org.apache.hdt.ui/plugin.xml                    |  424 ++++++
 .../src/org/apache/hdt/ui/Activator.java        |  135 ++
 .../hdt/ui/internal/HadoopAdapterFactory.java   |   30 +
 .../internal/HadoopCommonContentProvider.java   |  182 +++
 .../hdt/ui/internal/HadoopLabelProvider.java    |  177 +++
 .../ui/internal/HadoopPerspectiveFactory.java   |   40 +
 .../hdt/ui/internal/HadoopServersView.java      |   29 +
 .../hdfs/DiscardDownloadResourceAction.java     |  132 ++
 .../hdt/ui/internal/hdfs/DisconnectAction.java  |   88 ++
 .../internal/hdfs/DownloadResourceAction.java   |  124 ++
 .../hdfs/HDFSCommonContentProvider.java         |  188 +++
 .../hdfs/HDFSFileStorePropertySource.java       |  177 +++
 .../hdt/ui/internal/hdfs/HDFSLabelProvider.java |  153 ++
 .../hdfs/HDFSLightweightLabelDecorator.java     |  163 +++
 .../ui/internal/hdfs/HDFSPropertySection.java   |  182 +++
 .../ui/internal/hdfs/NewHDFSServerAction.java   |   62 +
 .../internal/hdfs/NewHDFSServerWizardPage.java  |  242 +++
 .../hdt/ui/internal/hdfs/NewHDFSWizard.java     |   96 ++
 .../ui/internal/hdfs/PropertyTypeMapper.java    |   29 +
 .../hdt/ui/internal/hdfs/ReconnectAction.java   |   88 ++
 .../ui/internal/hdfs/UploadResourceAction.java  |  123 ++
 .../hdt/ui/internal/zookeeper/DeleteAction.java |  119 ++
 .../ui/internal/zookeeper/DisconnectAction.java |   95 ++
 .../zookeeper/NewZooKeeperServerAction.java     |   62 +
 .../zookeeper/NewZooKeeperServerWizardPage.java |  129 ++
 .../internal/zookeeper/NewZooKeeperWizard.java  |   81 +
 .../hdt/ui/internal/zookeeper/OpenAction.java   |  102 ++
 .../ui/internal/zookeeper/ReconnectAction.java  |   95 ++
 .../ui/internal/zookeeper/RefreshAction.java    |   85 ++
 .../internal/zookeeper/ZNodePropertySource.java |  226 +++
 .../ZooKeeperCommonContentProvider.java         |  187 +++
 .../zookeeper/ZooKeeperLabelProvider.java       |   87 ++
 .../ZooKeeperLightweightLabelDecorator.java     |   67 +
 .../zookeeper/ZooKeeperNodeEditorInput.java     |  126 ++
 .../zookeeper/ZooKeeperNodeStorage.java         |   94 ++
 org.apache.hdt.updateSite/.project              |   17 +
 org.apache.hdt.updateSite/site.xml              |    7 +
 115 files changed, 16744 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/.classpath
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/.classpath b/org.apache.hdt.core/.classpath
new file mode 100644
index 0000000..c5158db
--- /dev/null
+++ b/org.apache.hdt.core/.classpath
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<classpath>
+	<classpathentry kind="src" path="src"/>
+	<classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER"/>
+	<classpathentry kind="con" path="org.eclipse.pde.core.requiredPlugins"/>
+	<classpathentry exported="true" kind="lib" path="lib/log4j/log4j-1.2.17.jar"/>
+	<classpathentry kind="output" path="bin"/>
+</classpath>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/.project
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/.project b/org.apache.hdt.core/.project
new file mode 100644
index 0000000..bb03cc0
--- /dev/null
+++ b/org.apache.hdt.core/.project
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<projectDescription>
+	<name>org.apache.hdt.core</name>
+	<comment></comment>
+	<projects>
+	</projects>
+	<buildSpec>
+		<buildCommand>
+			<name>org.eclipse.jdt.core.javabuilder</name>
+			<arguments>
+			</arguments>
+		</buildCommand>
+		<buildCommand>
+			<name>org.eclipse.pde.ManifestBuilder</name>
+			<arguments>
+			</arguments>
+		</buildCommand>
+		<buildCommand>
+			<name>org.eclipse.pde.SchemaBuilder</name>
+			<arguments>
+			</arguments>
+		</buildCommand>
+	</buildSpec>
+	<natures>
+		<nature>org.eclipse.pde.PluginNature</nature>
+		<nature>org.eclipse.jdt.core.javanature</nature>
+	</natures>
+</projectDescription>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/.settings/org.eclipse.core.resources.prefs
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/.settings/org.eclipse.core.resources.prefs b/org.apache.hdt.core/.settings/org.eclipse.core.resources.prefs
new file mode 100644
index 0000000..fc54be2
--- /dev/null
+++ b/org.apache.hdt.core/.settings/org.eclipse.core.resources.prefs
@@ -0,0 +1,3 @@
+#Wed Mar 13 23:19:03 PDT 2013
+eclipse.preferences.version=1
+encoding//models/Hadoop.ecore=UTF-8

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/.settings/org.eclipse.jdt.core.prefs
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/.settings/org.eclipse.jdt.core.prefs b/org.apache.hdt.core/.settings/org.eclipse.jdt.core.prefs
new file mode 100644
index 0000000..9e90209
--- /dev/null
+++ b/org.apache.hdt.core/.settings/org.eclipse.jdt.core.prefs
@@ -0,0 +1,276 @@
+#Mon Mar 18 00:41:50 PDT 2013
+eclipse.preferences.version=1
+org.eclipse.jdt.core.compiler.codegen.inlineJsrBytecode=enabled
+org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.6
+org.eclipse.jdt.core.compiler.compliance=1.6
+org.eclipse.jdt.core.compiler.problem.assertIdentifier=error
+org.eclipse.jdt.core.compiler.problem.enumIdentifier=error
+org.eclipse.jdt.core.compiler.source=1.6
+org.eclipse.jdt.core.formatter.align_type_members_on_columns=false
+org.eclipse.jdt.core.formatter.alignment_for_arguments_in_allocation_expression=16
+org.eclipse.jdt.core.formatter.alignment_for_arguments_in_annotation=0
+org.eclipse.jdt.core.formatter.alignment_for_arguments_in_enum_constant=16
+org.eclipse.jdt.core.formatter.alignment_for_arguments_in_explicit_constructor_call=16
+org.eclipse.jdt.core.formatter.alignment_for_arguments_in_method_invocation=16
+org.eclipse.jdt.core.formatter.alignment_for_arguments_in_qualified_allocation_expression=16
+org.eclipse.jdt.core.formatter.alignment_for_assignment=0
+org.eclipse.jdt.core.formatter.alignment_for_binary_expression=16
+org.eclipse.jdt.core.formatter.alignment_for_compact_if=16
+org.eclipse.jdt.core.formatter.alignment_for_conditional_expression=80
+org.eclipse.jdt.core.formatter.alignment_for_enum_constants=0
+org.eclipse.jdt.core.formatter.alignment_for_expressions_in_array_initializer=16
+org.eclipse.jdt.core.formatter.alignment_for_method_declaration=0
+org.eclipse.jdt.core.formatter.alignment_for_multiple_fields=16
+org.eclipse.jdt.core.formatter.alignment_for_parameters_in_constructor_declaration=16
+org.eclipse.jdt.core.formatter.alignment_for_parameters_in_method_declaration=16
+org.eclipse.jdt.core.formatter.alignment_for_selector_in_method_invocation=16
+org.eclipse.jdt.core.formatter.alignment_for_superclass_in_type_declaration=16
+org.eclipse.jdt.core.formatter.alignment_for_superinterfaces_in_enum_declaration=16
+org.eclipse.jdt.core.formatter.alignment_for_superinterfaces_in_type_declaration=16
+org.eclipse.jdt.core.formatter.alignment_for_throws_clause_in_constructor_declaration=16
+org.eclipse.jdt.core.formatter.alignment_for_throws_clause_in_method_declaration=16
+org.eclipse.jdt.core.formatter.blank_lines_after_imports=1
+org.eclipse.jdt.core.formatter.blank_lines_after_package=1
+org.eclipse.jdt.core.formatter.blank_lines_before_field=0
+org.eclipse.jdt.core.formatter.blank_lines_before_first_class_body_declaration=0
+org.eclipse.jdt.core.formatter.blank_lines_before_imports=1
+org.eclipse.jdt.core.formatter.blank_lines_before_member_type=1
+org.eclipse.jdt.core.formatter.blank_lines_before_method=1
+org.eclipse.jdt.core.formatter.blank_lines_before_new_chunk=1
+org.eclipse.jdt.core.formatter.blank_lines_before_package=0
+org.eclipse.jdt.core.formatter.blank_lines_between_import_groups=1
+org.eclipse.jdt.core.formatter.blank_lines_between_type_declarations=1
+org.eclipse.jdt.core.formatter.brace_position_for_annotation_type_declaration=end_of_line
+org.eclipse.jdt.core.formatter.brace_position_for_anonymous_type_declaration=end_of_line
+org.eclipse.jdt.core.formatter.brace_position_for_array_initializer=end_of_line
+org.eclipse.jdt.core.formatter.brace_position_for_block=end_of_line
+org.eclipse.jdt.core.formatter.brace_position_for_block_in_case=end_of_line
+org.eclipse.jdt.core.formatter.brace_position_for_constructor_declaration=end_of_line
+org.eclipse.jdt.core.formatter.brace_position_for_enum_constant=end_of_line
+org.eclipse.jdt.core.formatter.brace_position_for_enum_declaration=end_of_line
+org.eclipse.jdt.core.formatter.brace_position_for_method_declaration=end_of_line
+org.eclipse.jdt.core.formatter.brace_position_for_switch=end_of_line
+org.eclipse.jdt.core.formatter.brace_position_for_type_declaration=end_of_line
+org.eclipse.jdt.core.formatter.comment.clear_blank_lines_in_block_comment=false
+org.eclipse.jdt.core.formatter.comment.clear_blank_lines_in_javadoc_comment=false
+org.eclipse.jdt.core.formatter.comment.format_block_comments=true
+org.eclipse.jdt.core.formatter.comment.format_header=false
+org.eclipse.jdt.core.formatter.comment.format_html=true
+org.eclipse.jdt.core.formatter.comment.format_javadoc_comments=true
+org.eclipse.jdt.core.formatter.comment.format_line_comments=true
+org.eclipse.jdt.core.formatter.comment.format_source_code=true
+org.eclipse.jdt.core.formatter.comment.indent_parameter_description=true
+org.eclipse.jdt.core.formatter.comment.indent_root_tags=true
+org.eclipse.jdt.core.formatter.comment.insert_new_line_before_root_tags=insert
+org.eclipse.jdt.core.formatter.comment.insert_new_line_for_parameter=insert
+org.eclipse.jdt.core.formatter.comment.line_length=80
+org.eclipse.jdt.core.formatter.comment.new_lines_at_block_boundaries=true
+org.eclipse.jdt.core.formatter.comment.new_lines_at_javadoc_boundaries=true
+org.eclipse.jdt.core.formatter.compact_else_if=true
+org.eclipse.jdt.core.formatter.continuation_indentation=2
+org.eclipse.jdt.core.formatter.continuation_indentation_for_array_initializer=2
+org.eclipse.jdt.core.formatter.disabling_tag=@formatter\:off
+org.eclipse.jdt.core.formatter.enabling_tag=@formatter\:on
+org.eclipse.jdt.core.formatter.format_guardian_clause_on_one_line=false
+org.eclipse.jdt.core.formatter.format_line_comment_starting_on_first_column=true
+org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_annotation_declaration_header=true
+org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_enum_constant_header=true
+org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_enum_declaration_header=true
+org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_type_header=true
+org.eclipse.jdt.core.formatter.indent_breaks_compare_to_cases=true
+org.eclipse.jdt.core.formatter.indent_empty_lines=false
+org.eclipse.jdt.core.formatter.indent_statements_compare_to_block=true
+org.eclipse.jdt.core.formatter.indent_statements_compare_to_body=true
+org.eclipse.jdt.core.formatter.indent_switchstatements_compare_to_cases=true
+org.eclipse.jdt.core.formatter.indent_switchstatements_compare_to_switch=false
+org.eclipse.jdt.core.formatter.indentation.size=4
+org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_local_variable=insert
+org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_member=insert
+org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_parameter=do not insert
+org.eclipse.jdt.core.formatter.insert_new_line_after_label=do not insert
+org.eclipse.jdt.core.formatter.insert_new_line_after_opening_brace_in_array_initializer=do not insert
+org.eclipse.jdt.core.formatter.insert_new_line_at_end_of_file_if_missing=do not insert
+org.eclipse.jdt.core.formatter.insert_new_line_before_catch_in_try_statement=do not insert
+org.eclipse.jdt.core.formatter.insert_new_line_before_closing_brace_in_array_initializer=do not insert
+org.eclipse.jdt.core.formatter.insert_new_line_before_else_in_if_statement=do not insert
+org.eclipse.jdt.core.formatter.insert_new_line_before_finally_in_try_statement=do not insert
+org.eclipse.jdt.core.formatter.insert_new_line_before_while_in_do_statement=do not insert
+org.eclipse.jdt.core.formatter.insert_new_line_in_empty_annotation_declaration=insert
+org.eclipse.jdt.core.formatter.insert_new_line_in_empty_anonymous_type_declaration=insert
+org.eclipse.jdt.core.formatter.insert_new_line_in_empty_block=insert
+org.eclipse.jdt.core.formatter.insert_new_line_in_empty_enum_constant=insert
+org.eclipse.jdt.core.formatter.insert_new_line_in_empty_enum_declaration=insert
+org.eclipse.jdt.core.formatter.insert_new_line_in_empty_method_body=insert
+org.eclipse.jdt.core.formatter.insert_new_line_in_empty_type_declaration=insert
+org.eclipse.jdt.core.formatter.insert_space_after_and_in_type_parameter=insert
+org.eclipse.jdt.core.formatter.insert_space_after_assignment_operator=insert
+org.eclipse.jdt.core.formatter.insert_space_after_at_in_annotation=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_at_in_annotation_type_declaration=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_binary_operator=insert
+org.eclipse.jdt.core.formatter.insert_space_after_closing_angle_bracket_in_type_arguments=insert
+org.eclipse.jdt.core.formatter.insert_space_after_closing_angle_bracket_in_type_parameters=insert
+org.eclipse.jdt.core.formatter.insert_space_after_closing_brace_in_block=insert
+org.eclipse.jdt.core.formatter.insert_space_after_closing_paren_in_cast=insert
+org.eclipse.jdt.core.formatter.insert_space_after_colon_in_assert=insert
+org.eclipse.jdt.core.formatter.insert_space_after_colon_in_case=insert
+org.eclipse.jdt.core.formatter.insert_space_after_colon_in_conditional=insert
+org.eclipse.jdt.core.formatter.insert_space_after_colon_in_for=insert
+org.eclipse.jdt.core.formatter.insert_space_after_colon_in_labeled_statement=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_allocation_expression=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_annotation=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_array_initializer=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_constructor_declaration_parameters=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_constructor_declaration_throws=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_enum_constant_arguments=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_enum_declarations=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_explicitconstructorcall_arguments=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_for_increments=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_for_inits=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_method_declaration_parameters=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_method_declaration_throws=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_method_invocation_arguments=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_multiple_field_declarations=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_multiple_local_declarations=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_parameterized_type_reference=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_superinterfaces=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_type_arguments=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_type_parameters=insert
+org.eclipse.jdt.core.formatter.insert_space_after_ellipsis=insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_angle_bracket_in_parameterized_type_reference=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_angle_bracket_in_type_arguments=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_angle_bracket_in_type_parameters=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_brace_in_array_initializer=insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_bracket_in_array_allocation_expression=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_bracket_in_array_reference=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_annotation=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_cast=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_catch=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_constructor_declaration=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_enum_constant=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_for=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_if=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_method_declaration=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_method_invocation=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_parenthesized_expression=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_switch=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_synchronized=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_while=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_postfix_operator=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_prefix_operator=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_question_in_conditional=insert
+org.eclipse.jdt.core.formatter.insert_space_after_question_in_wildcard=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_semicolon_in_for=insert
+org.eclipse.jdt.core.formatter.insert_space_after_unary_operator=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_and_in_type_parameter=insert
+org.eclipse.jdt.core.formatter.insert_space_before_assignment_operator=insert
+org.eclipse.jdt.core.formatter.insert_space_before_at_in_annotation_type_declaration=insert
+org.eclipse.jdt.core.formatter.insert_space_before_binary_operator=insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_angle_bracket_in_parameterized_type_reference=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_angle_bracket_in_type_arguments=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_angle_bracket_in_type_parameters=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_brace_in_array_initializer=insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_bracket_in_array_allocation_expression=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_bracket_in_array_reference=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_annotation=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_cast=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_catch=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_constructor_declaration=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_enum_constant=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_for=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_if=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_method_declaration=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_method_invocation=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_parenthesized_expression=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_switch=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_synchronized=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_while=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_colon_in_assert=insert
+org.eclipse.jdt.core.formatter.insert_space_before_colon_in_case=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_colon_in_conditional=insert
+org.eclipse.jdt.core.formatter.insert_space_before_colon_in_default=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_colon_in_for=insert
+org.eclipse.jdt.core.formatter.insert_space_before_colon_in_labeled_statement=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_allocation_expression=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_annotation=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_array_initializer=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_constructor_declaration_parameters=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_constructor_declaration_throws=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_enum_constant_arguments=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_enum_declarations=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_explicitconstructorcall_arguments=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_for_increments=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_for_inits=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_method_declaration_parameters=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_method_declaration_throws=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_method_invocation_arguments=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_multiple_field_declarations=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_multiple_local_declarations=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_parameterized_type_reference=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_superinterfaces=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_type_arguments=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_type_parameters=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_ellipsis=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_angle_bracket_in_parameterized_type_reference=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_angle_bracket_in_type_arguments=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_angle_bracket_in_type_parameters=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_annotation_type_declaration=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_anonymous_type_declaration=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_array_initializer=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_block=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_constructor_declaration=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_enum_constant=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_enum_declaration=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_method_declaration=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_switch=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_type_declaration=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_bracket_in_array_allocation_expression=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_bracket_in_array_reference=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_bracket_in_array_type_reference=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_annotation=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_annotation_type_member_declaration=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_catch=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_constructor_declaration=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_enum_constant=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_for=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_if=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_method_declaration=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_method_invocation=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_parenthesized_expression=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_switch=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_synchronized=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_while=insert
+org.eclipse.jdt.core.formatter.insert_space_before_parenthesized_expression_in_return=insert
+org.eclipse.jdt.core.formatter.insert_space_before_parenthesized_expression_in_throw=insert
+org.eclipse.jdt.core.formatter.insert_space_before_postfix_operator=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_prefix_operator=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_question_in_conditional=insert
+org.eclipse.jdt.core.formatter.insert_space_before_question_in_wildcard=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_semicolon=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_semicolon_in_for=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_unary_operator=do not insert
+org.eclipse.jdt.core.formatter.insert_space_between_brackets_in_array_type_reference=do not insert
+org.eclipse.jdt.core.formatter.insert_space_between_empty_braces_in_array_initializer=do not insert
+org.eclipse.jdt.core.formatter.insert_space_between_empty_brackets_in_array_allocation_expression=do not insert
+org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_annotation_type_member_declaration=do not insert
+org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_constructor_declaration=do not insert
+org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_enum_constant=do not insert
+org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_method_declaration=do not insert
+org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_method_invocation=do not insert
+org.eclipse.jdt.core.formatter.join_lines_in_comments=true
+org.eclipse.jdt.core.formatter.join_wrapped_lines=true
+org.eclipse.jdt.core.formatter.keep_else_statement_on_same_line=false
+org.eclipse.jdt.core.formatter.keep_empty_array_initializer_on_one_line=false
+org.eclipse.jdt.core.formatter.keep_imple_if_on_one_line=false
+org.eclipse.jdt.core.formatter.keep_then_statement_on_same_line=false
+org.eclipse.jdt.core.formatter.lineSplit=160
+org.eclipse.jdt.core.formatter.never_indent_block_comments_on_first_column=false
+org.eclipse.jdt.core.formatter.never_indent_line_comments_on_first_column=false
+org.eclipse.jdt.core.formatter.number_of_blank_lines_at_beginning_of_method_body=0
+org.eclipse.jdt.core.formatter.number_of_empty_lines_to_preserve=1
+org.eclipse.jdt.core.formatter.put_empty_statement_on_new_line=true
+org.eclipse.jdt.core.formatter.tabulation.char=tab
+org.eclipse.jdt.core.formatter.tabulation.size=4
+org.eclipse.jdt.core.formatter.use_on_off_tags=false
+org.eclipse.jdt.core.formatter.use_tabs_only_for_leading_indentations=false
+org.eclipse.jdt.core.formatter.wrap_before_binary_operator=true
+org.eclipse.jdt.core.formatter.wrap_outer_expressions_when_nested=false

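For reference, the profile above amounts to a 160-character line split, tab indentation, spaces inside array-initializer braces, and a space before the parentheses of control statements (if/for/while/switch/catch/synchronized) but not before those of method declarations or invocations. A small illustrative fragment (not part of this patch) showing those spacing rules:

public class FormatterSample {
	int sumPositive(int[] values) {
		int total = 0;
		for (int i = 0; i < values.length; i++) {
			if (values[i] > 0) {
				total += values[i];
			}
		}
		int[] defaults = new int[] { 1, 2, 3 };
		return total + defaults.length;
	}
}
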
http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/.settings/org.eclipse.jdt.ui.prefs
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/.settings/org.eclipse.jdt.ui.prefs b/org.apache.hdt.core/.settings/org.eclipse.jdt.ui.prefs
new file mode 100644
index 0000000..7223ecf
--- /dev/null
+++ b/org.apache.hdt.core/.settings/org.eclipse.jdt.ui.prefs
@@ -0,0 +1,6 @@
+#Thu Mar 21 01:21:51 PDT 2013
+eclipse.preferences.version=1
+formatter_profile=_Apache Hadoop Eclipse Format
+formatter_settings_version=11
+org.eclipse.jdt.ui.javadoc=true
+org.eclipse.jdt.ui.text.custom_code_templates=<?xml version\="1.0" encoding\="UTF-8" standalone\="no"?><templates><template autoinsert\="true" context\="gettercomment_context" deleted\="false" description\="Comment for getter method" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.gettercomment" name\="gettercomment">/**\n * @return the ${bare_field_name}\n */</template><template autoinsert\="true" context\="settercomment_context" deleted\="false" description\="Comment for setter method" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.settercomment" name\="settercomment">/**\n * @param ${param} the ${bare_field_name} to set\n */</template><template autoinsert\="true" context\="constructorcomment_context" deleted\="false" description\="Comment for created constructors" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.constructorcomment" name\="constructorcomment">/**\n * ${tags}\n */</template><template autoinsert\="false" context\="filecomment_context
 " deleted\="false" description\="Comment for created Java files" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.filecomment" name\="filecomment">/**\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * "License"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http\://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an "AS IS" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n</template><tem
 plate autoinsert\="false" context\="typecomment_context" deleted\="false" description\="Comment for created types" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.typecomment" name\="typecomment">/**\n * @author Srimanth Gunturi\n *\n * ${tags}\n */</template><template autoinsert\="true" context\="fieldcomment_context" deleted\="false" description\="Comment for fields" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.fieldcomment" name\="fieldcomment">/**\n * \n */</template><template autoinsert\="true" context\="methodcomment_context" deleted\="false" description\="Comment for non-overriding methods" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.methodcomment" name\="methodcomment">/**\n * ${tags}\n */</template><template autoinsert\="true" context\="overridecomment_context" deleted\="false" description\="Comment for overriding methods" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.overridecomment" name\="overridecomment">/* (non-Javad
 oc)\n * ${see_to_overridden}\n */</template><template autoinsert\="true" context\="delegatecomment_context" deleted\="false" description\="Comment for delegate methods" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.delegatecomment" name\="delegatecomment">/**\n * ${tags}\n * ${see_to_target}\n */</template><template autoinsert\="true" context\="newtype_context" deleted\="false" description\="Newly created files" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.newtype" name\="newtype">${filecomment}\n${package_declaration}\n\n${typecomment}\n${type_declaration}</template><template autoinsert\="true" context\="classbody_context" deleted\="false" description\="Code in new class type bodies" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.classbody" name\="classbody">\n</template><template autoinsert\="true" context\="interfacebody_context" deleted\="false" description\="Code in new interface type bodies" enabled\="true" id\="org.eclipse.jdt.ui.text.co
 detemplates.interfacebody" name\="interfacebody">\n</template><template autoinsert\="true" context\="enumbody_context" deleted\="false" description\="Code in new enum type bodies" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.enumbody" name\="enumbody">\n</template><template autoinsert\="true" context\="annotationbody_context" deleted\="false" description\="Code in new annotation type bodies" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.annotationbody" name\="annotationbody">\n</template><template autoinsert\="true" context\="catchblock_context" deleted\="false" description\="Code in new catch blocks" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.catchblock" name\="catchblock">// ${todo} Auto-generated catch block\n${exception_var}.printStackTrace();</template><template autoinsert\="true" context\="methodbody_context" deleted\="false" description\="Code in created method stubs" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.methodb
 ody" name\="methodbody">// ${todo} Auto-generated method stub\n${body_statement}</template><template autoinsert\="true" context\="constructorbody_context" deleted\="false" description\="Code in created constructor stubs" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.constructorbody" name\="constructorbody">${body_statement}\n// ${todo} Auto-generated constructor stub</template><template autoinsert\="true" context\="getterbody_context" deleted\="false" description\="Code in created getters" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.getterbody" name\="getterbody">return ${field};</template><template autoinsert\="true" context\="setterbody_context" deleted\="false" description\="Code in created setters" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.setterbody" name\="setterbody">${field} \= ${param};</template></templates>

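The templates above generate the boilerplate seen in the new sources later in this patch: the filecomment template inserts the ASF license header and the typecomment template adds an @author tag. Roughly, a type created through the newtype template comes out as follows (package and class name are hypothetical):

/* (ASF license header inserted here by the "filecomment" template) */
package org.apache.hdt.example; // hypothetical package

/**
 * @author Srimanth Gunturi
 *
 */
public class ExampleType {

}
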
http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/META-INF/MANIFEST.MF
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/META-INF/MANIFEST.MF b/org.apache.hdt.core/META-INF/MANIFEST.MF
new file mode 100644
index 0000000..c3d206a
--- /dev/null
+++ b/org.apache.hdt.core/META-INF/MANIFEST.MF
@@ -0,0 +1,27 @@
+Manifest-Version: 1.0
+Bundle-ManifestVersion: 2
+Bundle-Name: Apache Hadoop Eclipse Plugin
+Bundle-SymbolicName: org.apache.hdt.core;singleton:=true
+Bundle-Version: 1.0.0.qualifier
+Bundle-Activator: org.apache.hdt.core.Activator
+Require-Bundle: org.eclipse.core.runtime,
+ org.eclipse.core.filesystem;bundle-version="1.3.0";visibility:=reexport,
+ org.eclipse.core.resources;bundle-version="3.6.0",
+ org.eclipse.emf.ecore;bundle-version="2.6.1";visibility:=reexport,
+ org.eclipse.team.core;bundle-version="3.5.100"
+Bundle-RequiredExecutionEnvironment: JavaSE-1.6
+Bundle-Vendor: Apache Hadoop
+Bundle-ClassPath: lib/log4j/log4j-1.2.17.jar,
+ .
+Export-Package: org.apache.hdt.core,
+ org.apache.hdt.core.hdfs,
+ org.apache.hdt.core.internal,
+ org.apache.hdt.core.internal.hdfs;x-friends:="org.apache.hdt.ui",
+ org.apache.hdt.core.internal.model,
+ org.apache.hdt.core.internal.model.impl,
+ org.apache.hdt.core.internal.model.util,
+ org.apache.hdt.core.internal.zookeeper,
+ org.apache.hdt.core.zookeeper,
+ org.apache.log4j,
+ org.apache.log4j.config
+Bundle-ActivationPolicy: lazy

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/build.properties
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/build.properties b/org.apache.hdt.core/build.properties
new file mode 100644
index 0000000..f06f871
--- /dev/null
+++ b/org.apache.hdt.core/build.properties
@@ -0,0 +1,9 @@
+source.. = src/
+output.. = bin/
+bin.includes = META-INF/,\
+               .,\
+               plugin.xml,\
+               lib/log4j/log4j-1.2.17.jar
+src.includes = src/,\
+               schema/,\
+               models/

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/models/Hadoop.ecore
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/models/Hadoop.ecore b/org.apache.hdt.core/models/Hadoop.ecore
new file mode 100644
index 0000000..680666b
--- /dev/null
+++ b/org.apache.hdt.core/models/Hadoop.ecore
@@ -0,0 +1,92 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<ecore:EPackage xmi:version="2.0"
+    xmlns:xmi="http://www.omg.org/XMI" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+    xmlns:ecore="http://www.eclipse.org/emf/2002/Ecore" name="model"
+    nsURI="http://hadoop/1.0" nsPrefix="model">
+  <eClassifiers xsi:type="ecore:EClass" name="HDFSServer" eSuperTypes="#//Server">
+    <eStructuralFeatures xsi:type="ecore:EAttribute" name="loaded" eType="ecore:EDataType http://www.eclipse.org/emf/2002/Ecore#//EBoolean"/>
+    <eStructuralFeatures xsi:type="ecore:EAttribute" name="operationURIs" upperBound="-1"
+        eType="ecore:EDataType http://www.eclipse.org/emf/2002/Ecore#//EString" transient="true">
+      <eAnnotations source="http://www.eclipse.org/emf/2002/GenModel" references="#//HDFSServer/operationURIs/%http:%2F%2Fwww.eclipse.org%2Femf%2F2002%2FGenModel%">
+        <details key="documentation" value="List of HDFS uris where operations are being performed."/>
+      </eAnnotations>
+    </eStructuralFeatures>
+    <eStructuralFeatures xsi:type="ecore:EAttribute" name="userId" eType="ecore:EDataType http://www.eclipse.org/emf/2002/Ecore#//EString"/>
+    <eStructuralFeatures xsi:type="ecore:EAttribute" name="groupIds" upperBound="-1"
+        eType="ecore:EDataType http://www.eclipse.org/emf/2002/Ecore#//EString"/>
+  </eClassifiers>
+  <eClassifiers xsi:type="ecore:EClass" name="Servers">
+    <eStructuralFeatures xsi:type="ecore:EReference" name="hdfsServers" upperBound="-1"
+        eType="#//HDFSServer" containment="true"/>
+    <eStructuralFeatures xsi:type="ecore:EAttribute" name="version" eType="ecore:EDataType http://www.eclipse.org/emf/2002/Ecore#//EString"
+        defaultValueLiteral="1.0.0.0"/>
+    <eStructuralFeatures xsi:type="ecore:EReference" name="zookeeperServers" upperBound="-1"
+        eType="#//ZooKeeperServer" containment="true"/>
+  </eClassifiers>
+  <eClassifiers xsi:type="ecore:EEnum" name="ServerStatus">
+    <eLiterals name="NO_PROJECT" value="1"/>
+    <eLiterals name="DISCONNECTED" value="2"/>
+    <eLiterals name="CONNECTED" value="3"/>
+  </eClassifiers>
+  <eClassifiers xsi:type="ecore:EClass" name="Server" abstract="true">
+    <eStructuralFeatures xsi:type="ecore:EAttribute" name="name" eType="ecore:EDataType http://www.eclipse.org/emf/2002/Ecore#//EString"
+        defaultValueLiteral=""/>
+    <eStructuralFeatures xsi:type="ecore:EAttribute" name="uri" eType="ecore:EDataType http://www.eclipse.org/emf/2002/Ecore#//EString">
+      <eAnnotations source="http://www.eclipse.org/emf/2002/GenModel" references="#//Server/uri/%http:%2F%2Fwww.eclipse.org%2Femf%2F2002%2FGenModel%">
+        <details key="documentation" value="This is URI location for the HDFS server. Ex: hdfs://hdfs.server.hostname/path."/>
+      </eAnnotations>
+    </eStructuralFeatures>
+    <eStructuralFeatures xsi:type="ecore:EAttribute" name="statusCode" eType="ecore:EDataType http://www.eclipse.org/emf/2002/Ecore#//EInt"
+        transient="true" defaultValueLiteral="0">
+      <eAnnotations source="http://www.eclipse.org/emf/2002/GenModel" references="#//Server/statusCode/%http:%2F%2Fwww.eclipse.org%2Femf%2F2002%2FGenModel%">
+        <details key="documentation" value="Indicates the status of this server. Values could be from HTTP response codes to indicate server status."/>
+      </eAnnotations>
+    </eStructuralFeatures>
+    <eStructuralFeatures xsi:type="ecore:EAttribute" name="statusMessage" eType="ecore:EDataType http://www.eclipse.org/emf/2002/Ecore#//EString"/>
+    <eStructuralFeatures xsi:type="ecore:EAttribute" name="lastAccessed" eType="ecore:EDataType http://www.eclipse.org/emf/2002/Ecore#//ELong"
+        defaultValueLiteral="-1"/>
+  </eClassifiers>
+  <eClassifiers xsi:type="ecore:EClass" name="ZooKeeperServer" eSuperTypes="#//Server #//ZNode"/>
+  <eClassifiers xsi:type="ecore:EClass" name="ZNode">
+    <eOperations name="getPath" eType="ecore:EDataType http://www.eclipse.org/emf/2002/Ecore#//EString">
+      <eAnnotations source="http://www.eclipse.org/emf/2002/GenModel">
+        <details key="body" value="if (this instanceof org.apache.hdt.core.internal.model.ZooKeeperServer)&#xA;&#x9;return &quot;/&quot;;&#xA;else {&#xA;&#x9;String parentPath = getParent().getPath();&#xA;&#x9;return parentPath.endsWith(&quot;/&quot;) ? parentPath + getNodeName() : parentPath + &quot;/&quot; + getNodeName();&#xA;}"/>
+      </eAnnotations>
+    </eOperations>
+    <eOperations name="getServer" eType="#//ZooKeeperServer">
+      <eAnnotations source="http://www.eclipse.org/emf/2002/GenModel">
+        <details key="body" value="if(this instanceof org.apache.hdt.core.internal.model.ZooKeeperServer)&#xA;&#x9;&#x9;&#x9;return (org.apache.hdt.core.internal.model.ZooKeeperServer) this;&#xA;&#x9;&#x9;else&#xA;&#x9;&#x9;&#x9;return getParent().getServer();"/>
+      </eAnnotations>
+    </eOperations>
+    <eStructuralFeatures xsi:type="ecore:EReference" name="children" upperBound="-1"
+        eType="#//ZNode" transient="true" containment="true"/>
+    <eStructuralFeatures xsi:type="ecore:EAttribute" name="lastRefresh" eType="ecore:EDataType http://www.eclipse.org/emf/2002/Ecore#//ELong"
+        defaultValueLiteral="-1"/>
+    <eStructuralFeatures xsi:type="ecore:EAttribute" name="refreshing" eType="ecore:EDataType http://www.eclipse.org/emf/2002/Ecore#//EBoolean"/>
+    <eStructuralFeatures xsi:type="ecore:EAttribute" name="ephermeral" eType="ecore:EDataType http://www.eclipse.org/emf/2002/Ecore#//EBoolean"
+        transient="true"/>
+    <eStructuralFeatures xsi:type="ecore:EAttribute" name="creationId" eType="ecore:EDataType http://www.eclipse.org/emf/2002/Ecore#//ELong"
+        defaultValueLiteral="-1"/>
+    <eStructuralFeatures xsi:type="ecore:EAttribute" name="modifiedId" eType="ecore:EDataType http://www.eclipse.org/emf/2002/Ecore#//ELong"
+        defaultValueLiteral="-1"/>
+    <eStructuralFeatures xsi:type="ecore:EAttribute" name="creationTime" eType="ecore:EDataType http://www.eclipse.org/emf/2002/Ecore#//ELong"
+        defaultValueLiteral="-1"/>
+    <eStructuralFeatures xsi:type="ecore:EAttribute" name="modifiedTime" eType="ecore:EDataType http://www.eclipse.org/emf/2002/Ecore#//ELong"
+        defaultValueLiteral="-1"/>
+    <eStructuralFeatures xsi:type="ecore:EAttribute" name="version" eType="ecore:EDataType http://www.eclipse.org/emf/2002/Ecore#//EInt"
+        transient="true" defaultValueLiteral="-1"/>
+    <eStructuralFeatures xsi:type="ecore:EAttribute" name="childrenVersion" eType="ecore:EDataType http://www.eclipse.org/emf/2002/Ecore#//EInt"
+        transient="true" defaultValueLiteral="-1"/>
+    <eStructuralFeatures xsi:type="ecore:EAttribute" name="aclVersion" eType="ecore:EDataType http://www.eclipse.org/emf/2002/Ecore#//EInt"
+        defaultValueLiteral="-1"/>
+    <eStructuralFeatures xsi:type="ecore:EAttribute" name="ephermalOwnerSessionId"
+        eType="ecore:EDataType http://www.eclipse.org/emf/2002/Ecore#//ELong" defaultValueLiteral="-1"/>
+    <eStructuralFeatures xsi:type="ecore:EAttribute" name="dataLength" eType="ecore:EDataType http://www.eclipse.org/emf/2002/Ecore#//EInt"
+        defaultValueLiteral="-1"/>
+    <eStructuralFeatures xsi:type="ecore:EAttribute" name="childrenCount" eType="ecore:EDataType http://www.eclipse.org/emf/2002/Ecore#//EInt"
+        defaultValueLiteral="0"/>
+    <eStructuralFeatures xsi:type="ecore:EReference" name="parent" eType="#//ZNode"/>
+    <eStructuralFeatures xsi:type="ecore:EAttribute" name="nodeName" eType="ecore:EDataType http://www.eclipse.org/emf/2002/Ecore#//EString"/>
+    <eStructuralFeatures xsi:type="ecore:EAttribute" name="sequential" eType="ecore:EDataType http://www.eclipse.org/emf/2002/Ecore#//EBoolean"/>
+  </eClassifiers>
+</ecore:EPackage>

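This Ecore model is the source for the generated org.apache.hdt.core.internal.model packages exported in the manifest above. A sketch of building the model programmatically, assuming the usual EMF-generated names (createServers() is used by HadoopManager later in this patch; createHDFSServer(), setName(), setUri() and getHdfsServers() are the conventional generated counterparts of the classes and features declared above):

import org.apache.hdt.core.internal.model.HDFSServer;
import org.apache.hdt.core.internal.model.HadoopFactory;
import org.apache.hdt.core.internal.model.Servers;

public class ModelSample {
	Servers buildModel() {
		// Root container declared as the Servers EClass above.
		Servers servers = HadoopFactory.eINSTANCE.createServers();
		// HDFSServer extends Server, so name/uri come from the abstract Server EClass.
		HDFSServer hdfs = HadoopFactory.eINSTANCE.createHDFSServer(); // assumed generated factory method
		hdfs.setName("demo-cluster"); // placeholder values
		hdfs.setUri("hdfs://namenode.example.org/");
		servers.getHdfsServers().add(hdfs); // containment reference "hdfsServers"
		return servers;
	}
}
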
http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/models/Hadoop.genmodel
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/models/Hadoop.genmodel b/org.apache.hdt.core/models/Hadoop.genmodel
new file mode 100644
index 0000000..97586d4
--- /dev/null
+++ b/org.apache.hdt.core/models/Hadoop.genmodel
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<genmodel:GenModel xmi:version="2.0"
+    xmlns:xmi="http://www.omg.org/XMI" xmlns:ecore="http://www.eclipse.org/emf/2002/Ecore"
+    xmlns:genmodel="http://www.eclipse.org/emf/2002/GenModel" copyrightText="Licensed to the Apache Software Foundation (ASF) under one&#xA;or more contributor license agreements.  See the NOTICE file&#xA;distributed with this work for additional information&#xA;regarding copyright ownership.  The ASF licenses this file&#xA;to you under the Apache License, Version 2.0 (the&#xA;&quot;License&quot;); you may not use this file except in compliance&#xA;with the License.  You may obtain a copy of the License at&#xA;&#xA;    http://www.apache.org/licenses/LICENSE-2.0&#xA;&#xA;Unless required by applicable law or agreed to in writing, software&#xA;distributed under the License is distributed on an &quot;AS IS&quot; BASIS,&#xA;WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.&#xA;See the License for the specific language governing permissions and&#xA;limitations under the License.&#xA; "
+    modelDirectory="/org.apache.hdt.core/src" modelPluginID="org.apache.hdt.core"
+    modelName="Hadoop" importerID="org.eclipse.emf.importer.ecore" complianceLevel="6.0"
+    copyrightFields="false">
+  <foreignModel>Hadoop.ecore</foreignModel>
+  <genPackages prefix="Hadoop" basePackage="org.apache.hdt.core.internal" disposableProviderFactory="true"
+      ecorePackage="Hadoop.ecore#/">
+    <genEnums typeSafeEnumCompatible="false" ecoreEnum="Hadoop.ecore#//ServerStatus">
+      <genEnumLiterals ecoreEnumLiteral="Hadoop.ecore#//ServerStatus/NO_PROJECT"/>
+    </genEnums>
+    <genClasses image="false" ecoreClass="Hadoop.ecore#//HDFSServer">
+      <genFeatures createChild="false" ecoreFeature="ecore:EAttribute Hadoop.ecore#//HDFSServer/name"/>
+      <genFeatures createChild="false" ecoreFeature="ecore:EAttribute Hadoop.ecore#//HDFSServer/uri"/>
+      <genFeatures createChild="false" ecoreFeature="ecore:EAttribute Hadoop.ecore#//HDFSServer/statusCode"/>
+      <genFeatures createChild="false" ecoreFeature="ecore:EAttribute Hadoop.ecore#//HDFSServer/statusMessage"/>
+      <genFeatures createChild="false" ecoreFeature="ecore:EAttribute Hadoop.ecore#//HDFSServer/lastAccessed"/>
+      <genFeatures createChild="false" ecoreFeature="ecore:EAttribute Hadoop.ecore#//HDFSServer/loaded"/>
+      <genFeatures createChild="false" ecoreFeature="ecore:EAttribute Hadoop.ecore#//HDFSServer/workspaceProjectName"/>
+    </genClasses>
+    <genClasses ecoreClass="Hadoop.ecore#//Servers">
+      <genFeatures notify="false" createChild="false" propertySortChoices="true" ecoreFeature="ecore:EReference Hadoop.ecore#//Servers/hdfsServers"/>
+      <genFeatures createChild="false" ecoreFeature="ecore:EAttribute Hadoop.ecore#//Servers/version"/>
+    </genClasses>
+  </genPackages>
+</genmodel:GenModel>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/plugin.properties
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/plugin.properties b/org.apache.hdt.core/plugin.properties
new file mode 100644
index 0000000..5d65d5d
--- /dev/null
+++ b/org.apache.hdt.core/plugin.properties
@@ -0,0 +1,8 @@
+
+# <copyright>
+# </copyright>
+#
+# $Id$
+
+pluginName = Hadoop Model
+providerName = www.example.org

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/plugin.xml
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/plugin.xml b/org.apache.hdt.core/plugin.xml
new file mode 100644
index 0000000..82dcbec
--- /dev/null
+++ b/org.apache.hdt.core/plugin.xml
@@ -0,0 +1,43 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?eclipse version="3.4"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<plugin>
+   <extension-point id="org.apache.hdt.core.hdfsClient" name="Apache Hadoop HDFS Client" schema="schema/org.apache.hadoop.eclipse.hdfsclient.exsd"/>
+   <extension-point id="org.apache.hdt.core.zookeeperClient" name="Apache Hadoop ZooKeeper Client" schema="schema/org.apache.hadoop.eclipse.zookeeperClient.exsd"/>
+   <extension
+         id="org.apache.hadoop.hdfs.filesystem"
+         name="Apache Hadoop HDFS"
+         point="org.eclipse.core.filesystem.filesystems">
+      <filesystem
+            scheme="hdfs">
+         <run
+               class="org.apache.hdt.core.internal.hdfs.HDFSFileSystem">
+         </run>
+      </filesystem>
+   </extension>
+   <extension
+         point="org.eclipse.team.core.repository">
+      <repository
+            canImportId="org.apache.hadoop.hdfs"
+            class="org.apache.hdt.core.internal.hdfs.HDFSTeamRepositoryProvider"
+            fileSystemScheme="hdfs"
+            id="org.apache.hadoop.hdfs">
+      </repository>
+   </extension>
+
+</plugin>

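The filesystems extension above binds the hdfs scheme to HDFSFileSystem, so ordinary EFS calls resolve hdfs:// URIs; DownloadFileJob later in this patch relies on exactly that via EFS.getStore(). A minimal sketch (host and path are placeholders):

import java.net.URI;

import org.eclipse.core.filesystem.EFS;
import org.eclipse.core.filesystem.IFileStore;
import org.eclipse.core.runtime.CoreException;

public class HdfsEfsSample {
	IFileStore resolve() throws CoreException {
		// The "hdfs" scheme is routed to HDFSFileSystem by the extension above.
		URI uri = URI.create("hdfs://namenode.example.org/user/demo/file.txt");
		return EFS.getStore(uri);
	}
}
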
http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/schema/org.apache.hadoop.eclipse.hdfsclient.exsd
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/schema/org.apache.hadoop.eclipse.hdfsclient.exsd b/org.apache.hdt.core/schema/org.apache.hadoop.eclipse.hdfsclient.exsd
new file mode 100644
index 0000000..51da7df
--- /dev/null
+++ b/org.apache.hdt.core/schema/org.apache.hadoop.eclipse.hdfsclient.exsd
@@ -0,0 +1,118 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<!-- Schema file written by PDE -->
+<schema targetNamespace="org.apache.hdt.core" xmlns="http://www.w3.org/2001/XMLSchema">
+<annotation>
+      <appinfo>
+         <meta.schema plugin="org.apache.hdt.core" id="org.apache.hdt.core.hdfsClient" name="Apache Hadoop HDFS Client"/>
+      </appinfo>
+      <documentation>
+         [Enter description of this extension point.]
+      </documentation>
+   </annotation>
+
+   <element name="extension">
+      <annotation>
+         <appinfo>
+            <meta.element />
+         </appinfo>
+      </annotation>
+      <complexType>
+         <choice>
+            <sequence>
+               <element ref="hdfsClient" minOccurs="0" maxOccurs="unbounded"/>
+            </sequence>
+         </choice>
+         <attribute name="point" type="string" use="required">
+            <annotation>
+               <documentation>
+                  
+               </documentation>
+            </annotation>
+         </attribute>
+         <attribute name="id" type="string">
+            <annotation>
+               <documentation>
+                  
+               </documentation>
+            </annotation>
+         </attribute>
+         <attribute name="name" type="string">
+            <annotation>
+               <documentation>
+                  
+               </documentation>
+               <appinfo>
+                  <meta.attribute translatable="true"/>
+               </appinfo>
+            </annotation>
+         </attribute>
+      </complexType>
+   </element>
+
+   <element name="hdfsClient">
+      <complexType>
+         <attribute name="class" type="string" use="required">
+            <annotation>
+               <documentation>
+                  
+               </documentation>
+               <appinfo>
+                  <meta.attribute kind="java" basedOn="org.apache.hdt.core.hdfs.HDFSClient:"/>
+               </appinfo>
+            </annotation>
+         </attribute>
+         <attribute name="protocol" type="string" use="required">
+            <annotation>
+               <documentation>
+                  
+               </documentation>
+            </annotation>
+         </attribute>
+         <attribute name="protocolVersion" type="string" use="required">
+            <annotation>
+               <documentation>
+                  
+               </documentation>
+            </annotation>
+         </attribute>
+      </complexType>
+   </element>
+
+   <annotation>
+      <appinfo>
+         <meta.section type="since"/>
+      </appinfo>
+      <documentation>
+         [Enter the first release in which this extension point appears.]
+      </documentation>
+   </annotation>
+
+   <annotation>
+      <appinfo>
+         <meta.section type="examples"/>
+      </appinfo>
+      <documentation>
+         [Enter extension point usage example here.]
+      </documentation>
+   </annotation>
+
+   <annotation>
+      <appinfo>
+         <meta.section type="apiinfo"/>
+      </appinfo>
+      <documentation>
+         [Enter API information here.]
+      </documentation>
+   </annotation>
+
+   <annotation>
+      <appinfo>
+         <meta.section type="implementation"/>
+      </appinfo>
+      <documentation>
+         [Enter information about supplied implementation of this extension point.]
+      </documentation>
+   </annotation>
+
+
+</schema>

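The schema defines hdfsClient contributions carrying a class (an HDFSClient subclass), a protocol and a protocolVersion. The lookup code in HDT is not part of this hunk; the following is only a sketch of how such contributions could be discovered through the platform registry, assuming the point is addressed by the identifier declared above:

import org.apache.hdt.core.hdfs.HDFSClient;
import org.eclipse.core.runtime.CoreException;
import org.eclipse.core.runtime.IConfigurationElement;
import org.eclipse.core.runtime.Platform;

public class HdfsClientLookup {
	HDFSClient findClient(String protocol) throws CoreException {
		IConfigurationElement[] elements = Platform.getExtensionRegistry()
				.getConfigurationElementsFor("org.apache.hdt.core.hdfsClient"); // ID as declared above
		for (IConfigurationElement element : elements) {
			if (protocol.equals(element.getAttribute("protocol")))
				return (HDFSClient) element.createExecutableExtension("class");
		}
		return null;
	}
}
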
http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/schema/org.apache.hadoop.eclipse.zookeeperClient.exsd
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/schema/org.apache.hadoop.eclipse.zookeeperClient.exsd b/org.apache.hdt.core/schema/org.apache.hadoop.eclipse.zookeeperClient.exsd
new file mode 100644
index 0000000..b89a826
--- /dev/null
+++ b/org.apache.hdt.core/schema/org.apache.hadoop.eclipse.zookeeperClient.exsd
@@ -0,0 +1,111 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<!-- Schema file written by PDE -->
+<schema targetNamespace="org.apache.hdt.core" xmlns="http://www.w3.org/2001/XMLSchema">
+<annotation>
+      <appinfo>
+         <meta.schema plugin="org.apache.hdt.core" id="org.apache.hdt.core.zookeeperClient" name="Apache Hadoop ZooKeeper Client"/>
+      </appinfo>
+      <documentation>
+         [Enter description of this extension point.]
+      </documentation>
+   </annotation>
+
+   <element name="extension">
+      <annotation>
+         <appinfo>
+            <meta.element />
+         </appinfo>
+      </annotation>
+      <complexType>
+         <choice>
+            <sequence>
+               <element ref="zookeeperClient" minOccurs="0" maxOccurs="unbounded"/>
+            </sequence>
+         </choice>
+         <attribute name="point" type="string" use="required">
+            <annotation>
+               <documentation>
+                  
+               </documentation>
+            </annotation>
+         </attribute>
+         <attribute name="id" type="string">
+            <annotation>
+               <documentation>
+                  
+               </documentation>
+            </annotation>
+         </attribute>
+         <attribute name="name" type="string">
+            <annotation>
+               <documentation>
+                  
+               </documentation>
+               <appinfo>
+                  <meta.attribute translatable="true"/>
+               </appinfo>
+            </annotation>
+         </attribute>
+      </complexType>
+   </element>
+
+   <element name="zookeeperClient">
+      <complexType>
+         <attribute name="class" type="string" use="required">
+            <annotation>
+               <documentation>
+                  
+               </documentation>
+               <appinfo>
+                  <meta.attribute kind="java" basedOn="org.apache.hdt.core.zookeeper.ZooKeeperClient:"/>
+               </appinfo>
+            </annotation>
+         </attribute>
+         <attribute name="protocolVersion" type="string" use="required">
+            <annotation>
+               <documentation>
+                  
+               </documentation>
+            </annotation>
+         </attribute>
+      </complexType>
+   </element>
+
+   <annotation>
+      <appinfo>
+         <meta.section type="since"/>
+      </appinfo>
+      <documentation>
+         [Enter the first release in which this extension point appears.]
+      </documentation>
+   </annotation>
+
+   <annotation>
+      <appinfo>
+         <meta.section type="examples"/>
+      </appinfo>
+      <documentation>
+         [Enter extension point usage example here.]
+      </documentation>
+   </annotation>
+
+   <annotation>
+      <appinfo>
+         <meta.section type="apiinfo"/>
+      </appinfo>
+      <documentation>
+         [Enter API information here.]
+      </documentation>
+   </annotation>
+
+   <annotation>
+      <appinfo>
+         <meta.section type="implementation"/>
+      </appinfo>
+      <documentation>
+         [Enter information about supplied implementation of this extension point.]
+      </documentation>
+   </annotation>
+
+
+</schema>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/src/log4j.properties
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/log4j.properties b/org.apache.hdt.core/src/log4j.properties
new file mode 100644
index 0000000..4337f2a
--- /dev/null
+++ b/org.apache.hdt.core/src/log4j.properties
@@ -0,0 +1,17 @@
+log4j.rootLogger=WARN, A1
+log4j.appender.A1=org.apache.log4j.ConsoleAppender
+log4j.appender.A1.layout=org.apache.log4j.PatternLayout
+
+# Print the date in ISO 8601 format
+log4j.appender.A1.layout.ConversionPattern=%d [%t] %-5p %c - %m%n
+
+# Raise selected plugin loggers to DEBUG; everything else inherits the WARN root level.
+log4j.logger.org.apache.hadoop.eclipse.internal.HadoopCommonContentProvider=DEBUG
+#log4j.logger.org.apache.hadoop.eclipse.internal.hdfs=DEBUG
+#log4j.logger.org.apache.hadoop.eclipse.internal.hdfs.HDFSFileStore=DEBUG
+#log4j.logger.org.apache.hadoop.eclipse.internal.hdfs.HDFSManager=DEBUG
+#log4j.logger.org.apache.hadoop.eclipse.internal.hdfs.DownloadFileJob=DEBUG
+#log4j.logger.org.apache.hadoop.eclipse.internal.hdfs.UploadFileJob=DEBUG
+log4j.logger.org.apache.hadoop.eclipse.internal.zookeeper=DEBUG
+log4j.logger.org.apache.hadoop.eclipse.ui.internal.zookeeper=DEBUG
+log4j.logger.org.apache.hadoop.eclipse.release=DEBUG
\ No newline at end of file

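Log4J is embedded in the bundle (see Bundle-ClassPath and the org.apache.log4j exports in the manifest above); this configuration keeps the root logger at WARN while raising the listed categories to DEBUG. The sources below follow the usual per-class logger pattern, e.g.:

import org.apache.log4j.Logger;

public class LoggingSample {
	private static final Logger logger = Logger.getLogger(LoggingSample.class);

	void run() {
		logger.warn("printed: the root logger level is WARN");
		if (logger.isDebugEnabled()) // true only for categories raised to DEBUG above
			logger.debug("printed only when this class's category is set to DEBUG");
	}
}
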
http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/src/org/apache/hdt/core/Activator.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/Activator.java b/org.apache.hdt.core/src/org/apache/hdt/core/Activator.java
new file mode 100644
index 0000000..df59f32
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/Activator.java
@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.core;
+
+import org.apache.hdt.core.internal.HadoopManager;
+import org.apache.hdt.core.internal.model.impl.HadoopPackageImpl;
+import org.osgi.framework.BundleActivator;
+import org.osgi.framework.BundleContext;
+
+/**
+ * Bundle activator for org.apache.hdt.core; loads the Hadoop servers model on
+ * start and persists it on stop.
+ * 
+ * @author Srimanth Gunturi
+ */
+public class Activator implements BundleActivator {
+
+	public static final String BUNDLE_ID = "org.apache.hdt.core";
+	private static BundleContext context;
+
+	static BundleContext getContext() {
+		return context;
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.osgi.framework.BundleActivator#start(org.osgi.framework.BundleContext
+	 * )
+	 */
+	public void start(BundleContext bundleContext) throws Exception {
+		Activator.context = bundleContext;
+		HadoopPackageImpl.init();
+		HadoopManager.INSTANCE.getServers();
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.osgi.framework.BundleActivator#stop(org.osgi.framework.BundleContext)
+	 */
+	public void stop(BundleContext bundleContext) throws Exception {
+		HadoopManager.INSTANCE.saveServers();
+		Activator.context = null;
+	}
+
+}

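Because the manifest declares Bundle-ActivationPolicy: lazy, start() above (and the servers-model load it triggers) runs the first time a class from this bundle is loaded rather than at platform startup. A small sketch of checking that from another plugin (class name is hypothetical):

import org.apache.hdt.core.Activator;
import org.eclipse.core.runtime.Platform;
import org.osgi.framework.Bundle;

public class ActivationSample {
	boolean isCoreActive() {
		Bundle bundle = Platform.getBundle(Activator.BUNDLE_ID);
		return bundle != null && bundle.getState() == Bundle.ACTIVE;
	}
}
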
http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/src/org/apache/hdt/core/hdfs/HDFSClient.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/hdfs/HDFSClient.java b/org.apache.hdt.core/src/org/apache/hdt/core/hdfs/HDFSClient.java
new file mode 100644
index 0000000..651f0c7
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/hdfs/HDFSClient.java
@@ -0,0 +1,114 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.core.hdfs;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.net.URI;
+import java.util.List;
+
+/**
+ * Abstraction over an HDFS endpoint. Implementations are contributed through
+ * the hdfsClient extension point and perform every operation on behalf of the
+ * supplied user.
+ * 
+ * @author Srimanth Gunturi
+ */
+public abstract class HDFSClient {
+	/**
+	 * Provides the default userId and groupIds.
+	 * 
+	 * @return List of ids. First ID has to be of the user. Group IDs are
+	 *         optional.
+	 * @throws IOException
+	 * @throws InterruptedException 
+	 */
+	public abstract List<String> getDefaultUserAndGroupIds() throws IOException, InterruptedException;
+
+	/**
+	 * Fetches information about the resource at the given location.
+	 * 
+	 * @param uri
+	 *            location of the resource
+	 * @param user
+	 *            user to perform the operation as
+	 * @return information about the resource
+	 * @throws IOException
+	 * @throws InterruptedException
+	 */
+	public abstract ResourceInformation getResourceInformation(URI uri, String user) throws IOException, InterruptedException;
+
+	/**
+	 * Applies the given information (owner, group and permissions) to the
+	 * resource at the given location.
+	 * 
+	 * @param uri
+	 *            location of the resource
+	 * @param information
+	 *            information to apply
+	 * @param user
+	 *            user to perform the operation as
+	 * @throws IOException
+	 * @throws InterruptedException 
+	 */
+	public abstract void setResourceInformation(URI uri, ResourceInformation information, String user) throws IOException, InterruptedException;
+
+	/**
+	 * Lists the resources contained under the given location.
+	 * 
+	 * @param uri
+	 *            location of the parent resource
+	 * @param user
+	 *            user to perform the operation as
+	 * @return information about the child resources
+	 * @throws IOException
+	 * @throws InterruptedException 
+	 */
+	public abstract List<ResourceInformation> listResources(URI uri, String user) throws IOException, InterruptedException;
+
+	/**
+	 * Opens the resource at the given location for reading.
+	 * 
+	 * @param uri
+	 *            location of the resource
+	 * @param user
+	 *            user to perform the operation as
+	 * @return stream to read the resource contents from
+	 * @throws IOException
+	 * @throws InterruptedException 
+	 */
+	public abstract InputStream openInputStream(URI uri, String user) throws IOException, InterruptedException;
+
+	/**
+	 * Creates the folder at the given location, including any missing parents.
+	 * 
+	 * @param uri
+	 *            location of the folder to create
+	 * @param user
+	 *            user to perform the operation as
+	 * @return true if the folder was created
+	 * @throws IOException
+	 * @throws InterruptedException 
+	 */
+	public abstract boolean mkdirs(URI uri, String user) throws IOException, InterruptedException;
+
+	/**
+	 * Opens the existing resource at the given location for writing.
+	 * 
+	 * @param uri
+	 *            location of the resource
+	 * @param user
+	 *            user to perform the operation as
+	 * @return stream to write the resource contents to
+	 * @throws IOException
+	 * @throws InterruptedException 
+	 */
+	public abstract OutputStream openOutputStream(URI uri, String user) throws IOException, InterruptedException;
+
+	/**
+	 * Creates the resource at the given location and opens it for writing.
+	 * 
+	 * @param uri
+	 *            location of the resource to create
+	 * @param user
+	 *            user to perform the operation as
+	 * @return stream to write the resource contents to
+	 * @throws IOException
+	 * @throws InterruptedException 
+	 */
+	public abstract OutputStream createOutputStream(URI uri, String user) throws IOException, InterruptedException;
+
+	/**
+	 * Deletes the resource at the given location.
+	 * 
+	 * @param uri
+	 *            location of the resource to delete
+	 * @param user
+	 *            user to perform the operation as
+	 * @throws IOException
+	 * @throws InterruptedException 
+	 */
+	public abstract void delete(URI uri, String user) throws IOException, InterruptedException;
+}

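A subclass of this class is what the org.apache.hdt.core.hdfsClient extension point expects in its class attribute. A compile-only skeleton follows (illustrative; a real client would delegate these calls to the Hadoop file system API for its protocol):

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.URI;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import org.apache.hdt.core.hdfs.HDFSClient;
import org.apache.hdt.core.hdfs.ResourceInformation;

public class SkeletonHDFSClient extends HDFSClient {
	@Override
	public List<String> getDefaultUserAndGroupIds() throws IOException, InterruptedException {
		return Arrays.asList(System.getProperty("user.name")); // first entry must be the user id
	}

	@Override
	public ResourceInformation getResourceInformation(URI uri, String user) throws IOException, InterruptedException {
		throw new IOException("not implemented in this sketch");
	}

	@Override
	public void setResourceInformation(URI uri, ResourceInformation information, String user) throws IOException, InterruptedException {
	}

	@Override
	public List<ResourceInformation> listResources(URI uri, String user) throws IOException, InterruptedException {
		return new ArrayList<ResourceInformation>();
	}

	@Override
	public InputStream openInputStream(URI uri, String user) throws IOException, InterruptedException {
		throw new IOException("not implemented in this sketch");
	}

	@Override
	public boolean mkdirs(URI uri, String user) throws IOException, InterruptedException {
		return false;
	}

	@Override
	public OutputStream openOutputStream(URI uri, String user) throws IOException, InterruptedException {
		throw new IOException("not implemented in this sketch");
	}

	@Override
	public OutputStream createOutputStream(URI uri, String user) throws IOException, InterruptedException {
		throw new IOException("not implemented in this sketch");
	}

	@Override
	public void delete(URI uri, String user) throws IOException, InterruptedException {
	}
}
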
http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/src/org/apache/hdt/core/hdfs/ResourceInformation.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/hdfs/ResourceInformation.java b/org.apache.hdt.core/src/org/apache/hdt/core/hdfs/ResourceInformation.java
new file mode 100644
index 0000000..2206287
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/hdfs/ResourceInformation.java
@@ -0,0 +1,213 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.core.hdfs;
+
+import java.util.List;
+
+public class ResourceInformation {
+	public static class Permissions {
+		public boolean read = true;
+		public boolean write = true;
+		public boolean execute = true;
+
+		public Permissions() {
+		}
+
+		public Permissions(boolean read, boolean write, boolean execute) {
+			this.read = read;
+			this.write = write;
+			this.execute = execute;
+		}
+
+		public void copy(Permissions copyFrom) {
+			this.read = copyFrom.read;
+			this.write = copyFrom.write;
+			this.execute = copyFrom.execute;
+		}
+	}
+
+	private String name;
+	private String path;
+	private long lastModifiedTime = -1;
+	private long lastAccessedTime = -1;
+	private boolean isFolder;
+	private long size;
+	private short replicationFactor;
+	private String owner;
+	private String group;
+	private Permissions userPermissions = new Permissions();
+	private Permissions groupPermissions = new Permissions();
+	private Permissions otherPermissions = new Permissions();
+	private Permissions effectivePermissions = new Permissions();
+
+	/**
+	 * @return the name
+	 */
+	public String getName() {
+		return name;
+	}
+
+	/**
+	 * @param name
+	 *            the name to set
+	 */
+	public void setName(String name) {
+		this.name = name;
+	}
+
+	/**
+	 * @return the lastModifiedTime
+	 */
+	public long getLastModifiedTime() {
+		return lastModifiedTime;
+	}
+
+	/**
+	 * @param lastModifiedTime
+	 *            the lastModifiedTime to set
+	 */
+	public void setLastModifiedTime(long lastModifiedTime) {
+		this.lastModifiedTime = lastModifiedTime;
+	}
+
+	/**
+	 * @return the lastAccessedTime
+	 */
+	public long getLastAccessedTime() {
+		return lastAccessedTime;
+	}
+
+	/**
+	 * @param lastAccessedTime
+	 *            the lastAccessedTime to set
+	 */
+	public void setLastAccessedTime(long lastAccessedTime) {
+		this.lastAccessedTime = lastAccessedTime;
+	}
+
+	/**
+	 * @return the isFolder
+	 */
+	public boolean isFolder() {
+		return isFolder;
+	}
+
+	/**
+	 * @param isFolder
+	 *            the isFolder to set
+	 */
+	public void setFolder(boolean isFolder) {
+		this.isFolder = isFolder;
+	}
+
+	public void setSize(long size) {
+		this.size = size;
+	}
+
+	public long getSize() {
+		return size;
+	}
+
+	public void setPath(String path) {
+		this.path = path;
+	}
+
+	public String getPath() {
+		return path;
+	}
+
+	public void setReplicationFactor(short replicationFactor) {
+		this.replicationFactor = replicationFactor;
+	}
+
+	public short getReplicationFactor() {
+		return replicationFactor;
+	}
+
+	public void setOwner(String owner) {
+		this.owner = owner;
+	}
+
+	public String getOwner() {
+		return owner;
+	}
+
+	public void setGroup(String group) {
+		this.group = group;
+	}
+
+	public String getGroup() {
+		return group;
+	}
+
+	public void setUserPermissions(Permissions userPermissions) {
+		this.userPermissions = userPermissions;
+	}
+
+	public Permissions getUserPermissions() {
+		return userPermissions;
+	}
+
+	public void setGroupPermissions(Permissions groupPermissions) {
+		this.groupPermissions = groupPermissions;
+	}
+
+	public Permissions getGroupPermissions() {
+		return groupPermissions;
+	}
+
+	public void setOtherPermissions(Permissions otherPermissions) {
+		this.otherPermissions = otherPermissions;
+	}
+
+	public Permissions getOtherPermissions() {
+		return otherPermissions;
+	}
+
+	/**
+	 * Effective permissions of this resource for the caller.
+	 * 
+	 * @return {@link Permissions}
+	 */
+	public Permissions getEffectivePermissions() {
+		return effectivePermissions;
+	}
+
+	/**
+	 * Updates the effective permissions for the provided user and groups: the
+	 * owner's, group's or other permissions are copied into the value returned
+	 * by {@link #getEffectivePermissions()}.
+	 * 
+	 * @param user
+	 *            user to evaluate permissions for
+	 * @param groups
+	 *            groups the user belongs to
+	 */
+	public void updateEffectivePermissions(String user, List<String> groups) {
+		if (user != null) {
+			if (getOwner().equals(user)) {
+				// Owner permissions apply
+				this.effectivePermissions.copy(this.userPermissions);
+			} else if (groups!=null && groups.contains(getGroup())) {
+				// Group permissions apply
+				this.effectivePermissions.copy(this.groupPermissions);
+			} else {
+				// Other permissions apply
+				this.effectivePermissions.copy(this.otherPermissions);
+			}
+		}
+	}
+}
\ No newline at end of file

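Usage sketch for the permission model above: an owner match selects the user permissions, group membership selects the group permissions, otherwise the "other" permissions apply (names and values below are placeholders):

import java.util.Arrays;

import org.apache.hdt.core.hdfs.ResourceInformation;
import org.apache.hdt.core.hdfs.ResourceInformation.Permissions;

public class PermissionsSample {
	boolean canWrite() {
		ResourceInformation info = new ResourceInformation();
		info.setOwner("hdfs");
		info.setGroup("hadoop");
		info.setUserPermissions(new Permissions(true, true, false));
		info.setGroupPermissions(new Permissions(true, false, false));
		info.setOtherPermissions(new Permissions(false, false, false));
		// "alice" is not the owner but is in "hadoop", so group permissions apply.
		info.updateEffectivePermissions("alice", Arrays.asList("hadoop"));
		return info.getEffectivePermissions().write; // false
	}
}
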
http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/src/org/apache/hdt/core/internal/HadoopManager.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/HadoopManager.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/HadoopManager.java
new file mode 100644
index 0000000..937b171
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/HadoopManager.java
@@ -0,0 +1,83 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.core.internal;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Collections;
+
+import org.apache.hdt.core.Activator;
+import org.apache.hdt.core.internal.hdfs.HDFSManager;
+import org.apache.hdt.core.internal.model.HadoopFactory;
+import org.apache.hdt.core.internal.model.Servers;
+import org.apache.hdt.core.internal.zookeeper.ZooKeeperManager;
+import org.apache.log4j.Logger;
+import org.eclipse.core.runtime.Platform;
+import org.eclipse.emf.common.util.URI;
+import org.eclipse.emf.ecore.resource.Resource;
+import org.eclipse.emf.ecore.resource.impl.ResourceSetImpl;
+import org.osgi.framework.Bundle;
+
+/**
+ * Singleton that owns the persisted servers model (servers.xmi) and gives
+ * access to the configured HDFS and ZooKeeper servers.
+ * 
+ * @author Srimanth Gunturi
+ */
+public class HadoopManager {
+	private static final Logger logger = Logger.getLogger(HadoopManager.class);
+	private static final String MODEL_FILE_NAME = "servers.xmi";
+	public static HadoopManager INSTANCE = new HadoopManager();
+
+	private Servers servers = null;
+
+	private HadoopManager() {
+	}
+
+	public Servers getServers() {
+		if (servers == null) {
+			loadServers();
+			if (servers == null) {
+				Bundle bundle = Platform.getBundle(Activator.BUNDLE_ID);
+				File serversFile = bundle.getBundleContext().getDataFile(MODEL_FILE_NAME);
+				Resource resource = new ResourceSetImpl().createResource(URI.createFileURI(serversFile.getPath()));
+				servers = HadoopFactory.eINSTANCE.createServers();
+				resource.getContents().add(servers);
+			}
+		}
+		return servers;
+	}
+
+	private void loadServers() {
+		Bundle bundle = Platform.getBundle(Activator.BUNDLE_ID);
+		File serversFile = bundle.getBundleContext().getDataFile(MODEL_FILE_NAME);
+		if (serversFile.exists()) {
+			Resource resource = new ResourceSetImpl().getResource(URI.createFileURI(serversFile.getPath()), true);
+			servers = (Servers) resource.getContents().get(0);
+			HDFSManager.INSTANCE.loadServers();
+			ZooKeeperManager.INSTANCE.loadServers();
+		}
+	}
+
+	public void saveServers() {
+		try {
+			servers.eResource().save(Collections.EMPTY_MAP);
+		} catch (IOException e) {
+			logger.error("Unable to persist Hadoop servers model", e);
+		}
+	}
+}

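The servers model is reached through this singleton; getServers() lazily creates or loads servers.xmi in the bundle's state area, and saveServers() persists any changes back to it. A brief sketch:

import org.apache.hdt.core.internal.HadoopManager;
import org.apache.hdt.core.internal.model.Servers;

public class ServersSample {
	void touchModel() {
		// Lazily creates or loads servers.xmi in the bundle's state area.
		Servers servers = HadoopManager.INSTANCE.getServers();
		// ... mutate the model here (e.g. add an HDFSServer as sketched earlier) ...
		HadoopManager.INSTANCE.saveServers(); // persists the model back to servers.xmi
	}
}
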
http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/DownloadFileJob.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/DownloadFileJob.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/DownloadFileJob.java
new file mode 100644
index 0000000..cd40ab4
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/DownloadFileJob.java
@@ -0,0 +1,137 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hdt.core.internal.hdfs;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.URI;
+
+import org.apache.hdt.core.Activator;
+import org.apache.log4j.Logger;
+import org.eclipse.core.filesystem.EFS;
+import org.eclipse.core.filesystem.IFileInfo;
+import org.eclipse.core.resources.IResource;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.core.runtime.IProgressMonitor;
+import org.eclipse.core.runtime.IStatus;
+import org.eclipse.core.runtime.NullProgressMonitor;
+import org.eclipse.core.runtime.Status;
+import org.eclipse.core.runtime.jobs.Job;
+
+/**
+ * @author Srimanth Gunturi
+ * 
+ */
+public class DownloadFileJob extends Job {
+
+	private final static Logger logger = Logger.getLogger(DownloadFileJob.class);
+	private final HDFSFileStore store;
+	private final IResource resource;
+
+	/**
+	 * Creates a job which downloads the given HDFS-backed workspace resource
+	 * into its local cache file.
+	 * 
+	 * @param resource the workspace resource to download
+	 * @throws CoreException if the EFS store for the resource cannot be resolved
+	 */
+	public DownloadFileJob(IResource resource) throws CoreException {
+		super("Downloading " + resource.getLocationURI().toString());
+		this.resource = resource;
+		this.store = (HDFSFileStore) EFS.getStore(resource.getLocationURI());
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.core.resources.IWorkspaceRunnable#run(org.eclipse.core.runtime
+	 * .IProgressMonitor)
+	 */
+	@Override
+	public IStatus run(IProgressMonitor monitor) {
+		IStatus status = Status.OK_STATUS;
+		if (store != null) {
+			URI uri = store.toURI();
+			try {
+				File localFile = store.getLocalFile();
+				if (logger.isDebugEnabled())
+					logger.debug("[" + uri + "]: Downloading to " + (localFile == null ? "(null)" : localFile.toString()));
+				HDFSManager.INSTANCE.startServerOperation(uri.toString());
+				final IFileInfo serverInfo = store.fetchInfo();
+				if (serverInfo.exists()) {
+					monitor.beginTask("Downloading " + uri.toString(), (int) serverInfo.getLength());
+					if (!localFile.exists()) {
+						localFile.getParentFile().mkdirs();
+						localFile.createNewFile();
+					}
+					InputStream openInputStream = store.openRemoteInputStream(EFS.NONE, new NullProgressMonitor());
+					FileOutputStream fos = new FileOutputStream(localFile);
+					try {
+						if (!monitor.isCanceled()) {
+							byte[] data = new byte[8 * 1024];
+							int totalRead = 0;
+							int read = openInputStream.read(data);
+							while (read > -1) {
+								if (monitor.isCanceled())
+									throw new InterruptedException();
+								fos.write(data, 0, read);
+								totalRead += read;
+								monitor.worked(read);
+								read = openInputStream.read(data);
+								if (logger.isDebugEnabled())
+									logger.debug("Downloaded " + totalRead + " out of " + serverInfo.getLength() + " [" + (((float)totalRead*100.0f) / (float)serverInfo.getLength())
+											+ "]");
+							}
+						}
+					} catch (IOException e) {
+						throw e;
+					} catch (InterruptedException e) {
+						throw e;
+					} finally {
+						try {
+							openInputStream.close();
+						} catch (Throwable t) {
+						}
+						try {
+							fos.close();
+						} catch (Throwable t) {
+						}
+						monitor.done();
+					}
+				} else
+					throw new CoreException(new Status(IStatus.ERROR, Activator.BUNDLE_ID, "Server resource not found [" + uri + "]"));
+				resource.refreshLocal(IResource.DEPTH_ONE, new NullProgressMonitor());
+			} catch (InterruptedException e) {
+				logger.warn(e);
+			} catch (CoreException e) {
+				logger.warn(e);
+				status = e.getStatus();
+			} catch (FileNotFoundException e) {
+				logger.warn(e);
+				status = new Status(IStatus.ERROR, Activator.BUNDLE_ID, "Local file not found for writing server content [" + uri + "]", e);
+			} catch (IOException e) {
+				logger.warn(e);
+				status = new Status(IStatus.ERROR, Activator.BUNDLE_ID, "Error downloading file content [" + uri + "]", e);
+			} finally {
+				HDFSManager.INSTANCE.stopServerOperation(uri.toString());
+			}
+		}
+		return status;
+	}
+}
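
For reference, a minimal sketch (not part of this patch) of scheduling this job for an HDFS-backed workspace file; the project and file path are illustrative.

import org.apache.hdt.core.internal.hdfs.DownloadFileJob;
import org.eclipse.core.resources.IFile;
import org.eclipse.core.resources.ResourcesPlugin;
import org.eclipse.core.runtime.CoreException;
import org.eclipse.core.runtime.Path;

public class DownloadSketch {
	public static void scheduleDownload() throws CoreException {
		IFile file = ResourcesPlugin.getWorkspace().getRoot().getFile(new Path("/MyCluster/user/data.txt"));
		// Runs asynchronously as an Eclipse Job and refreshes the resource when done.
		new DownloadFileJob(file).schedule();
	}
}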


[7/8] HDT-32: Merge the code base of Hadoop-Eclipse project into HDT. Contributed by Srimanth Gunturi

Posted by rs...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSFileStore.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSFileStore.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSFileStore.java
new file mode 100644
index 0000000..b84221c
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSFileStore.java
@@ -0,0 +1,594 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.core.internal.hdfs;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import org.apache.hdt.core.Activator;
+import org.apache.hdt.core.hdfs.HDFSClient;
+import org.apache.hdt.core.hdfs.ResourceInformation;
+import org.apache.hdt.core.internal.model.HDFSServer;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+import org.eclipse.core.filesystem.EFS;
+import org.eclipse.core.filesystem.IFileInfo;
+import org.eclipse.core.filesystem.IFileStore;
+import org.eclipse.core.filesystem.provider.FileInfo;
+import org.eclipse.core.filesystem.provider.FileStore;
+import org.eclipse.core.resources.ResourcesPlugin;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.core.runtime.IProgressMonitor;
+import org.eclipse.core.runtime.IStatus;
+import org.eclipse.core.runtime.Status;
+import org.eclipse.core.runtime.URIUtil;
+
+/**
+ * Represents a file or folder in the Hadoop Distributed File System. This
+ * {@link IFileStore} knows about both the remote HDFS resource and the local
+ * cached resource, and can therefore report each file's attributes and
+ * synchronization status.
+ * 
+ * @author Srimanth Gunturi
+ */
+public class HDFSFileStore extends FileStore {
+
+	private static final Logger logger = Logger.getLogger(HDFSFileStore.class);
+	private final HDFSURI uri;
+	private File localFile = null;
+	private FileInfo serverFileInfo = null;
+	private FileInfo localFileInfo = null;
+	private ResourceInformation serverResourceInfo = null;
+	private HDFSServer hdfsServer;
+	private ResourceInformation.Permissions effectivePermissions = null;
+	private List<String> systemDefaultUserIdAndGroupIds = null;
+
+	public HDFSFileStore(HDFSURI uri) {
+		this.uri = uri;
+	}
+
+	protected HDFSServer getServer() {
+		if (hdfsServer == null) {
+			hdfsServer = HDFSManager.INSTANCE.getServer(this.uri.getURI().toString());
+		}
+		return hdfsServer;
+	}
+
+	@Override
+	public String[] childNames(int options, IProgressMonitor monitor) throws CoreException {
+		List<String> childNamesList = new ArrayList<String>();
+		if (getServer() != null) {
+			try {
+				List<ResourceInformation> listResources = getClient().listResources(uri.getURI(), getServer().getUserId());
+				for (ResourceInformation lr : listResources) {
+					if (lr != null)
+						childNamesList.add(lr.getName());
+				}
+			} catch (IOException e) {
+				throw new CoreException(new Status(IStatus.ERROR, Activator.BUNDLE_ID, e.getMessage(), e));
+			} catch (InterruptedException e) {
+				throw new CoreException(new Status(IStatus.ERROR, Activator.BUNDLE_ID, e.getMessage(), e));
+			}
+			if (isLocalFile()) {
+				// If a local folder also exists, include its children so that
+				// local-only resources appear in the listing as well.
+				File local = getLocalFile();
+				if (local.isDirectory()) {
+					childNamesList.addAll(Arrays.asList(local.list()));
+				}
+			}
+		}
+		if (logger.isDebugEnabled())
+			logger.debug("[" + uri + "]: childNames():" + childNamesList);
+		return childNamesList.toArray(new String[childNamesList.size()]);
+	}
+
+	/**
+	 * @return the {@link HDFSClient} used to communicate with this store's server
+	 * @throws CoreException
+	 */
+	private HDFSClient getClient() throws CoreException {
+		return HDFSManager.INSTANCE.getClient(getServer().getUri());
+	}
+
+	/**
+	 * The file information for this resource has two parts: server file
+	 * information and local file information. Either one, or both, may be
+	 * available:
+	 * <ul>
+	 * <li>Server only</li>
+	 * <li>Local only</li>
+	 * <li>Server and local</li>
+	 * </ul>
+	 * 
+	 * This method attempts to determine whichever of the server and local file
+	 * information is not yet available. Stale information can be cleared by
+	 * calling {@link #clearServerFileInfo()} and {@link #clearLocalFileInfo()}.
+	 * 
+	 */
+	@Override
+	public IFileInfo fetchInfo(int options, IProgressMonitor monitor) throws CoreException {
+		if (serverFileInfo == null) {
+			serverResourceInfo = null;
+			this.effectivePermissions = null;
+			FileInfo fi = new FileInfo(getName());
+			HDFSServer server = getServer();
+			if (server != null) {
+				try {
+					if (".project".equals(getName())) {
+						fi.setExists(getLocalFile().exists());
+						fi.setLength(getLocalFile().length());
+					} else {
+						ResourceInformation fileInformation = getClient().getResourceInformation(uri.getURI(), server.getUserId());
+						if (fileInformation != null) {
+							serverResourceInfo = fileInformation;
+							fi.setDirectory(fileInformation.isFolder());
+							fi.setExists(true);
+							fi.setLastModified(fileInformation.getLastModifiedTime());
+							fi.setLength(fileInformation.getSize());
+							fi.setName(fileInformation.getName());
+							String userId = server.getUserId();
+							List<String> groupIds = server.getGroupIds();
+							if (userId == null) {
+								userId = getDefaultUserId();
+								groupIds = getDefaultGroupIds();
+							}
+							fileInformation.updateEffectivePermissions(userId, groupIds);
+							this.effectivePermissions = fileInformation.getEffectivePermissions();
+							fi.setAttribute(EFS.ATTRIBUTE_OWNER_READ, fileInformation.getUserPermissions().read);
+							fi.setAttribute(EFS.ATTRIBUTE_OWNER_WRITE, fileInformation.getUserPermissions().write);
+							fi.setAttribute(EFS.ATTRIBUTE_OWNER_EXECUTE, fileInformation.getUserPermissions().execute);
+							fi.setAttribute(EFS.ATTRIBUTE_GROUP_READ, fileInformation.getGroupPermissions().read);
+							fi.setAttribute(EFS.ATTRIBUTE_GROUP_WRITE, fileInformation.getGroupPermissions().write);
+							fi.setAttribute(EFS.ATTRIBUTE_GROUP_EXECUTE, fileInformation.getGroupPermissions().execute);
+							fi.setAttribute(EFS.ATTRIBUTE_OTHER_READ, fileInformation.getOtherPermissions().read);
+							fi.setAttribute(EFS.ATTRIBUTE_OTHER_WRITE, fileInformation.getOtherPermissions().write);
+							fi.setAttribute(EFS.ATTRIBUTE_OTHER_EXECUTE, fileInformation.getOtherPermissions().execute);
+						}
+					}
+				} catch (IOException e) {
+					throw new CoreException(new Status(IStatus.ERROR, Activator.BUNDLE_ID, e.getMessage(), e));
+				} catch (InterruptedException e) {
+					throw new CoreException(new Status(IStatus.ERROR, Activator.BUNDLE_ID, e.getMessage(), e));
+				} finally {
+				}
+			} else {
+				// No server definition
+				fi.setExists(false);
+			}
+			serverFileInfo = fi;
+		}
+		if (localFileInfo == null) {
+			if (isLocalFile()) {
+				File file = getLocalFile();
+				localFileInfo = new FileInfo(file.getName());
+				if (file.exists()) {
+					localFileInfo.setExists(true);
+					localFileInfo.setLastModified(file.lastModified());
+					localFileInfo.setLength(file.length());
+					localFileInfo.setDirectory(file.isDirectory());
+					localFileInfo.setAttribute(EFS.ATTRIBUTE_READ_ONLY, file.exists() && !file.canWrite());
+					localFileInfo.setAttribute(EFS.ATTRIBUTE_HIDDEN, file.isHidden());
+				} else
+					localFileInfo.setExists(false);
+			}
+		}
+		if (logger.isDebugEnabled())
+			logger.debug("[" + uri + "]: fetchInfo(): " + HDFSUtilites.getDebugMessage(serverFileInfo));
+		if (localFileInfo != null)
+			return localFileInfo;
+		return serverFileInfo;
+	}
+
+	protected String getDefaultUserId() {
+		if (systemDefaultUserIdAndGroupIds == null) {
+			try {
+				this.systemDefaultUserIdAndGroupIds = getClient().getDefaultUserAndGroupIds();
+			} catch (IOException e) {
+				logger.debug(e.getMessage(), e);
+			} catch (CoreException e) {
+				logger.debug(e.getMessage(), e);
+			} catch (InterruptedException e) {
+				logger.debug(e.getMessage(), e);
+			}
+		}
+		if (this.systemDefaultUserIdAndGroupIds != null && this.systemDefaultUserIdAndGroupIds.size() > 0)
+			return this.systemDefaultUserIdAndGroupIds.get(0);
+		return null;
+	}
+
+	protected List<String> getDefaultGroupIds() {
+		if (systemDefaultUserIdAndGroupIds == null) {
+			try {
+				this.systemDefaultUserIdAndGroupIds = getClient().getDefaultUserAndGroupIds();
+			} catch (IOException e) {
+				logger.debug(e.getMessage(), e);
+			} catch (CoreException e) {
+				logger.debug(e.getMessage(), e);
+			} catch (InterruptedException e) {
+				logger.debug(e.getMessage(), e);
+			}
+		}
+		if (this.systemDefaultUserIdAndGroupIds != null && this.systemDefaultUserIdAndGroupIds.size() > 1)
+			// Entries after the first (the user id) are the group ids.
+			return this.systemDefaultUserIdAndGroupIds.subList(1, this.systemDefaultUserIdAndGroupIds.size());
+		return null;
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.core.filesystem.provider.FileStore#putInfo(org.eclipse.core
+	 * .filesystem.IFileInfo, int, org.eclipse.core.runtime.IProgressMonitor)
+	 */
+	@Override
+	public void putInfo(IFileInfo info, int options, IProgressMonitor monitor) throws CoreException {
+		try {
+			if (isLocalFile()) {
+				File file = getLocalFile();
+				if ((options & EFS.SET_LAST_MODIFIED) != 0)
+					file.setLastModified(info.getLastModified());
+				if ((options & EFS.SET_ATTRIBUTES) != 0) {
+					file.setReadable(info.getAttribute(EFS.ATTRIBUTE_OWNER_READ), true);
+					file.setWritable(info.getAttribute(EFS.ATTRIBUTE_OWNER_WRITE), true);
+					file.setExecutable(info.getAttribute(EFS.ATTRIBUTE_OWNER_EXECUTE), true);
+				}
+			} else {
+				ResourceInformation ri = new ResourceInformation();
+				ri.setFolder(info.isDirectory());
+				if ((options & EFS.SET_LAST_MODIFIED) != 0)
+					ri.setLastModifiedTime(info.getLastModified());
+				HDFSServer server = getServer();
+				getClient().setResourceInformation(uri.getURI(), ri, server == null ? null : server.getUserId());
+			}
+		} catch (IOException e) {
+			throw new CoreException(new Status(IStatus.ERROR, Activator.BUNDLE_ID, e.getMessage(), e));
+		} catch (InterruptedException e) {
+			throw new CoreException(new Status(IStatus.ERROR, Activator.BUNDLE_ID, e.getMessage(), e));
+		}
+	}
+
+	/**
+	 * When this file store makes changes that invalidate the cached server
+	 * information, it should clear that information by calling this method.
+	 */
+	protected void clearServerFileInfo() {
+		if (logger.isDebugEnabled())
+			logger.debug("[" + uri + "]: clearServerFileInfo()");
+		this.serverFileInfo = null;
+	}
+
+	/**
+	 * When this file store makes changes that invalidate the cached local
+	 * information, it should clear that information by calling this method.
+	 */
+	protected void clearLocalFileInfo() {
+		if (logger.isDebugEnabled())
+			logger.debug("[" + uri + "]: clearLocalFileInfo()");
+		this.localFileInfo = null;
+	}
+
+	@Override
+	public IFileStore getChild(String name) {
+		if (logger.isDebugEnabled())
+			logger.debug("[" + uri + "]: getChild():" + name);
+		return new HDFSFileStore(uri.append(name));
+	}
+
+	@Override
+	public String getName() {
+		String lastSegment = uri.lastSegment();
+		if (lastSegment == null)
+			lastSegment = "/";
+		if (logger.isDebugEnabled())
+			logger.debug("[" + uri + "]: getName():" + lastSegment);
+		return lastSegment;
+	}
+
+	@Override
+	public IFileStore getParent() {
+		if (logger.isDebugEnabled())
+			logger.debug("[" + uri + "]: getParent()");
+		try {
+			return new HDFSFileStore(uri.removeLastSegment());
+		} catch (URISyntaxException e) {
+			logger.log(Level.WARN, e.getMessage(), e);
+		}
+		return null;
+	}
+
+	@Override
+	public InputStream openInputStream(int options, IProgressMonitor monitor) throws CoreException {
+		if (logger.isDebugEnabled())
+			logger.debug("[" + uri + "]: openInputStream()");
+		if (".project".equals(getName())) {
+			try {
+				final File localFile = getLocalFile();
+				if (!localFile.exists())
+					localFile.createNewFile();
+				return new FileInputStream(localFile);
+			} catch (FileNotFoundException e) {
+				throw new CoreException(new Status(IStatus.ERROR, Activator.BUNDLE_ID, e.getMessage(), e));
+			} catch (IOException e) {
+				throw new CoreException(new Status(IStatus.ERROR, Activator.BUNDLE_ID, e.getMessage(), e));
+			}
+		} else {
+			File lFile = getLocalFile();
+			if (lFile.exists()) {
+				try {
+					return new FileInputStream(lFile);
+				} catch (FileNotFoundException e) {
+					throw new CoreException(new Status(IStatus.ERROR, Activator.BUNDLE_ID, e.getMessage(), e));
+				}
+			} else {
+				return openRemoteInputStream(options, monitor);
+			}
+		}
+	}
+
+	public InputStream openRemoteInputStream(int options, IProgressMonitor monitor) throws CoreException {
+		if (logger.isDebugEnabled())
+			logger.debug("[" + uri + "]: openRemoteInputStream()");
+		if (".project".equals(getName())) {
+			return null;
+		} else {
+			try {
+				HDFSServer server = getServer();
+				return getClient().openInputStream(uri.getURI(), server == null ? null : server.getUserId());
+			} catch (IOException e) {
+				throw new CoreException(new Status(IStatus.ERROR, Activator.BUNDLE_ID, e.getMessage(), e));
+			} catch (InterruptedException e) {
+				throw new CoreException(new Status(IStatus.ERROR, Activator.BUNDLE_ID, e.getMessage(), e));
+			}
+		}
+	}
+
+	@Override
+	public URI toURI() {
+		return uri.getURI();
+	}
+
+	/**
+	 * @return the localFile
+	 * @throws CoreException
+	 */
+	public File getLocalFile() throws CoreException {
+		if (localFile == null) {
+			final HDFSManager hdfsManager = HDFSManager.INSTANCE;
+			final String uriString = uri.getURI().toString();
+			HDFSServer server = hdfsManager.getServer(uriString);
+			if (server != null) {
+				File workspaceFolder = ResourcesPlugin.getWorkspace().getRoot().getLocation().toFile();
+				try {
+					URI relativeURI = URIUtil.makeRelative(uri.getURI(), new URI(server.getUri()));
+					String relativePath = hdfsManager.getProjectName(server) + "/" + relativeURI.toString();
+					localFile = new File(workspaceFolder, relativePath);
+				} catch (URISyntaxException e) {
+					throw new CoreException(new Status(IStatus.ERROR, Activator.BUNDLE_ID, e.getMessage(), e));
+				}
+			} else
+				logger.error("No server associated with uri: " + uriString);
+		}
+		if (logger.isDebugEnabled())
+			logger.debug("[" + uri + "]: getLocalFile():" + localFile);
+		return localFile;
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.eclipse.core.filesystem.provider.FileStore#mkdir(int,
+	 * org.eclipse.core.runtime.IProgressMonitor)
+	 */
+	@Override
+	public IFileStore mkdir(int options, IProgressMonitor monitor) throws CoreException {
+		if (logger.isDebugEnabled())
+			logger.debug("[" + uri + "]: mkdir()");
+		try {
+			clearServerFileInfo();
+			HDFSServer server = getServer();
+			if (getClient().mkdirs(uri.getURI(), server == null ? null : server.getUserId())) {
+				return this;
+			} else {
+				return null;
+			}
+		} catch (IOException e) {
+			logger.error("Unable to mkdir: " + uri);
+			throw new CoreException(new Status(IStatus.ERROR, Activator.BUNDLE_ID, e.getMessage(), e));
+		} catch (InterruptedException e) {
+			logger.error("Unable to mkdir: " + uri);
+			throw new CoreException(new Status(IStatus.ERROR, Activator.BUNDLE_ID, e.getMessage(), e));
+		}
+	}
+
+	/**
+	 * Determines whether a locally cached copy of this resource exists in the
+	 * workspace.
+	 * 
+	 * @return <code>true</code> if the local file exists
+	 */
+	public boolean isLocalFile() {
+		try {
+			File localFile = getLocalFile();
+			return localFile != null && localFile.exists();
+		} catch (CoreException e) {
+			logger.debug("Unable to determine if file is local", e);
+		}
+		return false;
+	}
+
+	/**
+	 * Returns <code>true</code> only when the resource exists locally
+	 * ({@link #isLocalFile()}) and not on the server ({@link #isRemoteFile()}).
+	 * 
+	 * @return
+	 */
+	public boolean isLocalOnly() {
+		return isLocalFile() && !isRemoteFile();
+	}
+
+	/**
+	 * Determines whether this resource exists on the server side.
+	 * 
+	 * @return <code>true</code> if the resource exists on the server
+	 */
+	public boolean isRemoteFile() {
+		if (this.serverFileInfo == null)
+			this.fetchInfo();
+		return this.serverFileInfo != null && this.serverFileInfo.exists();
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.eclipse.core.filesystem.provider.FileStore#openOutputStream(int,
+	 * org.eclipse.core.runtime.IProgressMonitor)
+	 */
+	@Override
+	public OutputStream openOutputStream(int options, IProgressMonitor monitor) throws CoreException {
+		if (logger.isDebugEnabled())
+			logger.debug("[" + uri + "]: openOutputStream()");
+		if (".project".equals(getName())) {
+			try {
+				File dotProjectFile = getLocalFile();
+				if (!dotProjectFile.exists()) {
+					dotProjectFile.getParentFile().mkdirs();
+					dotProjectFile.createNewFile();
+				}
+				return new FileOutputStream(dotProjectFile);
+			} catch (FileNotFoundException e) {
+				throw new CoreException(new Status(IStatus.ERROR, Activator.BUNDLE_ID, e.getMessage(), e));
+			} catch (IOException e) {
+				throw new CoreException(new Status(IStatus.ERROR, Activator.BUNDLE_ID, e.getMessage(), e));
+			}
+		} else {
+			File lFile = getLocalFile();
+			if (!lFile.exists()) {
+				lFile.getParentFile().mkdirs();
+				try {
+					lFile.createNewFile();
+				} catch (IOException e) {
+					throw new CoreException(new Status(IStatus.ERROR, Activator.BUNDLE_ID, "Cannot create new file to save", e));
+				}
+			}
+			if (lFile.exists()) {
+				try {
+					clearLocalFileInfo();
+					return new FileOutputStream(lFile);
+				} catch (FileNotFoundException e) {
+					throw new CoreException(new Status(IStatus.ERROR, Activator.BUNDLE_ID, e.getMessage(), e));
+				}
+			} else
+				throw new CoreException(new Status(IStatus.ERROR, Activator.BUNDLE_ID, "Local file does not exist to write to: " + lFile.getAbsolutePath()));
+		}
+	}
+
+	public OutputStream openRemoteOutputStream(int options, IProgressMonitor monitor) throws CoreException {
+		if (logger.isDebugEnabled())
+			logger.debug("[" + uri + "]: openRemoteOutputStream()");
+		try {
+			HDFSServer server = getServer();
+			clearServerFileInfo();
+			if (fetchInfo().exists())
+				return getClient().openOutputStream(uri.getURI(), server == null ? null : server.getUserId());
+			else
+				return getClient().createOutputStream(uri.getURI(), server == null ? null : server.getUserId());
+		} catch (IOException e) {
+			throw new CoreException(new Status(IStatus.ERROR, Activator.BUNDLE_ID, e.getMessage(), e));
+		} catch (InterruptedException e) {
+			throw new CoreException(new Status(IStatus.ERROR, Activator.BUNDLE_ID, e.getMessage(), e));
+		}
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.eclipse.core.filesystem.provider.FileStore#delete(int,
+	 * org.eclipse.core.runtime.IProgressMonitor)
+	 */
+	@Override
+	public void delete(int options, IProgressMonitor monitor) throws CoreException {
+		if (logger.isDebugEnabled())
+			logger.debug("[" + uri + "]: delete()");
+		try {
+			if (isLocalFile()) {
+				clearLocalFileInfo();
+				final File lf = getLocalFile();
+				final File plf = lf.getParentFile();
+				lf.delete();
+				UploadFileJob.deleteFoldersIfEmpty(plf);
+			}
+			if (isRemoteFile()) {
+				final HDFSServer server = getServer();
+				if (server != null) {
+					if (server.getUri().equals(uri.getURI().toString())) {
+						// The server location is the same as the project root,
+						// so we just disconnect instead of actually deleting
+						// the root folder on HDFS.
+					} else {
+						clearServerFileInfo();
+						getClient().delete(uri.getURI(), server == null ? null : server.getUserId());
+					}
+				} else {
+					// Not associated with any server, we just disconnect.
+				}
+			}
+		} catch (IOException e) {
+			logger.error("Unable to delete: " + uri);
+			throw new CoreException(new Status(IStatus.ERROR, Activator.BUNDLE_ID, e.getMessage(), e));
+		} catch (InterruptedException e) {
+			logger.error("Unable to delete: " + uri);
+			throw new CoreException(new Status(IStatus.ERROR, Activator.BUNDLE_ID, e.getMessage(), e));
+		}
+	}
+
+	/**
+	 * Effective permissions are available only when both the accessing user
+	 * and the server-side permissions are known. If any information needed to
+	 * determine them is missing, <code>null</code> is returned.
+	 * 
+	 * @return the effectivePermissions
+	 */
+	public ResourceInformation.Permissions getEffectivePermissions() {
+		if (effectivePermissions == null)
+			fetchInfo();
+		return effectivePermissions;
+	}
+
+	/**
+	 * @return the serverResourceInfo
+	 */
+	public ResourceInformation getServerResourceInfo() {
+		return serverResourceInfo;
+	}
+}
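
For reference, a minimal sketch (not part of this patch) of resolving one of these stores through EFS; the namenode address and path are illustrative, and the sketch assumes the "hdfs" scheme is registered by this plugin.

import java.net.URI;

import org.eclipse.core.filesystem.EFS;
import org.eclipse.core.filesystem.IFileStore;
import org.eclipse.core.runtime.CoreException;

public class FileStoreSketch {
	public static void inspect() throws CoreException {
		IFileStore store = EFS.getStore(URI.create("hdfs://namenode.example.com:8020/user/sample.txt"));
		// fetchInfo() returns the cached local information when a local copy
		// exists, otherwise the information fetched from the server.
		System.out.println("exists=" + store.fetchInfo().exists() + ", length=" + store.fetchInfo().getLength());
	}
}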

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSFileSystem.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSFileSystem.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSFileSystem.java
new file mode 100644
index 0000000..cc55493
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSFileSystem.java
@@ -0,0 +1,57 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.core.internal.hdfs;
+
+import java.net.URI;
+
+import org.eclipse.core.filesystem.IFileStore;
+import org.eclipse.core.filesystem.provider.FileSystem;
+
+/**
+ * 
+ * @author Srimanth Gunturi
+ */
+public class HDFSFileSystem extends FileSystem {
+	
+	public static final String SCHEME = "hdfs";
+
+	@Override
+	public IFileStore getStore(URI uri) {
+		if(SCHEME.equals(uri.getScheme()))
+			return new HDFSFileStore(new HDFSURI(uri));
+		return null;
+	}
+	
+	/* (non-Javadoc)
+	 * @see org.eclipse.core.filesystem.provider.FileSystem#canDelete()
+	 */
+	@Override
+	public boolean canDelete() {
+		return true;
+	}
+	
+	/* (non-Javadoc)
+	 * @see org.eclipse.core.filesystem.provider.FileSystem#canWrite()
+	 */
+	@Override
+	public boolean canWrite() {
+		return true;
+	}
+
+}
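
For reference, a sketch (not part of this patch) of looking the file system up by its scheme once it has been contributed to EFS, presumably through the standard org.eclipse.core.filesystem.filesystems extension point declared elsewhere in the plugin.

import org.apache.hdt.core.internal.hdfs.HDFSFileSystem;
import org.eclipse.core.filesystem.EFS;
import org.eclipse.core.filesystem.IFileSystem;
import org.eclipse.core.runtime.CoreException;

public class SchemeLookupSketch {
	public static void lookup() throws CoreException {
		IFileSystem hdfs = EFS.getFileSystem(HDFSFileSystem.SCHEME); // "hdfs"
		System.out.println("canDelete=" + hdfs.canDelete() + ", canWrite=" + hdfs.canWrite());
	}
}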

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSManager.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSManager.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSManager.java
new file mode 100644
index 0000000..93f0696
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSManager.java
@@ -0,0 +1,285 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.core.internal.hdfs;
+
+import java.net.URISyntaxException;
+import java.util.HashMap;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hdt.core.Activator;
+import org.apache.hdt.core.hdfs.HDFSClient;
+import org.apache.hdt.core.internal.HadoopManager;
+import org.apache.hdt.core.internal.model.HDFSServer;
+import org.apache.hdt.core.internal.model.HadoopFactory;
+import org.apache.hdt.core.internal.model.ServerStatus;
+import org.apache.log4j.Logger;
+import org.eclipse.core.resources.IProject;
+import org.eclipse.core.resources.IProjectDescription;
+import org.eclipse.core.resources.IResource;
+import org.eclipse.core.resources.IWorkspace;
+import org.eclipse.core.resources.IWorkspaceRoot;
+import org.eclipse.core.resources.ResourcesPlugin;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.core.runtime.IConfigurationElement;
+import org.eclipse.core.runtime.IStatus;
+import org.eclipse.core.runtime.NullProgressMonitor;
+import org.eclipse.core.runtime.Platform;
+import org.eclipse.core.runtime.Status;
+import org.eclipse.emf.common.util.EList;
+import org.eclipse.team.core.RepositoryProvider;
+
+/**
+ * Manages HDFS server definitions and the mapping between workspace projects
+ * and server resources.
+ * 
+ * @author Srimanth Gunturi
+ * 
+ */
+public class HDFSManager {
+
+	public static HDFSManager INSTANCE = new HDFSManager();
+	private static final Logger logger = Logger.getLogger(HDFSManager.class);
+
+	public static void disconnectProject(IProject project) {
+		HDFSServer server = HDFSManager.INSTANCE.getServer(project.getLocationURI().toString());
+		if (server != null && server.getStatusCode() != ServerStatus.DISCONNECTED_VALUE)
+			server.setStatusCode(ServerStatus.DISCONNECTED_VALUE);
+		try {
+			project.refreshLocal(IResource.DEPTH_INFINITE, new NullProgressMonitor());
+		} catch (CoreException e) {
+			logger.warn(e.getMessage(), e);
+		}
+	}
+
+	public static void reconnectProject(IProject project) {
+		HDFSServer server = HDFSManager.INSTANCE.getServer(project.getLocationURI().toString());
+		if (server != null && server.getStatusCode() == ServerStatus.DISCONNECTED_VALUE)
+			server.setStatusCode(0);
+		try {
+			project.refreshLocal(IResource.DEPTH_INFINITE, new NullProgressMonitor());
+		} catch (CoreException e) {
+			logger.warn(e.getMessage(), e);
+		}
+	}
+
+	private Map<HDFSServer, String> serverToProjectMap = new HashMap<HDFSServer, String>();
+	private Map<String, HDFSServer> projectToServerMap = new HashMap<String, HDFSServer>();
+	private final Map<String, HDFSClient> hdfsClientsMap = new HashMap<String, HDFSClient>();
+	/**
+	 * URI should always end with a '/'
+	 */
+	private Map<String, HDFSServer> uriToServerMap = new HashMap<String, HDFSServer>();
+
+	private Map<String, HDFSServer> uriToServerCacheMap = new LinkedHashMap<String, HDFSServer>() {
+		private static final long serialVersionUID = 1L;
+		private int MAX_ENTRIES = 1 << 10;
+
+		protected boolean removeEldestEntry(Map.Entry<String, HDFSServer> eldest) {
+			return size() > MAX_ENTRIES;
+		};
+	};
+
+	/**
+	 * Singleton
+	 */
+	private HDFSManager() {
+	}
+
+	public EList<HDFSServer> getHdfsServers() {
+		return HadoopManager.INSTANCE.getServers().getHdfsServers();
+	}
+
+	public void loadServers() {
+		final IWorkspaceRoot workspaceRoot = ResourcesPlugin.getWorkspace().getRoot();
+		for (HDFSServer server : getHdfsServers()) {
+			uriToServerMap.put(server.getUri(), server);
+			final IProject project = workspaceRoot.getProject(server.getName());
+			if (!project.exists()) {
+				server.setStatusCode(ServerStatus.NO_PROJECT_VALUE);
+			}
+			serverToProjectMap.put(server, server.getName());
+			projectToServerMap.put(server.getName(), server);
+		}
+		IProject[] projects = workspaceRoot.getProjects();
+		if (projects != null) {
+			for (IProject p : projects) {
+				if (p.getLocationURI() != null && HDFSFileSystem.SCHEME.equals(p.getLocationURI().getScheme())) {
+					if (!projectToServerMap.containsKey(p.getName())) {
+						logger.error("HDFS project with no server associated being closed:" + p.getName());
+						try {
+							p.close(new NullProgressMonitor());
+							logger.error("HDFS project with no server associated closed:" + p.getName());
+						} catch (CoreException e) {
+							logger.error("HDFS project with no server associated cannot be closed:" + p.getName(), e);
+						}
+					}
+				}
+			}
+		}
+	}
+
+	/**
+	 * Creates and adds an HDFS server definition. This also creates a local
+	 * project that represents the server file system via EFS.
+	 * 
+	 * @param hdfsURI
+	 * @return
+	 * @throws CoreException
+	 */
+	public HDFSServer createServer(String name, java.net.URI hdfsURI, String userId, List<String> groupIds) throws CoreException {
+		if (hdfsURI.getPath() == null || hdfsURI.getPath().length() < 1) {
+			try {
+				hdfsURI = new java.net.URI(hdfsURI.toString() + "/");
+			} catch (URISyntaxException e) {
+				// Appending a trailing '/' to an already valid URI should not fail; keep the original URI.
+			}
+		}
+		if (ResourcesPlugin.getWorkspace().getRoot().getProject(name).exists())
+			throw new CoreException(new Status(IStatus.ERROR, Activator.BUNDLE_ID, "Project with name '" + name + "' already exists"));
+		HDFSServer hdfsServer = HadoopFactory.eINSTANCE.createHDFSServer();
+		hdfsServer.setName(name);
+		hdfsServer.setUri(hdfsURI.toString());
+		hdfsServer.setLoaded(true);
+		if (userId != null)
+			hdfsServer.setUserId(userId);
+		if (groupIds != null)
+			for (String groupId : groupIds)
+				hdfsServer.getGroupIds().add(groupId);
+		getHdfsServers().add(hdfsServer);
+		HadoopManager.INSTANCE.saveServers();
+		uriToServerMap.put(hdfsServer.getUri(), hdfsServer);
+		serverToProjectMap.put(hdfsServer, name);
+		projectToServerMap.put(name, hdfsServer);
+		createIProject(name, hdfsURI);
+		return hdfsServer;
+	}
+
+	/**
+	 * @param name
+	 * @param hdfsURI
+	 * @return
+	 * @throws CoreException
+	 */
+	private IProject createIProject(String name, java.net.URI hdfsURI) throws CoreException {
+		final IWorkspace workspace = ResourcesPlugin.getWorkspace();
+		IProject project = workspace.getRoot().getProject(name);
+		IProjectDescription pd = workspace.newProjectDescription(name);
+		pd.setLocationURI(hdfsURI);
+		project.create(pd, new NullProgressMonitor());
+		project.open(new NullProgressMonitor());
+		RepositoryProvider.map(project, HDFSTeamRepositoryProvider.ID);
+		return project;
+	}
+
+	public HDFSServer getServer(String uri) {
+		if (uri != null && !uriToServerCacheMap.containsKey(uri)) {
+			String tmpUri = uri;
+			HDFSServer serverU = uriToServerMap.get(tmpUri);
+			while (serverU == null) {
+				int lastSlashIndex = tmpUri.lastIndexOf('/');
+				tmpUri = lastSlashIndex < 0 ? null : tmpUri.substring(0, lastSlashIndex);
+				if (tmpUri != null)
+					serverU = uriToServerMap.get(tmpUri + "/");
+				else
+					break;
+			}
+			if (serverU != null)
+				uriToServerCacheMap.put(uri, serverU);
+		}
+		return uriToServerCacheMap.get(uri);
+	}
+
+	public String getProjectName(HDFSServer server) {
+		return serverToProjectMap.get(server);
+	}
+
+	/**
+	 * Marks a server operation on the given URI as in progress.
+	 * 
+	 * @param uri
+	 */
+	public void startServerOperation(String uri) {
+		HDFSServer server = getServer(uri);
+		if (server != null && !server.getOperationURIs().contains(uri)) {
+			server.getOperationURIs().add(uri);
+		}
+	}
+
+	/**
+	 * Marks the server operation on the given URI as finished.
+	 * 
+	 * @param uri
+	 */
+	public void stopServerOperation(String uri) {
+		HDFSServer server = getServer(uri);
+		if (server != null) {
+			server.getOperationURIs().remove(uri);
+		}
+	}
+
+	public boolean isServerOperationRunning(String uri) {
+		HDFSServer server = getServer(uri);
+		if (server != null) {
+			return server.getOperationURIs().contains(uri);
+		}
+		return false;
+	}
+
+	/**
+	 * @param server
+	 */
+	public void deleteServer(HDFSServer server) {
+		getHdfsServers().remove(server);
+		String projectName = this.serverToProjectMap.remove(server);
+		this.projectToServerMap.remove(projectName);
+		this.uriToServerMap.remove(server.getUri());
+		HadoopManager.INSTANCE.saveServers();
+	}
+
+	/**
+	 * Provides the {@link HDFSClient} instance used to communicate with the
+	 * server identified by the given URI.
+	 * 
+	 * @param serverURI
+	 * @return
+	 * @throws CoreException
+	 */
+	public HDFSClient getClient(String serverURI) throws CoreException {
+		if (logger.isDebugEnabled())
+			logger.debug("getClient(" + serverURI + "): Server=" + serverURI);
+		HDFSServer server = getServer(serverURI);
+		if (server != null && server.getStatusCode() == ServerStatus.DISCONNECTED_VALUE) {
+			if (logger.isDebugEnabled())
+				logger.debug("getClient(" + serverURI + "): Server timed out. Not returning client");
+			throw new CoreException(new Status(IStatus.WARNING, Activator.BUNDLE_ID, "Server disconnected due to timeout. Please reconnect to server."));
+		}
+		if (hdfsClientsMap.containsKey(serverURI))
+			return hdfsClientsMap.get(serverURI);
+		else {
+			try {
+				java.net.URI sUri = serverURI == null ? new java.net.URI("hdfs://server") : new java.net.URI(serverURI);
+				IConfigurationElement[] elementsFor = Platform.getExtensionRegistry().getConfigurationElementsFor("org.apache.hdt.core.hdfsClient");
+				for (IConfigurationElement element : elementsFor) {
+					if (sUri.getScheme().equals(element.getAttribute("protocol"))) {
+						HDFSClient client = (HDFSClient) element.createExecutableExtension("class");
+						hdfsClientsMap.put(serverURI, new InterruptableHDFSClient(serverURI, client));
+					}
+				}
+			} catch (URISyntaxException e) {
+				throw new CoreException(new Status(IStatus.ERROR, Activator.BUNDLE_ID, "Invalid server URI", e));
+			}
+			return hdfsClientsMap.get(serverURI);
+		}
+	}
+}
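
For reference, a minimal sketch (not part of this patch) of registering a new server through this manager; the cluster name, namenode address, user and group are illustrative. createServer(...) also creates and opens the backing workspace project.

import java.net.URI;
import java.net.URISyntaxException;
import java.util.Arrays;

import org.apache.hdt.core.internal.hdfs.HDFSManager;
import org.apache.hdt.core.internal.model.HDFSServer;
import org.eclipse.core.runtime.CoreException;

public class CreateServerSketch {
	public static void create() throws CoreException, URISyntaxException {
		HDFSServer server = HDFSManager.INSTANCE.createServer("MyCluster",
				new URI("hdfs://namenode.example.com:8020/"), "hdfsuser", Arrays.asList("hadoop"));
		System.out.println("Backing project: " + HDFSManager.INSTANCE.getProjectName(server));
	}
}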

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSMoveDeleteHook.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSMoveDeleteHook.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSMoveDeleteHook.java
new file mode 100644
index 0000000..0ca0df4
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSMoveDeleteHook.java
@@ -0,0 +1,130 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.core.internal.hdfs;
+
+import org.eclipse.core.resources.IFile;
+import org.eclipse.core.resources.IFolder;
+import org.eclipse.core.resources.IProject;
+import org.eclipse.core.resources.IProjectDescription;
+import org.eclipse.core.resources.IResource;
+import org.eclipse.core.resources.team.IMoveDeleteHook;
+import org.eclipse.core.resources.team.IResourceTree;
+import org.eclipse.core.runtime.IProgressMonitor;
+
+/**
+ * @author Srimanth Gunturi
+ * 
+ */
+public class HDFSMoveDeleteHook implements IMoveDeleteHook {
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.core.resources.team.IMoveDeleteHook#deleteFile(org.eclipse
+	 * .core.resources.team.IResourceTree, org.eclipse.core.resources.IFile,
+	 * int, org.eclipse.core.runtime.IProgressMonitor)
+	 */
+	@Override
+	public boolean deleteFile(IResourceTree tree, IFile file, int updateFlags, IProgressMonitor monitor) {
+		return false;
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.core.resources.team.IMoveDeleteHook#deleteFolder(org.eclipse
+	 * .core.resources.team.IResourceTree, org.eclipse.core.resources.IFolder,
+	 * int, org.eclipse.core.runtime.IProgressMonitor)
+	 */
+	@Override
+	public boolean deleteFolder(IResourceTree tree, IFolder folder, int updateFlags, IProgressMonitor monitor) {
+		return false;
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.core.resources.team.IMoveDeleteHook#deleteProject(org.eclipse
+	 * .core.resources.team.IResourceTree, org.eclipse.core.resources.IProject,
+	 * int, org.eclipse.core.runtime.IProgressMonitor)
+	 */
+	@Override
+	public boolean deleteProject(IResourceTree tree, IProject project, int updateFlags, IProgressMonitor monitor) {
+		if (HDFSFileSystem.SCHEME.equals(project.getLocationURI().getScheme())) {
+			// Deleting an HDFS project root folder *and* its contents is not
+			// supported. The caller has to uncheck the 'Delete project
+			// contents' checkbox.
+			if ((IResource.ALWAYS_DELETE_PROJECT_CONTENT & updateFlags) > 0) {
+				throw new RuntimeException(
+						"Deletion of HDFS project root folder is not supported. To remove project uncheck the \'Delete project contents on disk\' checkbox");
+			}
+		}
+		return false;
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.core.resources.team.IMoveDeleteHook#moveFile(org.eclipse.
+	 * core.resources.team.IResourceTree, org.eclipse.core.resources.IFile,
+	 * org.eclipse.core.resources.IFile, int,
+	 * org.eclipse.core.runtime.IProgressMonitor)
+	 */
+	@Override
+	public boolean moveFile(IResourceTree tree, IFile source, IFile destination, int updateFlags, IProgressMonitor monitor) {
+		return false;
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.core.resources.team.IMoveDeleteHook#moveFolder(org.eclipse
+	 * .core.resources.team.IResourceTree, org.eclipse.core.resources.IFolder,
+	 * org.eclipse.core.resources.IFolder, int,
+	 * org.eclipse.core.runtime.IProgressMonitor)
+	 */
+	@Override
+	public boolean moveFolder(IResourceTree tree, IFolder source, IFolder destination, int updateFlags, IProgressMonitor monitor) {
+		return false;
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.core.resources.team.IMoveDeleteHook#moveProject(org.eclipse
+	 * .core.resources.team.IResourceTree, org.eclipse.core.resources.IProject,
+	 * org.eclipse.core.resources.IProjectDescription, int,
+	 * org.eclipse.core.runtime.IProgressMonitor)
+	 */
+	@Override
+	public boolean moveProject(IResourceTree tree, IProject source, IProjectDescription description, int updateFlags, IProgressMonitor monitor) {
+		if (HDFSFileSystem.SCHEME.equals(source.getLocationURI().getScheme())) {
+			// Moving an HDFS project is not supported.
+			throw new RuntimeException("Moving a HDFS project root folder is not supported.");
+		}
+		return false;
+	}
+
+}
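
For reference, a sketch (not part of this patch) of deleting an HDFS-backed project in a way the hook above permits: only the project metadata is removed, since requesting deletion of the contents (the HDFS root folder) is rejected.

import org.eclipse.core.resources.IProject;
import org.eclipse.core.resources.IResource;
import org.eclipse.core.resources.ResourcesPlugin;
import org.eclipse.core.runtime.CoreException;
import org.eclipse.core.runtime.NullProgressMonitor;

public class RemoveProjectSketch {
	public static void remove(String projectName) throws CoreException {
		IProject project = ResourcesPlugin.getWorkspace().getRoot().getProject(projectName);
		// NEVER_DELETE_PROJECT_CONTENT keeps the files on HDFS; passing
		// ALWAYS_DELETE_PROJECT_CONTENT would be vetoed by HDFSMoveDeleteHook.
		project.delete(IResource.NEVER_DELETE_PROJECT_CONTENT | IResource.FORCE, new NullProgressMonitor());
	}
}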

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSTeamRepositoryProvider.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSTeamRepositoryProvider.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSTeamRepositoryProvider.java
new file mode 100644
index 0000000..e09e456
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSTeamRepositoryProvider.java
@@ -0,0 +1,41 @@
+package org.apache.hdt.core.internal.hdfs;
+
+import org.eclipse.core.resources.team.IMoveDeleteHook;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.team.core.RepositoryProvider;
+
+public class HDFSTeamRepositoryProvider extends RepositoryProvider {
+
+	public static final String ID = "org.apache.hadoop.hdfs";
+	private HDFSMoveDeleteHook moveDeleteHook = new HDFSMoveDeleteHook();
+	
+	public HDFSTeamRepositoryProvider() {
+		// TODO Auto-generated constructor stub
+	}
+
+	@Override
+	public void deconfigure() throws CoreException {
+		// TODO Auto-generated method stub
+
+	}
+
+	@Override
+	public void configureProject() throws CoreException {
+		// TODO Auto-generated method stub
+
+	}
+
+	@Override
+	public String getID() {
+		return ID;
+	}
+	
+	/* (non-Javadoc)
+	 * @see org.eclipse.team.core.RepositoryProvider#getMoveDeleteHook()
+	 */
+	@Override
+	public IMoveDeleteHook getMoveDeleteHook() {
+		return moveDeleteHook;
+	}
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSURI.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSURI.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSURI.java
new file mode 100644
index 0000000..e958646
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSURI.java
@@ -0,0 +1,73 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.core.internal.hdfs;
+
+import java.net.URI;
+import java.net.URISyntaxException;
+
+import org.eclipse.core.runtime.IPath;
+import org.eclipse.core.runtime.Path;
+import org.eclipse.core.runtime.URIUtil;
+
+/**
+ * 
+ * @author Srimanth Gunturi
+ */
+public class HDFSURI {
+	public static final String SCHEME = "hdfs";
+	private final URI uri;
+	private IPath path;
+
+	public HDFSURI(URI uri) {
+		this.uri = uri;
+		String pathString = uri.getPath();
+		path = new Path(pathString);
+	}
+
+	public HDFSURI append(String name) {
+		return new HDFSURI(URIUtil.append(uri, name));
+	}
+
+	public String lastSegment() {
+		return URIUtil.lastSegment(uri);
+	}
+
+	public HDFSURI removeLastSegment() throws URISyntaxException {
+		if (path.segmentCount() > 0) {
+			String parentPath = path.removeLastSegments(1).toString();
+			URI parentURI = new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), parentPath, uri.getQuery(), uri.getFragment());
+			return new HDFSURI(parentURI);
+		}
+		return null;
+	}
+
+	public URI getURI() {
+		return uri;
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see java.lang.Object#toString()
+	 */
+	@Override
+	public String toString() {
+		return uri == null ? "null" : uri.toString();
+	}
+}
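
For reference, a small sketch (not part of this patch) of the path arithmetic this helper provides; the namenode address is illustrative.

import java.net.URI;
import java.net.URISyntaxException;

import org.apache.hdt.core.internal.hdfs.HDFSURI;

public class UriSketch {
	public static void main(String[] args) throws URISyntaxException {
		HDFSURI base = new HDFSURI(new URI("hdfs://namenode.example.com:8020/user/demo"));
		HDFSURI child = base.append("part-00000");   // .../user/demo/part-00000
		HDFSURI parent = child.removeLastSegment();  // back to .../user/demo
		System.out.println(child.lastSegment() + " under " + parent);
	}
}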

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSUtilites.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSUtilites.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSUtilites.java
new file mode 100644
index 0000000..e2e387b
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSUtilites.java
@@ -0,0 +1,86 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.core.internal.hdfs;
+
+import java.text.DateFormat;
+import java.util.Date;
+
+import org.eclipse.core.filesystem.EFS;
+import org.eclipse.core.filesystem.IFileInfo;
+
+/**
+ * @author Srimanth Gunturi
+ * 
+ */
+public class HDFSUtilites {
+
+	public static String getDebugMessage(IFileInfo fi) {
+		if (fi != null) {
+			String lastMod = DateFormat.getDateTimeInstance(DateFormat.SHORT, DateFormat.LONG).format(new Date(fi.getLastModified()));
+			
+			String userPerms = "user(";
+			if (fi.getAttribute(EFS.ATTRIBUTE_OWNER_READ))
+				userPerms+="r";
+			else
+				userPerms+="-";
+			if (fi.getAttribute(EFS.ATTRIBUTE_OWNER_WRITE))
+				userPerms+="w";
+			else
+				userPerms+="-";
+			if (fi.getAttribute(EFS.ATTRIBUTE_OWNER_EXECUTE))
+				userPerms+="x";
+			else
+				userPerms+="-";
+			userPerms += ")";
+
+			String groupPerms = "group(";
+			if (fi.getAttribute(EFS.ATTRIBUTE_GROUP_READ))
+				groupPerms+="r";
+			else
+				groupPerms+="-";
+			if (fi.getAttribute(EFS.ATTRIBUTE_GROUP_WRITE))
+				groupPerms+="w";
+			else
+				groupPerms+="-";
+			if (fi.getAttribute(EFS.ATTRIBUTE_GROUP_EXECUTE))
+				groupPerms+="x";
+			else
+				groupPerms+="-";
+			groupPerms += ")";
+
+			String otherPerms = "other(";
+			if (fi.getAttribute(EFS.ATTRIBUTE_OTHER_READ))
+				otherPerms+="r";
+			else
+				otherPerms+="-";
+			if (fi.getAttribute(EFS.ATTRIBUTE_OTHER_WRITE))
+				otherPerms+="w";
+			else
+				otherPerms+="-";
+			if (fi.getAttribute(EFS.ATTRIBUTE_OTHER_EXECUTE))
+				otherPerms+="x";
+			else
+				otherPerms+="-";
+			otherPerms += ")";
+
+			return "Exists=" + fi.exists() + ", Length=" + fi.getLength() + ", LastMod=" + lastMod + ", "+userPerms+", "+groupPerms+", "+otherPerms;
+		}
+		return "null";
+	}
+}

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/InterruptableHDFSClient.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/InterruptableHDFSClient.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/InterruptableHDFSClient.java
new file mode 100644
index 0000000..0301d5f
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/InterruptableHDFSClient.java
@@ -0,0 +1,260 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.core.internal.hdfs;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hdt.core.hdfs.HDFSClient;
+import org.apache.hdt.core.hdfs.ResourceInformation;
+import org.apache.hdt.core.internal.model.HDFSServer;
+import org.apache.hdt.core.internal.model.ServerStatus;
+import org.apache.log4j.Logger;
+import org.eclipse.core.resources.IProject;
+import org.eclipse.core.resources.ResourcesPlugin;
+
+/**
+ * 
+ * @author Srimanth Gunturi
+ * 
+ */
+public class InterruptableHDFSClient extends HDFSClient {
+	private static final int DEFAULT_TIMEOUT = 5000;
+	private static final Logger logger = Logger.getLogger(InterruptableHDFSClient.class);
+	// private static ExecutorService threadPool =
+	// Executors.newFixedThreadPool(10);
+
+	private final HDFSClient client;
+	private final int timeoutMillis = DEFAULT_TIMEOUT;
+	private final String serverURI;
+
+	/**
+	 * @param serverURI
+	 * @param client
+	 */
+	public InterruptableHDFSClient(String serverURI, HDFSClient client) {
+		this.serverURI = serverURI;
+		this.client = client;
+	}
+
+	private static interface CustomRunnable<V> {
+		public V run() throws IOException, InterruptedException;
+	}
+
+	protected <T> T executeWithTimeout(final CustomRunnable<T> runnable) throws IOException, InterruptedException {
+		final List<T> data = new ArrayList<T>();
+		final IOException[] ioE = new IOException[1];
+		final InterruptedException[] inE = new InterruptedException[1];
+		Thread runnerThread = new Thread(new Runnable() {
+			public void run() {
+				try {
+					data.add(runnable.run());
+				} catch (IOException e) {
+					ioE[0] = e;
+				} catch (InterruptedException e) {
+					inE[0] = e;
+				}
+			}
+		});
+		boolean interrupted = false;
+		runnerThread.start();
+		runnerThread.join(timeoutMillis);
+		if (runnerThread.isAlive()) {
+			if(logger.isDebugEnabled())
+				logger.debug("executeWithTimeout(): Interrupting server call");
+			runnerThread.interrupt();
+			interrupted = true;
+		}
+		if (ioE[0] != null)
+			throw ioE[0];
+		if (inE[0] != null)
+			throw inE[0];
+		if (interrupted) {
+			// Tell HDFS manager that the server timed out
+			if(logger.isDebugEnabled())
+				logger.debug("executeWithTimeout(): Server timed out: "+serverURI);
+			HDFSServer server = HDFSManager.INSTANCE.getServer(serverURI);
+			String projectName = HDFSManager.INSTANCE.getProjectName(server);
+			IProject project = ResourcesPlugin.getWorkspace().getRoot().getProject(projectName);
+			HDFSManager.disconnectProject(project);
+			throw new InterruptedException();
+		}
+		if (data.size() > 0)
+			return data.get(0);
+		return null;
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.apache.hdt.core.hdfs.HDFSClient#getDefaultUserAndGroupIds()
+	 */
+	@Override
+	public List<String> getDefaultUserAndGroupIds() throws IOException, InterruptedException {
+		return executeWithTimeout(new CustomRunnable<List<String>>() {
+			@Override
+			public List<String> run() throws IOException, InterruptedException {
+				return client.getDefaultUserAndGroupIds();
+			}
+		});
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.apache.hdt.core.hdfs.HDFSClient#getResourceInformation(java
+	 * .net.URI, java.lang.String)
+	 */
+	@Override
+	public ResourceInformation getResourceInformation(final URI uri, final String user) throws IOException, InterruptedException {
+		return executeWithTimeout(new CustomRunnable<ResourceInformation>() {
+			@Override
+			public ResourceInformation run() throws IOException, InterruptedException {
+				return client.getResourceInformation(uri, user);
+			}
+		});
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.apache.hdt.core.hdfs.HDFSClient#setResourceInformation(java
+	 * .net.URI, org.apache.hdt.core.hdfs.ResourceInformation,
+	 * java.lang.String)
+	 */
+	@Override
+	public void setResourceInformation(final URI uri, final ResourceInformation information, final String user) throws IOException, InterruptedException {
+		executeWithTimeout(new CustomRunnable<Object>() {
+			@Override
+			public Object run() throws IOException, InterruptedException {
+				client.setResourceInformation(uri, information, user);
+				return null;
+			}
+		});
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.apache.hdt.core.hdfs.HDFSClient#listResources(java.net.URI,
+	 * java.lang.String)
+	 */
+	@Override
+	public List<ResourceInformation> listResources(final URI uri, final String user) throws IOException, InterruptedException {
+		return executeWithTimeout(new CustomRunnable<List<ResourceInformation>>() {
+			@Override
+			public List<ResourceInformation> run() throws IOException, InterruptedException {
+				return client.listResources(uri, user);
+			}
+		});
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.apache.hdt.core.hdfs.HDFSClient#openInputStream(java.net.URI,
+	 * java.lang.String)
+	 */
+	@Override
+	public InputStream openInputStream(final URI uri, final String user) throws IOException, InterruptedException {
+		return executeWithTimeout(new CustomRunnable<InputStream>() {
+			@Override
+			public InputStream run() throws IOException, InterruptedException {
+				return client.openInputStream(uri, user);
+			}
+		});
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.apache.hdt.core.hdfs.HDFSClient#mkdirs(java.net.URI,
+	 * java.lang.String)
+	 */
+	@Override
+	public boolean mkdirs(final URI uri, final String user) throws IOException, InterruptedException {
+		return executeWithTimeout(new CustomRunnable<Boolean>() {
+			@Override
+			public Boolean run() throws IOException, InterruptedException {
+				return client.mkdirs(uri, user);
+			}
+		});
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.apache.hdt.core.hdfs.HDFSClient#openOutputStream(java.net.URI,
+	 * java.lang.String)
+	 */
+	@Override
+	public OutputStream openOutputStream(final URI uri, final String user) throws IOException, InterruptedException {
+		return executeWithTimeout(new CustomRunnable<OutputStream>() {
+			@Override
+			public OutputStream run() throws IOException, InterruptedException {
+				return client.openOutputStream(uri, user);
+			}
+		});
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.apache.hdt.core.hdfs.HDFSClient#createOutputStream(java.net
+	 * .URI, java.lang.String)
+	 */
+	@Override
+	public OutputStream createOutputStream(final URI uri, final String user) throws IOException, InterruptedException {
+		return executeWithTimeout(new CustomRunnable<OutputStream>() {
+			@Override
+			public OutputStream run() throws IOException, InterruptedException {
+				return client.createOutputStream(uri, user);
+			}
+		});
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.apache.hdt.core.hdfs.HDFSClient#delete(java.net.URI,
+	 * java.lang.String)
+	 */
+	@Override
+	public void delete(final URI uri, final String user) throws IOException, InterruptedException {
+		executeWithTimeout(new CustomRunnable<Object>() {
+			@Override
+			public Object run() throws IOException, InterruptedException {
+				client.delete(uri, user);
+				return null;
+			}
+		});
+	}
+
+}
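
[Editor's note] For readers following the patch, here is a minimal usage sketch (not part of the diff) of the timeout wrapper above. The delegate name WebHDFSClientStub and the server URI are hypothetical; any HDFSClient implementation would do.

    HDFSClient delegate = new WebHDFSClientStub(); // hypothetical concrete HDFSClient
    HDFSClient client = new InterruptableHDFSClient("hdfs://namenode:8020", delegate);
    try {
        // Each call blocks for at most ~5 seconds (DEFAULT_TIMEOUT); on timeout the worker
        // thread is interrupted and the project backing this server URI is disconnected.
        List<ResourceInformation> listing =
                client.listResources(URI.create("hdfs://namenode:8020/tmp"), "hdfs");
    } catch (InterruptedException e) {
        // server did not respond within the timeout
    } catch (IOException e) {
        // genuine HDFS error, rethrown unchanged by the wrapper
    }

The wrapper trades one short-lived thread per call for the guarantee that a hung NameNode never blocks the caller (typically the Eclipse UI) indefinitely.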

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/UploadFileJob.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/UploadFileJob.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/UploadFileJob.java
new file mode 100644
index 0000000..a369776
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/UploadFileJob.java
@@ -0,0 +1,150 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hdt.core.internal.hdfs;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.net.URI;
+
+import org.apache.hdt.core.Activator;
+import org.apache.log4j.Logger;
+import org.eclipse.core.filesystem.EFS;
+import org.eclipse.core.resources.IResource;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.core.runtime.IProgressMonitor;
+import org.eclipse.core.runtime.IStatus;
+import org.eclipse.core.runtime.NullProgressMonitor;
+import org.eclipse.core.runtime.Status;
+import org.eclipse.core.runtime.jobs.Job;
+
+/**
+ * Eclipse workspace job that uploads the locally cached copy of an HDFS-backed
+ * resource to the server and, on success, removes the local copy along with
+ * any empty parent folders.
+ * 
+ * @author Srimanth Gunturi
+ * 
+ */
+public class UploadFileJob extends Job {
+
+	private final static Logger logger = Logger.getLogger(UploadFileJob.class);
+	private final HDFSFileStore store;
+	private final IResource resource;
+
+	/**
+	 * @param resource
+	 *            the workspace resource whose local copy should be uploaded
+	 * @throws CoreException
+	 *             if no file store can be obtained for the resource's URI
+	 */
+	public UploadFileJob(IResource resource) throws CoreException {
+		super("Uploading " + resource.getLocationURI());
+		this.resource = resource;
+		this.store = (HDFSFileStore) EFS.getStore(resource.getLocationURI());
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.core.resources.IWorkspaceRunnable#run(org.eclipse.core.runtime
+	 * .IProgressMonitor)
+	 */
+	@Override
+	public IStatus run(IProgressMonitor monitor) {
+		IStatus status = Status.OK_STATUS;
+		if (store != null) {
+			URI uri = store.toURI();
+			try {
+				File localFile = store.getLocalFile();
+				if (logger.isDebugEnabled())
+					logger.debug("[" + uri + "]: Uploading from " + (localFile == null ? "(null)" : localFile.toString()));
+				HDFSManager.INSTANCE.startServerOperation(uri.toString());
+				if (localFile != null && localFile.exists()) {
+					boolean uploaded = false;
+					monitor.beginTask("Uploading " + localFile.getAbsolutePath(), (int) localFile.length());
+					FileInputStream fis = new FileInputStream(localFile);
+					OutputStream fos = store.openRemoteOutputStream(EFS.NONE, new NullProgressMonitor());
+					try {
+						if (!monitor.isCanceled()) {
+							byte[] data = new byte[8 * 1024];
+							int read = fis.read(data);
+							int totalRead = 0;
+							while (read > -1) {
+								if (monitor.isCanceled())
+									throw new InterruptedException();
+								fos.write(data, 0, read);
+								totalRead += read;
+								monitor.worked(read);
+								read = fis.read(data);
+								if (logger.isDebugEnabled())
+									logger.debug("Uploaded " + totalRead + " out of " + localFile.length() + " [" + (((float)totalRead*100.0f) / (float)localFile.length())
+											+ "]");
+							}
+							uploaded = true;
+						}
+					} catch (InterruptedException e) {
+						throw e;
+					} finally {
+						try {
+							fis.close();
+						} catch (Throwable t) {
+						}
+						try {
+							fos.close();
+						} catch (Throwable t) {
+						}
+						if (uploaded) {
+							// Delete parent folders if empty.
+							File parentFolder = localFile.getParentFile();
+							localFile.delete();
+							deleteFoldersIfEmpty(parentFolder);
+						}
+						monitor.done();
+					}
+				} else
+					status = new Status(IStatus.ERROR, Activator.BUNDLE_ID, "Local file not found [" + localFile + "]");
+				resource.refreshLocal(IResource.DEPTH_ONE, new NullProgressMonitor());
+			} catch (InterruptedException e) {
+				logger.debug("Uploading file [" + uri + "] cancelled by user");
+			} catch (IOException e) {
+				status = new Status(IStatus.ERROR, Activator.BUNDLE_ID, "Error uploading file " + uri, e);
+			} catch (CoreException e) {
+				status = new Status(IStatus.ERROR, Activator.BUNDLE_ID, e.getMessage(), e);
+			} finally {
+				HDFSManager.INSTANCE.stopServerOperation(uri.toString());
+			}
+		}
+		return status;
+	}
+
+	/**
+	 * Attempts to delete the given folder and then each of its ancestors,
+	 * stopping at the first folder that is not empty (or at the file system
+	 * root).
+	 * 
+	 * @param folder
+	 *            the deepest folder to start deleting from
+	 */
+	public static void deleteFoldersIfEmpty(File folder) {
+		File toDeleteFolder = folder;
+		String[] children = toDeleteFolder.list();
+		while (children == null || children.length < 1) {
+			// Folder is empty (or no longer listable) - delete it and move up a level
+			File parent = toDeleteFolder.getParentFile();
+			toDeleteFolder.delete();
+			if (parent == null)
+				break; // reached the file system root
+			toDeleteFolder = parent;
+			children = toDeleteFolder.list();
+		}
+	}
+}
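
[Editor's note] A short sketch (again not part of the diff) of how this job would typically be scheduled; the project and file names below are hypothetical, any workspace resource backed by the HDFS file system would work the same way.

    try {
        IFile file = ResourcesPlugin.getWorkspace().getRoot()
                .getProject("hdfs-project").getFile("data/input.txt"); // hypothetical names
        new UploadFileJob(file).schedule(); // runs asynchronously via the Eclipse Jobs framework
    } catch (CoreException e) {
        // EFS could not resolve a file store for the resource's location URI
    }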

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/HDFSServer.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/HDFSServer.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/HDFSServer.java
new file mode 100644
index 0000000..be04f74
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/HDFSServer.java
@@ -0,0 +1,127 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *  
+ */
+package org.apache.hdt.core.internal.model;
+
+import org.eclipse.emf.common.util.EList;
+import org.eclipse.emf.ecore.EObject;
+
+/**
+ * <!-- begin-user-doc -->
+ * A representation of the model object '<em><b>HDFS Server</b></em>'.
+ * <!-- end-user-doc -->
+ *
+ * <p>
+ * The following features are supported:
+ * <ul>
+ *   <li>{@link org.apache.hdt.core.internal.model.HDFSServer#isLoaded <em>Loaded</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.HDFSServer#getOperationURIs <em>Operation UR Is</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.HDFSServer#getUserId <em>User Id</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.HDFSServer#getGroupIds <em>Group Ids</em>}</li>
+ * </ul>
+ * </p>
+ *
+ * @see org.apache.hdt.core.internal.model.HadoopPackage#getHDFSServer()
+ * @model
+ * @generated
+ */
+public interface HDFSServer extends Server {
+	/**
+	 * Returns the value of the '<em><b>Loaded</b></em>' attribute.
+	 * <!-- begin-user-doc -->
+	 * <p>
+	 * If the meaning of the '<em>Loaded</em>' attribute isn't clear,
+	 * there really should be more of a description here...
+	 * </p>
+	 * <!-- end-user-doc -->
+	 * @return the value of the '<em>Loaded</em>' attribute.
+	 * @see #setLoaded(boolean)
+	 * @see org.apache.hdt.core.internal.model.HadoopPackage#getHDFSServer_Loaded()
+	 * @model
+	 * @generated
+	 */
+	boolean isLoaded();
+
+	/**
+	 * Sets the value of the '{@link org.apache.hdt.core.internal.model.HDFSServer#isLoaded <em>Loaded</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @param value the new value of the '<em>Loaded</em>' attribute.
+	 * @see #isLoaded()
+	 * @generated
+	 */
+	void setLoaded(boolean value);
+
+	/**
+	 * Returns the value of the '<em><b>Operation UR Is</b></em>' attribute list.
+	 * The list contents are of type {@link java.lang.String}.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * <!-- begin-model-doc -->
+	 * List of HDFS uris where operations are being performed.
+	 * <!-- end-model-doc -->
+	 * @return the value of the '<em>Operation UR Is</em>' attribute list.
+	 * @see org.apache.hdt.core.internal.model.HadoopPackage#getHDFSServer_OperationURIs()
+	 * @model transient="true"
+	 * @generated
+	 */
+	EList<String> getOperationURIs();
+
+	/**
+	 * Returns the value of the '<em><b>User Id</b></em>' attribute.
+	 * <!-- begin-user-doc -->
+	 * <p>
+	 * If the meaning of the '<em>User Id</em>' attribute isn't clear,
+	 * there really should be more of a description here...
+	 * </p>
+	 * <!-- end-user-doc -->
+	 * @return the value of the '<em>User Id</em>' attribute.
+	 * @see #setUserId(String)
+	 * @see org.apache.hdt.core.internal.model.HadoopPackage#getHDFSServer_UserId()
+	 * @model
+	 * @generated
+	 */
+	String getUserId();
+
+	/**
+	 * Sets the value of the '{@link org.apache.hdt.core.internal.model.HDFSServer#getUserId <em>User Id</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @param value the new value of the '<em>User Id</em>' attribute.
+	 * @see #getUserId()
+	 * @generated
+	 */
+	void setUserId(String value);
+
+	/**
+	 * Returns the value of the '<em><b>Group Ids</b></em>' attribute list.
+	 * The list contents are of type {@link java.lang.String}.
+	 * <!-- begin-user-doc -->
+	 * <p>
+	 * If the meaning of the '<em>Group Ids</em>' attribute list isn't clear,
+	 * there really should be more of a description here...
+	 * </p>
+	 * <!-- end-user-doc -->
+	 * @return the value of the '<em>Group Ids</em>' attribute list.
+	 * @see org.apache.hdt.core.internal.model.HadoopPackage#getHDFSServer_GroupIds()
+	 * @model
+	 * @generated
+	 */
+	EList<String> getGroupIds();
+
+} // HDFSServer
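
[Editor's note] To make the generated interface above more concrete, a hedged sketch of populating it with the factory added in the next file. The setName/setUri calls are the EMF-generated setters implied by the Server base type's @model features, and all values are illustrative.

    HDFSServer server = HadoopFactory.eINSTANCE.createHDFSServer();
    server.setName("cluster-1");             // attribute inherited from Server (illustrative value)
    server.setUri("hdfs://namenode:8020");   // attribute inherited from Server
    server.setUserId("hdfs");
    server.getGroupIds().add("supergroup");  // the returned EList is modified in place
    server.setLoaded(false);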

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/HadoopFactory.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/HadoopFactory.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/HadoopFactory.java
new file mode 100644
index 0000000..bb79ecc
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/HadoopFactory.java
@@ -0,0 +1,85 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *  
+ */
+package org.apache.hdt.core.internal.model;
+
+import org.eclipse.emf.ecore.EFactory;
+
+/**
+ * <!-- begin-user-doc -->
+ * The <b>Factory</b> for the model.
+ * It provides a create method for each non-abstract class of the model.
+ * <!-- end-user-doc -->
+ * @see org.apache.hdt.core.internal.model.HadoopPackage
+ * @generated
+ */
+public interface HadoopFactory extends EFactory {
+	/**
+	 * The singleton instance of the factory.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	HadoopFactory eINSTANCE = org.apache.hdt.core.internal.model.impl.HadoopFactoryImpl.init();
+
+	/**
+	 * Returns a new object of class '<em>HDFS Server</em>'.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @return a new object of class '<em>HDFS Server</em>'.
+	 * @generated
+	 */
+	HDFSServer createHDFSServer();
+
+	/**
+	 * Returns a new object of class '<em>Servers</em>'.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @return a new object of class '<em>Servers</em>'.
+	 * @generated
+	 */
+	Servers createServers();
+
+	/**
+	 * Returns a new object of class '<em>Zoo Keeper Server</em>'.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @return a new object of class '<em>Zoo Keeper Server</em>'.
+	 * @generated
+	 */
+	ZooKeeperServer createZooKeeperServer();
+
+	/**
+	 * Returns a new object of class '<em>ZNode</em>'.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @return a new object of class '<em>ZNode</em>'.
+	 * @generated
+	 */
+	ZNode createZNode();
+
+	/**
+	 * Returns the package supported by this factory.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @return the package supported by this factory.
+	 * @generated
+	 */
+	HadoopPackage getHadoopPackage();
+
+} //HadoopFactory
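
[Editor's note] Besides the typed create methods above, the same objects can be created reflectively through the EMF metadata in HadoopPackage (shown in the next file). A small sketch with an illustrative node name:

    // Reflective creation via the package literals, instead of the typed createZNode().
    EObject raw = HadoopFactory.eINSTANCE.create(HadoopPackage.Literals.ZNODE);
    raw.eSet(HadoopPackage.Literals.ZNODE__NODE_NAME, "config"); // illustrative node name
    ZNode node = (ZNode) raw;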


[6/8] HDT-32: Merge the code base of Hadoop-Eclipse project into HDT. Contributed by Srimanth Gunturi

Posted by rs...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/HadoopPackage.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/HadoopPackage.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/HadoopPackage.java
new file mode 100644
index 0000000..8332b4e
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/HadoopPackage.java
@@ -0,0 +1,1380 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *  
+ */
+package org.apache.hdt.core.internal.model;
+
+import org.eclipse.emf.ecore.EAttribute;
+import org.eclipse.emf.ecore.EClass;
+import org.eclipse.emf.ecore.EEnum;
+import org.eclipse.emf.ecore.EPackage;
+import org.eclipse.emf.ecore.EReference;
+
+/**
+ * <!-- begin-user-doc -->
+ * The <b>Package</b> for the model.
+ * It contains accessors for the meta objects to represent
+ * <ul>
+ *   <li>each class,</li>
+ *   <li>each feature of each class,</li>
+ *   <li>each enum,</li>
+ *   <li>and each data type</li>
+ * </ul>
+ * <!-- end-user-doc -->
+ * @see org.apache.hdt.core.internal.model.HadoopFactory
+ * @model kind="package"
+ * @generated
+ */
+public interface HadoopPackage extends EPackage {
+	/**
+	 * The package name.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	String eNAME = "model";
+
+	/**
+	 * The package namespace URI.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	String eNS_URI = "http://hadoop/1.0";
+
+	/**
+	 * The package namespace name.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	String eNS_PREFIX = "model";
+
+	/**
+	 * The singleton instance of the package.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	HadoopPackage eINSTANCE = org.apache.hdt.core.internal.model.impl.HadoopPackageImpl.init();
+
+	/**
+	 * The meta object id for the '{@link org.apache.hdt.core.internal.model.impl.ServerImpl <em>Server</em>}' class.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see org.apache.hdt.core.internal.model.impl.ServerImpl
+	 * @see org.apache.hdt.core.internal.model.impl.HadoopPackageImpl#getServer()
+	 * @generated
+	 */
+	int SERVER = 2;
+
+	/**
+	 * The feature id for the '<em><b>Name</b></em>' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 * @ordered
+	 */
+	int SERVER__NAME = 0;
+
+	/**
+	 * The feature id for the '<em><b>Uri</b></em>' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 * @ordered
+	 */
+	int SERVER__URI = 1;
+
+	/**
+	 * The feature id for the '<em><b>Status Code</b></em>' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 * @ordered
+	 */
+	int SERVER__STATUS_CODE = 2;
+
+	/**
+	 * The feature id for the '<em><b>Status Message</b></em>' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 * @ordered
+	 */
+	int SERVER__STATUS_MESSAGE = 3;
+
+	/**
+	 * The feature id for the '<em><b>Last Accessed</b></em>' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 * @ordered
+	 */
+	int SERVER__LAST_ACCESSED = 4;
+
+	/**
+	 * The number of structural features of the '<em>Server</em>' class.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 * @ordered
+	 */
+	int SERVER_FEATURE_COUNT = 5;
+
+	/**
+	 * The meta object id for the '{@link org.apache.hdt.core.internal.model.impl.HDFSServerImpl <em>HDFS Server</em>}' class.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see org.apache.hdt.core.internal.model.impl.HDFSServerImpl
+	 * @see org.apache.hdt.core.internal.model.impl.HadoopPackageImpl#getHDFSServer()
+	 * @generated
+	 */
+	int HDFS_SERVER = 0;
+
+	/**
+	 * The feature id for the '<em><b>Name</b></em>' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 * @ordered
+	 */
+	int HDFS_SERVER__NAME = SERVER__NAME;
+
+	/**
+	 * The feature id for the '<em><b>Uri</b></em>' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 * @ordered
+	 */
+	int HDFS_SERVER__URI = SERVER__URI;
+
+	/**
+	 * The feature id for the '<em><b>Status Code</b></em>' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 * @ordered
+	 */
+	int HDFS_SERVER__STATUS_CODE = SERVER__STATUS_CODE;
+
+	/**
+	 * The feature id for the '<em><b>Status Message</b></em>' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 * @ordered
+	 */
+	int HDFS_SERVER__STATUS_MESSAGE = SERVER__STATUS_MESSAGE;
+
+	/**
+	 * The feature id for the '<em><b>Last Accessed</b></em>' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 * @ordered
+	 */
+	int HDFS_SERVER__LAST_ACCESSED = SERVER__LAST_ACCESSED;
+
+	/**
+	 * The feature id for the '<em><b>Loaded</b></em>' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 * @ordered
+	 */
+	int HDFS_SERVER__LOADED = SERVER_FEATURE_COUNT + 0;
+
+	/**
+	 * The feature id for the '<em><b>Operation UR Is</b></em>' attribute list.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 * @ordered
+	 */
+	int HDFS_SERVER__OPERATION_UR_IS = SERVER_FEATURE_COUNT + 1;
+
+	/**
+	 * The feature id for the '<em><b>User Id</b></em>' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 * @ordered
+	 */
+	int HDFS_SERVER__USER_ID = SERVER_FEATURE_COUNT + 2;
+
+	/**
+	 * The feature id for the '<em><b>Group Ids</b></em>' attribute list.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 * @ordered
+	 */
+	int HDFS_SERVER__GROUP_IDS = SERVER_FEATURE_COUNT + 3;
+
+	/**
+	 * The number of structural features of the '<em>HDFS Server</em>' class.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 * @ordered
+	 */
+	int HDFS_SERVER_FEATURE_COUNT = SERVER_FEATURE_COUNT + 4;
+
+	/**
+	 * The meta object id for the '{@link org.apache.hdt.core.internal.model.impl.ServersImpl <em>Servers</em>}' class.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see org.apache.hdt.core.internal.model.impl.ServersImpl
+	 * @see org.apache.hdt.core.internal.model.impl.HadoopPackageImpl#getServers()
+	 * @generated
+	 */
+	int SERVERS = 1;
+
+	/**
+	 * The feature id for the '<em><b>Hdfs Servers</b></em>' containment reference list.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 * @ordered
+	 */
+	int SERVERS__HDFS_SERVERS = 0;
+
+	/**
+	 * The feature id for the '<em><b>Version</b></em>' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 * @ordered
+	 */
+	int SERVERS__VERSION = 1;
+
+	/**
+	 * The feature id for the '<em><b>Zookeeper Servers</b></em>' containment reference list.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 * @ordered
+	 */
+	int SERVERS__ZOOKEEPER_SERVERS = 2;
+
+	/**
+	 * The number of structural features of the '<em>Servers</em>' class.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 * @ordered
+	 */
+	int SERVERS_FEATURE_COUNT = 3;
+
+	/**
+	 * The meta object id for the '{@link org.apache.hdt.core.internal.model.impl.ZooKeeperServerImpl <em>Zoo Keeper Server</em>}' class.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see org.apache.hdt.core.internal.model.impl.ZooKeeperServerImpl
+	 * @see org.apache.hdt.core.internal.model.impl.HadoopPackageImpl#getZooKeeperServer()
+	 * @generated
+	 */
+	int ZOO_KEEPER_SERVER = 3;
+
+	/**
+	 * The feature id for the '<em><b>Name</b></em>' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 * @ordered
+	 */
+	int ZOO_KEEPER_SERVER__NAME = SERVER__NAME;
+
+	/**
+	 * The feature id for the '<em><b>Uri</b></em>' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 * @ordered
+	 */
+	int ZOO_KEEPER_SERVER__URI = SERVER__URI;
+
+	/**
+	 * The feature id for the '<em><b>Status Code</b></em>' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 * @ordered
+	 */
+	int ZOO_KEEPER_SERVER__STATUS_CODE = SERVER__STATUS_CODE;
+
+	/**
+	 * The feature id for the '<em><b>Status Message</b></em>' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 * @ordered
+	 */
+	int ZOO_KEEPER_SERVER__STATUS_MESSAGE = SERVER__STATUS_MESSAGE;
+
+	/**
+	 * The feature id for the '<em><b>Last Accessed</b></em>' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 * @ordered
+	 */
+	int ZOO_KEEPER_SERVER__LAST_ACCESSED = SERVER__LAST_ACCESSED;
+
+	/**
+	 * The feature id for the '<em><b>Children</b></em>' containment reference list.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 * @ordered
+	 */
+	int ZOO_KEEPER_SERVER__CHILDREN = SERVER_FEATURE_COUNT + 0;
+
+	/**
+	 * The feature id for the '<em><b>Last Refresh</b></em>' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 * @ordered
+	 */
+	int ZOO_KEEPER_SERVER__LAST_REFRESH = SERVER_FEATURE_COUNT + 1;
+
+	/**
+	 * The feature id for the '<em><b>Refreshing</b></em>' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 * @ordered
+	 */
+	int ZOO_KEEPER_SERVER__REFRESHING = SERVER_FEATURE_COUNT + 2;
+
+	/**
+	 * The feature id for the '<em><b>Ephermeral</b></em>' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 * @ordered
+	 */
+	int ZOO_KEEPER_SERVER__EPHERMERAL = SERVER_FEATURE_COUNT + 3;
+
+	/**
+	 * The feature id for the '<em><b>Creation Id</b></em>' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 * @ordered
+	 */
+	int ZOO_KEEPER_SERVER__CREATION_ID = SERVER_FEATURE_COUNT + 4;
+
+	/**
+	 * The feature id for the '<em><b>Modified Id</b></em>' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 * @ordered
+	 */
+	int ZOO_KEEPER_SERVER__MODIFIED_ID = SERVER_FEATURE_COUNT + 5;
+
+	/**
+	 * The feature id for the '<em><b>Creation Time</b></em>' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 * @ordered
+	 */
+	int ZOO_KEEPER_SERVER__CREATION_TIME = SERVER_FEATURE_COUNT + 6;
+
+	/**
+	 * The feature id for the '<em><b>Modified Time</b></em>' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 * @ordered
+	 */
+	int ZOO_KEEPER_SERVER__MODIFIED_TIME = SERVER_FEATURE_COUNT + 7;
+
+	/**
+	 * The feature id for the '<em><b>Version</b></em>' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 * @ordered
+	 */
+	int ZOO_KEEPER_SERVER__VERSION = SERVER_FEATURE_COUNT + 8;
+
+	/**
+	 * The feature id for the '<em><b>Children Version</b></em>' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 * @ordered
+	 */
+	int ZOO_KEEPER_SERVER__CHILDREN_VERSION = SERVER_FEATURE_COUNT + 9;
+
+	/**
+	 * The feature id for the '<em><b>Acl Version</b></em>' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 * @ordered
+	 */
+	int ZOO_KEEPER_SERVER__ACL_VERSION = SERVER_FEATURE_COUNT + 10;
+
+	/**
+	 * The feature id for the '<em><b>Ephermal Owner Session Id</b></em>' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 * @ordered
+	 */
+	int ZOO_KEEPER_SERVER__EPHERMAL_OWNER_SESSION_ID = SERVER_FEATURE_COUNT + 11;
+
+	/**
+	 * The feature id for the '<em><b>Data Length</b></em>' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 * @ordered
+	 */
+	int ZOO_KEEPER_SERVER__DATA_LENGTH = SERVER_FEATURE_COUNT + 12;
+
+	/**
+	 * The feature id for the '<em><b>Children Count</b></em>' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 * @ordered
+	 */
+	int ZOO_KEEPER_SERVER__CHILDREN_COUNT = SERVER_FEATURE_COUNT + 13;
+
+	/**
+	 * The feature id for the '<em><b>Parent</b></em>' reference.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 * @ordered
+	 */
+	int ZOO_KEEPER_SERVER__PARENT = SERVER_FEATURE_COUNT + 14;
+
+	/**
+	 * The feature id for the '<em><b>Node Name</b></em>' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 * @ordered
+	 */
+	int ZOO_KEEPER_SERVER__NODE_NAME = SERVER_FEATURE_COUNT + 15;
+
+	/**
+	 * The feature id for the '<em><b>Sequential</b></em>' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 * @ordered
+	 */
+	int ZOO_KEEPER_SERVER__SEQUENTIAL = SERVER_FEATURE_COUNT + 16;
+
+	/**
+	 * The number of structural features of the '<em>Zoo Keeper Server</em>' class.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 * @ordered
+	 */
+	int ZOO_KEEPER_SERVER_FEATURE_COUNT = SERVER_FEATURE_COUNT + 17;
+
+	/**
+	 * The meta object id for the '{@link org.apache.hdt.core.internal.model.impl.ZNodeImpl <em>ZNode</em>}' class.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see org.apache.hdt.core.internal.model.impl.ZNodeImpl
+	 * @see org.apache.hdt.core.internal.model.impl.HadoopPackageImpl#getZNode()
+	 * @generated
+	 */
+	int ZNODE = 4;
+
+	/**
+	 * The feature id for the '<em><b>Children</b></em>' containment reference list.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 * @ordered
+	 */
+	int ZNODE__CHILDREN = 0;
+
+	/**
+	 * The feature id for the '<em><b>Last Refresh</b></em>' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 * @ordered
+	 */
+	int ZNODE__LAST_REFRESH = 1;
+
+	/**
+	 * The feature id for the '<em><b>Refreshing</b></em>' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 * @ordered
+	 */
+	int ZNODE__REFRESHING = 2;
+
+	/**
+	 * The feature id for the '<em><b>Ephermeral</b></em>' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 * @ordered
+	 */
+	int ZNODE__EPHERMERAL = 3;
+
+	/**
+	 * The feature id for the '<em><b>Creation Id</b></em>' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 * @ordered
+	 */
+	int ZNODE__CREATION_ID = 4;
+
+	/**
+	 * The feature id for the '<em><b>Modified Id</b></em>' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 * @ordered
+	 */
+	int ZNODE__MODIFIED_ID = 5;
+
+	/**
+	 * The feature id for the '<em><b>Creation Time</b></em>' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 * @ordered
+	 */
+	int ZNODE__CREATION_TIME = 6;
+
+	/**
+	 * The feature id for the '<em><b>Modified Time</b></em>' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 * @ordered
+	 */
+	int ZNODE__MODIFIED_TIME = 7;
+
+	/**
+	 * The feature id for the '<em><b>Version</b></em>' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 * @ordered
+	 */
+	int ZNODE__VERSION = 8;
+
+	/**
+	 * The feature id for the '<em><b>Children Version</b></em>' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 * @ordered
+	 */
+	int ZNODE__CHILDREN_VERSION = 9;
+
+	/**
+	 * The feature id for the '<em><b>Acl Version</b></em>' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 * @ordered
+	 */
+	int ZNODE__ACL_VERSION = 10;
+
+	/**
+	 * The feature id for the '<em><b>Ephermal Owner Session Id</b></em>' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 * @ordered
+	 */
+	int ZNODE__EPHERMAL_OWNER_SESSION_ID = 11;
+
+	/**
+	 * The feature id for the '<em><b>Data Length</b></em>' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 * @ordered
+	 */
+	int ZNODE__DATA_LENGTH = 12;
+
+	/**
+	 * The feature id for the '<em><b>Children Count</b></em>' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 * @ordered
+	 */
+	int ZNODE__CHILDREN_COUNT = 13;
+
+	/**
+	 * The feature id for the '<em><b>Parent</b></em>' reference.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 * @ordered
+	 */
+	int ZNODE__PARENT = 14;
+
+	/**
+	 * The feature id for the '<em><b>Node Name</b></em>' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 * @ordered
+	 */
+	int ZNODE__NODE_NAME = 15;
+
+	/**
+	 * The feature id for the '<em><b>Sequential</b></em>' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 * @ordered
+	 */
+	int ZNODE__SEQUENTIAL = 16;
+
+	/**
+	 * The number of structural features of the '<em>ZNode</em>' class.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 * @ordered
+	 */
+	int ZNODE_FEATURE_COUNT = 17;
+
+	/**
+	 * The meta object id for the '{@link org.apache.hdt.core.internal.model.ServerStatus <em>Server Status</em>}' enum.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see org.apache.hdt.core.internal.model.ServerStatus
+	 * @see org.apache.hdt.core.internal.model.impl.HadoopPackageImpl#getServerStatus()
+	 * @generated
+	 */
+	int SERVER_STATUS = 5;
+
+
+	/**
+	 * Returns the meta object for class '{@link org.apache.hdt.core.internal.model.HDFSServer <em>HDFS Server</em>}'.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @return the meta object for class '<em>HDFS Server</em>'.
+	 * @see org.apache.hdt.core.internal.model.HDFSServer
+	 * @generated
+	 */
+	EClass getHDFSServer();
+
+	/**
+	 * Returns the meta object for the attribute '{@link org.apache.hdt.core.internal.model.HDFSServer#isLoaded <em>Loaded</em>}'.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @return the meta object for the attribute '<em>Loaded</em>'.
+	 * @see org.apache.hdt.core.internal.model.HDFSServer#isLoaded()
+	 * @see #getHDFSServer()
+	 * @generated
+	 */
+	EAttribute getHDFSServer_Loaded();
+
+	/**
+	 * Returns the meta object for the attribute list '{@link org.apache.hdt.core.internal.model.HDFSServer#getOperationURIs <em>Operation UR Is</em>}'.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @return the meta object for the attribute list '<em>Operation UR Is</em>'.
+	 * @see org.apache.hdt.core.internal.model.HDFSServer#getOperationURIs()
+	 * @see #getHDFSServer()
+	 * @generated
+	 */
+	EAttribute getHDFSServer_OperationURIs();
+
+	/**
+	 * Returns the meta object for the attribute '{@link org.apache.hdt.core.internal.model.HDFSServer#getUserId <em>User Id</em>}'.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @return the meta object for the attribute '<em>User Id</em>'.
+	 * @see org.apache.hdt.core.internal.model.HDFSServer#getUserId()
+	 * @see #getHDFSServer()
+	 * @generated
+	 */
+	EAttribute getHDFSServer_UserId();
+
+	/**
+	 * Returns the meta object for the attribute list '{@link org.apache.hdt.core.internal.model.HDFSServer#getGroupIds <em>Group Ids</em>}'.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @return the meta object for the attribute list '<em>Group Ids</em>'.
+	 * @see org.apache.hdt.core.internal.model.HDFSServer#getGroupIds()
+	 * @see #getHDFSServer()
+	 * @generated
+	 */
+	EAttribute getHDFSServer_GroupIds();
+
+	/**
+	 * Returns the meta object for class '{@link org.apache.hdt.core.internal.model.Servers <em>Servers</em>}'.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @return the meta object for class '<em>Servers</em>'.
+	 * @see org.apache.hdt.core.internal.model.Servers
+	 * @generated
+	 */
+	EClass getServers();
+
+	/**
+	 * Returns the meta object for the containment reference list '{@link org.apache.hdt.core.internal.model.Servers#getHdfsServers <em>Hdfs Servers</em>}'.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @return the meta object for the containment reference list '<em>Hdfs Servers</em>'.
+	 * @see org.apache.hdt.core.internal.model.Servers#getHdfsServers()
+	 * @see #getServers()
+	 * @generated
+	 */
+	EReference getServers_HdfsServers();
+
+	/**
+	 * Returns the meta object for the attribute '{@link org.apache.hdt.core.internal.model.Servers#getVersion <em>Version</em>}'.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @return the meta object for the attribute '<em>Version</em>'.
+	 * @see org.apache.hdt.core.internal.model.Servers#getVersion()
+	 * @see #getServers()
+	 * @generated
+	 */
+	EAttribute getServers_Version();
+
+	/**
+	 * Returns the meta object for the containment reference list '{@link org.apache.hdt.core.internal.model.Servers#getZookeeperServers <em>Zookeeper Servers</em>}'.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @return the meta object for the containment reference list '<em>Zookeeper Servers</em>'.
+	 * @see org.apache.hdt.core.internal.model.Servers#getZookeeperServers()
+	 * @see #getServers()
+	 * @generated
+	 */
+	EReference getServers_ZookeeperServers();
+
+	/**
+	 * Returns the meta object for class '{@link org.apache.hdt.core.internal.model.Server <em>Server</em>}'.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @return the meta object for class '<em>Server</em>'.
+	 * @see org.apache.hdt.core.internal.model.Server
+	 * @generated
+	 */
+	EClass getServer();
+
+	/**
+	 * Returns the meta object for the attribute '{@link org.apache.hdt.core.internal.model.Server#getName <em>Name</em>}'.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @return the meta object for the attribute '<em>Name</em>'.
+	 * @see org.apache.hdt.core.internal.model.Server#getName()
+	 * @see #getServer()
+	 * @generated
+	 */
+	EAttribute getServer_Name();
+
+	/**
+	 * Returns the meta object for the attribute '{@link org.apache.hdt.core.internal.model.Server#getUri <em>Uri</em>}'.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @return the meta object for the attribute '<em>Uri</em>'.
+	 * @see org.apache.hdt.core.internal.model.Server#getUri()
+	 * @see #getServer()
+	 * @generated
+	 */
+	EAttribute getServer_Uri();
+
+	/**
+	 * Returns the meta object for the attribute '{@link org.apache.hdt.core.internal.model.Server#getStatusCode <em>Status Code</em>}'.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @return the meta object for the attribute '<em>Status Code</em>'.
+	 * @see org.apache.hdt.core.internal.model.Server#getStatusCode()
+	 * @see #getServer()
+	 * @generated
+	 */
+	EAttribute getServer_StatusCode();
+
+	/**
+	 * Returns the meta object for the attribute '{@link org.apache.hdt.core.internal.model.Server#getStatusMessage <em>Status Message</em>}'.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @return the meta object for the attribute '<em>Status Message</em>'.
+	 * @see org.apache.hdt.core.internal.model.Server#getStatusMessage()
+	 * @see #getServer()
+	 * @generated
+	 */
+	EAttribute getServer_StatusMessage();
+
+	/**
+	 * Returns the meta object for the attribute '{@link org.apache.hdt.core.internal.model.Server#getLastAccessed <em>Last Accessed</em>}'.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @return the meta object for the attribute '<em>Last Accessed</em>'.
+	 * @see org.apache.hdt.core.internal.model.Server#getLastAccessed()
+	 * @see #getServer()
+	 * @generated
+	 */
+	EAttribute getServer_LastAccessed();
+
+	/**
+	 * Returns the meta object for class '{@link org.apache.hdt.core.internal.model.ZooKeeperServer <em>Zoo Keeper Server</em>}'.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @return the meta object for class '<em>Zoo Keeper Server</em>'.
+	 * @see org.apache.hdt.core.internal.model.ZooKeeperServer
+	 * @generated
+	 */
+	EClass getZooKeeperServer();
+
+	/**
+	 * Returns the meta object for class '{@link org.apache.hdt.core.internal.model.ZNode <em>ZNode</em>}'.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @return the meta object for class '<em>ZNode</em>'.
+	 * @see org.apache.hdt.core.internal.model.ZNode
+	 * @generated
+	 */
+	EClass getZNode();
+
+	/**
+	 * Returns the meta object for the containment reference list '{@link org.apache.hdt.core.internal.model.ZNode#getChildren <em>Children</em>}'.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @return the meta object for the containment reference list '<em>Children</em>'.
+	 * @see org.apache.hdt.core.internal.model.ZNode#getChildren()
+	 * @see #getZNode()
+	 * @generated
+	 */
+	EReference getZNode_Children();
+
+	/**
+	 * Returns the meta object for the attribute '{@link org.apache.hdt.core.internal.model.ZNode#getLastRefresh <em>Last Refresh</em>}'.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @return the meta object for the attribute '<em>Last Refresh</em>'.
+	 * @see org.apache.hdt.core.internal.model.ZNode#getLastRefresh()
+	 * @see #getZNode()
+	 * @generated
+	 */
+	EAttribute getZNode_LastRefresh();
+
+	/**
+	 * Returns the meta object for the attribute '{@link org.apache.hdt.core.internal.model.ZNode#isRefreshing <em>Refreshing</em>}'.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @return the meta object for the attribute '<em>Refreshing</em>'.
+	 * @see org.apache.hdt.core.internal.model.ZNode#isRefreshing()
+	 * @see #getZNode()
+	 * @generated
+	 */
+	EAttribute getZNode_Refreshing();
+
+	/**
+	 * Returns the meta object for the attribute '{@link org.apache.hdt.core.internal.model.ZNode#isEphermeral <em>Ephermeral</em>}'.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @return the meta object for the attribute '<em>Ephermeral</em>'.
+	 * @see org.apache.hdt.core.internal.model.ZNode#isEphermeral()
+	 * @see #getZNode()
+	 * @generated
+	 */
+	EAttribute getZNode_Ephermeral();
+
+	/**
+	 * Returns the meta object for the attribute '{@link org.apache.hdt.core.internal.model.ZNode#getCreationId <em>Creation Id</em>}'.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @return the meta object for the attribute '<em>Creation Id</em>'.
+	 * @see org.apache.hdt.core.internal.model.ZNode#getCreationId()
+	 * @see #getZNode()
+	 * @generated
+	 */
+	EAttribute getZNode_CreationId();
+
+	/**
+	 * Returns the meta object for the attribute '{@link org.apache.hdt.core.internal.model.ZNode#getModifiedId <em>Modified Id</em>}'.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @return the meta object for the attribute '<em>Modified Id</em>'.
+	 * @see org.apache.hdt.core.internal.model.ZNode#getModifiedId()
+	 * @see #getZNode()
+	 * @generated
+	 */
+	EAttribute getZNode_ModifiedId();
+
+	/**
+	 * Returns the meta object for the attribute '{@link org.apache.hdt.core.internal.model.ZNode#getCreationTime <em>Creation Time</em>}'.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @return the meta object for the attribute '<em>Creation Time</em>'.
+	 * @see org.apache.hdt.core.internal.model.ZNode#getCreationTime()
+	 * @see #getZNode()
+	 * @generated
+	 */
+	EAttribute getZNode_CreationTime();
+
+	/**
+	 * Returns the meta object for the attribute '{@link org.apache.hdt.core.internal.model.ZNode#getModifiedTime <em>Modified Time</em>}'.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @return the meta object for the attribute '<em>Modified Time</em>'.
+	 * @see org.apache.hdt.core.internal.model.ZNode#getModifiedTime()
+	 * @see #getZNode()
+	 * @generated
+	 */
+	EAttribute getZNode_ModifiedTime();
+
+	/**
+	 * Returns the meta object for the attribute '{@link org.apache.hdt.core.internal.model.ZNode#getVersion <em>Version</em>}'.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @return the meta object for the attribute '<em>Version</em>'.
+	 * @see org.apache.hdt.core.internal.model.ZNode#getVersion()
+	 * @see #getZNode()
+	 * @generated
+	 */
+	EAttribute getZNode_Version();
+
+	/**
+	 * Returns the meta object for the attribute '{@link org.apache.hdt.core.internal.model.ZNode#getChildrenVersion <em>Children Version</em>}'.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @return the meta object for the attribute '<em>Children Version</em>'.
+	 * @see org.apache.hdt.core.internal.model.ZNode#getChildrenVersion()
+	 * @see #getZNode()
+	 * @generated
+	 */
+	EAttribute getZNode_ChildrenVersion();
+
+	/**
+	 * Returns the meta object for the attribute '{@link org.apache.hdt.core.internal.model.ZNode#getAclVersion <em>Acl Version</em>}'.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @return the meta object for the attribute '<em>Acl Version</em>'.
+	 * @see org.apache.hdt.core.internal.model.ZNode#getAclVersion()
+	 * @see #getZNode()
+	 * @generated
+	 */
+	EAttribute getZNode_AclVersion();
+
+	/**
+	 * Returns the meta object for the attribute '{@link org.apache.hdt.core.internal.model.ZNode#getEphermalOwnerSessionId <em>Ephermal Owner Session Id</em>}'.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @return the meta object for the attribute '<em>Ephermal Owner Session Id</em>'.
+	 * @see org.apache.hdt.core.internal.model.ZNode#getEphermalOwnerSessionId()
+	 * @see #getZNode()
+	 * @generated
+	 */
+	EAttribute getZNode_EphermalOwnerSessionId();
+
+	/**
+	 * Returns the meta object for the attribute '{@link org.apache.hdt.core.internal.model.ZNode#getDataLength <em>Data Length</em>}'.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @return the meta object for the attribute '<em>Data Length</em>'.
+	 * @see org.apache.hdt.core.internal.model.ZNode#getDataLength()
+	 * @see #getZNode()
+	 * @generated
+	 */
+	EAttribute getZNode_DataLength();
+
+	/**
+	 * Returns the meta object for the attribute '{@link org.apache.hdt.core.internal.model.ZNode#getChildrenCount <em>Children Count</em>}'.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @return the meta object for the attribute '<em>Children Count</em>'.
+	 * @see org.apache.hdt.core.internal.model.ZNode#getChildrenCount()
+	 * @see #getZNode()
+	 * @generated
+	 */
+	EAttribute getZNode_ChildrenCount();
+
+	/**
+	 * Returns the meta object for the reference '{@link org.apache.hdt.core.internal.model.ZNode#getParent <em>Parent</em>}'.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @return the meta object for the reference '<em>Parent</em>'.
+	 * @see org.apache.hdt.core.internal.model.ZNode#getParent()
+	 * @see #getZNode()
+	 * @generated
+	 */
+	EReference getZNode_Parent();
+
+	/**
+	 * Returns the meta object for the attribute '{@link org.apache.hdt.core.internal.model.ZNode#getNodeName <em>Node Name</em>}'.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @return the meta object for the attribute '<em>Node Name</em>'.
+	 * @see org.apache.hdt.core.internal.model.ZNode#getNodeName()
+	 * @see #getZNode()
+	 * @generated
+	 */
+	EAttribute getZNode_NodeName();
+
+	/**
+	 * Returns the meta object for the attribute '{@link org.apache.hdt.core.internal.model.ZNode#isSequential <em>Sequential</em>}'.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @return the meta object for the attribute '<em>Sequential</em>'.
+	 * @see org.apache.hdt.core.internal.model.ZNode#isSequential()
+	 * @see #getZNode()
+	 * @generated
+	 */
+	EAttribute getZNode_Sequential();
+
+	/**
+	 * Returns the meta object for enum '{@link org.apache.hdt.core.internal.model.ServerStatus <em>Server Status</em>}'.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @return the meta object for enum '<em>Server Status</em>'.
+	 * @see org.apache.hdt.core.internal.model.ServerStatus
+	 * @generated
+	 */
+	EEnum getServerStatus();
+
+	/**
+	 * Returns the factory that creates the instances of the model.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @return the factory that creates the instances of the model.
+	 * @generated
+	 */
+	HadoopFactory getHadoopFactory();
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * Defines literals for the meta objects that represent
+	 * <ul>
+	 *   <li>each class,</li>
+	 *   <li>each feature of each class,</li>
+	 *   <li>each enum,</li>
+	 *   <li>and each data type</li>
+	 * </ul>
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	interface Literals {
+		/**
+		 * The meta object literal for the '{@link org.apache.hdt.core.internal.model.impl.HDFSServerImpl <em>HDFS Server</em>}' class.
+		 * <!-- begin-user-doc -->
+		 * <!-- end-user-doc -->
+		 * @see org.apache.hdt.core.internal.model.impl.HDFSServerImpl
+		 * @see org.apache.hdt.core.internal.model.impl.HadoopPackageImpl#getHDFSServer()
+		 * @generated
+		 */
+		EClass HDFS_SERVER = eINSTANCE.getHDFSServer();
+
+		/**
+		 * The meta object literal for the '<em><b>Loaded</b></em>' attribute feature.
+		 * <!-- begin-user-doc -->
+		 * <!-- end-user-doc -->
+		 * @generated
+		 */
+		EAttribute HDFS_SERVER__LOADED = eINSTANCE.getHDFSServer_Loaded();
+
+		/**
+		 * The meta object literal for the '<em><b>Operation UR Is</b></em>' attribute list feature.
+		 * <!-- begin-user-doc -->
+		 * <!-- end-user-doc -->
+		 * @generated
+		 */
+		EAttribute HDFS_SERVER__OPERATION_UR_IS = eINSTANCE.getHDFSServer_OperationURIs();
+
+		/**
+		 * The meta object literal for the '<em><b>User Id</b></em>' attribute feature.
+		 * <!-- begin-user-doc -->
+		 * <!-- end-user-doc -->
+		 * @generated
+		 */
+		EAttribute HDFS_SERVER__USER_ID = eINSTANCE.getHDFSServer_UserId();
+
+		/**
+		 * The meta object literal for the '<em><b>Group Ids</b></em>' attribute list feature.
+		 * <!-- begin-user-doc -->
+		 * <!-- end-user-doc -->
+		 * @generated
+		 */
+		EAttribute HDFS_SERVER__GROUP_IDS = eINSTANCE.getHDFSServer_GroupIds();
+
+		/**
+		 * The meta object literal for the '{@link org.apache.hdt.core.internal.model.impl.ServersImpl <em>Servers</em>}' class.
+		 * <!-- begin-user-doc -->
+		 * <!-- end-user-doc -->
+		 * @see org.apache.hdt.core.internal.model.impl.ServersImpl
+		 * @see org.apache.hdt.core.internal.model.impl.HadoopPackageImpl#getServers()
+		 * @generated
+		 */
+		EClass SERVERS = eINSTANCE.getServers();
+
+		/**
+		 * The meta object literal for the '<em><b>Hdfs Servers</b></em>' containment reference list feature.
+		 * <!-- begin-user-doc -->
+		 * <!-- end-user-doc -->
+		 * @generated
+		 */
+		EReference SERVERS__HDFS_SERVERS = eINSTANCE.getServers_HdfsServers();
+
+		/**
+		 * The meta object literal for the '<em><b>Version</b></em>' attribute feature.
+		 * <!-- begin-user-doc -->
+		 * <!-- end-user-doc -->
+		 * @generated
+		 */
+		EAttribute SERVERS__VERSION = eINSTANCE.getServers_Version();
+
+		/**
+		 * The meta object literal for the '<em><b>Zookeeper Servers</b></em>' containment reference list feature.
+		 * <!-- begin-user-doc -->
+		 * <!-- end-user-doc -->
+		 * @generated
+		 */
+		EReference SERVERS__ZOOKEEPER_SERVERS = eINSTANCE.getServers_ZookeeperServers();
+
+		/**
+		 * The meta object literal for the '{@link org.apache.hdt.core.internal.model.impl.ServerImpl <em>Server</em>}' class.
+		 * <!-- begin-user-doc -->
+		 * <!-- end-user-doc -->
+		 * @see org.apache.hdt.core.internal.model.impl.ServerImpl
+		 * @see org.apache.hdt.core.internal.model.impl.HadoopPackageImpl#getServer()
+		 * @generated
+		 */
+		EClass SERVER = eINSTANCE.getServer();
+
+		/**
+		 * The meta object literal for the '<em><b>Name</b></em>' attribute feature.
+		 * <!-- begin-user-doc -->
+		 * <!-- end-user-doc -->
+		 * @generated
+		 */
+		EAttribute SERVER__NAME = eINSTANCE.getServer_Name();
+
+		/**
+		 * The meta object literal for the '<em><b>Uri</b></em>' attribute feature.
+		 * <!-- begin-user-doc -->
+		 * <!-- end-user-doc -->
+		 * @generated
+		 */
+		EAttribute SERVER__URI = eINSTANCE.getServer_Uri();
+
+		/**
+		 * The meta object literal for the '<em><b>Status Code</b></em>' attribute feature.
+		 * <!-- begin-user-doc -->
+		 * <!-- end-user-doc -->
+		 * @generated
+		 */
+		EAttribute SERVER__STATUS_CODE = eINSTANCE.getServer_StatusCode();
+
+		/**
+		 * The meta object literal for the '<em><b>Status Message</b></em>' attribute feature.
+		 * <!-- begin-user-doc -->
+		 * <!-- end-user-doc -->
+		 * @generated
+		 */
+		EAttribute SERVER__STATUS_MESSAGE = eINSTANCE.getServer_StatusMessage();
+
+		/**
+		 * The meta object literal for the '<em><b>Last Accessed</b></em>' attribute feature.
+		 * <!-- begin-user-doc -->
+		 * <!-- end-user-doc -->
+		 * @generated
+		 */
+		EAttribute SERVER__LAST_ACCESSED = eINSTANCE.getServer_LastAccessed();
+
+		/**
+		 * The meta object literal for the '{@link org.apache.hdt.core.internal.model.impl.ZooKeeperServerImpl <em>Zoo Keeper Server</em>}' class.
+		 * <!-- begin-user-doc -->
+		 * <!-- end-user-doc -->
+		 * @see org.apache.hdt.core.internal.model.impl.ZooKeeperServerImpl
+		 * @see org.apache.hdt.core.internal.model.impl.HadoopPackageImpl#getZooKeeperServer()
+		 * @generated
+		 */
+		EClass ZOO_KEEPER_SERVER = eINSTANCE.getZooKeeperServer();
+
+		/**
+		 * The meta object literal for the '{@link org.apache.hdt.core.internal.model.impl.ZNodeImpl <em>ZNode</em>}' class.
+		 * <!-- begin-user-doc -->
+		 * <!-- end-user-doc -->
+		 * @see org.apache.hdt.core.internal.model.impl.ZNodeImpl
+		 * @see org.apache.hdt.core.internal.model.impl.HadoopPackageImpl#getZNode()
+		 * @generated
+		 */
+		EClass ZNODE = eINSTANCE.getZNode();
+
+		/**
+		 * The meta object literal for the '<em><b>Children</b></em>' containment reference list feature.
+		 * <!-- begin-user-doc -->
+		 * <!-- end-user-doc -->
+		 * @generated
+		 */
+		EReference ZNODE__CHILDREN = eINSTANCE.getZNode_Children();
+
+		/**
+		 * The meta object literal for the '<em><b>Last Refresh</b></em>' attribute feature.
+		 * <!-- begin-user-doc -->
+		 * <!-- end-user-doc -->
+		 * @generated
+		 */
+		EAttribute ZNODE__LAST_REFRESH = eINSTANCE.getZNode_LastRefresh();
+
+		/**
+		 * The meta object literal for the '<em><b>Refreshing</b></em>' attribute feature.
+		 * <!-- begin-user-doc -->
+		 * <!-- end-user-doc -->
+		 * @generated
+		 */
+		EAttribute ZNODE__REFRESHING = eINSTANCE.getZNode_Refreshing();
+
+		/**
+		 * The meta object literal for the '<em><b>Ephermeral</b></em>' attribute feature.
+		 * <!-- begin-user-doc -->
+		 * <!-- end-user-doc -->
+		 * @generated
+		 */
+		EAttribute ZNODE__EPHERMERAL = eINSTANCE.getZNode_Ephermeral();
+
+		/**
+		 * The meta object literal for the '<em><b>Creation Id</b></em>' attribute feature.
+		 * <!-- begin-user-doc -->
+		 * <!-- end-user-doc -->
+		 * @generated
+		 */
+		EAttribute ZNODE__CREATION_ID = eINSTANCE.getZNode_CreationId();
+
+		/**
+		 * The meta object literal for the '<em><b>Modified Id</b></em>' attribute feature.
+		 * <!-- begin-user-doc -->
+		 * <!-- end-user-doc -->
+		 * @generated
+		 */
+		EAttribute ZNODE__MODIFIED_ID = eINSTANCE.getZNode_ModifiedId();
+
+		/**
+		 * The meta object literal for the '<em><b>Creation Time</b></em>' attribute feature.
+		 * <!-- begin-user-doc -->
+		 * <!-- end-user-doc -->
+		 * @generated
+		 */
+		EAttribute ZNODE__CREATION_TIME = eINSTANCE.getZNode_CreationTime();
+
+		/**
+		 * The meta object literal for the '<em><b>Modified Time</b></em>' attribute feature.
+		 * <!-- begin-user-doc -->
+		 * <!-- end-user-doc -->
+		 * @generated
+		 */
+		EAttribute ZNODE__MODIFIED_TIME = eINSTANCE.getZNode_ModifiedTime();
+
+		/**
+		 * The meta object literal for the '<em><b>Version</b></em>' attribute feature.
+		 * <!-- begin-user-doc -->
+		 * <!-- end-user-doc -->
+		 * @generated
+		 */
+		EAttribute ZNODE__VERSION = eINSTANCE.getZNode_Version();
+
+		/**
+		 * The meta object literal for the '<em><b>Children Version</b></em>' attribute feature.
+		 * <!-- begin-user-doc -->
+		 * <!-- end-user-doc -->
+		 * @generated
+		 */
+		EAttribute ZNODE__CHILDREN_VERSION = eINSTANCE.getZNode_ChildrenVersion();
+
+		/**
+		 * The meta object literal for the '<em><b>Acl Version</b></em>' attribute feature.
+		 * <!-- begin-user-doc -->
+		 * <!-- end-user-doc -->
+		 * @generated
+		 */
+		EAttribute ZNODE__ACL_VERSION = eINSTANCE.getZNode_AclVersion();
+
+		/**
+		 * The meta object literal for the '<em><b>Ephermal Owner Session Id</b></em>' attribute feature.
+		 * <!-- begin-user-doc -->
+		 * <!-- end-user-doc -->
+		 * @generated
+		 */
+		EAttribute ZNODE__EPHERMAL_OWNER_SESSION_ID = eINSTANCE.getZNode_EphermalOwnerSessionId();
+
+		/**
+		 * The meta object literal for the '<em><b>Data Length</b></em>' attribute feature.
+		 * <!-- begin-user-doc -->
+		 * <!-- end-user-doc -->
+		 * @generated
+		 */
+		EAttribute ZNODE__DATA_LENGTH = eINSTANCE.getZNode_DataLength();
+
+		/**
+		 * The meta object literal for the '<em><b>Children Count</b></em>' attribute feature.
+		 * <!-- begin-user-doc -->
+		 * <!-- end-user-doc -->
+		 * @generated
+		 */
+		EAttribute ZNODE__CHILDREN_COUNT = eINSTANCE.getZNode_ChildrenCount();
+
+		/**
+		 * The meta object literal for the '<em><b>Parent</b></em>' reference feature.
+		 * <!-- begin-user-doc -->
+		 * <!-- end-user-doc -->
+		 * @generated
+		 */
+		EReference ZNODE__PARENT = eINSTANCE.getZNode_Parent();
+
+		/**
+		 * The meta object literal for the '<em><b>Node Name</b></em>' attribute feature.
+		 * <!-- begin-user-doc -->
+		 * <!-- end-user-doc -->
+		 * @generated
+		 */
+		EAttribute ZNODE__NODE_NAME = eINSTANCE.getZNode_NodeName();
+
+		/**
+		 * The meta object literal for the '<em><b>Sequential</b></em>' attribute feature.
+		 * <!-- begin-user-doc -->
+		 * <!-- end-user-doc -->
+		 * @generated
+		 */
+		EAttribute ZNODE__SEQUENTIAL = eINSTANCE.getZNode_Sequential();
+
+		/**
+		 * The meta object literal for the '{@link org.apache.hdt.core.internal.model.ServerStatus <em>Server Status</em>}' enum.
+		 * <!-- begin-user-doc -->
+		 * <!-- end-user-doc -->
+		 * @see org.apache.hdt.core.internal.model.ServerStatus
+		 * @see org.apache.hdt.core.internal.model.impl.HadoopPackageImpl#getServerStatus()
+		 * @generated
+		 */
+		EEnum SERVER_STATUS = eINSTANCE.getServerStatus();
+
+	}
+
+} //HadoopPackage

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/Server.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/Server.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/Server.java
new file mode 100644
index 0000000..a5d41b1
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/Server.java
@@ -0,0 +1,175 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *  
+ */
+package org.apache.hdt.core.internal.model;
+
+import org.eclipse.emf.ecore.EObject;
+
+/**
+ * <!-- begin-user-doc -->
+ * A representation of the model object '<em><b>Server</b></em>'.
+ * <!-- end-user-doc -->
+ *
+ * <p>
+ * The following features are supported:
+ * <ul>
+ *   <li>{@link org.apache.hdt.core.internal.model.Server#getName <em>Name</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.Server#getUri <em>Uri</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.Server#getStatusCode <em>Status Code</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.Server#getStatusMessage <em>Status Message</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.Server#getLastAccessed <em>Last Accessed</em>}</li>
+ * </ul>
+ * </p>
+ *
+ * @see org.apache.hdt.core.internal.model.HadoopPackage#getServer()
+ * @model abstract="true"
+ * @generated
+ */
+public interface Server extends EObject {
+	/**
+	 * Returns the value of the '<em><b>Name</b></em>' attribute.
+	 * The default value is <code>""</code>.
+	 * <!-- begin-user-doc -->
+	 * <p>
+	 * If the meaning of the '<em>Name</em>' attribute isn't clear,
+	 * there really should be more of a description here...
+	 * </p>
+	 * <!-- end-user-doc -->
+	 * @return the value of the '<em>Name</em>' attribute.
+	 * @see #setName(String)
+	 * @see org.apache.hdt.core.internal.model.HadoopPackage#getServer_Name()
+	 * @model default=""
+	 * @generated
+	 */
+	String getName();
+
+	/**
+	 * Sets the value of the '{@link org.apache.hdt.core.internal.model.Server#getName <em>Name</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @param value the new value of the '<em>Name</em>' attribute.
+	 * @see #getName()
+	 * @generated
+	 */
+	void setName(String value);
+
+	/**
+	 * Returns the value of the '<em><b>Uri</b></em>' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * <!-- begin-model-doc -->
+	 * This is the URI location for the HDFS server. Ex: hdfs://hdfs.server.hostname/path.
+	 * <!-- end-model-doc -->
+	 * @return the value of the '<em>Uri</em>' attribute.
+	 * @see #setUri(String)
+	 * @see org.apache.hdt.core.internal.model.HadoopPackage#getServer_Uri()
+	 * @model
+	 * @generated
+	 */
+	String getUri();
+
+	/**
+	 * Sets the value of the '{@link org.apache.hdt.core.internal.model.Server#getUri <em>Uri</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @param value the new value of the '<em>Uri</em>' attribute.
+	 * @see #getUri()
+	 * @generated
+	 */
+	void setUri(String value);
+
+	/**
+	 * Returns the value of the '<em><b>Status Code</b></em>' attribute.
+	 * The default value is <code>"0"</code>.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * <!-- begin-model-doc -->
+	 * Indicates the status of this server. Values may be taken from HTTP response codes to indicate server status.
+	 * <!-- end-model-doc -->
+	 * @return the value of the '<em>Status Code</em>' attribute.
+	 * @see #setStatusCode(int)
+	 * @see org.apache.hdt.core.internal.model.HadoopPackage#getServer_StatusCode()
+	 * @model default="0" transient="true"
+	 * @generated
+	 */
+	int getStatusCode();
+
+	/**
+	 * Sets the value of the '{@link org.apache.hdt.core.internal.model.Server#getStatusCode <em>Status Code</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @param value the new value of the '<em>Status Code</em>' attribute.
+	 * @see #getStatusCode()
+	 * @generated
+	 */
+	void setStatusCode(int value);
+
+	/**
+	 * Returns the value of the '<em><b>Status Message</b></em>' attribute.
+	 * <!-- begin-user-doc -->
+	 * <p>
+	 * If the meaning of the '<em>Status Message</em>' attribute isn't clear,
+	 * there really should be more of a description here...
+	 * </p>
+	 * <!-- end-user-doc -->
+	 * @return the value of the '<em>Status Message</em>' attribute.
+	 * @see #setStatusMessage(String)
+	 * @see org.apache.hdt.core.internal.model.HadoopPackage#getServer_StatusMessage()
+	 * @model
+	 * @generated
+	 */
+	String getStatusMessage();
+
+	/**
+	 * Sets the value of the '{@link org.apache.hdt.core.internal.model.Server#getStatusMessage <em>Status Message</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @param value the new value of the '<em>Status Message</em>' attribute.
+	 * @see #getStatusMessage()
+	 * @generated
+	 */
+	void setStatusMessage(String value);
+
+	/**
+	 * Returns the value of the '<em><b>Last Accessed</b></em>' attribute.
+	 * The default value is <code>"-1"</code>.
+	 * <!-- begin-user-doc -->
+	 * <p>
+	 * If the meaning of the '<em>Last Accessed</em>' attribute isn't clear,
+	 * there really should be more of a description here...
+	 * </p>
+	 * <!-- end-user-doc -->
+	 * @return the value of the '<em>Last Accessed</em>' attribute.
+	 * @see #setLastAccessed(long)
+	 * @see org.apache.hdt.core.internal.model.HadoopPackage#getServer_LastAccessed()
+	 * @model default="-1"
+	 * @generated
+	 */
+	long getLastAccessed();
+
+	/**
+	 * Sets the value of the '{@link org.apache.hdt.core.internal.model.Server#getLastAccessed <em>Last Accessed</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @param value the new value of the '<em>Last Accessed</em>' attribute.
+	 * @see #getLastAccessed()
+	 * @generated
+	 */
+	void setLastAccessed(long value);
+
+} // Server
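
For orientation, here is a minimal usage sketch of the interface above. Server is declared abstract (@model abstract="true"), so instances come from a concrete subtype such as HDFSServer or ZooKeeperServer through the generated factory; the HadoopFactory name and its create* method below follow the usual EMF pattern and are assumptions, not part of this hunk.

    // Sketch only: HadoopFactory.eINSTANCE and createHDFSServer() are assumed
    // EMF-generated members and are not shown in this patch.
    HDFSServer server = HadoopFactory.eINSTANCE.createHDFSServer();
    server.setName("dev-cluster");
    server.setUri("hdfs://hdfs.server.hostname/path"); // URI format documented on getUri()
    server.setStatusCode(200);                         // e.g. an HTTP-style code, per getStatusCode()
    server.setLastAccessed(System.currentTimeMillis());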

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/ServerStatus.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/ServerStatus.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/ServerStatus.java
new file mode 100644
index 0000000..4b030de
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/ServerStatus.java
@@ -0,0 +1,247 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *  
+ */
+package org.apache.hdt.core.internal.model;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+
+import org.eclipse.emf.common.util.Enumerator;
+
+/**
+ * <!-- begin-user-doc -->
+ * A representation of the literals of the enumeration '<em><b>Server Status</b></em>',
+ * and utility methods for working with them.
+ * <!-- end-user-doc -->
+ * @see org.apache.hdt.core.internal.model.HadoopPackage#getServerStatus()
+ * @model
+ * @generated
+ */
+public enum ServerStatus implements Enumerator {
+	/**
+	 * The '<em><b>NO PROJECT</b></em>' literal object.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #NO_PROJECT_VALUE
+	 * @generated
+	 * @ordered
+	 */
+	NO_PROJECT(1, "NO_PROJECT", "NO_PROJECT"),
+
+	/**
+	 * The '<em><b>DISCONNECTED</b></em>' literal object.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #DISCONNECTED_VALUE
+	 * @generated
+	 * @ordered
+	 */
+	DISCONNECTED(2, "DISCONNECTED", "DISCONNECTED"),
+
+	/**
+	 * The '<em><b>CONNECTED</b></em>' literal object.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #CONNECTED_VALUE
+	 * @generated
+	 * @ordered
+	 */
+	CONNECTED(3, "CONNECTED", "CONNECTED");
+
+	/**
+	 * The '<em><b>NO PROJECT</b></em>' literal value.
+	 * <!-- begin-user-doc -->
+	 * <p>
+	 * If the meaning of '<em><b>NO PROJECT</b></em>' literal object isn't clear,
+	 * there really should be more of a description here...
+	 * </p>
+	 * <!-- end-user-doc -->
+	 * @see #NO_PROJECT
+	 * @model
+	 * @generated
+	 * @ordered
+	 */
+	public static final int NO_PROJECT_VALUE = 1;
+
+	/**
+	 * The '<em><b>DISCONNECTED</b></em>' literal value.
+	 * <!-- begin-user-doc -->
+	 * <p>
+	 * If the meaning of '<em><b>DISCONNECTED</b></em>' literal object isn't clear,
+	 * there really should be more of a description here...
+	 * </p>
+	 * <!-- end-user-doc -->
+	 * @see #DISCONNECTED
+	 * @model
+	 * @generated
+	 * @ordered
+	 */
+	public static final int DISCONNECTED_VALUE = 2;
+
+	/**
+	 * The '<em><b>CONNECTED</b></em>' literal value.
+	 * <!-- begin-user-doc -->
+	 * <p>
+	 * If the meaning of '<em><b>CONNECTED</b></em>' literal object isn't clear,
+	 * there really should be more of a description here...
+	 * </p>
+	 * <!-- end-user-doc -->
+	 * @see #CONNECTED
+	 * @model
+	 * @generated
+	 * @ordered
+	 */
+	public static final int CONNECTED_VALUE = 3;
+
+	/**
+	 * An array of all the '<em><b>Server Status</b></em>' enumerators.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	private static final ServerStatus[] VALUES_ARRAY =
+		new ServerStatus[] {
+			NO_PROJECT,
+			DISCONNECTED,
+			CONNECTED,
+		};
+
+	/**
+	 * A public read-only list of all the '<em><b>Server Status</b></em>' enumerators.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public static final List<ServerStatus> VALUES = Collections.unmodifiableList(Arrays.asList(VALUES_ARRAY));
+
+	/**
+	 * Returns the '<em><b>Server Status</b></em>' literal with the specified literal value.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public static ServerStatus get(String literal) {
+		for (int i = 0; i < VALUES_ARRAY.length; ++i) {
+			ServerStatus result = VALUES_ARRAY[i];
+			if (result.toString().equals(literal)) {
+				return result;
+			}
+		}
+		return null;
+	}
+
+	/**
+	 * Returns the '<em><b>Server Status</b></em>' literal with the specified name.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public static ServerStatus getByName(String name) {
+		for (int i = 0; i < VALUES_ARRAY.length; ++i) {
+			ServerStatus result = VALUES_ARRAY[i];
+			if (result.getName().equals(name)) {
+				return result;
+			}
+		}
+		return null;
+	}
+
+	/**
+	 * Returns the '<em><b>Server Status</b></em>' literal with the specified integer value.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public static ServerStatus get(int value) {
+		switch (value) {
+			case NO_PROJECT_VALUE: return NO_PROJECT;
+			case DISCONNECTED_VALUE: return DISCONNECTED;
+			case CONNECTED_VALUE: return CONNECTED;
+		}
+		return null;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	private final int value;
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	private final String name;
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	private final String literal;
+
+	/**
+	 * Only this class can construct instances.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	private ServerStatus(int value, String name, String literal) {
+		this.value = value;
+		this.name = name;
+		this.literal = literal;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public int getValue() {
+	  return value;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public String getName() {
+	  return name;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public String getLiteral() {
+	  return literal;
+	}
+
+	/**
+	 * Returns the literal value of the enumerator, which is its string representation.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	@Override
+	public String toString() {
+		return literal;
+	}
+	
+} //ServerStatus
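
The three lookup helpers above (get(String), getByName(String), get(int)) each return null when nothing matches, so callers should guard against null. A small sketch of mapping a Server's integer status back to the enum, assuming the status code stored on the Server holds one of these enum values rather than a raw HTTP code:

    // Resolve the stored status code; null means it was not one of the three literals.
    ServerStatus status = ServerStatus.get(server.getStatusCode());
    if (status == ServerStatus.CONNECTED) {
        // server is reachable; safe to refresh its contents
    } else if (status == null) {
        // code did not match NO_PROJECT, DISCONNECTED or CONNECTED
    }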

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/Servers.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/Servers.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/Servers.java
new file mode 100644
index 0000000..c31bffb
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/Servers.java
@@ -0,0 +1,103 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *  
+ */
+package org.apache.hdt.core.internal.model;
+
+import org.eclipse.emf.common.util.EList;
+
+import org.eclipse.emf.ecore.EObject;
+
+/**
+ * <!-- begin-user-doc -->
+ * A representation of the model object '<em><b>Servers</b></em>'.
+ * <!-- end-user-doc -->
+ *
+ * <p>
+ * The following features are supported:
+ * <ul>
+ *   <li>{@link org.apache.hdt.core.internal.model.Servers#getHdfsServers <em>Hdfs Servers</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.Servers#getVersion <em>Version</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.Servers#getZookeeperServers <em>Zookeeper Servers</em>}</li>
+ * </ul>
+ * </p>
+ *
+ * @see org.apache.hdt.core.internal.model.HadoopPackage#getServers()
+ * @model
+ * @generated
+ */
+public interface Servers extends EObject {
+	/**
+	 * Returns the value of the '<em><b>Hdfs Servers</b></em>' containment reference list.
+	 * The list contents are of type {@link org.apache.hdt.core.internal.model.HDFSServer}.
+	 * <!-- begin-user-doc -->
+	 * <p>
+	 * If the meaning of the '<em>Hdfs Servers</em>' reference list isn't clear,
+	 * there really should be more of a description here...
+	 * </p>
+	 * <!-- end-user-doc -->
+	 * @return the value of the '<em>Hdfs Servers</em>' containment reference list.
+	 * @see org.apache.hdt.core.internal.model.HadoopPackage#getServers_HdfsServers()
+	 * @model containment="true"
+	 * @generated
+	 */
+	EList<HDFSServer> getHdfsServers();
+
+	/**
+	 * Returns the value of the '<em><b>Version</b></em>' attribute.
+	 * The default value is <code>"1.0.0.0"</code>.
+	 * <!-- begin-user-doc -->
+	 * <p>
+	 * If the meaning of the '<em>Version</em>' attribute isn't clear,
+	 * there really should be more of a description here...
+	 * </p>
+	 * <!-- end-user-doc -->
+	 * @return the value of the '<em>Version</em>' attribute.
+	 * @see #setVersion(String)
+	 * @see org.apache.hdt.core.internal.model.HadoopPackage#getServers_Version()
+	 * @model default="1.0.0.0"
+	 * @generated
+	 */
+	String getVersion();
+
+	/**
+	 * Sets the value of the '{@link org.apache.hdt.core.internal.model.Servers#getVersion <em>Version</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @param value the new value of the '<em>Version</em>' attribute.
+	 * @see #getVersion()
+	 * @generated
+	 */
+	void setVersion(String value);
+
+	/**
+	 * Returns the value of the '<em><b>Zookeeper Servers</b></em>' containment reference list.
+	 * The list contents are of type {@link org.apache.hdt.core.internal.model.ZooKeeperServer}.
+	 * <!-- begin-user-doc -->
+	 * <p>
+	 * If the meaning of the '<em>Zookeeper Servers</em>' reference list isn't clear,
+	 * there really should be more of a description here...
+	 * </p>
+	 * <!-- end-user-doc -->
+	 * @return the value of the '<em>Zookeeper Servers</em>' containment reference list.
+	 * @see org.apache.hdt.core.internal.model.HadoopPackage#getServers_ZookeeperServers()
+	 * @model containment="true"
+	 * @generated
+	 */
+	EList<ZooKeeperServer> getZookeeperServers();
+
+} // Servers
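
Servers is the root container: HDFS and ZooKeeper servers live in separate containment lists. A sketch of walking both lists follows; how the root Servers instance is obtained (for example from a model manager or a loaded EMF resource) is outside this hunk, and it is assumed here that HDFSServer, like ZooKeeperServer, extends Server and therefore exposes getName() and getUri().

    void listServers(Servers servers) {
        // Containment lists are never null in EMF; empty lists simply loop zero times.
        for (HDFSServer hdfs : servers.getHdfsServers()) {
            System.out.println("HDFS: " + hdfs.getName() + " -> " + hdfs.getUri());
        }
        for (ZooKeeperServer zk : servers.getZookeeperServers()) {
            System.out.println("ZooKeeper: " + zk.getName() + " -> " + zk.getUri());
        }
    }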

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/ZNode.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/ZNode.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/ZNode.java
new file mode 100644
index 0000000..ceb4496
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/ZNode.java
@@ -0,0 +1,527 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *  
+ */
+package org.apache.hdt.core.internal.model;
+
+import org.eclipse.emf.common.util.EList;
+
+import org.eclipse.emf.ecore.EObject;
+
+/**
+ * <!-- begin-user-doc -->
+ * A representation of the model object '<em><b>ZNode</b></em>'.
+ * <!-- end-user-doc -->
+ *
+ * <p>
+ * The following features are supported:
+ * <ul>
+ *   <li>{@link org.apache.hdt.core.internal.model.ZNode#getChildren <em>Children</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.ZNode#getLastRefresh <em>Last Refresh</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.ZNode#isRefreshing <em>Refreshing</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.ZNode#isEphermeral <em>Ephermeral</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.ZNode#getCreationId <em>Creation Id</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.ZNode#getModifiedId <em>Modified Id</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.ZNode#getCreationTime <em>Creation Time</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.ZNode#getModifiedTime <em>Modified Time</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.ZNode#getVersion <em>Version</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.ZNode#getChildrenVersion <em>Children Version</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.ZNode#getAclVersion <em>Acl Version</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.ZNode#getEphermalOwnerSessionId <em>Ephermal Owner Session Id</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.ZNode#getDataLength <em>Data Length</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.ZNode#getChildrenCount <em>Children Count</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.ZNode#getParent <em>Parent</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.ZNode#getNodeName <em>Node Name</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.ZNode#isSequential <em>Sequential</em>}</li>
+ * </ul>
+ * </p>
+ *
+ * @see org.apache.hdt.core.internal.model.HadoopPackage#getZNode()
+ * @model
+ * @generated
+ */
+public interface ZNode extends EObject {
+	/**
+	 * Returns the value of the '<em><b>Children</b></em>' containment reference list.
+	 * The list contents are of type {@link org.apache.hdt.core.internal.model.ZNode}.
+	 * <!-- begin-user-doc -->
+	 * <p>
+	 * If the meaning of the '<em>Children</em>' reference list isn't clear,
+	 * there really should be more of a description here...
+	 * </p>
+	 * <!-- end-user-doc -->
+	 * @return the value of the '<em>Children</em>' containment reference list.
+	 * @see org.apache.hdt.core.internal.model.HadoopPackage#getZNode_Children()
+	 * @model containment="true" transient="true"
+	 * @generated
+	 */
+	EList<ZNode> getChildren();
+
+	/**
+	 * Returns the value of the '<em><b>Last Refresh</b></em>' attribute.
+	 * The default value is <code>"-1"</code>.
+	 * <!-- begin-user-doc -->
+	 * <p>
+	 * If the meaning of the '<em>Last Refresh</em>' attribute isn't clear,
+	 * there really should be more of a description here...
+	 * </p>
+	 * <!-- end-user-doc -->
+	 * @return the value of the '<em>Last Refresh</em>' attribute.
+	 * @see #setLastRefresh(long)
+	 * @see org.apache.hdt.core.internal.model.HadoopPackage#getZNode_LastRefresh()
+	 * @model default="-1"
+	 * @generated
+	 */
+	long getLastRefresh();
+
+	/**
+	 * Sets the value of the '{@link org.apache.hdt.core.internal.model.ZNode#getLastRefresh <em>Last Refresh</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @param value the new value of the '<em>Last Refresh</em>' attribute.
+	 * @see #getLastRefresh()
+	 * @generated
+	 */
+	void setLastRefresh(long value);
+
+	/**
+	 * Returns the value of the '<em><b>Refreshing</b></em>' attribute.
+	 * <!-- begin-user-doc -->
+	 * <p>
+	 * If the meaning of the '<em>Refreshing</em>' attribute isn't clear,
+	 * there really should be more of a description here...
+	 * </p>
+	 * <!-- end-user-doc -->
+	 * @return the value of the '<em>Refreshing</em>' attribute.
+	 * @see #setRefreshing(boolean)
+	 * @see org.apache.hdt.core.internal.model.HadoopPackage#getZNode_Refreshing()
+	 * @model
+	 * @generated
+	 */
+	boolean isRefreshing();
+
+	/**
+	 * Sets the value of the '{@link org.apache.hdt.core.internal.model.ZNode#isRefreshing <em>Refreshing</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @param value the new value of the '<em>Refreshing</em>' attribute.
+	 * @see #isRefreshing()
+	 * @generated
+	 */
+	void setRefreshing(boolean value);
+
+	/**
+	 * Returns the value of the '<em><b>Ephermeral</b></em>' attribute.
+	 * <!-- begin-user-doc -->
+	 * <p>
+	 * If the meaning of the '<em>Ephermeral</em>' attribute isn't clear,
+	 * there really should be more of a description here...
+	 * </p>
+	 * <!-- end-user-doc -->
+	 * @return the value of the '<em>Ephermeral</em>' attribute.
+	 * @see #setEphermeral(boolean)
+	 * @see org.apache.hdt.core.internal.model.HadoopPackage#getZNode_Ephermeral()
+	 * @model transient="true"
+	 * @generated
+	 */
+	boolean isEphermeral();
+
+	/**
+	 * Sets the value of the '{@link org.apache.hdt.core.internal.model.ZNode#isEphermeral <em>Ephermeral</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @param value the new value of the '<em>Ephermeral</em>' attribute.
+	 * @see #isEphermeral()
+	 * @generated
+	 */
+	void setEphermeral(boolean value);
+
+	/**
+	 * Returns the value of the '<em><b>Creation Id</b></em>' attribute.
+	 * The default value is <code>"-1"</code>.
+	 * <!-- begin-user-doc -->
+	 * <p>
+	 * If the meaning of the '<em>Creation Id</em>' attribute isn't clear,
+	 * there really should be more of a description here...
+	 * </p>
+	 * <!-- end-user-doc -->
+	 * @return the value of the '<em>Creation Id</em>' attribute.
+	 * @see #setCreationId(long)
+	 * @see org.apache.hdt.core.internal.model.HadoopPackage#getZNode_CreationId()
+	 * @model default="-1"
+	 * @generated
+	 */
+	long getCreationId();
+
+	/**
+	 * Sets the value of the '{@link org.apache.hdt.core.internal.model.ZNode#getCreationId <em>Creation Id</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @param value the new value of the '<em>Creation Id</em>' attribute.
+	 * @see #getCreationId()
+	 * @generated
+	 */
+	void setCreationId(long value);
+
+	/**
+	 * Returns the value of the '<em><b>Modified Id</b></em>' attribute.
+	 * The default value is <code>"-1"</code>.
+	 * <!-- begin-user-doc -->
+	 * <p>
+	 * If the meaning of the '<em>Modified Id</em>' attribute isn't clear,
+	 * there really should be more of a description here...
+	 * </p>
+	 * <!-- end-user-doc -->
+	 * @return the value of the '<em>Modified Id</em>' attribute.
+	 * @see #setModifiedId(long)
+	 * @see org.apache.hdt.core.internal.model.HadoopPackage#getZNode_ModifiedId()
+	 * @model default="-1"
+	 * @generated
+	 */
+	long getModifiedId();
+
+	/**
+	 * Sets the value of the '{@link org.apache.hdt.core.internal.model.ZNode#getModifiedId <em>Modified Id</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @param value the new value of the '<em>Modified Id</em>' attribute.
+	 * @see #getModifiedId()
+	 * @generated
+	 */
+	void setModifiedId(long value);
+
+	/**
+	 * Returns the value of the '<em><b>Creation Time</b></em>' attribute.
+	 * The default value is <code>"-1"</code>.
+	 * <!-- begin-user-doc -->
+	 * <p>
+	 * If the meaning of the '<em>Creation Time</em>' attribute isn't clear,
+	 * there really should be more of a description here...
+	 * </p>
+	 * <!-- end-user-doc -->
+	 * @return the value of the '<em>Creation Time</em>' attribute.
+	 * @see #setCreationTime(long)
+	 * @see org.apache.hdt.core.internal.model.HadoopPackage#getZNode_CreationTime()
+	 * @model default="-1"
+	 * @generated
+	 */
+	long getCreationTime();
+
+	/**
+	 * Sets the value of the '{@link org.apache.hdt.core.internal.model.ZNode#getCreationTime <em>Creation Time</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @param value the new value of the '<em>Creation Time</em>' attribute.
+	 * @see #getCreationTime()
+	 * @generated
+	 */
+	void setCreationTime(long value);
+
+	/**
+	 * Returns the value of the '<em><b>Modified Time</b></em>' attribute.
+	 * The default value is <code>"-1"</code>.
+	 * <!-- begin-user-doc -->
+	 * <p>
+	 * If the meaning of the '<em>Modified Time</em>' attribute isn't clear,
+	 * there really should be more of a description here...
+	 * </p>
+	 * <!-- end-user-doc -->
+	 * @return the value of the '<em>Modified Time</em>' attribute.
+	 * @see #setModifiedTime(long)
+	 * @see org.apache.hdt.core.internal.model.HadoopPackage#getZNode_ModifiedTime()
+	 * @model default="-1"
+	 * @generated
+	 */
+	long getModifiedTime();
+
+	/**
+	 * Sets the value of the '{@link org.apache.hdt.core.internal.model.ZNode#getModifiedTime <em>Modified Time</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @param value the new value of the '<em>Modified Time</em>' attribute.
+	 * @see #getModifiedTime()
+	 * @generated
+	 */
+	void setModifiedTime(long value);
+
+	/**
+	 * Returns the value of the '<em><b>Version</b></em>' attribute.
+	 * The default value is <code>"-1"</code>.
+	 * <!-- begin-user-doc -->
+	 * <p>
+	 * If the meaning of the '<em>Version</em>' attribute isn't clear,
+	 * there really should be more of a description here...
+	 * </p>
+	 * <!-- end-user-doc -->
+	 * @return the value of the '<em>Version</em>' attribute.
+	 * @see #setVersion(int)
+	 * @see org.apache.hdt.core.internal.model.HadoopPackage#getZNode_Version()
+	 * @model default="-1" transient="true"
+	 * @generated
+	 */
+	int getVersion();
+
+	/**
+	 * Sets the value of the '{@link org.apache.hdt.core.internal.model.ZNode#getVersion <em>Version</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @param value the new value of the '<em>Version</em>' attribute.
+	 * @see #getVersion()
+	 * @generated
+	 */
+	void setVersion(int value);
+
+	/**
+	 * Returns the value of the '<em><b>Children Version</b></em>' attribute.
+	 * The default value is <code>"-1"</code>.
+	 * <!-- begin-user-doc -->
+	 * <p>
+	 * If the meaning of the '<em>Children Version</em>' attribute isn't clear,
+	 * there really should be more of a description here...
+	 * </p>
+	 * <!-- end-user-doc -->
+	 * @return the value of the '<em>Children Version</em>' attribute.
+	 * @see #setChildrenVersion(int)
+	 * @see org.apache.hdt.core.internal.model.HadoopPackage#getZNode_ChildrenVersion()
+	 * @model default="-1" transient="true"
+	 * @generated
+	 */
+	int getChildrenVersion();
+
+	/**
+	 * Sets the value of the '{@link org.apache.hdt.core.internal.model.ZNode#getChildrenVersion <em>Children Version</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @param value the new value of the '<em>Children Version</em>' attribute.
+	 * @see #getChildrenVersion()
+	 * @generated
+	 */
+	void setChildrenVersion(int value);
+
+	/**
+	 * Returns the value of the '<em><b>Acl Version</b></em>' attribute.
+	 * The default value is <code>"-1"</code>.
+	 * <!-- begin-user-doc -->
+	 * <p>
+	 * If the meaning of the '<em>Acl Version</em>' attribute isn't clear,
+	 * there really should be more of a description here...
+	 * </p>
+	 * <!-- end-user-doc -->
+	 * @return the value of the '<em>Acl Version</em>' attribute.
+	 * @see #setAclVersion(int)
+	 * @see org.apache.hdt.core.internal.model.HadoopPackage#getZNode_AclVersion()
+	 * @model default="-1"
+	 * @generated
+	 */
+	int getAclVersion();
+
+	/**
+	 * Sets the value of the '{@link org.apache.hdt.core.internal.model.ZNode#getAclVersion <em>Acl Version</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @param value the new value of the '<em>Acl Version</em>' attribute.
+	 * @see #getAclVersion()
+	 * @generated
+	 */
+	void setAclVersion(int value);
+
+	/**
+	 * Returns the value of the '<em><b>Ephermal Owner Session Id</b></em>' attribute.
+	 * The default value is <code>"-1"</code>.
+	 * <!-- begin-user-doc -->
+	 * <p>
+	 * If the meaning of the '<em>Ephermal Owner Session Id</em>' attribute isn't clear,
+	 * there really should be more of a description here...
+	 * </p>
+	 * <!-- end-user-doc -->
+	 * @return the value of the '<em>Ephermal Owner Session Id</em>' attribute.
+	 * @see #setEphermalOwnerSessionId(long)
+	 * @see org.apache.hdt.core.internal.model.HadoopPackage#getZNode_EphermalOwnerSessionId()
+	 * @model default="-1"
+	 * @generated
+	 */
+	long getEphermalOwnerSessionId();
+
+	/**
+	 * Sets the value of the '{@link org.apache.hdt.core.internal.model.ZNode#getEphermalOwnerSessionId <em>Ephermal Owner Session Id</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @param value the new value of the '<em>Ephermal Owner Session Id</em>' attribute.
+	 * @see #getEphermalOwnerSessionId()
+	 * @generated
+	 */
+	void setEphermalOwnerSessionId(long value);
+
+	/**
+	 * Returns the value of the '<em><b>Data Length</b></em>' attribute.
+	 * The default value is <code>"-1"</code>.
+	 * <!-- begin-user-doc -->
+	 * <p>
+	 * If the meaning of the '<em>Data Length</em>' attribute isn't clear,
+	 * there really should be more of a description here...
+	 * </p>
+	 * <!-- end-user-doc -->
+	 * @return the value of the '<em>Data Length</em>' attribute.
+	 * @see #setDataLength(int)
+	 * @see org.apache.hdt.core.internal.model.HadoopPackage#getZNode_DataLength()
+	 * @model default="-1"
+	 * @generated
+	 */
+	int getDataLength();
+
+	/**
+	 * Sets the value of the '{@link org.apache.hdt.core.internal.model.ZNode#getDataLength <em>Data Length</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @param value the new value of the '<em>Data Length</em>' attribute.
+	 * @see #getDataLength()
+	 * @generated
+	 */
+	void setDataLength(int value);
+
+	/**
+	 * Returns the value of the '<em><b>Children Count</b></em>' attribute.
+	 * The default value is <code>"0"</code>.
+	 * <!-- begin-user-doc -->
+	 * <p>
+	 * If the meaning of the '<em>Children Count</em>' attribute isn't clear,
+	 * there really should be more of a description here...
+	 * </p>
+	 * <!-- end-user-doc -->
+	 * @return the value of the '<em>Children Count</em>' attribute.
+	 * @see #setChildrenCount(int)
+	 * @see org.apache.hdt.core.internal.model.HadoopPackage#getZNode_ChildrenCount()
+	 * @model default="0"
+	 * @generated
+	 */
+	int getChildrenCount();
+
+	/**
+	 * Sets the value of the '{@link org.apache.hdt.core.internal.model.ZNode#getChildrenCount <em>Children Count</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @param value the new value of the '<em>Children Count</em>' attribute.
+	 * @see #getChildrenCount()
+	 * @generated
+	 */
+	void setChildrenCount(int value);
+
+	/**
+	 * Returns the value of the '<em><b>Parent</b></em>' reference.
+	 * <!-- begin-user-doc -->
+	 * <p>
+	 * If the meaning of the '<em>Parent</em>' reference isn't clear,
+	 * there really should be more of a description here...
+	 * </p>
+	 * <!-- end-user-doc -->
+	 * @return the value of the '<em>Parent</em>' reference.
+	 * @see #setParent(ZNode)
+	 * @see org.apache.hdt.core.internal.model.HadoopPackage#getZNode_Parent()
+	 * @model
+	 * @generated
+	 */
+	ZNode getParent();
+
+	/**
+	 * Sets the value of the '{@link org.apache.hdt.core.internal.model.ZNode#getParent <em>Parent</em>}' reference.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @param value the new value of the '<em>Parent</em>' reference.
+	 * @see #getParent()
+	 * @generated
+	 */
+	void setParent(ZNode value);
+
+	/**
+	 * Returns the value of the '<em><b>Node Name</b></em>' attribute.
+	 * <!-- begin-user-doc -->
+	 * <p>
+	 * If the meaning of the '<em>Node Name</em>' attribute isn't clear,
+	 * there really should be more of a description here...
+	 * </p>
+	 * <!-- end-user-doc -->
+	 * @return the value of the '<em>Node Name</em>' attribute.
+	 * @see #setNodeName(String)
+	 * @see org.apache.hdt.core.internal.model.HadoopPackage#getZNode_NodeName()
+	 * @model
+	 * @generated
+	 */
+	String getNodeName();
+
+	/**
+	 * Sets the value of the '{@link org.apache.hdt.core.internal.model.ZNode#getNodeName <em>Node Name</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @param value the new value of the '<em>Node Name</em>' attribute.
+	 * @see #getNodeName()
+	 * @generated
+	 */
+	void setNodeName(String value);
+
+	/**
+	 * Returns the value of the '<em><b>Sequential</b></em>' attribute.
+	 * <!-- begin-user-doc -->
+	 * <p>
+	 * If the meaning of the '<em>Sequential</em>' attribute isn't clear,
+	 * there really should be more of a description here...
+	 * </p>
+	 * <!-- end-user-doc -->
+	 * @return the value of the '<em>Sequential</em>' attribute.
+	 * @see #setSequential(boolean)
+	 * @see org.apache.hdt.core.internal.model.HadoopPackage#getZNode_Sequential()
+	 * @model
+	 * @generated
+	 */
+	boolean isSequential();
+
+	/**
+	 * Sets the value of the '{@link org.apache.hdt.core.internal.model.ZNode#isSequential <em>Sequential</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @param value the new value of the '<em>Sequential</em>' attribute.
+	 * @see #isSequential()
+	 * @generated
+	 */
+	void setSequential(boolean value);
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <p>
+	 * If the meaning of the '<em>Server</em>' reference isn't clear,
+	 * there really should be more of a description here...
+	 * </p>
+	 * <!-- end-user-doc -->
+	 * @model kind="operation"
+	 *        annotation="http://www.eclipse.org/emf/2002/GenModel body='if(this instanceof org.apache.hdt.core.internal.model.ZooKeeperServer)\n\t\t\treturn (org.apache.hdt.core.internal.model.ZooKeeperServer) this;\n\t\telse\n\t\t\treturn getParent().getServer();'"
+	 * @generated
+	 */
+	ZooKeeperServer getServer();
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <p>
+	 * If the meaning of the '<em>Path</em>' attribute isn't clear,
+	 * there really should be more of a description here...
+	 * </p>
+	 * <!-- end-user-doc -->
+	 * @model kind="operation"
+	 *        annotation="http://www.eclipse.org/emf/2002/GenModel body='if (this instanceof org.apache.hdt.core.internal.model.ZooKeeperServer)\n\treturn \"/\";\nelse {\n\tString parentPath = getParent().getPath();\n\treturn parentPath.endsWith(\"/\") ? parentPath + getNodeName() : parentPath + \"/\" + getNodeName();\n}'"
+	 * @generated
+	 */
+	String getPath();
+
+} // ZNode
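
The two operations at the end of ZNode carry their implementations in escaped GenModel body annotations, which the generator emits as the method bodies. Unescaped, they amount to the following equivalent Java, reconstructed from the annotations above:

    public ZooKeeperServer getServer() {
        // Walk up the parent chain until the ZooKeeperServer root is reached.
        if (this instanceof org.apache.hdt.core.internal.model.ZooKeeperServer)
            return (org.apache.hdt.core.internal.model.ZooKeeperServer) this;
        else
            return getParent().getServer();
    }

    public String getPath() {
        // The server itself is the root node ("/"); every other path is the
        // parent path joined with this node's name.
        if (this instanceof org.apache.hdt.core.internal.model.ZooKeeperServer)
            return "/";
        else {
            String parentPath = getParent().getPath();
            return parentPath.endsWith("/") ? parentPath + getNodeName()
                                            : parentPath + "/" + getNodeName();
        }
    }

In other words, the owning server is found by walking up the parent chain, and a node's path is the "/"-joined chain of node names rooted at that server.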

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/ZNodeType.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/ZNodeType.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/ZNodeType.java
new file mode 100644
index 0000000..f67ca48
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/ZNodeType.java
@@ -0,0 +1,251 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *  
+ */
+package org.apache.hdt.core.internal.model;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+
+import org.eclipse.emf.common.util.Enumerator;
+
+/**
+ * <!-- begin-user-doc -->
+ * A representation of the literals of the enumeration '<em><b>ZNode Type</b></em>',
+ * and utility methods for working with them.
+ * <!-- end-user-doc -->
+ * @see org.apache.hdt.core.internal.model.HadoopPackage#getZNodeType()
+ * @model
+ * @generated
+ */
+public enum ZNodeType implements Enumerator {
+	/**
+	 * The '<em><b>REGULAR</b></em>' literal object.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #REGULAR_VALUE
+	 * @generated
+	 * @ordered
+	 */
+	REGULAR(0, "REGULAR", "REGULAR"),
+
+	/**
+	 * The '<em><b>EPHERMAL</b></em>' literal object.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #EPHERMAL_VALUE
+	 * @generated
+	 * @ordered
+	 */
+	EPHERMAL(1, "EPHERMAL", "EPHERMAL"),
+
+	/**
+	 * The '<em><b>SEQUENCE</b></em>' literal object.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #SEQUENCE_VALUE
+	 * @generated
+	 * @ordered
+	 */
+	SEQUENCE(2, "SEQUENCE", "SEQUENCE");
+
+	/**
+	 * The '<em><b>REGULAR</b></em>' literal value.
+	 * <!-- begin-user-doc -->
+	 * <p>
+	 * If the meaning of '<em><b>REGULAR</b></em>' literal object isn't clear,
+	 * there really should be more of a description here...
+	 * </p>
+	 * <!-- end-user-doc -->
+	 * @see #REGULAR
+	 * @model
+	 * @generated
+	 * @ordered
+	 */
+	public static final int REGULAR_VALUE = 0;
+
+	/**
+	 * The '<em><b>EPHERMAL</b></em>' literal value.
+	 * <!-- begin-user-doc -->
+	 * <p>
+	 * If the meaning of '<em><b>EPHERMAL</b></em>' literal object isn't clear,
+	 * there really should be more of a description here...
+	 * </p>
+	 * <!-- end-user-doc -->
+	 * @see #EPHERMAL
+	 * @model
+	 * @generated
+	 * @ordered
+	 */
+	public static final int EPHERMAL_VALUE = 1;
+
+	/**
+	 * The '<em><b>SEQUENCE</b></em>' literal value.
+	 * <!-- begin-user-doc -->
+	 * <p>
+	 * If the meaning of '<em><b>SEQUENCE</b></em>' literal object isn't clear,
+	 * there really should be more of a description here...
+	 * </p>
+	 * <!-- end-user-doc -->
+	 * @see #SEQUENCE
+	 * @model
+	 * @generated
+	 * @ordered
+	 */
+	public static final int SEQUENCE_VALUE = 2;
+
+	/**
+	 * An array of all the '<em><b>ZNode Type</b></em>' enumerators.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	private static final ZNodeType[] VALUES_ARRAY =
+		new ZNodeType[] {
+			REGULAR,
+			EPHERMAL,
+			SEQUENCE,
+		};
+
+	/**
+	 * A public read-only list of all the '<em><b>ZNode Type</b></em>' enumerators.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public static final List<ZNodeType> VALUES = Collections.unmodifiableList(Arrays.asList(VALUES_ARRAY));
+
+	/**
+	 * Returns the '<em><b>ZNode Type</b></em>' literal with the specified literal value.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public static ZNodeType get(String literal) {
+		for (int i = 0; i < VALUES_ARRAY.length; ++i) {
+			ZNodeType result = VALUES_ARRAY[i];
+			if (result.toString().equals(literal)) {
+				return result;
+			}
+		}
+		return null;
+	}
+
+	/**
+	 * Returns the '<em><b>ZNode Type</b></em>' literal with the specified name.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public static ZNodeType getByName(String name) {
+		for (int i = 0; i < VALUES_ARRAY.length; ++i) {
+			ZNodeType result = VALUES_ARRAY[i];
+			if (result.getName().equals(name)) {
+				return result;
+			}
+		}
+		return null;
+	}
+
+	/**
+	 * Returns the '<em><b>ZNode Type</b></em>' literal with the specified integer value.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public static ZNodeType get(int value) {
+		switch (value) {
+			case REGULAR_VALUE: return REGULAR;
+			case EPHERMAL_VALUE: return EPHERMAL;
+			case SEQUENCE_VALUE: return SEQUENCE;
+		}
+		return null;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	private final int value;
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	private final String name;
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	private final String literal;
+
+	/**
+	 * Only this class can construct instances.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	private ZNodeType(int value, String name, String literal) {
+		this.value = value;
+		this.name = name;
+		this.literal = literal;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public int getValue() {
+	  return value;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public String getName() {
+	  return name;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public String getLiteral() {
+	  return literal;
+	}
+
+	/**
+	 * Returns the literal value of the enumerator, which is its string representation.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	@Override
+	public String toString() {
+		return literal;
+	}
+	
+} //ZNodeType
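
ZNodeType mirrors the boolean flags already present on ZNode (isEphermeral(), isSequential(); spellings as generated). A sketch of deriving the enum from a node is shown below; the mapping is an illustration only and is not defined by this patch, and since ZooKeeper nodes can be both ephemeral and sequential the order of the checks here is arbitrary.

    ZNodeType typeOf(ZNode node) {
        // Illustrative mapping: prefer EPHERMAL, then SEQUENCE, else REGULAR.
        if (node.isEphermeral())
            return ZNodeType.EPHERMAL;
        if (node.isSequential())
            return ZNodeType.SEQUENCE;
        return ZNodeType.REGULAR;
    }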

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/ZooKeeperServer.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/ZooKeeperServer.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/ZooKeeperServer.java
new file mode 100644
index 0000000..e3b3796
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/ZooKeeperServer.java
@@ -0,0 +1,35 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *  
+ */
+package org.apache.hdt.core.internal.model;
+
+import org.eclipse.emf.common.util.EList;
+
+
+/**
+ * <!-- begin-user-doc -->
+ * A representation of the model object '<em><b>Zoo Keeper Server</b></em>'.
+ * <!-- end-user-doc -->
+ *
+ *
+ * @see org.apache.hdt.core.internal.model.HadoopPackage#getZooKeeperServer()
+ * @model
+ * @generated
+ */
+public interface ZooKeeperServer extends Server, ZNode {
+} // ZooKeeperServer
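
ZooKeeperServer extends both Server and ZNode, so the server object doubles as the root znode of its own tree. Combined with the getServer()/getPath() bodies shown in ZNode.java above, that yields the expected root behavior:

    // For any ZooKeeperServer zk taken from Servers#getZookeeperServers():
    assert "/".equals(zk.getPath()); // the root path short-circuits in getPath()
    assert zk.getServer() == zk;     // getServer() returns the root itself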


[2/8] HDT-32: Merge the code base of Hadoop-Eclipse project into HDT. Contributed by Srimanth Gunturi

Posted by rs...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.ui/.settings/org.eclipse.jdt.ui.prefs
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/.settings/org.eclipse.jdt.ui.prefs b/org.apache.hdt.ui/.settings/org.eclipse.jdt.ui.prefs
new file mode 100644
index 0000000..be0e13d
--- /dev/null
+++ b/org.apache.hdt.ui/.settings/org.eclipse.jdt.ui.prefs
@@ -0,0 +1,6 @@
+#Thu Mar 21 02:01:00 PDT 2013
+eclipse.preferences.version=1
+formatter_profile=_Apache Hadoop Eclipse Format
+formatter_settings_version=11
+org.eclipse.jdt.ui.javadoc=true
+org.eclipse.jdt.ui.text.custom_code_templates=<?xml version\="1.0" encoding\="UTF-8" standalone\="no"?><templates><template autoinsert\="true" context\="gettercomment_context" deleted\="false" description\="Comment for getter method" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.gettercomment" name\="gettercomment">/**\n * @return the ${bare_field_name}\n */</template><template autoinsert\="true" context\="settercomment_context" deleted\="false" description\="Comment for setter method" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.settercomment" name\="settercomment">/**\n * @param ${param} the ${bare_field_name} to set\n */</template><template autoinsert\="true" context\="constructorcomment_context" deleted\="false" description\="Comment for created constructors" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.constructorcomment" name\="constructorcomment">/**\n * ${tags}\n */</template><template autoinsert\="false" context\="filecomment_context" deleted\="false" description\="Comment for created Java files" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.filecomment" name\="filecomment">/**\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * "License"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http\://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an "AS IS" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */</template><template autoinsert\="false" context\="typecomment_context" deleted\="false" description\="Comment for created types" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.typecomment" name\="typecomment">/**\n * @author Srimanth Gunturi\n *\n * ${tags}\n */</template><template autoinsert\="true" context\="fieldcomment_context" deleted\="false" description\="Comment for fields" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.fieldcomment" name\="fieldcomment">/**\n * \n */</template><template autoinsert\="true" context\="methodcomment_context" deleted\="false" description\="Comment for non-overriding methods" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.methodcomment" name\="methodcomment">/**\n * ${tags}\n */</template><template autoinsert\="true" context\="overridecomment_context" deleted\="false" description\="Comment for overriding methods" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.overridecomment" name\="overridecomment">/* (non-Javadoc)\n * ${see_to_overridden}\n */</template><template autoinsert\="true" context\="delegatecomment_context" deleted\="false" description\="Comment for delegate methods" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.delegatecomment" name\="delegatecomment">/**\n * ${tags}\n * ${see_to_target}\n */</template><template autoinsert\="true" context\="newtype_context" deleted\="false" description\="Newly created files" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.newtype" name\="newtype">${filecomment}\n${package_declaration}\n\n${typecomment}\n${type_declaration}</template><template autoinsert\="true" context\="classbody_context" deleted\="false" description\="Code in new class type bodies" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.classbody" name\="classbody">\n</template><template autoinsert\="true" context\="interfacebody_context" deleted\="false" description\="Code in new interface type bodies" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.interfacebody" name\="interfacebody">\n</template><template autoinsert\="true" context\="enumbody_context" deleted\="false" description\="Code in new enum type bodies" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.enumbody" name\="enumbody">\n</template><template autoinsert\="true" context\="annotationbody_context" deleted\="false" description\="Code in new annotation type bodies" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.annotationbody" name\="annotationbody">\n</template><template autoinsert\="true" context\="catchblock_context" deleted\="false" description\="Code in new catch blocks" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.catchblock" name\="catchblock">// ${todo} Auto-generated catch block\n${exception_var}.printStackTrace();</template><template autoinsert\="true" context\="methodbody_context" deleted\="false" description\="Code in created method stubs" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.methodbody" name\="methodbody">// ${todo} Auto-generated method stub\n${body_statement}</template><template autoinsert\="true" context\="constructorbody_context" deleted\="false" description\="Code in created constructor stubs" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.constructorbody" name\="constructorbody">${body_statement}\n// ${todo} Auto-generated constructor stub</template><template autoinsert\="true" context\="getterbody_context" deleted\="false" description\="Code in created getters" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.getterbody" name\="getterbody">return ${field};</template><template autoinsert\="true" context\="setterbody_context" deleted\="false" description\="Code in created setters" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.setterbody" name\="setterbody">${field} \= ${param};</template></templates>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.ui/META-INF/MANIFEST.MF
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/META-INF/MANIFEST.MF b/org.apache.hdt.ui/META-INF/MANIFEST.MF
new file mode 100644
index 0000000..aa7ecdb
--- /dev/null
+++ b/org.apache.hdt.ui/META-INF/MANIFEST.MF
@@ -0,0 +1,19 @@
+Manifest-Version: 1.0
+Bundle-ManifestVersion: 2
+Bundle-Name: Apache Hadoop UI Eclipse Plugin
+Bundle-SymbolicName: org.apache.hdt.ui;singleton:=true
+Bundle-Version: 1.0.0.qualifier
+Bundle-Activator: org.apache.hdt.ui.Activator
+Bundle-Vendor: Apache Hadoop
+Require-Bundle: org.eclipse.core.runtime,
+ org.eclipse.core.resources,
+ org.eclipse.ui,
+ org.eclipse.ui.ide;bundle-version="3.6.0",
+ org.eclipse.team.ui;bundle-version="3.5.100",
+ org.apache.hdt.core;bundle-version="1.0.0",
+ org.eclipse.ui.navigator;bundle-version="3.5.0",
+ org.eclipse.ui.navigator.resources;bundle-version="3.4.200",
+ org.eclipse.ui.views.properties.tabbed;bundle-version="3.5.100";resolution:=optional
+Bundle-RequiredExecutionEnvironment: JavaSE-1.6
+Bundle-ActivationPolicy: lazy
+Bundle-ClassPath: .
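
Note that org.eclipse.ui.views.properties.tabbed is pulled in above with resolution:=optional, so it may be absent at runtime. A minimal sketch, using a hypothetical helper class that is not part of this patch, of how code can probe for the optional bundle before touching any tabbed-properties classes:

    import org.eclipse.core.runtime.Platform;
    import org.osgi.framework.Bundle;

    public class TabbedPropertiesCheck {
        /** True only when the optional tabbed-properties bundle is present in the running platform. */
        public static boolean isAvailable() {
            Bundle bundle = Platform.getBundle("org.eclipse.ui.views.properties.tabbed");
            return bundle != null;
        }
    }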

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.ui/NOTICE.txt
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/NOTICE.txt b/org.apache.hdt.ui/NOTICE.txt
new file mode 100644
index 0000000..f4ba503
--- /dev/null
+++ b/org.apache.hdt.ui/NOTICE.txt
@@ -0,0 +1,2 @@
+Eclipse Icons (http://tech.joelbecker.net/articles/resources/5-eclipseicons - Eclipse Public License v 1.0)
+http://eclipse.org/ - http://www.eclipse.org/legal/epl-v10.html 

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.ui/build.properties
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/build.properties b/org.apache.hdt.ui/build.properties
new file mode 100644
index 0000000..e9472eb
--- /dev/null
+++ b/org.apache.hdt.ui/build.properties
@@ -0,0 +1,9 @@
+source.. = src/
+output.. = bin/
+bin.includes = META-INF/,\
+               .,\
+               plugin.xml,\
+               NOTICE.txt,\
+               icons/
+src.includes = NOTICE.txt,\
+               src/

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.ui/plugin.xml
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/plugin.xml b/org.apache.hdt.ui/plugin.xml
new file mode 100644
index 0000000..2051a9f
--- /dev/null
+++ b/org.apache.hdt.ui/plugin.xml
@@ -0,0 +1,424 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?eclipse version="3.4"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<plugin>
+   <extension
+         point="org.eclipse.ui.perspectives">
+      <perspective
+            class="org.apache.hdt.ui.internal.HadoopPerspectiveFactory"
+            icon="icons/hadoop-logo-16x16.png"
+            id="org.apache.hdt.ui.perspective"
+            name="Hadoop">
+      </perspective>
+   </extension>
+   <extension
+         point="org.eclipse.ui.perspectiveExtensions">
+      <perspectiveExtension
+            targetID="org.apache.hdt.ui.perspective">
+         <view
+               id="org.apache.hdt.ui.view.servers"
+               minimized="false"
+               ratio="0.25"
+               relationship="left"
+               relative="org.eclipse.ui.editorss">
+         </view>
+         <view
+               id="org.eclipse.ui.navigator.ProjectExplorer"
+               minimized="false"
+               ratio="0.5"
+               relationship="bottom"
+               relative="org.apache.hdt.ui.view.servers">
+         </view>
+         <view
+               id="org.eclipse.ui.views.PropertySheet"
+               minimized="false"
+               ratio="0.66"
+               relationship="bottom"
+               relative="org.eclipse.ui.editorss">
+         </view>
+         <newWizardShortcut
+               id="org.apache.hdt.ui.wizard.newHdfsServer">
+         </newWizardShortcut>
+         <newWizardShortcut
+               id="org.apache.hdt.ui.wizard.newZooKeeperServer">
+         </newWizardShortcut>
+         <viewShortcut
+               id="org.eclipse.ui.views.ResourceNavigator">
+         </viewShortcut>
+         <actionSet
+               id="org.apache.hdt.ui.actionSet.newServers">
+         </actionSet>
+      </perspectiveExtension>
+   </extension>
+   <extension
+         point="org.eclipse.ui.decorators">
+      <decorator
+            adaptable="true"
+            class="org.apache.hdt.ui.internal.hdfs.HDFSLightweightLabelDecorator"
+            id="org.apache.hdt.ui.decorator.hdfs"
+            label="HDFS Resource Decorator"
+            lightweight="true"
+            location="BOTTOM_RIGHT"
+            state="true">
+         <enablement>
+            <objectClass
+                  name="org.eclipse.core.resources.IResource">
+            </objectClass>
+         </enablement>
+      </decorator>
+      <decorator
+            adaptable="true"
+            class="org.apache.hdt.ui.internal.zookeeper.ZooKeeperLightweightLabelDecorator"
+            id="org.apache.hdt.ui.decorator.zookeeper"
+            label="ZooKeeper Resource Decorator"
+            lightweight="true"
+            location="BOTTOM_RIGHT"
+            state="true">
+         <enablement>
+            <objectClass
+                  name="org.apache.hdt.core.internal.model.ZNode">
+            </objectClass>
+         </enablement>
+      </decorator>
+   </extension>
+   <extension
+         point="org.eclipse.ui.navigator.viewer">
+      <viewerContentBinding
+            viewerId="org.eclipse.ui.navigator.ProjectExplorer">
+         <includes>
+            <contentExtension
+                  isRoot="false"
+                  pattern="org.apache.hdt.ui.navigatorContent.hdfs">
+            </contentExtension>
+         </includes>
+      </viewerContentBinding>
+      <viewerContentBinding
+            viewerId="org.apache.hdt.ui.view.servers">
+         <includes>
+            <contentExtension
+                  isRoot="true"
+                  pattern="org.apache.hdt.ui.navigatorContent.hadoop">
+            </contentExtension>
+         </includes>
+      </viewerContentBinding>
+   </extension>
+   <extension
+         point="org.eclipse.ui.navigator.navigatorContent">
+      <navigatorContent
+            contentProvider="org.apache.hdt.ui.internal.hdfs.HDFSCommonContentProvider"
+            icon="icons/hadoop-logo-16x16.png"
+            id="org.apache.hdt.ui.navigatorContent.hdfs"
+            labelProvider="org.apache.hdt.ui.internal.hdfs.HDFSLabelProvider"
+            name="Navigator HDFS Content Provider"
+            priority="higher">
+         <triggerPoints>
+            <instanceof
+                  value="org.eclipse.core.resources.IProject">
+            </instanceof>
+         </triggerPoints>
+      </navigatorContent>
+      <navigatorContent
+            contentProvider="org.apache.hdt.ui.internal.HadoopCommonContentProvider"
+            icon="icons/hadoop-logo-16x16.png"
+            id="org.apache.hdt.ui.navigatorContent.hadoop"
+            labelProvider="org.apache.hdt.ui.internal.HadoopLabelProvider"
+            name="Hadoop Content Provider">
+         <triggerPoints>
+            <instanceof
+                  value="org.apache.hdt.core.internal.model.ZooKeeperServer">
+            </instanceof></triggerPoints>
+      </navigatorContent>
+   </extension>
+   <extension
+         point="org.eclipse.ui.newWizards">
+      <wizard
+            category="org.apache.hdt.ui.newWizards.category"
+            class="org.apache.hdt.ui.internal.hdfs.NewHDFSWizard"
+            finalPerspective="org.apache.hdt.ui.perspective"
+            icon="icons/hadoop-hdfs-new.png"
+            id="org.apache.hdt.ui.wizard.newHdfsServer"
+            name="New HDFS Server">
+      </wizard>
+      <category
+            id="org.apache.hdt.ui.newWizards.category"
+            name="Hadoop">
+      </category>
+      <wizard
+            category="org.apache.hdt.ui.newWizards.category"
+            class="org.apache.hdt.ui.internal.zookeeper.NewZooKeeperWizard"
+            finalPerspective="org.apache.hdt.ui.perspective"
+            icon="icons/hadoop-zookeeper-new.png"
+            id="org.apache.hdt.ui.wizard.newZooKeeperServer"
+            name="New ZooKeeper Server">
+      </wizard>
+   </extension>
+   <extension
+         point="org.eclipse.ui.popupMenus">
+      <objectContribution
+            adaptable="true"
+            id="org.apache.hdt.ui.team.fileContribution"
+            objectClass="org.eclipse.core.resources.IFile">
+         <action
+               class="org.apache.hdt.ui.internal.hdfs.DownloadResourceAction"
+               icon="icons/download.gif"
+               id="org.apache.hdt.ui.team.FileDownloadAction"
+               label="Download"
+               menubarPath="additions">
+         </action>
+         <action
+               class="org.apache.hdt.ui.internal.hdfs.UploadResourceAction"
+               icon="icons/upload.gif"
+               id="org.apache.hdt.ui.team.FileUploadAction"
+               label="Upload"
+               menubarPath="additions">
+         </action>
+         <action
+               class="org.apache.hdt.ui.internal.hdfs.DiscardDownloadResourceAction"
+               icon="icons/discardDownload.png"
+               id="org.apache.hdt.ui.team.fileDiscardDownloadAction"
+               label="Discard Download"
+               menubarPath="additions">
+         </action>
+      </objectContribution>
+      <objectContribution
+            adaptable="true"
+            id="org.apache.hdt.ui.team.folderContribution"
+            objectClass="org.eclipse.core.resources.IFolder">
+         <action
+               class="org.apache.hdt.ui.internal.hdfs.DownloadResourceAction"
+               icon="icons/download.gif"
+               id="org.apache.hdt.ui.team.folderDownloadContribution"
+               label="Download"
+               menubarPath="additions">
+         </action>
+         <action
+               class="org.apache.hdt.ui.internal.hdfs.UploadResourceAction"
+               icon="icons/upload.gif"
+               id="org.apache.hdt.ui.team.folderUploadContribution"
+               label="Upload"
+               menubarPath="additions">
+         </action>
+         <action
+               class="org.apache.hdt.ui.internal.hdfs.DiscardDownloadResourceAction"
+               icon="icons/discardDownload.png"
+               id="org.apache.hdt.ui.team.folderDiscardDownloadAction"
+               label="Discard Download"
+               menubarPath="additions">
+         </action>
+      </objectContribution>
+      <objectContribution
+            adaptable="false"
+            id="org.apache.hdt.ui.team.projectContribution"
+            objectClass="org.eclipse.core.resources.IProject">
+         <action
+               class="org.apache.hdt.ui.internal.hdfs.ReconnectAction"
+               icon="icons/ovr/online.png"
+               id="org.apache.hdt.ui.team.projectReconnectContribution"
+               label="Reconnect"
+               menubarPath="additions">
+            <class
+                  class="org.eclipse.core.resources.IProject">
+            </class>
+         </action>
+         <action
+               class="org.apache.hdt.ui.internal.hdfs.DisconnectAction"
+               icon="icons/ovr/offline.png"
+               id="org.apache.hdt.ui.team.projectDisconnectContribution"
+               label="Disconnect"
+               menubarPath="additions">
+            <class
+                  class="org.eclipse.core.resources.IProject">
+            </class>
+         </action>
+      </objectContribution>
+      <objectContribution
+            adaptable="false"
+            id="org.apache.hdt.ui.team.zkServerContribution"
+            objectClass="org.apache.hdt.core.internal.model.ZooKeeperServer">
+         <action
+               class="org.apache.hdt.ui.internal.zookeeper.DisconnectAction"
+               icon="icons/ovr/offline.png"
+               id="org.apache.hdt.ui.team.zookeeper.DisconnectContribution"
+               label="Disconnect"
+               menubarPath="additions">
+         </action>
+         <action
+               class="org.apache.hdt.ui.internal.zookeeper.ReconnectAction"
+               icon="icons/ovr/online.png"
+               id="org.apache.hdt.ui.team.zookeeper.ReconnectContribution"
+               label="Reconnect"
+               menubarPath="additions">
+         </action>
+         <action
+               class="org.apache.hdt.ui.internal.zookeeper.RefreshAction"
+               icon="icons/zookeeper-refresh.gif"
+               id="org.apache.hdt.ui.team.zookeeper.server.RefreshContribution"
+               label="Refresh"
+               menubarPath="additions">
+         </action>
+         <action
+               class="org.apache.hdt.ui.internal.zookeeper.DeleteAction"
+               icon="icons/delete.gif"
+               id="org.apache.hdt.ui.team.zookeeper.server.DeleteContribution"
+               label="Delete"
+               menubarPath="additions">
+         </action>
+         <visibility>
+            <objectClass
+                  name="org.apache.hdt.core.internal.model.ZooKeeperServer">
+            </objectClass>
+         </visibility>
+      </objectContribution>
+      <objectContribution
+            adaptable="false"
+            id="org.apache.hdt.ui.team.zkNodeContribution"
+            objectClass="org.apache.hdt.core.internal.model.ZNode">
+         <action
+               class="org.apache.hdt.ui.internal.zookeeper.RefreshAction"
+               icon="icons/zookeeper-refresh.gif"
+               id="org.apache.hdt.ui.team.zookeeper.node.RefreshContribution"
+               label="Refresh"
+               menubarPath="additions">
+         </action>
+         <action
+               class="org.apache.hdt.ui.internal.zookeeper.DeleteAction"
+               icon="icons/delete.gif"
+               id="org.apache.hdt.ui.team.zookeeper.node.DeleteContribution"
+               label="Delete"
+               menubarPath="additions">
+         </action>
+         <action
+               class="org.apache.hdt.ui.internal.zookeeper.OpenAction"
+               enablesFor="1+"
+               id="org.apache.hdt.ui.team.zookeeper.node.OpenContribution"
+               label="Open"
+               menubarPath="additions">
+         </action>
+         <visibility>
+            <and>
+               <objectClass
+                     name="org.apache.hdt.core.internal.model.ZNode">
+               </objectClass>
+               <not>
+                  <objectClass
+                        name="org.apache.hdt.core.internal.model.ZooKeeperServer">
+                  </objectClass>
+               </not>
+            </and>
+         </visibility>
+      </objectContribution>
+   </extension>
+   <extension
+         point="org.eclipse.ui.views.properties.tabbed.propertyContributor">
+      <propertyContributor
+            contributorId="org.eclipse.ui.navigator.ProjectExplorer"
+            typeMapper="org.apache.hdt.ui.internal.hdfs.PropertyTypeMapper">
+         <propertyCategory
+               category="general"></propertyCategory>
+         <propertyCategory
+               category="resource">
+         </propertyCategory>
+      </propertyContributor>
+   </extension>
+   <extension
+         point="org.eclipse.ui.views.properties.tabbed.propertyTabs">
+      <propertyTabs
+            contributorId="org.eclipse.ui.navigator.ProjectExplorer">
+         <propertyTab
+               category="resource"
+               id="org.apache.hdt.ui.propertyTab.hdfs"
+               image="icons/hadoop-hdfs-16x16.gif"
+               label="HDFS">
+         </propertyTab>
+      </propertyTabs>
+   </extension>
+   <extension
+         point="org.eclipse.ui.views.properties.tabbed.propertySections">
+      <propertySections
+            contributorId="org.eclipse.ui.navigator.ProjectExplorer">
+         <propertySection
+               class="org.apache.hdt.ui.internal.hdfs.HDFSPropertySection"
+               enablesFor="1"
+               id="org.apache.hdt.ui.propertySection.hdfs"
+               tab="org.apache.hdt.ui.propertyTab.hdfs">
+            <input
+                  type="org.apache.hdt.core.internal.hdfs.HDFSFileStore">
+            </input>
+         </propertySection>
+      </propertySections>
+   </extension>
+   <extension
+         point="org.eclipse.core.runtime.adapters">
+      <factory
+            adaptableType="org.apache.hdt.core.internal.hdfs.HDFSFileStore"
+            class="org.apache.hdt.ui.internal.HadoopAdapterFactory">
+         <adapter
+               type="org.eclipse.ui.views.properties.IPropertySource">
+         </adapter>
+      </factory>
+      <factory
+            adaptableType="org.apache.hdt.core.internal.model.ZNode"
+            class="org.apache.hdt.ui.internal.HadoopAdapterFactory">
+         <adapter
+               type="org.eclipse.ui.views.properties.IPropertySource">
+         </adapter>
+      </factory>
+   </extension>
+   <extension
+         point="org.eclipse.ui.views">
+      <view
+            allowMultiple="false"
+            category="org.apache.hdt.ui.category"
+            class="org.eclipse.ui.navigator.CommonNavigator"
+            icon="icons/hadoop-logo-16x16.png"
+            id="org.apache.hdt.ui.view.servers"
+            name="Hadoop Servers"
+            restorable="true">
+      </view>
+      <category
+            id="org.apache.hdt.ui.category"
+            name="Hadoop">
+      </category>
+   </extension>
+   <extension
+         point="org.eclipse.ui.actionSets">
+      <actionSet
+            id="org.apache.hdt.ui.actionSet.newServers"
+            label="Hadoop New Servers Action Set">
+         <action
+               class="org.apache.hdt.ui.internal.zookeeper.NewZooKeeperServerAction"
+               icon="icons/hadoop-zookeeper-new.png"
+               id="org.apache.hdt.ui.action.new.zookeeper"
+               label="New ZooKeeper Server"
+               style="push"
+               toolbarPath="new.group"
+               tooltip="New ZooKeeper Server">
+         </action>
+         <action
+               class="org.apache.hdt.ui.internal.hdfs.NewHDFSServerAction"
+               icon="icons/hadoop-hdfs-new.png"
+               id="org.apache.hdt.ui.action.new.hdfs"
+               label="New HDFS Server"
+               style="push"
+               toolbarPath="new.group"
+               tooltip="New HDFS Server">
+         </action>
+      </actionSet>
+   </extension>
+
+</plugin>
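
The org.eclipse.ui.decorators contributions above name decorator classes that land in later parts of this patch. As a rough sketch only (class name and suffix text are hypothetical, not the actual HDFSLightweightLabelDecorator), this is the ILightweightLabelDecorator shape such a contribution has to implement:

    import org.eclipse.core.resources.IResource;
    import org.eclipse.jface.viewers.IDecoration;
    import org.eclipse.jface.viewers.ILightweightLabelDecorator;
    import org.eclipse.jface.viewers.LabelProvider;

    public class ExampleResourceDecorator extends LabelProvider implements ILightweightLabelDecorator {
        @Override
        public void decorate(Object element, IDecoration decoration) {
            if (element instanceof IResource) {
                // An image overlay would go to the quadrant declared in plugin.xml
                // (location="BOTTOM_RIGHT"); a plain text suffix is used here for brevity.
                decoration.addSuffix(" [HDFS]");
            }
        }
    }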

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.ui/src/org/apache/hdt/ui/Activator.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/Activator.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/Activator.java
new file mode 100644
index 0000000..99d7105
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/Activator.java
@@ -0,0 +1,135 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hdt.ui;
+
+import java.net.URL;
+
+import org.eclipse.core.runtime.FileLocator;
+import org.eclipse.core.runtime.Path;
+import org.eclipse.jface.resource.ImageDescriptor;
+import org.eclipse.swt.graphics.Image;
+import org.eclipse.ui.plugin.AbstractUIPlugin;
+import org.osgi.framework.Bundle;
+import org.osgi.framework.BundleContext;
+
+/**
+ * The activator class controls the plug-in life cycle
+ * 
+ * @author Srimanth Gunturi
+ */
+public class Activator extends AbstractUIPlugin {
+
+	// private static final Logger logger = Logger.getLogger(Activator.class);
+	// The plug-in ID
+	public static final String PLUGIN_ID = "org.apache.hdt.ui"; //$NON-NLS-1$
+	public static final String PREFERENCE_HDFS_URLS = "HDFS_SERVER_URLS";
+	public static final String PREFERENCE_ZOOKEEPER_URLS = "ZOOKEEPER_SERVER_URLS";
+	// ImageDescriptors
+	public static ImageDescriptor IMAGE_REMOTE_OVR;
+	public static ImageDescriptor IMAGE_LOCAL_OVR;
+	public static ImageDescriptor IMAGE_INCOMING_OVR;
+	public static ImageDescriptor IMAGE_OUTGOING_OVR;
+	public static ImageDescriptor IMAGE_SYNC_OVR;
+	public static ImageDescriptor IMAGE_READONLY_OVR;
+	public static ImageDescriptor IMAGE_HADOOP;
+	public static ImageDescriptor IMAGE_OFFLINE_OVR;
+	public static ImageDescriptor IMAGE_ONLINE_OVR;
+	public static ImageDescriptor IMAGE_ZOOKEEPER_EPHERMERAL;
+	// Images
+	public static Image IMAGE_HDFS;
+	public static Image IMAGE_ZOOKEEPER;
+	public static Image IMAGE_ZOOKEEPER_NODE;
+
+	// The shared instance
+	private static Activator plugin;
+
+	/**
+	 * The constructor
+	 */
+	public Activator() {
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.ui.plugin.AbstractUIPlugin#start(org.osgi.framework.BundleContext
+	 * )
+	 */
+	public void start(BundleContext context) throws Exception {
+		super.start(context);
+		plugin = this;
+		loadImages();
+	}
+
+	/**
+	 * 
+	 */
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.ui.plugin.AbstractUIPlugin#stop(org.osgi.framework.BundleContext
+	 * )
+	 */
+	public void stop(BundleContext context) throws Exception {
+		plugin = null;
+		super.stop(context);
+	}
+
+	/**
+	 * Returns the shared instance
+	 * 
+	 * @return the shared instance
+	 */
+	public static Activator getDefault() {
+		return plugin;
+	}
+
+	private void loadImages() {
+		Bundle bundle = getDefault().getBundle();
+		URL remoteFileUrl = FileLocator.find(bundle, new Path("/icons/ovr/remote_resource.gif"), null);
+		URL localFileUrl = FileLocator.find(bundle, new Path("/icons/ovr/local_resource.gif"), null);
+		URL incomingUrl = FileLocator.find(bundle, new Path("/icons/ovr/overlay-incoming.gif"), null);
+		URL outgoingUrl = FileLocator.find(bundle, new Path("/icons/ovr/overlay-outgoing.gif"), null);
+		URL waitingUrl = FileLocator.find(bundle, new Path("/icons/ovr/waiting_ovr.gif"), null);
+		URL hdfsUrl = FileLocator.find(bundle, new Path("/icons/hadoop-hdfs-16x16.gif"), null);
+		URL zookeeperUrl = FileLocator.find(bundle, new Path("/icons/hadoop-zookeeper-16x16.png"), null);
+		URL zookeeperNodeUrl = FileLocator.find(bundle, new Path("/icons/zookeeper_node.png"), null);
+		URL hadoopUrl = FileLocator.find(bundle, new Path("/icons/hadoop-logo-16x16.png"), null);
+		URL readonlyUrl = FileLocator.find(bundle, new Path("/icons/ovr/read_only.gif"), null);
+		URL offlineUrl = FileLocator.find(bundle, new Path("/icons/ovr/offline.png"), null);
+		URL onlineUrl = FileLocator.find(bundle, new Path("/icons/ovr/online.png"), null);
+		URL zookeeperEphermeralUrl = FileLocator.find(bundle, new Path("/icons/ovr/zookeeper_ephermeral.gif"), null);
+		
+		IMAGE_REMOTE_OVR = ImageDescriptor.createFromURL(remoteFileUrl);
+		IMAGE_LOCAL_OVR = ImageDescriptor.createFromURL(localFileUrl);
+		IMAGE_INCOMING_OVR = ImageDescriptor.createFromURL(incomingUrl);
+		IMAGE_OUTGOING_OVR = ImageDescriptor.createFromURL(outgoingUrl);
+		IMAGE_SYNC_OVR = ImageDescriptor.createFromURL(waitingUrl);
+		IMAGE_HDFS = ImageDescriptor.createFromURL(hdfsUrl).createImage();
+		IMAGE_HADOOP = ImageDescriptor.createFromURL(hadoopUrl);
+		IMAGE_READONLY_OVR = ImageDescriptor.createFromURL(readonlyUrl);
+		IMAGE_OFFLINE_OVR = ImageDescriptor.createFromURL(offlineUrl);
+		IMAGE_ONLINE_OVR = ImageDescriptor.createFromURL(onlineUrl);
+		IMAGE_ZOOKEEPER = ImageDescriptor.createFromURL(zookeeperUrl).createImage();
+		IMAGE_ZOOKEEPER_NODE = ImageDescriptor.createFromURL(zookeeperNodeUrl).createImage();
+		IMAGE_ZOOKEEPER_EPHERMERAL = ImageDescriptor.createFromURL(zookeeperEphermeralUrl);
+	}
+}
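
The Image and ImageDescriptor fields above are loaded once in start() and shared across the plug-in. A small usage sketch (the label provider class below is hypothetical); shared Image instances created by the activator should not be disposed by callers:

    import org.apache.hdt.ui.Activator;
    import org.eclipse.jface.viewers.LabelProvider;
    import org.eclipse.swt.graphics.Image;

    public class ExampleHdfsLabelProvider extends LabelProvider {
        @Override
        public Image getImage(Object element) {
            // Reuse the plug-in wide HDFS icon loaded by Activator.loadImages().
            return Activator.IMAGE_HDFS;
        }
    }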

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/HadoopAdapterFactory.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/HadoopAdapterFactory.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/HadoopAdapterFactory.java
new file mode 100644
index 0000000..e8af745
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/HadoopAdapterFactory.java
@@ -0,0 +1,30 @@
+package org.apache.hdt.ui.internal;
+
+import org.apache.hdt.core.internal.hdfs.HDFSFileStore;
+import org.apache.hdt.core.internal.model.ZNode;
+import org.apache.hdt.ui.internal.hdfs.HDFSFileStorePropertySource;
+import org.apache.hdt.ui.internal.zookeeper.ZNodePropertySource;
+import org.eclipse.core.runtime.IAdapterFactory;
+import org.eclipse.ui.views.properties.IPropertySource;
+
+public class HadoopAdapterFactory implements IAdapterFactory {
+
+	@Override
+	public Object getAdapter(Object adaptableObject, Class adapterType) {
+		if (adaptableObject instanceof HDFSFileStore) {
+			HDFSFileStore fs = (HDFSFileStore) adaptableObject;
+			if (adapterType == IPropertySource.class)
+				return new HDFSFileStorePropertySource(fs);
+		} else if (adaptableObject instanceof ZNode) {
+			ZNode z = (ZNode) adaptableObject;
+			return adapterType == IPropertySource.class ? new ZNodePropertySource(z) : null;
+		}
+		return null;
+	}
+
+	@Override
+	public Class[] getAdapterList() {
+		return new Class[] { IPropertySource.class };
+	}
+
+}
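
This factory is registered through the org.eclipse.core.runtime.adapters extension in plugin.xml, so clients go through the platform adapter manager rather than instantiating it directly. A minimal sketch (helper class hypothetical) of that lookup:

    import org.apache.hdt.core.internal.hdfs.HDFSFileStore;
    import org.eclipse.core.runtime.Platform;
    import org.eclipse.ui.views.properties.IPropertySource;

    public class AdapterLookupExample {
        /** Returns the IPropertySource registered for HDFSFileStore, or null if none is available. */
        public static IPropertySource propertySourceFor(HDFSFileStore store) {
            return (IPropertySource) Platform.getAdapterManager().getAdapter(store, IPropertySource.class);
        }
    }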

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/HadoopCommonContentProvider.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/HadoopCommonContentProvider.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/HadoopCommonContentProvider.java
new file mode 100644
index 0000000..c671c49
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/HadoopCommonContentProvider.java
@@ -0,0 +1,182 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hdt.ui.internal;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hdt.ui.internal.zookeeper.ZooKeeperCommonContentProvider;
+import org.apache.log4j.Logger;
+import org.eclipse.jface.viewers.Viewer;
+import org.eclipse.ui.IMemento;
+import org.eclipse.ui.navigator.ICommonContentExtensionSite;
+import org.eclipse.ui.navigator.ICommonContentProvider;
+
+/**
+ * @author Srimanth Gunturi
+ * 
+ */
+public class HadoopCommonContentProvider implements ICommonContentProvider {
+
+	private static final Logger logger = Logger.getLogger(HadoopCommonContentProvider.class);
+	private List<ICommonContentProvider> childProviders = new ArrayList<ICommonContentProvider>();
+
+	/**
+	 * 
+	 */
+	public HadoopCommonContentProvider() {
+		childProviders.add(new ZooKeeperCommonContentProvider());
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.jface.viewers.ITreeContentProvider#getElements(java.lang.
+	 * Object)
+	 */
+	@Override
+	public Object[] getElements(Object inputElement) {
+		List<Object> elements = new ArrayList<Object>();
+		for (ICommonContentProvider cp : childProviders) {
+			Object[] ces = cp.getElements(inputElement);
+			if (ces != null)
+				for (Object s : ces)
+					elements.add(s);
+		}
+		if (logger.isDebugEnabled())
+			logger.debug("getElements(" + inputElement + "): " + elements);
+		return elements.toArray();
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.jface.viewers.ITreeContentProvider#getChildren(java.lang.
+	 * Object)
+	 */
+	@Override
+	public Object[] getChildren(Object parentElement) {
+		List<Object> elements = new ArrayList<Object>();
+		for (ICommonContentProvider cp : childProviders) {
+			Object[] ces = cp.getChildren(parentElement);
+			if (ces != null)
+				for (Object s : ces)
+					elements.add(s);
+		}
+		if (logger.isDebugEnabled())
+			logger.debug("getChildren(" + parentElement + "): " + elements);
+		return elements.toArray();
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.jface.viewers.ITreeContentProvider#getParent(java.lang.Object
+	 * )
+	 */
+	@Override
+	public Object getParent(Object element) {
+		for (ICommonContentProvider cp : childProviders) {
+			Object parent = cp.getParent(element);
+			if (parent != null)
+				return parent;
+		}
+		return null;
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.jface.viewers.ITreeContentProvider#hasChildren(java.lang.
+	 * Object)
+	 */
+	@Override
+	public boolean hasChildren(Object element) {
+		for (ICommonContentProvider cp : childProviders) {
+			boolean hasChildren = cp.hasChildren(element);
+			if (hasChildren)
+				return hasChildren;
+		}
+		return false;
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.eclipse.jface.viewers.IContentProvider#dispose()
+	 */
+	@Override
+	public void dispose() {
+		for (ICommonContentProvider cp : childProviders)
+			cp.dispose();
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.jface.viewers.IContentProvider#inputChanged(org.eclipse.jface
+	 * .viewers.Viewer, java.lang.Object, java.lang.Object)
+	 */
+	@Override
+	public void inputChanged(Viewer viewer, Object oldInput, Object newInput) {
+		for (ICommonContentProvider cp : childProviders)
+			cp.inputChanged(viewer, oldInput, newInput);
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.ui.navigator.IMementoAware#restoreState(org.eclipse.ui.IMemento
+	 * )
+	 */
+	@Override
+	public void restoreState(IMemento aMemento) {
+		// TODO Auto-generated method stub
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.ui.navigator.IMementoAware#saveState(org.eclipse.ui.IMemento)
+	 */
+	@Override
+	public void saveState(IMemento aMemento) {
+		// TODO Auto-generated method stub
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.ui.navigator.ICommonContentProvider#init(org.eclipse.ui.navigator
+	 * .ICommonContentExtensionSite)
+	 */
+	@Override
+	public void init(ICommonContentExtensionSite aConfig) {
+		for (ICommonContentProvider cp : childProviders)
+			cp.init(aConfig);
+	}
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/HadoopLabelProvider.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/HadoopLabelProvider.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/HadoopLabelProvider.java
new file mode 100644
index 0000000..c0f6529
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/HadoopLabelProvider.java
@@ -0,0 +1,177 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hdt.ui.internal;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hdt.ui.internal.hdfs.HDFSLabelProvider;
+import org.apache.hdt.ui.internal.zookeeper.ZooKeeperLabelProvider;
+import org.eclipse.jface.viewers.ILabelProviderListener;
+import org.eclipse.swt.graphics.Image;
+import org.eclipse.ui.IMemento;
+import org.eclipse.ui.navigator.ICommonContentExtensionSite;
+import org.eclipse.ui.navigator.ICommonLabelProvider;
+
+/**
+ * @author Srimanth Gunturi
+ * 
+ */
+public class HadoopLabelProvider implements ICommonLabelProvider {
+	private List<ICommonLabelProvider> childProviders = new ArrayList<ICommonLabelProvider>();
+
+	/**
+	 * 
+	 */
+	public HadoopLabelProvider() {
+		childProviders.add(new HDFSLabelProvider());
+		childProviders.add(new ZooKeeperLabelProvider());
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.eclipse.jface.viewers.ILabelProvider#getImage(java.lang.Object)
+	 */
+	@Override
+	public Image getImage(Object element) {
+		for (ICommonLabelProvider lp : childProviders) {
+			Image image = lp.getImage(element);
+			if (image != null)
+				return image;
+		}
+		return null;
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.eclipse.jface.viewers.ILabelProvider#getText(java.lang.Object)
+	 */
+	@Override
+	public String getText(Object element) {
+		for (ICommonLabelProvider lp : childProviders) {
+			String text = lp.getText(element);
+			if (text != null)
+				return text;
+		}
+		return null;
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.jface.viewers.IBaseLabelProvider#addListener(org.eclipse.
+	 * jface.viewers.ILabelProviderListener)
+	 */
+	@Override
+	public void addListener(ILabelProviderListener listener) {
+		// TODO Auto-generated method stub
+
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.eclipse.jface.viewers.IBaseLabelProvider#dispose()
+	 */
+	@Override
+	public void dispose() {
+		// TODO Auto-generated method stub
+
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.jface.viewers.IBaseLabelProvider#isLabelProperty(java.lang
+	 * .Object, java.lang.String)
+	 */
+	@Override
+	public boolean isLabelProperty(Object element, String property) {
+		// TODO Auto-generated method stub
+		return false;
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.jface.viewers.IBaseLabelProvider#removeListener(org.eclipse
+	 * .jface.viewers.ILabelProviderListener)
+	 */
+	@Override
+	public void removeListener(ILabelProviderListener listener) {
+		// TODO Auto-generated method stub
+
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.ui.navigator.IMementoAware#restoreState(org.eclipse.ui.IMemento
+	 * )
+	 */
+	@Override
+	public void restoreState(IMemento aMemento) {
+		// TODO Auto-generated method stub
+
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.ui.navigator.IMementoAware#saveState(org.eclipse.ui.IMemento)
+	 */
+	@Override
+	public void saveState(IMemento aMemento) {
+		// TODO Auto-generated method stub
+
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.ui.navigator.IDescriptionProvider#getDescription(java.lang
+	 * .Object)
+	 */
+	@Override
+	public String getDescription(Object anElement) {
+		// TODO Auto-generated method stub
+		return null;
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.ui.navigator.ICommonLabelProvider#init(org.eclipse.ui.navigator
+	 * .ICommonContentExtensionSite)
+	 */
+	@Override
+	public void init(ICommonContentExtensionSite aConfig) {
+		// TODO Auto-generated method stub
+
+	}
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/HadoopPerspectiveFactory.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/HadoopPerspectiveFactory.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/HadoopPerspectiveFactory.java
new file mode 100644
index 0000000..e312632
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/HadoopPerspectiveFactory.java
@@ -0,0 +1,40 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hdt.ui.internal;
+
+import org.eclipse.ui.IPageLayout;
+import org.eclipse.ui.IPerspectiveFactory;
+
+/**
+ * 
+ * @author Srimanth Gunturi
+ */
+public class HadoopPerspectiveFactory implements IPerspectiveFactory {
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.ui.IPerspectiveFactory#createInitialLayout(org.eclipse.ui
+	 * .IPageLayout)
+	 */
+	@Override
+	public void createInitialLayout(IPageLayout layout) {
+	}
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/HadoopServersView.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/HadoopServersView.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/HadoopServersView.java
new file mode 100644
index 0000000..e12c80d
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/HadoopServersView.java
@@ -0,0 +1,29 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hdt.ui.internal;
+
+import org.eclipse.ui.navigator.CommonNavigator;
+
+/**
+ * @author Srimanth Gunturi
+ * 
+ */
+public class HadoopServersView extends CommonNavigator {
+
+	public static final String VIEW_ID = "org.apache.hdt.ui.view.servers";
+}

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/DiscardDownloadResourceAction.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/DiscardDownloadResourceAction.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/DiscardDownloadResourceAction.java
new file mode 100644
index 0000000..ce1c4d2
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/DiscardDownloadResourceAction.java
@@ -0,0 +1,132 @@
+package org.apache.hdt.ui.internal.hdfs;
+
+import java.io.File;
+import java.util.Iterator;
+
+import org.apache.hdt.core.internal.hdfs.HDFSFileStore;
+import org.apache.hdt.core.internal.hdfs.HDFSManager;
+import org.apache.hdt.core.internal.hdfs.UploadFileJob;
+import org.apache.log4j.Logger;
+import org.eclipse.core.filesystem.EFS;
+import org.eclipse.core.resources.IFolder;
+import org.eclipse.core.resources.IResource;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.core.runtime.NullProgressMonitor;
+import org.eclipse.jface.action.IAction;
+import org.eclipse.jface.dialogs.MessageDialog;
+import org.eclipse.jface.viewers.ISelection;
+import org.eclipse.jface.viewers.IStructuredSelection;
+import org.eclipse.ui.IObjectActionDelegate;
+import org.eclipse.ui.IWorkbenchPart;
+
+public class DiscardDownloadResourceAction implements IObjectActionDelegate {
+
+	private final static Logger logger = Logger.getLogger(DiscardDownloadResourceAction.class);
+	private ISelection selection;
+	private IWorkbenchPart targetPart;
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.eclipse.ui.IActionDelegate#run(org.eclipse.jface.action.IAction)
+	 */
+	@Override
+	public void run(IAction action) {
+		if (this.selection != null && !this.selection.isEmpty()) {
+			IStructuredSelection sSelection = (IStructuredSelection) this.selection;
+			@SuppressWarnings("rawtypes")
+			Iterator itr = sSelection.iterator();
+			while (itr.hasNext()) {
+				Object object = itr.next();
+				if (object instanceof IResource) {
+					IResource r = (IResource) object;
+					discardDownloadResource(r);
+				}
+			}
+		}
+	}
+
+	/**
+	 * @param r
+	 */
+	private void discardDownloadResource(IResource r) {
+		try {
+			HDFSFileStore store = (HDFSFileStore) EFS.getStore(r.getLocationURI());
+			switch (r.getType()) {
+			case IResource.FOLDER:
+				IFolder folder = (IFolder) r;
+				IResource[] members = folder.members();
+				if (members != null) {
+					for (int mc = 0; mc < members.length; mc++) {
+						discardDownloadResource(members[mc]);
+					}
+				}
+			case IResource.FILE:
+				if (store.isLocalFile()) {
+					File file = store.getLocalFile();
+					HDFSManager.INSTANCE.startServerOperation(store.toURI().toString());
+					try{
+						if (file.exists()) {
+							file.delete();
+							UploadFileJob.deleteFoldersIfEmpty(file.getParentFile());
+						}
+						r.getParent().refreshLocal(IResource.DEPTH_ONE, new NullProgressMonitor());
+					}finally{
+						HDFSManager.INSTANCE.stopServerOperation(store.toURI().toString());
+					}
+				}
+			}
+		} catch (CoreException e) {
+			MessageDialog.openError(targetPart.getSite().getShell(), "Discard HDFS Downloads", "Error discarding downloaded resource " + r.getLocationURI() + ": "
+					+ e.getMessage());
+			logger.warn(e.getMessage(), e);
+		}
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.ui.IActionDelegate#selectionChanged(org.eclipse.jface.action
+	 * .IAction, org.eclipse.jface.viewers.ISelection)
+	 */
+	@Override
+	public void selectionChanged(IAction action, ISelection selection) {
+		this.selection = selection;
+		boolean enabled = true;
+		if (this.selection != null && !this.selection.isEmpty()) {
+			IStructuredSelection sSelection = (IStructuredSelection) this.selection;
+			@SuppressWarnings("rawtypes")
+			Iterator itr = sSelection.iterator();
+			while (itr.hasNext()) {
+				Object object = itr.next();
+				if (object instanceof IResource) {
+					IResource r = (IResource) object;
+					try {
+						HDFSFileStore store = (HDFSFileStore) EFS.getStore(r.getLocationURI());
+						enabled = store.isLocalFile();
+					} catch (Throwable t) {
+						enabled = false;
+					}
+				} else
+					enabled = false;
+			}
+		} else
+			enabled = false;
+		action.setEnabled(enabled);
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.ui.IObjectActionDelegate#setActivePart(org.eclipse.jface.
+	 * action.IAction, org.eclipse.ui.IWorkbenchPart)
+	 */
+	@Override
+	public void setActivePart(IAction action, IWorkbenchPart targetPart) {
+		this.targetPart = targetPart;
+
+	}
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/DisconnectAction.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/DisconnectAction.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/DisconnectAction.java
new file mode 100644
index 0000000..aa346cc
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/DisconnectAction.java
@@ -0,0 +1,88 @@
+package org.apache.hdt.ui.internal.hdfs;
+
+import java.util.Iterator;
+
+import org.apache.hdt.core.internal.hdfs.HDFSManager;
+import org.apache.hdt.core.internal.model.HDFSServer;
+import org.apache.hdt.core.internal.model.ServerStatus;
+import org.apache.log4j.Logger;
+import org.eclipse.core.resources.IProject;
+import org.eclipse.jface.action.IAction;
+import org.eclipse.jface.viewers.ISelection;
+import org.eclipse.jface.viewers.IStructuredSelection;
+import org.eclipse.ui.IObjectActionDelegate;
+import org.eclipse.ui.IWorkbenchPart;
+
+public class DisconnectAction implements IObjectActionDelegate {
+
+	private final static Logger logger = Logger.getLogger(DisconnectAction.class);
+	private ISelection selection;
+	private IWorkbenchPart targetPart;
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.eclipse.ui.IActionDelegate#run(org.eclipse.jface.action.IAction)
+	 */
+	@Override
+	public void run(IAction action) {
+		if (this.selection != null && !this.selection.isEmpty()) {
+			IStructuredSelection sSelection = (IStructuredSelection) this.selection;
+			@SuppressWarnings("rawtypes")
+			Iterator itr = sSelection.iterator();
+			while (itr.hasNext()) {
+				Object object = itr.next();
+				if (object instanceof IProject) {
+					IProject r = (IProject) object;
+					HDFSManager.disconnectProject(r);
+				}
+			}
+		}
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.ui.IActionDelegate#selectionChanged(org.eclipse.jface.action
+	 * .IAction, org.eclipse.jface.viewers.ISelection)
+	 */
+	@Override
+	public void selectionChanged(IAction action, ISelection selection) {
+		this.selection = selection;
+		boolean enabled = true;
+		if (this.selection != null && !this.selection.isEmpty()) {
+			IStructuredSelection sSelection = (IStructuredSelection) this.selection;
+			@SuppressWarnings("rawtypes")
+			Iterator itr = sSelection.iterator();
+			while (itr.hasNext()) {
+				Object object = itr.next();
+				if (object instanceof IProject) {
+					IProject r = (IProject) object;
+					try {
+						HDFSServer server = HDFSManager.INSTANCE.getServer(r.getLocationURI().toString());
+						enabled = server == null ? false : server.getStatusCode() != ServerStatus.DISCONNECTED_VALUE;
+					} catch (Throwable t) {
+						enabled = false;
+					}
+				} else
+					enabled = false;
+			}
+		} else
+			enabled = false;
+		action.setEnabled(enabled);
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.ui.IObjectActionDelegate#setActivePart(org.eclipse.jface.
+	 * action.IAction, org.eclipse.ui.IWorkbenchPart)
+	 */
+	@Override
+	public void setActivePart(IAction action, IWorkbenchPart targetPart) {
+		this.targetPart = targetPart;
+	}
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/DownloadResourceAction.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/DownloadResourceAction.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/DownloadResourceAction.java
new file mode 100644
index 0000000..aecc122
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/DownloadResourceAction.java
@@ -0,0 +1,124 @@
+package org.apache.hdt.ui.internal.hdfs;
+
+import java.util.Iterator;
+
+import org.apache.hdt.core.hdfs.ResourceInformation.Permissions;
+import org.apache.hdt.core.internal.hdfs.DownloadFileJob;
+import org.apache.hdt.core.internal.hdfs.HDFSFileStore;
+import org.apache.log4j.Logger;
+import org.eclipse.core.filesystem.EFS;
+import org.eclipse.core.resources.IFile;
+import org.eclipse.core.resources.IFolder;
+import org.eclipse.core.resources.IResource;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.jface.action.IAction;
+import org.eclipse.jface.dialogs.MessageDialog;
+import org.eclipse.jface.viewers.ISelection;
+import org.eclipse.jface.viewers.IStructuredSelection;
+import org.eclipse.ui.IObjectActionDelegate;
+import org.eclipse.ui.IWorkbenchPart;
+
+public class DownloadResourceAction implements IObjectActionDelegate {
+
+	private final static Logger logger = Logger.getLogger(DownloadResourceAction.class);
+	private ISelection selection;
+	private IWorkbenchPart targetPart;
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.eclipse.ui.IActionDelegate#run(org.eclipse.jface.action.IAction)
+	 */
+	@Override
+	public void run(IAction action) {
+		if (this.selection != null && !this.selection.isEmpty()) {
+			IStructuredSelection sSelection = (IStructuredSelection) this.selection;
+			@SuppressWarnings("rawtypes")
+			Iterator itr = sSelection.iterator();
+			while (itr.hasNext()) {
+				Object object = itr.next();
+				if (object instanceof IResource) {
+					IResource r = (IResource) object;
+					downloadResource(r);
+				}
+			}
+		}
+	}
+
+	/**
+	 * @param r
+	 */
+	private void downloadResource(IResource r) {
+		try {
+			switch (r.getType()) {
+			case IFile.FILE:
+				DownloadFileJob dfj = new DownloadFileJob(r);
+				dfj.schedule();
+				break;
+			case IFolder.FOLDER:
+				IFolder folder = (IFolder) r;
+				IResource[] children = folder.members();
+				if (children != null) {
+					for (int cc = 0; cc < children.length; cc++) {
+						downloadResource(children[cc]);
+					}
+				}
+				break;
+			}
+		} catch (CoreException e) {
+			MessageDialog.openError(targetPart.getSite().getShell(), "Download HDFS Resources", "Error downloading resource from " + r.getLocationURI() + ": "
+					+ e.getMessage());
+			logger.warn(e.getMessage(), e);
+		}
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.ui.IActionDelegate#selectionChanged(org.eclipse.jface.action
+	 * .IAction, org.eclipse.jface.viewers.ISelection)
+	 */
+	@Override
+	public void selectionChanged(IAction action, ISelection selection) {
+		this.selection = selection;
+		boolean enabled = true;
+		if (this.selection != null && !this.selection.isEmpty()) {
+			IStructuredSelection sSelection = (IStructuredSelection) this.selection;
+			@SuppressWarnings("rawtypes")
+			Iterator itr = sSelection.iterator();
+			while (itr.hasNext()) {
+				Object object = itr.next();
+				if (object instanceof IResource) {
+					IResource r = (IResource) object;
+					try {
+						HDFSFileStore store = (HDFSFileStore) EFS.getStore(r.getLocationURI());
+						Permissions effectivePermissions = store.getEffectivePermissions();
+						if (enabled && effectivePermissions != null && !effectivePermissions.read)
+							enabled = false;
+						if (enabled)
+							enabled = !store.isLocalFile();
+					} catch (Throwable t) {
+						enabled = false;
+					}
+				} else
+					enabled = false;
+			}
+		} else
+			enabled = false;
+		action.setEnabled(enabled);
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.ui.IObjectActionDelegate#setActivePart(org.eclipse.jface.
+	 * action.IAction, org.eclipse.ui.IWorkbenchPart)
+	 */
+	@Override
+	public void setActivePart(IAction action, IWorkbenchPart targetPart) {
+		this.targetPart = targetPart;
+	}
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/HDFSCommonContentProvider.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/HDFSCommonContentProvider.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/HDFSCommonContentProvider.java
new file mode 100644
index 0000000..fa53a6a
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/HDFSCommonContentProvider.java
@@ -0,0 +1,188 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hdt.ui.internal.hdfs;
+
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hdt.core.internal.HadoopManager;
+import org.apache.hdt.core.internal.hdfs.HDFSManager;
+import org.apache.hdt.core.internal.model.HDFSServer;
+import org.apache.hdt.core.internal.model.HadoopPackage;
+import org.apache.log4j.Logger;
+import org.eclipse.core.resources.IContainer;
+import org.eclipse.core.resources.IFile;
+import org.eclipse.core.resources.ResourcesPlugin;
+import org.eclipse.core.runtime.Path;
+import org.eclipse.emf.common.notify.Notification;
+import org.eclipse.emf.ecore.util.EContentAdapter;
+import org.eclipse.jface.viewers.Viewer;
+import org.eclipse.swt.widgets.Display;
+import org.eclipse.ui.IMemento;
+import org.eclipse.ui.IViewPart;
+import org.eclipse.ui.PartInitException;
+import org.eclipse.ui.PlatformUI;
+import org.eclipse.ui.navigator.CommonNavigator;
+import org.eclipse.ui.navigator.CommonViewer;
+import org.eclipse.ui.navigator.ICommonContentExtensionSite;
+import org.eclipse.ui.navigator.ICommonContentProvider;
+import org.eclipse.ui.navigator.INavigatorContentService;
+
+public class HDFSCommonContentProvider implements ICommonContentProvider {
+
+	private static final Logger logger = Logger.getLogger(HDFSCommonContentProvider.class);
+
+	private String viewerId;
+	private Display display = null;
+
+	private EContentAdapter serversListener;
+
+	@Override
+	public Object[] getElements(Object inputElement) {
+		return null;
+	}
+
+	@Override
+	public Object[] getChildren(Object parentElement) {
+		// TODO Auto-generated method stub
+		return null;
+	}
+
+	@Override
+	public Object getParent(Object element) {
+		// TODO Auto-generated method stub
+		return null;
+	}
+
+	@Override
+	public boolean hasChildren(Object element) {
+		return false;
+	}
+
+	@Override
+	public void dispose() {
+		if (serversListener != null) {
+			HadoopManager.INSTANCE.getServers().eAdapters().remove(serversListener);
+			serversListener = null;
+		}
+	}
+
+	@Override
+	public void inputChanged(Viewer viewer, Object oldInput, Object newInput) {
+		// TODO Auto-generated method stub
+
+	}
+
+	@Override
+	public void restoreState(IMemento aMemento) {
+		// TODO Auto-generated method stub
+
+	}
+
+	@Override
+	public void saveState(IMemento aMemento) {
+		// TODO Auto-generated method stub
+
+	}
+
+	@Override
+	public void init(ICommonContentExtensionSite aConfig) {
+		INavigatorContentService cs = aConfig.getService();
+		viewerId = cs.getViewerId();
+		this.display = PlatformUI.getWorkbench().getActiveWorkbenchWindow().getShell().getDisplay();
+		hookRefreshResources();
+	}
+
+	protected void hookRefreshResources() {
+		serversListener = new EContentAdapter() {
+			private List<String> addedUris = new ArrayList<String>();
+
+			public boolean isAdapterForType(Object type) {
+				return HadoopPackage.eINSTANCE.getHDFSServer().isInstance(type);
+			}
+
+			public void notifyChanged(org.eclipse.emf.common.notify.Notification notification) {
+				super.notifyChanged(notification);
+				if (notification.getNotifier() instanceof HDFSServer) {
+					int featureID = notification.getFeatureID(HDFSServer.class);
+					if (featureID == HadoopPackage.HDFS_SERVER__OPERATION_UR_IS) {
+						if (notification.getEventType() == Notification.ADD) {
+							Object[] array = ((HDFSServer) notification.getNotifier()).getOperationURIs().toArray();
+							for (int ac = 0; ac < array.length; ac++) {
+								String uri = (String) array[ac];
+								addedUris.add(uri);
+							}
+						} else if (addedUris.size() > 0 && display != null) {
+							display.asyncExec(new Runnable() {
+								@Override
+								public void run() {
+									CommonViewer viewer = null;
+									try {
+										IViewPart view = PlatformUI.getWorkbench().getActiveWorkbenchWindow().getActivePage().showView(viewerId);
+										if (view instanceof CommonNavigator) {
+											CommonNavigator navigator = (CommonNavigator) view;
+											viewer = navigator.getCommonViewer();
+										}
+									} catch (PartInitException e) {
+										if (logger.isDebugEnabled())
+											logger.debug("Unable to show view [" + viewerId + "]", e);
+									}
+									if (viewer != null) {
+										Object[] addedArray = addedUris.toArray();
+										for (int ac = 0; ac < addedArray.length; ac++) {
+											String uri = (String) addedArray[ac];
+											HDFSServer server = HDFSManager.INSTANCE.getServer(uri);
+											if (server != null) {
+												try {
+													URI relativeURI = org.eclipse.core.runtime.URIUtil.makeRelative(new URI(uri), new URI(server.getUri()));
+													if (relativeURI != null) {
+														String projectName = HDFSManager.INSTANCE.getProjectName(server);
+														if (projectName != null) {
+															IFile file = ResourcesPlugin.getWorkspace().getRoot().getFile(
+																	new Path(projectName + "/" + relativeURI.toString()));
+															if (file != null) {
+																viewer.refresh(file, true);
+																if (logger.isDebugEnabled())
+																	logger.debug("EMF listener: Refreshed [" + file.getFullPath() + "]");
+																IContainer parent = file.getParent();
+																while (parent != null) {
+																	viewer.refresh(parent, true);
+																	parent = parent.getParent();
+																}
+															}
+														}
+													}
+												} catch (Throwable t) {
+													if (logger.isDebugEnabled())
+														logger.debug(t);
+												}
+											}
+										}
+									}
+									addedUris.clear();
+								}
+							});
+						}
+
+					}
+				}
+			}
+		};
+		HadoopManager.INSTANCE.getServers().eAdapters().add(serversListener);
+	}
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/HDFSFileStorePropertySource.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/HDFSFileStorePropertySource.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/HDFSFileStorePropertySource.java
new file mode 100644
index 0000000..b921cf2
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/HDFSFileStorePropertySource.java
@@ -0,0 +1,177 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hdt.ui.internal.hdfs;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hdt.core.hdfs.ResourceInformation.Permissions;
+import org.apache.hdt.core.internal.hdfs.HDFSFileStore;
+import org.eclipse.core.filesystem.EFS;
+import org.eclipse.ui.views.properties.IPropertyDescriptor;
+import org.eclipse.ui.views.properties.IPropertySource;
+import org.eclipse.ui.views.properties.PropertyDescriptor;
+
+/**
+ * @author Srimanth Gunturi
+ * 
+ */
+public class HDFSFileStorePropertySource implements IPropertySource {
+
+	private enum Property {
+		USER, GROUP, ISLOCAL,
+		PERMISSIONS, USER_PERMISSIONS, 
+		GROUP_PERMISSIONS, OTHER_PERMISSIONS,
+	}
+
+	private final HDFSFileStore fileStore;
+
+	/**
+	 * @param fileStore
+	 *            the HDFS file store whose properties are shown
+	 */
+	public HDFSFileStorePropertySource(HDFSFileStore fileStore) {
+		this.fileStore = fileStore;
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.eclipse.ui.views.properties.IPropertySource#getEditableValue()
+	 */
+	@Override
+	public Object getEditableValue() {
+		// Property values shown for HDFS resources are read-only.
+		return null;
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.ui.views.properties.IPropertySource#getPropertyDescriptors()
+	 */
+	@Override
+	public IPropertyDescriptor[] getPropertyDescriptors() {
+		List<IPropertyDescriptor> descriptors = new ArrayList<IPropertyDescriptor>();
+		final PropertyDescriptor user = new PropertyDescriptor(Property.USER, "User");
+		final PropertyDescriptor group = new PropertyDescriptor(Property.GROUP, "Group");
+		final PropertyDescriptor isDownloaded = new PropertyDescriptor(Property.ISLOCAL, "Is downloaded");
+		final PropertyDescriptor perms = new PropertyDescriptor(Property.PERMISSIONS, "Effective Permissions");
+		final PropertyDescriptor userPerms = new PropertyDescriptor(Property.USER_PERMISSIONS, "User Permissions");
+		final PropertyDescriptor groupPerms = new PropertyDescriptor(Property.GROUP_PERMISSIONS, "Group Permissions");
+		final PropertyDescriptor otherPerms = new PropertyDescriptor(Property.OTHER_PERMISSIONS, "Other Permissions");
+		descriptors.add(user);
+		descriptors.add(group);
+		descriptors.add(isDownloaded);
+		descriptors.add(perms);
+		descriptors.add(userPerms);
+		descriptors.add(groupPerms);
+		descriptors.add(otherPerms);
+		return descriptors.toArray(new IPropertyDescriptor[descriptors.size()]);
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.ui.views.properties.IPropertySource#getPropertyValue(java
+	 * .lang.Object)
+	 */
+	@Override
+	public Object getPropertyValue(Object id) {
+		if (Property.USER.equals(id))
+			return this.fileStore.getServerResourceInfo() == null ? null : fileStore.getServerResourceInfo().getOwner();
+		else if (Property.GROUP.equals(id))
+			return this.fileStore.getServerResourceInfo() == null ? null : fileStore.getServerResourceInfo().getGroup();
+		else if (Property.ISLOCAL.equals(id))
+			return this.fileStore.isLocalFile();
+		else if (Property.PERMISSIONS.equals(id)){
+			String perms = "";
+			final Permissions effectivePermissions = this.fileStore.getEffectivePermissions();
+			if (effectivePermissions != null) {
+				perms += effectivePermissions.read ? "r" : "-";
+				perms += effectivePermissions.write ? "w" : "-";
+				perms += effectivePermissions.execute ? "x" : "-";
+			} else {
+				perms = "???";
+			}
+			return perms;
+		} else if (Property.USER_PERMISSIONS.equals(id)){
+			String perms = "";
+			perms += this.fileStore.fetchInfo().getAttribute(EFS.ATTRIBUTE_OWNER_READ) ? "r" : "-";
+			perms += this.fileStore.fetchInfo().getAttribute(EFS.ATTRIBUTE_OWNER_WRITE) ? "w" : "-";
+			perms += this.fileStore.fetchInfo().getAttribute(EFS.ATTRIBUTE_OWNER_EXECUTE) ? "x" : "-";
+			return perms;
+		} else if (Property.GROUP_PERMISSIONS.equals(id)){
+			String perms = "";
+			perms += this.fileStore.fetchInfo().getAttribute(EFS.ATTRIBUTE_GROUP_READ) ? "r" : "-";
+			perms += this.fileStore.fetchInfo().getAttribute(EFS.ATTRIBUTE_GROUP_WRITE) ? "w" : "-";
+			perms += this.fileStore.fetchInfo().getAttribute(EFS.ATTRIBUTE_GROUP_EXECUTE) ? "x" : "-";
+			return perms;
+		} else if (Property.OTHER_PERMISSIONS.equals(id)){
+			String perms = "";
+			perms += this.fileStore.fetchInfo().getAttribute(EFS.ATTRIBUTE_OTHER_READ) ? "r" : "-";
+			perms += this.fileStore.fetchInfo().getAttribute(EFS.ATTRIBUTE_OTHER_WRITE) ? "w" : "-";
+			perms += this.fileStore.fetchInfo().getAttribute(EFS.ATTRIBUTE_OTHER_EXECUTE) ? "x" : "-";
+			return perms;
+		}
+		return null;
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.ui.views.properties.IPropertySource#isPropertySet(java.lang
+	 * .Object)
+	 */
+	@Override
+	public boolean isPropertySet(Object id) {
+		// No default values are defined, so properties are never reported as set.
+		return false;
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.ui.views.properties.IPropertySource#resetPropertyValue(java
+	 * .lang.Object)
+	 */
+	@Override
+	public void resetPropertyValue(Object id) {
+		// Read-only property source; nothing to reset.
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.ui.views.properties.IPropertySource#setPropertyValue(java
+	 * .lang.Object, java.lang.Object)
+	 */
+	@Override
+	public void setPropertyValue(Object id, Object value) {
+		// Read-only property source; values cannot be edited from the Properties view.
+	}
+
+}
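
For context, the following is a hedged sketch (not part of this patch) of how a property
source such as HDFSFileStorePropertySource is typically surfaced in the Properties view:
an IAdapterFactory, contributed through the org.eclipse.core.runtime.adapters extension
point, adapts a selected IResource to IPropertySource. The class name
HDFSPropertySourceAdapterFactory is illustrative only; the actual wiring in HDT may differ.

    package org.apache.hdt.ui.internal.hdfs;

    import org.eclipse.core.filesystem.EFS;
    import org.eclipse.core.filesystem.IFileStore;
    import org.eclipse.core.resources.IResource;
    import org.eclipse.core.runtime.CoreException;
    import org.eclipse.core.runtime.IAdapterFactory;
    import org.eclipse.ui.views.properties.IPropertySource;

    import org.apache.hdt.core.internal.hdfs.HDFSFileStore;

    public class HDFSPropertySourceAdapterFactory implements IAdapterFactory {

        public Object getAdapter(Object adaptableObject, Class adapterType) {
            if (adapterType == IPropertySource.class && adaptableObject instanceof IResource) {
                IResource resource = (IResource) adaptableObject;
                if (resource.getLocationURI() != null) {
                    try {
                        // Resolve the backing EFS store; only HDFS-backed resources qualify.
                        IFileStore store = EFS.getStore(resource.getLocationURI());
                        if (store instanceof HDFSFileStore)
                            return new HDFSFileStorePropertySource((HDFSFileStore) store);
                    } catch (CoreException e) {
                        // Not an HDFS-backed resource; contribute nothing.
                    }
                }
            }
            return null;
        }

        public Class[] getAdapterList() {
            return new Class[] { IPropertySource.class };
        }
    }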

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/HDFSLabelProvider.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/HDFSLabelProvider.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/HDFSLabelProvider.java
new file mode 100644
index 0000000..a5543a7
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/HDFSLabelProvider.java
@@ -0,0 +1,153 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hdt.ui.internal.hdfs;
+
+import java.net.URI;
+
+import org.apache.hdt.core.internal.hdfs.HDFSFileSystem;
+import org.apache.hdt.ui.Activator;
+import org.eclipse.core.resources.IProject;
+import org.eclipse.jface.viewers.ILabelProviderListener;
+import org.eclipse.swt.graphics.Image;
+import org.eclipse.ui.IMemento;
+import org.eclipse.ui.navigator.ICommonContentExtensionSite;
+import org.eclipse.ui.navigator.ICommonLabelProvider;
+
+/**
+ * @author Srimanth Gunturi
+ * 
+ */
+public class HDFSLabelProvider implements ICommonLabelProvider {
+
+	/**
+	 * 
+	 */
+	public HDFSLabelProvider() {
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.eclipse.jface.viewers.ILabelProvider#getImage(java.lang.Object)
+	 */
+	@Override
+	public Image getImage(Object element) {
+		if (element instanceof IProject) {
+			IProject project = (IProject) element;
+			URI locationURI = project.getLocationURI();
+			if (locationURI != null && HDFSFileSystem.SCHEME.equals(locationURI.getScheme())) {
+				return Activator.IMAGE_HDFS;
+			}
+		}
+		return null;
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.eclipse.jface.viewers.ILabelProvider#getText(java.lang.Object)
+	 */
+	@Override
+	public String getText(Object element) {
+		return null;
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.jface.viewers.IBaseLabelProvider#addListener(org.eclipse.
+	 * jface.viewers.ILabelProviderListener)
+	 */
+	@Override
+	public void addListener(ILabelProviderListener listener) {
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.eclipse.jface.viewers.IBaseLabelProvider#dispose()
+	 */
+	@Override
+	public void dispose() {
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.jface.viewers.IBaseLabelProvider#isLabelProperty(java.lang
+	 * .Object, java.lang.String)
+	 */
+	@Override
+	public boolean isLabelProperty(Object element, String property) {
+		return false;
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.jface.viewers.IBaseLabelProvider#removeListener(org.eclipse
+	 * .jface.viewers.ILabelProviderListener)
+	 */
+	@Override
+	public void removeListener(ILabelProviderListener listener) {
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.ui.navigator.IMementoAware#restoreState(org.eclipse.ui.IMemento
+	 * )
+	 */
+	@Override
+	public void restoreState(IMemento aMemento) {
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.ui.navigator.IMementoAware#saveState(org.eclipse.ui.IMemento)
+	 */
+	@Override
+	public void saveState(IMemento aMemento) {
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.ui.navigator.IDescriptionProvider#getDescription(java.lang
+	 * .Object)
+	 */
+	@Override
+	public String getDescription(Object anElement) {
+		return null;
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.ui.navigator.ICommonLabelProvider#init(org.eclipse.ui.navigator
+	 * .ICommonContentExtensionSite)
+	 */
+	@Override
+	public void init(ICommonContentExtensionSite aConfig) {
+	}
+}

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/HDFSLightweightLabelDecorator.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/HDFSLightweightLabelDecorator.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/HDFSLightweightLabelDecorator.java
new file mode 100644
index 0000000..9424a45
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/HDFSLightweightLabelDecorator.java
@@ -0,0 +1,163 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hdt.ui.internal.hdfs;
+
+import java.net.URI;
+
+import org.apache.hdt.core.hdfs.ResourceInformation.Permissions;
+import org.apache.hdt.core.internal.hdfs.HDFSFileStore;
+import org.apache.hdt.core.internal.hdfs.HDFSManager;
+import org.apache.hdt.core.internal.hdfs.HDFSURI;
+import org.apache.hdt.core.internal.model.HDFSServer;
+import org.apache.hdt.core.internal.model.ServerStatus;
+import org.apache.log4j.Logger;
+import org.eclipse.core.filesystem.EFS;
+import org.eclipse.core.resources.IProject;
+import org.eclipse.core.resources.IResource;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.jface.viewers.IDecoration;
+import org.eclipse.jface.viewers.ILabelProviderListener;
+import org.eclipse.jface.viewers.ILightweightLabelDecorator;
+
+public class HDFSLightweightLabelDecorator implements ILightweightLabelDecorator {
+	private static final Logger logger = Logger.getLogger(HDFSLightweightLabelDecorator.class);
+
+	/**
+	 * 
+	 */
+	public HDFSLightweightLabelDecorator() {
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.jface.viewers.IBaseLabelProvider#addListener(org.eclipse.
+	 * jface.viewers.ILabelProviderListener)
+	 */
+	@Override
+	public void addListener(ILabelProviderListener listener) {
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.eclipse.jface.viewers.IBaseLabelProvider#dispose()
+	 */
+	@Override
+	public void dispose() {
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.jface.viewers.IBaseLabelProvider#isLabelProperty(java.lang
+	 * .Object, java.lang.String)
+	 */
+	@Override
+	public boolean isLabelProperty(Object element, String property) {
+		// Decorations do not depend on individual label properties.
+		return false;
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.jface.viewers.IBaseLabelProvider#removeListener(org.eclipse
+	 * .jface.viewers.ILabelProviderListener)
+	 */
+	@Override
+	public void removeListener(ILabelProviderListener listener) {
+		// Listeners are not tracked by this decorator.
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.eclipse.jface.viewers.ILightweightLabelDecorator#decorate(java.lang
+	 * .Object, org.eclipse.jface.viewers.IDecoration)
+	 */
+	@Override
+	public void decorate(Object element, IDecoration decoration) {
+		if (element instanceof IResource) {
+			IResource r = (IResource) element;
+			URI locationURI = r.getLocationURI();
+			if (locationURI != null && HDFSURI.SCHEME.equals(locationURI.getScheme())) {
+				try {
+					if (r instanceof IProject) {
+						final HDFSManager hdfsManager = HDFSManager.INSTANCE;
+						HDFSServer server = hdfsManager.getServer(locationURI.toString());
+						if (server != null) {
+							String serverUrl = server.getUri();
+							String userId = server.getUserId();
+							if (userId == null) {
+								try {
+									userId = hdfsManager.getClient(serverUrl).getDefaultUserAndGroupIds().get(0);
+								} catch (Throwable e) {
+									userId = null;
+								}
+							}
+							if (userId == null)
+								userId = "";
+							else
+								userId = userId + "@";
+							if (serverUrl != null) {
+								try {
+									URI uri = new URI(serverUrl);
+									serverUrl = serverUrl.substring(uri.getScheme().length() + 3);
+								} catch (Throwable e) {
+									// Best-effort shortening only; fall back to the full URI string.
+								}
+							}
+							if (serverUrl != null && serverUrl.endsWith("/"))
+								serverUrl = serverUrl.substring(0, serverUrl.length() - 1);
+							decoration.addSuffix(" " + userId + serverUrl);
+							if (server.getStatusCode() == ServerStatus.DISCONNECTED_VALUE)
+								decoration.addOverlay(org.apache.hdt.ui.Activator.IMAGE_OFFLINE_OVR);
+							else
+								decoration.addOverlay(org.apache.hdt.ui.Activator.IMAGE_ONLINE_OVR);
+						} else
+							decoration.addSuffix(" [Unknown server]");
+					} else
+						decorate((HDFSFileStore) EFS.getStore(locationURI), decoration);
+				} catch (CoreException e) {
+					logger.debug(e.getMessage(), e);
+				}
+			}
+		}
+	}
+
+	protected void decorate(HDFSFileStore store, IDecoration decoration) {
+		if (store != null) {
+			if (store.isLocalFile())
+				decoration.addOverlay(org.apache.hdt.ui.Activator.IMAGE_LOCAL_OVR, IDecoration.BOTTOM_LEFT);
+			else if (store.isRemoteFile())
+				decoration.addOverlay(org.apache.hdt.ui.Activator.IMAGE_REMOTE_OVR, IDecoration.BOTTOM_LEFT);
+			if (store.isLocalOnly())
+				decoration.addOverlay(org.apache.hdt.ui.Activator.IMAGE_OUTGOING_OVR, IDecoration.BOTTOM_RIGHT);
+
+			Permissions effectivePermissions = store.getEffectivePermissions();
+			if (effectivePermissions != null && !effectivePermissions.read && !effectivePermissions.write)
+				decoration.addOverlay(org.apache.hdt.ui.Activator.IMAGE_READONLY_OVR);
+		}
+	}
+
+}


[5/8] HDT-32: Merge the code base of Hadoop-Eclipse project into HDT. Contributed by Srimanth Gunturi

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/impl/HDFSServerImpl.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/impl/HDFSServerImpl.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/impl/HDFSServerImpl.java
new file mode 100644
index 0000000..ed25f07
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/impl/HDFSServerImpl.java
@@ -0,0 +1,310 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *  
+ */
+package org.apache.hdt.core.internal.model.impl;
+
+import java.util.Collection;
+
+import org.apache.hdt.core.internal.model.HDFSServer;
+import org.apache.hdt.core.internal.model.HadoopPackage;
+
+import org.eclipse.emf.common.notify.Notification;
+
+import org.eclipse.emf.common.util.EList;
+import org.eclipse.emf.ecore.EClass;
+
+import org.eclipse.emf.ecore.impl.ENotificationImpl;
+import org.eclipse.emf.ecore.impl.EObjectImpl;
+import org.eclipse.emf.ecore.util.EDataTypeUniqueEList;
+
+/**
+ * <!-- begin-user-doc -->
+ * An implementation of the model object '<em><b>HDFS Server</b></em>'.
+ * <!-- end-user-doc -->
+ * <p>
+ * The following features are implemented:
+ * <ul>
+ *   <li>{@link org.apache.hdt.core.internal.model.impl.HDFSServerImpl#isLoaded <em>Loaded</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.impl.HDFSServerImpl#getOperationURIs <em>Operation UR Is</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.impl.HDFSServerImpl#getUserId <em>User Id</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.impl.HDFSServerImpl#getGroupIds <em>Group Ids</em>}</li>
+ * </ul>
+ * </p>
+ *
+ * @generated
+ */
+public class HDFSServerImpl extends ServerImpl implements HDFSServer {
+	/**
+	 * The default value of the '{@link #isLoaded() <em>Loaded</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #isLoaded()
+	 * @generated
+	 * @ordered
+	 */
+	protected static final boolean LOADED_EDEFAULT = false;
+
+	/**
+	 * The cached value of the '{@link #isLoaded() <em>Loaded</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #isLoaded()
+	 * @generated
+	 * @ordered
+	 */
+	protected boolean loaded = LOADED_EDEFAULT;
+
+	/**
+	 * The cached value of the '{@link #getOperationURIs() <em>Operation UR Is</em>}' attribute list.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getOperationURIs()
+	 * @generated
+	 * @ordered
+	 */
+	protected EList<String> operationURIs;
+
+	/**
+	 * The default value of the '{@link #getUserId() <em>User Id</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getUserId()
+	 * @generated
+	 * @ordered
+	 */
+	protected static final String USER_ID_EDEFAULT = null;
+
+	/**
+	 * The cached value of the '{@link #getUserId() <em>User Id</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getUserId()
+	 * @generated
+	 * @ordered
+	 */
+	protected String userId = USER_ID_EDEFAULT;
+
+	/**
+	 * The cached value of the '{@link #getGroupIds() <em>Group Ids</em>}' attribute list.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getGroupIds()
+	 * @generated
+	 * @ordered
+	 */
+	protected EList<String> groupIds;
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	protected HDFSServerImpl() {
+		super();
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	@Override
+	protected EClass eStaticClass() {
+		return HadoopPackage.Literals.HDFS_SERVER;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public boolean isLoaded() {
+		return loaded;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public void setLoaded(boolean newLoaded) {
+		boolean oldLoaded = loaded;
+		loaded = newLoaded;
+		if (eNotificationRequired())
+			eNotify(new ENotificationImpl(this, Notification.SET, HadoopPackage.HDFS_SERVER__LOADED, oldLoaded, loaded));
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public EList<String> getOperationURIs() {
+		if (operationURIs == null) {
+			operationURIs = new EDataTypeUniqueEList<String>(String.class, this, HadoopPackage.HDFS_SERVER__OPERATION_UR_IS);
+		}
+		return operationURIs;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public String getUserId() {
+		return userId;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public void setUserId(String newUserId) {
+		String oldUserId = userId;
+		userId = newUserId;
+		if (eNotificationRequired())
+			eNotify(new ENotificationImpl(this, Notification.SET, HadoopPackage.HDFS_SERVER__USER_ID, oldUserId, userId));
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public EList<String> getGroupIds() {
+		if (groupIds == null) {
+			groupIds = new EDataTypeUniqueEList<String>(String.class, this, HadoopPackage.HDFS_SERVER__GROUP_IDS);
+		}
+		return groupIds;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	@Override
+	public Object eGet(int featureID, boolean resolve, boolean coreType) {
+		switch (featureID) {
+			case HadoopPackage.HDFS_SERVER__LOADED:
+				return isLoaded();
+			case HadoopPackage.HDFS_SERVER__OPERATION_UR_IS:
+				return getOperationURIs();
+			case HadoopPackage.HDFS_SERVER__USER_ID:
+				return getUserId();
+			case HadoopPackage.HDFS_SERVER__GROUP_IDS:
+				return getGroupIds();
+		}
+		return super.eGet(featureID, resolve, coreType);
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	@SuppressWarnings("unchecked")
+	@Override
+	public void eSet(int featureID, Object newValue) {
+		switch (featureID) {
+			case HadoopPackage.HDFS_SERVER__LOADED:
+				setLoaded((Boolean)newValue);
+				return;
+			case HadoopPackage.HDFS_SERVER__OPERATION_UR_IS:
+				getOperationURIs().clear();
+				getOperationURIs().addAll((Collection<? extends String>)newValue);
+				return;
+			case HadoopPackage.HDFS_SERVER__USER_ID:
+				setUserId((String)newValue);
+				return;
+			case HadoopPackage.HDFS_SERVER__GROUP_IDS:
+				getGroupIds().clear();
+				getGroupIds().addAll((Collection<? extends String>)newValue);
+				return;
+		}
+		super.eSet(featureID, newValue);
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	@Override
+	public void eUnset(int featureID) {
+		switch (featureID) {
+			case HadoopPackage.HDFS_SERVER__LOADED:
+				setLoaded(LOADED_EDEFAULT);
+				return;
+			case HadoopPackage.HDFS_SERVER__OPERATION_UR_IS:
+				getOperationURIs().clear();
+				return;
+			case HadoopPackage.HDFS_SERVER__USER_ID:
+				setUserId(USER_ID_EDEFAULT);
+				return;
+			case HadoopPackage.HDFS_SERVER__GROUP_IDS:
+				getGroupIds().clear();
+				return;
+		}
+		super.eUnset(featureID);
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	@Override
+	public boolean eIsSet(int featureID) {
+		switch (featureID) {
+			case HadoopPackage.HDFS_SERVER__LOADED:
+				return loaded != LOADED_EDEFAULT;
+			case HadoopPackage.HDFS_SERVER__OPERATION_UR_IS:
+				return operationURIs != null && !operationURIs.isEmpty();
+			case HadoopPackage.HDFS_SERVER__USER_ID:
+				return USER_ID_EDEFAULT == null ? userId != null : !USER_ID_EDEFAULT.equals(userId);
+			case HadoopPackage.HDFS_SERVER__GROUP_IDS:
+				return groupIds != null && !groupIds.isEmpty();
+		}
+		return super.eIsSet(featureID);
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	@Override
+	public String toString() {
+		if (eIsProxy()) return super.toString();
+
+		StringBuffer result = new StringBuffer(super.toString());
+		result.append(" (loaded: ");
+		result.append(loaded);
+		result.append(", operationURIs: ");
+		result.append(operationURIs);
+		result.append(", userId: ");
+		result.append(userId);
+		result.append(", groupIds: ");
+		result.append(groupIds);
+		result.append(')');
+		return result.toString();
+	}
+
+} //HDFSServerImpl

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/impl/HadoopFactoryImpl.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/impl/HadoopFactoryImpl.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/impl/HadoopFactoryImpl.java
new file mode 100644
index 0000000..c3e5c2b
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/impl/HadoopFactoryImpl.java
@@ -0,0 +1,195 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *  
+ */
+package org.apache.hdt.core.internal.model.impl;
+
+import org.apache.hdt.core.internal.model.*;
+
+import org.eclipse.emf.ecore.EClass;
+import org.eclipse.emf.ecore.EDataType;
+import org.eclipse.emf.ecore.EObject;
+import org.eclipse.emf.ecore.EPackage;
+
+import org.eclipse.emf.ecore.impl.EFactoryImpl;
+
+import org.eclipse.emf.ecore.plugin.EcorePlugin;
+
+/**
+ * <!-- begin-user-doc -->
+ * An implementation of the model <b>Factory</b>.
+ * <!-- end-user-doc -->
+ * @generated
+ */
+public class HadoopFactoryImpl extends EFactoryImpl implements HadoopFactory {
+	/**
+	 * Creates the default factory implementation.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public static HadoopFactory init() {
+		try {
+			HadoopFactory theHadoopFactory = (HadoopFactory)EPackage.Registry.INSTANCE.getEFactory("http://hadoop/1.0"); 
+			if (theHadoopFactory != null) {
+				return theHadoopFactory;
+			}
+		}
+		catch (Exception exception) {
+			EcorePlugin.INSTANCE.log(exception);
+		}
+		return new HadoopFactoryImpl();
+	}
+
+	/**
+	 * Creates an instance of the factory.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public HadoopFactoryImpl() {
+		super();
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	@Override
+	public EObject create(EClass eClass) {
+		switch (eClass.getClassifierID()) {
+			case HadoopPackage.HDFS_SERVER: return createHDFSServer();
+			case HadoopPackage.SERVERS: return createServers();
+			case HadoopPackage.ZOO_KEEPER_SERVER: return createZooKeeperServer();
+			case HadoopPackage.ZNODE: return createZNode();
+			default:
+				throw new IllegalArgumentException("The class '" + eClass.getName() + "' is not a valid classifier");
+		}
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	@Override
+	public Object createFromString(EDataType eDataType, String initialValue) {
+		switch (eDataType.getClassifierID()) {
+			case HadoopPackage.SERVER_STATUS:
+				return createServerStatusFromString(eDataType, initialValue);
+			default:
+				throw new IllegalArgumentException("The datatype '" + eDataType.getName() + "' is not a valid classifier");
+		}
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	@Override
+	public String convertToString(EDataType eDataType, Object instanceValue) {
+		switch (eDataType.getClassifierID()) {
+			case HadoopPackage.SERVER_STATUS:
+				return convertServerStatusToString(eDataType, instanceValue);
+			default:
+				throw new IllegalArgumentException("The datatype '" + eDataType.getName() + "' is not a valid classifier");
+		}
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public HDFSServer createHDFSServer() {
+		HDFSServerImpl hdfsServer = new HDFSServerImpl();
+		return hdfsServer;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public Servers createServers() {
+		ServersImpl servers = new ServersImpl();
+		return servers;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public ZooKeeperServer createZooKeeperServer() {
+		ZooKeeperServerImpl zooKeeperServer = new ZooKeeperServerImpl();
+		return zooKeeperServer;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public ZNode createZNode() {
+		ZNodeImpl zNode = new ZNodeImpl();
+		return zNode;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public ServerStatus createServerStatusFromString(EDataType eDataType, String initialValue) {
+		ServerStatus result = ServerStatus.get(initialValue);
+		if (result == null) throw new IllegalArgumentException("The value '" + initialValue + "' is not a valid enumerator of '" + eDataType.getName() + "'");
+		return result;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public String convertServerStatusToString(EDataType eDataType, Object instanceValue) {
+		return instanceValue == null ? null : instanceValue.toString();
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public HadoopPackage getHadoopPackage() {
+		return (HadoopPackage)getEPackage();
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @deprecated
+	 * @generated
+	 */
+	@Deprecated
+	public static HadoopPackage getPackage() {
+		return HadoopPackage.eINSTANCE;
+	}
+
+} //HadoopFactoryImpl
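
A minimal usage sketch (not part of this patch) of the generated factory: clients obtain
model objects through the HadoopFactory.eINSTANCE singleton rather than by instantiating
HadoopFactoryImpl. The class name and server values below are illustrative only.

    import org.apache.hdt.core.internal.model.HDFSServer;
    import org.apache.hdt.core.internal.model.HadoopFactory;

    public class HdfsServerModelExample {
        public static void main(String[] args) {
            // Create an HDFSServer model object via the generated factory singleton.
            HDFSServer server = HadoopFactory.eINSTANCE.createHDFSServer();
            server.setName("example-cluster");
            server.setUri("hdfs://localhost:9000/");
            server.setUserId("hdfs-user");
            // toString() (see HDFSServerImpl above) reports loaded, operationURIs,
            // userId and groupIds in addition to the inherited Server attributes.
            System.out.println(server);
        }
    }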

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/impl/HadoopPackageImpl.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/impl/HadoopPackageImpl.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/impl/HadoopPackageImpl.java
new file mode 100644
index 0000000..a698d56
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/impl/HadoopPackageImpl.java
@@ -0,0 +1,621 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *  
+ */
+package org.apache.hdt.core.internal.model.impl;
+
+
+import org.apache.hdt.core.internal.model.HDFSServer;
+import org.apache.hdt.core.internal.model.HadoopFactory;
+import org.apache.hdt.core.internal.model.HadoopPackage;
+import org.apache.hdt.core.internal.model.Server;
+import org.apache.hdt.core.internal.model.ServerStatus;
+import org.apache.hdt.core.internal.model.Servers;
+import org.apache.hdt.core.internal.model.ZNode;
+import org.apache.hdt.core.internal.model.ZNodeType;
+import org.apache.hdt.core.internal.model.ZooKeeperServer;
+import org.eclipse.emf.ecore.EAttribute;
+import org.eclipse.emf.ecore.EClass;
+import org.eclipse.emf.ecore.EEnum;
+import org.eclipse.emf.ecore.EPackage;
+import org.eclipse.emf.ecore.EReference;
+
+import org.eclipse.emf.ecore.impl.EPackageImpl;
+
+/**
+ * <!-- begin-user-doc -->
+ * An implementation of the model <b>Package</b>.
+ * <!-- end-user-doc -->
+ * @generated
+ */
+public class HadoopPackageImpl extends EPackageImpl implements HadoopPackage {
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	private EClass hdfsServerEClass = null;
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	private EClass serversEClass = null;
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	private EClass serverEClass = null;
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	private EClass zooKeeperServerEClass = null;
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	private EClass zNodeEClass = null;
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	private EEnum serverStatusEEnum = null;
+
+	/**
+	 * Creates an instance of the model <b>Package</b>, registered with
+	 * {@link org.eclipse.emf.ecore.EPackage.Registry EPackage.Registry} by the
+	 * package URI value.
+	 * <p>Note: the correct way to create the package is via the static
+	 * factory method {@link #init init()}, which also performs
+	 * initialization of the package, or returns the registered package,
+	 * if one already exists.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see org.eclipse.emf.ecore.EPackage.Registry
+	 * @see org.apache.hdt.core.internal.model.HadoopPackage#eNS_URI
+	 * @see #init()
+	 * @generated
+	 */
+	private HadoopPackageImpl() {
+		super(eNS_URI, HadoopFactory.eINSTANCE);
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	private static boolean isInited = false;
+
+	/**
+	 * Creates, registers, and initializes the <b>Package</b> for this model, and for any others upon which it depends.
+	 * 
+	 * <p>This method is used to initialize {@link HadoopPackage#eINSTANCE} when that field is accessed.
+	 * Clients should not invoke it directly. Instead, they should simply access that field to obtain the package.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #eNS_URI
+	 * @see #createPackageContents()
+	 * @see #initializePackageContents()
+	 * @generated
+	 */
+	public static HadoopPackage init() {
+		if (isInited) return (HadoopPackage)EPackage.Registry.INSTANCE.getEPackage(HadoopPackage.eNS_URI);
+
+		// Obtain or create and register package
+		HadoopPackageImpl theHadoopPackage = (HadoopPackageImpl)(EPackage.Registry.INSTANCE.get(eNS_URI) instanceof HadoopPackageImpl ? EPackage.Registry.INSTANCE.get(eNS_URI) : new HadoopPackageImpl());
+
+		isInited = true;
+
+		// Create package meta-data objects
+		theHadoopPackage.createPackageContents();
+
+		// Initialize created meta-data
+		theHadoopPackage.initializePackageContents();
+
+		// Mark meta-data to indicate it can't be changed
+		theHadoopPackage.freeze();
+
+  
+		// Update the registry and return the package
+		EPackage.Registry.INSTANCE.put(HadoopPackage.eNS_URI, theHadoopPackage);
+		return theHadoopPackage;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public EClass getHDFSServer() {
+		return hdfsServerEClass;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public EAttribute getHDFSServer_Loaded() {
+		return (EAttribute)hdfsServerEClass.getEStructuralFeatures().get(0);
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public EAttribute getHDFSServer_OperationURIs() {
+		return (EAttribute)hdfsServerEClass.getEStructuralFeatures().get(1);
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public EAttribute getHDFSServer_UserId() {
+		return (EAttribute)hdfsServerEClass.getEStructuralFeatures().get(2);
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public EAttribute getHDFSServer_GroupIds() {
+		return (EAttribute)hdfsServerEClass.getEStructuralFeatures().get(3);
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public EClass getServers() {
+		return serversEClass;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public EReference getServers_HdfsServers() {
+		return (EReference)serversEClass.getEStructuralFeatures().get(0);
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public EAttribute getServers_Version() {
+		return (EAttribute)serversEClass.getEStructuralFeatures().get(1);
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public EReference getServers_ZookeeperServers() {
+		return (EReference)serversEClass.getEStructuralFeatures().get(2);
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public EClass getServer() {
+		return serverEClass;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public EAttribute getServer_Name() {
+		return (EAttribute)serverEClass.getEStructuralFeatures().get(0);
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public EAttribute getServer_Uri() {
+		return (EAttribute)serverEClass.getEStructuralFeatures().get(1);
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public EAttribute getServer_StatusCode() {
+		return (EAttribute)serverEClass.getEStructuralFeatures().get(2);
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public EAttribute getServer_StatusMessage() {
+		return (EAttribute)serverEClass.getEStructuralFeatures().get(3);
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public EAttribute getServer_LastAccessed() {
+		return (EAttribute)serverEClass.getEStructuralFeatures().get(4);
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public EClass getZooKeeperServer() {
+		return zooKeeperServerEClass;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public EClass getZNode() {
+		return zNodeEClass;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public EReference getZNode_Children() {
+		return (EReference)zNodeEClass.getEStructuralFeatures().get(0);
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public EAttribute getZNode_LastRefresh() {
+		return (EAttribute)zNodeEClass.getEStructuralFeatures().get(1);
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public EAttribute getZNode_Refreshing() {
+		return (EAttribute)zNodeEClass.getEStructuralFeatures().get(2);
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public EAttribute getZNode_Ephermeral() {
+		return (EAttribute)zNodeEClass.getEStructuralFeatures().get(3);
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public EAttribute getZNode_CreationId() {
+		return (EAttribute)zNodeEClass.getEStructuralFeatures().get(4);
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public EAttribute getZNode_ModifiedId() {
+		return (EAttribute)zNodeEClass.getEStructuralFeatures().get(5);
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public EAttribute getZNode_CreationTime() {
+		return (EAttribute)zNodeEClass.getEStructuralFeatures().get(6);
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public EAttribute getZNode_ModifiedTime() {
+		return (EAttribute)zNodeEClass.getEStructuralFeatures().get(7);
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public EAttribute getZNode_Version() {
+		return (EAttribute)zNodeEClass.getEStructuralFeatures().get(8);
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public EAttribute getZNode_ChildrenVersion() {
+		return (EAttribute)zNodeEClass.getEStructuralFeatures().get(9);
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public EAttribute getZNode_AclVersion() {
+		return (EAttribute)zNodeEClass.getEStructuralFeatures().get(10);
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public EAttribute getZNode_EphermalOwnerSessionId() {
+		return (EAttribute)zNodeEClass.getEStructuralFeatures().get(11);
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public EAttribute getZNode_DataLength() {
+		return (EAttribute)zNodeEClass.getEStructuralFeatures().get(12);
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public EAttribute getZNode_ChildrenCount() {
+		return (EAttribute)zNodeEClass.getEStructuralFeatures().get(13);
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public EReference getZNode_Parent() {
+		return (EReference)zNodeEClass.getEStructuralFeatures().get(14);
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public EAttribute getZNode_NodeName() {
+		return (EAttribute)zNodeEClass.getEStructuralFeatures().get(15);
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public EAttribute getZNode_Sequential() {
+		return (EAttribute)zNodeEClass.getEStructuralFeatures().get(16);
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public EEnum getServerStatus() {
+		return serverStatusEEnum;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public HadoopFactory getHadoopFactory() {
+		return (HadoopFactory)getEFactoryInstance();
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	private boolean isCreated = false;
+
+	/**
+	 * Creates the meta-model objects for the package.  This method is
+	 * guarded to have no effect on any invocation but its first.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public void createPackageContents() {
+		if (isCreated) return;
+		isCreated = true;
+
+		// Create classes and their features
+		hdfsServerEClass = createEClass(HDFS_SERVER);
+		createEAttribute(hdfsServerEClass, HDFS_SERVER__LOADED);
+		createEAttribute(hdfsServerEClass, HDFS_SERVER__OPERATION_UR_IS);
+		createEAttribute(hdfsServerEClass, HDFS_SERVER__USER_ID);
+		createEAttribute(hdfsServerEClass, HDFS_SERVER__GROUP_IDS);
+
+		serversEClass = createEClass(SERVERS);
+		createEReference(serversEClass, SERVERS__HDFS_SERVERS);
+		createEAttribute(serversEClass, SERVERS__VERSION);
+		createEReference(serversEClass, SERVERS__ZOOKEEPER_SERVERS);
+
+		serverEClass = createEClass(SERVER);
+		createEAttribute(serverEClass, SERVER__NAME);
+		createEAttribute(serverEClass, SERVER__URI);
+		createEAttribute(serverEClass, SERVER__STATUS_CODE);
+		createEAttribute(serverEClass, SERVER__STATUS_MESSAGE);
+		createEAttribute(serverEClass, SERVER__LAST_ACCESSED);
+
+		zooKeeperServerEClass = createEClass(ZOO_KEEPER_SERVER);
+
+		zNodeEClass = createEClass(ZNODE);
+		createEReference(zNodeEClass, ZNODE__CHILDREN);
+		createEAttribute(zNodeEClass, ZNODE__LAST_REFRESH);
+		createEAttribute(zNodeEClass, ZNODE__REFRESHING);
+		createEAttribute(zNodeEClass, ZNODE__EPHERMERAL);
+		createEAttribute(zNodeEClass, ZNODE__CREATION_ID);
+		createEAttribute(zNodeEClass, ZNODE__MODIFIED_ID);
+		createEAttribute(zNodeEClass, ZNODE__CREATION_TIME);
+		createEAttribute(zNodeEClass, ZNODE__MODIFIED_TIME);
+		createEAttribute(zNodeEClass, ZNODE__VERSION);
+		createEAttribute(zNodeEClass, ZNODE__CHILDREN_VERSION);
+		createEAttribute(zNodeEClass, ZNODE__ACL_VERSION);
+		createEAttribute(zNodeEClass, ZNODE__EPHERMAL_OWNER_SESSION_ID);
+		createEAttribute(zNodeEClass, ZNODE__DATA_LENGTH);
+		createEAttribute(zNodeEClass, ZNODE__CHILDREN_COUNT);
+		createEReference(zNodeEClass, ZNODE__PARENT);
+		createEAttribute(zNodeEClass, ZNODE__NODE_NAME);
+		createEAttribute(zNodeEClass, ZNODE__SEQUENTIAL);
+
+		// Create enums
+		serverStatusEEnum = createEEnum(SERVER_STATUS);
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	private boolean isInitialized = false;
+
+	/**
+	 * Complete the initialization of the package and its meta-model.  This
+	 * method is guarded to have no effect on any invocation but its first.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public void initializePackageContents() {
+		if (isInitialized) return;
+		isInitialized = true;
+
+		// Initialize package
+		setName(eNAME);
+		setNsPrefix(eNS_PREFIX);
+		setNsURI(eNS_URI);
+
+		// Create type parameters
+
+		// Set bounds for type parameters
+
+		// Add supertypes to classes
+		hdfsServerEClass.getESuperTypes().add(this.getServer());
+		zooKeeperServerEClass.getESuperTypes().add(this.getServer());
+		zooKeeperServerEClass.getESuperTypes().add(this.getZNode());
+
+		// Initialize classes and features; add operations and parameters
+		initEClass(hdfsServerEClass, HDFSServer.class, "HDFSServer", !IS_ABSTRACT, !IS_INTERFACE, IS_GENERATED_INSTANCE_CLASS);
+		initEAttribute(getHDFSServer_Loaded(), ecorePackage.getEBoolean(), "loaded", null, 0, 1, HDFSServer.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
+		initEAttribute(getHDFSServer_OperationURIs(), ecorePackage.getEString(), "operationURIs", null, 0, -1, HDFSServer.class, IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
+		initEAttribute(getHDFSServer_UserId(), ecorePackage.getEString(), "userId", null, 0, 1, HDFSServer.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
+		initEAttribute(getHDFSServer_GroupIds(), ecorePackage.getEString(), "groupIds", null, 0, -1, HDFSServer.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
+
+		initEClass(serversEClass, Servers.class, "Servers", !IS_ABSTRACT, !IS_INTERFACE, IS_GENERATED_INSTANCE_CLASS);
+		initEReference(getServers_HdfsServers(), this.getHDFSServer(), null, "hdfsServers", null, 0, -1, Servers.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, IS_COMPOSITE, !IS_RESOLVE_PROXIES, !IS_UNSETTABLE, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
+		initEAttribute(getServers_Version(), ecorePackage.getEString(), "version", "1.0.0.0", 0, 1, Servers.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
+		initEReference(getServers_ZookeeperServers(), this.getZooKeeperServer(), null, "zookeeperServers", null, 0, -1, Servers.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, IS_COMPOSITE, !IS_RESOLVE_PROXIES, !IS_UNSETTABLE, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
+
+		initEClass(serverEClass, Server.class, "Server", IS_ABSTRACT, !IS_INTERFACE, IS_GENERATED_INSTANCE_CLASS);
+		initEAttribute(getServer_Name(), ecorePackage.getEString(), "name", "", 0, 1, Server.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
+		initEAttribute(getServer_Uri(), ecorePackage.getEString(), "uri", null, 0, 1, Server.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
+		initEAttribute(getServer_StatusCode(), ecorePackage.getEInt(), "statusCode", "0", 0, 1, Server.class, IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
+		initEAttribute(getServer_StatusMessage(), ecorePackage.getEString(), "statusMessage", null, 0, 1, Server.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
+		initEAttribute(getServer_LastAccessed(), ecorePackage.getELong(), "lastAccessed", "-1", 0, 1, Server.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
+
+		initEClass(zooKeeperServerEClass, ZooKeeperServer.class, "ZooKeeperServer", !IS_ABSTRACT, !IS_INTERFACE, IS_GENERATED_INSTANCE_CLASS);
+
+		initEClass(zNodeEClass, ZNode.class, "ZNode", !IS_ABSTRACT, !IS_INTERFACE, IS_GENERATED_INSTANCE_CLASS);
+		initEReference(getZNode_Children(), this.getZNode(), null, "children", null, 0, -1, ZNode.class, IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, IS_COMPOSITE, !IS_RESOLVE_PROXIES, !IS_UNSETTABLE, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
+		initEAttribute(getZNode_LastRefresh(), ecorePackage.getELong(), "lastRefresh", "-1", 0, 1, ZNode.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
+		initEAttribute(getZNode_Refreshing(), ecorePackage.getEBoolean(), "refreshing", null, 0, 1, ZNode.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
+		initEAttribute(getZNode_Ephermeral(), ecorePackage.getEBoolean(), "ephermeral", null, 0, 1, ZNode.class, IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
+		initEAttribute(getZNode_CreationId(), ecorePackage.getELong(), "creationId", "-1", 0, 1, ZNode.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
+		initEAttribute(getZNode_ModifiedId(), ecorePackage.getELong(), "modifiedId", "-1", 0, 1, ZNode.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
+		initEAttribute(getZNode_CreationTime(), ecorePackage.getELong(), "creationTime", "-1", 0, 1, ZNode.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
+		initEAttribute(getZNode_ModifiedTime(), ecorePackage.getELong(), "modifiedTime", "-1", 0, 1, ZNode.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
+		initEAttribute(getZNode_Version(), ecorePackage.getEInt(), "version", "-1", 0, 1, ZNode.class, IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
+		initEAttribute(getZNode_ChildrenVersion(), ecorePackage.getEInt(), "childrenVersion", "-1", 0, 1, ZNode.class, IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
+		initEAttribute(getZNode_AclVersion(), ecorePackage.getEInt(), "aclVersion", "-1", 0, 1, ZNode.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
+		initEAttribute(getZNode_EphermalOwnerSessionId(), ecorePackage.getELong(), "ephermalOwnerSessionId", "-1", 0, 1, ZNode.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
+		initEAttribute(getZNode_DataLength(), ecorePackage.getEInt(), "dataLength", "-1", 0, 1, ZNode.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
+		initEAttribute(getZNode_ChildrenCount(), ecorePackage.getEInt(), "childrenCount", "0", 0, 1, ZNode.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
+		initEReference(getZNode_Parent(), this.getZNode(), null, "parent", null, 0, 1, ZNode.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_COMPOSITE, IS_RESOLVE_PROXIES, !IS_UNSETTABLE, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
+		initEAttribute(getZNode_NodeName(), ecorePackage.getEString(), "nodeName", null, 0, 1, ZNode.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
+		initEAttribute(getZNode_Sequential(), ecorePackage.getEBoolean(), "sequential", null, 0, 1, ZNode.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
+
+		addEOperation(zNodeEClass, ecorePackage.getEString(), "getPath", 0, 1, IS_UNIQUE, IS_ORDERED);
+
+		addEOperation(zNodeEClass, this.getZooKeeperServer(), "getServer", 0, 1, IS_UNIQUE, IS_ORDERED);
+
+		// Initialize enums and add enum literals
+		initEEnum(serverStatusEEnum, ServerStatus.class, "ServerStatus");
+		addEEnumLiteral(serverStatusEEnum, ServerStatus.NO_PROJECT);
+		addEEnumLiteral(serverStatusEEnum, ServerStatus.DISCONNECTED);
+		addEEnumLiteral(serverStatusEEnum, ServerStatus.CONNECTED);
+
+		// Create resource
+		createResource(eNS_URI);
+	}
+
+} //HadoopPackageImpl
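The initialization above registers the Server, ZooKeeperServer and ZNode classifiers, their attributes, and the ServerStatus enum with the package before the model resource is created. As a rough illustration of what that registration gives clients, the sketch below dumps the ZNode attributes reflectively; it assumes the usual EMF-generated HadoopPackage.eINSTANCE singleton, which is not visible in this hunk.

    import org.apache.hdt.core.internal.model.HadoopPackage;
    import org.eclipse.emf.ecore.EAttribute;
    import org.eclipse.emf.ecore.EClass;

    public class ZNodeMetamodelDump {
        public static void main(String[] args) {
            // eINSTANCE is assumed to be the standard generated package singleton.
            EClass zNodeClass = HadoopPackage.eINSTANCE.getZNode();
            for (EAttribute attribute : zNodeClass.getEAllAttributes()) {
                // Prints lines such as "lastRefresh : ELong (default -1)".
                System.out.println(attribute.getName()
                        + " : " + attribute.getEAttributeType().getName()
                        + " (default " + attribute.getDefaultValueLiteral() + ")");
            }
        }
    }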

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/impl/ServerImpl.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/impl/ServerImpl.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/impl/ServerImpl.java
new file mode 100644
index 0000000..c74e513
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/impl/ServerImpl.java
@@ -0,0 +1,395 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *  
+ */
+package org.apache.hdt.core.internal.model.impl;
+
+import org.apache.hdt.core.internal.model.HadoopPackage;
+import org.apache.hdt.core.internal.model.Server;
+
+import org.eclipse.emf.common.notify.Notification;
+
+import org.eclipse.emf.ecore.EClass;
+
+import org.eclipse.emf.ecore.impl.ENotificationImpl;
+import org.eclipse.emf.ecore.impl.EObjectImpl;
+
+/**
+ * <!-- begin-user-doc -->
+ * An implementation of the model object '<em><b>Server</b></em>'.
+ * <!-- end-user-doc -->
+ * <p>
+ * The following features are implemented:
+ * <ul>
+ *   <li>{@link org.apache.hdt.core.internal.model.impl.ServerImpl#getName <em>Name</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.impl.ServerImpl#getUri <em>Uri</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.impl.ServerImpl#getStatusCode <em>Status Code</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.impl.ServerImpl#getStatusMessage <em>Status Message</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.impl.ServerImpl#getLastAccessed <em>Last Accessed</em>}</li>
+ * </ul>
+ * </p>
+ *
+ * @generated
+ */
+public abstract class ServerImpl extends EObjectImpl implements Server {
+	/**
+	 * The default value of the '{@link #getName() <em>Name</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getName()
+	 * @generated
+	 * @ordered
+	 */
+	protected static final String NAME_EDEFAULT = "";
+
+	/**
+	 * The cached value of the '{@link #getName() <em>Name</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getName()
+	 * @generated
+	 * @ordered
+	 */
+	protected String name = NAME_EDEFAULT;
+
+	/**
+	 * The default value of the '{@link #getUri() <em>Uri</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getUri()
+	 * @generated
+	 * @ordered
+	 */
+	protected static final String URI_EDEFAULT = null;
+
+	/**
+	 * The cached value of the '{@link #getUri() <em>Uri</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getUri()
+	 * @generated
+	 * @ordered
+	 */
+	protected String uri = URI_EDEFAULT;
+
+	/**
+	 * The default value of the '{@link #getStatusCode() <em>Status Code</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getStatusCode()
+	 * @generated
+	 * @ordered
+	 */
+	protected static final int STATUS_CODE_EDEFAULT = 0;
+
+	/**
+	 * The cached value of the '{@link #getStatusCode() <em>Status Code</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getStatusCode()
+	 * @generated
+	 * @ordered
+	 */
+	protected int statusCode = STATUS_CODE_EDEFAULT;
+
+	/**
+	 * The default value of the '{@link #getStatusMessage() <em>Status Message</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getStatusMessage()
+	 * @generated
+	 * @ordered
+	 */
+	protected static final String STATUS_MESSAGE_EDEFAULT = null;
+
+	/**
+	 * The cached value of the '{@link #getStatusMessage() <em>Status Message</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getStatusMessage()
+	 * @generated
+	 * @ordered
+	 */
+	protected String statusMessage = STATUS_MESSAGE_EDEFAULT;
+
+	/**
+	 * The default value of the '{@link #getLastAccessed() <em>Last Accessed</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getLastAccessed()
+	 * @generated
+	 * @ordered
+	 */
+	protected static final long LAST_ACCESSED_EDEFAULT = -1L;
+
+	/**
+	 * The cached value of the '{@link #getLastAccessed() <em>Last Accessed</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getLastAccessed()
+	 * @generated
+	 * @ordered
+	 */
+	protected long lastAccessed = LAST_ACCESSED_EDEFAULT;
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	protected ServerImpl() {
+		super();
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	@Override
+	protected EClass eStaticClass() {
+		return HadoopPackage.Literals.SERVER;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public String getName() {
+		return name;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public void setName(String newName) {
+		String oldName = name;
+		name = newName;
+		if (eNotificationRequired())
+			eNotify(new ENotificationImpl(this, Notification.SET, HadoopPackage.SERVER__NAME, oldName, name));
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public String getUri() {
+		return uri;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public void setUri(String newUri) {
+		String oldUri = uri;
+		uri = newUri;
+		if (eNotificationRequired())
+			eNotify(new ENotificationImpl(this, Notification.SET, HadoopPackage.SERVER__URI, oldUri, uri));
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public int getStatusCode() {
+		return statusCode;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public void setStatusCode(int newStatusCode) {
+		int oldStatusCode = statusCode;
+		statusCode = newStatusCode;
+		if (eNotificationRequired())
+			eNotify(new ENotificationImpl(this, Notification.SET, HadoopPackage.SERVER__STATUS_CODE, oldStatusCode, statusCode));
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public String getStatusMessage() {
+		return statusMessage;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public void setStatusMessage(String newStatusMessage) {
+		String oldStatusMessage = statusMessage;
+		statusMessage = newStatusMessage;
+		if (eNotificationRequired())
+			eNotify(new ENotificationImpl(this, Notification.SET, HadoopPackage.SERVER__STATUS_MESSAGE, oldStatusMessage, statusMessage));
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public long getLastAccessed() {
+		return lastAccessed;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public void setLastAccessed(long newLastAccessed) {
+		long oldLastAccessed = lastAccessed;
+		lastAccessed = newLastAccessed;
+		if (eNotificationRequired())
+			eNotify(new ENotificationImpl(this, Notification.SET, HadoopPackage.SERVER__LAST_ACCESSED, oldLastAccessed, lastAccessed));
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	@Override
+	public Object eGet(int featureID, boolean resolve, boolean coreType) {
+		switch (featureID) {
+			case HadoopPackage.SERVER__NAME:
+				return getName();
+			case HadoopPackage.SERVER__URI:
+				return getUri();
+			case HadoopPackage.SERVER__STATUS_CODE:
+				return getStatusCode();
+			case HadoopPackage.SERVER__STATUS_MESSAGE:
+				return getStatusMessage();
+			case HadoopPackage.SERVER__LAST_ACCESSED:
+				return getLastAccessed();
+		}
+		return super.eGet(featureID, resolve, coreType);
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	@Override
+	public void eSet(int featureID, Object newValue) {
+		switch (featureID) {
+			case HadoopPackage.SERVER__NAME:
+				setName((String)newValue);
+				return;
+			case HadoopPackage.SERVER__URI:
+				setUri((String)newValue);
+				return;
+			case HadoopPackage.SERVER__STATUS_CODE:
+				setStatusCode((Integer)newValue);
+				return;
+			case HadoopPackage.SERVER__STATUS_MESSAGE:
+				setStatusMessage((String)newValue);
+				return;
+			case HadoopPackage.SERVER__LAST_ACCESSED:
+				setLastAccessed((Long)newValue);
+				return;
+		}
+		super.eSet(featureID, newValue);
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	@Override
+	public void eUnset(int featureID) {
+		switch (featureID) {
+			case HadoopPackage.SERVER__NAME:
+				setName(NAME_EDEFAULT);
+				return;
+			case HadoopPackage.SERVER__URI:
+				setUri(URI_EDEFAULT);
+				return;
+			case HadoopPackage.SERVER__STATUS_CODE:
+				setStatusCode(STATUS_CODE_EDEFAULT);
+				return;
+			case HadoopPackage.SERVER__STATUS_MESSAGE:
+				setStatusMessage(STATUS_MESSAGE_EDEFAULT);
+				return;
+			case HadoopPackage.SERVER__LAST_ACCESSED:
+				setLastAccessed(LAST_ACCESSED_EDEFAULT);
+				return;
+		}
+		super.eUnset(featureID);
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	@Override
+	public boolean eIsSet(int featureID) {
+		switch (featureID) {
+			case HadoopPackage.SERVER__NAME:
+				return NAME_EDEFAULT == null ? name != null : !NAME_EDEFAULT.equals(name);
+			case HadoopPackage.SERVER__URI:
+				return URI_EDEFAULT == null ? uri != null : !URI_EDEFAULT.equals(uri);
+			case HadoopPackage.SERVER__STATUS_CODE:
+				return statusCode != STATUS_CODE_EDEFAULT;
+			case HadoopPackage.SERVER__STATUS_MESSAGE:
+				return STATUS_MESSAGE_EDEFAULT == null ? statusMessage != null : !STATUS_MESSAGE_EDEFAULT.equals(statusMessage);
+			case HadoopPackage.SERVER__LAST_ACCESSED:
+				return lastAccessed != LAST_ACCESSED_EDEFAULT;
+		}
+		return super.eIsSet(featureID);
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	@Override
+	public String toString() {
+		if (eIsProxy()) return super.toString();
+
+		StringBuffer result = new StringBuffer(super.toString());
+		result.append(" (name: ");
+		result.append(name);
+		result.append(", uri: ");
+		result.append(uri);
+		result.append(", statusCode: ");
+		result.append(statusCode);
+		result.append(", statusMessage: ");
+		result.append(statusMessage);
+		result.append(", lastAccessed: ");
+		result.append(lastAccessed);
+		result.append(')');
+		return result.toString();
+	}
+
+} //ServerImpl
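Every setter in ServerImpl fires an ENotificationImpl when a listener is attached, so client code can react to status changes without polling the model. Below is a minimal sketch of such a listener, using only the feature IDs that appear in this hunk plus the stock EMF adapter API; the class name and attachment helper are hypothetical.

    import org.apache.hdt.core.internal.model.HadoopPackage;
    import org.apache.hdt.core.internal.model.Server;
    import org.eclipse.emf.common.notify.Notification;
    import org.eclipse.emf.common.notify.impl.AdapterImpl;
    import org.eclipse.emf.ecore.EObject;

    public class ServerStatusListener extends AdapterImpl {
        @Override
        public void notifyChanged(Notification msg) {
            // React only to the statusCode feature declared on Server.
            if (msg.getFeatureID(Server.class) == HadoopPackage.SERVER__STATUS_CODE) {
                System.out.println("status changed: " + msg.getOldIntValue()
                        + " -> " + msg.getNewIntValue());
            }
        }

        // Attach to any Server instance; every implementation extends EObjectImpl.
        public static void observe(Server server) {
            ((EObject) server).eAdapters().add(new ServerStatusListener());
        }
    }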

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/impl/ServersImpl.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/impl/ServersImpl.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/impl/ServersImpl.java
new file mode 100644
index 0000000..d51b0fa
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/impl/ServersImpl.java
@@ -0,0 +1,271 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *  
+ */
+package org.apache.hdt.core.internal.model.impl;
+
+import java.util.Collection;
+
+import org.apache.hdt.core.internal.model.HDFSServer;
+import org.apache.hdt.core.internal.model.HadoopPackage;
+import org.apache.hdt.core.internal.model.Servers;
+import org.apache.hdt.core.internal.model.ZooKeeperServer;
+import org.eclipse.emf.common.notify.Notification;
+import org.eclipse.emf.common.notify.NotificationChain;
+import org.eclipse.emf.common.util.EList;
+import org.eclipse.emf.ecore.EClass;
+import org.eclipse.emf.ecore.InternalEObject;
+import org.eclipse.emf.ecore.impl.ENotificationImpl;
+import org.eclipse.emf.ecore.impl.EObjectImpl;
+import org.eclipse.emf.ecore.util.EObjectContainmentEList;
+import org.eclipse.emf.ecore.util.EObjectResolvingEList;
+import org.eclipse.emf.ecore.util.InternalEList;
+
+/**
+ * <!-- begin-user-doc -->
+ * An implementation of the model object '<em><b>Servers</b></em>'.
+ * <!-- end-user-doc -->
+ * <p>
+ * The following features are implemented:
+ * <ul>
+ *   <li>{@link org.apache.hdt.core.internal.model.impl.ServersImpl#getHdfsServers <em>Hdfs Servers</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.impl.ServersImpl#getVersion <em>Version</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.impl.ServersImpl#getZookeeperServers <em>Zookeeper Servers</em>}</li>
+ * </ul>
+ * </p>
+ *
+ * @generated
+ */
+public class ServersImpl extends EObjectImpl implements Servers {
+	/**
+	 * The cached value of the '{@link #getHdfsServers() <em>Hdfs Servers</em>}' containment reference list.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getHdfsServers()
+	 * @generated
+	 * @ordered
+	 */
+	protected EList<HDFSServer> hdfsServers;
+
+	/**
+	 * The default value of the '{@link #getVersion() <em>Version</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getVersion()
+	 * @generated
+	 * @ordered
+	 */
+	protected static final String VERSION_EDEFAULT = "1.0.0.0";
+
+	/**
+	 * The cached value of the '{@link #getVersion() <em>Version</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getVersion()
+	 * @generated
+	 * @ordered
+	 */
+	protected String version = VERSION_EDEFAULT;
+
+	/**
+	 * The cached value of the '{@link #getZookeeperServers() <em>Zookeeper Servers</em>}' containment reference list.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getZookeeperServers()
+	 * @generated
+	 * @ordered
+	 */
+	protected EList<ZooKeeperServer> zookeeperServers;
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	protected ServersImpl() {
+		super();
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	@Override
+	protected EClass eStaticClass() {
+		return HadoopPackage.Literals.SERVERS;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public EList<HDFSServer> getHdfsServers() {
+		if (hdfsServers == null) {
+			hdfsServers = new EObjectContainmentEList<HDFSServer>(HDFSServer.class, this, HadoopPackage.SERVERS__HDFS_SERVERS);
+		}
+		return hdfsServers;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public String getVersion() {
+		return version;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public void setVersion(String newVersion) {
+		String oldVersion = version;
+		version = newVersion;
+		if (eNotificationRequired())
+			eNotify(new ENotificationImpl(this, Notification.SET, HadoopPackage.SERVERS__VERSION, oldVersion, version));
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public EList<ZooKeeperServer> getZookeeperServers() {
+		if (zookeeperServers == null) {
+			zookeeperServers = new EObjectContainmentEList<ZooKeeperServer>(ZooKeeperServer.class, this, HadoopPackage.SERVERS__ZOOKEEPER_SERVERS);
+		}
+		return zookeeperServers;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	@Override
+	public NotificationChain eInverseRemove(InternalEObject otherEnd, int featureID, NotificationChain msgs) {
+		switch (featureID) {
+			case HadoopPackage.SERVERS__HDFS_SERVERS:
+				return ((InternalEList<?>)getHdfsServers()).basicRemove(otherEnd, msgs);
+			case HadoopPackage.SERVERS__ZOOKEEPER_SERVERS:
+				return ((InternalEList<?>)getZookeeperServers()).basicRemove(otherEnd, msgs);
+		}
+		return super.eInverseRemove(otherEnd, featureID, msgs);
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	@Override
+	public Object eGet(int featureID, boolean resolve, boolean coreType) {
+		switch (featureID) {
+			case HadoopPackage.SERVERS__HDFS_SERVERS:
+				return getHdfsServers();
+			case HadoopPackage.SERVERS__VERSION:
+				return getVersion();
+			case HadoopPackage.SERVERS__ZOOKEEPER_SERVERS:
+				return getZookeeperServers();
+		}
+		return super.eGet(featureID, resolve, coreType);
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	@SuppressWarnings("unchecked")
+	@Override
+	public void eSet(int featureID, Object newValue) {
+		switch (featureID) {
+			case HadoopPackage.SERVERS__HDFS_SERVERS:
+				getHdfsServers().clear();
+				getHdfsServers().addAll((Collection<? extends HDFSServer>)newValue);
+				return;
+			case HadoopPackage.SERVERS__VERSION:
+				setVersion((String)newValue);
+				return;
+			case HadoopPackage.SERVERS__ZOOKEEPER_SERVERS:
+				getZookeeperServers().clear();
+				getZookeeperServers().addAll((Collection<? extends ZooKeeperServer>)newValue);
+				return;
+		}
+		super.eSet(featureID, newValue);
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	@Override
+	public void eUnset(int featureID) {
+		switch (featureID) {
+			case HadoopPackage.SERVERS__HDFS_SERVERS:
+				getHdfsServers().clear();
+				return;
+			case HadoopPackage.SERVERS__VERSION:
+				setVersion(VERSION_EDEFAULT);
+				return;
+			case HadoopPackage.SERVERS__ZOOKEEPER_SERVERS:
+				getZookeeperServers().clear();
+				return;
+		}
+		super.eUnset(featureID);
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	@Override
+	public boolean eIsSet(int featureID) {
+		switch (featureID) {
+			case HadoopPackage.SERVERS__HDFS_SERVERS:
+				return hdfsServers != null && !hdfsServers.isEmpty();
+			case HadoopPackage.SERVERS__VERSION:
+				return VERSION_EDEFAULT == null ? version != null : !VERSION_EDEFAULT.equals(version);
+			case HadoopPackage.SERVERS__ZOOKEEPER_SERVERS:
+				return zookeeperServers != null && !zookeeperServers.isEmpty();
+		}
+		return super.eIsSet(featureID);
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	@Override
+	public String toString() {
+		if (eIsProxy()) return super.toString();
+
+		StringBuffer result = new StringBuffer(super.toString());
+		result.append(" (version: ");
+		result.append(version);
+		result.append(')');
+		return result.toString();
+	}
+
+} //ServersImpl
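ServersImpl keeps its HDFS and ZooKeeper entries in containment lists, so adding a server to the matching list is enough to parent it, and eInverseRemove detaches it again on deletion. The outline below shows that usage; HadoopFactory is assumed to be the standard EMF-generated factory for this package (it does not appear in this part of the patch), and ZooKeeperServer is assumed to expose the Server setters, as ZooKeeperServerImpl extending ServerImpl suggests.

    import org.apache.hdt.core.internal.model.HadoopFactory;
    import org.apache.hdt.core.internal.model.Servers;
    import org.apache.hdt.core.internal.model.ZooKeeperServer;

    public class ServersUsageSketch {
        public static void main(String[] args) {
            // Assumed factory calls; the container and entry types are from this patch.
            Servers servers = HadoopFactory.eINSTANCE.createServers();
            ZooKeeperServer zk = HadoopFactory.eINSTANCE.createZooKeeperServer();
            zk.setName("local-zookeeper");
            zk.setUri("localhost:2181");
            // Containment list: 'servers' now owns the entry.
            servers.getZookeeperServers().add(zk);
            System.out.println(servers); // toString() reports the model version
        }
    }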

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/impl/ZNodeImpl.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/impl/ZNodeImpl.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/impl/ZNodeImpl.java
new file mode 100644
index 0000000..c339e70
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/impl/ZNodeImpl.java
@@ -0,0 +1,1017 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *  
+ */
+package org.apache.hdt.core.internal.model.impl;
+
+import java.util.Collection;
+
+import org.apache.hdt.core.internal.model.HadoopPackage;
+import org.apache.hdt.core.internal.model.ZNode;
+import org.apache.hdt.core.internal.model.ZNodeType;
+import org.apache.hdt.core.internal.model.ZooKeeperServer;
+import org.eclipse.emf.common.notify.Notification;
+import org.eclipse.emf.common.notify.NotificationChain;
+import org.eclipse.emf.common.util.EList;
+import org.eclipse.emf.ecore.EClass;
+import org.eclipse.emf.ecore.InternalEObject;
+import org.eclipse.emf.ecore.impl.ENotificationImpl;
+import org.eclipse.emf.ecore.impl.EObjectImpl;
+import org.eclipse.emf.ecore.util.EObjectContainmentEList;
+import org.eclipse.emf.ecore.util.InternalEList;
+
+/**
+ * <!-- begin-user-doc --> An implementation of the model object '
+ * <em><b>ZNode</b></em>'. <!-- end-user-doc -->
+ * <p>
+ * The following features are implemented:
+ * <ul>
+ *   <li>{@link org.apache.hdt.core.internal.model.impl.ZNodeImpl#getChildren <em>Children</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.impl.ZNodeImpl#getLastRefresh <em>Last Refresh</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.impl.ZNodeImpl#isRefreshing <em>Refreshing</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.impl.ZNodeImpl#isEphermeral <em>Ephermeral</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.impl.ZNodeImpl#getCreationId <em>Creation Id</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.impl.ZNodeImpl#getModifiedId <em>Modified Id</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.impl.ZNodeImpl#getCreationTime <em>Creation Time</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.impl.ZNodeImpl#getModifiedTime <em>Modified Time</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.impl.ZNodeImpl#getVersion <em>Version</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.impl.ZNodeImpl#getChildrenVersion <em>Children Version</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.impl.ZNodeImpl#getAclVersion <em>Acl Version</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.impl.ZNodeImpl#getEphermalOwnerSessionId <em>Ephermal Owner Session Id</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.impl.ZNodeImpl#getDataLength <em>Data Length</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.impl.ZNodeImpl#getChildrenCount <em>Children Count</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.impl.ZNodeImpl#getParent <em>Parent</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.impl.ZNodeImpl#getNodeName <em>Node Name</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.impl.ZNodeImpl#isSequential <em>Sequential</em>}</li>
+ * </ul>
+ * </p>
+ *
+ * @generated
+ */
+public class ZNodeImpl extends EObjectImpl implements ZNode {
+	/**
+	 * The cached value of the '{@link #getChildren() <em>Children</em>}' containment reference list.
+	 * <!-- begin-user-doc --> <!-- end-user-doc -->
+	 * @see #getChildren()
+	 * @generated
+	 * @ordered
+	 */
+	protected EList<ZNode> children;
+
+	/**
+	 * The default value of the '{@link #getLastRefresh() <em>Last Refresh</em>}' attribute.
+	 * <!-- begin-user-doc --> <!-- end-user-doc -->
+	 * @see #getLastRefresh()
+	 * @generated
+	 * @ordered
+	 */
+	protected static final long LAST_REFRESH_EDEFAULT = -1L;
+
+	/**
+	 * The cached value of the '{@link #getLastRefresh() <em>Last Refresh</em>}' attribute.
+	 * <!-- begin-user-doc --> <!-- end-user-doc -->
+	 * @see #getLastRefresh()
+	 * @generated
+	 * @ordered
+	 */
+	protected long lastRefresh = LAST_REFRESH_EDEFAULT;
+
+	/**
+	 * The default value of the '{@link #isRefreshing() <em>Refreshing</em>}' attribute.
+	 * <!-- begin-user-doc --> <!-- end-user-doc -->
+	 * @see #isRefreshing()
+	 * @generated
+	 * @ordered
+	 */
+	protected static final boolean REFRESHING_EDEFAULT = false;
+
+	/**
+	 * The cached value of the '{@link #isRefreshing() <em>Refreshing</em>}' attribute.
+	 * <!-- begin-user-doc --> <!-- end-user-doc -->
+	 * @see #isRefreshing()
+	 * @generated
+	 * @ordered
+	 */
+	protected boolean refreshing = REFRESHING_EDEFAULT;
+
+	/**
+	 * The default value of the '{@link #isEphermeral() <em>Ephermeral</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #isEphermeral()
+	 * @generated
+	 * @ordered
+	 */
+	protected static final boolean EPHERMERAL_EDEFAULT = false;
+
+	/**
+	 * The cached value of the '{@link #isEphermeral() <em>Ephermeral</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #isEphermeral()
+	 * @generated
+	 * @ordered
+	 */
+	protected boolean ephermeral = EPHERMERAL_EDEFAULT;
+
+	/**
+	 * The default value of the '{@link #getCreationId() <em>Creation Id</em>}' attribute.
+	 * <!-- begin-user-doc --> <!-- end-user-doc -->
+	 * @see #getCreationId()
+	 * @generated
+	 * @ordered
+	 */
+	protected static final long CREATION_ID_EDEFAULT = -1L;
+
+	/**
+	 * The cached value of the '{@link #getCreationId() <em>Creation Id</em>}' attribute.
+	 * <!-- begin-user-doc --> <!-- end-user-doc -->
+	 * @see #getCreationId()
+	 * @generated
+	 * @ordered
+	 */
+	protected long creationId = CREATION_ID_EDEFAULT;
+
+	/**
+	 * The default value of the '{@link #getModifiedId() <em>Modified Id</em>}' attribute.
+	 * <!-- begin-user-doc --> <!-- end-user-doc -->
+	 * @see #getModifiedId()
+	 * @generated
+	 * @ordered
+	 */
+	protected static final long MODIFIED_ID_EDEFAULT = -1L;
+
+	/**
+	 * The cached value of the '{@link #getModifiedId() <em>Modified Id</em>}' attribute.
+	 * <!-- begin-user-doc --> <!-- end-user-doc -->
+	 * @see #getModifiedId()
+	 * @generated
+	 * @ordered
+	 */
+	protected long modifiedId = MODIFIED_ID_EDEFAULT;
+
+	/**
+	 * The default value of the '{@link #getCreationTime() <em>Creation Time</em>}' attribute.
+	 * <!-- begin-user-doc --> <!--
+	 * end-user-doc -->
+	 * @see #getCreationTime()
+	 * @generated
+	 * @ordered
+	 */
+	protected static final long CREATION_TIME_EDEFAULT = -1L;
+
+	/**
+	 * The cached value of the '{@link #getCreationTime() <em>Creation Time</em>}' attribute.
+	 * <!-- begin-user-doc --> <!--
+	 * end-user-doc -->
+	 * @see #getCreationTime()
+	 * @generated
+	 * @ordered
+	 */
+	protected long creationTime = CREATION_TIME_EDEFAULT;
+
+	/**
+	 * The default value of the '{@link #getModifiedTime() <em>Modified Time</em>}' attribute.
+	 * <!-- begin-user-doc --> <!--
+	 * end-user-doc -->
+	 * @see #getModifiedTime()
+	 * @generated
+	 * @ordered
+	 */
+	protected static final long MODIFIED_TIME_EDEFAULT = -1L;
+
+	/**
+	 * The cached value of the '{@link #getModifiedTime() <em>Modified Time</em>}' attribute.
+	 * <!-- begin-user-doc --> <!--
+	 * end-user-doc -->
+	 * @see #getModifiedTime()
+	 * @generated
+	 * @ordered
+	 */
+	protected long modifiedTime = MODIFIED_TIME_EDEFAULT;
+
+	/**
+	 * The default value of the '{@link #getVersion() <em>Version</em>}' attribute.
+	 * <!-- begin-user-doc --> <!-- end-user-doc -->
+	 * @see #getVersion()
+	 * @generated
+	 * @ordered
+	 */
+	protected static final int VERSION_EDEFAULT = -1;
+
+	/**
+	 * The cached value of the '{@link #getVersion() <em>Version</em>}' attribute.
+	 * <!-- begin-user-doc --> <!-- end-user-doc -->
+	 * @see #getVersion()
+	 * @generated
+	 * @ordered
+	 */
+	protected int version = VERSION_EDEFAULT;
+
+	/**
+	 * The default value of the '{@link #getChildrenVersion() <em>Children Version</em>}' attribute.
+	 * <!-- begin-user-doc --> <!--
+	 * end-user-doc -->
+	 * @see #getChildrenVersion()
+	 * @generated
+	 * @ordered
+	 */
+	protected static final int CHILDREN_VERSION_EDEFAULT = -1;
+
+	/**
+	 * The cached value of the '{@link #getChildrenVersion() <em>Children Version</em>}' attribute.
+	 * <!-- begin-user-doc --> <!--
+	 * end-user-doc -->
+	 * @see #getChildrenVersion()
+	 * @generated
+	 * @ordered
+	 */
+	protected int childrenVersion = CHILDREN_VERSION_EDEFAULT;
+
+	/**
+	 * The default value of the '{@link #getAclVersion() <em>Acl Version</em>}' attribute.
+	 * <!-- begin-user-doc --> <!-- end-user-doc -->
+	 * @see #getAclVersion()
+	 * @generated
+	 * @ordered
+	 */
+	protected static final int ACL_VERSION_EDEFAULT = -1;
+
+	/**
+	 * The cached value of the '{@link #getAclVersion() <em>Acl Version</em>}' attribute.
+	 * <!-- begin-user-doc --> <!-- end-user-doc -->
+	 * @see #getAclVersion()
+	 * @generated
+	 * @ordered
+	 */
+	protected int aclVersion = ACL_VERSION_EDEFAULT;
+
+	/**
+	 * The default value of the '{@link #getEphermalOwnerSessionId() <em>Ephermal Owner Session Id</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getEphermalOwnerSessionId()
+	 * @generated
+	 * @ordered
+	 */
+	protected static final long EPHERMAL_OWNER_SESSION_ID_EDEFAULT = -1L;
+
+	/**
+	 * The cached value of the '{@link #getEphermalOwnerSessionId() <em>Ephermal Owner Session Id</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getEphermalOwnerSessionId()
+	 * @generated
+	 * @ordered
+	 */
+	protected long ephermalOwnerSessionId = EPHERMAL_OWNER_SESSION_ID_EDEFAULT;
+
+	/**
+	 * The default value of the '{@link #getDataLength() <em>Data Length</em>}' attribute.
+	 * <!-- begin-user-doc --> <!-- end-user-doc -->
+	 * @see #getDataLength()
+	 * @generated
+	 * @ordered
+	 */
+	protected static final int DATA_LENGTH_EDEFAULT = -1;
+
+	/**
+	 * The cached value of the '{@link #getDataLength() <em>Data Length</em>}' attribute.
+	 * <!-- begin-user-doc --> <!-- end-user-doc -->
+	 * @see #getDataLength()
+	 * @generated
+	 * @ordered
+	 */
+	protected int dataLength = DATA_LENGTH_EDEFAULT;
+
+	/**
+	 * The default value of the '{@link #getChildrenCount() <em>Children Count</em>}' attribute.
+	 * <!-- begin-user-doc --> <!--
+	 * end-user-doc -->
+	 * @see #getChildrenCount()
+	 * @generated
+	 * @ordered
+	 */
+	protected static final int CHILDREN_COUNT_EDEFAULT = 0;
+
+	/**
+	 * The cached value of the '{@link #getChildrenCount() <em>Children Count</em>}' attribute.
+	 * <!-- begin-user-doc --> <!--
+	 * end-user-doc -->
+	 * @see #getChildrenCount()
+	 * @generated
+	 * @ordered
+	 */
+	protected int childrenCount = CHILDREN_COUNT_EDEFAULT;
+
+	/**
+	 * The cached value of the '{@link #getParent() <em>Parent</em>}' reference.
+	 * <!-- begin-user-doc --> <!-- end-user-doc -->
+	 * @see #getParent()
+	 * @generated
+	 * @ordered
+	 */
+	protected ZNode parent;
+
+	/**
+	 * The default value of the '{@link #getNodeName() <em>Node Name</em>}' attribute.
+	 * <!-- begin-user-doc --> <!-- end-user-doc -->
+	 * @see #getNodeName()
+	 * @generated
+	 * @ordered
+	 */
+	protected static final String NODE_NAME_EDEFAULT = null;
+
+	/**
+	 * The cached value of the '{@link #getNodeName() <em>Node Name</em>}' attribute.
+	 * <!-- begin-user-doc --> <!-- end-user-doc -->
+	 * @see #getNodeName()
+	 * @generated
+	 * @ordered
+	 */
+	protected String nodeName = NODE_NAME_EDEFAULT;
+
+	/**
+	 * The default value of the '{@link #isSequential() <em>Sequential</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #isSequential()
+	 * @generated
+	 * @ordered
+	 */
+	protected static final boolean SEQUENTIAL_EDEFAULT = false;
+
+	/**
+	 * The cached value of the '{@link #isSequential() <em>Sequential</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #isSequential()
+	 * @generated
+	 * @ordered
+	 */
+	protected boolean sequential = SEQUENTIAL_EDEFAULT;
+
+	/**
+	 * <!-- begin-user-doc --> <!-- end-user-doc -->
+	 * @generated
+	 */
+	protected ZNodeImpl() {
+		super();
+	}
+
+	/**
+	 * <!-- begin-user-doc --> <!-- end-user-doc -->
+	 * @generated
+	 */
+	@Override
+	protected EClass eStaticClass() {
+		return HadoopPackage.Literals.ZNODE;
+	}
+
+	/**
+	 * <!-- begin-user-doc --> <!-- end-user-doc -->
+	 * @generated
+	 */
+	public EList<ZNode> getChildren() {
+		if (children == null) {
+			children = new EObjectContainmentEList<ZNode>(ZNode.class, this, HadoopPackage.ZNODE__CHILDREN);
+		}
+		return children;
+	}
+
+	/**
+	 * <!-- begin-user-doc --> <!-- end-user-doc -->
+	 * @generated
+	 */
+	public long getLastRefresh() {
+		return lastRefresh;
+	}
+
+	/**
+	 * <!-- begin-user-doc --> <!-- end-user-doc -->
+	 * @generated
+	 */
+	public void setLastRefresh(long newLastRefresh) {
+		long oldLastRefresh = lastRefresh;
+		lastRefresh = newLastRefresh;
+		if (eNotificationRequired())
+			eNotify(new ENotificationImpl(this, Notification.SET, HadoopPackage.ZNODE__LAST_REFRESH, oldLastRefresh, lastRefresh));
+	}
+
+	/**
+	 * <!-- begin-user-doc --> <!-- end-user-doc -->
+	 * @generated
+	 */
+	public boolean isRefreshing() {
+		return refreshing;
+	}
+
+	/**
+	 * <!-- begin-user-doc --> <!-- end-user-doc -->
+	 * @generated
+	 */
+	public void setRefreshing(boolean newRefreshing) {
+		boolean oldRefreshing = refreshing;
+		refreshing = newRefreshing;
+		if (eNotificationRequired())
+			eNotify(new ENotificationImpl(this, Notification.SET, HadoopPackage.ZNODE__REFRESHING, oldRefreshing, refreshing));
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public boolean isEphermeral() {
+		return ephermeral;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public void setEphermeral(boolean newEphermeral) {
+		boolean oldEphermeral = ephermeral;
+		ephermeral = newEphermeral;
+		if (eNotificationRequired())
+			eNotify(new ENotificationImpl(this, Notification.SET, HadoopPackage.ZNODE__EPHERMERAL, oldEphermeral, ephermeral));
+	}
+
+	/**
+	 * <!-- begin-user-doc --> <!-- end-user-doc -->
+	 * @generated
+	 */
+	public long getCreationId() {
+		return creationId;
+	}
+
+	/**
+	 * <!-- begin-user-doc --> <!-- end-user-doc -->
+	 * @generated
+	 */
+	public void setCreationId(long newCreationId) {
+		long oldCreationId = creationId;
+		creationId = newCreationId;
+		if (eNotificationRequired())
+			eNotify(new ENotificationImpl(this, Notification.SET, HadoopPackage.ZNODE__CREATION_ID, oldCreationId, creationId));
+	}
+
+	/**
+	 * <!-- begin-user-doc --> <!-- end-user-doc -->
+	 * @generated
+	 */
+	public long getModifiedId() {
+		return modifiedId;
+	}
+
+	/**
+	 * <!-- begin-user-doc --> <!-- end-user-doc -->
+	 * @generated
+	 */
+	public void setModifiedId(long newModifiedId) {
+		long oldModifiedId = modifiedId;
+		modifiedId = newModifiedId;
+		if (eNotificationRequired())
+			eNotify(new ENotificationImpl(this, Notification.SET, HadoopPackage.ZNODE__MODIFIED_ID, oldModifiedId, modifiedId));
+	}
+
+	/**
+	 * <!-- begin-user-doc --> <!-- end-user-doc -->
+	 * @generated
+	 */
+	public long getCreationTime() {
+		return creationTime;
+	}
+
+	/**
+	 * <!-- begin-user-doc --> <!-- end-user-doc -->
+	 * @generated
+	 */
+	public void setCreationTime(long newCreationTime) {
+		long oldCreationTime = creationTime;
+		creationTime = newCreationTime;
+		if (eNotificationRequired())
+			eNotify(new ENotificationImpl(this, Notification.SET, HadoopPackage.ZNODE__CREATION_TIME, oldCreationTime, creationTime));
+	}
+
+	/**
+	 * <!-- begin-user-doc --> <!-- end-user-doc -->
+	 * @generated
+	 */
+	public long getModifiedTime() {
+		return modifiedTime;
+	}
+
+	/**
+	 * <!-- begin-user-doc --> <!-- end-user-doc -->
+	 * @generated
+	 */
+	public void setModifiedTime(long newModifiedTime) {
+		long oldModifiedTime = modifiedTime;
+		modifiedTime = newModifiedTime;
+		if (eNotificationRequired())
+			eNotify(new ENotificationImpl(this, Notification.SET, HadoopPackage.ZNODE__MODIFIED_TIME, oldModifiedTime, modifiedTime));
+	}
+
+	/**
+	 * <!-- begin-user-doc --> <!-- end-user-doc -->
+	 * @generated
+	 */
+	public int getVersion() {
+		return version;
+	}
+
+	/**
+	 * <!-- begin-user-doc --> <!-- end-user-doc -->
+	 * @generated
+	 */
+	public void setVersion(int newVersion) {
+		int oldVersion = version;
+		version = newVersion;
+		if (eNotificationRequired())
+			eNotify(new ENotificationImpl(this, Notification.SET, HadoopPackage.ZNODE__VERSION, oldVersion, version));
+	}
+
+	/**
+	 * <!-- begin-user-doc --> <!-- end-user-doc -->
+	 * @generated
+	 */
+	public int getChildrenVersion() {
+		return childrenVersion;
+	}
+
+	/**
+	 * <!-- begin-user-doc --> <!-- end-user-doc -->
+	 * @generated
+	 */
+	public void setChildrenVersion(int newChildrenVersion) {
+		int oldChildrenVersion = childrenVersion;
+		childrenVersion = newChildrenVersion;
+		if (eNotificationRequired())
+			eNotify(new ENotificationImpl(this, Notification.SET, HadoopPackage.ZNODE__CHILDREN_VERSION, oldChildrenVersion, childrenVersion));
+	}
+
+	/**
+	 * <!-- begin-user-doc --> <!-- end-user-doc -->
+	 * @generated
+	 */
+	public int getAclVersion() {
+		return aclVersion;
+	}
+
+	/**
+	 * <!-- begin-user-doc --> <!-- end-user-doc -->
+	 * @generated
+	 */
+	public void setAclVersion(int newAclVersion) {
+		int oldAclVersion = aclVersion;
+		aclVersion = newAclVersion;
+		if (eNotificationRequired())
+			eNotify(new ENotificationImpl(this, Notification.SET, HadoopPackage.ZNODE__ACL_VERSION, oldAclVersion, aclVersion));
+	}
+
+	/**
+	 * <!-- begin-user-doc --> <!-- end-user-doc -->
+	 * @generated
+	 */
+	public long getEphermalOwnerSessionId() {
+		return ephermalOwnerSessionId;
+	}
+
+	/**
+	 * <!-- begin-user-doc --> <!-- end-user-doc -->
+	 * @generated
+	 */
+	public void setEphermalOwnerSessionId(long newEphermalOwnerSessionId) {
+		long oldEphermalOwnerSessionId = ephermalOwnerSessionId;
+		ephermalOwnerSessionId = newEphermalOwnerSessionId;
+		if (eNotificationRequired())
+			eNotify(new ENotificationImpl(this, Notification.SET, HadoopPackage.ZNODE__EPHERMAL_OWNER_SESSION_ID, oldEphermalOwnerSessionId, ephermalOwnerSessionId));
+	}
+
+	/**
+	 * <!-- begin-user-doc --> <!-- end-user-doc -->
+	 * @generated
+	 */
+	public int getDataLength() {
+		return dataLength;
+	}
+
+	/**
+	 * <!-- begin-user-doc --> <!-- end-user-doc -->
+	 * @generated
+	 */
+	public void setDataLength(int newDataLength) {
+		int oldDataLength = dataLength;
+		dataLength = newDataLength;
+		if (eNotificationRequired())
+			eNotify(new ENotificationImpl(this, Notification.SET, HadoopPackage.ZNODE__DATA_LENGTH, oldDataLength, dataLength));
+	}
+
+	/**
+	 * <!-- begin-user-doc --> <!-- end-user-doc -->
+	 * @generated
+	 */
+	public int getChildrenCount() {
+		return childrenCount;
+	}
+
+	/**
+	 * <!-- begin-user-doc --> <!-- end-user-doc -->
+	 * @generated
+	 */
+	public void setChildrenCount(int newChildrenCount) {
+		int oldChildrenCount = childrenCount;
+		childrenCount = newChildrenCount;
+		if (eNotificationRequired())
+			eNotify(new ENotificationImpl(this, Notification.SET, HadoopPackage.ZNODE__CHILDREN_COUNT, oldChildrenCount, childrenCount));
+	}
+
+	/**
+	 * <!-- begin-user-doc --> <!-- end-user-doc -->
+	 * @generated
+	 */
+	public ZNode getParent() {
+		if (parent != null && parent.eIsProxy()) {
+			InternalEObject oldParent = (InternalEObject)parent;
+			parent = (ZNode)eResolveProxy(oldParent);
+			if (parent != oldParent) {
+				if (eNotificationRequired())
+					eNotify(new ENotificationImpl(this, Notification.RESOLVE, HadoopPackage.ZNODE__PARENT, oldParent, parent));
+			}
+		}
+		return parent;
+	}
+
+	/**
+	 * <!-- begin-user-doc --> <!-- end-user-doc -->
+	 * @generated
+	 */
+	public ZNode basicGetParent() {
+		return parent;
+	}
+
+	/**
+	 * <!-- begin-user-doc --> <!-- end-user-doc -->
+	 * @generated
+	 */
+	public void setParent(ZNode newParent) {
+		ZNode oldParent = parent;
+		parent = newParent;
+		if (eNotificationRequired())
+			eNotify(new ENotificationImpl(this, Notification.SET, HadoopPackage.ZNODE__PARENT, oldParent, parent));
+	}
+
+	/**
+	 * <!-- begin-user-doc --> <!-- end-user-doc -->
+	 * @generated
+	 */
+	public String getNodeName() {
+		return nodeName;
+	}
+
+	/**
+	 * <!-- begin-user-doc --> <!-- end-user-doc -->
+	 * @generated
+	 */
+	public void setNodeName(String newNodeName) {
+		String oldNodeName = nodeName;
+		nodeName = newNodeName;
+		if (eNotificationRequired())
+			eNotify(new ENotificationImpl(this, Notification.SET, HadoopPackage.ZNODE__NODE_NAME, oldNodeName, nodeName));
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public boolean isSequential() {
+		return sequential;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public void setSequential(boolean newSequential) {
+		boolean oldSequential = sequential;
+		sequential = newSequential;
+		if (eNotificationRequired())
+			eNotify(new ENotificationImpl(this, Notification.SET, HadoopPackage.ZNODE__SEQUENTIAL, oldSequential, sequential));
+	}
+
+	/**
+	 * <!-- begin-user-doc --> <!-- end-user-doc -->
+	 * @generated
+	 */
+	public ZooKeeperServer getServer() {
+		if (this instanceof org.apache.hdt.core.internal.model.ZooKeeperServer)
+			return (org.apache.hdt.core.internal.model.ZooKeeperServer) this;
+		else
+			return getParent().getServer();
+	}
+
+	/**
+	 * <!-- begin-user-doc --> <!-- end-user-doc -->
+	 * @generated
+	 */
+	public String getPath() {
+		if (this instanceof org.apache.hdt.core.internal.model.ZooKeeperServer)
+			return "/";
+		else {
+			String parentPath = getParent().getPath();
+			return parentPath.endsWith("/") ? parentPath + getNodeName() : parentPath + "/" + getNodeName();
+		}
+	}
+
+	/**
+	 * <!-- begin-user-doc --> <!-- end-user-doc -->
+	 * @generated
+	 */
+	@Override
+	public NotificationChain eInverseRemove(InternalEObject otherEnd, int featureID, NotificationChain msgs) {
+		switch (featureID) {
+			case HadoopPackage.ZNODE__CHILDREN:
+				return ((InternalEList<?>)getChildren()).basicRemove(otherEnd, msgs);
+		}
+		return super.eInverseRemove(otherEnd, featureID, msgs);
+	}
+
+	/**
+	 * <!-- begin-user-doc --> <!-- end-user-doc -->
+	 * @generated
+	 */
+	@Override
+	public Object eGet(int featureID, boolean resolve, boolean coreType) {
+		switch (featureID) {
+			case HadoopPackage.ZNODE__CHILDREN:
+				return getChildren();
+			case HadoopPackage.ZNODE__LAST_REFRESH:
+				return getLastRefresh();
+			case HadoopPackage.ZNODE__REFRESHING:
+				return isRefreshing();
+			case HadoopPackage.ZNODE__EPHERMERAL:
+				return isEphermeral();
+			case HadoopPackage.ZNODE__CREATION_ID:
+				return getCreationId();
+			case HadoopPackage.ZNODE__MODIFIED_ID:
+				return getModifiedId();
+			case HadoopPackage.ZNODE__CREATION_TIME:
+				return getCreationTime();
+			case HadoopPackage.ZNODE__MODIFIED_TIME:
+				return getModifiedTime();
+			case HadoopPackage.ZNODE__VERSION:
+				return getVersion();
+			case HadoopPackage.ZNODE__CHILDREN_VERSION:
+				return getChildrenVersion();
+			case HadoopPackage.ZNODE__ACL_VERSION:
+				return getAclVersion();
+			case HadoopPackage.ZNODE__EPHERMAL_OWNER_SESSION_ID:
+				return getEphermalOwnerSessionId();
+			case HadoopPackage.ZNODE__DATA_LENGTH:
+				return getDataLength();
+			case HadoopPackage.ZNODE__CHILDREN_COUNT:
+				return getChildrenCount();
+			case HadoopPackage.ZNODE__PARENT:
+				if (resolve) return getParent();
+				return basicGetParent();
+			case HadoopPackage.ZNODE__NODE_NAME:
+				return getNodeName();
+			case HadoopPackage.ZNODE__SEQUENTIAL:
+				return isSequential();
+		}
+		return super.eGet(featureID, resolve, coreType);
+	}
+
+	/**
+	 * <!-- begin-user-doc --> <!-- end-user-doc -->
+	 * @generated
+	 */
+	@SuppressWarnings("unchecked")
+	@Override
+	public void eSet(int featureID, Object newValue) {
+		switch (featureID) {
+			case HadoopPackage.ZNODE__CHILDREN:
+				getChildren().clear();
+				getChildren().addAll((Collection<? extends ZNode>)newValue);
+				return;
+			case HadoopPackage.ZNODE__LAST_REFRESH:
+				setLastRefresh((Long)newValue);
+				return;
+			case HadoopPackage.ZNODE__REFRESHING:
+				setRefreshing((Boolean)newValue);
+				return;
+			case HadoopPackage.ZNODE__EPHERMERAL:
+				setEphermeral((Boolean)newValue);
+				return;
+			case HadoopPackage.ZNODE__CREATION_ID:
+				setCreationId((Long)newValue);
+				return;
+			case HadoopPackage.ZNODE__MODIFIED_ID:
+				setModifiedId((Long)newValue);
+				return;
+			case HadoopPackage.ZNODE__CREATION_TIME:
+				setCreationTime((Long)newValue);
+				return;
+			case HadoopPackage.ZNODE__MODIFIED_TIME:
+				setModifiedTime((Long)newValue);
+				return;
+			case HadoopPackage.ZNODE__VERSION:
+				setVersion((Integer)newValue);
+				return;
+			case HadoopPackage.ZNODE__CHILDREN_VERSION:
+				setChildrenVersion((Integer)newValue);
+				return;
+			case HadoopPackage.ZNODE__ACL_VERSION:
+				setAclVersion((Integer)newValue);
+				return;
+			case HadoopPackage.ZNODE__EPHERMAL_OWNER_SESSION_ID:
+				setEphermalOwnerSessionId((Long)newValue);
+				return;
+			case HadoopPackage.ZNODE__DATA_LENGTH:
+				setDataLength((Integer)newValue);
+				return;
+			case HadoopPackage.ZNODE__CHILDREN_COUNT:
+				setChildrenCount((Integer)newValue);
+				return;
+			case HadoopPackage.ZNODE__PARENT:
+				setParent((ZNode)newValue);
+				return;
+			case HadoopPackage.ZNODE__NODE_NAME:
+				setNodeName((String)newValue);
+				return;
+			case HadoopPackage.ZNODE__SEQUENTIAL:
+				setSequential((Boolean)newValue);
+				return;
+		}
+		super.eSet(featureID, newValue);
+	}
+
+	/**
+	 * <!-- begin-user-doc --> <!-- end-user-doc -->
+	 * @generated
+	 */
+	@Override
+	public void eUnset(int featureID) {
+		switch (featureID) {
+			case HadoopPackage.ZNODE__CHILDREN:
+				getChildren().clear();
+				return;
+			case HadoopPackage.ZNODE__LAST_REFRESH:
+				setLastRefresh(LAST_REFRESH_EDEFAULT);
+				return;
+			case HadoopPackage.ZNODE__REFRESHING:
+				setRefreshing(REFRESHING_EDEFAULT);
+				return;
+			case HadoopPackage.ZNODE__EPHERMERAL:
+				setEphermeral(EPHERMERAL_EDEFAULT);
+				return;
+			case HadoopPackage.ZNODE__CREATION_ID:
+				setCreationId(CREATION_ID_EDEFAULT);
+				return;
+			case HadoopPackage.ZNODE__MODIFIED_ID:
+				setModifiedId(MODIFIED_ID_EDEFAULT);
+				return;
+			case HadoopPackage.ZNODE__CREATION_TIME:
+				setCreationTime(CREATION_TIME_EDEFAULT);
+				return;
+			case HadoopPackage.ZNODE__MODIFIED_TIME:
+				setModifiedTime(MODIFIED_TIME_EDEFAULT);
+				return;
+			case HadoopPackage.ZNODE__VERSION:
+				setVersion(VERSION_EDEFAULT);
+				return;
+			case HadoopPackage.ZNODE__CHILDREN_VERSION:
+				setChildrenVersion(CHILDREN_VERSION_EDEFAULT);
+				return;
+			case HadoopPackage.ZNODE__ACL_VERSION:
+				setAclVersion(ACL_VERSION_EDEFAULT);
+				return;
+			case HadoopPackage.ZNODE__EPHERMAL_OWNER_SESSION_ID:
+				setEphermalOwnerSessionId(EPHERMAL_OWNER_SESSION_ID_EDEFAULT);
+				return;
+			case HadoopPackage.ZNODE__DATA_LENGTH:
+				setDataLength(DATA_LENGTH_EDEFAULT);
+				return;
+			case HadoopPackage.ZNODE__CHILDREN_COUNT:
+				setChildrenCount(CHILDREN_COUNT_EDEFAULT);
+				return;
+			case HadoopPackage.ZNODE__PARENT:
+				setParent((ZNode)null);
+				return;
+			case HadoopPackage.ZNODE__NODE_NAME:
+				setNodeName(NODE_NAME_EDEFAULT);
+				return;
+			case HadoopPackage.ZNODE__SEQUENTIAL:
+				setSequential(SEQUENTIAL_EDEFAULT);
+				return;
+		}
+		super.eUnset(featureID);
+	}
+
+	/**
+	 * <!-- begin-user-doc --> <!-- end-user-doc -->
+	 * @generated
+	 */
+	@Override
+	public boolean eIsSet(int featureID) {
+		switch (featureID) {
+			case HadoopPackage.ZNODE__CHILDREN:
+				return children != null && !children.isEmpty();
+			case HadoopPackage.ZNODE__LAST_REFRESH:
+				return lastRefresh != LAST_REFRESH_EDEFAULT;
+			case HadoopPackage.ZNODE__REFRESHING:
+				return refreshing != REFRESHING_EDEFAULT;
+			case HadoopPackage.ZNODE__EPHERMERAL:
+				return ephermeral != EPHERMERAL_EDEFAULT;
+			case HadoopPackage.ZNODE__CREATION_ID:
+				return creationId != CREATION_ID_EDEFAULT;
+			case HadoopPackage.ZNODE__MODIFIED_ID:
+				return modifiedId != MODIFIED_ID_EDEFAULT;
+			case HadoopPackage.ZNODE__CREATION_TIME:
+				return creationTime != CREATION_TIME_EDEFAULT;
+			case HadoopPackage.ZNODE__MODIFIED_TIME:
+				return modifiedTime != MODIFIED_TIME_EDEFAULT;
+			case HadoopPackage.ZNODE__VERSION:
+				return version != VERSION_EDEFAULT;
+			case HadoopPackage.ZNODE__CHILDREN_VERSION:
+				return childrenVersion != CHILDREN_VERSION_EDEFAULT;
+			case HadoopPackage.ZNODE__ACL_VERSION:
+				return aclVersion != ACL_VERSION_EDEFAULT;
+			case HadoopPackage.ZNODE__EPHERMAL_OWNER_SESSION_ID:
+				return ephermalOwnerSessionId != EPHERMAL_OWNER_SESSION_ID_EDEFAULT;
+			case HadoopPackage.ZNODE__DATA_LENGTH:
+				return dataLength != DATA_LENGTH_EDEFAULT;
+			case HadoopPackage.ZNODE__CHILDREN_COUNT:
+				return childrenCount != CHILDREN_COUNT_EDEFAULT;
+			case HadoopPackage.ZNODE__PARENT:
+				return parent != null;
+			case HadoopPackage.ZNODE__NODE_NAME:
+				return NODE_NAME_EDEFAULT == null ? nodeName != null : !NODE_NAME_EDEFAULT.equals(nodeName);
+			case HadoopPackage.ZNODE__SEQUENTIAL:
+				return sequential != SEQUENTIAL_EDEFAULT;
+		}
+		return super.eIsSet(featureID);
+	}
+
+	/**
+	 * <!-- begin-user-doc --> <!-- end-user-doc -->
+	 * @generated
+	 */
+	@Override
+	public String toString() {
+		if (eIsProxy()) return super.toString();
+
+		StringBuffer result = new StringBuffer(super.toString());
+		result.append(" (lastRefresh: ");
+		result.append(lastRefresh);
+		result.append(", refreshing: ");
+		result.append(refreshing);
+		result.append(", ephermeral: ");
+		result.append(ephermeral);
+		result.append(", creationId: ");
+		result.append(creationId);
+		result.append(", modifiedId: ");
+		result.append(modifiedId);
+		result.append(", creationTime: ");
+		result.append(creationTime);
+		result.append(", modifiedTime: ");
+		result.append(modifiedTime);
+		result.append(", version: ");
+		result.append(version);
+		result.append(", childrenVersion: ");
+		result.append(childrenVersion);
+		result.append(", aclVersion: ");
+		result.append(aclVersion);
+		result.append(", ephermalOwnerSessionId: ");
+		result.append(ephermalOwnerSessionId);
+		result.append(", dataLength: ");
+		result.append(dataLength);
+		result.append(", childrenCount: ");
+		result.append(childrenCount);
+		result.append(", nodeName: ");
+		result.append(nodeName);
+		result.append(", sequential: ");
+		result.append(sequential);
+		result.append(')');
+		return result.toString();
+	}
+
+} // ZNodeImpl
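getPath() and getServer() resolve a node's location by walking the parent chain until they reach the ZooKeeperServer object that doubles as the root of the tree. The helper below mirrors that recursion outside the model classes, purely as an illustration; the class and method names are hypothetical.

    import org.apache.hdt.core.internal.model.ZNode;
    import org.apache.hdt.core.internal.model.ZooKeeperServer;

    public final class ZNodePaths {
        private ZNodePaths() {}

        // Same walk as ZNode.getPath(): the server is "/", every other node
        // appends its name to the parent's path with a single separator.
        public static String pathOf(ZNode node) {
            if (node instanceof ZooKeeperServer) {
                return "/";
            }
            String parentPath = pathOf(node.getParent());
            return parentPath.endsWith("/")
                    ? parentPath + node.getNodeName()
                    : parentPath + "/" + node.getNodeName();
        }
    }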


http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/impl/ZooKeeperServerImpl.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/impl/ZooKeeperServerImpl.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/impl/ZooKeeperServerImpl.java
new file mode 100644
index 0000000..d09bc18
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/impl/ZooKeeperServerImpl.java
@@ -0,0 +1,1109 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *  
+ */
+package org.apache.hdt.core.internal.model.impl;
+
+import java.util.Collection;
+
+import org.apache.hdt.core.internal.model.HadoopPackage;
+import org.apache.hdt.core.internal.model.ZNode;
+import org.apache.hdt.core.internal.model.ZNodeType;
+import org.apache.hdt.core.internal.model.ZooKeeperServer;
+
+import org.eclipse.emf.common.notify.Notification;
+import org.eclipse.emf.common.notify.NotificationChain;
+import org.eclipse.emf.common.util.EList;
+import org.eclipse.emf.ecore.EClass;
+import org.eclipse.emf.ecore.InternalEObject;
+import org.eclipse.emf.ecore.impl.ENotificationImpl;
+import org.eclipse.emf.ecore.util.EObjectContainmentEList;
+import org.eclipse.emf.ecore.util.InternalEList;
+import org.eclipse.emf.ecore.util.EObjectResolvingEList;
+
+/**
+ * <!-- begin-user-doc -->
+ * An implementation of the model object '<em><b>Zoo Keeper Server</b></em>'.
+ * <!-- end-user-doc -->
+ * <p>
+ * The following features are implemented:
+ * <ul>
+ *   <li>{@link org.apache.hdt.core.internal.model.impl.ZooKeeperServerImpl#getChildren <em>Children</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.impl.ZooKeeperServerImpl#getLastRefresh <em>Last Refresh</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.impl.ZooKeeperServerImpl#isRefreshing <em>Refreshing</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.impl.ZooKeeperServerImpl#isEphermeral <em>Ephermeral</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.impl.ZooKeeperServerImpl#getCreationId <em>Creation Id</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.impl.ZooKeeperServerImpl#getModifiedId <em>Modified Id</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.impl.ZooKeeperServerImpl#getCreationTime <em>Creation Time</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.impl.ZooKeeperServerImpl#getModifiedTime <em>Modified Time</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.impl.ZooKeeperServerImpl#getVersion <em>Version</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.impl.ZooKeeperServerImpl#getChildrenVersion <em>Children Version</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.impl.ZooKeeperServerImpl#getAclVersion <em>Acl Version</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.impl.ZooKeeperServerImpl#getEphermalOwnerSessionId <em>Ephermal Owner Session Id</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.impl.ZooKeeperServerImpl#getDataLength <em>Data Length</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.impl.ZooKeeperServerImpl#getChildrenCount <em>Children Count</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.impl.ZooKeeperServerImpl#getParent <em>Parent</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.impl.ZooKeeperServerImpl#getNodeName <em>Node Name</em>}</li>
+ *   <li>{@link org.apache.hdt.core.internal.model.impl.ZooKeeperServerImpl#isSequential <em>Sequential</em>}</li>
+ * </ul>
+ * </p>
+ *
+ * @generated
+ */
+public class ZooKeeperServerImpl extends ServerImpl implements ZooKeeperServer {
+	/**
+	 * The cached value of the '{@link #getChildren() <em>Children</em>}' containment reference list.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getChildren()
+	 * @generated
+	 * @ordered
+	 */
+	protected EList<ZNode> children;
+	/**
+	 * The default value of the '{@link #getLastRefresh() <em>Last Refresh</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getLastRefresh()
+	 * @generated
+	 * @ordered
+	 */
+	protected static final long LAST_REFRESH_EDEFAULT = -1L;
+	/**
+	 * The cached value of the '{@link #getLastRefresh() <em>Last Refresh</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getLastRefresh()
+	 * @generated
+	 * @ordered
+	 */
+	protected long lastRefresh = LAST_REFRESH_EDEFAULT;
+	/**
+	 * The default value of the '{@link #isRefreshing() <em>Refreshing</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #isRefreshing()
+	 * @generated
+	 * @ordered
+	 */
+	protected static final boolean REFRESHING_EDEFAULT = false;
+	/**
+	 * The cached value of the '{@link #isRefreshing() <em>Refreshing</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #isRefreshing()
+	 * @generated
+	 * @ordered
+	 */
+	protected boolean refreshing = REFRESHING_EDEFAULT;
+	/**
+	 * The default value of the '{@link #isEphermeral() <em>Ephermeral</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #isEphermeral()
+	 * @generated
+	 * @ordered
+	 */
+	protected static final boolean EPHERMERAL_EDEFAULT = false;
+	/**
+	 * The cached value of the '{@link #isEphermeral() <em>Ephermeral</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #isEphermeral()
+	 * @generated
+	 * @ordered
+	 */
+	protected boolean ephermeral = EPHERMERAL_EDEFAULT;
+	/**
+	 * The default value of the '{@link #getCreationId() <em>Creation Id</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getCreationId()
+	 * @generated
+	 * @ordered
+	 */
+	protected static final long CREATION_ID_EDEFAULT = -1L;
+	/**
+	 * The cached value of the '{@link #getCreationId() <em>Creation Id</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getCreationId()
+	 * @generated
+	 * @ordered
+	 */
+	protected long creationId = CREATION_ID_EDEFAULT;
+	/**
+	 * The default value of the '{@link #getModifiedId() <em>Modified Id</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getModifiedId()
+	 * @generated
+	 * @ordered
+	 */
+	protected static final long MODIFIED_ID_EDEFAULT = -1L;
+	/**
+	 * The cached value of the '{@link #getModifiedId() <em>Modified Id</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getModifiedId()
+	 * @generated
+	 * @ordered
+	 */
+	protected long modifiedId = MODIFIED_ID_EDEFAULT;
+	/**
+	 * The default value of the '{@link #getCreationTime() <em>Creation Time</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getCreationTime()
+	 * @generated
+	 * @ordered
+	 */
+	protected static final long CREATION_TIME_EDEFAULT = -1L;
+	/**
+	 * The cached value of the '{@link #getCreationTime() <em>Creation Time</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getCreationTime()
+	 * @generated
+	 * @ordered
+	 */
+	protected long creationTime = CREATION_TIME_EDEFAULT;
+	/**
+	 * The default value of the '{@link #getModifiedTime() <em>Modified Time</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getModifiedTime()
+	 * @generated
+	 * @ordered
+	 */
+	protected static final long MODIFIED_TIME_EDEFAULT = -1L;
+	/**
+	 * The cached value of the '{@link #getModifiedTime() <em>Modified Time</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getModifiedTime()
+	 * @generated
+	 * @ordered
+	 */
+	protected long modifiedTime = MODIFIED_TIME_EDEFAULT;
+	/**
+	 * The default value of the '{@link #getVersion() <em>Version</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getVersion()
+	 * @generated
+	 * @ordered
+	 */
+	protected static final int VERSION_EDEFAULT = -1;
+	/**
+	 * The cached value of the '{@link #getVersion() <em>Version</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getVersion()
+	 * @generated
+	 * @ordered
+	 */
+	protected int version = VERSION_EDEFAULT;
+	/**
+	 * The default value of the '{@link #getChildrenVersion() <em>Children Version</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getChildrenVersion()
+	 * @generated
+	 * @ordered
+	 */
+	protected static final int CHILDREN_VERSION_EDEFAULT = -1;
+	/**
+	 * The cached value of the '{@link #getChildrenVersion() <em>Children Version</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getChildrenVersion()
+	 * @generated
+	 * @ordered
+	 */
+	protected int childrenVersion = CHILDREN_VERSION_EDEFAULT;
+	/**
+	 * The default value of the '{@link #getAclVersion() <em>Acl Version</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getAclVersion()
+	 * @generated
+	 * @ordered
+	 */
+	protected static final int ACL_VERSION_EDEFAULT = -1;
+	/**
+	 * The cached value of the '{@link #getAclVersion() <em>Acl Version</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getAclVersion()
+	 * @generated
+	 * @ordered
+	 */
+	protected int aclVersion = ACL_VERSION_EDEFAULT;
+	/**
+	 * The default value of the '{@link #getEphermalOwnerSessionId() <em>Ephermal Owner Session Id</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getEphermalOwnerSessionId()
+	 * @generated
+	 * @ordered
+	 */
+	protected static final long EPHERMAL_OWNER_SESSION_ID_EDEFAULT = -1L;
+	/**
+	 * The cached value of the '{@link #getEphermalOwnerSessionId() <em>Ephermal Owner Session Id</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getEphermalOwnerSessionId()
+	 * @generated
+	 * @ordered
+	 */
+	protected long ephermalOwnerSessionId = EPHERMAL_OWNER_SESSION_ID_EDEFAULT;
+	/**
+	 * The default value of the '{@link #getDataLength() <em>Data Length</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getDataLength()
+	 * @generated
+	 * @ordered
+	 */
+	protected static final int DATA_LENGTH_EDEFAULT = -1;
+	/**
+	 * The cached value of the '{@link #getDataLength() <em>Data Length</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getDataLength()
+	 * @generated
+	 * @ordered
+	 */
+	protected int dataLength = DATA_LENGTH_EDEFAULT;
+	/**
+	 * The default value of the '{@link #getChildrenCount() <em>Children Count</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getChildrenCount()
+	 * @generated
+	 * @ordered
+	 */
+	protected static final int CHILDREN_COUNT_EDEFAULT = 0;
+	/**
+	 * The cached value of the '{@link #getChildrenCount() <em>Children Count</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getChildrenCount()
+	 * @generated
+	 * @ordered
+	 */
+	protected int childrenCount = CHILDREN_COUNT_EDEFAULT;
+	/**
+	 * The cached value of the '{@link #getParent() <em>Parent</em>}' reference.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getParent()
+	 * @generated
+	 * @ordered
+	 */
+	protected ZNode parent;
+	/**
+	 * The default value of the '{@link #getNodeName() <em>Node Name</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getNodeName()
+	 * @generated
+	 * @ordered
+	 */
+	protected static final String NODE_NAME_EDEFAULT = null;
+	/**
+	 * The cached value of the '{@link #getNodeName() <em>Node Name</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #getNodeName()
+	 * @generated
+	 * @ordered
+	 */
+	protected String nodeName = NODE_NAME_EDEFAULT;
+	/**
+	 * The default value of the '{@link #isSequential() <em>Sequential</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #isSequential()
+	 * @generated
+	 * @ordered
+	 */
+	protected static final boolean SEQUENTIAL_EDEFAULT = false;
+	/**
+	 * The cached value of the '{@link #isSequential() <em>Sequential</em>}' attribute.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @see #isSequential()
+	 * @generated
+	 * @ordered
+	 */
+	protected boolean sequential = SEQUENTIAL_EDEFAULT;
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	protected ZooKeeperServerImpl() {
+		super();
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	@Override
+	protected EClass eStaticClass() {
+		return HadoopPackage.Literals.ZOO_KEEPER_SERVER;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public EList<ZNode> getChildren() {
+		if (children == null) {
+			children = new EObjectContainmentEList<ZNode>(ZNode.class, this, HadoopPackage.ZOO_KEEPER_SERVER__CHILDREN);
+		}
+		return children;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public long getLastRefresh() {
+		return lastRefresh;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public void setLastRefresh(long newLastRefresh) {
+		long oldLastRefresh = lastRefresh;
+		lastRefresh = newLastRefresh;
+		if (eNotificationRequired())
+			eNotify(new ENotificationImpl(this, Notification.SET, HadoopPackage.ZOO_KEEPER_SERVER__LAST_REFRESH, oldLastRefresh, lastRefresh));
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public boolean isRefreshing() {
+		return refreshing;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public void setRefreshing(boolean newRefreshing) {
+		boolean oldRefreshing = refreshing;
+		refreshing = newRefreshing;
+		if (eNotificationRequired())
+			eNotify(new ENotificationImpl(this, Notification.SET, HadoopPackage.ZOO_KEEPER_SERVER__REFRESHING, oldRefreshing, refreshing));
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public boolean isEphermeral() {
+		return ephermeral;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public void setEphermeral(boolean newEphermeral) {
+		boolean oldEphermeral = ephermeral;
+		ephermeral = newEphermeral;
+		if (eNotificationRequired())
+			eNotify(new ENotificationImpl(this, Notification.SET, HadoopPackage.ZOO_KEEPER_SERVER__EPHERMERAL, oldEphermeral, ephermeral));
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public long getCreationId() {
+		return creationId;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public void setCreationId(long newCreationId) {
+		long oldCreationId = creationId;
+		creationId = newCreationId;
+		if (eNotificationRequired())
+			eNotify(new ENotificationImpl(this, Notification.SET, HadoopPackage.ZOO_KEEPER_SERVER__CREATION_ID, oldCreationId, creationId));
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public long getModifiedId() {
+		return modifiedId;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public void setModifiedId(long newModifiedId) {
+		long oldModifiedId = modifiedId;
+		modifiedId = newModifiedId;
+		if (eNotificationRequired())
+			eNotify(new ENotificationImpl(this, Notification.SET, HadoopPackage.ZOO_KEEPER_SERVER__MODIFIED_ID, oldModifiedId, modifiedId));
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public long getCreationTime() {
+		return creationTime;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public void setCreationTime(long newCreationTime) {
+		long oldCreationTime = creationTime;
+		creationTime = newCreationTime;
+		if (eNotificationRequired())
+			eNotify(new ENotificationImpl(this, Notification.SET, HadoopPackage.ZOO_KEEPER_SERVER__CREATION_TIME, oldCreationTime, creationTime));
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public long getModifiedTime() {
+		return modifiedTime;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public void setModifiedTime(long newModifiedTime) {
+		long oldModifiedTime = modifiedTime;
+		modifiedTime = newModifiedTime;
+		if (eNotificationRequired())
+			eNotify(new ENotificationImpl(this, Notification.SET, HadoopPackage.ZOO_KEEPER_SERVER__MODIFIED_TIME, oldModifiedTime, modifiedTime));
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public int getVersion() {
+		return version;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public void setVersion(int newVersion) {
+		int oldVersion = version;
+		version = newVersion;
+		if (eNotificationRequired())
+			eNotify(new ENotificationImpl(this, Notification.SET, HadoopPackage.ZOO_KEEPER_SERVER__VERSION, oldVersion, version));
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public int getChildrenVersion() {
+		return childrenVersion;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public void setChildrenVersion(int newChildrenVersion) {
+		int oldChildrenVersion = childrenVersion;
+		childrenVersion = newChildrenVersion;
+		if (eNotificationRequired())
+			eNotify(new ENotificationImpl(this, Notification.SET, HadoopPackage.ZOO_KEEPER_SERVER__CHILDREN_VERSION, oldChildrenVersion, childrenVersion));
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public int getAclVersion() {
+		return aclVersion;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public void setAclVersion(int newAclVersion) {
+		int oldAclVersion = aclVersion;
+		aclVersion = newAclVersion;
+		if (eNotificationRequired())
+			eNotify(new ENotificationImpl(this, Notification.SET, HadoopPackage.ZOO_KEEPER_SERVER__ACL_VERSION, oldAclVersion, aclVersion));
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public long getEphermalOwnerSessionId() {
+		return ephermalOwnerSessionId;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public void setEphermalOwnerSessionId(long newEphermalOwnerSessionId) {
+		long oldEphermalOwnerSessionId = ephermalOwnerSessionId;
+		ephermalOwnerSessionId = newEphermalOwnerSessionId;
+		if (eNotificationRequired())
+			eNotify(new ENotificationImpl(this, Notification.SET, HadoopPackage.ZOO_KEEPER_SERVER__EPHERMAL_OWNER_SESSION_ID, oldEphermalOwnerSessionId, ephermalOwnerSessionId));
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public int getDataLength() {
+		return dataLength;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public void setDataLength(int newDataLength) {
+		int oldDataLength = dataLength;
+		dataLength = newDataLength;
+		if (eNotificationRequired())
+			eNotify(new ENotificationImpl(this, Notification.SET, HadoopPackage.ZOO_KEEPER_SERVER__DATA_LENGTH, oldDataLength, dataLength));
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public int getChildrenCount() {
+		return childrenCount;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public void setChildrenCount(int newChildrenCount) {
+		int oldChildrenCount = childrenCount;
+		childrenCount = newChildrenCount;
+		if (eNotificationRequired())
+			eNotify(new ENotificationImpl(this, Notification.SET, HadoopPackage.ZOO_KEEPER_SERVER__CHILDREN_COUNT, oldChildrenCount, childrenCount));
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public ZNode getParent() {
+		if (parent != null && parent.eIsProxy()) {
+			InternalEObject oldParent = (InternalEObject)parent;
+			parent = (ZNode)eResolveProxy(oldParent);
+			if (parent != oldParent) {
+				if (eNotificationRequired())
+					eNotify(new ENotificationImpl(this, Notification.RESOLVE, HadoopPackage.ZOO_KEEPER_SERVER__PARENT, oldParent, parent));
+			}
+		}
+		return parent;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public ZNode basicGetParent() {
+		return parent;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public void setParent(ZNode newParent) {
+		ZNode oldParent = parent;
+		parent = newParent;
+		if (eNotificationRequired())
+			eNotify(new ENotificationImpl(this, Notification.SET, HadoopPackage.ZOO_KEEPER_SERVER__PARENT, oldParent, parent));
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public String getNodeName() {
+		return nodeName;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public void setNodeName(String newNodeName) {
+		String oldNodeName = nodeName;
+		nodeName = newNodeName;
+		if (eNotificationRequired())
+			eNotify(new ENotificationImpl(this, Notification.SET, HadoopPackage.ZOO_KEEPER_SERVER__NODE_NAME, oldNodeName, nodeName));
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public boolean isSequential() {
+		return sequential;
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public void setSequential(boolean newSequential) {
+		boolean oldSequential = sequential;
+		sequential = newSequential;
+		if (eNotificationRequired())
+			eNotify(new ENotificationImpl(this, Notification.SET, HadoopPackage.ZOO_KEEPER_SERVER__SEQUENTIAL, oldSequential, sequential));
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public ZooKeeperServer getServer() {
+		if (this instanceof org.apache.hdt.core.internal.model.ZooKeeperServer)
+			return (org.apache.hdt.core.internal.model.ZooKeeperServer) this;
+		else
+			return getParent().getServer();
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public String getPath() {
+		if (this instanceof org.apache.hdt.core.internal.model.ZooKeeperServer)
+			return "/";
+		else {
+			String parentPath = getParent().getPath();
+			return parentPath.endsWith("/") ? parentPath + getNodeName() : parentPath + "/" + getNodeName();
+		}
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	@Override
+	public NotificationChain eInverseRemove(InternalEObject otherEnd, int featureID, NotificationChain msgs) {
+		switch (featureID) {
+			case HadoopPackage.ZOO_KEEPER_SERVER__CHILDREN:
+				return ((InternalEList<?>)getChildren()).basicRemove(otherEnd, msgs);
+		}
+		return super.eInverseRemove(otherEnd, featureID, msgs);
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	@Override
+	public Object eGet(int featureID, boolean resolve, boolean coreType) {
+		switch (featureID) {
+			case HadoopPackage.ZOO_KEEPER_SERVER__CHILDREN:
+				return getChildren();
+			case HadoopPackage.ZOO_KEEPER_SERVER__LAST_REFRESH:
+				return getLastRefresh();
+			case HadoopPackage.ZOO_KEEPER_SERVER__REFRESHING:
+				return isRefreshing();
+			case HadoopPackage.ZOO_KEEPER_SERVER__EPHERMERAL:
+				return isEphermeral();
+			case HadoopPackage.ZOO_KEEPER_SERVER__CREATION_ID:
+				return getCreationId();
+			case HadoopPackage.ZOO_KEEPER_SERVER__MODIFIED_ID:
+				return getModifiedId();
+			case HadoopPackage.ZOO_KEEPER_SERVER__CREATION_TIME:
+				return getCreationTime();
+			case HadoopPackage.ZOO_KEEPER_SERVER__MODIFIED_TIME:
+				return getModifiedTime();
+			case HadoopPackage.ZOO_KEEPER_SERVER__VERSION:
+				return getVersion();
+			case HadoopPackage.ZOO_KEEPER_SERVER__CHILDREN_VERSION:
+				return getChildrenVersion();
+			case HadoopPackage.ZOO_KEEPER_SERVER__ACL_VERSION:
+				return getAclVersion();
+			case HadoopPackage.ZOO_KEEPER_SERVER__EPHERMAL_OWNER_SESSION_ID:
+				return getEphermalOwnerSessionId();
+			case HadoopPackage.ZOO_KEEPER_SERVER__DATA_LENGTH:
+				return getDataLength();
+			case HadoopPackage.ZOO_KEEPER_SERVER__CHILDREN_COUNT:
+				return getChildrenCount();
+			case HadoopPackage.ZOO_KEEPER_SERVER__PARENT:
+				if (resolve) return getParent();
+				return basicGetParent();
+			case HadoopPackage.ZOO_KEEPER_SERVER__NODE_NAME:
+				return getNodeName();
+			case HadoopPackage.ZOO_KEEPER_SERVER__SEQUENTIAL:
+				return isSequential();
+		}
+		return super.eGet(featureID, resolve, coreType);
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	@SuppressWarnings("unchecked")
+	@Override
+	public void eSet(int featureID, Object newValue) {
+		switch (featureID) {
+			case HadoopPackage.ZOO_KEEPER_SERVER__CHILDREN:
+				getChildren().clear();
+				getChildren().addAll((Collection<? extends ZNode>)newValue);
+				return;
+			case HadoopPackage.ZOO_KEEPER_SERVER__LAST_REFRESH:
+				setLastRefresh((Long)newValue);
+				return;
+			case HadoopPackage.ZOO_KEEPER_SERVER__REFRESHING:
+				setRefreshing((Boolean)newValue);
+				return;
+			case HadoopPackage.ZOO_KEEPER_SERVER__EPHERMERAL:
+				setEphermeral((Boolean)newValue);
+				return;
+			case HadoopPackage.ZOO_KEEPER_SERVER__CREATION_ID:
+				setCreationId((Long)newValue);
+				return;
+			case HadoopPackage.ZOO_KEEPER_SERVER__MODIFIED_ID:
+				setModifiedId((Long)newValue);
+				return;
+			case HadoopPackage.ZOO_KEEPER_SERVER__CREATION_TIME:
+				setCreationTime((Long)newValue);
+				return;
+			case HadoopPackage.ZOO_KEEPER_SERVER__MODIFIED_TIME:
+				setModifiedTime((Long)newValue);
+				return;
+			case HadoopPackage.ZOO_KEEPER_SERVER__VERSION:
+				setVersion((Integer)newValue);
+				return;
+			case HadoopPackage.ZOO_KEEPER_SERVER__CHILDREN_VERSION:
+				setChildrenVersion((Integer)newValue);
+				return;
+			case HadoopPackage.ZOO_KEEPER_SERVER__ACL_VERSION:
+				setAclVersion((Integer)newValue);
+				return;
+			case HadoopPackage.ZOO_KEEPER_SERVER__EPHERMAL_OWNER_SESSION_ID:
+				setEphermalOwnerSessionId((Long)newValue);
+				return;
+			case HadoopPackage.ZOO_KEEPER_SERVER__DATA_LENGTH:
+				setDataLength((Integer)newValue);
+				return;
+			case HadoopPackage.ZOO_KEEPER_SERVER__CHILDREN_COUNT:
+				setChildrenCount((Integer)newValue);
+				return;
+			case HadoopPackage.ZOO_KEEPER_SERVER__PARENT:
+				setParent((ZNode)newValue);
+				return;
+			case HadoopPackage.ZOO_KEEPER_SERVER__NODE_NAME:
+				setNodeName((String)newValue);
+				return;
+			case HadoopPackage.ZOO_KEEPER_SERVER__SEQUENTIAL:
+				setSequential((Boolean)newValue);
+				return;
+		}
+		super.eSet(featureID, newValue);
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	@Override
+	public void eUnset(int featureID) {
+		switch (featureID) {
+			case HadoopPackage.ZOO_KEEPER_SERVER__CHILDREN:
+				getChildren().clear();
+				return;
+			case HadoopPackage.ZOO_KEEPER_SERVER__LAST_REFRESH:
+				setLastRefresh(LAST_REFRESH_EDEFAULT);
+				return;
+			case HadoopPackage.ZOO_KEEPER_SERVER__REFRESHING:
+				setRefreshing(REFRESHING_EDEFAULT);
+				return;
+			case HadoopPackage.ZOO_KEEPER_SERVER__EPHERMERAL:
+				setEphermeral(EPHERMERAL_EDEFAULT);
+				return;
+			case HadoopPackage.ZOO_KEEPER_SERVER__CREATION_ID:
+				setCreationId(CREATION_ID_EDEFAULT);
+				return;
+			case HadoopPackage.ZOO_KEEPER_SERVER__MODIFIED_ID:
+				setModifiedId(MODIFIED_ID_EDEFAULT);
+				return;
+			case HadoopPackage.ZOO_KEEPER_SERVER__CREATION_TIME:
+				setCreationTime(CREATION_TIME_EDEFAULT);
+				return;
+			case HadoopPackage.ZOO_KEEPER_SERVER__MODIFIED_TIME:
+				setModifiedTime(MODIFIED_TIME_EDEFAULT);
+				return;
+			case HadoopPackage.ZOO_KEEPER_SERVER__VERSION:
+				setVersion(VERSION_EDEFAULT);
+				return;
+			case HadoopPackage.ZOO_KEEPER_SERVER__CHILDREN_VERSION:
+				setChildrenVersion(CHILDREN_VERSION_EDEFAULT);
+				return;
+			case HadoopPackage.ZOO_KEEPER_SERVER__ACL_VERSION:
+				setAclVersion(ACL_VERSION_EDEFAULT);
+				return;
+			case HadoopPackage.ZOO_KEEPER_SERVER__EPHERMAL_OWNER_SESSION_ID:
+				setEphermalOwnerSessionId(EPHERMAL_OWNER_SESSION_ID_EDEFAULT);
+				return;
+			case HadoopPackage.ZOO_KEEPER_SERVER__DATA_LENGTH:
+				setDataLength(DATA_LENGTH_EDEFAULT);
+				return;
+			case HadoopPackage.ZOO_KEEPER_SERVER__CHILDREN_COUNT:
+				setChildrenCount(CHILDREN_COUNT_EDEFAULT);
+				return;
+			case HadoopPackage.ZOO_KEEPER_SERVER__PARENT:
+				setParent((ZNode)null);
+				return;
+			case HadoopPackage.ZOO_KEEPER_SERVER__NODE_NAME:
+				setNodeName(NODE_NAME_EDEFAULT);
+				return;
+			case HadoopPackage.ZOO_KEEPER_SERVER__SEQUENTIAL:
+				setSequential(SEQUENTIAL_EDEFAULT);
+				return;
+		}
+		super.eUnset(featureID);
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	@Override
+	public boolean eIsSet(int featureID) {
+		switch (featureID) {
+			case HadoopPackage.ZOO_KEEPER_SERVER__CHILDREN:
+				return children != null && !children.isEmpty();
+			case HadoopPackage.ZOO_KEEPER_SERVER__LAST_REFRESH:
+				return lastRefresh != LAST_REFRESH_EDEFAULT;
+			case HadoopPackage.ZOO_KEEPER_SERVER__REFRESHING:
+				return refreshing != REFRESHING_EDEFAULT;
+			case HadoopPackage.ZOO_KEEPER_SERVER__EPHERMERAL:
+				return ephermeral != EPHERMERAL_EDEFAULT;
+			case HadoopPackage.ZOO_KEEPER_SERVER__CREATION_ID:
+				return creationId != CREATION_ID_EDEFAULT;
+			case HadoopPackage.ZOO_KEEPER_SERVER__MODIFIED_ID:
+				return modifiedId != MODIFIED_ID_EDEFAULT;
+			case HadoopPackage.ZOO_KEEPER_SERVER__CREATION_TIME:
+				return creationTime != CREATION_TIME_EDEFAULT;
+			case HadoopPackage.ZOO_KEEPER_SERVER__MODIFIED_TIME:
+				return modifiedTime != MODIFIED_TIME_EDEFAULT;
+			case HadoopPackage.ZOO_KEEPER_SERVER__VERSION:
+				return version != VERSION_EDEFAULT;
+			case HadoopPackage.ZOO_KEEPER_SERVER__CHILDREN_VERSION:
+				return childrenVersion != CHILDREN_VERSION_EDEFAULT;
+			case HadoopPackage.ZOO_KEEPER_SERVER__ACL_VERSION:
+				return aclVersion != ACL_VERSION_EDEFAULT;
+			case HadoopPackage.ZOO_KEEPER_SERVER__EPHERMAL_OWNER_SESSION_ID:
+				return ephermalOwnerSessionId != EPHERMAL_OWNER_SESSION_ID_EDEFAULT;
+			case HadoopPackage.ZOO_KEEPER_SERVER__DATA_LENGTH:
+				return dataLength != DATA_LENGTH_EDEFAULT;
+			case HadoopPackage.ZOO_KEEPER_SERVER__CHILDREN_COUNT:
+				return childrenCount != CHILDREN_COUNT_EDEFAULT;
+			case HadoopPackage.ZOO_KEEPER_SERVER__PARENT:
+				return parent != null;
+			case HadoopPackage.ZOO_KEEPER_SERVER__NODE_NAME:
+				return NODE_NAME_EDEFAULT == null ? nodeName != null : !NODE_NAME_EDEFAULT.equals(nodeName);
+			case HadoopPackage.ZOO_KEEPER_SERVER__SEQUENTIAL:
+				return sequential != SEQUENTIAL_EDEFAULT;
+		}
+		return super.eIsSet(featureID);
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	@Override
+	public int eBaseStructuralFeatureID(int derivedFeatureID, Class<?> baseClass) {
+		if (baseClass == ZNode.class) {
+			switch (derivedFeatureID) {
+				case HadoopPackage.ZOO_KEEPER_SERVER__CHILDREN: return HadoopPackage.ZNODE__CHILDREN;
+				case HadoopPackage.ZOO_KEEPER_SERVER__LAST_REFRESH: return HadoopPackage.ZNODE__LAST_REFRESH;
+				case HadoopPackage.ZOO_KEEPER_SERVER__REFRESHING: return HadoopPackage.ZNODE__REFRESHING;
+				case HadoopPackage.ZOO_KEEPER_SERVER__EPHERMERAL: return HadoopPackage.ZNODE__EPHERMERAL;
+				case HadoopPackage.ZOO_KEEPER_SERVER__CREATION_ID: return HadoopPackage.ZNODE__CREATION_ID;
+				case HadoopPackage.ZOO_KEEPER_SERVER__MODIFIED_ID: return HadoopPackage.ZNODE__MODIFIED_ID;
+				case HadoopPackage.ZOO_KEEPER_SERVER__CREATION_TIME: return HadoopPackage.ZNODE__CREATION_TIME;
+				case HadoopPackage.ZOO_KEEPER_SERVER__MODIFIED_TIME: return HadoopPackage.ZNODE__MODIFIED_TIME;
+				case HadoopPackage.ZOO_KEEPER_SERVER__VERSION: return HadoopPackage.ZNODE__VERSION;
+				case HadoopPackage.ZOO_KEEPER_SERVER__CHILDREN_VERSION: return HadoopPackage.ZNODE__CHILDREN_VERSION;
+				case HadoopPackage.ZOO_KEEPER_SERVER__ACL_VERSION: return HadoopPackage.ZNODE__ACL_VERSION;
+				case HadoopPackage.ZOO_KEEPER_SERVER__EPHERMAL_OWNER_SESSION_ID: return HadoopPackage.ZNODE__EPHERMAL_OWNER_SESSION_ID;
+				case HadoopPackage.ZOO_KEEPER_SERVER__DATA_LENGTH: return HadoopPackage.ZNODE__DATA_LENGTH;
+				case HadoopPackage.ZOO_KEEPER_SERVER__CHILDREN_COUNT: return HadoopPackage.ZNODE__CHILDREN_COUNT;
+				case HadoopPackage.ZOO_KEEPER_SERVER__PARENT: return HadoopPackage.ZNODE__PARENT;
+				case HadoopPackage.ZOO_KEEPER_SERVER__NODE_NAME: return HadoopPackage.ZNODE__NODE_NAME;
+				case HadoopPackage.ZOO_KEEPER_SERVER__SEQUENTIAL: return HadoopPackage.ZNODE__SEQUENTIAL;
+				default: return -1;
+			}
+		}
+		return super.eBaseStructuralFeatureID(derivedFeatureID, baseClass);
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	@Override
+	public int eDerivedStructuralFeatureID(int baseFeatureID, Class<?> baseClass) {
+		if (baseClass == ZNode.class) {
+			switch (baseFeatureID) {
+				case HadoopPackage.ZNODE__CHILDREN: return HadoopPackage.ZOO_KEEPER_SERVER__CHILDREN;
+				case HadoopPackage.ZNODE__LAST_REFRESH: return HadoopPackage.ZOO_KEEPER_SERVER__LAST_REFRESH;
+				case HadoopPackage.ZNODE__REFRESHING: return HadoopPackage.ZOO_KEEPER_SERVER__REFRESHING;
+				case HadoopPackage.ZNODE__EPHERMERAL: return HadoopPackage.ZOO_KEEPER_SERVER__EPHERMERAL;
+				case HadoopPackage.ZNODE__CREATION_ID: return HadoopPackage.ZOO_KEEPER_SERVER__CREATION_ID;
+				case HadoopPackage.ZNODE__MODIFIED_ID: return HadoopPackage.ZOO_KEEPER_SERVER__MODIFIED_ID;
+				case HadoopPackage.ZNODE__CREATION_TIME: return HadoopPackage.ZOO_KEEPER_SERVER__CREATION_TIME;
+				case HadoopPackage.ZNODE__MODIFIED_TIME: return HadoopPackage.ZOO_KEEPER_SERVER__MODIFIED_TIME;
+				case HadoopPackage.ZNODE__VERSION: return HadoopPackage.ZOO_KEEPER_SERVER__VERSION;
+				case HadoopPackage.ZNODE__CHILDREN_VERSION: return HadoopPackage.ZOO_KEEPER_SERVER__CHILDREN_VERSION;
+				case HadoopPackage.ZNODE__ACL_VERSION: return HadoopPackage.ZOO_KEEPER_SERVER__ACL_VERSION;
+				case HadoopPackage.ZNODE__EPHERMAL_OWNER_SESSION_ID: return HadoopPackage.ZOO_KEEPER_SERVER__EPHERMAL_OWNER_SESSION_ID;
+				case HadoopPackage.ZNODE__DATA_LENGTH: return HadoopPackage.ZOO_KEEPER_SERVER__DATA_LENGTH;
+				case HadoopPackage.ZNODE__CHILDREN_COUNT: return HadoopPackage.ZOO_KEEPER_SERVER__CHILDREN_COUNT;
+				case HadoopPackage.ZNODE__PARENT: return HadoopPackage.ZOO_KEEPER_SERVER__PARENT;
+				case HadoopPackage.ZNODE__NODE_NAME: return HadoopPackage.ZOO_KEEPER_SERVER__NODE_NAME;
+				case HadoopPackage.ZNODE__SEQUENTIAL: return HadoopPackage.ZOO_KEEPER_SERVER__SEQUENTIAL;
+				default: return -1;
+			}
+		}
+		return super.eDerivedStructuralFeatureID(baseFeatureID, baseClass);
+	}
+
+	/**
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	@Override
+	public String toString() {
+		if (eIsProxy()) return super.toString();
+
+		StringBuffer result = new StringBuffer(super.toString());
+		result.append(" (lastRefresh: ");
+		result.append(lastRefresh);
+		result.append(", refreshing: ");
+		result.append(refreshing);
+		result.append(", ephermeral: ");
+		result.append(ephermeral);
+		result.append(", creationId: ");
+		result.append(creationId);
+		result.append(", modifiedId: ");
+		result.append(modifiedId);
+		result.append(", creationTime: ");
+		result.append(creationTime);
+		result.append(", modifiedTime: ");
+		result.append(modifiedTime);
+		result.append(", version: ");
+		result.append(version);
+		result.append(", childrenVersion: ");
+		result.append(childrenVersion);
+		result.append(", aclVersion: ");
+		result.append(aclVersion);
+		result.append(", ephermalOwnerSessionId: ");
+		result.append(ephermalOwnerSessionId);
+		result.append(", dataLength: ");
+		result.append(dataLength);
+		result.append(", childrenCount: ");
+		result.append(childrenCount);
+		result.append(", nodeName: ");
+		result.append(nodeName);
+		result.append(", sequential: ");
+		result.append(sequential);
+		result.append(')');
+		return result.toString();
+	}
+
+} //ZooKeeperServerImpl

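The class above is mostly EMF-generated boilerplate; the only non-trivial methods are getServer() and getPath(), which resolve by walking getParent() until the ZooKeeperServer root is reached. A minimal sketch of assembling and querying the model (illustrative only, not part of the committed patch; createZNode() on HadoopFactory and the getPath()/getServer() declarations on ZNode are assumed from the rest of this patch series):

import org.apache.hdt.core.internal.model.HadoopFactory;
import org.apache.hdt.core.internal.model.ZNode;
import org.apache.hdt.core.internal.model.ZooKeeperServer;

public class ZooKeeperModelSketch {
	public static void main(String[] args) {
		// The server doubles as the root ZNode of its own tree.
		ZooKeeperServer server = HadoopFactory.eINSTANCE.createZooKeeperServer();
		server.setName("local");
		server.setUri("localhost:2181");

		// createZNode() is assumed to exist alongside createZooKeeperServer().
		ZNode node = HadoopFactory.eINSTANCE.createZNode();
		node.setNodeName("brokers");
		node.setParent(server);
		server.getChildren().add(node);

		System.out.println(node.getPath());             // "/brokers" -- built by walking getParent()
		System.out.println(node.getServer() == server); // "true" -- the root resolves to the server
	}
}
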
http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/util/HadoopAdapterFactory.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/util/HadoopAdapterFactory.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/util/HadoopAdapterFactory.java
new file mode 100644
index 0000000..417d51a
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/util/HadoopAdapterFactory.java
@@ -0,0 +1,208 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *  
+ */
+package org.apache.hdt.core.internal.model.util;
+
+import org.apache.hdt.core.internal.model.*;
+
+import org.eclipse.emf.common.notify.Adapter;
+import org.eclipse.emf.common.notify.Notifier;
+
+import org.eclipse.emf.common.notify.impl.AdapterFactoryImpl;
+
+import org.eclipse.emf.ecore.EObject;
+
+/**
+ * <!-- begin-user-doc -->
+ * The <b>Adapter Factory</b> for the model.
+ * It provides an adapter <code>createXXX</code> method for each class of the model.
+ * <!-- end-user-doc -->
+ * @see org.apache.hdt.core.internal.model.HadoopPackage
+ * @generated
+ */
+public class HadoopAdapterFactory extends AdapterFactoryImpl {
+	/**
+	 * The cached model package.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	protected static HadoopPackage modelPackage;
+
+	/**
+	 * Creates an instance of the adapter factory.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public HadoopAdapterFactory() {
+		if (modelPackage == null) {
+			modelPackage = HadoopPackage.eINSTANCE;
+		}
+	}
+
+	/**
+	 * Returns whether this factory is applicable for the type of the object.
+	 * <!-- begin-user-doc -->
+	 * This implementation returns <code>true</code> if the object is either the model's package or is an instance object of the model.
+	 * <!-- end-user-doc -->
+	 * @return whether this factory is applicable for the type of the object.
+	 * @generated
+	 */
+	@Override
+	public boolean isFactoryForType(Object object) {
+		if (object == modelPackage) {
+			return true;
+		}
+		if (object instanceof EObject) {
+			return ((EObject)object).eClass().getEPackage() == modelPackage;
+		}
+		return false;
+	}
+
+	/**
+	 * The switch that delegates to the <code>createXXX</code> methods.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	protected HadoopSwitch<Adapter> modelSwitch =
+		new HadoopSwitch<Adapter>() {
+			@Override
+			public Adapter caseHDFSServer(HDFSServer object) {
+				return createHDFSServerAdapter();
+			}
+			@Override
+			public Adapter caseServers(Servers object) {
+				return createServersAdapter();
+			}
+			@Override
+			public Adapter caseServer(Server object) {
+				return createServerAdapter();
+			}
+			@Override
+			public Adapter caseZooKeeperServer(ZooKeeperServer object) {
+				return createZooKeeperServerAdapter();
+			}
+			@Override
+			public Adapter caseZNode(ZNode object) {
+				return createZNodeAdapter();
+			}
+			@Override
+			public Adapter defaultCase(EObject object) {
+				return createEObjectAdapter();
+			}
+		};
+
+	/**
+	 * Creates an adapter for the <code>target</code>.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @param target the object to adapt.
+	 * @return the adapter for the <code>target</code>.
+	 * @generated
+	 */
+	@Override
+	public Adapter createAdapter(Notifier target) {
+		return modelSwitch.doSwitch((EObject)target);
+	}
+
+
+	/**
+	 * Creates a new adapter for an object of class '{@link org.apache.hdt.core.internal.model.HDFSServer <em>HDFS Server</em>}'.
+	 * <!-- begin-user-doc -->
+	 * This default implementation returns null so that we can easily ignore cases;
+	 * it's useful to ignore a case when inheritance will catch all the cases anyway.
+	 * <!-- end-user-doc -->
+	 * @return the new adapter.
+	 * @see org.apache.hdt.core.internal.model.HDFSServer
+	 * @generated
+	 */
+	public Adapter createHDFSServerAdapter() {
+		return null;
+	}
+
+	/**
+	 * Creates a new adapter for an object of class '{@link org.apache.hdt.core.internal.model.Servers <em>Servers</em>}'.
+	 * <!-- begin-user-doc -->
+	 * This default implementation returns null so that we can easily ignore cases;
+	 * it's useful to ignore a case when inheritance will catch all the cases anyway.
+	 * <!-- end-user-doc -->
+	 * @return the new adapter.
+	 * @see org.apache.hdt.core.internal.model.Servers
+	 * @generated
+	 */
+	public Adapter createServersAdapter() {
+		return null;
+	}
+
+	/**
+	 * Creates a new adapter for an object of class '{@link org.apache.hdt.core.internal.model.Server <em>Server</em>}'.
+	 * <!-- begin-user-doc -->
+	 * This default implementation returns null so that we can easily ignore cases;
+	 * it's useful to ignore a case when inheritance will catch all the cases anyway.
+	 * <!-- end-user-doc -->
+	 * @return the new adapter.
+	 * @see org.apache.hdt.core.internal.model.Server
+	 * @generated
+	 */
+	public Adapter createServerAdapter() {
+		return null;
+	}
+
+	/**
+	 * Creates a new adapter for an object of class '{@link org.apache.hdt.core.internal.model.ZooKeeperServer <em>Zoo Keeper Server</em>}'.
+	 * <!-- begin-user-doc -->
+	 * This default implementation returns null so that we can easily ignore cases;
+	 * it's useful to ignore a case when inheritance will catch all the cases anyway.
+	 * <!-- end-user-doc -->
+	 * @return the new adapter.
+	 * @see org.apache.hdt.core.internal.model.ZooKeeperServer
+	 * @generated
+	 */
+	public Adapter createZooKeeperServerAdapter() {
+		return null;
+	}
+
+	/**
+	 * Creates a new adapter for an object of class '{@link org.apache.hdt.core.internal.model.ZNode <em>ZNode</em>}'.
+	 * <!-- begin-user-doc -->
+	 * This default implementation returns null so that we can easily ignore cases;
+	 * it's useful to ignore a case when inheritance will catch all the cases anyway.
+	 * <!-- end-user-doc -->
+	 * @return the new adapter.
+	 * @see org.apache.hdt.core.internal.model.ZNode
+	 * @generated
+	 */
+	public Adapter createZNodeAdapter() {
+		return null;
+	}
+
+	/**
+	 * Creates a new adapter for the default case.
+	 * <!-- begin-user-doc -->
+	 * This default implementation returns null.
+	 * <!-- end-user-doc -->
+	 * @return the new adapter.
+	 * @generated
+	 */
+	public Adapter createEObjectAdapter() {
+		return null;
+	}
+
+} //HadoopAdapterFactory

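All createXXXAdapter() methods above intentionally return null; consumers subclass the factory and override only the adapters they need. A minimal sketch of specializing the factory to observe ZNode changes (illustrative only; it relies solely on standard EMF notification types):

import org.apache.hdt.core.internal.model.ZNode;
import org.apache.hdt.core.internal.model.util.HadoopAdapterFactory;
import org.eclipse.emf.common.notify.Adapter;
import org.eclipse.emf.common.notify.Notification;
import org.eclipse.emf.common.notify.impl.AdapterImpl;

public class LoggingHadoopAdapterFactory extends HadoopAdapterFactory {
	@Override
	public Adapter createZNodeAdapter() {
		// Attach a simple listener that reports every feature change on a ZNode.
		return new AdapterImpl() {
			@Override
			public void notifyChanged(Notification msg) {
				System.out.println("ZNode changed, feature id " + msg.getFeatureID(ZNode.class));
			}
		};
	}
}

// Usage: new LoggingHadoopAdapterFactory().adapt(someZNode, ZNode.class);
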
http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/util/HadoopSwitch.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/util/HadoopSwitch.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/util/HadoopSwitch.java
new file mode 100644
index 0000000..6f0b337
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/util/HadoopSwitch.java
@@ -0,0 +1,229 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *  
+ */
+package org.apache.hdt.core.internal.model.util;
+
+import java.util.List;
+
+import org.apache.hdt.core.internal.model.*;
+
+import org.eclipse.emf.ecore.EClass;
+import org.eclipse.emf.ecore.EObject;
+
+/**
+ * <!-- begin-user-doc -->
+ * The <b>Switch</b> for the model's inheritance hierarchy.
+ * It supports the call {@link #doSwitch(EObject) doSwitch(object)}
+ * to invoke the <code>caseXXX</code> method for each class of the model,
+ * starting with the actual class of the object
+ * and proceeding up the inheritance hierarchy
+ * until a non-null result is returned,
+ * which is the result of the switch.
+ * <!-- end-user-doc -->
+ * @see org.apache.hdt.core.internal.model.HadoopPackage
+ * @generated
+ */
+public class HadoopSwitch<T> {
+	/**
+	 * The cached model package
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	protected static HadoopPackage modelPackage;
+
+	/**
+	 * Creates an instance of the switch.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @generated
+	 */
+	public HadoopSwitch() {
+		if (modelPackage == null) {
+			modelPackage = HadoopPackage.eINSTANCE;
+		}
+	}
+
+	/**
+	 * Calls <code>caseXXX</code> for each class of the model until one returns a non null result; it yields that result.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @return the first non-null result returned by a <code>caseXXX</code> call.
+	 * @generated
+	 */
+	public T doSwitch(EObject theEObject) {
+		return doSwitch(theEObject.eClass(), theEObject);
+	}
+
+	/**
+	 * Calls <code>caseXXX</code> for each class of the model until one returns a non null result; it yields that result.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @return the first non-null result returned by a <code>caseXXX</code> call.
+	 * @generated
+	 */
+	protected T doSwitch(EClass theEClass, EObject theEObject) {
+		if (theEClass.eContainer() == modelPackage) {
+			return doSwitch(theEClass.getClassifierID(), theEObject);
+		}
+		else {
+			List<EClass> eSuperTypes = theEClass.getESuperTypes();
+			return
+				eSuperTypes.isEmpty() ?
+					defaultCase(theEObject) :
+					doSwitch(eSuperTypes.get(0), theEObject);
+		}
+	}
+
+	/**
+	 * Calls <code>caseXXX</code> for each class of the model until one returns a non null result; it yields that result.
+	 * <!-- begin-user-doc -->
+	 * <!-- end-user-doc -->
+	 * @return the first non-null result returned by a <code>caseXXX</code> call.
+	 * @generated
+	 */
+	protected T doSwitch(int classifierID, EObject theEObject) {
+		switch (classifierID) {
+			case HadoopPackage.HDFS_SERVER: {
+				HDFSServer hdfsServer = (HDFSServer)theEObject;
+				T result = caseHDFSServer(hdfsServer);
+				if (result == null) result = caseServer(hdfsServer);
+				if (result == null) result = defaultCase(theEObject);
+				return result;
+			}
+			case HadoopPackage.SERVERS: {
+				Servers servers = (Servers)theEObject;
+				T result = caseServers(servers);
+				if (result == null) result = defaultCase(theEObject);
+				return result;
+			}
+			case HadoopPackage.SERVER: {
+				Server server = (Server)theEObject;
+				T result = caseServer(server);
+				if (result == null) result = defaultCase(theEObject);
+				return result;
+			}
+			case HadoopPackage.ZOO_KEEPER_SERVER: {
+				ZooKeeperServer zooKeeperServer = (ZooKeeperServer)theEObject;
+				T result = caseZooKeeperServer(zooKeeperServer);
+				if (result == null) result = caseServer(zooKeeperServer);
+				if (result == null) result = caseZNode(zooKeeperServer);
+				if (result == null) result = defaultCase(theEObject);
+				return result;
+			}
+			case HadoopPackage.ZNODE: {
+				ZNode zNode = (ZNode)theEObject;
+				T result = caseZNode(zNode);
+				if (result == null) result = defaultCase(theEObject);
+				return result;
+			}
+			default: return defaultCase(theEObject);
+		}
+	}
+
+	/**
+	 * Returns the result of interpreting the object as an instance of '<em>HDFS Server</em>'.
+	 * <!-- begin-user-doc -->
+	 * This implementation returns null;
+	 * returning a non-null result will terminate the switch.
+	 * <!-- end-user-doc -->
+	 * @param object the target of the switch.
+	 * @return the result of interpreting the object as an instance of '<em>HDFS Server</em>'.
+	 * @see #doSwitch(org.eclipse.emf.ecore.EObject) doSwitch(EObject)
+	 * @generated
+	 */
+	public T caseHDFSServer(HDFSServer object) {
+		return null;
+	}
+
+	/**
+	 * Returns the result of interpreting the object as an instance of '<em>Servers</em>'.
+	 * <!-- begin-user-doc -->
+	 * This implementation returns null;
+	 * returning a non-null result will terminate the switch.
+	 * <!-- end-user-doc -->
+	 * @param object the target of the switch.
+	 * @return the result of interpreting the object as an instance of '<em>Servers</em>'.
+	 * @see #doSwitch(org.eclipse.emf.ecore.EObject) doSwitch(EObject)
+	 * @generated
+	 */
+	public T caseServers(Servers object) {
+		return null;
+	}
+
+	/**
+	 * Returns the result of interpreting the object as an instance of '<em>Server</em>'.
+	 * <!-- begin-user-doc -->
+	 * This implementation returns null;
+	 * returning a non-null result will terminate the switch.
+	 * <!-- end-user-doc -->
+	 * @param object the target of the switch.
+	 * @return the result of interpreting the object as an instance of '<em>Server</em>'.
+	 * @see #doSwitch(org.eclipse.emf.ecore.EObject) doSwitch(EObject)
+	 * @generated
+	 */
+	public T caseServer(Server object) {
+		return null;
+	}
+
+	/**
+	 * Returns the result of interpreting the object as an instance of '<em>Zoo Keeper Server</em>'.
+	 * <!-- begin-user-doc -->
+	 * This implementation returns null;
+	 * returning a non-null result will terminate the switch.
+	 * <!-- end-user-doc -->
+	 * @param object the target of the switch.
+	 * @return the result of interpreting the object as an instance of '<em>Zoo Keeper Server</em>'.
+	 * @see #doSwitch(org.eclipse.emf.ecore.EObject) doSwitch(EObject)
+	 * @generated
+	 */
+	public T caseZooKeeperServer(ZooKeeperServer object) {
+		return null;
+	}
+
+	/**
+	 * Returns the result of interpreting the object as an instance of '<em>ZNode</em>'.
+	 * <!-- begin-user-doc -->
+	 * This implementation returns null;
+	 * returning a non-null result will terminate the switch.
+	 * <!-- end-user-doc -->
+	 * @param object the target of the switch.
+	 * @return the result of interpreting the object as an instance of '<em>ZNode</em>'.
+	 * @see #doSwitch(org.eclipse.emf.ecore.EObject) doSwitch(EObject)
+	 * @generated
+	 */
+	public T caseZNode(ZNode object) {
+		return null;
+	}
+
+	/**
+	 * Returns the result of interpreting the object as an instance of '<em>EObject</em>'.
+	 * <!-- begin-user-doc -->
+	 * This implementation returns null;
+	 * returning a non-null result will terminate the switch, but this is the last case anyway.
+	 * <!-- end-user-doc -->
+	 * @param object the target of the switch.
+	 * @return the result of interpreting the object as an instance of '<em>EObject</em>'.
+	 * @see #doSwitch(org.eclipse.emf.ecore.EObject)
+	 * @generated
+	 */
+	public T defaultCase(EObject object) {
+		return null;
+	}
+
+} //HadoopSwitch

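doSwitch() tries the most specific case first and climbs the supertype chain until a case returns non-null, so a ZooKeeperServer is offered to caseZooKeeperServer, then caseServer, then caseZNode. A minimal label-provider sketch built on the switch (illustrative only; getUri() is assumed to pair with the setUri() used in ZooKeeperManager further below):

import org.apache.hdt.core.internal.model.ZNode;
import org.apache.hdt.core.internal.model.ZooKeeperServer;
import org.apache.hdt.core.internal.model.util.HadoopSwitch;
import org.eclipse.emf.ecore.EObject;

public class HadoopLabelSwitch extends HadoopSwitch<String> {
	@Override
	public String caseZooKeeperServer(ZooKeeperServer server) {
		return "ZooKeeper @ " + server.getUri();
	}

	@Override
	public String caseZNode(ZNode node) {
		return node.getNodeName();
	}

	@Override
	public String defaultCase(EObject object) {
		// Fall back to the model class name for anything not handled above.
		return object.eClass().getName();
	}
}

// Usage: String label = new HadoopLabelSwitch().doSwitch(modelElement);
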
http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/src/org/apache/hdt/core/internal/zookeeper/InterruptableZooKeeperClient.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/zookeeper/InterruptableZooKeeperClient.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/zookeeper/InterruptableZooKeeperClient.java
new file mode 100644
index 0000000..133b9dd
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/zookeeper/InterruptableZooKeeperClient.java
@@ -0,0 +1,233 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.core.internal.zookeeper;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hdt.core.internal.model.ZNode;
+import org.apache.hdt.core.internal.model.ZooKeeperServer;
+import org.apache.hdt.core.zookeeper.ZooKeeperClient;
+import org.apache.log4j.Logger;
+
+/**
+ * 
+ * @author Srimanth Gunturi
+ * 
+ */
+public class InterruptableZooKeeperClient extends ZooKeeperClient {
+	private static final int DEFAULT_TIMEOUT = 60000;
+	private static final Logger logger = Logger.getLogger(InterruptableZooKeeperClient.class);
+	// private static ExecutorService threadPool =
+	// Executors.newFixedThreadPool(10);
+
+	private final ZooKeeperClient client;
+	private final int timeoutMillis = DEFAULT_TIMEOUT;
+	private final ZooKeeperServer server;
+
+	/**
+	 * @param server
+	 * 
+	 */
+	public InterruptableZooKeeperClient(ZooKeeperServer server, ZooKeeperClient client) {
+		this.server = server;
+		this.client = client;
+	}
+
+	private static interface CustomRunnable<V> {
+		public V run() throws IOException, InterruptedException;
+	}
+
+	protected <T> T executeWithTimeout(final CustomRunnable<T> runnable) throws IOException, InterruptedException {
+		final List<T> data = new ArrayList<T>();
+		final IOException[] ioE = new IOException[1];
+		final InterruptedException[] inE = new InterruptedException[1];
+		Thread runnerThread = new Thread(new Runnable() {
+			public void run() {
+				try {
+					data.add(runnable.run());
+				} catch (IOException e) {
+					ioE[0] = e;
+				} catch (InterruptedException e) {
+					inE[0] = e;
+				}
+			}
+		});
+		boolean interrupted = false;
+		runnerThread.start();
+		runnerThread.join(timeoutMillis);
+		if (runnerThread.isAlive()) {
+			if (logger.isDebugEnabled())
+				logger.debug("executeWithTimeout(): Interrupting server call");
+			runnerThread.interrupt();
+			interrupted = true;
+		}
+		if (ioE[0] != null) {
+			try {
+				if (!client.isConnected())
+					ZooKeeperManager.INSTANCE.disconnect(server);
+			} catch (Throwable t) {
+			}
+			throw ioE[0];
+		}
+		if (inE[0] != null)
+			throw inE[0];
+		if (interrupted) {
+			// Tell the ZooKeeper manager that the server timed out
+			if (logger.isDebugEnabled())
+				logger.debug("executeWithTimeout(): Server timed out: " + server);
+			ZooKeeperManager.INSTANCE.disconnect(server);
+			throw new InterruptedException();
+		}
+		if (data.size() > 0)
+			return data.get(0);
+		return null;
+	}
+
+	protected void connectIfNecessary() throws IOException, InterruptedException {
+		if (!client.isConnected())
+			client.connect();
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.apache.hdt.core.zookeeper.ZooKeeperClient#initialize(java.lang
+	 * .String)
+	 */
+	@Override
+	public void initialize(final String serverLocation) {
+		try {
+			executeWithTimeout(new CustomRunnable<Object>() {
+				@Override
+				public Object run() throws IOException, InterruptedException {
+					client.initialize(serverLocation);
+					return null;
+				}
+			});
+		} catch (IOException e) {
+			throw new RuntimeException(e.getMessage(), e);
+		} catch (InterruptedException e) {
+			throw new RuntimeException(e.getMessage(), e);
+		}
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.apache.hdt.core.zookeeper.ZooKeeperClient#isConnected()
+	 */
+	@Override
+	public boolean isConnected() throws IOException, InterruptedException {
+		return executeWithTimeout(new CustomRunnable<Boolean>() {
+			@Override
+			public Boolean run() throws IOException, InterruptedException {
+				return client.isConnected();
+			}
+		});
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.apache.hdt.core.zookeeper.ZooKeeperClient#connect()
+	 */
+	@Override
+	public void connect() throws IOException, InterruptedException {
+		executeWithTimeout(new CustomRunnable<Object>() {
+			@Override
+			public Object run() throws IOException, InterruptedException {
+				client.connect();
+				return null;
+			}
+		});
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.apache.hdt.core.zookeeper.ZooKeeperClient#getChildren(org.apache.hdt.core.internal.model.ZNode)
+	 */
+	@Override
+	public List<ZNode> getChildren(final ZNode path) throws IOException, InterruptedException {
+		connectIfNecessary();
+		return executeWithTimeout(new CustomRunnable<List<ZNode>>() {
+			@Override
+			public List<ZNode> run() throws IOException, InterruptedException {
+				return client.getChildren(path);
+			}
+		});
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.apache.hdt.core.zookeeper.ZooKeeperClient#disconnect()
+	 */
+	@Override
+	public void disconnect() throws IOException, InterruptedException {
+		executeWithTimeout(new CustomRunnable<Object>() {
+			@Override
+			public Object run() throws IOException, InterruptedException {
+				client.disconnect();
+				return null;
+			}
+		});
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.apache.hdt.core.zookeeper.ZooKeeperClient#delete(org.apache.hdt.core.internal.model.ZNode)
+	 */
+	@Override
+	public void delete(final ZNode node) throws IOException, InterruptedException {
+		executeWithTimeout(new CustomRunnable<Object>() {
+			@Override
+			public Object run() throws IOException, InterruptedException {
+				client.delete(node);
+				return null;
+			}
+		});
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.apache.hdt.core.zookeeper.ZooKeeperClient#open(org.apache.hdt.core.internal.model.ZNode)
+	 */
+	@Override
+	public byte[] open(final ZNode path) throws InterruptedException, IOException {
+		connectIfNecessary();
+		return executeWithTimeout(new CustomRunnable<byte[]>() {
+			@Override
+			public byte[] run() throws IOException, InterruptedException {
+				return client.open(path);
+			}
+		});
+	}
+}

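Every operation above funnels through executeWithTimeout(), which runs the delegate call on a worker thread, joins for at most 60 seconds, and on timeout interrupts the thread, tells ZooKeeperManager to disconnect the server, and throws InterruptedException. A minimal usage sketch (illustrative only; MyZooKeeperClient is a hypothetical concrete ZooKeeperClient, which in the plugin would instead be resolved through ZooKeeperManager):

import java.io.IOException;
import java.util.List;

import org.apache.hdt.core.internal.model.HadoopFactory;
import org.apache.hdt.core.internal.model.ZNode;
import org.apache.hdt.core.internal.model.ZooKeeperServer;
import org.apache.hdt.core.internal.zookeeper.InterruptableZooKeeperClient;
import org.apache.hdt.core.zookeeper.ZooKeeperClient;

public class InterruptableClientSketch {
	public static void main(String[] args) throws IOException, InterruptedException {
		ZooKeeperServer server = HadoopFactory.eINSTANCE.createZooKeeperServer();
		server.setName("local");
		server.setUri("localhost:2181");

		ZooKeeperClient delegate = new MyZooKeeperClient(); // hypothetical concrete client
		InterruptableZooKeeperClient client = new InterruptableZooKeeperClient(server, delegate);

		client.initialize("localhost:2181");
		// getChildren() connects if necessary; the call is abandoned after 60 s,
		// the server is disconnected via ZooKeeperManager, and InterruptedException is thrown.
		List<ZNode> children = client.getChildren(server);
		System.out.println(children.size() + " child znode(s) under /");
	}
}
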
http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/src/org/apache/hdt/core/internal/zookeeper/ZooKeeperManager.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/zookeeper/ZooKeeperManager.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/zookeeper/ZooKeeperManager.java
new file mode 100644
index 0000000..4c36259
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/zookeeper/ZooKeeperManager.java
@@ -0,0 +1,162 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.core.internal.zookeeper;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.hdt.core.Activator;
+import org.apache.hdt.core.internal.HadoopManager;
+import org.apache.hdt.core.internal.model.HadoopFactory;
+import org.apache.hdt.core.internal.model.ServerStatus;
+import org.apache.hdt.core.internal.model.ZooKeeperServer;
+import org.apache.hdt.core.zookeeper.ZooKeeperClient;
+import org.apache.log4j.Logger;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.core.runtime.IConfigurationElement;
+import org.eclipse.core.runtime.IStatus;
+import org.eclipse.core.runtime.Platform;
+import org.eclipse.core.runtime.Status;
+import org.eclipse.emf.common.util.EList;
+
+/**
+ * @author Srimanth Gunturi
+ * 
+ */
+public class ZooKeeperManager {
+	private static final Logger logger = Logger.getLogger(ZooKeeperManager.class);
+	public static ZooKeeperManager INSTANCE = new ZooKeeperManager();
+	private Map<String, ZooKeeperClient> clientsMap = new HashMap<String, ZooKeeperClient>();
+
+	private ZooKeeperManager() {
+	}
+
+	/**
+	 * Loads persisted ZooKeeper server definitions. Currently a no-op.
+	 */
+	public void loadServers() {
+
+	}
+
+	public EList<ZooKeeperServer> getServers() {
+		return HadoopManager.INSTANCE.getServers().getZookeeperServers();
+	}
+
+	/**
+	 * @param zkServerName
+	 * @param zkServerLocation
+	 */
+	public ZooKeeperServer createServer(String zkServerName, String zkServerLocation) {
+		ZooKeeperServer zkServer = HadoopFactory.eINSTANCE.createZooKeeperServer();
+		zkServer.setName(zkServerName);
+		zkServer.setUri(zkServerLocation);
+		getServers().add(zkServer);
+		HadoopManager.INSTANCE.saveServers();
+		return zkServer;
+	}
+
+	/**
+	 * @param server
+	 */
+	public void disconnect(ZooKeeperServer server) {
+		try {
+			if (ServerStatus.DISCONNECTED_VALUE != server.getStatusCode()) {
+				getClient(server).disconnect();
+				server.setStatusCode(ServerStatus.DISCONNECTED_VALUE);
+			}
+		} catch (IOException e) {
+			logger.warn("disconnect(): Error disconnecting from server: " + server, e);
+		} catch (InterruptedException e) {
+			logger.warn("disconnect(): Interrupted while disconnecting from server: " + server, e);
+		} catch (CoreException e) {
+			logger.warn("disconnect(): Error disconnecting from server: " + server, e);
+		}
+	}
+
+	/**
+	 * Reconnects to the given ZooKeeper server.
+	 * 
+	 * @param server
+	 */
+	public void reconnect(ZooKeeperServer server) {
+		try {
+			if (logger.isDebugEnabled())
+				logger.debug("reconnect(): Reconnecting: " + server);
+			server.setStatusCode(0);
+			getClient(server).connect();
+			if (!getClient(server).isConnected()) {
+				if (logger.isDebugEnabled())
+					logger.debug("reconnect(): Client not connected. Setting to disconnected: " + server);
+				server.setStatusCode(ServerStatus.DISCONNECTED_VALUE);
+			}
+			if (logger.isDebugEnabled())
+				logger.debug("reconnect(): Reconnected: " + server);
+		} catch (IOException e) {
+			server.setStatusCode(ServerStatus.DISCONNECTED_VALUE);
+			logger.warn("reconnect(): Error reconnecting to server: " + server, e);
+		} catch (InterruptedException e) {
+			server.setStatusCode(ServerStatus.DISCONNECTED_VALUE);
+			logger.warn("reconnect(): Interrupted while reconnecting to server: " + server, e);
+		} catch (CoreException e) {
+			server.setStatusCode(ServerStatus.DISCONNECTED_VALUE);
+			logger.warn("reconnect(): Error reconnecting to server: " + server, e);
+		}
+	}
+
+	public ZooKeeperClient getClient(ZooKeeperServer server) throws CoreException {
+		if (server != null && server.getStatusCode() == ServerStatus.DISCONNECTED_VALUE) {
+			if (logger.isDebugEnabled())
+				logger.debug("getClient(" + server.getUri() + "): Server disconnected. Not returning client");
+			throw new CoreException(new Status(IStatus.WARNING, Activator.BUNDLE_ID, "Server disconnected. Please reconnect to server."));
+		}
+		if (clientsMap.containsKey(server.getUri()))
+			return clientsMap.get(server.getUri());
+		else {
+			IConfigurationElement[] elementsFor = Platform.getExtensionRegistry().getConfigurationElementsFor("org.apache.hdt.core.zookeeperClient");
+			for (IConfigurationElement element : elementsFor) {
+				ZooKeeperClient client = (ZooKeeperClient) element.createExecutableExtension("class");
+				client.initialize(server.getUri());
+				clientsMap.put(server.getUri(), new InterruptableZooKeeperClient(server, client));
+			}
+			return clientsMap.get(server.getUri());
+		}
+	}
+
+	/**
+	 * @param server
+	 * @throws CoreException
+	 */
+	public void delete(ZooKeeperServer server) throws CoreException {
+		if (server != null && server.getStatusCode() != ServerStatus.DISCONNECTED_VALUE) {
+			if (logger.isDebugEnabled())
+				logger.debug("delete(" + server.getUri() + "): Cannot delete a connected server.");
+			throw new CoreException(new Status(IStatus.WARNING, Activator.BUNDLE_ID, "Cannot delete a connected server."));
+		}
+		if (clientsMap.containsKey(server.getUri()))
+			clientsMap.remove(server.getUri());
+		getServers().remove(server);
+	}
+}
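
Note: the following is an illustrative usage sketch of ZooKeeperManager, not part of the patch. It assumes a running Eclipse/OSGi instance in which a zookeeperClient extension (such as the one contributed by org.apache.hdt.hadoop.release later in this patch) is installed; the server name and location values are placeholders.

import org.apache.hdt.core.internal.model.ZooKeeperServer;
import org.apache.hdt.core.internal.zookeeper.ZooKeeperManager;
import org.apache.hdt.core.zookeeper.ZooKeeperClient;
import org.eclipse.core.runtime.CoreException;

public class ZooKeeperManagerSketch {

    public void pingServer() throws Exception {
        // Register a new server definition; this also persists it via HadoopManager.
        ZooKeeperServer server = ZooKeeperManager.INSTANCE.createServer("local-zk", "localhost:2181");
        try {
            // getClient() lazily instantiates the extension-point client and
            // hands back an InterruptableZooKeeperClient wrapper.
            ZooKeeperClient client = ZooKeeperManager.INSTANCE.getClient(server);
            client.connect();
            System.out.println("Connected: " + client.isConnected());
        } catch (CoreException e) {
            // Thrown when the server is marked as disconnected.
            System.err.println(e.getMessage());
        } finally {
            ZooKeeperManager.INSTANCE.disconnect(server);
        }
    }
}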

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.core/src/org/apache/hdt/core/zookeeper/ZooKeeperClient.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/zookeeper/ZooKeeperClient.java b/org.apache.hdt.core/src/org/apache/hdt/core/zookeeper/ZooKeeperClient.java
new file mode 100644
index 0000000..cec9a73
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/zookeeper/ZooKeeperClient.java
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.core.zookeeper;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.hdt.core.internal.model.ZNode;
+
+/**
+ * Base class for ZooKeeper clients contributed through the
+ * org.apache.hdt.core.zookeeperClient extension point.
+ * 
+ * @author Srimanth Gunturi
+ */
+public abstract class ZooKeeperClient {
+
+	/** Initializes this client with the location of the ZooKeeper server. */
+	public abstract void initialize(String serverLocation);
+
+	/** @return true if this client currently holds a connection to the server */
+	public abstract boolean isConnected() throws IOException, InterruptedException;
+
+	/** Connects to the server provided via {@link #initialize(String)}. */
+	public abstract void connect() throws IOException, InterruptedException;
+
+	/** @return the children of the given znode */
+	public abstract List<ZNode> getChildren(ZNode path) throws IOException, InterruptedException;
+
+	/** Disconnects from the server. */
+	public abstract void disconnect() throws IOException, InterruptedException;
+
+	/** Deletes the given znode. */
+	public abstract void delete(ZNode zkn) throws IOException, InterruptedException;
+
+	/** @return the data stored at the given znode */
+	public abstract byte[] open(ZNode path) throws InterruptedException, IOException;
+}

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.feature/.project
----------------------------------------------------------------------
diff --git a/org.apache.hdt.feature/.project b/org.apache.hdt.feature/.project
new file mode 100644
index 0000000..bbf2949
--- /dev/null
+++ b/org.apache.hdt.feature/.project
@@ -0,0 +1,17 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<projectDescription>
+	<name>org.apache.hdt.feature</name>
+	<comment></comment>
+	<projects>
+	</projects>
+	<buildSpec>
+		<buildCommand>
+			<name>org.eclipse.pde.FeatureBuilder</name>
+			<arguments>
+			</arguments>
+		</buildCommand>
+	</buildSpec>
+	<natures>
+		<nature>org.eclipse.pde.FeatureNature</nature>
+	</natures>
+</projectDescription>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.feature/build.properties
----------------------------------------------------------------------
diff --git a/org.apache.hdt.feature/build.properties b/org.apache.hdt.feature/build.properties
new file mode 100644
index 0000000..64f93a9
--- /dev/null
+++ b/org.apache.hdt.feature/build.properties
@@ -0,0 +1 @@
+bin.includes = feature.xml

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.feature/feature.xml
----------------------------------------------------------------------
diff --git a/org.apache.hdt.feature/feature.xml b/org.apache.hdt.feature/feature.xml
new file mode 100644
index 0000000..96add65
--- /dev/null
+++ b/org.apache.hdt.feature/feature.xml
@@ -0,0 +1,68 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<feature
+      id="org.apache.hadoop.eclipse.feature"
+      label="Apache Hadoop Eclipse"
+      version="1.0.0.qualifier">
+
+   <description url="http://people.apache.org/~srimanth/hadoop-eclipse">
+      Apache Hadoop Eclipse feature provides useful Hadoop capabilities from the Eclipse platform.
+   </description>
+
+   <copyright url="http://www.apache.org/licenses/LICENSE-2.0">
+      Copyright 2013 Srimanth Gunturi
+
+Licensed under the Apache License, Version 2.0 (the &quot;License&quot;);
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an &quot;AS IS&quot; BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+or implied. See the License for the specific language governing
+permissions and limitations under the License.
+   </copyright>
+
+   <license url="http://www.apache.org/licenses/LICENSE-2.0">
+      Copyright 2013 Srimanth Gunturi
+Licensed under the Apache License, Version 2.0 (the &quot;License&quot;);
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an &quot;AS IS&quot; BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+or implied. See the License for the specific language governing
+permissions and limitations under the License.
+   </license>
+
+   <url>
+      <update label="Apache Hadoop Eclipse Update Site" url="http://people.apache.org/~srimanth/hadoop-eclipse/update-site"/>
+      <discovery label="Apache Hadoop Eclipse Update Site" url="http://people.apache.org/~srimanth/hadoop-eclipse/update-site/"/>
+      <discovery label="Apache Hadoop Eclipse WebSite" url="http://people.apache.org/~srimanth/hadoop-eclipse"/>
+   </url>
+
+   <plugin
+         id="org.apache.hadoop.eclipse"
+         download-size="0"
+         install-size="0"
+         version="0.0.0"
+         unpack="false"/>
+
+   <plugin
+         id="org.apache.hadoop.eclipse.release"
+         download-size="0"
+         install-size="0"
+         version="0.0.0"
+         fragment="true"
+         unpack="false"/>
+
+   <plugin
+         id="org.apache.hadoop.eclipse.ui"
+         download-size="0"
+         install-size="0"
+         version="0.0.0"
+         unpack="false"/>
+
+</feature>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.hadoop.release/.classpath
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop.release/.classpath b/org.apache.hdt.hadoop.release/.classpath
new file mode 100644
index 0000000..178956b
--- /dev/null
+++ b/org.apache.hdt.hadoop.release/.classpath
@@ -0,0 +1,25 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<classpath>
+	<classpathentry exported="true" kind="lib" path="lib/zookeeper-3.4.5/jline-0.9.94.jar"/>
+	<classpathentry exported="true" kind="lib" path="lib/zookeeper-3.4.5/log4j-1.2.15.jar"/>
+	<classpathentry exported="true" kind="lib" path="lib/zookeeper-3.4.5/netty-3.2.2.Final.jar"/>
+	<classpathentry exported="true" kind="lib" path="lib/zookeeper-3.4.5/slf4j-api-1.6.1.jar"/>
+	<classpathentry exported="true" kind="lib" path="lib/zookeeper-3.4.5/slf4j-log4j12-1.6.1.jar"/>
+	<classpathentry exported="true" kind="lib" path="lib/zookeeper-3.4.5/zookeeper-3.4.5.21.jar"/>
+	<classpathentry exported="true" kind="lib" path="lib/hadoop-1.1.2.21/commons-configuration-1.6.jar"/>
+	<classpathentry exported="true" kind="lib" path="lib/hadoop-1.1.2.21/commons-lang-2.4.jar"/>
+	<classpathentry exported="true" kind="lib" path="lib/hadoop-1.1.2.21/commons-logging-1.1.1.jar"/>
+	<classpathentry exported="true" kind="lib" path="lib/hadoop-1.1.2.21/commons-logging-api-1.0.4.jar"/>
+	<classpathentry exported="true" kind="lib" path="lib/hadoop-1.1.2.21/hadoop-ant-1.1.2.21.jar"/>
+	<classpathentry exported="true" kind="lib" path="lib/hadoop-1.1.2.21/hadoop-client-1.1.2.21.jar"/>
+	<classpathentry exported="true" kind="lib" path="lib/hadoop-1.1.2.21/hadoop-core-1.1.1.jar" sourcepath="/release-1.1.2-rc5"/>
+	<classpathentry exported="true" kind="lib" path="lib/hadoop-1.1.2.21/hadoop-core-1.1.2.21.jar" sourcepath="/release-1.1.2-rc5/src"/>
+	<classpathentry exported="true" kind="lib" path="lib/hadoop-1.1.2.21/hadoop-examples-1.1.2.21.jar"/>
+	<classpathentry exported="true" kind="lib" path="lib/hadoop-1.1.2.21/hadoop-minicluster-1.1.2.21.jar"/>
+	<classpathentry exported="true" kind="lib" path="lib/hadoop-1.1.2.21/hadoop-test-1.1.2.21.jar"/>
+	<classpathentry exported="true" kind="lib" path="lib/hadoop-1.1.2.21/hadoop-tools-1.1.2.21.jar"/>
+	<classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER"/>
+	<classpathentry kind="con" path="org.eclipse.pde.core.requiredPlugins"/>
+	<classpathentry kind="src" path="src"/>
+	<classpathentry kind="output" path="bin"/>
+</classpath>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.hadoop.release/.project
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop.release/.project b/org.apache.hdt.hadoop.release/.project
new file mode 100644
index 0000000..1759ed6
--- /dev/null
+++ b/org.apache.hdt.hadoop.release/.project
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<projectDescription>
+	<name>org.apache.hdt.hadoop.release</name>
+	<comment></comment>
+	<projects>
+	</projects>
+	<buildSpec>
+		<buildCommand>
+			<name>org.eclipse.jdt.core.javabuilder</name>
+			<arguments>
+			</arguments>
+		</buildCommand>
+		<buildCommand>
+			<name>org.eclipse.pde.ManifestBuilder</name>
+			<arguments>
+			</arguments>
+		</buildCommand>
+		<buildCommand>
+			<name>org.eclipse.pde.SchemaBuilder</name>
+			<arguments>
+			</arguments>
+		</buildCommand>
+	</buildSpec>
+	<natures>
+		<nature>org.eclipse.pde.PluginNature</nature>
+		<nature>org.eclipse.jdt.core.javanature</nature>
+	</natures>
+</projectDescription>


[3/8] HDT-32: Merge the code base of Hadoop-Eclipse project into HDT. Contributed by Srimanth Gunturi

Posted by rs...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.hadoop.release/.settings/org.eclipse.jdt.core.prefs
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop.release/.settings/org.eclipse.jdt.core.prefs b/org.apache.hdt.hadoop.release/.settings/org.eclipse.jdt.core.prefs
new file mode 100644
index 0000000..9a9b636
--- /dev/null
+++ b/org.apache.hdt.hadoop.release/.settings/org.eclipse.jdt.core.prefs
@@ -0,0 +1,276 @@
+#Thu Mar 21 01:59:25 PDT 2013
+eclipse.preferences.version=1
+org.eclipse.jdt.core.compiler.codegen.inlineJsrBytecode=enabled
+org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.6
+org.eclipse.jdt.core.compiler.compliance=1.6
+org.eclipse.jdt.core.compiler.problem.assertIdentifier=error
+org.eclipse.jdt.core.compiler.problem.enumIdentifier=error
+org.eclipse.jdt.core.compiler.source=1.6
+org.eclipse.jdt.core.formatter.align_type_members_on_columns=false
+org.eclipse.jdt.core.formatter.alignment_for_arguments_in_allocation_expression=16
+org.eclipse.jdt.core.formatter.alignment_for_arguments_in_annotation=0
+org.eclipse.jdt.core.formatter.alignment_for_arguments_in_enum_constant=16
+org.eclipse.jdt.core.formatter.alignment_for_arguments_in_explicit_constructor_call=16
+org.eclipse.jdt.core.formatter.alignment_for_arguments_in_method_invocation=16
+org.eclipse.jdt.core.formatter.alignment_for_arguments_in_qualified_allocation_expression=16
+org.eclipse.jdt.core.formatter.alignment_for_assignment=0
+org.eclipse.jdt.core.formatter.alignment_for_binary_expression=16
+org.eclipse.jdt.core.formatter.alignment_for_compact_if=16
+org.eclipse.jdt.core.formatter.alignment_for_conditional_expression=80
+org.eclipse.jdt.core.formatter.alignment_for_enum_constants=0
+org.eclipse.jdt.core.formatter.alignment_for_expressions_in_array_initializer=16
+org.eclipse.jdt.core.formatter.alignment_for_method_declaration=0
+org.eclipse.jdt.core.formatter.alignment_for_multiple_fields=16
+org.eclipse.jdt.core.formatter.alignment_for_parameters_in_constructor_declaration=16
+org.eclipse.jdt.core.formatter.alignment_for_parameters_in_method_declaration=16
+org.eclipse.jdt.core.formatter.alignment_for_selector_in_method_invocation=16
+org.eclipse.jdt.core.formatter.alignment_for_superclass_in_type_declaration=16
+org.eclipse.jdt.core.formatter.alignment_for_superinterfaces_in_enum_declaration=16
+org.eclipse.jdt.core.formatter.alignment_for_superinterfaces_in_type_declaration=16
+org.eclipse.jdt.core.formatter.alignment_for_throws_clause_in_constructor_declaration=16
+org.eclipse.jdt.core.formatter.alignment_for_throws_clause_in_method_declaration=16
+org.eclipse.jdt.core.formatter.blank_lines_after_imports=1
+org.eclipse.jdt.core.formatter.blank_lines_after_package=1
+org.eclipse.jdt.core.formatter.blank_lines_before_field=0
+org.eclipse.jdt.core.formatter.blank_lines_before_first_class_body_declaration=0
+org.eclipse.jdt.core.formatter.blank_lines_before_imports=1
+org.eclipse.jdt.core.formatter.blank_lines_before_member_type=1
+org.eclipse.jdt.core.formatter.blank_lines_before_method=1
+org.eclipse.jdt.core.formatter.blank_lines_before_new_chunk=1
+org.eclipse.jdt.core.formatter.blank_lines_before_package=0
+org.eclipse.jdt.core.formatter.blank_lines_between_import_groups=1
+org.eclipse.jdt.core.formatter.blank_lines_between_type_declarations=1
+org.eclipse.jdt.core.formatter.brace_position_for_annotation_type_declaration=end_of_line
+org.eclipse.jdt.core.formatter.brace_position_for_anonymous_type_declaration=end_of_line
+org.eclipse.jdt.core.formatter.brace_position_for_array_initializer=end_of_line
+org.eclipse.jdt.core.formatter.brace_position_for_block=end_of_line
+org.eclipse.jdt.core.formatter.brace_position_for_block_in_case=end_of_line
+org.eclipse.jdt.core.formatter.brace_position_for_constructor_declaration=end_of_line
+org.eclipse.jdt.core.formatter.brace_position_for_enum_constant=end_of_line
+org.eclipse.jdt.core.formatter.brace_position_for_enum_declaration=end_of_line
+org.eclipse.jdt.core.formatter.brace_position_for_method_declaration=end_of_line
+org.eclipse.jdt.core.formatter.brace_position_for_switch=end_of_line
+org.eclipse.jdt.core.formatter.brace_position_for_type_declaration=end_of_line
+org.eclipse.jdt.core.formatter.comment.clear_blank_lines_in_block_comment=false
+org.eclipse.jdt.core.formatter.comment.clear_blank_lines_in_javadoc_comment=false
+org.eclipse.jdt.core.formatter.comment.format_block_comments=true
+org.eclipse.jdt.core.formatter.comment.format_header=false
+org.eclipse.jdt.core.formatter.comment.format_html=true
+org.eclipse.jdt.core.formatter.comment.format_javadoc_comments=true
+org.eclipse.jdt.core.formatter.comment.format_line_comments=true
+org.eclipse.jdt.core.formatter.comment.format_source_code=true
+org.eclipse.jdt.core.formatter.comment.indent_parameter_description=true
+org.eclipse.jdt.core.formatter.comment.indent_root_tags=true
+org.eclipse.jdt.core.formatter.comment.insert_new_line_before_root_tags=insert
+org.eclipse.jdt.core.formatter.comment.insert_new_line_for_parameter=insert
+org.eclipse.jdt.core.formatter.comment.line_length=80
+org.eclipse.jdt.core.formatter.comment.new_lines_at_block_boundaries=true
+org.eclipse.jdt.core.formatter.comment.new_lines_at_javadoc_boundaries=true
+org.eclipse.jdt.core.formatter.compact_else_if=true
+org.eclipse.jdt.core.formatter.continuation_indentation=2
+org.eclipse.jdt.core.formatter.continuation_indentation_for_array_initializer=2
+org.eclipse.jdt.core.formatter.disabling_tag=@formatter\:off
+org.eclipse.jdt.core.formatter.enabling_tag=@formatter\:on
+org.eclipse.jdt.core.formatter.format_guardian_clause_on_one_line=false
+org.eclipse.jdt.core.formatter.format_line_comment_starting_on_first_column=true
+org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_annotation_declaration_header=true
+org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_enum_constant_header=true
+org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_enum_declaration_header=true
+org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_type_header=true
+org.eclipse.jdt.core.formatter.indent_breaks_compare_to_cases=true
+org.eclipse.jdt.core.formatter.indent_empty_lines=false
+org.eclipse.jdt.core.formatter.indent_statements_compare_to_block=true
+org.eclipse.jdt.core.formatter.indent_statements_compare_to_body=true
+org.eclipse.jdt.core.formatter.indent_switchstatements_compare_to_cases=true
+org.eclipse.jdt.core.formatter.indent_switchstatements_compare_to_switch=false
+org.eclipse.jdt.core.formatter.indentation.size=4
+org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_local_variable=insert
+org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_member=insert
+org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_parameter=do not insert
+org.eclipse.jdt.core.formatter.insert_new_line_after_label=do not insert
+org.eclipse.jdt.core.formatter.insert_new_line_after_opening_brace_in_array_initializer=do not insert
+org.eclipse.jdt.core.formatter.insert_new_line_at_end_of_file_if_missing=do not insert
+org.eclipse.jdt.core.formatter.insert_new_line_before_catch_in_try_statement=do not insert
+org.eclipse.jdt.core.formatter.insert_new_line_before_closing_brace_in_array_initializer=do not insert
+org.eclipse.jdt.core.formatter.insert_new_line_before_else_in_if_statement=do not insert
+org.eclipse.jdt.core.formatter.insert_new_line_before_finally_in_try_statement=do not insert
+org.eclipse.jdt.core.formatter.insert_new_line_before_while_in_do_statement=do not insert
+org.eclipse.jdt.core.formatter.insert_new_line_in_empty_annotation_declaration=insert
+org.eclipse.jdt.core.formatter.insert_new_line_in_empty_anonymous_type_declaration=insert
+org.eclipse.jdt.core.formatter.insert_new_line_in_empty_block=insert
+org.eclipse.jdt.core.formatter.insert_new_line_in_empty_enum_constant=insert
+org.eclipse.jdt.core.formatter.insert_new_line_in_empty_enum_declaration=insert
+org.eclipse.jdt.core.formatter.insert_new_line_in_empty_method_body=insert
+org.eclipse.jdt.core.formatter.insert_new_line_in_empty_type_declaration=insert
+org.eclipse.jdt.core.formatter.insert_space_after_and_in_type_parameter=insert
+org.eclipse.jdt.core.formatter.insert_space_after_assignment_operator=insert
+org.eclipse.jdt.core.formatter.insert_space_after_at_in_annotation=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_at_in_annotation_type_declaration=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_binary_operator=insert
+org.eclipse.jdt.core.formatter.insert_space_after_closing_angle_bracket_in_type_arguments=insert
+org.eclipse.jdt.core.formatter.insert_space_after_closing_angle_bracket_in_type_parameters=insert
+org.eclipse.jdt.core.formatter.insert_space_after_closing_brace_in_block=insert
+org.eclipse.jdt.core.formatter.insert_space_after_closing_paren_in_cast=insert
+org.eclipse.jdt.core.formatter.insert_space_after_colon_in_assert=insert
+org.eclipse.jdt.core.formatter.insert_space_after_colon_in_case=insert
+org.eclipse.jdt.core.formatter.insert_space_after_colon_in_conditional=insert
+org.eclipse.jdt.core.formatter.insert_space_after_colon_in_for=insert
+org.eclipse.jdt.core.formatter.insert_space_after_colon_in_labeled_statement=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_allocation_expression=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_annotation=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_array_initializer=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_constructor_declaration_parameters=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_constructor_declaration_throws=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_enum_constant_arguments=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_enum_declarations=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_explicitconstructorcall_arguments=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_for_increments=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_for_inits=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_method_declaration_parameters=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_method_declaration_throws=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_method_invocation_arguments=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_multiple_field_declarations=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_multiple_local_declarations=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_parameterized_type_reference=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_superinterfaces=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_type_arguments=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_type_parameters=insert
+org.eclipse.jdt.core.formatter.insert_space_after_ellipsis=insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_angle_bracket_in_parameterized_type_reference=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_angle_bracket_in_type_arguments=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_angle_bracket_in_type_parameters=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_brace_in_array_initializer=insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_bracket_in_array_allocation_expression=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_bracket_in_array_reference=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_annotation=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_cast=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_catch=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_constructor_declaration=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_enum_constant=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_for=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_if=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_method_declaration=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_method_invocation=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_parenthesized_expression=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_switch=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_synchronized=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_while=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_postfix_operator=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_prefix_operator=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_question_in_conditional=insert
+org.eclipse.jdt.core.formatter.insert_space_after_question_in_wildcard=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_semicolon_in_for=insert
+org.eclipse.jdt.core.formatter.insert_space_after_unary_operator=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_and_in_type_parameter=insert
+org.eclipse.jdt.core.formatter.insert_space_before_assignment_operator=insert
+org.eclipse.jdt.core.formatter.insert_space_before_at_in_annotation_type_declaration=insert
+org.eclipse.jdt.core.formatter.insert_space_before_binary_operator=insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_angle_bracket_in_parameterized_type_reference=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_angle_bracket_in_type_arguments=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_angle_bracket_in_type_parameters=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_brace_in_array_initializer=insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_bracket_in_array_allocation_expression=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_bracket_in_array_reference=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_annotation=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_cast=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_catch=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_constructor_declaration=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_enum_constant=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_for=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_if=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_method_declaration=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_method_invocation=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_parenthesized_expression=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_switch=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_synchronized=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_while=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_colon_in_assert=insert
+org.eclipse.jdt.core.formatter.insert_space_before_colon_in_case=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_colon_in_conditional=insert
+org.eclipse.jdt.core.formatter.insert_space_before_colon_in_default=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_colon_in_for=insert
+org.eclipse.jdt.core.formatter.insert_space_before_colon_in_labeled_statement=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_allocation_expression=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_annotation=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_array_initializer=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_constructor_declaration_parameters=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_constructor_declaration_throws=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_enum_constant_arguments=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_enum_declarations=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_explicitconstructorcall_arguments=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_for_increments=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_for_inits=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_method_declaration_parameters=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_method_declaration_throws=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_method_invocation_arguments=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_multiple_field_declarations=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_multiple_local_declarations=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_parameterized_type_reference=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_superinterfaces=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_type_arguments=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_type_parameters=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_ellipsis=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_angle_bracket_in_parameterized_type_reference=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_angle_bracket_in_type_arguments=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_angle_bracket_in_type_parameters=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_annotation_type_declaration=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_anonymous_type_declaration=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_array_initializer=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_block=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_constructor_declaration=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_enum_constant=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_enum_declaration=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_method_declaration=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_switch=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_type_declaration=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_bracket_in_array_allocation_expression=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_bracket_in_array_reference=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_bracket_in_array_type_reference=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_annotation=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_annotation_type_member_declaration=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_catch=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_constructor_declaration=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_enum_constant=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_for=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_if=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_method_declaration=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_method_invocation=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_parenthesized_expression=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_switch=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_synchronized=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_while=insert
+org.eclipse.jdt.core.formatter.insert_space_before_parenthesized_expression_in_return=insert
+org.eclipse.jdt.core.formatter.insert_space_before_parenthesized_expression_in_throw=insert
+org.eclipse.jdt.core.formatter.insert_space_before_postfix_operator=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_prefix_operator=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_question_in_conditional=insert
+org.eclipse.jdt.core.formatter.insert_space_before_question_in_wildcard=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_semicolon=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_semicolon_in_for=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_unary_operator=do not insert
+org.eclipse.jdt.core.formatter.insert_space_between_brackets_in_array_type_reference=do not insert
+org.eclipse.jdt.core.formatter.insert_space_between_empty_braces_in_array_initializer=do not insert
+org.eclipse.jdt.core.formatter.insert_space_between_empty_brackets_in_array_allocation_expression=do not insert
+org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_annotation_type_member_declaration=do not insert
+org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_constructor_declaration=do not insert
+org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_enum_constant=do not insert
+org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_method_declaration=do not insert
+org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_method_invocation=do not insert
+org.eclipse.jdt.core.formatter.join_lines_in_comments=true
+org.eclipse.jdt.core.formatter.join_wrapped_lines=true
+org.eclipse.jdt.core.formatter.keep_else_statement_on_same_line=false
+org.eclipse.jdt.core.formatter.keep_empty_array_initializer_on_one_line=false
+org.eclipse.jdt.core.formatter.keep_imple_if_on_one_line=false
+org.eclipse.jdt.core.formatter.keep_then_statement_on_same_line=false
+org.eclipse.jdt.core.formatter.lineSplit=160
+org.eclipse.jdt.core.formatter.never_indent_block_comments_on_first_column=false
+org.eclipse.jdt.core.formatter.never_indent_line_comments_on_first_column=false
+org.eclipse.jdt.core.formatter.number_of_blank_lines_at_beginning_of_method_body=0
+org.eclipse.jdt.core.formatter.number_of_empty_lines_to_preserve=1
+org.eclipse.jdt.core.formatter.put_empty_statement_on_new_line=true
+org.eclipse.jdt.core.formatter.tabulation.char=tab
+org.eclipse.jdt.core.formatter.tabulation.size=4
+org.eclipse.jdt.core.formatter.use_on_off_tags=false
+org.eclipse.jdt.core.formatter.use_tabs_only_for_leading_indentations=false
+org.eclipse.jdt.core.formatter.wrap_before_binary_operator=true
+org.eclipse.jdt.core.formatter.wrap_outer_expressions_when_nested=false

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.hadoop.release/.settings/org.eclipse.jdt.ui.prefs
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop.release/.settings/org.eclipse.jdt.ui.prefs b/org.apache.hdt.hadoop.release/.settings/org.eclipse.jdt.ui.prefs
new file mode 100644
index 0000000..c2bc5f4
--- /dev/null
+++ b/org.apache.hdt.hadoop.release/.settings/org.eclipse.jdt.ui.prefs
@@ -0,0 +1,6 @@
+#Thu Mar 21 01:59:25 PDT 2013
+eclipse.preferences.version=1
+formatter_profile=_Apache Hadoop Eclipse Format
+formatter_settings_version=11
+org.eclipse.jdt.ui.javadoc=true
+org.eclipse.jdt.ui.text.custom_code_templates=<?xml version\="1.0" encoding\="UTF-8" standalone\="no"?><templates><template autoinsert\="true" context\="gettercomment_context" deleted\="false" description\="Comment for getter method" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.gettercomment" name\="gettercomment">/**\n * @return the ${bare_field_name}\n */</template><template autoinsert\="true" context\="settercomment_context" deleted\="false" description\="Comment for setter method" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.settercomment" name\="settercomment">/**\n * @param ${param} the ${bare_field_name} to set\n */</template><template autoinsert\="true" context\="constructorcomment_context" deleted\="false" description\="Comment for created constructors" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.constructorcomment" name\="constructorcomment">/**\n * ${tags}\n */</template><template autoinsert\="false" context\="filecomment_context
 " deleted\="false" description\="Comment for created Java files" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.filecomment" name\="filecomment">/**\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements.  See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership.  The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * "License"); you may not use this file except in compliance\n * with the License.  You may obtain a copy of the License at\n *\n *     http\://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an "AS IS" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */</template><templ
 ate autoinsert\="false" context\="typecomment_context" deleted\="false" description\="Comment for created types" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.typecomment" name\="typecomment">/**\n * @author Srimanth Gunturi\n *\n * ${tags}\n */</template><template autoinsert\="true" context\="fieldcomment_context" deleted\="false" description\="Comment for fields" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.fieldcomment" name\="fieldcomment">/**\n * \n */</template><template autoinsert\="true" context\="methodcomment_context" deleted\="false" description\="Comment for non-overriding methods" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.methodcomment" name\="methodcomment">/**\n * ${tags}\n */</template><template autoinsert\="true" context\="overridecomment_context" deleted\="false" description\="Comment for overriding methods" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.overridecomment" name\="overridecomment">/* (non-Javadoc
 )\n * ${see_to_overridden}\n */</template><template autoinsert\="true" context\="delegatecomment_context" deleted\="false" description\="Comment for delegate methods" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.delegatecomment" name\="delegatecomment">/**\n * ${tags}\n * ${see_to_target}\n */</template><template autoinsert\="true" context\="newtype_context" deleted\="false" description\="Newly created files" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.newtype" name\="newtype">${filecomment}\n${package_declaration}\n\n${typecomment}\n${type_declaration}</template><template autoinsert\="true" context\="classbody_context" deleted\="false" description\="Code in new class type bodies" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.classbody" name\="classbody">\n</template><template autoinsert\="true" context\="interfacebody_context" deleted\="false" description\="Code in new interface type bodies" enabled\="true" id\="org.eclipse.jdt.ui.text.code
 templates.interfacebody" name\="interfacebody">\n</template><template autoinsert\="true" context\="enumbody_context" deleted\="false" description\="Code in new enum type bodies" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.enumbody" name\="enumbody">\n</template><template autoinsert\="true" context\="annotationbody_context" deleted\="false" description\="Code in new annotation type bodies" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.annotationbody" name\="annotationbody">\n</template><template autoinsert\="true" context\="catchblock_context" deleted\="false" description\="Code in new catch blocks" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.catchblock" name\="catchblock">// ${todo} Auto-generated catch block\n${exception_var}.printStackTrace();</template><template autoinsert\="true" context\="methodbody_context" deleted\="false" description\="Code in created method stubs" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.methodbod
 y" name\="methodbody">// ${todo} Auto-generated method stub\n${body_statement}</template><template autoinsert\="true" context\="constructorbody_context" deleted\="false" description\="Code in created constructor stubs" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.constructorbody" name\="constructorbody">${body_statement}\n// ${todo} Auto-generated constructor stub</template><template autoinsert\="true" context\="getterbody_context" deleted\="false" description\="Code in created getters" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.getterbody" name\="getterbody">return ${field};</template><template autoinsert\="true" context\="setterbody_context" deleted\="false" description\="Code in created setters" enabled\="true" id\="org.eclipse.jdt.ui.text.codetemplates.setterbody" name\="setterbody">${field} \= ${param};</template></templates>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.hadoop.release/META-INF/MANIFEST.MF
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop.release/META-INF/MANIFEST.MF b/org.apache.hdt.hadoop.release/META-INF/MANIFEST.MF
new file mode 100644
index 0000000..4ae7da6
--- /dev/null
+++ b/org.apache.hdt.hadoop.release/META-INF/MANIFEST.MF
@@ -0,0 +1,27 @@
+Manifest-Version: 1.0
+Bundle-ManifestVersion: 2
+Bundle-Name: Apache Hadoop 1.1.2.21 Release Eclipse Plugin
+Bundle-SymbolicName: org.apache.hdt.hadoop.release;singleton:=true
+Bundle-Version: 1.1.2.qualifier
+Bundle-Vendor: Apache Hadoop
+Fragment-Host: org.apache.hdt.core;bundle-version="1.0.0"
+Bundle-RequiredExecutionEnvironment: JavaSE-1.6
+Bundle-ClassPath: lib/hadoop-1.1.2.21/hadoop-ant-1.1.2.21.jar,
+ lib/hadoop-1.1.2.21/hadoop-client-1.1.2.21.jar,
+ lib/hadoop-1.1.2.21/hadoop-core-1.1.1.jar,
+ lib/hadoop-1.1.2.21/hadoop-core-1.1.2.21.jar,
+ lib/hadoop-1.1.2.21/hadoop-examples-1.1.2.21.jar,
+ lib/hadoop-1.1.2.21/hadoop-minicluster-1.1.2.21.jar,
+ lib/hadoop-1.1.2.21/hadoop-test-1.1.2.21.jar,
+ lib/hadoop-1.1.2.21/hadoop-tools-1.1.2.21.jar,
+ lib/hadoop-1.1.2.21/commons-configuration-1.6.jar,
+ lib/hadoop-1.1.2.21/commons-lang-2.4.jar,
+ lib/hadoop-1.1.2.21/commons-logging-1.1.1.jar,
+ lib/hadoop-1.1.2.21/commons-logging-api-1.0.4.jar,
+ .,
+ lib/zookeeper-3.4.5/jline-0.9.94.jar,
+ lib/zookeeper-3.4.5/log4j-1.2.15.jar,
+ lib/zookeeper-3.4.5/netty-3.2.2.Final.jar,
+ lib/zookeeper-3.4.5/slf4j-api-1.6.1.jar,
+ lib/zookeeper-3.4.5/slf4j-log4j12-1.6.1.jar,
+ lib/zookeeper-3.4.5/zookeeper-3.4.5.21.jar

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.hadoop.release/build.properties
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop.release/build.properties b/org.apache.hdt.hadoop.release/build.properties
new file mode 100644
index 0000000..b430c64
--- /dev/null
+++ b/org.apache.hdt.hadoop.release/build.properties
@@ -0,0 +1,23 @@
+source.. = src/
+output.. = bin/
+bin.includes = META-INF/,\
+               .,\
+               lib/hadoop-1.1.2.21/hadoop-ant-1.1.2.21.jar,\
+               lib/hadoop-1.1.2.21/hadoop-client-1.1.2.21.jar,\
+               lib/hadoop-1.1.2.21/hadoop-core-1.1.1.jar,\
+               lib/hadoop-1.1.2.21/hadoop-core-1.1.2.21.jar,\
+               lib/hadoop-1.1.2.21/hadoop-examples-1.1.2.21.jar,\
+               lib/hadoop-1.1.2.21/hadoop-minicluster-1.1.2.21.jar,\
+               lib/hadoop-1.1.2.21/hadoop-test-1.1.2.21.jar,\
+               lib/hadoop-1.1.2.21/hadoop-tools-1.1.2.21.jar,\
+               fragment.xml,\
+               lib/hadoop-1.1.2.21/commons-configuration-1.6.jar,\
+               lib/hadoop-1.1.2.21/commons-lang-2.4.jar,\
+               lib/hadoop-1.1.2.21/commons-logging-1.1.1.jar,\
+               lib/hadoop-1.1.2.21/commons-logging-api-1.0.4.jar,\
+               lib/zookeeper-3.4.5/jline-0.9.94.jar,\
+               lib/zookeeper-3.4.5/log4j-1.2.15.jar,\
+               lib/zookeeper-3.4.5/netty-3.2.2.Final.jar,\
+               lib/zookeeper-3.4.5/slf4j-api-1.6.1.jar,\
+               lib/zookeeper-3.4.5/slf4j-log4j12-1.6.1.jar,\
+               lib/zookeeper-3.4.5/zookeeper-3.4.5.21.jar

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.hadoop.release/fragment.xml
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop.release/fragment.xml b/org.apache.hdt.hadoop.release/fragment.xml
new file mode 100644
index 0000000..1b11581
--- /dev/null
+++ b/org.apache.hdt.hadoop.release/fragment.xml
@@ -0,0 +1,36 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?eclipse version="3.4"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<fragment>
+   <extension
+         point="org.apache.hdt.core.hdfsClient">
+      <hdfsClient
+            class="org.apache.hdt.hadoop.release.HDFSClientRelease"
+            protocol="hdfs"
+            protocolVersion="1.1.2.21">
+      </hdfsClient>
+   </extension>
+   <extension
+         point="org.apache.hdt.core.zookeeperClient">
+      <zookeeperClient
+            class="org.apache.hdt.hadoop.release.ZooKeeperClientRelease"
+            protocolVersion="3.4.5">
+      </zookeeperClient>
+   </extension>
+
+</fragment>
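
Note: the classes named above are instantiated through createExecutableExtension("class") (see ZooKeeperManager.getClient() earlier in this patch), so each contribution needs a public no-argument constructor. The skeleton below is a hypothetical illustration of the zookeeperClient contract only, not part of the patch; the real implementations, HDFSClientRelease and ZooKeeperClientRelease, follow.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hdt.core.internal.model.ZNode;
import org.apache.hdt.core.zookeeper.ZooKeeperClient;

/** Hypothetical skeleton of a zookeeperClient contribution. */
public class SkeletonZooKeeperClient extends ZooKeeperClient {

    private String serverLocation;
    private boolean connected;

    // A public no-argument constructor is required by createExecutableExtension("class").
    public SkeletonZooKeeperClient() {
    }

    @Override
    public void initialize(String serverLocation) {
        // Remember the location so connect() knows which server to reach.
        this.serverLocation = serverLocation;
    }

    @Override
    public boolean isConnected() throws IOException, InterruptedException {
        return connected;
    }

    @Override
    public void connect() throws IOException, InterruptedException {
        // A real implementation would open a ZooKeeper session to serverLocation here.
        connected = true;
    }

    @Override
    public List<ZNode> getChildren(ZNode path) throws IOException, InterruptedException {
        return new ArrayList<ZNode>();
    }

    @Override
    public void disconnect() throws IOException, InterruptedException {
        connected = false;
    }

    @Override
    public void delete(ZNode zkn) throws IOException, InterruptedException {
    }

    @Override
    public byte[] open(ZNode path) throws InterruptedException, IOException {
        return new byte[0];
    }
}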

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HDFSClientRelease.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HDFSClientRelease.java b/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HDFSClientRelease.java
new file mode 100644
index 0000000..f9c4678
--- /dev/null
+++ b/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HDFSClientRelease.java
@@ -0,0 +1,235 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hdt.hadoop.release;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hdt.core.hdfs.ResourceInformation;
+import org.apache.log4j.Logger;
+
+/**
+ * HDFS Client for HDFS version 1.1.2.21.
+ * 
+ * @author Srimanth Gunturi
+ */
+public class HDFSClientRelease extends org.apache.hdt.core.hdfs.HDFSClient {
+
+	private static Logger logger = Logger.getLogger(HDFSClientRelease.class);
+	private Configuration config;
+
+	public HDFSClientRelease() {
+		config = new Configuration();
+	}
+
+	private ResourceInformation getResourceInformation(FileStatus fileStatus) {
+		ResourceInformation fi = new ResourceInformation();
+		fi.setFolder(fileStatus.isDir());
+		fi.setGroup(fileStatus.getGroup());
+		fi.setLastAccessedTime(fileStatus.getAccessTime());
+		fi.setLastModifiedTime(fileStatus.getModificationTime());
+		fi.setName(fileStatus.getPath().getName());
+		fi.setOwner(fileStatus.getOwner());
+		fi.setPath(fileStatus.getPath().getParent() == null ? "/" : fileStatus.getPath().getParent().toString());
+		fi.setReplicationFactor(fileStatus.getReplication());
+		fi.setSize(fileStatus.getLen());
+		FsPermission fsPermission = fileStatus.getPermission();
+		updatePermissions(fi.getUserPermissions(), fsPermission.getUserAction());
+		updatePermissions(fi.getGroupPermissions(), fsPermission.getGroupAction());
+		updatePermissions(fi.getOtherPermissions(), fsPermission.getOtherAction());
+		return fi;
+	}
+
+	private void updatePermissions(ResourceInformation.Permissions permissions, FsAction action) {
+		permissions.read = action.implies(FsAction.READ);
+		permissions.write = action.implies(FsAction.WRITE);
+		permissions.execute = action.implies(FsAction.EXECUTE);
+	}
+	
+	protected FileSystem createFS(URI uri, String user) throws IOException, InterruptedException{
+		if(user==null)
+			return FileSystem.get(uri, config);
+		return FileSystem.get(uri, config, user);
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.apache.hdt.core.hdfs.HDFSClient#getResourceInformation(java.net.URI, java.lang.String)
+	 */
+	@Override
+	public ResourceInformation getResourceInformation(URI uri, String user) throws IOException, InterruptedException {
+		FileSystem fs = createFS(uri, user);
+		Path path = new Path(uri.getPath());
+		FileStatus fileStatus = null;
+		ResourceInformation fi = null;
+		try {
+			fileStatus = fs.getFileStatus(path);
+			fi = getResourceInformation(fileStatus);
+		} catch (FileNotFoundException fne) {
+			logger.info(fne.getMessage());
+			logger.debug(fne.getMessage(), fne);
+		}
+		return fi;
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.apache.hdt.core.hdfs.HDFSClient#setResourceInformation(java.net.URI,
+	 * org.apache.hdt.core.hdfs.ResourceInformation, java.lang.String)
+	 */
+	@Override
+	public void setResourceInformation(URI uri, ResourceInformation information, String user) throws IOException, InterruptedException {
+		FileSystem fs = createFS(uri, user);
+		Path path = new Path(uri.getPath());
+		if (!information.isFolder()) {
+			fs.setTimes(path, information.getLastModifiedTime(), information.getLastAccessedTime());
+		}
+		if (information.getOwner() != null || information.getGroup() != null)
+			fs.setOwner(path, information.getOwner(), information.getGroup());
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.apache.hdt.core.hdfs.HDFSClient#listResources(java.net.URI, java.lang.String)
+	 */
+	@Override
+	public List<ResourceInformation> listResources(URI uri, String user) throws IOException, InterruptedException {
+		List<ResourceInformation> ris = null;
+		FileSystem fs = createFS(uri, user);
+		Path path = new Path(uri.getPath());
+		FileStatus[] listStatus = fs.listStatus(path);
+		if (listStatus != null) {
+			ris = new ArrayList<ResourceInformation>();
+			for (FileStatus ls : listStatus) {
+				ris.add(getResourceInformation(ls));
+			}
+		}
+		return ris;
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.apache.hdt.core.hdfs.HDFSClient#openInputStream(java.net.URI,
+	 * java.lang.String)
+	 */
+	@Override
+	public InputStream openInputStream(URI uri, String user) throws IOException, InterruptedException {
+		FileSystem fs = createFS(uri, user);
+		Path path = new Path(uri.getPath());
+		FSDataInputStream open = fs.open(path);
+		return open;
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.apache.hdt.core.hdfs.HDFSClient#createOutputStream(java.net.URI,
+	 * java.lang.String)
+	 */
+	@Override
+	public OutputStream createOutputStream(URI uri, String user) throws IOException, InterruptedException {
+		FileSystem fs = createFS(uri, user);
+		Path path = new Path(uri.getPath());
+		FSDataOutputStream outputStream = fs.create(path);
+		return outputStream;
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.apache.hdt.core.hdfs.HDFSClient#openOutputStream(java.net.URI,
+	 * java.lang.String)
+	 */
+	@Override
+	public OutputStream openOutputStream(URI uri, String user) throws IOException, InterruptedException {
+		FileSystem fs = createFS(uri, user);
+		Path path = new Path(uri.getPath());
+		// TODO. Temporary fix till Issue#3 is fixed.
+		FSDataOutputStream outputStream = fs.create(path);
+		return outputStream;
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.apache.hdt.core.hdfs.HDFSClient#mkdirs(java.net.URI,
+	 * java.lang.String)
+	 */
+	@Override
+	public boolean mkdirs(URI uri, String user) throws IOException, InterruptedException {
+		FileSystem fs = createFS(uri, user);
+		Path path = new Path(uri.getPath());
+		return fs.mkdirs(path);
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.apache.hdt.core.hdfs.HDFSClient#delete(java.net.URI,
+	 * java.lang.String)
+	 */
+	@Override
+	public void delete(URI uri, String user) throws IOException, InterruptedException {
+		FileSystem fs = createFS(uri, user);
+		Path path = new Path(uri.getPath());
+		fs.delete(path, true);
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.apache.hdt.core.hdfs.HDFSClient#getDefaultUserAndGroupIds()
+	 */
+	@Override
+	public List<String> getDefaultUserAndGroupIds() throws IOException {
+		List<String> idList = new ArrayList<String>();
+		UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
+		idList.add(currentUser.getShortUserName());
+		String[] groupIds = currentUser.getGroupNames();
+		if (groupIds != null) {
+			for (String groupId : groupIds) {
+				idList.add(groupId);
+			}
+		}
+		return idList;
+	}
+
+}
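
For readers skimming the merge, here is a minimal usage sketch of the client above (not part of the patch). The package name is taken from the surrounding diff paths, the namenode URI and user name are hypothetical, and ResourceInformation is assumed to expose getters matching the setters populated in getResourceInformation(FileStatus).

    import java.net.URI;
    import java.util.List;

    import org.apache.hdt.core.hdfs.ResourceInformation;
    import org.apache.hdt.hadoop.release.HDFSClientRelease;

    public class HDFSClientReleaseSketch {
        public static void main(String[] args) throws Exception {
            // Hypothetical namenode location; replace with a real cluster URI.
            URI dir = URI.create("hdfs://namenode.example.com:8020/tmp");
            HDFSClientRelease client = new HDFSClientRelease();

            // List the children of /tmp as the (hypothetical) "hdfs" user.
            List<ResourceInformation> children = client.listResources(dir, "hdfs");
            if (children != null) {
                for (ResourceInformation ri : children) {
                    // Assumes getName()/getOwner() accessors exist on ResourceInformation.
                    System.out.println(ri.getName() + " (owner=" + ri.getOwner() + ")");
                }
            }
        }
    }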

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/ZooKeeperClientRelease.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/ZooKeeperClientRelease.java b/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/ZooKeeperClientRelease.java
new file mode 100644
index 0000000..7de54a5
--- /dev/null
+++ b/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/ZooKeeperClientRelease.java
@@ -0,0 +1,215 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hdt.hadoop.release;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hdt.core.internal.model.HadoopFactory;
+import org.apache.hdt.core.internal.model.ZNode;
+import org.apache.hdt.core.internal.model.ZNodeType;
+import org.apache.hdt.core.zookeeper.ZooKeeperClient;
+import org.apache.log4j.Logger;
+import org.apache.zookeeper.KeeperException;
+import org.apache.zookeeper.WatchedEvent;
+import org.apache.zookeeper.Watcher;
+import org.apache.zookeeper.ZooKeeper;
+import org.apache.zookeeper.ZooKeeper.States;
+import org.apache.zookeeper.data.Stat;
+
+/**
+ * @author Srimanth Gunturi
+ * 
+ */
+public class ZooKeeperClientRelease extends ZooKeeperClient {
+
+	private static final Logger logger = Logger.getLogger(ZooKeeperClientRelease.class);
+	private ZooKeeper client = null;
+	private String serverLocation;
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.apache.hdt.core.zookeeper.ZooKeeperClient#initialize(java.lang
+	 * .String)
+	 */
+	@Override
+	public void initialize(String serverLocation) {
+		if (logger.isDebugEnabled())
+			logger.debug("initialize(" + serverLocation + ")");
+		this.serverLocation = serverLocation;
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.apache.hdt.core.zookeeper.ZooKeeperClient#isConnected()
+	 */
+	@Override
+	public boolean isConnected() throws IOException, InterruptedException {
+		if (client != null) {
+			if (logger.isDebugEnabled())
+				logger.debug("isConnected(" + serverLocation + "): Client state = " + client.getState());
+			return client.getState() == States.CONNECTED;
+		}
+		return false;
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.apache.hdt.core.zookeeper.ZooKeeperClient#connect()
+	 */
+	@Override
+	public void connect() throws IOException, InterruptedException {
+		if (client == null) {
+			if (logger.isDebugEnabled())
+				logger.debug("connect(" + serverLocation + "): Connecting begin");
+			client = new ZooKeeper(serverLocation, 5000, new Watcher() {
+				@Override
+				public void process(WatchedEvent event) {
+				}
+			});
+			int waitCount = 0;
+			while (client.getState() == States.CONNECTING && waitCount++ < 5) {
+				if (logger.isDebugEnabled())
+					logger.debug("connect(" + serverLocation + "): Still connecting... sleep for 1s");
+				Thread.sleep(1000);
+			}
+			if (logger.isDebugEnabled())
+				logger.debug("connect(" + serverLocation + "): Connecting finish with state: " + client.getState());
+		}
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see org.apache.hdt.core.zookeeper.ZooKeeperClient#disconnect()
+	 */
+	@Override
+	public void disconnect() throws IOException, InterruptedException {
+		if (logger.isDebugEnabled())
+			logger.debug("disconnect(" + serverLocation + ")");
+		if (client != null) {
+			client.close();
+			client = null;
+		}
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.apache.hdt.core.zookeeper.ZooKeeperClient#getChildren(org.apache
+	 * .hdt.core.internal.model.ZNode)
+	 */
+	@Override
+	public List<ZNode> getChildren(ZNode node) throws IOException, InterruptedException {
+		if (logger.isDebugEnabled())
+			logger.debug("getChildren(" + node.getPath() + ")");
+		List<ZNode> childNodes = new ArrayList<ZNode>();
+		try {
+			Stat nodeStat = new Stat();
+			List<String> children = client.getChildren(node.getPath(), false, nodeStat);
+			copyFromStat(nodeStat, node);
+
+			if (children != null) {
+				for (String child : children) {
+					ZNode cNode = HadoopFactory.eINSTANCE.createZNode();
+					cNode.setNodeName(child);
+					cNode.setParent(node);
+					Stat exists = client.exists(cNode.getPath(), false);
+					if (exists != null) {
+						copyFromStat(exists, cNode);
+						childNodes.add(cNode);
+					}
+				}
+			}
+		} catch (KeeperException e) {
+			logger.debug(e.getMessage(), e);
+			throw new IOException(e.getMessage(), e);
+		}
+		if (logger.isDebugEnabled())
+			logger.debug("getChildren(" + node.getPath() + "): ChildCount=" + childNodes.size());
+		return childNodes;
+	}
+
+	/**
+	 * @param nodeStat source ZooKeeper node statistics
+	 * @param node target model node to populate
+	 */
+	private void copyFromStat(Stat nodeStat, ZNode node) {
+		node.setAclVersion(nodeStat.getAversion());
+		node.setChildrenCount(nodeStat.getNumChildren());
+		node.setChildrenVersion(nodeStat.getCversion());
+		node.setCreationId(nodeStat.getCzxid());
+		node.setCreationTime(nodeStat.getCtime());
+		node.setDataLength(nodeStat.getDataLength());
+		node.setEphermalOwnerSessionId(nodeStat.getEphemeralOwner());
+		node.setLastRefresh(System.currentTimeMillis());
+		node.setModifiedId(nodeStat.getMzxid());
+		node.setModifiedTime(nodeStat.getMtime());
+		node.setVersion(nodeStat.getVersion());
+		if (nodeStat.getEphemeralOwner() > 0)
+			node.setEphermeral(true);
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.apache.hdt.core.zookeeper.ZooKeeperClient#delete(org.apache
+	 * .hdt.core.internal.model.ZNode)
+	 */
+	@Override
+	public void delete(ZNode zkn) throws IOException, InterruptedException {
+		if (logger.isDebugEnabled())
+			logger.debug("delete(" + zkn.getPath() + ")");
+		try {
+			client.delete(zkn.getPath(), -1);
+		} catch (KeeperException e) {
+			throw new IOException(e.getMessage(), e);
+		}
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * 
+	 * @see
+	 * org.apache.hdt.core.zookeeper.ZooKeeperClient#open(org.apache
+	 * .hdt.core.internal.model.ZNode)
+	 */
+	@Override
+	public byte[] open(ZNode node) throws InterruptedException, IOException {
+		if (logger.isDebugEnabled())
+			logger.debug("open(" + node.getPath() + ")");
+		Stat stat = new Stat();
+		byte[] nd;
+		try {
+			nd = client.getData(node.getPath(), false, stat);
+		} catch (KeeperException e) {
+			throw new IOException(e.getMessage(), e);
+		}
+		return nd;
+	}
+
+}
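
Likewise, a minimal lifecycle sketch for the ZooKeeper client above (not part of the patch); the quorum address is hypothetical and only the methods visible in this file are used.

    import org.apache.hdt.hadoop.release.ZooKeeperClientRelease;

    public class ZooKeeperClientReleaseSketch {
        public static void main(String[] args) throws Exception {
            ZooKeeperClientRelease zk = new ZooKeeperClientRelease();

            // Hypothetical quorum address; initialize() only records the location.
            zk.initialize("zk1.example.com:2181");

            // connect() polls for up to ~5 seconds while the session is CONNECTING.
            zk.connect();
            System.out.println("connected: " + zk.isConnected());

            // Close the ZooKeeper session when done.
            zk.disconnect();
        }
    }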

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.ui.test/.classpath
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui.test/.classpath b/org.apache.hdt.ui.test/.classpath
new file mode 100644
index 0000000..1fa3e68
--- /dev/null
+++ b/org.apache.hdt.ui.test/.classpath
@@ -0,0 +1,7 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<classpath>
+	<classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER"/>
+	<classpathentry kind="con" path="org.eclipse.pde.core.requiredPlugins"/>
+	<classpathentry kind="src" path="src"/>
+	<classpathentry kind="output" path="bin"/>
+</classpath>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.ui.test/.project
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui.test/.project b/org.apache.hdt.ui.test/.project
new file mode 100644
index 0000000..bd7f861
--- /dev/null
+++ b/org.apache.hdt.ui.test/.project
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<projectDescription>
+	<name>org.apache.hdt.ui.test</name>
+	<comment></comment>
+	<projects>
+	</projects>
+	<buildSpec>
+		<buildCommand>
+			<name>org.eclipse.jdt.core.javabuilder</name>
+			<arguments>
+			</arguments>
+		</buildCommand>
+		<buildCommand>
+			<name>org.eclipse.pde.ManifestBuilder</name>
+			<arguments>
+			</arguments>
+		</buildCommand>
+		<buildCommand>
+			<name>org.eclipse.pde.SchemaBuilder</name>
+			<arguments>
+			</arguments>
+		</buildCommand>
+	</buildSpec>
+	<natures>
+		<nature>org.eclipse.pde.PluginNature</nature>
+		<nature>org.eclipse.jdt.core.javanature</nature>
+	</natures>
+</projectDescription>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.ui.test/.settings/org.eclipse.jdt.core.prefs
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui.test/.settings/org.eclipse.jdt.core.prefs b/org.apache.hdt.ui.test/.settings/org.eclipse.jdt.core.prefs
new file mode 100644
index 0000000..c537b63
--- /dev/null
+++ b/org.apache.hdt.ui.test/.settings/org.eclipse.jdt.core.prefs
@@ -0,0 +1,7 @@
+eclipse.preferences.version=1
+org.eclipse.jdt.core.compiler.codegen.inlineJsrBytecode=enabled
+org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.6
+org.eclipse.jdt.core.compiler.compliance=1.6
+org.eclipse.jdt.core.compiler.problem.assertIdentifier=error
+org.eclipse.jdt.core.compiler.problem.enumIdentifier=error
+org.eclipse.jdt.core.compiler.source=1.6

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.ui.test/META-INF/MANIFEST.MF
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui.test/META-INF/MANIFEST.MF b/org.apache.hdt.ui.test/META-INF/MANIFEST.MF
new file mode 100644
index 0000000..e537255
--- /dev/null
+++ b/org.apache.hdt.ui.test/META-INF/MANIFEST.MF
@@ -0,0 +1,14 @@
+Manifest-Version: 1.0
+Bundle-ManifestVersion: 2
+Bundle-Name: Apache Hadoop UI Test Eclipse Plugin
+Bundle-SymbolicName: org.apache.hdt.ui.test;singleton:=true
+Bundle-Version: 1.0.0.qualifier
+Bundle-Activator: org.apache.hdt.ui.test.Activator
+Bundle-Vendor: Apache Hadoop
+Require-Bundle: org.eclipse.ui,
+ org.eclipse.core.runtime,
+ org.junit4;bundle-version="4.8.1",
+ org.apache.hdt.core;bundle-version="1.0.0",
+ org.apache.hdt.ui;bundle-version="1.0.0"
+Bundle-RequiredExecutionEnvironment: JavaSE-1.6
+Bundle-ActivationPolicy: lazy

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.ui.test/build.properties
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui.test/build.properties b/org.apache.hdt.ui.test/build.properties
new file mode 100644
index 0000000..e9863e2
--- /dev/null
+++ b/org.apache.hdt.ui.test/build.properties
@@ -0,0 +1,5 @@
+source.. = src/
+output.. = bin/
+bin.includes = META-INF/,\
+               .,\
+               plugin.xml

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.ui.test/plugin.xml
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui.test/plugin.xml b/org.apache.hdt.ui.test/plugin.xml
new file mode 100644
index 0000000..c424fb9
--- /dev/null
+++ b/org.apache.hdt.ui.test/plugin.xml
@@ -0,0 +1,21 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?eclipse version="3.4"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<plugin>
+
+</plugin>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.ui.test/src/org/apache/hdt/ui/test/Activator.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui.test/src/org/apache/hdt/ui/test/Activator.java b/org.apache.hdt.ui.test/src/org/apache/hdt/ui/test/Activator.java
new file mode 100644
index 0000000..6279db7
--- /dev/null
+++ b/org.apache.hdt.ui.test/src/org/apache/hdt/ui/test/Activator.java
@@ -0,0 +1,67 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hdt.ui.test;
+
+import org.eclipse.ui.plugin.AbstractUIPlugin;
+import org.osgi.framework.BundleContext;
+
+/**
+ * The activator class controls the plug-in life cycle
+ */
+public class Activator extends AbstractUIPlugin {
+
+	// The plug-in ID
+	public static final String PLUGIN_ID = "org.apache.hdt.ui.test"; //$NON-NLS-1$
+
+	// The shared instance
+	private static Activator plugin;
+	
+	/**
+	 * The constructor
+	 */
+	public Activator() {
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * @see org.eclipse.ui.plugin.AbstractUIPlugin#start(org.osgi.framework.BundleContext)
+	 */
+	public void start(BundleContext context) throws Exception {
+		super.start(context);
+		plugin = this;
+	}
+
+	/*
+	 * (non-Javadoc)
+	 * @see org.eclipse.ui.plugin.AbstractUIPlugin#stop(org.osgi.framework.BundleContext)
+	 */
+	public void stop(BundleContext context) throws Exception {
+		plugin = null;
+		super.stop(context);
+	}
+
+	/**
+	 * Returns the shared instance
+	 *
+	 * @return the shared instance
+	 */
+	public static Activator getDefault() {
+		return plugin;
+	}
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.ui.test/src/org/apache/hdt/ui/test/AllTests.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui.test/src/org/apache/hdt/ui/test/AllTests.java b/org.apache.hdt.ui.test/src/org/apache/hdt/ui/test/AllTests.java
new file mode 100644
index 0000000..f6e6b37
--- /dev/null
+++ b/org.apache.hdt.ui.test/src/org/apache/hdt/ui/test/AllTests.java
@@ -0,0 +1,21 @@
+/**
+ * 
+ */
+package org.apache.hdt.ui.test;
+
+import org.apache.hdt.ui.test.hdfs.HDFSTests;
+import org.junit.runner.RunWith;
+import org.junit.runners.Suite;
+
+import junit.framework.Test;
+import junit.framework.TestSuite;
+
+@RunWith(Suite.class)
+@Suite.SuiteClasses({
+	HDFSTests.class
+})
+/**
+ * @author Srimanth Gunturi
+ *
+ */
+public class AllTests {}

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.ui.test/src/org/apache/hdt/ui/test/hdfs/HDFSTests.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui.test/src/org/apache/hdt/ui/test/hdfs/HDFSTests.java b/org.apache.hdt.ui.test/src/org/apache/hdt/ui/test/hdfs/HDFSTests.java
new file mode 100644
index 0000000..e1d5128
--- /dev/null
+++ b/org.apache.hdt.ui.test/src/org/apache/hdt/ui/test/hdfs/HDFSTests.java
@@ -0,0 +1,20 @@
+/**
+ * 
+ */
+package org.apache.hdt.ui.test.hdfs;
+
+import org.junit.runner.RunWith;
+import org.junit.runners.Suite;
+import org.junit.runners.Suite.SuiteClasses;
+
+import junit.framework.Test;
+import junit.framework.TestSuite;
+
+@RunWith(Suite.class)
+@Suite.SuiteClasses({
+	ModelTests.class
+})
+/**
+ * @author Srimanth Gunturi
+ */
+public class HDFSTests {}

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.ui.test/src/org/apache/hdt/ui/test/hdfs/ModelTests.java
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui.test/src/org/apache/hdt/ui/test/hdfs/ModelTests.java b/org.apache.hdt.ui.test/src/org/apache/hdt/ui/test/hdfs/ModelTests.java
new file mode 100644
index 0000000..96bff84
--- /dev/null
+++ b/org.apache.hdt.ui.test/src/org/apache/hdt/ui/test/hdfs/ModelTests.java
@@ -0,0 +1,22 @@
+/**
+ * 
+ */
+package org.apache.hdt.ui.test.hdfs;
+
+import org.apache.hdt.core.internal.hdfs.HDFSManager;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.JUnit4;
+
+@RunWith(JUnit4.class)
+/**
+ * @author Srimanth Gunturi
+ *
+ */
+public class ModelTests {
+
+	@Test
+	public void testModelLoadSave() {
+		org.junit.Assert.assertNotNull(HDFSManager.INSTANCE);
+	}
+}

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.ui/.classpath
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/.classpath b/org.apache.hdt.ui/.classpath
new file mode 100644
index 0000000..1fa3e68
--- /dev/null
+++ b/org.apache.hdt.ui/.classpath
@@ -0,0 +1,7 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<classpath>
+	<classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER"/>
+	<classpathentry kind="con" path="org.eclipse.pde.core.requiredPlugins"/>
+	<classpathentry kind="src" path="src"/>
+	<classpathentry kind="output" path="bin"/>
+</classpath>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.ui/.project
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/.project b/org.apache.hdt.ui/.project
new file mode 100644
index 0000000..feb7efa
--- /dev/null
+++ b/org.apache.hdt.ui/.project
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<projectDescription>
+	<name>org.apache.hdt.ui</name>
+	<comment></comment>
+	<projects>
+	</projects>
+	<buildSpec>
+		<buildCommand>
+			<name>org.eclipse.jdt.core.javabuilder</name>
+			<arguments>
+			</arguments>
+		</buildCommand>
+		<buildCommand>
+			<name>org.eclipse.pde.ManifestBuilder</name>
+			<arguments>
+			</arguments>
+		</buildCommand>
+		<buildCommand>
+			<name>org.eclipse.pde.SchemaBuilder</name>
+			<arguments>
+			</arguments>
+		</buildCommand>
+	</buildSpec>
+	<natures>
+		<nature>org.eclipse.pde.PluginNature</nature>
+		<nature>org.eclipse.jdt.core.javanature</nature>
+	</natures>
+</projectDescription>

http://git-wip-us.apache.org/repos/asf/incubator-hdt/blob/63bec260/org.apache.hdt.ui/.settings/org.eclipse.jdt.core.prefs
----------------------------------------------------------------------
diff --git a/org.apache.hdt.ui/.settings/org.eclipse.jdt.core.prefs b/org.apache.hdt.ui/.settings/org.eclipse.jdt.core.prefs
new file mode 100644
index 0000000..35420bb
--- /dev/null
+++ b/org.apache.hdt.ui/.settings/org.eclipse.jdt.core.prefs
@@ -0,0 +1,276 @@
+#Thu Mar 21 00:36:13 PDT 2013
+eclipse.preferences.version=1
+org.eclipse.jdt.core.compiler.codegen.inlineJsrBytecode=enabled
+org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.6
+org.eclipse.jdt.core.compiler.compliance=1.6
+org.eclipse.jdt.core.compiler.problem.assertIdentifier=error
+org.eclipse.jdt.core.compiler.problem.enumIdentifier=error
+org.eclipse.jdt.core.compiler.source=1.6
+org.eclipse.jdt.core.formatter.align_type_members_on_columns=false
+org.eclipse.jdt.core.formatter.alignment_for_arguments_in_allocation_expression=16
+org.eclipse.jdt.core.formatter.alignment_for_arguments_in_annotation=0
+org.eclipse.jdt.core.formatter.alignment_for_arguments_in_enum_constant=16
+org.eclipse.jdt.core.formatter.alignment_for_arguments_in_explicit_constructor_call=16
+org.eclipse.jdt.core.formatter.alignment_for_arguments_in_method_invocation=16
+org.eclipse.jdt.core.formatter.alignment_for_arguments_in_qualified_allocation_expression=16
+org.eclipse.jdt.core.formatter.alignment_for_assignment=0
+org.eclipse.jdt.core.formatter.alignment_for_binary_expression=16
+org.eclipse.jdt.core.formatter.alignment_for_compact_if=16
+org.eclipse.jdt.core.formatter.alignment_for_conditional_expression=80
+org.eclipse.jdt.core.formatter.alignment_for_enum_constants=0
+org.eclipse.jdt.core.formatter.alignment_for_expressions_in_array_initializer=16
+org.eclipse.jdt.core.formatter.alignment_for_method_declaration=0
+org.eclipse.jdt.core.formatter.alignment_for_multiple_fields=16
+org.eclipse.jdt.core.formatter.alignment_for_parameters_in_constructor_declaration=16
+org.eclipse.jdt.core.formatter.alignment_for_parameters_in_method_declaration=16
+org.eclipse.jdt.core.formatter.alignment_for_selector_in_method_invocation=16
+org.eclipse.jdt.core.formatter.alignment_for_superclass_in_type_declaration=16
+org.eclipse.jdt.core.formatter.alignment_for_superinterfaces_in_enum_declaration=16
+org.eclipse.jdt.core.formatter.alignment_for_superinterfaces_in_type_declaration=16
+org.eclipse.jdt.core.formatter.alignment_for_throws_clause_in_constructor_declaration=16
+org.eclipse.jdt.core.formatter.alignment_for_throws_clause_in_method_declaration=16
+org.eclipse.jdt.core.formatter.blank_lines_after_imports=1
+org.eclipse.jdt.core.formatter.blank_lines_after_package=1
+org.eclipse.jdt.core.formatter.blank_lines_before_field=0
+org.eclipse.jdt.core.formatter.blank_lines_before_first_class_body_declaration=0
+org.eclipse.jdt.core.formatter.blank_lines_before_imports=1
+org.eclipse.jdt.core.formatter.blank_lines_before_member_type=1
+org.eclipse.jdt.core.formatter.blank_lines_before_method=1
+org.eclipse.jdt.core.formatter.blank_lines_before_new_chunk=1
+org.eclipse.jdt.core.formatter.blank_lines_before_package=0
+org.eclipse.jdt.core.formatter.blank_lines_between_import_groups=1
+org.eclipse.jdt.core.formatter.blank_lines_between_type_declarations=1
+org.eclipse.jdt.core.formatter.brace_position_for_annotation_type_declaration=end_of_line
+org.eclipse.jdt.core.formatter.brace_position_for_anonymous_type_declaration=end_of_line
+org.eclipse.jdt.core.formatter.brace_position_for_array_initializer=end_of_line
+org.eclipse.jdt.core.formatter.brace_position_for_block=end_of_line
+org.eclipse.jdt.core.formatter.brace_position_for_block_in_case=end_of_line
+org.eclipse.jdt.core.formatter.brace_position_for_constructor_declaration=end_of_line
+org.eclipse.jdt.core.formatter.brace_position_for_enum_constant=end_of_line
+org.eclipse.jdt.core.formatter.brace_position_for_enum_declaration=end_of_line
+org.eclipse.jdt.core.formatter.brace_position_for_method_declaration=end_of_line
+org.eclipse.jdt.core.formatter.brace_position_for_switch=end_of_line
+org.eclipse.jdt.core.formatter.brace_position_for_type_declaration=end_of_line
+org.eclipse.jdt.core.formatter.comment.clear_blank_lines_in_block_comment=false
+org.eclipse.jdt.core.formatter.comment.clear_blank_lines_in_javadoc_comment=false
+org.eclipse.jdt.core.formatter.comment.format_block_comments=true
+org.eclipse.jdt.core.formatter.comment.format_header=false
+org.eclipse.jdt.core.formatter.comment.format_html=true
+org.eclipse.jdt.core.formatter.comment.format_javadoc_comments=true
+org.eclipse.jdt.core.formatter.comment.format_line_comments=true
+org.eclipse.jdt.core.formatter.comment.format_source_code=true
+org.eclipse.jdt.core.formatter.comment.indent_parameter_description=true
+org.eclipse.jdt.core.formatter.comment.indent_root_tags=true
+org.eclipse.jdt.core.formatter.comment.insert_new_line_before_root_tags=insert
+org.eclipse.jdt.core.formatter.comment.insert_new_line_for_parameter=insert
+org.eclipse.jdt.core.formatter.comment.line_length=80
+org.eclipse.jdt.core.formatter.comment.new_lines_at_block_boundaries=true
+org.eclipse.jdt.core.formatter.comment.new_lines_at_javadoc_boundaries=true
+org.eclipse.jdt.core.formatter.compact_else_if=true
+org.eclipse.jdt.core.formatter.continuation_indentation=2
+org.eclipse.jdt.core.formatter.continuation_indentation_for_array_initializer=2
+org.eclipse.jdt.core.formatter.disabling_tag=@formatter\:off
+org.eclipse.jdt.core.formatter.enabling_tag=@formatter\:on
+org.eclipse.jdt.core.formatter.format_guardian_clause_on_one_line=false
+org.eclipse.jdt.core.formatter.format_line_comment_starting_on_first_column=true
+org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_annotation_declaration_header=true
+org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_enum_constant_header=true
+org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_enum_declaration_header=true
+org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_type_header=true
+org.eclipse.jdt.core.formatter.indent_breaks_compare_to_cases=true
+org.eclipse.jdt.core.formatter.indent_empty_lines=false
+org.eclipse.jdt.core.formatter.indent_statements_compare_to_block=true
+org.eclipse.jdt.core.formatter.indent_statements_compare_to_body=true
+org.eclipse.jdt.core.formatter.indent_switchstatements_compare_to_cases=true
+org.eclipse.jdt.core.formatter.indent_switchstatements_compare_to_switch=false
+org.eclipse.jdt.core.formatter.indentation.size=4
+org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_local_variable=insert
+org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_member=insert
+org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_parameter=do not insert
+org.eclipse.jdt.core.formatter.insert_new_line_after_label=do not insert
+org.eclipse.jdt.core.formatter.insert_new_line_after_opening_brace_in_array_initializer=do not insert
+org.eclipse.jdt.core.formatter.insert_new_line_at_end_of_file_if_missing=do not insert
+org.eclipse.jdt.core.formatter.insert_new_line_before_catch_in_try_statement=do not insert
+org.eclipse.jdt.core.formatter.insert_new_line_before_closing_brace_in_array_initializer=do not insert
+org.eclipse.jdt.core.formatter.insert_new_line_before_else_in_if_statement=do not insert
+org.eclipse.jdt.core.formatter.insert_new_line_before_finally_in_try_statement=do not insert
+org.eclipse.jdt.core.formatter.insert_new_line_before_while_in_do_statement=do not insert
+org.eclipse.jdt.core.formatter.insert_new_line_in_empty_annotation_declaration=insert
+org.eclipse.jdt.core.formatter.insert_new_line_in_empty_anonymous_type_declaration=insert
+org.eclipse.jdt.core.formatter.insert_new_line_in_empty_block=insert
+org.eclipse.jdt.core.formatter.insert_new_line_in_empty_enum_constant=insert
+org.eclipse.jdt.core.formatter.insert_new_line_in_empty_enum_declaration=insert
+org.eclipse.jdt.core.formatter.insert_new_line_in_empty_method_body=insert
+org.eclipse.jdt.core.formatter.insert_new_line_in_empty_type_declaration=insert
+org.eclipse.jdt.core.formatter.insert_space_after_and_in_type_parameter=insert
+org.eclipse.jdt.core.formatter.insert_space_after_assignment_operator=insert
+org.eclipse.jdt.core.formatter.insert_space_after_at_in_annotation=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_at_in_annotation_type_declaration=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_binary_operator=insert
+org.eclipse.jdt.core.formatter.insert_space_after_closing_angle_bracket_in_type_arguments=insert
+org.eclipse.jdt.core.formatter.insert_space_after_closing_angle_bracket_in_type_parameters=insert
+org.eclipse.jdt.core.formatter.insert_space_after_closing_brace_in_block=insert
+org.eclipse.jdt.core.formatter.insert_space_after_closing_paren_in_cast=insert
+org.eclipse.jdt.core.formatter.insert_space_after_colon_in_assert=insert
+org.eclipse.jdt.core.formatter.insert_space_after_colon_in_case=insert
+org.eclipse.jdt.core.formatter.insert_space_after_colon_in_conditional=insert
+org.eclipse.jdt.core.formatter.insert_space_after_colon_in_for=insert
+org.eclipse.jdt.core.formatter.insert_space_after_colon_in_labeled_statement=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_allocation_expression=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_annotation=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_array_initializer=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_constructor_declaration_parameters=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_constructor_declaration_throws=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_enum_constant_arguments=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_enum_declarations=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_explicitconstructorcall_arguments=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_for_increments=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_for_inits=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_method_declaration_parameters=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_method_declaration_throws=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_method_invocation_arguments=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_multiple_field_declarations=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_multiple_local_declarations=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_parameterized_type_reference=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_superinterfaces=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_type_arguments=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_type_parameters=insert
+org.eclipse.jdt.core.formatter.insert_space_after_ellipsis=insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_angle_bracket_in_parameterized_type_reference=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_angle_bracket_in_type_arguments=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_angle_bracket_in_type_parameters=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_brace_in_array_initializer=insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_bracket_in_array_allocation_expression=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_bracket_in_array_reference=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_annotation=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_cast=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_catch=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_constructor_declaration=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_enum_constant=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_for=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_if=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_method_declaration=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_method_invocation=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_parenthesized_expression=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_switch=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_synchronized=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_while=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_postfix_operator=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_prefix_operator=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_question_in_conditional=insert
+org.eclipse.jdt.core.formatter.insert_space_after_question_in_wildcard=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_semicolon_in_for=insert
+org.eclipse.jdt.core.formatter.insert_space_after_unary_operator=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_and_in_type_parameter=insert
+org.eclipse.jdt.core.formatter.insert_space_before_assignment_operator=insert
+org.eclipse.jdt.core.formatter.insert_space_before_at_in_annotation_type_declaration=insert
+org.eclipse.jdt.core.formatter.insert_space_before_binary_operator=insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_angle_bracket_in_parameterized_type_reference=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_angle_bracket_in_type_arguments=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_angle_bracket_in_type_parameters=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_brace_in_array_initializer=insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_bracket_in_array_allocation_expression=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_bracket_in_array_reference=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_annotation=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_cast=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_catch=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_constructor_declaration=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_enum_constant=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_for=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_if=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_method_declaration=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_method_invocation=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_parenthesized_expression=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_switch=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_synchronized=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_while=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_colon_in_assert=insert
+org.eclipse.jdt.core.formatter.insert_space_before_colon_in_case=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_colon_in_conditional=insert
+org.eclipse.jdt.core.formatter.insert_space_before_colon_in_default=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_colon_in_for=insert
+org.eclipse.jdt.core.formatter.insert_space_before_colon_in_labeled_statement=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_allocation_expression=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_annotation=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_array_initializer=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_constructor_declaration_parameters=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_constructor_declaration_throws=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_enum_constant_arguments=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_enum_declarations=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_explicitconstructorcall_arguments=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_for_increments=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_for_inits=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_method_declaration_parameters=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_method_declaration_throws=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_method_invocation_arguments=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_multiple_field_declarations=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_multiple_local_declarations=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_parameterized_type_reference=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_superinterfaces=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_type_arguments=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_type_parameters=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_ellipsis=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_angle_bracket_in_parameterized_type_reference=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_angle_bracket_in_type_arguments=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_angle_bracket_in_type_parameters=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_annotation_type_declaration=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_anonymous_type_declaration=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_array_initializer=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_block=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_constructor_declaration=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_enum_constant=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_enum_declaration=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_method_declaration=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_switch=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_type_declaration=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_bracket_in_array_allocation_expression=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_bracket_in_array_reference=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_bracket_in_array_type_reference=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_annotation=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_annotation_type_member_declaration=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_catch=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_constructor_declaration=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_enum_constant=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_for=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_if=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_method_declaration=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_method_invocation=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_parenthesized_expression=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_switch=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_synchronized=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_while=insert
+org.eclipse.jdt.core.formatter.insert_space_before_parenthesized_expression_in_return=insert
+org.eclipse.jdt.core.formatter.insert_space_before_parenthesized_expression_in_throw=insert
+org.eclipse.jdt.core.formatter.insert_space_before_postfix_operator=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_prefix_operator=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_question_in_conditional=insert
+org.eclipse.jdt.core.formatter.insert_space_before_question_in_wildcard=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_semicolon=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_semicolon_in_for=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_unary_operator=do not insert
+org.eclipse.jdt.core.formatter.insert_space_between_brackets_in_array_type_reference=do not insert
+org.eclipse.jdt.core.formatter.insert_space_between_empty_braces_in_array_initializer=do not insert
+org.eclipse.jdt.core.formatter.insert_space_between_empty_brackets_in_array_allocation_expression=do not insert
+org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_annotation_type_member_declaration=do not insert
+org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_constructor_declaration=do not insert
+org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_enum_constant=do not insert
+org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_method_declaration=do not insert
+org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_method_invocation=do not insert
+org.eclipse.jdt.core.formatter.join_lines_in_comments=true
+org.eclipse.jdt.core.formatter.join_wrapped_lines=true
+org.eclipse.jdt.core.formatter.keep_else_statement_on_same_line=false
+org.eclipse.jdt.core.formatter.keep_empty_array_initializer_on_one_line=false
+org.eclipse.jdt.core.formatter.keep_imple_if_on_one_line=false
+org.eclipse.jdt.core.formatter.keep_then_statement_on_same_line=false
+org.eclipse.jdt.core.formatter.lineSplit=160
+org.eclipse.jdt.core.formatter.never_indent_block_comments_on_first_column=false
+org.eclipse.jdt.core.formatter.never_indent_line_comments_on_first_column=false
+org.eclipse.jdt.core.formatter.number_of_blank_lines_at_beginning_of_method_body=0
+org.eclipse.jdt.core.formatter.number_of_empty_lines_to_preserve=1
+org.eclipse.jdt.core.formatter.put_empty_statement_on_new_line=true
+org.eclipse.jdt.core.formatter.tabulation.char=tab
+org.eclipse.jdt.core.formatter.tabulation.size=4
+org.eclipse.jdt.core.formatter.use_on_off_tags=false
+org.eclipse.jdt.core.formatter.use_tabs_only_for_leading_indentations=false
+org.eclipse.jdt.core.formatter.wrap_before_binary_operator=true
+org.eclipse.jdt.core.formatter.wrap_outer_expressions_when_nested=false