Posted to commits@bigtop.apache.org by rv...@apache.org on 2013/02/10 07:04:01 UTC

[31/50] [abbrv] git commit: BIGTOP-731 Reorganize Hadoop tests (by Wing Yew Poon)

BIGTOP-731 Reorganize Hadoop tests (by Wing Yew Poon)


Project: http://git-wip-us.apache.org/repos/asf/bigtop/repo
Commit: http://git-wip-us.apache.org/repos/asf/bigtop/commit/87e5388a
Tree: http://git-wip-us.apache.org/repos/asf/bigtop/tree/87e5388a
Diff: http://git-wip-us.apache.org/repos/asf/bigtop/diff/87e5388a

Branch: refs/heads/RCs
Commit: 87e5388aaf468c96c12d207e8fea8b2619b5fb76
Parents: 029d6c4
Author: Stephen Chu <sc...@cloudera.com>
Authored: Wed Dec 5 12:53:53 2012 -0800
Committer: Stephen Chu <sc...@cloudera.com>
Committed: Wed Dec 5 12:53:53 2012 -0800

----------------------------------------------------------------------
 .../bigtop/itest/hadoop/hdfs/FSCmdExecutor.java    |   60 +++
 .../apache/bigtop/itest/hadoop/hdfs/TestCLI.java   |   93 +++++
 .../bigtop/itest/hadoop/hdfs/TestDFSAdmin.groovy   |  182 +++++++++
 .../bigtop/itest/hadoop/hdfs/TestFsck.groovy       |   51 +++
 .../itest/hadoop/hdfs/TestHDFSBalancer.groovy      |   84 ++++
 .../bigtop/itest/hadoop/hdfs/TestHDFSQuota.groovy  |  308 +++++++++++++++
 .../hadoop/mapreduce/TestHadoopExamples.groovy     |  106 +++++
 .../itest/hadoop/mapreduce/TestHadoopSmoke.groovy  |   89 +++++
 .../itest/hadoopexamples/TestHadoopExamples.groovy |  106 -----
 .../itest/hadoopsmoke/TestHadoopSmoke.groovy       |   89 -----
 .../bigtop/itest/hadooptests/FSCmdExecutor.java    |   60 ---
 .../apache/bigtop/itest/hadooptests/TestCLI.java   |   93 -----
 .../bigtop/itest/hdfstests/TestDFSAdmin.groovy     |  182 ---------
 .../apache/bigtop/itest/hdfstests/TestFsck.groovy  |   51 ---
 .../bigtop/itest/hdfstests/TestHDFSBalancer.groovy |   84 ----
 .../bigtop/itest/hdfstests/TestHDFSQuota.groovy    |  308 ---------------
 16 files changed, 973 insertions(+), 973 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/bigtop/blob/87e5388a/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hadoop/hdfs/FSCmdExecutor.java
----------------------------------------------------------------------
diff --git a/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hadoop/hdfs/FSCmdExecutor.java b/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hadoop/hdfs/FSCmdExecutor.java
new file mode 100644
index 0000000..bb5adc4
--- /dev/null
+++ b/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hadoop/hdfs/FSCmdExecutor.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.bigtop.itest.hadoop.hdfs;
+
+import java.io.File;
+import java.util.StringTokenizer;
+
+import org.apache.hadoop.fs.FsShell;
+import org.apache.hadoop.cli.CLITestHelper;
+import org.apache.hadoop.cli.util.CommandExecutor;
+import org.apache.hadoop.util.ToolRunner;
+
+public class FSCmdExecutor extends CommandExecutor {
+  protected String namenode = null;
+  protected FsShell shell = null;
+
+  public FSCmdExecutor(String namenode, FsShell shell) {
+    this.namenode = namenode;
+    this.shell = shell;
+  }
+
+  protected void execute(final String cmd) throws Exception {
+    String[] args = getCommandAsArgs(cmd, "NAMENODE", this.namenode);
+    ToolRunner.run(shell, args);
+  }
+
+  @Override
+  protected String[] getCommandAsArgs(final String cmd, final String masterKey,
+                                      final String master) {
+    StringTokenizer tokenizer = new StringTokenizer(cmd, " ");
+    String[] args = new String[tokenizer.countTokens()];
+    int i = 0;
+    while (tokenizer.hasMoreTokens()) {
+      args[i] = tokenizer.nextToken();
+      args[i] = args[i].replaceAll(masterKey, master);
+      args[i] = args[i].replaceAll("CLITEST_DATA", 
+        new File(CLITestHelper.TEST_CACHE_DATA_DIR).toURI().toString().replace(' ', '+'));
+      args[i] = args[i].replaceAll("TEST_DIR_ABSOLUTE", TestCLI.TEST_DIR_ABSOLUTE);
+      args[i] = args[i].replaceAll("USERNAME", System.getProperty("user.name"));
+
+      i++;
+    }
+    return args;
+  }
+}

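For reference, a minimal sketch of the token expansion performed above (the namenode URI is illustrative; CLITEST_DATA and TEST_DIR_ABSOLUTE resolve against CLITestHelper and TestCLI at runtime):

    // hypothetical values, for illustration only
    FSCmdExecutor executor = new FSCmdExecutor("hdfs://nn.example.com:8020", new FsShell())
    // the command template "-fs NAMENODE -ls TEST_DIR_ABSOLUTE" is tokenized on
    // whitespace and expands to
    //   ["-fs", "hdfs://nn.example.com:8020", "-ls", "/tmp/testcli"]
    // before being handed to ToolRunner.run(shell, args)
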
http://git-wip-us.apache.org/repos/asf/bigtop/blob/87e5388a/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hadoop/hdfs/TestCLI.java
----------------------------------------------------------------------
diff --git a/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hadoop/hdfs/TestCLI.java b/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hadoop/hdfs/TestCLI.java
new file mode 100644
index 0000000..c39f029
--- /dev/null
+++ b/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hadoop/hdfs/TestCLI.java
@@ -0,0 +1,93 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.bigtop.itest.hadoop.hdfs;
+
+import java.io.File;
+
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.FsShell;
+import org.apache.hadoop.cli.CLITestHelper;
+import org.apache.hadoop.cli.util.CLICommand;
+import org.apache.hadoop.cli.util.CLICommandFS;
+import org.apache.hadoop.cli.util.CommandExecutor;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Tests for the Command Line Interface (CLI)
+ */
+public class TestCLI extends CLITestHelper {
+  public static final String TEST_DIR_ABSOLUTE = "/tmp/testcli";
+  private String nn;
+  private String sug;
+
+  @Before
+  @Override
+  public void setUp() throws Exception {
+    readTestConfigFile();
+    conf = new HdfsConfiguration();
+    conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, 
+                    true);
+    clitestDataDir =
+      new File(TEST_CACHE_DATA_DIR).toURI().toString().replace(' ', '+');
+    nn = conf.get(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY);
+    sug = conf.get(DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY);
+    // Many of the tests expect a replication value of 1 in the output
+    conf.setInt("dfs.replication", 1);
+  }
+
+  @After
+  @Override
+  public void tearDown() throws Exception {
+    super.tearDown();
+  }
+
+  @Override
+  protected String getTestFile() {
+    return "testConf.xml";
+  }
+
+  @Test
+  @Override
+  public void testAll() {
+    super.testAll();
+  }
+
+  @Override
+  protected String expandCommand(final String cmd) {
+    String expCmd = super.expandCommand(cmd);
+    String testcliDir = TEST_DIR_ABSOLUTE;
+    expCmd = expCmd.replaceAll("TEST_DIR_ABSOLUTE", testcliDir);
+    expCmd = expCmd.replaceAll("SUPERGROUP", sug);
+    return expCmd;
+  }
+
+  @Override
+  protected CommandExecutor.Result execute(CLICommand cmd) throws Exception {
+    if (cmd.getType() instanceof CLICommandFS) {
+      CommandExecutor cmdExecutor = new FSCmdExecutor(nn, new FsShell(conf));
+      return cmdExecutor.executeCommand(cmd.getCmd());
+    } else {
+      throw new IllegalArgumentException("Unknown type of test command: " + cmd.getType());
+    }
+  }
+}

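The CLI harness drives this class from testConf.xml; each command of type CLICommandFS is routed through FSCmdExecutor, roughly as in this sketch (the command string is illustrative):

    // sketch only: what execute() does for a filesystem test command
    CommandExecutor.Result result =
        new FSCmdExecutor(nn, new FsShell(conf)).executeCommand("-fs NAMENODE -mkdir TEST_DIR_ABSOLUTE/dir0")
    // the harness then compares the result against the expectations recorded in testConf.xml
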
http://git-wip-us.apache.org/repos/asf/bigtop/blob/87e5388a/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hadoop/hdfs/TestDFSAdmin.groovy
----------------------------------------------------------------------
diff --git a/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hadoop/hdfs/TestDFSAdmin.groovy b/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hadoop/hdfs/TestDFSAdmin.groovy
new file mode 100644
index 0000000..e50c597
--- /dev/null
+++ b/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hadoop/hdfs/TestDFSAdmin.groovy
@@ -0,0 +1,182 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.bigtop.itest.hadoop.hdfs;
+
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.apache.bigtop.itest.JarContent;
+import org.apache.bigtop.itest.shell.Shell;
+
+public class TestDFSAdmin {
+ 
+  // shell that runs every command as the hdfs superuser
+  private static Shell shHDFS = new Shell("/bin/bash", "hdfs");
+
+  @BeforeClass
+  public static void setUp() {
+    // unpack resource
+    JarContent.unpackJarContainer(TestDFSAdmin.class, "." , null);
+    System.out.println("Running DFSAdmin commands:");
+  }
+
+  @AfterClass
+  public static void tearDown() {
+  }
+
+  @Test
+  public void testDFSbasic() { 
+    // report
+    System.out.println("-report"); 
+    shHDFS.exec("hdfs dfsadmin -report");    
+    assertTrue("-report failed", shHDFS.getRet() == 0);
+
+    // help
+    System.out.println("-help"); 
+    shHDFS.exec("hdfs dfsadmin -help");
+    assertTrue("-help failed", shHDFS.getRet() == 0);
+
+    // printTopology
+    System.out.println("-printTopology"); 
+    shHDFS.exec("hdfs dfsadmin -printTopology");
+    assertTrue("-printTopology failed", shHDFS.getRet() == 0);
+
+    // metasave
+    System.out.println("-metasave");
+    shHDFS.exec("hdfs dfsadmin -metasave metasave_test");
+    assertTrue("-metasave failed", shHDFS.getRet() == 0); 
+  }
+
+  @Test
+  public void testDFSsafemode() {
+    // safemode
+    System.out.println("-safemode"); 
+    shHDFS.exec("hdfs dfsadmin -safemode leave");
+    assertTrue("-safemode leave failed", shHDFS.getRet() == 0);
+    shHDFS.exec("hdfs dfsadmin -safemode get");
+    assertTrue("-safemode get failed", shHDFS.getOut().get(0) == "Safe mode is OFF");
+    assertTrue("-safemode get failed", shHDFS.getRet() == 0);
+    shHDFS.exec("hdfs dfsadmin -safemode enter");
+    assertTrue("-safemode enter failed", shHDFS.getRet() == 0);
+    shHDFS.exec("hdfs dfsadmin -safemode get");
+    assertTrue("-safemode get failed", shHDFS.getOut().get(0) == "Safe mode is ON");
+    assertTrue("-safemode get failed", shHDFS.getRet() == 0);
+    shHDFS.exec("hdfs dfsadmin -safemode leave");
+    assertTrue("-safemode leave failed", shHDFS.getRet() == 0); 
+  }
+
+  @Test
+  public void testDFSnamespace() {
+    // saveNamespace
+    System.out.println("-saveNamespace");
+    shHDFS.exec("hdfs dfsadmin -safemode enter"); 
+    shHDFS.exec("hdfs dfsadmin -saveNamespace");
+    assertTrue("-saveNamespace failed", shHDFS.getRet() == 0);
+    shHDFS.exec("hdfs dfsadmin -safemode leave");
+    shHDFS.exec("hdfs dfsadmin -saveNamespace"); 
+    assertTrue("-saveNamespace worked in non safemode", shHDFS.getRet() != 0);
+  }
+
+  @Test
+  public void testDFSrefreshcommands() {
+    // refreshNodes
+    System.out.println("-refreshNodes"); 
+    shHDFS.exec("hdfs dfsadmin -refreshNodes");
+    assertTrue("-refreshNodes failed", shHDFS.getRet() == 0);
+
+    /*// refreshServiceAcl - currently fails: shHDFS.getRet() == 255
+    System.out.println("-refreshServiceAcl");
+    shHDFS.exec("hdfs dfsadmin -refreshServiceAcl");
+    System.out.println(shHDFS.getRet());
+    assertTrue("-refreshServiceAcl failed", shHDFS.getRet() == 0); */
+   
+    // refreshUserToGroupsMappings
+    System.out.println("-refreshUserToGroupsMappings");
+    shHDFS.exec("hdfs dfsadmin -refreshUserToGroupsMappings");
+    assertTrue("-refreshUserToGroupsMappings failed", shHDFS.getRet() == 0);
+
+    // refreshSuperUserGroupsConfiguration
+    System.out.println("-refreshSuperUserGroupsConfiguration");
+    shHDFS.exec("hdfs dfsadmin -refreshSuperUserGroupsConfiguration");
+    assertTrue("-refreshSuperUserGroupsConfiguration failed", shHDFS.getRet() == 0); 
+  }
+
+  @Test
+  public void testDFSupgrades() {
+    // upgradeProgress
+    System.out.println("-upgradeProgress"); 
+    shHDFS.exec("hdfs dfsadmin -upgradeProgress details");
+    assertTrue("-upgradeProgress details failed", shHDFS.getRet() == 0);
+    shHDFS.exec("hdfs dfsadmin -upgradeProgress status");
+    assertTrue("-upgradeProgress status failed", shHDFS.getRet() == 0);
+
+    // finalizeUpgrade
+    System.out.println("-finalizeUpgrade");
+    shHDFS.exec("hdfs dfsadmin -finalizeUpgrade");
+    assertTrue("-finalizeUpgrade failed", shHDFS.getRet() == 0);
+  }
+
+  @Test
+  public void testDFSstorage() {  
+    // restoreFailedStorage
+    System.out.println("-restoreFailedStorage"); 
+    shHDFS.exec("hdfs dfsadmin -restoreFailedStorage false");
+    assertTrue("-restoreFailedStorage false failed", shHDFS.getRet() == 0);
+    shHDFS.exec("hdfs dfsadmin -restoreFailedStorage check");
+    assertTrue("-restoreFailedStorage check failed", shHDFS.getOut().get(0) == "restoreFailedStorage is set to false");
+    assertTrue("-restoreFailedStorage check failed", shHDFS.getRet() == 0);
+    shHDFS.exec("hdfs dfsadmin -restoreFailedStorage true");
+    assertTrue("-restoreFailedStorage true failed", shHDFS.getRet() == 0);
+    shHDFS.exec("hdfs dfsadmin -restoreFailedStorage check");
+    assertTrue("-restoreFailedStorage check", shHDFS.getOut().get(0) == "restoreFailedStorage is set to true");
+    assertTrue("-restoreFailedStorage check", shHDFS.getRet() == 0);
+    shHDFS.exec("hdfs dfsadmin -restoreFailedStorage false");
+    assertTrue("-restoreFailedStorage false failed", shHDFS.getRet() == 0); 
+  }
+
+  @Test
+  public void testDFSquotas() {
+    // setQuota, clrQuota
+    System.out.println("-setQuota, -clrQuota");
+    shHDFS.exec("date");
+    String quota_test = "quota_test" + shHDFS.getOut().get(0).replaceAll("\\s","").replaceAll(":","");
+    shHDFS.exec("hadoop fs -test -e $quota_test");
+    if (shHDFS.getRet() == 0) {
+      shHDFS.exec("hadoop fs -rmr -skipTrash $quota_test");
+      assertTrue("Deletion of previous testDistcpInputs from HDFS failed",
+          shHDFS.getRet() == 0);
+    }
+    shHDFS.exec("hadoop fs -mkdir $quota_test");
+    shHDFS.exec("hdfs dfsadmin -setQuota 1000 $quota_test");
+    assertTrue("-setQuota failed", shHDFS.getRet() == 0);
+    shHDFS.exec("hdfs dfsadmin -clrQuota $quota_test");
+    assertTrue("-clrQuota failed", shHDFS.getRet() == 0); 
+
+    // setSpaceQuota, clrSpaceQuota
+    System.out.println("-setSpaceQuota, -clrSpaceQuota");
+    shHDFS.exec("hdfs dfsadmin -setSpaceQuota 1000 $quota_test");
+    assertTrue("-setSpaceQuota failed", shHDFS.getRet() == 0);
+    shHDFS.exec("hdfs dfsadmin -clrSpaceQuota $quota_test");
+    assertTrue("-clrSpaceQuota failed", shHDFS.getRet() == 0);
+    shHDFS.exec("hadoop fs -rmr $quota_test"); 
+  }
+
+}

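All of these tests share the same Shell idiom; a standalone example of the pattern (assuming a running HDFS cluster and a local hdfs superuser account):

    Shell shHDFS = new Shell("/bin/bash", "hdfs")
    shHDFS.exec("hdfs dfsadmin -safemode get")
    assert shHDFS.getRet() == 0                          // exit code of the command
    assert shHDFS.getOut().get(0) == "Safe mode is OFF"  // Groovy == compares with equals()
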
http://git-wip-us.apache.org/repos/asf/bigtop/blob/87e5388a/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hadoop/hdfs/TestFsck.groovy
----------------------------------------------------------------------
diff --git a/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hadoop/hdfs/TestFsck.groovy b/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hadoop/hdfs/TestFsck.groovy
new file mode 100644
index 0000000..62efd7c
--- /dev/null
+++ b/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hadoop/hdfs/TestFsck.groovy
@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.bigtop.itest.hadoop.hdfs
+
+import org.junit.Test
+import org.apache.bigtop.itest.shell.Shell
+import static org.junit.Assert.assertTrue
+import static org.apache.bigtop.itest.LogErrorsUtils.logError
+
+/**
+ * Tests the HDFS fsck command.
+ */
+public class TestFsck {
+  static Shell shHDFS = new Shell("/bin/bash", "hdfs" )
+  String[] fsckCmds = [
+    "hdfs fsck /",
+    "hdfs fsck -move /",
+    "hdfs fsck -delete /",
+    "hdfs fsck / -files",
+    "hdfs fsck -openforwrite /",
+    "hdfs fsck -list-corruptfileblocks /",
+    "hdfs fsck -blocks /",
+    "hdfs fsck -locations /",
+    "hdfs fsck -racks /"
+  ]
+
+  @Test
+  public void testFsckBasic() {
+    for (cmd in fsckCmds) {
+      shHDFS.exec(cmd)
+      logError(shHDFS)
+      assertTrue(shHDFS.getRet() == 0)
+    }
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/bigtop/blob/87e5388a/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hadoop/hdfs/TestHDFSBalancer.groovy
----------------------------------------------------------------------
diff --git a/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hadoop/hdfs/TestHDFSBalancer.groovy b/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hadoop/hdfs/TestHDFSBalancer.groovy
new file mode 100644
index 0000000..e0fca84
--- /dev/null
+++ b/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hadoop/hdfs/TestHDFSBalancer.groovy
@@ -0,0 +1,84 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.bigtop.itest.hadoop.hdfs;
+
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.apache.bigtop.itest.JarContent;
+import org.apache.bigtop.itest.shell.Shell;
+
+public class TestHDFSBalancer {
+ 
+  private static Shell shHDFS = new Shell("/bin/bash", "hdfs");
+  // set with -Dthreshold
+  private static String thresh = "10";
+
+  @BeforeClass
+  public static void setUp() {
+    // unpack resource
+    JarContent.unpackJarContainer(TestHDFSBalancer.class, "." , null);
+    if (System.getProperty("threshold") != null) {
+      thresh = System.getProperty("threshold");
+    }  
+  }
+
+  @AfterClass
+  public static void tearDown() {
+  }
+
+  @Test
+  public void testBalancer() { 
+    System.out.println("Running Balancer:");
+    System.out.println("Threshold is set to " + thresh +". Toggle by adding -Dthreshold=#");
+
+    // must run balancer as hdfs user   
+    shHDFS.exec("hdfs balancer -threshold $thresh");
+  
+    boolean success = false;
+    // any of the following messages signifies that balancing completed successfully
+    String success_string1 = "The cluster is balanced. Exiting..."
+    String success_string2 = "No block can be moved"
+    String success_string3 = "No block has been moved for 3 iterations"
+    List out_msgs = shHDFS.getOut();
+    Iterator out_iter = out_msgs.iterator();
+    while (out_iter.hasNext()) {
+      String next_val = out_iter.next();
+      if (next_val.equals(success_string1) || next_val.contains(success_string2) || next_val.contains(success_string3)) {
+        success = true;
+       }
+    }
+
+    String failure_string1 = "namenodes = []"
+    List err_msgs = shHDFS.getErr();
+    Iterator err = err_msgs.iterator();
+
+    while (err.hasNext()) {
+      String err_next = err.next()
+      assertTrue("Balancer could not find namenode", !err_next.contains(failure_string1));
+    }
+
+    // cannot simply check shHDFS.getRet() == 0: the balancer prints INFO messages that the shell treats as error output
+    assertTrue("Balancer failed", success);
+  }
+
+}
+

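Condensed, the stdout scan above is equivalent to the following sketch; it exists because the balancer logs INFO output to stderr, so the exit code and stderr alone cannot distinguish success from failure:

    boolean success = shHDFS.getOut().any { line ->
      line.equals("The cluster is balanced. Exiting...") ||
      line.contains("No block can be moved") ||
      line.contains("No block has been moved for 3 iterations")
    }
    assertTrue("Balancer failed", success)
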
http://git-wip-us.apache.org/repos/asf/bigtop/blob/87e5388a/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hadoop/hdfs/TestHDFSQuota.groovy
----------------------------------------------------------------------
diff --git a/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hadoop/hdfs/TestHDFSQuota.groovy b/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hadoop/hdfs/TestHDFSQuota.groovy
new file mode 100644
index 0000000..5d3c9a7
--- /dev/null
+++ b/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hadoop/hdfs/TestHDFSQuota.groovy
@@ -0,0 +1,308 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.bigtop.itest.hadoop.hdfs
+
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.apache.bigtop.itest.shell.Shell;
+
+public class TestHDFSQuota {
+ 
+  private static Shell shHDFS = new Shell("/bin/bash", "hdfs");
+  private static Shell sh = new Shell("/bin/bash");
+  private static final long LARGE = Long.MAX_VALUE - 1;
+  private static final String USERNAME = System.getProperty("user.name");
+  private static String quotaDate = shHDFS.exec("date").getOut().get(0).replaceAll("\\s","").replaceAll(":","");
+  private static String testQuotaFolder = "testQuotaFolder" + quotaDate;
+  private static String testQuotaFolder1 = testQuotaFolder + "1";
+  private static String testQuotaFolder2 = testQuotaFolder + "2";
+  
+  @BeforeClass
+  public static void setUp() {
+    // creating test folders
+    shHDFS.exec("hadoop fs -mkdir $testQuotaFolder1");
+    assertTrue("Could not create input directory", shHDFS.getRet() == 0);
+
+    sh.exec("hadoop fs -mkdir $testQuotaFolder1");
+    assertTrue("Could not create input directory", sh.getRet() == 0);
+  }
+
+  @AfterClass
+  public static void tearDown() {
+    // clean up of existing folders
+    shHDFS.exec("hadoop fs -test -e $testQuotaFolder1");
+    if (shHDFS.getRet() == 0) {
+      shHDFS.exec("hadoop fs -rmr -skipTrash $testQuotaFolder1");
+      assertTrue("Deletion of previous testQuotaFolder1 from HDFS failed",
+          shHDFS.getRet() == 0);
+    }
+    shHDFS.exec("hadoop fs -test -e $testQuotaFolder2");
+    if (shHDFS.getRet() == 0) {
+      shHDFS.exec("hadoop fs -rmr -skipTrash $testQuotaFolder2");
+      assertTrue("Deletion of previous testQuotaFolder2 from HDFS failed",
+          shHDFS.getRet() == 0);
+    }
+    sh.exec("hadoop fs -test -e $testQuotaFolder1");
+    if (sh.getRet() == 0) {
+      sh.exec("hadoop fs -rmr -skipTrash $testQuotaFolder1");
+      assertTrue("Deletion of previous testQuotaFolder1 from HDFS failed",
+          sh.getRet() == 0);
+    }
+  }
+
+  @Test
+  public void testNewlyCreatedDir() { 
+    // newly created dir should have no name quota, no space quota   
+    shHDFS.exec("hadoop fs -count -q $testQuotaFolder1");
+    assertTrue("Could not use count command", shHDFS.getRet() == 0);
+    String[] output = shHDFS.getOut().get(0).trim().split();
+    assertTrue("Newly created directory had a set name quota", output[0].equals("none"));
+    assertTrue("Newly created directory had a set name quota left", output[1].equals("inf"));
+    assertTrue("Newly created directory had a set space quota", output[2].equals("none"));
+    assertTrue("Newly created directory had a set space quota left", output[3].equals("inf"));
+  } 
+
+  @Test
+  public void testAdminPermissions() { 
+    // admin setting quotas should succeed
+    shHDFS.exec("hadoop dfsadmin -setQuota 10 $testQuotaFolder1");
+    assertTrue("setQuota failed", shHDFS.getRet() == 0);
+    shHDFS.exec("hadoop dfsadmin -setSpaceQuota 1000000 $testQuotaFolder1");
+    assertTrue("setSpaceQuota failed", shHDFS.getRet() == 0);
+
+    // non-admin setting/clearing quotas should fail
+    sh.exec("hadoop dfsadmin -setQuota 10 $testQuotaFolder1");
+    assertTrue("setQuota should not have worked", sh.getRet() != 0);
+    sh.exec("hadoop dfsadmin -setSpaceQuota 1000000 $testQuotaFolder1");
+    assertTrue("setSpaceQuota should not have worked", sh.getRet() != 0);
+    sh.exec("hadoop dfsadmin -clrQuota $testQuotaFolder1");
+    assertTrue("clrQuota should not have worked", sh.getRet() != 0);
+    sh.exec("hadoop dfsadmin -clrSpaceQuota $testQuotaFolder1");
+    assertTrue("clrSpaceQuota should not have worked", sh.getRet() != 0);
+
+    // admin clearing quotas should succeed
+    shHDFS.exec("hadoop dfsadmin -clrQuota $testQuotaFolder1");
+    assertTrue("clrQuota failed", shHDFS.getRet() == 0);
+    shHDFS.exec("hadoop dfsadmin -clrSpaceQuota $testQuotaFolder1");
+    assertTrue("clrSpaceQuota failed", shHDFS.getRet() == 0);
+  } 
+
+  @Test
+  public void testRename() { 
+    // name and space quotas stick after rename
+    shHDFS.exec("hadoop fs -count -q $testQuotaFolder1");
+    assertTrue("Could not use count command", shHDFS.getRet() == 0);
+    String[] status1 = shHDFS.getOut().get(0).trim().split();
+    shHDFS.exec("hadoop fs -mv $testQuotaFolder1" + " /user/hdfs/$testQuotaFolder2");
+    assertTrue("Could not use move command", shHDFS.getRet() == 0);
+    shHDFS.exec("hadoop fs -count -q $testQuotaFolder2");
+    assertTrue("Could not use count command", shHDFS.getRet() == 0);
+    String[] status2 = shHDFS.getOut().get(0).trim().split();
+    for (int i = 0; i < status1.length - 1; i++) {
+      assertTrue("quotas changed after folder rename", status1[i].equals(status2[i]));
+    }
+    shHDFS.exec("hadoop fs -mv $testQuotaFolder2" + " /user/hdfs/$testQuotaFolder1");
+    assertTrue("Could not use move command", shHDFS.getRet() == 0);
+  }
+
+  @Test
+  public void testInputValues() { 
+    // a quota must be a positive integer no larger than Long.MAX_VALUE
+    shHDFS.exec("hadoop dfsadmin -setQuota -1 $testQuotaFolder1");
+    assertTrue("setQuota should not have worked", shHDFS.getRet() != 0);
+    shHDFS.exec("hadoop dfsadmin -setSpaceQuota -1 $testQuotaFolder1");
+    assertTrue("setSpaceQuota should not have worked", shHDFS.getRet() != 0);  
+    shHDFS.exec("hadoop dfsadmin -setQuota 1.04 $testQuotaFolder1");
+    assertTrue("setQuota should not have worked", shHDFS.getRet() != 0);
+    shHDFS.exec("hadoop dfsadmin -setSpaceQuota 1.04 $testQuotaFolder1");
+    assertTrue("setSpaceQuota should not have worked", shHDFS.getRet() != 0);        
+    shHDFS.exec("hadoop dfsadmin -setQuota 0 $testQuotaFolder1");
+    assertTrue("setQuota should not have worked", shHDFS.getRet() != 0);
+    shHDFS.exec("hadoop dfsadmin -setSpaceQuota 0 $testQuotaFolder1");
+    assertTrue("setSpaceQuota should not have worked", shHDFS.getRet() != 0);
+    shHDFS.exec("hadoop dfsadmin -setQuota $LARGE $testQuotaFolder1");
+    assertTrue("setQuota failed", shHDFS.getRet() == 0);
+    shHDFS.exec("hadoop dfsadmin -setSpaceQuota $LARGE $testQuotaFolder1");
+    assertTrue("setSpaceQuota failed", shHDFS.getRet() == 0);
+    shHDFS.exec("hadoop dfsadmin -setQuota 9223372036854775808 $testQuotaFolder1");
+    assertTrue("setQuota should not have worked", shHDFS.getRet() != 0);
+    shHDFS.exec("hadoop dfsadmin -setSpaceQuota 9223372036854775808 $testQuotaFolder1");
+    assertTrue("setSpaceQuota should not have worked", shHDFS.getRet() != 0);
+  }
+
+  @Test
+  public void testForceDirEmpty() {
+    // setting the name quota to 1 for an empty dir will cause the dir to remain empty
+    shHDFS.exec("hadoop dfsadmin -setQuota 1 $testQuotaFolder1");
+    assertTrue("Could not setQuota", shHDFS.getRet() == 0);
+    shHDFS.exec("hadoop fs -mkdir $testQuotaFolder1" + "/sample1");
+    assertTrue("mkdir should not have worked due to quota of 1", shHDFS.getRet() != 0);
+  }
+
+  @Test
+  public void testQuotasPostViolation() {  
+    // a quota can be set even if the directory already violates it
+    shHDFS.exec("hadoop dfsadmin -setQuota $LARGE $testQuotaFolder1");
+    assertTrue("Could not setQuota", shHDFS.getRet() == 0);
+    shHDFS.exec("hadoop fs -put - $testQuotaFolder1" + "/testString1", "-------TEST STRING--------"); 
+    assertTrue("Could not use put command", shHDFS.getRet() == 0);
+    shHDFS.exec("hadoop fs -mkdir $testQuotaFolder1" + "/sample1");
+    assertTrue("Could not use mkdir command", shHDFS.getRet() == 0);
+    shHDFS.exec("hadoop fs -mkdir $testQuotaFolder1" + "/sample2");
+    assertTrue("Could not use mkdir command", shHDFS.getRet() == 0);
+    shHDFS.exec("hadoop dfsadmin -setQuota 2 $testQuotaFolder1");
+    assertTrue("setQuota should have worked", shHDFS.getRet() == 0);
+    shHDFS.exec("hadoop dfsadmin -setSpaceQuota 1 $testQuotaFolder1");
+    assertTrue("setSpaceQuota should have worked", shHDFS.getRet() == 0);
+  }
+
+  @Test
+  public void testQuotas() {
+    // dir creation should fail - name quota
+    shHDFS.exec("hadoop dfsadmin -setSpaceQuota 10000000000 $testQuotaFolder1");
+    assertTrue("Could not setSpaceQuota", shHDFS.getRet() == 0);
+    shHDFS.exec("hadoop fs -mkdir $testQuotaFolder1" + "/sample3");
+    assertTrue("mkdir should not have worked", shHDFS.getRet() != 0);
+
+    // file creation should fail - name quota
+    shHDFS.exec("hadoop fs -rmr $testQuotaFolder1" + "/testString1"); 
+    shHDFS.exec("hadoop fs -put - $testQuotaFolder1" + "/testString2", "-------TEST STRING--------"); 
+    assertTrue("put should not have worked", shHDFS.getRet() != 0);
+
+    // file creation should fail - space quota
+    shHDFS.exec("hadoop dfsadmin -setSpaceQuota 10 $testQuotaFolder1");
+    assertTrue("Could not setSpaceQuota", shHDFS.getRet() == 0);
+    shHDFS.exec("hadoop dfsadmin -setQuota 1000 $testQuotaFolder1");
+    assertTrue("Could not setQuota", shHDFS.getRet() == 0);
+    shHDFS.exec("hadoop fs -put - $testQuotaFolder1"  + "/testString3", "-------TEST STRING--------"); 
+    assertTrue("put should not have worked", shHDFS.getRet() != 0); 
+  }
+
+  //@Test - disabled because it restarts the HDFS service; reinstate once BIGTOP-635 is resolved
+  public void testLogEntries() {
+    // Log entry created when nodes are started with both quota violations
+    shHDFS.exec("date");
+    String date = "logTest" + shHDFS.getOut().get(0).replaceAll("\\s","").replaceAll(":","");
+    shHDFS.exec("hadoop fs -mkdir $date");
+    assertTrue("Could not use mkdir command", shHDFS.getRet() == 0);
+    shHDFS.exec("hadoop fs -put - $date" + "/testString1", "-------TEST STRING--------");
+    assertTrue("Could not use put command", shHDFS.getRet() == 0);
+    shHDFS.exec("hadoop dfsadmin -setQuota 1 $date");
+    assertTrue("Could not setQuota", shHDFS.getRet() == 0); 
+    shHDFS.exec("date");
+    String date1 = "logTest" + shHDFS.getOut().get(0).replaceAll("\\s","").replaceAll(":","");
+    shHDFS.exec("hadoop fs -mkdir $date1");
+    assertTrue("Could not use mkdir command", shHDFS.getRet() == 0);
+    shHDFS.exec("hadoop fs -put - $date1"  + "/testString2", "-------TEST STRING--------"); 
+    assertTrue("Could not use put command", shHDFS.getRet() == 0);
+    shHDFS.exec("hadoop dfsadmin -setSpaceQuota 1 $date1");
+    assertTrue("Could not setSpaceQuota", shHDFS.getRet() == 0); 
+    shHDFS.exec("for service in /etc/init.d/hadoop-hdfs-*; do sudo \$service stop; done");
+    shHDFS.exec("for service in /etc/init.d/hadoop-hdfs-*; do sudo \$service start; done");
+    shHDFS.exec("grep \"Quota violation in image for //user/hdfs/$date\" /var/log/hadoop-hdfs/hadoop-hdfs-namenode*.log");
+    if (shHDFS.getOut().isEmpty()) {
+      assertTrue("Log was not written", 1 == 0);
+    }
+    else {
+      assertTrue(shHDFS.getOut().get(0).contains(date));
+    }
+    shHDFS.exec("grep \"Quota violation in image for //user/hdfs/$date1\" /var/log/hadoop-hdfs/hadoop-hdfs-namenode*.log");
+    if (shHDFS.getOut().isEmpty()) {
+      assertTrue("Log was not written", 1 == 0);
+    }
+    else {
+      assertTrue(shHDFS.getOut().get(0).contains(date1));
+    }
+    
+    shHDFS.exec("hadoop fs -rmr $date1");
+    // the namenode stays in safe mode for about 15 seconds after a restart, so retry the delete until it succeeds
+    while (shHDFS.getErr().get(0).contains("safe mode") || (shHDFS.getErr().size() > 1 && shHDFS.getErr().get(1).contains("safe mode"))) {
+          shHDFS.exec("hadoop fs -rmr $date1");
+    } 
+  }
+
+  @Test
+  public void testQuotasShouldFail() {
+    shHDFS.exec("date");
+    String date = "failTest" + shHDFS.getOut().get(0).replaceAll("\\s","").replaceAll(":","");
+    shHDFS.exec("hadoop fs -mkdir $date");
+    assertTrue("Could not use mkdir command", shHDFS.getRet() == 0);
+    shHDFS.exec("hadoop fs -put - $date" + "/testString1", "-------TEST STRING--------");
+    assertTrue("Could not use put command", shHDFS.getRet() == 0);
+    // Errors when setting quotas on a file
+    shHDFS.exec("hadoop dfsadmin -setQuota 1000 $date/testString1");
+    assertTrue("setting quota on a file should not have worked", shHDFS.getRet() != 0);
+    shHDFS.exec("hadoop dfsadmin -setSpaceQuota 1000 $date/testString1");
+    assertTrue("setting quota on a file should not have worked", shHDFS.getRet() != 0); 
+
+    // Errors when clearing quotas on a file
+    shHDFS.exec("hadoop dfsadmin -clrQuota $date/testString1");
+    assertTrue("clearing quota on a file should not have worked", shHDFS.getRet() != 0);
+    shHDFS.exec("hadoop dfsadmin -clrSpaceQuota $date/testString1");
+    assertTrue("clearing quota on a file should not have worked", shHDFS.getRet() != 0);
+
+    // set/clr quota on nonexistent directory
+    shHDFS.exec("hadoop dfsadmin -setQuota 100 DIRECTORYDOESNOTEXIST" + date);
+    assertTrue("setting quota on non-existent directory should not have worked", shHDFS.getRet() != 0);
+    shHDFS.exec("hadoop dfsadmin -setSpaceQuota 100 DIRECTORYDOESNOTEXIST" + date);
+    assertTrue("setting quota on non-existent directory should not have worked", shHDFS.getRet() != 0);
+    shHDFS.exec("hadoop dfsadmin -clrQuota DIRECTORYDOESNOTEXIST" + date);
+    assertTrue("clearing quota on non-existent directory should not have worked", shHDFS.getRet() != 0);
+    shHDFS.exec("hadoop dfsadmin -clrSpaceQuota DIRECTORYDOESNOTEXIST" + date);
+    assertTrue("clearing quota on non-existent directory should not have worked", shHDFS.getRet() != 0);
+
+    shHDFS.exec("hadoop fs -rmr $date"); 
+  }
+
+  @Test
+  public void testReplicationFactor() {
+    // increasing/decreasing replication factor of a file should debit/credit quota
+    shHDFS.exec("date");
+    String repFolder = "repFactorTest" + shHDFS.getOut().get(0).replaceAll("\\s","").replaceAll(":","");
+    shHDFS.exec("hadoop fs -mkdir $repFolder");
+    assertTrue("Could not use mkdir command", shHDFS.getRet() == 0);    
+    shHDFS.exec("hadoop fs -put - $repFolder" + "/testString1" , "-------TEST STRING--------");
+    assertTrue("Could not use put command", shHDFS.getRet() == 0);
+    shHDFS.exec("hadoop dfsadmin -setSpaceQuota 1000 $repFolder");
+    assertTrue("Could not setQuota", shHDFS.getRet() == 0); 
+    shHDFS.exec("hadoop fs -setrep 1 $repFolder/testString1");
+    shHDFS.exec("hadoop fs -count -q $repFolder");
+    assertTrue("Could not use count command", shHDFS.getRet() == 0);
+    String[] output = shHDFS.getOut().get(0).trim().split();   
+    int size_of_one = Integer.parseInt(output[2]) - Integer.parseInt(output[3]);
+    shHDFS.exec("hadoop fs -setrep 5 $repFolder/testString1");
+    shHDFS.exec("hadoop fs -count -q $repFolder");
+    assertTrue("Could not use count command", shHDFS.getRet() == 0);
+    output = shHDFS.getOut().get(0).trim().split();   
+    int size_of_five = Integer.parseInt(output[2]) - Integer.parseInt(output[3]);
+    assertTrue("Quota not debited correctly", size_of_one * 5 == size_of_five);
+    shHDFS.exec("hadoop fs -setrep 3 $repFolder/testString1");
+    shHDFS.exec("hadoop fs -count -q $repFolder");
+    assertTrue("Could not use count command", shHDFS.getRet() == 0);
+    output = shHDFS.getOut().get(0).trim().split();   
+    int size_of_three = Integer.parseInt(output[2]) - Integer.parseInt(output[3]);
+    assertTrue("Quota not credited correctly", size_of_one * 3 == size_of_three);
+    shHDFS.exec("hadoop fs -rmr $repFolder"); 
+  }
+
+}
+

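The quota assertions index into the output of 'hadoop fs -count -q', whose whitespace-separated columns are QUOTA, REMAINING_QUOTA, SPACE_QUOTA, REMAINING_SPACE_QUOTA, DIR_COUNT, FILE_COUNT, CONTENT_SIZE and PATHNAME. A sketch of the parsing used above (sample values illustrative):

    shHDFS.exec("hadoop fs -count -q $testQuotaFolder1")
    String[] output = shHDFS.getOut().get(0).trim().split()
    // e.g. ["none", "inf", "none", "inf", "1", "0", "0", "/user/hdfs/testQuotaFolder..."]
    // when a space quota is set, columns 2 and 3 are numeric and their
    // difference is the space consumed, replication included:
    long spaceUsed = Long.parseLong(output[2]) - Long.parseLong(output[3])
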
http://git-wip-us.apache.org/repos/asf/bigtop/blob/87e5388a/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hadoop/mapreduce/TestHadoopExamples.groovy
----------------------------------------------------------------------
diff --git a/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hadoop/mapreduce/TestHadoopExamples.groovy b/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hadoop/mapreduce/TestHadoopExamples.groovy
new file mode 100644
index 0000000..7d91f28
--- /dev/null
+++ b/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hadoop/mapreduce/TestHadoopExamples.groovy
@@ -0,0 +1,106 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.bigtop.itest.hadoop.mapreduce
+
+import org.junit.BeforeClass
+import static org.junit.Assert.assertNotNull
+import org.apache.bigtop.itest.shell.Shell
+import static org.junit.Assert.assertTrue
+import org.junit.Test
+import org.apache.hadoop.conf.Configuration
+import org.apache.bigtop.itest.JarContent
+import org.apache.bigtop.itest.TestUtils
+import org.apache.commons.logging.LogFactory
+import org.apache.commons.logging.Log
+
+import org.apache.bigtop.itest.junit.OrderedParameterized
+import org.junit.runners.Parameterized.Parameters
+import org.junit.runner.RunWith
+
+@RunWith(OrderedParameterized.class)
+class TestHadoopExamples {
+  static private Log LOG = LogFactory.getLog(TestHadoopExamples.class);
+
+  private static final String HADOOP_MAPRED_HOME = System.getenv('HADOOP_MAPRED_HOME');
+  private static final String HADOOP_CONF_DIR = System.getenv('HADOOP_CONF_DIR');
+  private static String hadoopExamplesJar =
+    JarContent.getJarName(HADOOP_MAPRED_HOME, 'hadoop.*examples.*.jar');
+  static {
+    assertNotNull("HADOOP_MAPRED_HOME has to be set to run this test",
+        HADOOP_MAPRED_HOME);
+    assertNotNull("HADOOP_CONF_DIR has to be set to run this test",
+        HADOOP_CONF_DIR);
+    assertNotNull("Can't find hadoop-examples.jar file", hadoopExamplesJar);
+  }
+  static final String HADOOP_EXAMPLES_JAR =
+    HADOOP_MAPRED_HOME + "/" + hadoopExamplesJar;
+
+  static Shell sh = new Shell("/bin/bash -s");
+  private static final String EXAMPLES = "examples";
+  private static final String EXAMPLES_OUT = "examples-output";
+  private static Configuration conf;
+
+  private static String mr_version = System.getProperty("mr.version", "mr2");
+  static final String RANDOMTEXTWRITER_TOTALBYTES = (mr_version == "mr1") ?
+      "test.randomtextwrite.total_bytes" : "mapreduce.randomtextwriter.totalbytes";
+
+  @BeforeClass
+  static void setUp() {
+    conf = new Configuration();
+    TestUtils.unpackTestResources(TestHadoopExamples.class, EXAMPLES, null, EXAMPLES_OUT);
+  }
+
+  static Map examples =
+    [
+        pi                :'2 1000',
+        wordcount         :"$EXAMPLES/text $EXAMPLES_OUT/wordcount",
+        multifilewc       :"$EXAMPLES/text $EXAMPLES_OUT/multifilewc",
+        aggregatewordcount:"$EXAMPLES/text $EXAMPLES_OUT/aggregatewordcount 2 textinputformat",
+        aggregatewordhist :"$EXAMPLES/text $EXAMPLES_OUT/aggregatewordhist 2 textinputformat",
+        grep              :"$EXAMPLES/text $EXAMPLES_OUT/grep '[Cc]uriouser'",
+//        sleep             :"-m 10 -r 10",
+        secondarysort     :"$EXAMPLES/ints $EXAMPLES_OUT/secondarysort",
+        randomtextwriter  :"-D $RANDOMTEXTWRITER_TOTALBYTES=1073741824 $EXAMPLES_OUT/randomtextwriter"
+    ];
+
+  private String testName;
+  private String testJar;
+  private String testArgs;
+
+  @Parameters
+  public static Map<String, Object[]> generateTests() {
+    Map<String, Object[]> res = [:];
+    examples.each { k, v -> res[k] = [k.toString(), v.toString()] as Object[]; }
+    return res;
+  }
+
+  public TestHadoopExamples(String name, String args) {
+    testName = name;
+    testArgs = args;
+    testJar = HADOOP_EXAMPLES_JAR;
+  }
+
+  @Test
+  void testMRExample() {
+    sh.exec("hadoop jar $testJar $testName $testArgs");
+
+    assertTrue("Example $testName failed", 
+               sh.getRet() == 0);
+  }
+}

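Each entry in the examples map expands to a single 'hadoop jar' invocation; for instance, the wordcount entry makes testMRExample effectively run:

    // jar path resolved from HADOOP_MAPRED_HOME at runtime
    sh.exec("hadoop jar " + HADOOP_EXAMPLES_JAR + " wordcount examples/text examples-output/wordcount")
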
http://git-wip-us.apache.org/repos/asf/bigtop/blob/87e5388a/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hadoop/mapreduce/TestHadoopSmoke.groovy
----------------------------------------------------------------------
diff --git a/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hadoop/mapreduce/TestHadoopSmoke.groovy b/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hadoop/mapreduce/TestHadoopSmoke.groovy
new file mode 100644
index 0000000..0b2970b
--- /dev/null
+++ b/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hadoop/mapreduce/TestHadoopSmoke.groovy
@@ -0,0 +1,89 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.bigtop.itest.hadoop.mapreduce
+
+import org.apache.bigtop.itest.JarContent
+import org.apache.bigtop.itest.TestUtils
+import org.apache.bigtop.itest.shell.Shell
+import org.junit.AfterClass
+import org.junit.BeforeClass
+import org.junit.Test
+import org.apache.hadoop.conf.Configuration
+import org.apache.hadoop.hdfs.DFSConfigKeys
+import static org.junit.Assert.assertEquals
+import static org.junit.Assert.assertNotNull
+import static org.apache.bigtop.itest.LogErrorsUtils.logError
+
+class TestHadoopSmoke {
+  static Shell sh = new Shell("/bin/bash -s")
+
+  static String hadoopHome = System.getProperty('HADOOP_HOME', '/usr/lib/hadoop')
+  static String streamingHome = System.getenv('STREAMING_HOME')
+  static String hadoopMapReduceHome = System.getProperty('HADOOP_MAPRED_HOME', '/usr/lib/hadoop-mapreduce')
+  static final String STREAMING_HOME =
+    (streamingHome == null) ? hadoopMapReduceHome : streamingHome;
+  static String streaming_jar =
+    JarContent.getJarName(STREAMING_HOME, 'hadoop.*streaming.*.jar');
+  static {
+    assertNotNull("Can't find hadoop-streaming.jar", streaming_jar);
+  }
+  static final String STREAMING_JAR = STREAMING_HOME + "/" + streaming_jar;
+  static String testDir = "test.hadoopsmoke." + (new Date().getTime())
+  static String nn = (new Configuration()).get(DFSConfigKeys.FS_DEFAULT_NAME_KEY)
+
+  String cmd = "hadoop jar ${STREAMING_JAR}" +
+      " -D mapred.map.tasks=1 -D mapred.reduce.tasks=1 -D mapred.job.name=Experiment"
+  String cmd2 = " -input ${testDir}/cachefile/input.txt -mapper map.sh -file map.sh -reducer cat" +
+      " -output ${testDir}/cachefile/out -verbose"
+  String arg = "${nn}/user/${System.properties['user.name']}/${testDir}/cachefile/cachedir.jar#testlink"
+
+  @BeforeClass
+  static void  setUp() throws IOException {
+    String[] inputFiles = ["cachedir.jar", "input.txt"];
+    TestUtils.unpackTestResources(TestHadoopSmoke.class, "${testDir}/cachefile", inputFiles, null);
+  }
+
+  @AfterClass
+  static void tearDown() {
+    sh.exec("hadoop fs -rmr -skipTrash ${testDir}")
+  }
+
+  @Test
+  void testCacheArchive() {
+    sh.exec("hadoop fs -rmr ${testDir}/cachefile/out",
+             cmd + ' -cacheArchive ' + arg + cmd2)
+    logError(sh)
+    sh.exec("hadoop fs -cat ${testDir}/cachefile/out/part-00000")
+    logError(sh)
+
+    assertEquals("cache1\t\ncache2\t", sh.out.join('\n'))
+  }
+
+  @Test
+  void testArchives() {
+    sh.exec("hadoop fs -rmr ${testDir}/cachefile/out",
+             cmd + ' -archives ' + arg + cmd2)
+    logError(sh)
+    sh.exec("hadoop fs -cat ${testDir}/cachefile/out/part-00000")
+    logError(sh)
+
+    assertEquals("cache1\t\ncache2\t", sh.out.join('\n'))
+  }
+
+}

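Concatenating cmd, the archive flag, arg, and cmd2 yields one streaming job per test; for testCacheArchive the assembled command line is effectively as follows (namenode URI, user name, and timestamp illustrative):

    // hadoop jar <STREAMING_JAR> -D mapred.map.tasks=1 -D mapred.reduce.tasks=1 -D mapred.job.name=Experiment
    //   -cacheArchive hdfs://nn.example.com:8020/user/jenkins/test.hadoopsmoke.<ts>/cachefile/cachedir.jar#testlink
    //   -input test.hadoopsmoke.<ts>/cachefile/input.txt -mapper map.sh -file map.sh -reducer cat
    //   -output test.hadoopsmoke.<ts>/cachefile/out -verbose
    sh.exec("hadoop fs -rmr ${testDir}/cachefile/out", cmd + ' -cacheArchive ' + arg + cmd2)
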
http://git-wip-us.apache.org/repos/asf/bigtop/blob/87e5388a/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hadoopexamples/TestHadoopExamples.groovy
----------------------------------------------------------------------
diff --git a/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hadoopexamples/TestHadoopExamples.groovy b/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hadoopexamples/TestHadoopExamples.groovy
deleted file mode 100644
index 6f56f1d..0000000
--- a/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hadoopexamples/TestHadoopExamples.groovy
+++ /dev/null
@@ -1,106 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p/>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p/>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.bigtop.itest.hadoopexamples
-
-import org.junit.BeforeClass
-import static org.junit.Assert.assertNotNull
-import org.apache.bigtop.itest.shell.Shell
-import static org.junit.Assert.assertTrue
-import org.junit.Test
-import org.apache.hadoop.conf.Configuration
-import org.apache.bigtop.itest.JarContent
-import org.apache.bigtop.itest.TestUtils
-import org.apache.commons.logging.LogFactory
-import org.apache.commons.logging.Log
-
-import org.apache.bigtop.itest.junit.OrderedParameterized
-import org.junit.runners.Parameterized.Parameters
-import org.junit.runner.RunWith
-
-@RunWith(OrderedParameterized.class)
-class TestHadoopExamples {
-  static private Log LOG = LogFactory.getLog(TestHadoopExamples.class);
-
-  private static final String HADOOP_MAPRED_HOME = System.getenv('HADOOP_MAPRED_HOME');
-  private static final String HADOOP_CONF_DIR = System.getenv('HADOOP_CONF_DIR');
-  private static String hadoopExamplesJar =
-    JarContent.getJarName(HADOOP_MAPRED_HOME, 'hadoop.*examples.*.jar');
-  static {
-    assertNotNull("HADOOP_MAPRED_HOME has to be set to run this test",
-        HADOOP_MAPRED_HOME);
-    assertNotNull("HADOOP_CONF_DIR has to be set to run this test",
-        HADOOP_CONF_DIR);
-    assertNotNull("Can't find hadoop-examples.jar file", hadoopExamplesJar);
-  }
-  static final String HADOOP_EXAMPLES_JAR =
-    HADOOP_MAPRED_HOME + "/" + hadoopExamplesJar;
-
-  static Shell sh = new Shell("/bin/bash -s");
-  private static final String EXAMPLES = "examples";
-  private static final String EXAMPLES_OUT = "examples-output";
-  private static Configuration conf;
-
-  private static String mr_version = System.getProperty("mr.version", "mr2");
-  static final String RANDOMTEXTWRITER_TOTALBYTES = (mr_version == "mr1") ?
-      "test.randomtextwrite.total_bytes" : "mapreduce.randomtextwriter.totalbytes";
-
-  @BeforeClass
-  static void setUp() {
-    conf = new Configuration();
-    TestUtils.unpackTestResources(TestHadoopExamples.class, EXAMPLES, null, EXAMPLES_OUT);
-  }
-
-  static Map examples =
-    [
-        pi                :'2 1000',
-        wordcount         :"$EXAMPLES/text $EXAMPLES_OUT/wordcount",
-        multifilewc       :"$EXAMPLES/text $EXAMPLES_OUT/multifilewc",
-        aggregatewordcount:"$EXAMPLES/text $EXAMPLES_OUT/aggregatewordcount 2 textinputformat",
-        aggregatewordhist :"$EXAMPLES/text $EXAMPLES_OUT/aggregatewordhist 2 textinputformat",
-        grep              :"$EXAMPLES/text $EXAMPLES_OUT/grep '[Cc]uriouser'",
-//        sleep             :"-m 10 -r 10",
-        secondarysort     :"$EXAMPLES/ints $EXAMPLES_OUT/secondarysort",
-        randomtextwriter  :"-D $RANDOMTEXTWRITER_TOTALBYTES=1073741824 $EXAMPLES_OUT/randomtextwriter"
-    ];
-
-  private String testName;
-  private String testJar;
-  private String testArgs;
-
-  @Parameters
-  public static Map<String, Object[]> generateTests() {
-    Map<String, Object[]> res = [:];
-    examples.each { k, v -> res[k] = [k.toString(), v.toString()] as Object[]; }
-    return res;
-  }
-
-  public TestHadoopExamples(String name, String args) {
-    testName = name;
-    testArgs = args;
-    testJar = HADOOP_EXAMPLES_JAR;
-  }
-
-  @Test
-  void testMRExample() {
-    sh.exec("hadoop jar $testJar $testName $testArgs");
-
-    assertTrue("Example $testName failed", 
-               sh.getRet() == 0);
-  }
-}

http://git-wip-us.apache.org/repos/asf/bigtop/blob/87e5388a/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hadoopsmoke/TestHadoopSmoke.groovy
----------------------------------------------------------------------
diff --git a/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hadoopsmoke/TestHadoopSmoke.groovy b/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hadoopsmoke/TestHadoopSmoke.groovy
deleted file mode 100644
index e024fea..0000000
--- a/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hadoopsmoke/TestHadoopSmoke.groovy
+++ /dev/null
@@ -1,89 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p/>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p/>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.bigtop.itest.hadoopsmoke
-
-import org.apache.bigtop.itest.JarContent
-import org.apache.bigtop.itest.TestUtils
-import org.apache.bigtop.itest.shell.Shell
-import org.junit.AfterClass
-import org.junit.BeforeClass
-import org.junit.Test
-import org.apache.hadoop.conf.Configuration
-import org.apache.hadoop.hdfs.DFSConfigKeys
-import static org.junit.Assert.assertEquals
-import static org.junit.Assert.assertNotNull
-import static org.apache.bigtop.itest.LogErrorsUtils.logError
-
-class TestHadoopSmoke {
-  static Shell sh = new Shell("/bin/bash -s")
-
-  static String hadoopHome = System.getProperty('HADOOP_HOME', '/usr/lib/hadoop')
-  static String streamingHome = System.getenv('STREAMING_HOME')
-  static String hadoopMapReduceHome = System.getProperty('HADOOP_MAPRED_HOME', '/usr/lib/hadoop-mapreduce')
-  static final String STREAMING_HOME =
-    (streamingHome == null) ? hadoopMapReduceHome : streamingHome;
-  static String streaming_jar =
-    JarContent.getJarName(STREAMING_HOME, 'hadoop.*streaming.*.jar');
-  static {
-    assertNotNull("Can't find hadoop-streaming.jar", streaming_jar);
-  }
-  static final String STREAMING_JAR = STREAMING_HOME + "/" + streaming_jar;
-  static String testDir = "test.hadoopsmoke." + (new Date().getTime())
-  static String nn = (new Configuration()).get(DFSConfigKeys.FS_DEFAULT_NAME_KEY)
-
-  String cmd = "hadoop jar ${STREAMING_JAR}" +
-      " -D mapred.map.tasks=1 -D mapred.reduce.tasks=1 -D mapred.job.name=Experiment"
-  String cmd2 = " -input ${testDir}/cachefile/input.txt -mapper map.sh -file map.sh -reducer cat" +
-      " -output ${testDir}/cachefile/out -verbose"
-  String arg = "${nn}/user/${System.properties['user.name']}/${testDir}/cachefile/cachedir.jar#testlink"
-
-  @BeforeClass
-  static void setUp() throws IOException {
-    String[] inputFiles = ["cachedir.jar", "input.txt"];
-    TestUtils.unpackTestResources(TestHadoopSmoke.class, "${testDir}/cachefile", inputFiles, null);
-  }
-
-  @AfterClass
-  static void tearDown() {
-    sh.exec("hadoop fs -rmr -skipTrash ${testDir}")
-  }
-
-  @Test
-  void testCacheArchive() {
-    sh.exec("hadoop fs -rmr ${testDir}/cachefile/out",
-             cmd + ' -cacheArchive ' + arg + cmd2)
-    logError(sh)
-    sh.exec("hadoop fs -cat ${testDir}/cachefile/out/part-00000")
-    logError(sh)
-
-    assertEquals("cache1\t\ncache2\t", sh.out.join('\n'))
-  }
-
-  @Test
-  void testArchives() {
-    sh.exec("hadoop fs -rmr ${testDir}/cachefile/out",
-             cmd + ' -archives ' + arg + cmd2)
-    logError(sh)
-    sh.exec("hadoop fs -cat ${testDir}/cachefile/out/part-00000")
-    logError(sh)
-
-    assertEquals("cache1\t\ncache2\t", sh.out.join('\n'))
-  }
-
-}

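Both tests in the file above run the same streaming job and differ only in the flag used to localize cachedir.jar under the symlink "testlink": -cacheArchive (the streaming-specific option) versus -archives (the generic distributed-cache option). A condensed, hypothetical helper showing the shared command shape:

    // Hypothetical helper: the command shape shared by testCacheArchive and testArchives.
    def streamingCmd(String streamingJar, String testDir, String archiveFlag, String archiveUri) {
      "hadoop jar ${streamingJar}" +
      " -D mapred.map.tasks=1 -D mapred.reduce.tasks=1 -D mapred.job.name=Experiment" +
      " ${archiveFlag} ${archiveUri}" +
      " -input ${testDir}/cachefile/input.txt -mapper map.sh -file map.sh -reducer cat" +
      " -output ${testDir}/cachefile/out -verbose"
    }

    // Usage: streamingCmd(STREAMING_JAR, testDir, '-cacheArchive', arg)
    //    or: streamingCmd(STREAMING_JAR, testDir, '-archives', arg)
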
http://git-wip-us.apache.org/repos/asf/bigtop/blob/87e5388a/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hadooptests/FSCmdExecutor.java
----------------------------------------------------------------------
diff --git a/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hadooptests/FSCmdExecutor.java b/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hadooptests/FSCmdExecutor.java
deleted file mode 100644
index 9fd48bb..0000000
--- a/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hadooptests/FSCmdExecutor.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.bigtop.itest.hadooptests;
-
-import java.io.File;
-import java.util.StringTokenizer;
-
-import org.apache.hadoop.fs.FsShell;
-import org.apache.hadoop.cli.CLITestHelper;
-import org.apache.hadoop.cli.util.CommandExecutor;
-import org.apache.hadoop.util.ToolRunner;
-
-public class FSCmdExecutor extends CommandExecutor {
-  protected String namenode = null;
-  protected FsShell shell = null;
-
-  public FSCmdExecutor(String namenode, FsShell shell) {
-    this.namenode = namenode;
-    this.shell = shell;
-  }
-
-  protected void execute(final String cmd) throws Exception{
-    String[] args = getCommandAsArgs(cmd, "NAMENODE", this.namenode);
-    ToolRunner.run(shell, args);
-  }
-
-  @Override
-  protected String[] getCommandAsArgs(final String cmd, final String masterKey,
-                                      final String master) {
-    StringTokenizer tokenizer = new StringTokenizer(cmd, " ");
-    String[] args = new String[tokenizer.countTokens()];
-    int i = 0;
-    while (tokenizer.hasMoreTokens()) {
-      args[i] = tokenizer.nextToken();
-      args[i] = args[i].replaceAll(masterKey, master);
-      args[i] = args[i].replaceAll("CLITEST_DATA", 
-        new File(CLITestHelper.TEST_CACHE_DATA_DIR).toURI().toString().replace(' ', '+'));
-      args[i] = args[i].replaceAll("TEST_DIR_ABSOLUTE", TestCLI.TEST_DIR_ABSOLUTE);
-      args[i] = args[i].replaceAll("USERNAME", System.getProperty("user.name"));
-
-      i++;
-    }
-    return args;
-  }
-}

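FSCmdExecutor's substance is the placeholder substitution it performs before the argv reaches FsShell via ToolRunner.run(). An illustrative expansion (the namenode URI below is made up):

    // Illustrative: how a test command string expands before execution.
    def cmd  = '-ls NAMENODE/user/USERNAME'
    def args = cmd.tokenize(' ').collect {
      it.replaceAll('NAMENODE', 'hdfs://nn.example.com:8020')    // assumed namenode URI
        .replaceAll('USERNAME', System.getProperty('user.name'))
    }
    // args is now ['-ls', 'hdfs://nn.example.com:8020/user/<you>'],
    // ready for ToolRunner.run(new FsShell(conf), args as String[])
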
http://git-wip-us.apache.org/repos/asf/bigtop/blob/87e5388a/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hadooptests/TestCLI.java
----------------------------------------------------------------------
diff --git a/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hadooptests/TestCLI.java b/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hadooptests/TestCLI.java
deleted file mode 100644
index 954b61b..0000000
--- a/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hadooptests/TestCLI.java
+++ /dev/null
@@ -1,93 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p/>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p/>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.bigtop.itest.hadooptests;
-
-import java.io.File;
-
-import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.fs.FsShell;
-import org.apache.hadoop.cli.CLITestHelper;
-import org.apache.hadoop.cli.util.CLICommand;
-import org.apache.hadoop.cli.util.CLICommandFS;
-import org.apache.hadoop.cli.util.CommandExecutor;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-/**
- * Tests for the Command Line Interface (CLI)
- */
-public class TestCLI extends CLITestHelper {
-  public static final String TEST_DIR_ABSOLUTE = "/tmp/testcli";
-  private String nn;
-  private String sug;
-
-  @Before
-  @Override
-  public void setUp() throws Exception {
-    readTestConfigFile();
-    conf = new HdfsConfiguration();
-    conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, 
-                    true);
-    clitestDataDir =
-      new File(TEST_CACHE_DATA_DIR).toURI().toString().replace(' ', '+');
-    nn = conf.get(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY);
-    sug = conf.get(DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY);
-    // Many of the tests expect a replication value of 1 in the output
-    conf.setInt("dfs.replication", 1);
-  }
-
-  @After
-  @Override
-  public void tearDown() throws Exception {
-    super.tearDown();
-  }
-
-  @Override
-  protected String getTestFile() {
-    return "testConf.xml";
-  }
-
-  @Test
-  @Override
-  public void testAll() {
-    super.testAll();
-  }
-
-  @Override
-  protected String expandCommand(final String cmd) {
-    String expCmd = super.expandCommand(cmd);
-    String testcliDir = TEST_DIR_ABSOLUTE;
-    expCmd = expCmd.replaceAll("TEST_DIR_ABSOLUTE", testcliDir);
-    expCmd = expCmd.replaceAll("SUPERGROUP", sug);
-    return expCmd;
-  }
-
-  @Override
-  protected CommandExecutor.Result execute(CLICommand cmd) throws Exception {
-    if (cmd.getType() instanceof CLICommandFS) {
-      CommandExecutor cmdExecutor = new FSCmdExecutor(nn, new FsShell(conf));
-      return cmdExecutor.executeCommand(cmd.getCmd());
-    } else {
-      throw new IllegalArgumentException("Unknown type of test command: " + cmd.getType());
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/bigtop/blob/87e5388a/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hdfstests/TestDFSAdmin.groovy
----------------------------------------------------------------------
diff --git a/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hdfstests/TestDFSAdmin.groovy b/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hdfstests/TestDFSAdmin.groovy
deleted file mode 100644
index cb50725..0000000
--- a/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hdfstests/TestDFSAdmin.groovy
+++ /dev/null
@@ -1,182 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.bigtop.itest.hdfstests;
-
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.apache.bigtop.itest.JarContent;
-import org.apache.bigtop.itest.shell.Shell;
-
-public class TestDFSAdmin {
- 
-  // set debugging variable to true if you want error messages sent to stdout
-  private static Shell shHDFS = new Shell("/bin/bash", "hdfs");
-
-  @BeforeClass
-  public static void setUp() {
-    // unpack resource
-    JarContent.unpackJarContainer(TestDFSAdmin.class, "." , null);
-    System.out.println("Running DFSAdmin commands:");
-  }
-
-  @AfterClass
-  public static void tearDown() {
-  }
-
-  @Test
-  public void testDFSbasic() { 
-    // report
-    System.out.println("-report"); 
-    shHDFS.exec("hdfs dfsadmin -report");    
-    assertTrue("-report failed", shHDFS.getRet() == 0);
-
-    // help
-    System.out.println("-help"); 
-    shHDFS.exec("hdfs dfsadmin -help");
-    assertTrue("-help failed", shHDFS.getRet() == 0);
-
-    // printTopology
-    System.out.println("-printTopology"); 
-    shHDFS.exec("hdfs dfsadmin -printTopology");
-    assertTrue("-printTopology failed", shHDFS.getRet() == 0);
-
-    // metasave
-    System.out.println("-metasave");
-    shHDFS.exec("hdfs dfsadmin -metasave metasave_test");
-    assertTrue("-metasave failed", shHDFS.getRet() == 0); 
-  }
-
-  @Test
-  public void testDFSsafemode() {
-    // safemode
-    System.out.println("-safemode"); 
-    shHDFS.exec("hdfs dfsadmin -safemode leave");
-    assertTrue("-safemode leave failed", shHDFS.getRet() == 0);
-    shHDFS.exec("hdfs dfsadmin -safemode get");
-    assertTrue("-safemode get failed", shHDFS.getOut().get(0) == "Safe mode is OFF");
-    assertTrue("-safemode get failed", shHDFS.getRet() == 0);
-    shHDFS.exec("hdfs dfsadmin -safemode enter");
-    assertTrue("-safemode enter failed", shHDFS.getRet() == 0);
-    shHDFS.exec("hdfs dfsadmin -safemode get");
-    assertTrue("-safemode get failed", shHDFS.getOut().get(0) == "Safe mode is ON");
-    assertTrue("-safemode get failed", shHDFS.getRet() == 0);
-    shHDFS.exec("hdfs dfsadmin -safemode leave");
-    assertTrue("-safemode leave failed", shHDFS.getRet() == 0); 
-  }
-
-  @Test
-  public void testDFSnamespace() {
-    // saveNamespace
-    System.out.println("-saveNamespace");
-    shHDFS.exec("hdfs dfsadmin -safemode enter"); 
-    shHDFS.exec("hdfs dfsadmin -saveNamespace");
-    assertTrue("-saveNamespace failed", shHDFS.getRet() == 0);
-    shHDFS.exec("hdfs dfsadmin -safemode leave");
-    shHDFS.exec("hdfs dfsadmin -saveNamespace"); 
-    assertTrue("-saveNamespace worked in non safemode", shHDFS.getRet() != 0);
-  }
-
-  @Test
-  public void testDFSrefreshcommands() {
-    // refreshNodes
-    System.out.println("-refreshNodes"); 
-    shHDFS.exec("hdfs dfsadmin -refreshNodes");
-    assertTrue("-refreshNodes failed", shHDFS.getRet() == 0);
-
-    /*// refreshServiceAcl - does not work - shHDFS.getRet() == 255
-    System.out.println("-refreshServiceAcl");
-    shHDFS.exec("hdfs dfsadmin -refreshServiceAcl");
-    System.out.println(shHDFS.getRet());
-    assertTrue("-refreshServiceAcl failed", shHDFS.getRet() == 0); */
-   
-    // refreshUserToGroupsMappings
-    System.out.println("-refreshUserToGroupsMappings");
-    shHDFS.exec("hdfs dfsadmin -refreshUserToGroupsMappings");
-    assertTrue("-refreshUserToGroupsMappings failed", shHDFS.getRet() == 0);
-
-    // refreshSuperUserGroupsConfiguration
-    System.out.println("-refreshSuperUserGroupsConfiguration");
-    shHDFS.exec("hdfs dfsadmin -refreshSuperUserGroupsConfiguration");
-    assertTrue("-refreshSuperUserGroupsConfiguration failed", shHDFS.getRet() == 0); 
-  }
-
-  @Test
-  public void testDFSupgrades() {
-    // upgradeProgress
-    System.out.println("-upgradeProgress"); 
-    shHDFS.exec("hdfs dfsadmin -upgradeProgress details");
-    assertTrue("-upgradeProgress details failed", shHDFS.getRet() == 0);
-    shHDFS.exec("hdfs dfsadmin -upgradeProgress status");
-    assertTrue("-upgradeProgress status failed", shHDFS.getRet() == 0);
-
-    // finalizeUpgrade
-    System.out.println("-finalizeUpgrade");
-    shHDFS.exec("hdfs dfsadmin -finalizeUpgrade");
-    assertTrue("-finalizeUpgrade failed", shHDFS.getRet() == 0);
-  }
-
-  @Test
-  public void testDFSstorage() {  
-    // restoreFailedStorage
-    System.out.println("-restoreFailedStorage"); 
-    shHDFS.exec("hdfs dfsadmin -restoreFailedStorage false");
-    assertTrue("-restoreFailedStorage false failed", shHDFS.getRet() == 0);
-    shHDFS.exec("hdfs dfsadmin -restoreFailedStorage check");
-    assertTrue("-restoreFailedStorage check failed", shHDFS.getOut().get(0) == "restoreFailedStorage is set to false");
-    assertTrue("-restoreFailedStorage check failed", shHDFS.getRet() == 0);
-    shHDFS.exec("hdfs dfsadmin -restoreFailedStorage true");
-    assertTrue("-restoreFailedStorage true failed", shHDFS.getRet() == 0);
-    shHDFS.exec("hdfs dfsadmin -restoreFailedStorage check");
-    assertTrue("-restoreFailedStorage check", shHDFS.getOut().get(0) == "restoreFailedStorage is set to true");
-    assertTrue("-restoreFailedStorage check", shHDFS.getRet() == 0);
-    shHDFS.exec("hdfs dfsadmin -restoreFailedStorage false");
-    assertTrue("-restoreFailedStorage false failed", shHDFS.getRet() == 0); 
-  }
-
-  @Test
-  public void testDFSquotas() {
-    // setQuota, clrQuota
-    System.out.println("-setQuota, -clrQuota");
-    shHDFS.exec("date");
-    String quota_test = "quota_test" + shHDFS.getOut().get(0).replaceAll("\\s","").replaceAll(":","");
-    shHDFS.exec("hadoop fs -test -e $quota_test");
-    if (shHDFS.getRet() == 0) {
-      shHDFS.exec("hadoop fs -rmr -skipTrash $quota_test");
-      assertTrue("Deletion of previous testDistcpInputs from HDFS failed",
-          shHDFS.getRet() == 0);
-    }
-    shHDFS.exec("hadoop fs -mkdir $quota_test");
-    shHDFS.exec("hdfs dfsadmin -setQuota 1000 $quota_test");
-    assertTrue("-setQuota failed", shHDFS.getRet() == 0);
-    shHDFS.exec("hdfs dfsadmin -clrQuota $quota_test");
-    assertTrue("-clrQuota failed", shHDFS.getRet() == 0); 
-
-    // setSpaceQuota, clrSpaceQuota
-    System.out.println("-setSpaceQuota, -clrSpaceQuota");
-    shHDFS.exec("hdfs dfsadmin -setSpaceQuota 1000 $quota_test");
-    assertTrue("-setSpaceQuota failed", shHDFS.getRet() == 0);
-    shHDFS.exec("hdfs dfsadmin -clrSpaceQuota $quota_test");
-    assertTrue("-clrSpaceQuota failed", shHDFS.getRet() == 0);
-    shHDFS.exec("hadoop fs -rmr $quota_test"); 
-  }
-
-}

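A natural follow-up to the quota assertions above (not part of this commit) would confirm that the quota actually took effect. Assuming the usual "hadoop fs -count -q" output, whose first column is the name quota, a hypothetical check could look like:

    // Hypothetical extension: verify the name quota is visible after -setQuota.
    shHDFS.exec("hadoop fs -count -q $quota_test")
    def fields = shHDFS.getOut().get(0).trim().split(/\s+/)
    assertTrue("quota not applied", fields[0] == "1000")
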
http://git-wip-us.apache.org/repos/asf/bigtop/blob/87e5388a/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hdfstests/TestFsck.groovy
----------------------------------------------------------------------
diff --git a/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hdfstests/TestFsck.groovy b/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hdfstests/TestFsck.groovy
deleted file mode 100644
index 183fea7..0000000
--- a/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hdfstests/TestFsck.groovy
+++ /dev/null
@@ -1,51 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p/>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p/>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.bigtop.itest.hdfstests
-
-import org.junit.Test
-import org.apache.bigtop.itest.shell.Shell
-import static org.junit.Assert.assertTrue
-import static org.apache.bigtop.itest.LogErrorsUtils.logError
-
-/**
- * Tests the HDFS fsck command.
- */
-public class TestFsck {
-  static Shell shHDFS = new Shell("/bin/bash", "hdfs" )
-  String[] fsckCmds = [
-    "hdfs fsck /",
-    "hdfs fsck -move /",
-    "hdfs fsck -delete /",
-    "hdfs fsck / -files",
-    "hdfs fsck -openforwrite /",
-    "hdfs fsck -list-corruptfileblocks /",
-    "hdfs fsck -blocks /",
-    "hdfs fsck -locations /",
-    "hdfs fsck -racks /"
-  ]
-
-  @Test
-  public void testFsckBasic() {
-    for (cmd in fsckCmds) {
-      shHDFS.exec(cmd)
-      logError(shHDFS)
-      assertTrue(shHDFS.getRet() == 0)
-    }
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/bigtop/blob/87e5388a/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hdfstests/TestHDFSBalancer.groovy
----------------------------------------------------------------------
diff --git a/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hdfstests/TestHDFSBalancer.groovy b/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hdfstests/TestHDFSBalancer.groovy
deleted file mode 100644
index 25d37b4..0000000
--- a/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hdfstests/TestHDFSBalancer.groovy
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.bigtop.itest.hdfstests;
-
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.apache.bigtop.itest.JarContent;
-import org.apache.bigtop.itest.shell.Shell;
-
-public class TestHDFSBalancer {
- 
-  private static Shell shHDFS = new Shell("/bin/bash", "hdfs");
-  // set with -Dthreshold
-  private static String thresh = "10";
-
-  @BeforeClass
-  public static void setUp() {
-    // unpack resource
-    JarContent.unpackJarContainer(TestHDFSBalancer.class, "." , null);
-    if (System.getProperty("threshold") != null) {
-      thresh = System.getProperty("threshold");
-    }  
-  }
-
-  @AfterClass
-  public static void tearDown() {
-  }
-
-  @Test
-  public void testBalancer() { 
-    System.out.println("Running Balancer:");
-    System.out.println("Threshold is set to " + thresh +". Toggle by adding -Dthreshold=#");
-
-    // must run balancer as hdfs user   
-    shHDFS.exec("hdfs balancer -threshold $thresh");
-  
-    boolean success = false;
-    // any of these messages signifies that balancing completed successfully
-    String success_string1 = "The cluster is balanced. Exiting..."
-    String success_string2 = "No block can be moved"
-    String success_string3 = "No block has been moved for 3 iterations"
-    List out_msgs = shHDFS.getOut();
-    Iterator out_iter = out_msgs.iterator();
-    while (out_iter.hasNext()) {
-      String next_val = out_iter.next();
-      if (next_val.equals(success_string1) || next_val.contains(success_string2) || next_val.contains(success_string3)) {
-        success = true;
-      }
-    }
-
-    String failure_string1 = "namenodes = []"
-    List err_msgs = shHDFS.getErr();
-    Iterator err = err_msgs.iterator();
-
-    while (err.hasNext()) {
-      String err_next = err.next()
-      assertTrue("Balancer could not find namenode", !err_next.contains(failure_string1));
-    }
-
-    // We cannot simply check shHDFS.getRet() == 0, because the balancer prints INFO messages that the shell treats as error output.
-    assertTrue("Balancer failed", success);
-  }
-
-}
-
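
Because the balancer emits its INFO logging on stderr, the test above scans stdout for known success markers instead of trusting the exit status or an empty error stream. In idiomatic Groovy the scan reduces to the following (slightly looser than the original, since it uses contains() for all three markers):

    // Equivalent form of the success scan in testBalancer().
    def markers = ['The cluster is balanced. Exiting...',
                   'No block can be moved',
                   'No block has been moved for 3 iterations']
    boolean balanced = shHDFS.getOut().any { line ->
      markers.any { m -> line.contains(m) }
    }
    assertTrue('Balancer failed', balanced)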