Posted to commits@bigtop.apache.org by rv...@apache.org on 2012/08/09 22:35:10 UTC

svn commit: r1371458 - /incubator/bigtop/branches/branch-0.4/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hdfstests/TestHDFSQuota.groovy

Author: rvs
Date: Thu Aug  9 20:35:09 2012
New Revision: 1371458

URL: http://svn.apache.org/viewvc?rev=1371458&view=rev
Log:
BIGTOP-682. Minor Clean up of TestHDFSQuota (Sujay Rau via Bruno Mahé)
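
The patch itself follows; as a quick orientation, the core of the cleanup is that the test now builds its HDFS folder names once as static fields and interpolates them directly, and it adds a second, non-privileged Shell for the cases that are expected to fail for a non-admin user. Below is a minimal standalone sketch of that pattern. It is not part of the commit: it assumes only the Shell API already used by the test (exec(), getRet(), getOut()) plus a running HDFS cluster with an "hdfs" superuser, and the class and method names here (QuotaSketch, main) are illustrative only.

    import org.apache.bigtop.itest.shell.Shell

    // Illustrative sketch of the pattern used by the patched test; not the test itself.
    class QuotaSketch {
      // Privileged shell runs commands as the "hdfs" superuser, plain shell as the current user.
      static Shell shHDFS = new Shell("/bin/bash", "hdfs")
      static Shell sh = new Shell("/bin/bash")

      // Build a unique folder name once and reuse it via GString interpolation.
      static String stamp = shHDFS.exec("date").getOut().get(0).replaceAll("\\s", "").replaceAll(":", "")
      static String testQuotaFolder1 = "testQuotaFolder" + stamp + "1"

      static void main(String[] args) {
        shHDFS.exec("hadoop fs -mkdir $testQuotaFolder1")
        assert shHDFS.getRet() == 0 : "Could not create input directory"

        // Quota changes attempted by a non-admin user are expected to fail.
        sh.exec("hadoop dfsadmin -setQuota 10 $testQuotaFolder1")
        assert sh.getRet() != 0 : "setQuota should not have worked"

        // Clean up the test folder as the superuser.
        shHDFS.exec("hadoop fs -rmr -skipTrash $testQuotaFolder1")
      }
    }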

Modified:
    incubator/bigtop/branches/branch-0.4/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hdfstests/TestHDFSQuota.groovy

Modified: incubator/bigtop/branches/branch-0.4/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hdfstests/TestHDFSQuota.groovy
URL: http://svn.apache.org/viewvc/incubator/bigtop/branches/branch-0.4/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hdfstests/TestHDFSQuota.groovy?rev=1371458&r1=1371457&r2=1371458&view=diff
==============================================================================
--- incubator/bigtop/branches/branch-0.4/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hdfstests/TestHDFSQuota.groovy (original)
+++ incubator/bigtop/branches/branch-0.4/bigtop-tests/test-artifacts/hadoop/src/main/groovy/org/apache/bigtop/itest/hdfstests/TestHDFSQuota.groovy Thu Aug  9 20:35:09 2012
@@ -23,59 +23,56 @@ import static org.junit.Assert.assertTru
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
-import org.apache.bigtop.itest.JarContent;
 import org.apache.bigtop.itest.shell.Shell;
 
 public class TestHDFSQuota {
  
   private static Shell shHDFS = new Shell("/bin/bash", "hdfs");
+  private static Shell sh = new Shell("/bin/bash");
   private static final long LARGE = Long.MAX_VALUE - 1;
   private static final String USERNAME = System.getProperty("user.name");
   private static String quotaDate = shHDFS.exec("date").getOut().get(0).replaceAll("\\s","").replaceAll(":","");
   private static String testQuotaFolder = "testQuotaFolder" + quotaDate;
+  private static String testQuotaFolder1 = testQuotaFolder + "1";
+  private static String testQuotaFolder2 = testQuotaFolder + "2";
   
   @BeforeClass
   public static void setUp() {
-    // unpack resource
-    JarContent.unpackJarContainer(TestHDFSQuota.class, "." , null); 
-
-    shHDFS.exec("hadoop fs -mkdir $testQuotaFolder" + "1");
-    assertTrue("Could not create input directory", shHDFS.getRet() == 0);
-
+    // creating test folders
+    shHDFS.exec("hadoop fs -mkdir $testQuotaFolder1");
     assertTrue("Could not create input directory", shHDFS.getRet() == 0);
 
-    System.out.println("Running Quota Tests:");
+    sh.exec("hadoop fs -mkdir $testQuotaFolder1");
+    assertTrue("Could not create input directory", sh.getRet() == 0);
   }
 
   @AfterClass
   public static void tearDown() {
     // clean up of existing folders
-    shHDFS.exec("hadoop fs -test -e $testQuotaFolder" + "1");
-    if (shHDFS.getRet() == 0) {
-      shHDFS.exec("hadoop fs -rmr -skipTrash $testQuotaFolder" + "1");
-      assertTrue("Deletion of previous testQuotaFolder1 from HDFS failed",
-          shHDFS.getRet() == 0);
-    }
-    shHDFS.exec("hadoop fs -test -e $testQuotaFolder" + "3");
+    shHDFS.exec("hadoop fs -test -e $testQuotaFolder1");
     if (shHDFS.getRet() == 0) {
-      shHDFS.exec("hadoop fs -rmr -skipTrash $testQuotaFolder" + "3");
+      shHDFS.exec("hadoop fs -rmr -skipTrash $testQuotaFolder1");
       assertTrue("Deletion of previous testQuotaFolder1 from HDFS failed",
           shHDFS.getRet() == 0);
     }
-    shHDFS.exec("hadoop fs -test -e /user/$USERNAME/$testQuotaFolder" + "2");
+    shHDFS.exec("hadoop fs -test -e $testQuotaFolder2");
     if (shHDFS.getRet() == 0) {
-      shHDFS.exec("hadoop fs -rmr -skipTrash /user/$USERNAME/$testQuotaFolder" + "2");
+      shHDFS.exec("hadoop fs -rmr -skipTrash $testQuotaFolder2");
       assertTrue("Deletion of previous testQuotaFolder2 from HDFS failed",
           shHDFS.getRet() == 0);
     }
-    shHDFS.exec("hadoop fs -mkdir /user/$USERNAME/$testQuotaFolder" + "2");
-
+    sh.exec("hadoop fs -test -e $testQuotaFolder1");
+    if (sh.getRet() == 0) {
+      sh.exec("hadoop fs -rmr -skipTrash $testQuotaFolder1");
+      assertTrue("Deletion of previous testQuotaFolder1 from HDFS failed",
+          sh.getRet() == 0);
+    }
   }
 
   @Test
   public void testNewlyCreatedDir() { 
     // newly created dir should have no name quota, no space quota   
-    shHDFS.exec("hadoop fs -count -q $testQuotaFolder" + "1");
+    shHDFS.exec("hadoop fs -count -q $testQuotaFolder1");
     assertTrue("Could not use count command", shHDFS.getRet() == 0);
     String[] output = shHDFS.getOut().get(0).trim().split();
     assertTrue("Newly created directory had a set name quota", output[0].equals("none"));
@@ -87,137 +84,137 @@ public class TestHDFSQuota {
   @Test
   public void testAdminPermissions() { 
     // admin setting quotas should succeed
-    shHDFS.exec("hdfs dfsadmin -setQuota 10 $testQuotaFolder" + "1");
+    shHDFS.exec("hadoop dfsadmin -setQuota 10 $testQuotaFolder1");
     assertTrue("setQuota failed", shHDFS.getRet() == 0);
-    shHDFS.exec("hdfs dfsadmin -setSpaceQuota 1000000 $testQuotaFolder" + "1");
+    shHDFS.exec("hadoop dfsadmin -setSpaceQuota 1000000 $testQuotaFolder1");
     assertTrue("setSpaceQuota failed", shHDFS.getRet() == 0);
 
     // non-admin setting/clearing quotas should fail
-    shHDFS.exec("hdfs dfsadmin -setQuota 10 $testQuotaFolder" + "2");
-    assertTrue("setQuota should not have worked", shHDFS.getRet() != 0);
-    shHDFS.exec("hdfs dfsadmin -setSpaceQuota 1000000 $testQuotaFolder" + "2");
-    assertTrue("setSpaceQuota should not have worked", shHDFS.getRet() != 0);
-    shHDFS.exec("hdfs dfsadmin -clrQuota $testQuotaFolder" + "2");
-    assertTrue("clrQuota should not have worked", shHDFS.getRet() != 0);
-    shHDFS.exec("hdfs dfsadmin -clrSpaceQuota $testQuotaFolder" + "2");
-    assertTrue("clrSpaceQuota should not have worked", shHDFS.getRet() != 0);
+    sh.exec("hadoop dfsadmin -setQuota 10 $testQuotaFolder1");
+    assertTrue("setQuota should not have worked", sh.getRet() != 0);
+    sh.exec("hadoop dfsadmin -setSpaceQuota 1000000 $testQuotaFolder1");
+    assertTrue("setSpaceQuota should not have worked", sh.getRet() != 0);
+    sh.exec("hadoop dfsadmin -clrQuota $testQuotaFolder1");
+    assertTrue("clrQuota should not have worked", sh.getRet() != 0);
+    sh.exec("hadoop dfsadmin -clrSpaceQuota $testQuotaFolder1");
+    assertTrue("clrSpaceQuota should not have worked", sh.getRet() != 0);
 
     // admin clearing quotas should succeed
-    shHDFS.exec("hdfs dfsadmin -clrQuota $testQuotaFolder" + "1");
+    shHDFS.exec("hadoop dfsadmin -clrQuota $testQuotaFolder1");
     assertTrue("clrQuota failed", shHDFS.getRet() == 0);
-    shHDFS.exec("hdfs dfsadmin -clrSpaceQuota $testQuotaFolder" + "1");
+    shHDFS.exec("hadoop dfsadmin -clrSpaceQuota $testQuotaFolder1");
     assertTrue("clrSpaceQuota failed", shHDFS.getRet() == 0);
   } 
 
   @Test
   public void testRename() { 
     // name and space quotas stick after rename
-    shHDFS.exec("hadoop fs -count -q $testQuotaFolder" + "1");
+    shHDFS.exec("hadoop fs -count -q $testQuotaFolder1");
     assertTrue("Could not use count command", shHDFS.getRet() == 0);
     String[] status1 = shHDFS.getOut().get(0).trim().split();
-    shHDFS.exec("hadoop fs -mv $testQuotaFolder" + "1" + " /user/hdfs/$testQuotaFolder" + "3");
+    shHDFS.exec("hadoop fs -mv $testQuotaFolder1" + " /user/hdfs/$testQuotaFolder2");
     assertTrue("Could not use move command", shHDFS.getRet() == 0);
-    shHDFS.exec("hadoop fs -count -q $testQuotaFolder" + "3");
+    shHDFS.exec("hadoop fs -count -q $testQuotaFolder2");
     assertTrue("Could not use count command", shHDFS.getRet() == 0);
     String[] status2 = shHDFS.getOut().get(0).trim().split();
     for (int i = 0; i < status1.length - 1; i++) {
       assertTrue("quotas changed after folder rename", status1[i].equals(status2[i]));
     }
-    shHDFS.exec("hadoop fs -mv $testQuotaFolder" + "3" + " /user/hdfs/$testQuotaFolder" + "1");
+    shHDFS.exec("hadoop fs -mv $testQuotaFolder2" + " /user/hdfs/$testQuotaFolder1");
     assertTrue("Could not use move command", shHDFS.getRet() == 0);
   }
 
   @Test
   public void testInputValues() { 
     // the largest allowable quota size is Long.Max_Value and must be greater than zero
-    shHDFS.exec("hdfs dfsadmin -setQuota -1 $testQuotaFolder" + "1");
+    shHDFS.exec("hadoop dfsadmin -setQuota -1 $testQuotaFolder1");
     assertTrue("setQuota should not have worked", shHDFS.getRet() != 0);
-    shHDFS.exec("hdfs dfsadmin -setSpaceQuota -1 $testQuotaFolder" + "1");
+    shHDFS.exec("hadoop dfsadmin -setSpaceQuota -1 $testQuotaFolder1");
     assertTrue("setSpaceQuota should not have worked", shHDFS.getRet() != 0);  
-    shHDFS.exec("hdfs dfsadmin -setQuota 1.04 $testQuotaFolder" + "1");
+    shHDFS.exec("hadoop dfsadmin -setQuota 1.04 $testQuotaFolder1");
     assertTrue("setQuota should not have worked", shHDFS.getRet() != 0);
-    shHDFS.exec("hdfs dfsadmin -setSpaceQuota 1.04 $testQuotaFolder" + "1");
+    shHDFS.exec("hadoop dfsadmin -setSpaceQuota 1.04 $testQuotaFolder1");
     assertTrue("setSpaceQuota should not have worked", shHDFS.getRet() != 0);        
-    shHDFS.exec("hdfs dfsadmin -setQuota 0 $testQuotaFolder" + "1");
+    shHDFS.exec("hadoop dfsadmin -setQuota 0 $testQuotaFolder1");
     assertTrue("setQuota should not have worked", shHDFS.getRet() != 0);
-    shHDFS.exec("hdfs dfsadmin -setSpaceQuota 0 $testQuotaFolder" + "1");
+    shHDFS.exec("hadoop dfsadmin -setSpaceQuota 0 $testQuotaFolder1");
     assertTrue("setSpaceQuota should not have worked", shHDFS.getRet() != 0);
-    shHDFS.exec("hdfs dfsadmin -setQuota $LARGE $testQuotaFolder" + "1");
+    shHDFS.exec("hadoop dfsadmin -setQuota $LARGE $testQuotaFolder1");
     assertTrue("setQuota failed", shHDFS.getRet() == 0);
-    shHDFS.exec("hdfs dfsadmin -setSpaceQuota $LARGE $testQuotaFolder" + "1");
+    shHDFS.exec("hadoop dfsadmin -setSpaceQuota $LARGE $testQuotaFolder1");
     assertTrue("setSpaceQuota failed", shHDFS.getRet() == 0);
-    shHDFS.exec("hdfs dfsadmin -setQuota 9223372036854775808 $testQuotaFolder" + "1");
+    shHDFS.exec("hadoop dfsadmin -setQuota 9223372036854775808 $testQuotaFolder1");
     assertTrue("setQuota should not have worked", shHDFS.getRet() != 0);
-    shHDFS.exec("hdfs dfsadmin -setSpaceQuota 9223372036854775808 $testQuotaFolder" + "1");
+    shHDFS.exec("hadoop dfsadmin -setSpaceQuota 9223372036854775808 $testQuotaFolder1");
     assertTrue("setSpaceQuota should not have worked", shHDFS.getRet() != 0);
   }
 
   @Test
   public void testForceDirEmpty() {
     // setting the name quota to 1 for an empty dir will cause the dir to remain empty
-    shHDFS.exec("hdfs dfsadmin -setQuota 1 $testQuotaFolder" + "1");
+    shHDFS.exec("hadoop dfsadmin -setQuota 1 $testQuotaFolder1");
     assertTrue("Could not setQuota", shHDFS.getRet() == 0);
-    shHDFS.exec("hadoop fs -mkdir $testQuotaFolder" + "1" + "/sample1");
+    shHDFS.exec("hadoop fs -mkdir $testQuotaFolder1" + "/sample1");
     assertTrue("mkdir should not have worked due to quota of 1", shHDFS.getRet() != 0);
   }
 
   @Test
   public void testQuotasPostViolation() {  
     // quota can be set even if it violates
-    shHDFS.exec("hdfs dfsadmin -setQuota $LARGE $testQuotaFolder" + "1");
+    shHDFS.exec("hadoop dfsadmin -setQuota $LARGE $testQuotaFolder1");
     assertTrue("Could not setQuota", shHDFS.getRet() == 0);
-    shHDFS.exec("hadoop fs -put - $testQuotaFolder" + "1", "-------TEST STRING--------"); 
+    shHDFS.exec("hadoop fs -put - $testQuotaFolder1" + "/testString1", "-------TEST STRING--------"); 
     assertTrue("Could not use put command", shHDFS.getRet() == 0);
-    shHDFS.exec("hadoop fs -mkdir $testQuotaFolder" + "1" + "/sample1");
+    shHDFS.exec("hadoop fs -mkdir $testQuotaFolder1" + "/sample1");
     assertTrue("Could not use mkdir command", shHDFS.getRet() == 0);
-    shHDFS.exec("hadoop fs -mkdir $testQuotaFolder" + "1" + "/sample2");
+    shHDFS.exec("hadoop fs -mkdir $testQuotaFolder1" + "/sample2");
     assertTrue("Could not use mkdir command", shHDFS.getRet() == 0);
-    shHDFS.exec("hdfs dfsadmin -setQuota 2 $testQuotaFolder" + "1");
+    shHDFS.exec("hadoop dfsadmin -setQuota 2 $testQuotaFolder1");
     assertTrue("setQuota should have worked", shHDFS.getRet() == 0);
-    shHDFS.exec("hdfs dfsadmin -setSpaceQuota 1 $testQuotaFolder" + "1");
+    shHDFS.exec("hadoop dfsadmin -setSpaceQuota 1 $testQuotaFolder1");
     assertTrue("setSpaceQuota should have worked", shHDFS.getRet() == 0);
   }
 
   @Test
   public void testQuotas() {
     // dir creation should fail - name quota
-    shHDFS.exec("hdfs dfsadmin -setSpaceQuota 10000000000 $testQuotaFolder" + "1");
+    shHDFS.exec("hadoop dfsadmin -setSpaceQuota 10000000000 $testQuotaFolder1");
     assertTrue("Could not setSpaceQuota", shHDFS.getRet() == 0);
-    shHDFS.exec("hadoop fs -mkdir $testQuotaFolder" + "1" + "/sample3");
+    shHDFS.exec("hadoop fs -mkdir $testQuotaFolder1" + "/sample3");
     assertTrue("mkdir should not have worked", shHDFS.getRet() != 0);
 
     // file creation should fail - name quota
-    shHDFS.exec("hadoop fs -rmr $testQuotaFolder" + "1" + "/-"); 
-    shHDFS.exec("hadoop fs -put - $testQuotaFolder" + "1", "-------TEST STRING--------"); 
+    shHDFS.exec("hadoop fs -rmr $testQuotaFolder1" + "/testString1"); 
+    shHDFS.exec("hadoop fs -put - $testQuotaFolder1" + "/testString2", "-------TEST STRING--------"); 
     assertTrue("put should not have worked", shHDFS.getRet() != 0);
 
     // file creation should fail - space quota
-    shHDFS.exec("hdfs dfsadmin -setSpaceQuota 10 $testQuotaFolder" + "1");
+    shHDFS.exec("hadoop dfsadmin -setSpaceQuota 10 $testQuotaFolder1");
     assertTrue("Could not setSpaceQuota", shHDFS.getRet() == 0);
-    shHDFS.exec("hdfs dfsadmin -setQuota 1000 $testQuotaFolder" + "1");
+    shHDFS.exec("hadoop dfsadmin -setQuota 1000 $testQuotaFolder1");
     assertTrue("Could not setQuota", shHDFS.getRet() == 0);
-    shHDFS.exec("hadoop fs -put - $testQuotaFolder" + "1", "-------TEST STRING--------"); 
+    shHDFS.exec("hadoop fs -put - $testQuotaFolder1"  + "/testString3", "-------TEST STRING--------"); 
     assertTrue("put should not have worked", shHDFS.getRet() != 0); 
   }
 
-  @Test
+  //@Test - can be reinstated upon resolution of BIGTOP-635 due to restarting of hdfs service
   public void testLogEntries() {
     // Log entry created when nodes are started with both quota violations
     shHDFS.exec("date");
     String date = "logTest" + shHDFS.getOut().get(0).replaceAll("\\s","").replaceAll(":","");
     shHDFS.exec("hadoop fs -mkdir $date");
     assertTrue("Could not use mkdir command", shHDFS.getRet() == 0);
-    shHDFS.exec("hadoop fs -put - $date", "-------TEST STRING--------");
+    shHDFS.exec("hadoop fs -put - $date" + "/testString1", "-------TEST STRING--------");
     assertTrue("Could not use put command", shHDFS.getRet() == 0);
-    shHDFS.exec("hdfs dfsadmin -setQuota 1 $date");
+    shHDFS.exec("hadoop dfsadmin -setQuota 1 $date");
     assertTrue("Could not setQuota", shHDFS.getRet() == 0); 
     shHDFS.exec("date");
     String date1 = "logTest" + shHDFS.getOut().get(0).replaceAll("\\s","").replaceAll(":","");
     shHDFS.exec("hadoop fs -mkdir $date1");
     assertTrue("Could not use mkdir command", shHDFS.getRet() == 0);
-    shHDFS.exec("hadoop fs -put - $date1", "-------TEST STRING--------"); 
+    shHDFS.exec("hadoop fs -put - $date1"  + "/testString2", "-------TEST STRING--------"); 
     assertTrue("Could not use put command", shHDFS.getRet() == 0);
-    shHDFS.exec("hdfs dfsadmin -setSpaceQuota 1 $date1");
+    shHDFS.exec("hadoop dfsadmin -setSpaceQuota 1 $date1");
     assertTrue("Could not setSpaceQuota", shHDFS.getRet() == 0); 
     shHDFS.exec("for service in /etc/init.d/hadoop-hdfs-*; do sudo \$service stop; done");
     shHDFS.exec("for service in /etc/init.d/hadoop-hdfs-*; do sudo \$service start; done");
@@ -249,28 +246,28 @@ public class TestHDFSQuota {
     String date = "failTest" + shHDFS.getOut().get(0).replaceAll("\\s","").replaceAll(":","");
     shHDFS.exec("hadoop fs -mkdir $date");
     assertTrue("Could not use mkdir command", shHDFS.getRet() == 0);
-    shHDFS.exec("hadoop fs -put - $date", "-------TEST STRING--------");
+    shHDFS.exec("hadoop fs -put - $date" + "/testString1", "-------TEST STRING--------");
     assertTrue("Could not use put command", shHDFS.getRet() == 0);
     // Errors when setting quotas on a file
-    shHDFS.exec("hdfs dfsadmin -setQuota 1000 $date/-");
+    shHDFS.exec("hadoop dfsadmin -setQuota 1000 $date/testString1");
     assertTrue("setting quota on a file should not have worked", shHDFS.getRet() != 0);
-    shHDFS.exec("hdfs dfsadmin -setSpaceQuota 1000 $date/-");
+    shHDFS.exec("hadoop dfsadmin -setSpaceQuota 1000 $date/testString1");
     assertTrue("setting quota on a file should not have worked", shHDFS.getRet() != 0); 
 
     // Errors when clearing quotas on a file
-    shHDFS.exec("hdfs dfsadmin -clrQuota $date/-");
-    assertTrue("setting quota on a file should not have worked", shHDFS.getErr().size() > 0 && shHDFS.getErr().get(0).contains("on a file"));
-    shHDFS.exec("hdfs dfsadmin -clrSpaceQuota $date/-");
-    assertTrue("setting quota on a file should not have worked", shHDFS.getErr().size() > 0 && shHDFS.getErr().get(0).contains("on a file"));
+    shHDFS.exec("hadoop dfsadmin -clrQuota $date/testString1");
+    assertTrue("clearing quota on a file should not have worked", shHDFS.getRet() != 0);
+    shHDFS.exec("hadoop dfsadmin -clrSpaceQuota $date/testString1");
+    assertTrue("clearing quota on a file should not have worked", shHDFS.getRet() != 0);
 
     // set/clr quota on nonexistant directory
-    shHDFS.exec("hdfs dfsadmin -setQuota 100 DIRECTORYDOESNOTEXIST" + date);
+    shHDFS.exec("hadoop dfsadmin -setQuota 100 DIRECTORYDOESNOTEXIST" + date);
     assertTrue("setting quota on non-existant directory should not have worked", shHDFS.getRet() != 0); 
-    shHDFS.exec("hdfs dfsadmin -setSpaceQuota 100 DIRECTORYDOESNOTEXIST" + date);
+    shHDFS.exec("hadoop dfsadmin -setSpaceQuota 100 DIRECTORYDOESNOTEXIST" + date);
     assertTrue("setting quota on non-existant directory should not have worked", shHDFS.getRet() != 0); 
-    shHDFS.exec("hdfs dfsadmin -clrQuota DIRECTORYDOESNOTEXIST" + date);
+    shHDFS.exec("hadoop dfsadmin -clrQuota DIRECTORYDOESNOTEXIST" + date);
     assertTrue("clearing quota on non-existant directory should not have worked", shHDFS.getRet() != 0); 
-    shHDFS.exec("hdfs dfsadmin -clrSpaceQuota DIRECTORYDOESNOTEXIST" + date);
+    shHDFS.exec("hadoop dfsadmin -clrSpaceQuota DIRECTORYDOESNOTEXIST" + date);
     assertTrue("clearing quota on non-existant directory should not have worked", shHDFS.getRet() != 0); 
 
     shHDFS.exec("hadoop fs -rmr $date"); 
@@ -283,22 +280,22 @@ public class TestHDFSQuota {
     String repFolder = "repFactorTest" + shHDFS.getOut().get(0).replaceAll("\\s","").replaceAll(":","");
     shHDFS.exec("hadoop fs -mkdir $repFolder");
     assertTrue("Could not use mkdir command", shHDFS.getRet() == 0);    
-    shHDFS.exec("hadoop fs -put - $repFolder", "-------TEST STRING--------");
+    shHDFS.exec("hadoop fs -put - $repFolder" + "/testString1" , "-------TEST STRING--------");
     assertTrue("Could not use put command", shHDFS.getRet() == 0);
-    shHDFS.exec("hdfs dfsadmin -setSpaceQuota 1000 $repFolder");
+    shHDFS.exec("hadoop dfsadmin -setSpaceQuota 1000 $repFolder");
     assertTrue("Could not setQuota", shHDFS.getRet() == 0); 
-    shHDFS.exec("hadoop fs -setrep 1 $repFolder/-");
+    shHDFS.exec("hadoop fs -setrep 1 $repFolder/testString1");
     shHDFS.exec("hadoop fs -count -q $repFolder");
     assertTrue("Could not use count command", shHDFS.getRet() == 0);
     String[] output = shHDFS.getOut().get(0).trim().split();   
     int size_of_one = Integer.parseInt(output[2]) - Integer.parseInt(output[3]);
-    shHDFS.exec("hadoop fs -setrep 5 $repFolder/-");
+    shHDFS.exec("hadoop fs -setrep 5 $repFolder/testString1");
     shHDFS.exec("hadoop fs -count -q $repFolder");
     assertTrue("Could not use count command", shHDFS.getRet() == 0);
     output = shHDFS.getOut().get(0).trim().split();   
     int size_of_five = Integer.parseInt(output[2]) - Integer.parseInt(output[3]);
     assertTrue("Quota not debited correctly", size_of_one * 5 == size_of_five);
-    shHDFS.exec("hadoop fs -setrep 3 $repFolder/-");
+    shHDFS.exec("hadoop fs -setrep 3 $repFolder/testString1");
     shHDFS.exec("hadoop fs -count -q $repFolder");
     assertTrue("Could not use count command", shHDFS.getRet() == 0);
     output = shHDFS.getOut().get(0).trim().split();   
@@ -309,4 +306,3 @@ public class TestHDFSQuota {
 
 }
 
-