Posted to mapreduce-commits@hadoop.apache.org by vi...@apache.org on 2010/03/03 05:52:17 UTC

svn commit: r918326 - in /hadoop/mapreduce/trunk: ./ src/test/mapred/org/apache/hadoop/mapred/ src/test/mapred/org/apache/hadoop/mapred/pipes/

Author: vinodkv
Date: Wed Mar  3 04:52:17 2010
New Revision: 918326

URL: http://svn.apache.org/viewvc?rev=918326&view=rev
Log:
MAPREDUCE-1421. LinuxTaskController tests failing on trunk after the commit of MAPREDUCE-1385. Contributed by Amareshwari Sriramadasu.

Modified:
    hadoop/mapreduce/trunk/CHANGES.txt
    hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/ClusterWithLinuxTaskController.java
    hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestDebugScript.java
    hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestDebugScriptWithLinuxTaskController.java
    hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestJobExecutionAsDifferentUser.java
    hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestKillSubProcessesWithLinuxTaskController.java
    hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestLinuxTaskController.java
    hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestLocalizationWithLinuxTaskController.java
    hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestTrackerDistributedCacheManagerWithLinuxTaskController.java
    hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/pipes/TestPipes.java
    hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/pipes/TestPipesAsDifferentUser.java

Modified: hadoop/mapreduce/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/CHANGES.txt?rev=918326&r1=918325&r2=918326&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/CHANGES.txt (original)
+++ hadoop/mapreduce/trunk/CHANGES.txt Wed Mar  3 04:52:17 2010
@@ -395,6 +395,9 @@
     MAPREDUCE-1510. RAID should regenerate parity files if they get deleted.
     (Rodrigo Schmidt via dhruba)
 
+    MAPREDUCE-1421. Fix the LinuxTaskController tests failing on trunk after
+    the commit of MAPREDUCE-1385. (Amareshwari Sriramadasu via vinodkv)
+
 Release 0.21.0 - Unreleased
 
   INCOMPATIBLE CHANGES

Modified: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/ClusterWithLinuxTaskController.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/ClusterWithLinuxTaskController.java?rev=918326&r1=918325&r2=918326&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/ClusterWithLinuxTaskController.java (original)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/ClusterWithLinuxTaskController.java Wed Mar  3 04:52:17 2010
@@ -34,7 +34,6 @@
 import org.apache.hadoop.mapreduce.MRConfig;
 import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
 import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig;
-import org.apache.hadoop.security.Groups;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
 
@@ -49,7 +48,11 @@
  * <ol>
  * <li>Build LinuxTaskController by not passing any
  * <code>-Dhadoop.conf.dir</code></li>
- * <li>Make the built binary to setuid executable</li>
+ * <li>Change ownership of the built binary to root:group1, where group1 is
+ * a secondary group of the test runner.</li>
+ * <li>Change permissions on the binary so that the <em>others</em> component
+ * does not have any permissions on the binary</li>
+ * <li>Make the built binary setuid and setgid executable</li>
  * <li>Execute following targets:
  * <code>ant test -Dcompile.c++=true -Dtaskcontroller-path=<em>path to built binary</em> 
  * -Dtaskcontroller-ugi=<em>user,group</em></code>
@@ -74,11 +77,7 @@
     
     @Override
     public void setup() throws IOException {
-      // get the current ugi and set the task controller group owner
-      Groups groups = new Groups(new Configuration());
-      String ttGroup = groups.getGroups(
-          UserGroupInformation.getCurrentUser().getUserName()).get(0);
-      getConf().set(TTConfig.TT_GROUP, ttGroup);
+      getConf().set(TTConfig.TT_GROUP, taskTrackerSpecialGroup);
 
       // write configuration file
       configurationFile = createTaskControllerConf(System
@@ -134,6 +133,21 @@
   private static File configurationFile = null;
 
   protected UserGroupInformation taskControllerUser;
+  
+  protected static String taskTrackerSpecialGroup = null;
+  static {
+    if (isTaskExecPathPassed()) {
+      try {
+        taskTrackerSpecialGroup = FileSystem.getLocal(new Configuration())
+            .getFileStatus(
+                new Path(System.getProperty(TASKCONTROLLER_PATH),
+                    "task-controller")).getGroup();
+      } catch (IOException e) {
+        LOG.warn("Could not get group of the binary", e);
+        fail("Could not get group of the binary");
+      }
+    }
+  }
 
   /*
    * Utility method which subclasses use to start and configure the MR Cluster

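The static block above replaces the old Groups-based lookup: rather than taking the first group of the current user, the tests now read the group owner of the setuid/setgid task-controller binary itself and feed that into TTConfig.TT_GROUP. A minimal standalone sketch of that lookup, using only Hadoop's local FileSystem API (the default build-directory path below is a hypothetical placeholder; the tests take it from the -Dtaskcontroller-path property):

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class TaskControllerGroupLookup {
      public static void main(String[] args) throws IOException {
        // Hypothetical default; the tests read this directory from the
        // -Dtaskcontroller-path system property instead.
        String buildDir = args.length > 0 ? args[0] : "/tmp/tc-build";

        FileSystem localFs = FileSystem.getLocal(new Configuration());
        FileStatus status =
            localFs.getFileStatus(new Path(buildDir, "task-controller"));

        // The binary's group owner is what now goes into TTConfig.TT_GROUP,
        // replacing the first group of the user running the tests.
        System.out.println("owner = " + status.getOwner());
        System.out.println("group = " + status.getGroup());
        System.out.println("perms = " + status.getPermission());
      }
    }
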
Modified: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestDebugScript.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestDebugScript.java?rev=918326&r1=918325&r2=918326&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestDebugScript.java (original)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestDebugScript.java Wed Mar  3 04:52:17 2010
@@ -133,7 +133,7 @@
    * @throws Exception
    */
   static void verifyDebugScriptOutput(TaskAttemptID taskId) throws Exception {
-    verifyDebugScriptOutput(taskId, null, null);
+    verifyDebugScriptOutput(taskId, null, null, null);
   }
   /**
    * Method which verifies if debug script ran and ran correctly.
@@ -145,7 +145,7 @@
    * @throws Exception
    */
   static void verifyDebugScriptOutput(TaskAttemptID taskId, String expectedUser, 
-      String expectedPerms) throws Exception {
+      String expectedGroup, String expectedPerms) throws Exception {
     File output = TaskLog.getRealTaskLogFileLocation(taskId, 
         TaskLog.LogName.DEBUGOUT);
     // Check the presence of the output file if the script is to be run.
@@ -161,10 +161,8 @@
     assertTrue(out.contains("failing map"));
     if (expectedPerms != null && expectedUser != null) {
       //check whether the debugout file ownership/permissions are as expected
-      Groups groups = new Groups(new Configuration());
-      String ttGroup = groups.getGroups(expectedUser).get(0);
       TestTaskTrackerLocalization.checkFilePermissions(output.getAbsolutePath(),
-          expectedPerms, expectedUser, ttGroup);
+          expectedPerms, expectedUser, expectedGroup);
     }
   }
 

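The new expectedGroup parameter lets callers name the group explicitly instead of re-deriving it from the expected user's group list, which is what broke once the tests started keying off the binary's group. For orientation, a hedged sketch of an ownership-and-permissions check in the same spirit as TestTaskTrackerLocalization.checkFilePermissions (the assertFileAttrs helper is hypothetical, and the sketch assumes a POSIX ls whose first four fields are mode, link count, owner and group):

    import java.io.BufferedReader;
    import java.io.IOException;
    import java.io.InputStreamReader;

    public class FileAttrCheck {
      // Hypothetical stand-in for checkFilePermissions: shell out to
      // "ls -ld" and compare the mode, owner and group fields.
      static void assertFileAttrs(String path, String expectedPerms,
          String expectedUser, String expectedGroup)
          throws IOException, InterruptedException {
        Process ls = new ProcessBuilder("ls", "-ld", path).start();
        BufferedReader out = new BufferedReader(
            new InputStreamReader(ls.getInputStream()));
        String line = out.readLine();
        ls.waitFor();
        if (line == null) {
          throw new AssertionError("ls produced no output for " + path);
        }
        String[] f = line.trim().split("\\s+");
        // startsWith, because some systems append "." or "+" to the mode.
        if (!f[0].startsWith(expectedPerms) || !f[2].equals(expectedUser)
            || !f[3].equals(expectedGroup)) {
          throw new AssertionError(path + " is " + f[0] + " " + f[2] + ":"
              + f[3] + "; expected " + expectedPerms + " " + expectedUser
              + ":" + expectedGroup);
        }
      }

      public static void main(String[] args) throws Exception {
        // Typical Linux /tmp; adjust for your system.
        assertFileAttrs("/tmp", "drwxrwxrwt", "root", "root");
      }
    }
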
Modified: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestDebugScriptWithLinuxTaskController.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestDebugScriptWithLinuxTaskController.java?rev=918326&r1=918325&r2=918326&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestDebugScriptWithLinuxTaskController.java (original)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestDebugScriptWithLinuxTaskController.java Wed Mar  3 04:52:17 2010
@@ -59,7 +59,7 @@
     TaskAttemptID taskId = new TaskAttemptID(
         new TaskID(jobId,TaskType.MAP, 0), 0);
     TestDebugScript.verifyDebugScriptOutput(taskId, splits[0],
-        "-rw-rw----");
+        taskTrackerSpecialGroup, "-rw-rw----");
     TestDebugScript.cleanupDebugScriptDirs();
   }
 }

Modified: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestJobExecutionAsDifferentUser.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestJobExecutionAsDifferentUser.java?rev=918326&r1=918325&r2=918326&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestJobExecutionAsDifferentUser.java (original)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestJobExecutionAsDifferentUser.java Wed Mar  3 04:52:17 2010
@@ -19,6 +19,7 @@
 package org.apache.hadoop.mapred;
 
 import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
 
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -40,34 +41,41 @@
       return;
     }
     startCluster();
-    Path inDir = new Path("input");
-    Path outDir = new Path("output");
 
-    RunningJob job;
+    
+    taskControllerUser.doAs(new PrivilegedExceptionAction<Object>() {
+      public Object run() throws Exception {
+        Path inDir = new Path("input");
+        Path outDir = new Path("output");
+
+        RunningJob job;
+        // Run a job with zero maps/reduces
+        job = UtilsForTests.runJob(getClusterConf(), inDir, outDir, 0, 0);
+        job.waitForCompletion();
+        assertTrue("Job failed", job.isSuccessful());
+        assertOwnerShip(outDir);
+
+        // Run a job with 1 map and zero reduces
+        job = UtilsForTests.runJob(getClusterConf(), inDir, outDir, 1, 0);
+        job.waitForCompletion();
+        assertTrue("Job failed", job.isSuccessful());
+        assertOwnerShip(outDir);
+
+        // Run a normal job with maps/reduces
+        job = UtilsForTests.runJob(getClusterConf(), inDir, outDir, 1, 1);
+        job.waitForCompletion();
+        assertTrue("Job failed", job.isSuccessful());
+        assertOwnerShip(outDir);
+
+        // Run a job with jvm reuse
+        JobConf myConf = getClusterConf();
+        myConf.set(JobContext.JVM_NUMTASKS_TORUN, "-1");
+        String[] args = { "-m", "6", "-r", "3", "-mt", "1000", "-rt", "1000" };
+        assertEquals(0, ToolRunner.run(myConf, new SleepJob(), args)); 
+        return null;
+      }
+    });
 
-    // Run a job with zero maps/reduces
-    job = UtilsForTests.runJob(getClusterConf(), inDir, outDir, 0, 0);
-    job.waitForCompletion();
-    assertTrue("Job failed", job.isSuccessful());
-    assertOwnerShip(outDir);
-
-    // Run a job with 1 map and zero reduces
-    job = UtilsForTests.runJob(getClusterConf(), inDir, outDir, 1, 0);
-    job.waitForCompletion();
-    assertTrue("Job failed", job.isSuccessful());
-    assertOwnerShip(outDir);
-
-    // Run a normal job with maps/reduces
-    job = UtilsForTests.runJob(getClusterConf(), inDir, outDir, 1, 1);
-    job.waitForCompletion();
-    assertTrue("Job failed", job.isSuccessful());
-    assertOwnerShip(outDir);
-
-    // Run a job with jvm reuse
-    JobConf myConf = getClusterConf();
-    myConf.set(JobContext.JVM_NUMTASKS_TORUN, "-1");
-    String[] args = { "-m", "6", "-r", "3", "-mt", "1000", "-rt", "1000" };
-    assertEquals(0, ToolRunner.run(myConf, new SleepJob(), args));
   }
   
   public void testEnvironment() throws Exception {
@@ -75,23 +83,29 @@
       return;
     }
     startCluster();
-    TestMiniMRChildTask childTask = new TestMiniMRChildTask();
-    Path inDir = new Path("input1");
-    Path outDir = new Path("output1");
-    try {
-      childTask.runTestTaskEnv(getClusterConf(), inDir, outDir, false);
-    } catch (IOException e) {
-      fail("IOException thrown while running enviroment test."
-          + e.getMessage());
-    } finally {
-      FileSystem outFs = outDir.getFileSystem(getClusterConf());
-      if (outFs.exists(outDir)) {
-        assertOwnerShip(outDir);
-        outFs.delete(outDir, true);
-      } else {
-        fail("Output directory does not exist" + outDir.toString());
+    taskControllerUser.doAs(new PrivilegedExceptionAction<Object>() {
+      public Object run() throws Exception {
+
+        TestMiniMRChildTask childTask = new TestMiniMRChildTask();
+        Path inDir = new Path("input1");
+        Path outDir = new Path("output1");
+        try {
+          childTask.runTestTaskEnv(getClusterConf(), inDir, outDir, false);
+        } catch (IOException e) {
+          fail("IOException thrown while running enviroment test."
+              + e.getMessage());
+        } finally {
+          FileSystem outFs = outDir.getFileSystem(getClusterConf());
+          if (outFs.exists(outDir)) {
+            assertOwnerShip(outDir);
+            outFs.delete(outDir, true);
+          } else {
+            fail("Output directory does not exist" + outDir.toString());
+          }
+          return null;
+        }
       }
-    }
+    });
   }
 
   /** Ensure that SIGQUIT can be properly sent by the LinuxTaskController
@@ -104,19 +118,23 @@
 
     // Run a job that should timeout and trigger a SIGQUIT.
     startCluster();
-    JobConf conf = getClusterConf();
-    conf.setInt(JobContext.TASK_TIMEOUT, 10000);
-    conf.setInt(Job.COMPLETION_POLL_INTERVAL_KEY, 50);
-    SleepJob sleepJob = new SleepJob();
-    sleepJob.setConf(conf);
-    Job job = sleepJob.createJob(1, 0, 30000, 1, 0, 0);
-    job.setMaxMapAttempts(1);
-    int prevNumSigQuits = MyLinuxTaskController.attemptedSigQuits;
-    job.waitForCompletion(true);
-    assertTrue("Did not detect a new SIGQUIT!",
-        prevNumSigQuits < MyLinuxTaskController.attemptedSigQuits);
-    assertEquals("A SIGQUIT attempt failed!", 0,
-        MyLinuxTaskController.failedSigQuits);
-
+    taskControllerUser.doAs(new PrivilegedExceptionAction<Object>() {
+      public Object run() throws Exception {
+        JobConf conf = getClusterConf();
+        conf.setInt(JobContext.TASK_TIMEOUT, 10000);
+        conf.setInt(Job.COMPLETION_POLL_INTERVAL_KEY, 50);
+        SleepJob sleepJob = new SleepJob();
+        sleepJob.setConf(conf);
+        Job job = sleepJob.createJob(1, 0, 30000, 1, 0, 0);
+        job.setMaxMapAttempts(1);
+        int prevNumSigQuits = MyLinuxTaskController.attemptedSigQuits;
+        job.waitForCompletion(true);
+        assertTrue("Did not detect a new SIGQUIT!",
+            prevNumSigQuits < MyLinuxTaskController.attemptedSigQuits);
+        assertEquals("A SIGQUIT attempt failed!", 0,
+            MyLinuxTaskController.failedSigQuits);
+        return null;
+      }
+    });
   }
 }

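All three tests in this file are now wrapped in taskControllerUser.doAs(...), so job submission and the follow-up assertions run under the identity built from -Dtaskcontroller-ugi rather than under the JUnit process owner. A minimal sketch of that doAs pattern, with a hypothetical user name standing in for the test property (the same wrapper recurs in TestKillSubProcessesWithLinuxTaskController and TestPipesAsDifferentUser below):

    import java.security.PrivilegedExceptionAction;

    import org.apache.hadoop.security.UserGroupInformation;

    public class DoAsSketch {
      public static void main(String[] args) throws Exception {
        // "alice" is a placeholder; the tests construct taskControllerUser
        // from the user named in -Dtaskcontroller-ugi=user,group.
        UserGroupInformation ugi =
            UserGroupInformation.createRemoteUser("alice");

        // Everything inside run() executes as the remote user, so any
        // job submitted here reaches the cluster under that identity.
        String who = ugi.doAs(new PrivilegedExceptionAction<String>() {
          public String run() throws Exception {
            return UserGroupInformation.getCurrentUser().getUserName();
          }
        });
        System.out.println("ran as: " + who);
      }
    }
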
Modified: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestKillSubProcessesWithLinuxTaskController.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestKillSubProcessesWithLinuxTaskController.java?rev=918326&r1=918325&r2=918326&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestKillSubProcessesWithLinuxTaskController.java (original)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestKillSubProcessesWithLinuxTaskController.java Wed Mar  3 04:52:17 2010
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.mapred;
 
+import java.security.PrivilegedExceptionAction;
+
 /**
  * Test killing of child processes spawned by the jobs with LinuxTaskController
  * running the jobs as a user different from the user running the cluster. 
@@ -32,11 +34,16 @@
       return;
     }
     startCluster();
-    JobConf myConf = getClusterConf();
-    JobTracker jt = mrCluster.getJobTrackerRunner().getJobTracker();
+    taskControllerUser.doAs(new PrivilegedExceptionAction<Object>() {
+      public Object run() throws Exception {
+        JobConf myConf = getClusterConf();
+        JobTracker jt = mrCluster.getJobTrackerRunner().getJobTracker();
 
-    TestKillSubProcesses.mr = mrCluster;
-    TestKillSubProcesses sbProc = new TestKillSubProcesses();
-    sbProc.runTests(myConf, jt);
+        TestKillSubProcesses.mr = mrCluster;
+        TestKillSubProcesses sbProc = new TestKillSubProcesses();
+        sbProc.runTests(myConf, jt);
+        return null;
+      }
+    });
   }
 }

Modified: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestLinuxTaskController.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestLinuxTaskController.java?rev=918326&r1=918325&r2=918326&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestLinuxTaskController.java (original)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestLinuxTaskController.java Wed Mar  3 04:52:17 2010
@@ -104,11 +104,8 @@
         conf);
     validateTaskControllerSetup(controller, true);
 
-    // get the current ugi and set the task controller group owner in conf
-    Groups groups = new Groups(new Configuration());
-    String ttGroup = groups.getGroups(
-        UserGroupInformation.getCurrentUser().getUserName()).get(0);
-    conf.set(TTConfig.TT_GROUP, ttGroup);
+    conf.set(TTConfig.TT_GROUP,
+        ClusterWithLinuxTaskController.taskTrackerSpecialGroup);
     // write the task-controller's conf file
     ClusterWithLinuxTaskController.createTaskControllerConf(taskControllerPath,
         conf);

Modified: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestLocalizationWithLinuxTaskController.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestLocalizationWithLinuxTaskController.java?rev=918326&r1=918325&r2=918326&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestLocalizationWithLinuxTaskController.java (original)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestLocalizationWithLinuxTaskController.java Wed Mar  3 04:52:17 2010
@@ -42,8 +42,6 @@
 
   private File configFile;
 
-  private static String taskTrackerSpecialGroup;
-
   @Override
   protected boolean canRun() {
     return ClusterWithLinuxTaskController.shouldRun();
@@ -64,7 +62,6 @@
         System.getProperty(ClusterWithLinuxTaskController.TASKCONTROLLER_PATH);
     String execPath = path + "/task-controller";
     ((MyLinuxTaskController) taskController).setTaskControllerExe(execPath);
-    taskTrackerSpecialGroup = getFilePermissionAttrs(execPath)[2];
     taskController.setConf(trackerFConf);
     taskController.setup();
 
@@ -119,13 +116,13 @@
       assertTrue("user-dir in taskTrackerSubdir " + taskTrackerSubDir
           + "is not created!", userDir.exists());
       checkFilePermissions(userDir.getAbsolutePath(), "dr-xrws---", task
-          .getUser(), taskTrackerSpecialGroup);
+          .getUser(), ClusterWithLinuxTaskController.taskTrackerSpecialGroup);
 
       File jobCache = new File(userDir, TaskTracker.JOBCACHE);
       assertTrue("jobcache in the userDir " + userDir + " isn't created!",
           jobCache.exists());
       checkFilePermissions(jobCache.getAbsolutePath(), "dr-xrws---", task
-          .getUser(), taskTrackerSpecialGroup);
+          .getUser(), ClusterWithLinuxTaskController.taskTrackerSpecialGroup);
 
       // Verify the distributed cache dir.
       File distributedCacheDir =
@@ -134,7 +131,8 @@
       assertTrue("distributed cache dir " + distributedCacheDir
           + " doesn't exists!", distributedCacheDir.exists());
       checkFilePermissions(distributedCacheDir.getAbsolutePath(),
-          "dr-xrws---", task.getUser(), taskTrackerSpecialGroup);
+          "dr-xrws---", task.getUser(),
+          ClusterWithLinuxTaskController.taskTrackerSpecialGroup);
     }
   }
 
@@ -147,7 +145,7 @@
               .toString()));
       // check the private permissions on the job directory
       checkFilePermissions(jobDir.getAbsolutePath(), "dr-xrws---", task
-          .getUser(), taskTrackerSpecialGroup);
+          .getUser(), ClusterWithLinuxTaskController.taskTrackerSpecialGroup);
     }
 
     // check the private permissions of various directories
@@ -159,7 +157,8 @@
     dirs.add(new Path(jarsDir, "lib"));
     for (Path dir : dirs) {
       checkFilePermissions(dir.toUri().getPath(), "dr-xrws---",
-          task.getUser(), taskTrackerSpecialGroup);
+          task.getUser(),
+          ClusterWithLinuxTaskController.taskTrackerSpecialGroup);
     }
 
     // job-work dir needs user writable permissions
@@ -167,7 +166,7 @@
         lDirAlloc.getLocalPathToRead(TaskTracker.getJobWorkDir(task.getUser(),
             jobId.toString()), trackerFConf);
     checkFilePermissions(jobWorkDir.toUri().getPath(), "drwxrws---", task
-        .getUser(), taskTrackerSpecialGroup);
+        .getUser(), ClusterWithLinuxTaskController.taskTrackerSpecialGroup);
 
     // check the private permissions of various files
     List<Path> files = new ArrayList<Path>();
@@ -179,7 +178,7 @@
     files.add(new Path(jarsDir, "lib" + Path.SEPARATOR + "lib2.jar"));
     for (Path file : files) {
       checkFilePermissions(file.toUri().getPath(), "-r-xrwx---", task
-          .getUser(), taskTrackerSpecialGroup);
+          .getUser(), ClusterWithLinuxTaskController.taskTrackerSpecialGroup);
     }
   }
 
@@ -195,7 +194,8 @@
     dirs.add(new Path(attemptLogFiles[1].getParentFile().getAbsolutePath()));
     for (Path dir : dirs) {
       checkFilePermissions(dir.toUri().getPath(), "drwxrws---",
-          task.getUser(), taskTrackerSpecialGroup);
+          task.getUser(),
+          ClusterWithLinuxTaskController.taskTrackerSpecialGroup);
     }
 
     // check the private permissions of various files
@@ -205,7 +205,7 @@
         task.isTaskCleanupTask()), trackerFConf));
     for (Path file : files) {
       checkFilePermissions(file.toUri().getPath(), "-rwxrwx---", task
-          .getUser(), taskTrackerSpecialGroup);
+          .getUser(), ClusterWithLinuxTaskController.taskTrackerSpecialGroup);
     }
   }
 }

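The expected modes such as dr-xrws--- deserve a note: the s in the group triplet is the setgid bit, and on Linux a setgid directory makes files created inside it inherit the directory's group, which lines up with the tests asserting taskTrackerSpecialGroup on everything beneath these directories. A small sketch of reading that bit out of a symbolic mode string (the isSetgid helper is hypothetical):

    public class ModeBits {
      // True if a symbolic mode like "dr-xrws---" carries the setgid bit.
      static boolean isSetgid(String symbolicMode) {
        // Index 6 is the group execute slot: 'x' is plain execute,
        // 's' is setgid plus execute, 'S' is setgid without execute.
        char c = symbolicMode.charAt(6);
        return c == 's' || c == 'S';
      }

      public static void main(String[] args) {
        System.out.println(isSetgid("dr-xrws---")); // true
        System.out.println(isSetgid("drwxr-x---")); // false
      }
    }
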
Modified: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestTrackerDistributedCacheManagerWithLinuxTaskController.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestTrackerDistributedCacheManagerWithLinuxTaskController.java?rev=918326&r1=918325&r2=918326&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestTrackerDistributedCacheManagerWithLinuxTaskController.java (original)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestTrackerDistributedCacheManagerWithLinuxTaskController.java Wed Mar  3 04:52:17 2010
@@ -37,7 +37,6 @@
     TestTrackerDistributedCacheManager {
 
   private File configFile;
-  private String taskTrackerSpecialGroup;
 
   private static final Log LOG =
       LogFactory
@@ -65,9 +64,6 @@
     ((MyLinuxTaskController)taskController).setTaskControllerExe(execPath);
     taskController.setConf(conf);
     taskController.setup();
-
-    taskTrackerSpecialGroup =
-        TestTaskTrackerLocalization.getFilePermissionAttrs(execPath)[2];
   }
 
   @Override
@@ -113,7 +109,8 @@
     for (Path p : localCacheFiles) {
       // First make sure that the cache file has proper permissions.
       TestTaskTrackerLocalization.checkFilePermissions(p.toUri().getPath(),
-          "-r-xrwx---", userName, taskTrackerSpecialGroup);
+          "-r-xrwx---", userName,
+          ClusterWithLinuxTaskController.taskTrackerSpecialGroup);
       // Now. make sure that all the path components also have proper
       // permissions.
       checkPermissionOnPathComponents(p.toUri().getPath(), userName);
@@ -148,7 +145,8 @@
     File path = new File(cachedFilePath).getParentFile();
     while (!path.getAbsolutePath().equals(leadingStringForFirstFile)) {
       TestTaskTrackerLocalization.checkFilePermissions(path.getAbsolutePath(),
-          "dr-xrws---", userName, taskTrackerSpecialGroup);
+          "dr-xrws---", userName, 
+          ClusterWithLinuxTaskController.taskTrackerSpecialGroup);
       path = path.getParentFile();
     }
   }

Modified: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/pipes/TestPipes.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/pipes/TestPipes.java?rev=918326&r1=918325&r2=918326&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/pipes/TestPipes.java (original)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/pipes/TestPipes.java Wed Mar  3 04:52:17 2010
@@ -73,8 +73,8 @@
     }
     MiniDFSCluster dfs = null;
     MiniMRCluster mr = null;
-    Path inputPath = new Path("/testing/in");
-    Path outputPath = new Path("/testing/out");
+    Path inputPath = new Path("testing/in");
+    Path outputPath = new Path("testing/out");
     try {
       final int numSlaves = 2;
       Configuration conf = new Configuration();
@@ -151,7 +151,7 @@
                           int numMaps, int numReduces, String[] expectedResults,
                           JobConf conf
                          ) throws IOException {
-    Path wordExec = new Path("/testing/bin/application");
+    Path wordExec = new Path("testing/bin/application");
     JobConf job = null;
     if(conf == null) {
       job = mr.createJobConf();
@@ -232,7 +232,7 @@
                             "pipes");
     Path inDir = new Path(testDir, "input");
     nonPipedOutDir = new Path(testDir, "output");
-    Path wordExec = new Path("/testing/bin/application");
+    Path wordExec = new Path("testing/bin/application");
     Path jobXml = new Path(testDir, "job.xml");
     {
       FileSystem fs = dfs.getFileSystem();

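Dropping the leading slash is the point of this hunk: a relative Path is qualified against the file system's working directory, which on HDFS defaults to /user/<current user>, so once these tests run under doAs (next file) each test user writes beneath its own home directory instead of colliding on a shared /testing tree. A short sketch of that resolution (with the default local Configuration it resolves against the process working directory instead):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class RelativePathDemo {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());

        // "testing/in" has no leading slash, so it is resolved against
        // fs.getWorkingDirectory(): /user/<user> on HDFS, the process
        // working directory on the local file system.
        Path relative = new Path("testing/in");
        System.out.println(fs.makeQualified(relative));
      }
    }
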
Modified: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/pipes/TestPipesAsDifferentUser.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/pipes/TestPipesAsDifferentUser.java?rev=918326&r1=918325&r2=918326&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/pipes/TestPipesAsDifferentUser.java (original)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/pipes/TestPipesAsDifferentUser.java Wed Mar  3 04:52:17 2010
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.mapred.pipes;
 
+import java.security.PrivilegedExceptionAction;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileSystem;
@@ -46,29 +48,35 @@
     }
 
     super.startCluster();
-    JobConf clusterConf = getClusterConf();
-    Path inputPath = new Path(homeDirectory, "in");
-    Path outputPath = new Path(homeDirectory, "out");
-
-    TestPipes.writeInputFile(FileSystem.get(clusterConf), inputPath);
-    TestPipes.runProgram(mrCluster, dfsCluster, TestPipes.wordCountSimple,
-        inputPath, outputPath, 3, 2, TestPipes.twoSplitOutput, clusterConf);
-    assertOwnerShip(outputPath);
-    TestPipes.cleanup(dfsCluster.getFileSystem(), outputPath);
-
-    TestPipes.runProgram(mrCluster, dfsCluster, TestPipes.wordCountSimple,
-        inputPath, outputPath, 3, 0, TestPipes.noSortOutput, clusterConf);
-    assertOwnerShip(outputPath);
-    TestPipes.cleanup(dfsCluster.getFileSystem(), outputPath);
-
-    TestPipes.runProgram(mrCluster, dfsCluster, TestPipes.wordCountPart,
-        inputPath, outputPath, 3, 2, TestPipes.fixedPartitionOutput,
-        clusterConf);
-    assertOwnerShip(outputPath);
-    TestPipes.cleanup(dfsCluster.getFileSystem(), outputPath);
-
-    TestPipes.runNonPipedProgram(mrCluster, dfsCluster,
-        TestPipes.wordCountNoPipes, clusterConf);
-    assertOwnerShip(TestPipes.nonPipedOutDir, FileSystem.getLocal(clusterConf));
+    taskControllerUser.doAs(new PrivilegedExceptionAction<Object>() {
+      public Object run() throws Exception {
+        JobConf clusterConf = getClusterConf();
+        Path inputPath = new Path(homeDirectory, "in");
+        Path outputPath = new Path(homeDirectory, "out");
+
+        TestPipes.writeInputFile(FileSystem.get(clusterConf), inputPath);
+        TestPipes.runProgram(mrCluster, dfsCluster, TestPipes.wordCountSimple,
+            inputPath, outputPath, 3, 2, TestPipes.twoSplitOutput, clusterConf);
+        assertOwnerShip(outputPath);
+        TestPipes.cleanup(dfsCluster.getFileSystem(), outputPath);
+
+        TestPipes.runProgram(mrCluster, dfsCluster, TestPipes.wordCountSimple,
+            inputPath, outputPath, 3, 0, TestPipes.noSortOutput, clusterConf);
+        assertOwnerShip(outputPath);
+        TestPipes.cleanup(dfsCluster.getFileSystem(), outputPath);
+
+        TestPipes.runProgram(mrCluster, dfsCluster, TestPipes.wordCountPart,
+            inputPath, outputPath, 3, 2, TestPipes.fixedPartitionOutput,
+            clusterConf);
+        assertOwnerShip(outputPath);
+        TestPipes.cleanup(dfsCluster.getFileSystem(), outputPath);
+
+        TestPipes.runNonPipedProgram(mrCluster, dfsCluster,
+            TestPipes.wordCountNoPipes, clusterConf);
+        assertOwnerShip(TestPipes.nonPipedOutDir, FileSystem
+            .getLocal(clusterConf));
+        return null;
+      }
+    });
   }
 }