Posted to common-commits@hadoop.apache.org by om...@apache.org on 2011/03/04 04:44:57 UTC

svn commit: r1077137 [4/5] - in /hadoop/common/branches/branch-0.20-security-patches: ./ .eclipse.templates/ ivy/ src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ src/contrib/hdfsproxy/src/test/org/apache/hadoop/hdfsproxy/ src/contrib/stream...

Modified: hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/mapred/TestJobHistory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/mapred/TestJobHistory.java?rev=1077137&r1=1077136&r2=1077137&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/mapred/TestJobHistory.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/mapred/TestJobHistory.java Fri Mar  4 03:44:54 2011
@@ -40,7 +40,7 @@ import org.apache.hadoop.hdfs.MiniDFSClu
 import org.apache.hadoop.mapred.JobHistory.*;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-
+import org.apache.hadoop.security.UserGroupInformation;
 /**
  * Tests the JobHistory files - to catch any changes to JobHistory that can
  * cause issues for the execution of JobTracker.RecoveryManager, HistoryViewer.
@@ -831,7 +831,7 @@ public class TestJobHistory extends Test
       // Make sure that the job is not removed from memory until we do finish
       // the validation of history file content
       conf.setInt("mapred.jobtracker.completeuserjobs.maximum", 10);
-
+      conf.set("user.name", UserGroupInformation.getCurrentUser().getUserName());
       // Run a job that will be succeeded and validate its history file
       RunningJob job = UtilsForTests.runJobSucceed(conf, inDir, outDir);
       
@@ -840,7 +840,7 @@ public class TestJobHistory extends Test
           doneFolder, doneDir.getName());
       JobID id = job.getID();
       String logFileName = getDoneFile(conf, id, doneDir);
-
+      assertNotNull(logFileName);
       // Framework history log file location
       Path logFile = new Path(doneDir, logFileName);
       FileSystem fileSys = logFile.getFileSystem(conf);
@@ -918,7 +918,7 @@ public class TestJobHistory extends Test
       // Make sure that the job is not removed from memory until we do finish
       // the validation of history file content
       conf.setInt("mapred.jobtracker.completeuserjobs.maximum", 10);
-
+      conf.set("user.name", UserGroupInformation.getCurrentUser().getUserName());
       // Run a job that will be succeeded and validate its history file
       RunningJob job = UtilsForTests.runJobSucceed(conf, inDir, outDir);
       
@@ -1073,7 +1073,7 @@ public class TestJobHistory extends Test
 
       Path inDir = new Path(TEST_ROOT_DIR + "/succeed/input1");
       Path outDir = new Path(TEST_ROOT_DIR + "/succeed/output1");
-
+      conf.set("user.name", UserGroupInformation.getCurrentUser().getUserName());
       // validate for the case of null(default)
       RunningJob job = UtilsForTests.runJobSucceed(conf, inDir, outDir);
       validateJobHistoryUserLogLocation(job.getID(), conf);
@@ -1167,7 +1167,7 @@ public class TestJobHistory extends Test
 
       Path inDir = new Path(TEST_ROOT_DIR + "/succeedfailkilljob/input");
       Path outDir = new Path(TEST_ROOT_DIR + "/succeedfailkilljob/output");
-
+      conf.set("user.name", UserGroupInformation.getCurrentUser().getUserName());
       // Run a job that will be succeeded and validate its job status
       // existing in history file
       RunningJob job = UtilsForTests.runJobSucceed(conf, inDir, outDir);
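
All of the hunks above stamp the submitting user into the job conf explicitly, now that the old conf-serialized UGI is gone. A minimal sketch of that recurring pattern, assuming the reworked UserGroupInformation API on this branch (the helper class and method names are illustrative only):

    import java.io.IOException;

    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.security.UserGroupInformation;

    public class CurrentUserConfSketch {
      // Pin "user.name" to the current user, as the test hunks above do;
      // getCurrentUser() replaces the removed UnixUserGroupInformation lookup.
      public static JobConf withCurrentUser(JobConf conf) throws IOException {
        conf.set("user.name",
                 UserGroupInformation.getCurrentUser().getUserName());
        return conf;
      }
    }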

Modified: hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/mapred/TestJobQueueInformation.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/mapred/TestJobQueueInformation.java?rev=1077137&r1=1077136&r2=1077137&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/mapred/TestJobQueueInformation.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/mapred/TestJobQueueInformation.java Fri Mar  4 03:44:54 2011
@@ -34,7 +34,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.mapreduce.server.jobtracker.TaskTracker;
 
 import junit.framework.TestCase;

Modified: hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/mapred/TestJobTrackerRestart.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/mapred/TestJobTrackerRestart.java?rev=1077137&r1=1077136&r2=1077137&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/mapred/TestJobTrackerRestart.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/mapred/TestJobTrackerRestart.java Fri Mar  4 03:44:54 2011
@@ -533,7 +533,7 @@ public class TestJobTrackerRestart exten
       jtConf.setLong("mapred.tasktracker.expiry.interval", 25 * 1000);
       jtConf.setBoolean("mapred.acls.enabled", true);
       // get the user group info
-      UserGroupInformation ugi = UserGroupInformation.getCurrentUGI();
+      UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
       jtConf.set("mapred.queue.default.acl-submit-job", ugi.getUserName());
       
       mr = new MiniMRCluster(1, namenode, 1, null, null, jtConf);

Modified: hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/mapred/TestLocalizationWithLinuxTaskController.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/mapred/TestLocalizationWithLinuxTaskController.java?rev=1077137&r1=1077136&r2=1077137&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/mapred/TestLocalizationWithLinuxTaskController.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/mapred/TestLocalizationWithLinuxTaskController.java Fri Mar  4 03:44:54 2011
@@ -78,12 +78,14 @@ public class TestLocalizationWithLinuxTa
     String ugi =
         System.getProperty(ClusterWithLinuxTaskController.TASKCONTROLLER_UGI);
     JobConf jobConf = new JobConf(task.getConf());
-    jobConf.setUser(ugi.split(",")[0]);
+    String user = ugi.split(",")[0];
+    jobConf.setUser(user);
     File jobConfFile = uploadJobConf(jobConf);
     // Create the task again to change the job-user
     task =
       new MapTask(jobConfFile.toURI().toString(), taskId, 1, null, 1);
     task.setConf(jobConf);
+    task.setUser(user);
   }
 
   @Override

Modified: hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/mapred/TestLostTracker.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/mapred/TestLostTracker.java?rev=1077137&r1=1077136&r2=1077137&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/mapred/TestLostTracker.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/mapred/TestLostTracker.java Fri Mar  4 03:44:54 2011
@@ -20,6 +20,7 @@ package org.apache.hadoop.mapred;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.*;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.security.UserGroupInformation;
 
 import junit.framework.TestCase;
 import java.io.*;
@@ -48,7 +49,7 @@ public class TestLostTracker extends Tes
     int numReds = 1;
     String mapSignalFile = UtilsForTests.getMapSignalFile(shareDir);
     String redSignalFile = UtilsForTests.getReduceSignalFile(shareDir);
-    
+    jobConf.set("user.name", UserGroupInformation.getCurrentUser().getUserName());
     // Configure the job
     JobConf job = configureJob(jobConf, numMaps, numReds, 
                                mapSignalFile, redSignalFile);

Modified: hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/mapred/TestMapredSystemDir.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/mapred/TestMapredSystemDir.java?rev=1077137&r1=1077136&r2=1077137&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/mapred/TestMapredSystemDir.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/mapred/TestMapredSystemDir.java Fri Mar  4 03:44:54 2011
@@ -17,6 +17,9 @@
  */
 package org.apache.hadoop.mapred;
 
+import java.io.IOException;
+import java.security.PrivilegedAction;
+import java.security.PrivilegedExceptionAction;
 import junit.framework.TestCase;
 
 import org.apache.commons.logging.Log;
@@ -34,63 +37,81 @@ import org.apache.hadoop.security.*;
 public class TestMapredSystemDir extends TestCase {
   private static final Log LOG = LogFactory.getLog(TestMapredSystemDir.class);
   
-  // dfs ugi
-  private static final UnixUserGroupInformation DFS_UGI = 
-    TestMiniMRWithDFSWithDistinctUsers.createUGI("dfs", true);
   // mapred ugi
-  private static final UnixUserGroupInformation MR_UGI = 
+  private static final UserGroupInformation MR_UGI = 
     TestMiniMRWithDFSWithDistinctUsers.createUGI("mr", false);
   private static final FsPermission SYSTEM_DIR_PERMISSION =
     FsPermission.createImmutable((short) 0733); // rwx-wx-wx
   
   public void testGarbledMapredSystemDir() throws Exception {
+    final Configuration conf = new Configuration();
     MiniDFSCluster dfs = null;
     MiniMRCluster mr = null;
     try {
       // start dfs
-      Configuration conf = new Configuration();
       conf.set("dfs.permissions.supergroup", "supergroup");
-      UnixUserGroupInformation.saveToConf(conf,
-          UnixUserGroupInformation.UGI_PROPERTY_NAME, DFS_UGI);
+      conf.set("mapred.system.dir", "/mapred");
       dfs = new MiniDFSCluster(conf, 1, true, null);
       FileSystem fs = dfs.getFileSystem();
-      
-      // create mapred.system.dir
-      Path mapredSysDir = new Path("/mapred");
+      // create Configs.SYSTEM_DIR's parent (the parent has to be given
+      // permissions since the JT internally tries to delete the leaf of
+      // the directory structure)
+      Path mapredSysDir =  new Path(conf.get("mapred.system.dir")).getParent();
       fs.mkdirs(mapredSysDir);
       fs.setPermission(mapredSysDir, new FsPermission(SYSTEM_DIR_PERMISSION));
       fs.setOwner(mapredSysDir, "mr", "mrgroup");
 
-      // start mr (i.e jobtracker)
-      Configuration mrConf = new Configuration();
-      UnixUserGroupInformation.saveToConf(mrConf,
-          UnixUserGroupInformation.UGI_PROPERTY_NAME, MR_UGI);
-      mr = new MiniMRCluster(0, 0, 0, dfs.getFileSystem().getUri().toString(),
-                             1, null, null, MR_UGI, new JobConf(mrConf));
-      JobTracker jobtracker = mr.getJobTrackerRunner().getJobTracker();
+      final MiniDFSCluster finalDFS = dfs;
+      
+      // Become MR_UGI to start the job tracker...
+      mr = MR_UGI.doAs(new PrivilegedExceptionAction<MiniMRCluster>() {
+        @Override
+        public MiniMRCluster run() throws Exception {
+          // start mr (i.e. the jobtracker)
+          Configuration mrConf = new Configuration();
+          
+          FileSystem fs = finalDFS.getFileSystem();
+          MiniMRCluster mr2 = new MiniMRCluster(0, 0, 0, fs.getUri().toString(),
+              1, null, null, MR_UGI, new JobConf(mrConf));
+          JobTracker jobtracker = mr2.getJobTrackerRunner().getJobTracker();
+          // add garbage to mapred.system.dir
+          Path garbage = new Path(jobtracker.getSystemDir(), "garbage");
+          fs.mkdirs(garbage);
+          fs.setPermission(garbage, new FsPermission(SYSTEM_DIR_PERMISSION));
+          return mr2;
+        }
+      });
       
-      // add garbage to mapred.system.dir
-      Path garbage = new Path(jobtracker.getSystemDir(), "garbage");
-      fs.mkdirs(garbage);
-      fs.setPermission(garbage, new FsPermission(SYSTEM_DIR_PERMISSION));
+      // Drop back to regular user (superuser) to change owner of garbage dir
+      final Path garbage = new Path(
+          mr.getJobTrackerRunner().getJobTracker().getSystemDir(), "garbage");
       fs.setOwner(garbage, "test", "test-group");
       
-      // stop the jobtracker
-      mr.stopJobTracker();
-      mr.getJobTrackerConf().setBoolean("mapred.jobtracker.restart.recover", 
-                                        false);
-      // start jobtracker but dont wait for it to be up
-      mr.startJobTracker(false);
-
-      // check 5 times .. each time wait for 2 secs to check if the jobtracker
-      // has crashed or not.
-      for (int i = 0; i < 5; ++i) {
-        LOG.info("Check #" + i);
-        if (!mr.getJobTrackerRunner().isActive()) {
-          return;
+      // Again become MR_UGI to start/stop the MR cluster
+      final MiniMRCluster mr2 = mr;
+      MR_UGI.doAs(new PrivilegedExceptionAction<Object>() {
+        @Override
+        public Object run() throws Exception {
+          // stop the jobtracker
+          mr2.stopJobTracker();
+          mr2.getJobTrackerConf().setBoolean(
+              "mapred.jobtracker.restart.recover", false);
+          // start jobtracker but don't wait for it to be up
+          mr2.startJobTracker(false);
+
+          // check 5 times .. each time wait for 2 secs to check if the
+          // jobtracker has crashed or not.
+          for (int i = 0; i < 5; ++i) {
+            LOG.info("Check #" + i);
+            if (!mr2.getJobTrackerRunner().isActive()) {
+              return null;
+            }
+            UtilsForTests.waitFor(2000);
+          }
+          return null;
         }
-        UtilsForTests.waitFor(2000);
-      }
+      });
 
       assertFalse("JobTracker did not bail out (waited for 10 secs)", 
                   mr.getJobTrackerRunner().isActive());
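
The heart of this rewrite is running the cluster operations as MR_UGI through doAs rather than saving the UGI into the Configuration. A minimal standalone sketch of the doAs idiom used above, assuming createUserForTesting registers the user for this JVM (class name illustrative):

    import java.security.PrivilegedExceptionAction;

    import org.apache.hadoop.security.UserGroupInformation;

    public class DoAsSketch {
      public static void main(String[] args) throws Exception {
        // A throwaway test user with one group, as in the hunks above.
        UserGroupInformation mrUgi = UserGroupInformation
            .createUserForTesting("mr", new String[] {"mrgroup"});

        // Everything inside run() executes with "mr" as the effective user;
        // checked exceptions raised in run() are rethrown by doAs.
        String effective = mrUgi.doAs(new PrivilegedExceptionAction<String>() {
          public String run() throws Exception {
            return UserGroupInformation.getCurrentUser().getUserName();
          }
        });
        System.out.println(effective); // prints "mr"
      }
    }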

Modified: hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/mapred/TestMiniMRWithDFS.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/mapred/TestMiniMRWithDFS.java?rev=1077137&r1=1077136&r2=1077137&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/mapred/TestMiniMRWithDFS.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/mapred/TestMiniMRWithDFS.java Fri Mar  4 03:44:54 2011
@@ -41,7 +41,7 @@ import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.IntWritable;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
 
 /**
@@ -233,15 +233,7 @@ public class TestMiniMRWithDFS extends T
         NUM_MAPS, NUM_SAMPLES, jobconf).doubleValue();
     double error = Math.abs(Math.PI - estimate);
     assertTrue("Error in PI estimation "+error+" exceeds 0.01", (error < 0.01));
-    String userName = jobconf.getUser();
-    if (userName == null) {
-      try {
-        userName = UnixUserGroupInformation.login(jobconf).getUserName();
-      } catch (LoginException le) {
-        throw new IOException("Cannot get the login username : "
-            + StringUtils.stringifyException(le));
-      }
-    }
+    String userName = UserGroupInformation.getLoginUser().getUserName();
     checkTaskDirectories(mr, userName, new String[] {}, new String[] {});
   }
 
@@ -263,15 +255,7 @@ public class TestMiniMRWithDFS extends T
     JobID jobid = result.job.getID();
     TaskAttemptID taskid = new TaskAttemptID(
         new TaskID(jobid, true, 1),0);
-    String userName = jobConf.getUser();
-    if (userName == null) {
-      try {
-        userName = UnixUserGroupInformation.login(jobConf).getUserName();
-      } catch (LoginException le) {
-        throw new IOException("Cannot get the login username : "
-            + StringUtils.stringifyException(le));
-      }
-    }
+    String userName = UserGroupInformation.getLoginUser().getUserName();
     checkTaskDirectories(mr, userName, new String[] { jobid.toString() },
         new String[] { taskid.toString() });
     // test with maps=0
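
The deleted fallback chain (jobconf.getUser(), then UnixUserGroupInformation.login() wrapped in a LoginException handler) collapses into a single call on the new API. A minimal sketch, with an illustrative class name:

    import java.io.IOException;

    import org.apache.hadoop.security.UserGroupInformation;

    public class LoginUserSketch {
      // getLoginUser() caches the process-wide OS/Kerberos login, so no
      // per-conf lookup or LoginException handling is needed any more.
      public static String loginUserName() throws IOException {
        return UserGroupInformation.getLoginUser().getUserName();
      }
    }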

Modified: hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/mapred/TestMiniMRWithDFSWithDistinctUsers.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/mapred/TestMiniMRWithDFSWithDistinctUsers.java?rev=1077137&r1=1077136&r2=1077137&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/mapred/TestMiniMRWithDFSWithDistinctUsers.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/mapred/TestMiniMRWithDFSWithDistinctUsers.java Fri Mar  4 03:44:54 2011
@@ -18,6 +18,7 @@
 package org.apache.hadoop.mapred;
 
 import java.io.*;
+import java.security.PrivilegedExceptionAction;
 
 import junit.framework.TestCase;
 
@@ -34,25 +35,15 @@ import org.apache.hadoop.security.*;
  * A JUnit test to test Mini Map-Reduce Cluster with Mini-DFS.
  */
 public class TestMiniMRWithDFSWithDistinctUsers extends TestCase {
-  static final UnixUserGroupInformation DFS_UGI = createUGI("dfs", true); 
-  static final UnixUserGroupInformation PI_UGI = createUGI("pi", false); 
-  static final UnixUserGroupInformation WC_UGI = createUGI("wc", false); 
+  static final UserGroupInformation DFS_UGI = createUGI("dfs", true);
+  static final UserGroupInformation PI_UGI = createUGI("pi", false);
+  static final UserGroupInformation WC_UGI = createUGI("wc", false);
 
-  static UnixUserGroupInformation createUGI(String name, boolean issuper) {
+  static UserGroupInformation createUGI(String name, boolean issuper) {
     String group = issuper? "supergroup": name;
-    return UnixUserGroupInformation.createImmutable(
-        new String[]{name, group});
+    return UserGroupInformation.createUserForTesting(name,new String[]{group});
   }
   
-  static JobConf createJobConf(MiniMRCluster mr, UnixUserGroupInformation ugi) {
-    return createJobConf(mr.createJobConf(), ugi);
-  }
-  static JobConf createJobConf(JobConf conf, UnixUserGroupInformation ugi) {
-    JobConf jobconf = new JobConf(conf);    UnixUserGroupInformation.saveToConf(jobconf,
-        UnixUserGroupInformation.UGI_PROPERTY_NAME, ugi);
-    return jobconf;
-  }
-
   static void mkdir(FileSystem fs, String dir) throws IOException {
     Path p = new Path(dir);
     fs.mkdirs(p);
@@ -60,19 +51,23 @@ public class TestMiniMRWithDFSWithDistin
   }
 
   // runs a sample job as a user (ugi)
-  RunningJob runJobAsUser(JobConf job, UserGroupInformation ugi)
+  RunningJob runJobAsUser(final JobConf job, UserGroupInformation ugi)
   throws Exception {
     JobSubmissionProtocol jobSubmitClient =
       TestSubmitJob.getJobSubmitClient(job, ugi);
     JobID id = jobSubmitClient.getNewJobId();
    
     InputSplit[] splits = computeJobSplit(JobID.downgrade(id), job);
-    Path jobSubmitDir = new Path(id.toString());
-    FileSystem fs = jobSubmitDir.getFileSystem(job);
-    jobSubmitDir = jobSubmitDir.makeQualified(fs);
-    uploadJobFiles(JobID.downgrade(id), splits, jobSubmitDir, job);
+    final Path jobSubmitDir = new Path(id.toString());
+    FileSystem fs = ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
+      public FileSystem run() throws IOException {
+        return jobSubmitDir.getFileSystem(job);
+      }
+    });
+    Path qJobSubmitDir = jobSubmitDir.makeQualified(fs);
+    uploadJobFiles(JobID.downgrade(id), splits, qJobSubmitDir, ugi, job);
    
-    jobSubmitClient.submitJob(id, jobSubmitDir.toString(), null);
+    jobSubmitClient.submitJob(id, qJobSubmitDir.toString(), null);
    
     JobClient jc = new JobClient(job);
     return jc.getJob(JobID.downgrade(id));
@@ -90,11 +85,16 @@ public class TestMiniMRWithDFSWithDistin
 
   // a helper api for split submission
   private void uploadJobFiles(JobID id, InputSplit[] splits,
-                             Path jobSubmitDir, JobConf conf)
-  throws IOException {
-    Path confLocation = JobSubmissionFiles.getJobConfPath(jobSubmitDir);
-    JobSplitWriter.createSplitFiles(jobSubmitDir, conf, splits);
-    FileSystem fs = confLocation.getFileSystem(conf);
+                             Path jobSubmitDir, UserGroupInformation ugi,
+                             final JobConf conf)
+  throws Exception {
+    final Path confLocation = JobSubmissionFiles.getJobConfPath(jobSubmitDir);
+    FileSystem fs = ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
+      public FileSystem run() throws IOException {
+        return confLocation.getFileSystem(conf);
+      }
+    });
+    JobSplitWriter.createSplitFiles(jobSubmitDir, conf, fs, splits);
     FsPermission perm = new FsPermission((short)0700);
    
     // localize conf
@@ -104,19 +104,19 @@ public class TestMiniMRWithDFSWithDistin
   }
  
   public void testDistinctUsers() throws Exception {
-    MiniDFSCluster dfs = null;
     MiniMRCluster mr = null;
+    Configuration conf = new Configuration();
+    final MiniDFSCluster dfs = new MiniDFSCluster(conf, 4, true, null);
     try {
-      Configuration conf = new Configuration();
-      UnixUserGroupInformation.saveToConf(conf,
-          UnixUserGroupInformation.UGI_PROPERTY_NAME, DFS_UGI);
-      dfs = new MiniDFSCluster(conf, 4, true, null);
-      FileSystem fs = dfs.getFileSystem();
+      FileSystem fs = DFS_UGI.doAs(new PrivilegedExceptionAction<FileSystem>() {
+        public FileSystem run() throws IOException {
+          return dfs.getFileSystem();
+        }
+      });
       mkdir(fs, "/user");
       mkdir(fs, "/mapred");
 
-      UnixUserGroupInformation MR_UGI = createUGI(
-          UnixUserGroupInformation.login().getUserName(), false); 
+      UserGroupInformation MR_UGI = UserGroupInformation.getLoginUser();
       mr = new MiniMRCluster(0, 0, 4, dfs.getFileSystem().getUri().toString(),
            1, null, null, MR_UGI);
 
@@ -129,7 +129,6 @@ public class TestMiniMRWithDFSWithDistin
       TestMiniMRClasspath.configureWordCount(fs, jobTrackerName, job1,
                                              input, 2, 1, inDir, outDir);
 
-      job1 = createJobConf(job1, PI_UGI);
       runJobAsUser(job1, PI_UGI);
 
       JobConf job2 = mr.createJobConf();
@@ -137,7 +136,6 @@ public class TestMiniMRWithDFSWithDistin
       Path outDir2 = new Path("/testing/distinct/output2");
       TestMiniMRClasspath.configureWordCount(fs, jobTrackerName, job2,
                                              input, 2, 1, inDir2, outDir2);
-      job2 = createJobConf(job2, WC_UGI);
       runJobAsUser(job2, WC_UGI);
     } finally {
       if (dfs != null) { dfs.shutdown(); }
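
A detail worth noting in this file: the getFileSystem() lookups are themselves wrapped in doAs, because a FileSystem handle carries the identity of the UGI that created it. A minimal sketch of that idiom (class and method names illustrative):

    import java.io.IOException;
    import java.security.PrivilegedExceptionAction;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.security.UserGroupInformation;

    public class FsAsUserSketch {
      // Obtain a FileSystem as the given user; later calls on the returned
      // handle are made with that user's identity.
      public static FileSystem fsAs(UserGroupInformation ugi,
                                    final Configuration conf)
          throws IOException, InterruptedException {
        return ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
          public FileSystem run() throws IOException {
            return FileSystem.get(conf);
          }
        });
      }
    }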

Modified: hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/mapred/TestNodeRefresh.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/mapred/TestNodeRefresh.java?rev=1077137&r1=1077136&r2=1077137&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/mapred/TestNodeRefresh.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/mapred/TestNodeRefresh.java Fri Mar  4 03:44:54 2011
@@ -32,12 +32,13 @@ import junit.framework.TestCase;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Shell.ShellCommandExecutor;
 
@@ -52,7 +53,8 @@ public class TestNodeRefresh extends Tes
   private JobTracker jt = null;
   private String[] hosts = null;
   private String[] trackerHosts = null;
-  public static final Log LOG = 
+  private UserGroupInformation owner, user1, user2, user3, user4;
+  private static final Log LOG = 
     LogFactory.getLog(TestNodeRefresh.class);
   
   private String getHostname(int i) {
@@ -60,17 +62,20 @@ public class TestNodeRefresh extends Tes
   }
 
   private void startCluster(int numHosts, int numTrackerPerHost, 
-                            int numExcluded, Configuration conf) 
+                            int numExcluded, UserGroupInformation clusterUgi,
+                            Configuration conf) 
   throws IOException {
     try {
-   // create fake mapping for the groups
-      Map<String, String[]> u2g_map = new HashMap<String, String[]> (1);
-      u2g_map.put("user1", new String[] {"user1" });
-      u2g_map.put("user2", new String[] {"user2" });
-      u2g_map.put("user3", new String[] {"abc" });
-      u2g_map.put("user4", new String[] {"supergroup" });
-      DFSTestUtil.updateConfWithFakeGroupMapping(conf, u2g_map);
-      
+      // create fake mapping for the groups
+      owner = UserGroupInformation.getLoginUser();
+      user1= UserGroupInformation.createUserForTesting("user1", 
+                                                       new String[] {"user1"});
+      user2= UserGroupInformation.createUserForTesting("user2", 
+                                                       new String[] {"user2"});
+      user3= UserGroupInformation.createUserForTesting("user3", 
+                                                       new String[] {"abc"});
+      user4= UserGroupInformation.createUserForTesting("user4", 
+                                                   new String[] {"supergroup"});
       conf.setBoolean("dfs.replication.considerLoad", false);
       
       // prepare hosts info
@@ -84,6 +89,8 @@ public class TestNodeRefresh extends Tes
       dfs.waitActive();
       dfs.startDataNodes(conf, numHosts, true, null, null, hosts, null);
       dfs.waitActive();
+      FileSystem.mkdirs(dfs.getFileSystem(), new Path("/"),
+          new FsPermission((short) 0777));
 
       namenode = (dfs.getFileSystem()).getUri().getHost() + ":" + 
       (dfs.getFileSystem()).getUri().getPort(); 
@@ -97,7 +104,7 @@ public class TestNodeRefresh extends Tes
       // start mini mr
       JobConf jtConf = new JobConf(conf);
       mr = new MiniMRCluster(0, 0, numHosts * numTrackerPerHost, namenode, 1, 
-                             null, trackerHosts, null, jtConf, 
+                             null, trackerHosts, clusterUgi, jtConf, 
                              numExcluded * numTrackerPerHost);
       
       jt = mr.getJobTrackerRunner().getJobTracker();
@@ -145,14 +152,12 @@ public class TestNodeRefresh extends Tes
     // start a cluster with 2 hosts and no exclude-hosts file
     Configuration conf = new Configuration();
     conf.set("mapred.hosts.exclude", "");
-    startCluster(2, 1, 0, conf);
+    startCluster(2, 1, 0, UserGroupInformation.getLoginUser(),conf);
 
     conf = mr.createJobConf(new JobConf(conf));
 
     // refresh with wrong user
-    UserGroupInformation ugi_wrong =
-      TestMiniMRWithDFSWithDistinctUsers.createUGI("user1", false);
-    AdminOperationsProtocol client = getClient(conf, ugi_wrong);
+    AdminOperationsProtocol client = getClient(conf, user1);
     boolean success = false;
     try {
       // Also try tool runner
@@ -163,10 +168,7 @@ public class TestNodeRefresh extends Tes
 
     // refresh with correct user
     success = false;
-    String owner = ShellCommandExecutor.execCommand("whoami").trim();
-    UserGroupInformation ugi_correct =
-      TestMiniMRWithDFSWithDistinctUsers.createUGI(owner, false);
-    client = getClient(conf, ugi_correct);
+    client = getClient(conf, owner);
     try {
       client.refreshNodes();
       success = true;
@@ -176,9 +178,7 @@ public class TestNodeRefresh extends Tes
 
     // refresh with super user
     success = false;
-    UserGroupInformation ugi_super =
-      TestMiniMRWithDFSWithDistinctUsers.createUGI("user4", true);
-    client = getClient(conf, ugi_super);
+    client = getClient(conf, user4);
     try {
       client.refreshNodes();
       success = true;
@@ -208,21 +208,15 @@ public class TestNodeRefresh extends Tes
    */
   public void testMRSuperUsers() throws IOException {  
     // start a cluster with 1 host and specified superuser and supergroup
-    UnixUserGroupInformation ugi =
-      TestMiniMRWithDFSWithDistinctUsers.createUGI("user1", false);
     Configuration conf = new Configuration();
-    UnixUserGroupInformation.saveToConf(conf, 
-        UnixUserGroupInformation.UGI_PROPERTY_NAME, ugi);
     // set the supergroup
     conf.set("mapred.permissions.supergroup", "abc");
-    startCluster(2, 1, 0, conf);
-
+    startCluster(2, 1, 0, UserGroupInformation.createRemoteUser("user1"), conf);
+    
     conf = mr.createJobConf(new JobConf(conf));
 
     // refresh with wrong user
-    UserGroupInformation ugi_wrong =
-      TestMiniMRWithDFSWithDistinctUsers.createUGI("user2", false);
-    AdminOperationsProtocol client = getClient(conf, ugi_wrong);
+    AdminOperationsProtocol client = getClient(conf, user2);
     boolean success = false;
     try {
       // Also try tool runner
@@ -233,7 +227,7 @@ public class TestNodeRefresh extends Tes
 
     // refresh with correct user
     success = false;
-    client = getClient(conf, ugi);
+    client = getClient(conf, user1);
     try {
       client.refreshNodes();
       success = true;
@@ -243,9 +237,7 @@ public class TestNodeRefresh extends Tes
 
     // refresh with super user
     success = false;
-    UserGroupInformation ugi_super =
-      UnixUserGroupInformation.createImmutable(new String[]{"user3", "abc"});
-    client = getClient(conf, ugi_super);
+    client = getClient(conf, user3);
     try {
       client.refreshNodes();
       success = true;
@@ -266,7 +258,7 @@ public class TestNodeRefresh extends Tes
     Configuration conf = new Configuration();
     File file = new File("hosts.exclude");
     file.delete();
-    startCluster(2, 1, 0, conf);
+    startCluster(2, 1, 0, UserGroupInformation.getLoginUser(), conf);
     String hostToDecommission = getHostname(1);
     conf = mr.createJobConf(new JobConf(conf));
 
@@ -285,10 +277,7 @@ public class TestNodeRefresh extends Tes
     }
     file.deleteOnExit();
 
-    String owner = ShellCommandExecutor.execCommand("whoami").trim();
-    UserGroupInformation ugi_correct =
-      TestMiniMRWithDFSWithDistinctUsers.createUGI(owner, false);
-    AdminOperationsProtocol client = getClient(conf, ugi_correct);
+    AdminOperationsProtocol client = getClient(conf, owner);
     try {
       client.refreshNodes();
     } catch (IOException ioe){}
@@ -334,7 +323,7 @@ public class TestNodeRefresh extends Tes
       out.close();
     }
     
-    startCluster(2, 1, 1, conf);
+    startCluster(2, 1, 1, UserGroupInformation.getLoginUser(), conf);
     
     file.delete();
 
@@ -356,10 +345,7 @@ public class TestNodeRefresh extends Tes
     
     conf = mr.createJobConf(new JobConf(conf));
 
-    String owner = ShellCommandExecutor.execCommand("whoami").trim();
-    UserGroupInformation ugi_correct =  
-      TestMiniMRWithDFSWithDistinctUsers.createUGI(owner, false);
-    AdminOperationsProtocol client = getClient(conf, ugi_correct);
+    AdminOperationsProtocol client = getClient(conf, owner);
     try {
       client.refreshNodes();
     } catch (IOException ioe){}
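
The conf-based fake group mapping (DFSTestUtil.updateConfWithFakeGroupMapping) gives way to createUserForTesting, which both builds the UGI and registers its groups in a JVM-local test mapping. A minimal sketch, assuming that behavior:

    import java.util.Arrays;

    import org.apache.hadoop.security.UserGroupInformation;

    public class TestUserSketch {
      public static void main(String[] args) {
        // Registers "user3" with group "abc" for this test JVM, so group
        // resolution succeeds without consulting the real OS groups.
        UserGroupInformation user3 = UserGroupInformation
            .createUserForTesting("user3", new String[] {"abc"});
        System.out.println(user3.getUserName() + " groups="
            + Arrays.toString(user3.getGroupNames()));
      }
    }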

Modified: hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/mapred/TestQueueAclsForCurrentUser.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/mapred/TestQueueAclsForCurrentUser.java?rev=1077137&r1=1077136&r2=1077137&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/mapred/TestQueueAclsForCurrentUser.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/mapred/TestQueueAclsForCurrentUser.java Fri Mar  4 03:44:54 2011
@@ -20,7 +20,6 @@ package org.apache.hadoop.mapred;
 import java.io.IOException;
 import javax.security.auth.login.LoginException;
 import junit.framework.TestCase;
-import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 
 /**
@@ -36,7 +35,7 @@ public class TestQueueAclsForCurrentUser
   String adminAcl  = QueueManager.QueueOperation.ADMINISTER_JOBS.getAclName();
 
   private void setupConfForNoAccess() throws IOException,LoginException {
-    currentUGI = UnixUserGroupInformation.login();
+    currentUGI = UserGroupInformation.getLoginUser();
     String userName = currentUGI.getUserName();
     conf = new JobConf();
 
@@ -58,7 +57,7 @@ public class TestQueueAclsForCurrentUser
    * @return
    */
   private void setupConf(boolean aclSwitch) throws IOException,LoginException{
-    currentUGI = UnixUserGroupInformation.login();
+    currentUGI = UserGroupInformation.getLoginUser();
     String userName = currentUGI.getUserName();
     conf = new JobConf();
 

Modified: hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/mapred/TestQueueManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/mapred/TestQueueManager.java?rev=1077137&r1=1077136&r2=1077137&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/mapred/TestQueueManager.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/mapred/TestQueueManager.java Fri Mar  4 03:44:54 2011
@@ -22,6 +22,7 @@ import java.io.File;
 import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.PrintWriter;
+import java.security.PrivilegedExceptionAction;
 import java.util.Properties;
 import java.util.Set;
 import java.util.TreeSet;
@@ -37,7 +38,6 @@ import org.apache.hadoop.examples.SleepJ
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 
 public class TestQueueManager extends TestCase {
@@ -46,7 +46,26 @@ public class TestQueueManager extends Te
   
   private MiniDFSCluster miniDFSCluster;
   private MiniMRCluster miniMRCluster;
-
+  
+  /**
+   * For some tests it is necessary to sandbox them in a doAs with a fake user
+   * due to bug HADOOP-6527, which wipes out real group mappings. It's also
+   * necessary to then add the real user running the test to the fake group
+   * mapping so that child processes can write to the DFS.
+   */
+  private UserGroupInformation createNecessaryUsers() throws IOException {
+    // Add real user to fake groups mapping so that child processes (tasks)
+    // will have permissions on the dfs
+    String j = UserGroupInformation.getCurrentUser().getUserName();
+    UserGroupInformation.createUserForTesting(j, new String [] { "supergroup"});
+    
+    
+    // Create a fake superuser for all processes to execute within
+    UserGroupInformation ugi = UserGroupInformation.createUserForTesting("Zork",
+                                                 new String [] {"Zork"});
+    return ugi;
+  }
+  
   public void testDefaultQueueConfiguration() {
     JobConf conf = new JobConf();
     QueueManager qMgr = new QueueManager(conf);
@@ -78,23 +97,27 @@ public class TestQueueManager extends Te
     assertEquals(qMgr.getSchedulerInfo("qq1"), "queueInfoForqq1");
   }
   
-  public void testAllEnabledACLForJobSubmission() throws IOException {
+  public void testAllEnabledACLForJobSubmission() 
+  throws IOException, InterruptedException {
     JobConf conf = setupConf("mapred.queue.default.acl-submit-job", "*");
     verifyJobSubmission(conf, true);
   }
   
-  public void testAllDisabledACLForJobSubmission() throws IOException {
+  public void testAllDisabledACLForJobSubmission() 
+  throws IOException, InterruptedException {
     JobConf conf = setupConf("mapred.queue.default.acl-submit-job", "");
     verifyJobSubmission(conf, false);
   }
   
-  public void testUserDisabledACLForJobSubmission() throws IOException {
+  public void testUserDisabledACLForJobSubmission() 
+  throws IOException, InterruptedException {
     JobConf conf = setupConf("mapred.queue.default.acl-submit-job", 
                                 "3698-non-existent-user");
     verifyJobSubmission(conf, false);
   }
   
-  public void testDisabledACLForNonDefaultQueue() throws IOException {
+  public void testDisabledACLForNonDefaultQueue() 
+  throws IOException, InterruptedException {
     // allow everyone in default queue
     JobConf conf = setupConf("mapred.queue.default.acl-submit-job", "*");
     // setup a different queue
@@ -105,7 +128,8 @@ public class TestQueueManager extends Te
     verifyJobSubmission(conf, false, "q1");
   }
   
-  public void testSubmissionToInvalidQueue() throws IOException{
+  public void testSubmissionToInvalidQueue() 
+  throws IOException, InterruptedException{
     JobConf conf = new JobConf();
     conf.set("mapred.queue.names","default");
     setUpCluster(conf);
@@ -121,10 +145,10 @@ public class TestQueueManager extends Te
     fail("Job submission to invalid queue job shouldnot complete , it should fail with proper exception ");   
   }
   
-  public void testEnabledACLForNonDefaultQueue() throws IOException,
-                                                          LoginException {
+  public void testEnabledACLForNonDefaultQueue() 
+  throws IOException, LoginException, InterruptedException {
     // login as self...
-    UserGroupInformation ugi = UnixUserGroupInformation.login();
+    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
     String userName = ugi.getUserName();
     // allow everyone in default queue
     JobConf conf = setupConf("mapred.queue.default.acl-submit-job", "*");
@@ -137,9 +161,9 @@ public class TestQueueManager extends Te
   }
   
   public void testUserEnabledACLForJobSubmission() 
-                                    throws IOException, LoginException {
+  throws IOException, LoginException, InterruptedException {
     // login as self...
-    UserGroupInformation ugi = UnixUserGroupInformation.login();
+    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
     String userName = ugi.getUserName();
     JobConf conf = setupConf("mapred.queue.default.acl-submit-job",
                                   "3698-junk-user," + userName 
@@ -148,56 +172,126 @@ public class TestQueueManager extends Te
   }
   
   public void testGroupsEnabledACLForJobSubmission() 
-                                    throws IOException, LoginException {
+  throws IOException, LoginException, InterruptedException {
     // login as self, get one group, and add in allowed list.
-    UserGroupInformation ugi = UnixUserGroupInformation.login();
-    String[] groups = ugi.getGroupNames();
-    assertTrue(groups.length > 0);
-    JobConf conf = setupConf("mapred.queue.default.acl-submit-job",
-                                "3698-junk-user1,3698-junk-user2 " 
-                                  + groups[groups.length-1] 
-                                           + ",3698-junk-group");
-    verifyJobSubmission(conf, true);
-  }
-  
-  public void testAllEnabledACLForJobKill() throws IOException {
-    JobConf conf = setupConf("mapred.queue.default.acl-administer-jobs", "*");
-    verifyJobKill(conf, true);
-  }
+    UserGroupInformation ugi = createNecessaryUsers();
+    
+    ugi.doAs(new PrivilegedExceptionAction<Object>() {
+
+      @Override
+      public Object run() throws Exception {
+        String[] groups = UserGroupInformation.getCurrentUser().getGroupNames();
+        JobConf conf = setupConf("mapred.queue.default.acl-submit-job",
+                                    "3698-junk-user1,3698-junk-user2 " 
+                                      + groups[groups.length-1] 
+                                               + ",3698-junk-group");
+        verifyJobSubmission(conf, true);
+        
+        return null;
+      }
+    });
 
-  public void testAllDisabledACLForJobKill() throws IOException {
-    JobConf conf = setupConf("mapred.queue.default.acl-administer-jobs", "");
-    verifyJobKillAsOtherUser(conf, false, "dummy-user,dummy-user-group");
   }
   
-  public void testOwnerAllowedForJobKill() throws IOException {
-    JobConf conf = setupConf("mapred.queue.default.acl-administer-jobs", 
+  public void testAllEnabledACLForJobKill() 
+  throws IOException, InterruptedException {
+    UserGroupInformation ugi = createNecessaryUsers();
+    
+    ugi.doAs(new PrivilegedExceptionAction<Object>() {
+
+      @Override
+      public Object run() throws Exception {
+        JobConf conf = setupConf("mapred.queue.default.acl-administer-jobs", "*");
+        verifyJobKill(conf, true);
+        return null;
+      }
+    });
+  }
+
+  public void testAllDisabledACLForJobKill() 
+  throws IOException, InterruptedException {
+    // Create a fake superuser for all processes to execute within
+    UserGroupInformation ugi = createNecessaryUsers();
+    ugi.doAs(new PrivilegedExceptionAction<Object>() {
+
+      @Override
+      public Object run() throws Exception {
+        // No one should be able to kill jobs
+        JobConf conf = setupConf("mapred.queue.default.acl-administer-jobs", "");
+        // Run as dummy-user, who is not allowed to kill the job,
+        // and expect the kill to fail
+        verifyJobKillAsOtherUser(conf, false, "dummy-user,dummy-group");
+        return null;
+      }
+    });
+  }
+  
+  public void testOwnerAllowedForJobKill() 
+  throws IOException, InterruptedException {
+    UserGroupInformation ugi = createNecessaryUsers();
+    
+    ugi.doAs(new PrivilegedExceptionAction<Object>() {
+
+      @Override
+      public Object run() throws Exception {
+
+        JobConf conf = setupConf("mapred.queue.default.acl-administer-jobs", 
                                               "junk-user");
-    verifyJobKill(conf, true);
-  }
-  
-  public void testUserDisabledACLForJobKill() throws IOException {
-    //setup a cluster allowing a user to submit
-    JobConf conf = setupConf("mapred.queue.default.acl-administer-jobs", 
-                                              "dummy-user");
-    verifyJobKillAsOtherUser(conf, false, "dummy-user,dummy-user-group");
-  }
-  
-  public void testUserEnabledACLForJobKill() throws IOException, 
-                                                    LoginException {
-    // login as self...
-    UserGroupInformation ugi = UnixUserGroupInformation.login();
-    String userName = ugi.getUserName();
-    JobConf conf = setupConf("mapred.queue.default.acl-administer-jobs",
-                                              "dummy-user,"+userName);
-    verifyJobKillAsOtherUser(conf, true, "dummy-user,dummy-user-group");
+        verifyJobKill(conf, true);
+        return null;
+      }
+    });
   }
   
-  public void testUserDisabledForJobPriorityChange() throws IOException {
-    JobConf conf = setupConf("mapred.queue.default.acl-administer-jobs",
+  public void testUserDisabledACLForJobKill() 
+  throws IOException, InterruptedException {
+    UserGroupInformation ugi = createNecessaryUsers();
+    
+    ugi.doAs(new PrivilegedExceptionAction<Object>() {
+      @Override
+      public Object run() throws Exception {
+        // setup a cluster allowing a user to submit
+        JobConf conf = setupConf("mapred.queue.default.acl-administer-jobs", 
+                                                "dummy-user");
+        verifyJobKillAsOtherUser(conf, false, "dummy-user,dummy-group");
+        return null;
+      }
+    });
+   }
+  
+  public void testUserEnabledACLForJobKill() 
+  throws IOException, LoginException, InterruptedException {
+    UserGroupInformation ugi = createNecessaryUsers();
+
+    ugi.doAs(new PrivilegedExceptionAction<Object>() {
+      @Override
+      public Object run() throws Exception {
+        // login as self...
+        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+        String userName = ugi.getUserName();
+        JobConf conf = setupConf("mapred.queue.default.acl-administer-jobs",
+                                 "dummy-user," + userName);
+        verifyJobKillAsOtherUser(conf, true, "dummy-user,dummy-group");
+        return null;
+      }
+    });
+  }
+
+  public void testUserDisabledForJobPriorityChange() 
+  throws IOException, InterruptedException {
+    UserGroupInformation ugi = createNecessaryUsers();
+    ugi.doAs(new PrivilegedExceptionAction<Object>() {
+
+      @Override
+      public Object run() throws Exception {
+
+        JobConf conf = setupConf("mapred.queue.default.acl-administer-jobs",
                               "junk-user");
-    verifyJobPriorityChangeAsOtherUser(conf, false, 
-                              "junk-user,junk-user-group");
+        verifyJobPriorityChangeAsOtherUser(conf, false, 
+                              "junk-user,dummy-group");
+        return null;
+      }
+    });
   }
 
   /**
@@ -232,7 +326,7 @@ public class TestQueueManager extends Te
       //Create a new configuration to be used with QueueManager
       JobConf conf = new JobConf();
       QueueManager queueManager = new QueueManager(conf);
-      UserGroupInformation ugi = UnixUserGroupInformation.getCurrentUGI();
+      UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
       //Job Submission should fail because ugi to be used is set to blank.
       assertFalse("User Job Submission Succeeded before refresh.",
           queueManager.hasAccess("default", QueueManager.QueueOperation.
@@ -245,10 +339,8 @@ public class TestQueueManager extends Te
               SUBMIT_JOB, ugi));
       
       //Test job submission as alternate user.
-      Configuration alternateUserConfig = new Configuration();
-      alternateUserConfig.set("hadoop.job.ugi","u1,users");
       UserGroupInformation alternateUgi = 
-        UserGroupInformation.readFrom(alternateUserConfig);
+        UserGroupInformation.createUserForTesting("u1", new String[]{"user"});
       assertTrue("Alternate User Job Submission failed before refresh.",
           queueManager.hasAccess("q2", QueueManager.QueueOperation.
               SUBMIT_JOB, alternateUgi));
@@ -310,7 +402,7 @@ public class TestQueueManager extends Te
       
       //properties for mapred-queue-acls.xml
       Properties queueConfProps = new Properties();
-      UserGroupInformation ugi = UnixUserGroupInformation.getCurrentUGI();
+      UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
       queueConfProps.put("mapred.queue.default.acl-submit-job", ugi.getUserName());
       queueConfProps.put("mapred.queue.q1.acl-submit-job", ugi.getUserName());
       queueConfProps.put("mapred.queue.q2.acl-submit-job", ugi.getUserName());
@@ -379,12 +471,12 @@ public class TestQueueManager extends Te
   }
   
   private void verifyJobSubmission(JobConf conf, boolean shouldSucceed) 
-                                              throws IOException {
+                                     throws IOException, InterruptedException {
     verifyJobSubmission(conf, shouldSucceed, "default");
   }
 
   private void verifyJobSubmission(JobConf conf, boolean shouldSucceed, 
-      String queue) throws IOException {
+      String queue) throws IOException, InterruptedException {
     setUpCluster(conf);
     try {
       runAndVerifySubmission(conf, shouldSucceed, queue, null);
@@ -395,7 +487,7 @@ public class TestQueueManager extends Te
 
   private void runAndVerifySubmission(JobConf conf, boolean shouldSucceed,
       String queue, String userInfo)
-      throws IOException {
+      throws IOException, InterruptedException {
     try {
       RunningJob rjob = submitSleepJob(1, 1, 100, 100, true, userInfo, queue);
       if (shouldSucceed) {
@@ -428,7 +520,7 @@ public class TestQueueManager extends Te
 }
 
   private void verifyJobKill(JobConf conf, boolean shouldSucceed) 
-                                      throws IOException {
+                                      throws IOException, InterruptedException {
     setUpCluster(conf);
     try {
       RunningJob rjob = submitSleepJob(1, 1, 1000, 1000, false);
@@ -470,7 +562,7 @@ public class TestQueueManager extends Te
   
   private void verifyJobKillAsOtherUser(JobConf conf, boolean shouldSucceed,
                                         String otherUserInfo) 
-                        throws IOException {
+                        throws IOException, InterruptedException {
     setUpCluster(conf);
     try {
       // submit a job as another user.
@@ -512,7 +604,7 @@ public class TestQueueManager extends Te
   
   private void verifyJobPriorityChangeAsOtherUser(JobConf conf, 
                           boolean shouldSucceed, String otherUserInfo)
-                            throws IOException {
+                            throws IOException, InterruptedException {
     setUpCluster(conf);
     try {
       // submit job as another user.
@@ -552,6 +644,7 @@ public class TestQueueManager extends Te
   private void setUpCluster(JobConf conf) throws IOException {
     miniDFSCluster = new MiniDFSCluster(conf, 1, true, null);
     FileSystem fileSys = miniDFSCluster.getFileSystem();
+    TestMiniMRWithDFSWithDistinctUsers.mkdir(fileSys, "/user");
     TestMiniMRWithDFSWithDistinctUsers.mkdir(fileSys,
         conf.get("mapreduce.jobtracker.staging.root.dir",
             "/tmp/hadoop/mapred/staging"));
@@ -568,7 +661,7 @@ public class TestQueueManager extends Te
   private RunningJob submitSleepJob(int numMappers, int numReducers, 
                             long mapSleepTime, long reduceSleepTime,
                             boolean shouldComplete) 
-                              throws IOException {
+                              throws IOException, InterruptedException {
     return submitSleepJob(numMappers, numReducers, mapSleepTime,
                           reduceSleepTime, shouldComplete, null);
   }
@@ -576,19 +669,20 @@ public class TestQueueManager extends Te
   private RunningJob submitSleepJob(int numMappers, int numReducers, 
                                       long mapSleepTime, long reduceSleepTime,
                                       boolean shouldComplete, String userInfo) 
-                                            throws IOException {
+                                     throws IOException, InterruptedException {
     return submitSleepJob(numMappers, numReducers, mapSleepTime, 
                           reduceSleepTime, shouldComplete, userInfo, null);
   }
 
-  private RunningJob submitSleepJob(int numMappers, int numReducers, 
-                                    long mapSleepTime, long reduceSleepTime,
-                                    boolean shouldComplete, String userInfo,
+  private RunningJob submitSleepJob(final int numMappers, final int numReducers, 
+      final long mapSleepTime,
+      final long reduceSleepTime, final boolean shouldComplete, String userInfo,
                                     String queueName) 
-                                      throws IOException {
+                                      throws IOException, InterruptedException {
     JobConf clientConf = new JobConf();
     clientConf.set("mapred.job.tracker", "localhost:"
         + miniMRCluster.getJobTrackerPort());
+    UserGroupInformation ugi;
     SleepJob job = new SleepJob();
     job.setConf(clientConf);
     clientConf = job.setupJobConf(numMappers, numReducers, 
@@ -597,18 +691,26 @@ public class TestQueueManager extends Te
     if (queueName != null) {
       clientConf.setQueueName(queueName);
     }
-    JobConf jc = new JobConf(clientConf);
+    final JobConf jc = new JobConf(clientConf);
     if (userInfo != null) {
-      jc.set(UnixUserGroupInformation.UGI_PROPERTY_NAME, userInfo);
-    }
-    RunningJob rJob = null;
-    if (shouldComplete) {
-      rJob = JobClient.runJob(jc);  
+      String[] splits = userInfo.split(",");
+      String[] groups = new String[splits.length - 1];
+      System.arraycopy(splits, 1, groups, 0, splits.length - 1);
+      ugi = UserGroupInformation.createUserForTesting(splits[0], groups);
     } else {
-      // Job should be submitted as 'userInfo'. So both the client as well as
-      // the configuration should point to the same UGI.
-      rJob = new JobClient(jc).submitJob(jc);
+      ugi = UserGroupInformation.getCurrentUser();
     }
+    RunningJob rJob = ugi.doAs(new PrivilegedExceptionAction<RunningJob>() {
+      public RunningJob run() throws IOException {
+        if (shouldComplete) {
+          return JobClient.runJob(jc);  
+        } else {
+          // Job should be submitted as 'userInfo', so both the client and
+          // the configuration should point to the same UGI.
+          return new JobClient(jc).submitJob(jc);
+        }
+      }
+    });
     return rJob;
   }
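
With the hadoop.job.ugi property gone, the "user,group1,group2" strings these tests pass around are parsed into a test UGI instead of being written into the conf. A minimal sketch of the parsing the new submitSleepJob performs (class and method names illustrative):

    import org.apache.hadoop.security.UserGroupInformation;

    public class UserInfoSketch {
      // "dummy-user,dummy-group" -> a UGI for dummy-user in group dummy-group.
      public static UserGroupInformation fromUserInfo(String userInfo) {
        String[] splits = userInfo.split(",");
        String[] groups = new String[splits.length - 1];
        System.arraycopy(splits, 1, groups, 0, splits.length - 1);
        return UserGroupInformation.createUserForTesting(splits[0], groups);
      }
    }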
 

Modified: hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/mapred/TestRecoveryManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/mapred/TestRecoveryManager.java?rev=1077137&r1=1077136&r2=1077137&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/mapred/TestRecoveryManager.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/mapred/TestRecoveryManager.java Fri Mar  4 03:44:54 2011
@@ -19,6 +19,7 @@
 package org.apache.hadoop.mapred;
 
 import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
 
 import junit.framework.TestCase;
 
@@ -199,15 +200,21 @@ public class TestRecoveryManager extends
     }
     
     // now submit job3 with inappropriate acls
-    JobConf job3 = mr.createJobConf();
-    job3.set("hadoop.job.ugi","abc,users");
-
+    final JobConf job3 = mr.createJobConf();
+    UserGroupInformation ugi3 = 
+      UserGroupInformation.createUserForTesting("abc", new String[]{"users"});
+    
     UtilsForTests.configureWaitingJobConf(job3, 
         new Path(TEST_DIR, "input"), new Path(TEST_DIR, "output5"), 1, 0, 
         "test-recovery-manager", signalFile, signalFile);
     
     // submit the job
-    RunningJob rJob3 = (new JobClient(job3)).submitJob(job3);
+    RunningJob rJob3 = ugi3.doAs(new PrivilegedExceptionAction<RunningJob>() {
+      public RunningJob run() throws IOException {
+        return (new JobClient(job3)).submitJob(job3); 
+      }
+    });
+      
     LOG.info("Submitted job " + rJob3.getID() + " with different user");
     
     jip = jobtracker.getJob(rJob3.getID());
@@ -227,7 +234,7 @@ public class TestRecoveryManager extends
     mr.getJobTrackerConf().setInt("mapred.jobtracker.maxtasks.per.job", 25);
     
     mr.getJobTrackerConf().setBoolean("mapred.acls.enabled" , true);
-    UserGroupInformation ugi = UserGroupInformation.readFrom(job1);
+    UserGroupInformation ugi = UserGroupInformation.getLoginUser();
     mr.getJobTrackerConf().set("mapred.queue.default.acl-submit-job", 
                                ugi.getUserName());
 

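In the TestRecoveryManager change above, the "inappropriate acls" are no longer faked through hadoop.job.ugi; the job is genuinely submitted under a test UGI ("abc" in group "users") while the queue's submit ACL is pinned to the login user. A sketch of deriving that ACL entry, using only property names that appear in this patch (the throwaway Configuration is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.UserGroupInformation;

    public class QueueAclSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.setBoolean("mapred.acls.enabled", true);
        // Only the login user may submit to the default queue; a job
        // submitted under any other UGI (e.g. "abc") should be refused.
        UserGroupInformation login = UserGroupInformation.getLoginUser();
        conf.set("mapred.queue.default.acl-submit-job", login.getUserName());
        System.out.println("submit ACL = "
            + conf.get("mapred.queue.default.acl-submit-job"));
      }
    }
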
Modified: hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/mapred/TestSubmitJob.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/mapred/TestSubmitJob.java?rev=1077137&r1=1077136&r2=1077137&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/mapred/TestSubmitJob.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/mapred/TestSubmitJob.java Fri Mar  4 03:44:54 2011
@@ -19,6 +19,7 @@ package org.apache.hadoop.mapred;
 
 import java.io.IOException;
 import java.net.URI;
+import java.security.PrivilegedExceptionAction;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -31,7 +32,6 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.ToolRunner;
 
@@ -170,23 +170,23 @@ public class TestSubmitJob extends TestC
     */
   public void testSecureJobExecution() throws Exception {
     LOG.info("Testing secure job submission/execution");
-    MiniDFSCluster dfs = null;
     MiniMRCluster mr = null;
+    Configuration conf = new Configuration();
+    final MiniDFSCluster dfs = new MiniDFSCluster(conf, 1, true, null);
     try {
-      Configuration conf = new Configuration();
-      UnixUserGroupInformation.saveToConf(conf,
-          UnixUserGroupInformation.UGI_PROPERTY_NAME,
-          TestMiniMRWithDFSWithDistinctUsers.DFS_UGI);
-      dfs = new MiniDFSCluster(conf, 1, true, null);
-      FileSystem fs = dfs.getFileSystem();
+      FileSystem fs =
+        TestMiniMRWithDFSWithDistinctUsers.
+        DFS_UGI.doAs(new PrivilegedExceptionAction<FileSystem>() {
+          public FileSystem run() throws IOException {
+            return dfs.getFileSystem();
+          }
+        });
       TestMiniMRWithDFSWithDistinctUsers.mkdir(fs, "/user");
       TestMiniMRWithDFSWithDistinctUsers.mkdir(fs, "/mapred");
       TestMiniMRWithDFSWithDistinctUsers.mkdir(fs,
           conf.get("mapreduce.jobtracker.staging.root.dir",
               "/tmp/hadoop/mapred/staging"));
-      UnixUserGroupInformation MR_UGI =
-        TestMiniMRWithDFSWithDistinctUsers.createUGI(
-            UnixUserGroupInformation.login().getUserName(), false);
+      UserGroupInformation MR_UGI = UserGroupInformation.getLoginUser();
       mr = new MiniMRCluster(0, 0, 1, dfs.getFileSystem().getUri().toString(),
           1, null, null, MR_UGI);
       JobTracker jt = mr.getJobTrackerRunner().getJobTracker();
@@ -198,12 +198,11 @@ public class TestSubmitJob extends TestC
       final Path reduceSignalFile = new Path(TEST_DIR, "reduce-signal");
 
       // create a ugi for user 1
-      UnixUserGroupInformation user1 =
+      UserGroupInformation user1 =
         TestMiniMRWithDFSWithDistinctUsers.createUGI("user1", false);
       Path inDir = new Path("/user/input");
       Path outDir = new Path("/user/output");
-      JobConf job =
-        TestMiniMRWithDFSWithDistinctUsers.createJobConf(mr, user1);
+      final JobConf job = mr.createJobConf();
 
       UtilsForTests.configureWaitingJobConf(job, inDir, outDir, 2, 0,
           "test-submit-job", mapSignalFile.toString(),
@@ -213,16 +212,24 @@ public class TestSubmitJob extends TestC
       job.set(UtilsForTests.getTaskSignalParameter(false),
           reduceSignalFile.toString());
       LOG.info("Submit job as the actual user (" + user1.getUserName() + ")");
-      JobClient jClient = new JobClient(job);
-      RunningJob rJob = jClient.submitJob(job);
+      final JobClient jClient =
+        user1.doAs(new PrivilegedExceptionAction<JobClient>() {
+          public JobClient run() throws IOException {
+            return new JobClient(job);
+          }
+        });
+      RunningJob rJob = user1.doAs(new PrivilegedExceptionAction<RunningJob>() {
+        public RunningJob run() throws IOException {
+          return jClient.submitJob(job);
+        }
+      });
       JobID id = rJob.getID();
       LOG.info("Running job " + id);
 
       // create user2
-      UnixUserGroupInformation user2 =
+      UserGroupInformation user2 =
         TestMiniMRWithDFSWithDistinctUsers.createUGI("user2", false);
-      JobConf conf_other =
-        TestMiniMRWithDFSWithDistinctUsers.createJobConf(mr, user2);
+      JobConf conf_other = mr.createJobConf();
       org.apache.hadoop.hdfs.protocol.ClientProtocol client =
         getDFSClient(conf_other, user2);
 

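A side effect of the doAs rewrite, visible throughout TestSubmitJob (final JobConf job, final JobClient jClient, final MiniDFSCluster dfs), is that every local crossing into a PrivilegedExceptionAction must be declared final: anonymous inner classes in Java 5/6 can only capture final locals. A minimal sketch (the greeting variable is invented for illustration):

    import java.security.PrivilegedExceptionAction;

    import org.apache.hadoop.security.UserGroupInformation;

    public class FinalCaptureSketch {
      public static void main(String[] args) throws Exception {
        final String greeting = "hello";  // must be final to be captured below
        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
        String out = ugi.doAs(new PrivilegedExceptionAction<String>() {
          public String run() {
            return greeting + " from " + Thread.currentThread().getName();
          }
        });
        System.out.println(out);
      }
    }

This is also why jClient is constructed in one doAs and reused in the next: both actions must run as user1, but the client only has to be built once.
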
Modified: hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/mapred/TestTaskTrackerLocalization.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/mapred/TestTaskTrackerLocalization.java?rev=1077137&r1=1077136&r2=1077137&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/mapred/TestTaskTrackerLocalization.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/mapred/TestTaskTrackerLocalization.java Fri Mar  4 03:44:54 2011
@@ -116,8 +116,6 @@ public class TestTaskTrackerLocalization
     // Create the job configuration file. Same as trackerConf in this test.
     JobConf jobConf = trackerFConf;
 
-    // JobClient sets the job credentials.
-    new JobClient().setUGIAndUserGroupNames(jobConf);
 
     // JobClient uploads the job jar to the file system and sets it in the
     // jobConf.
@@ -131,11 +129,12 @@ public class TestTaskTrackerLocalization
     tracker.setConf(trackerFConf);
 
     // for test case system FS is the local FS
+
     tracker.systemFS = FileSystem.getLocal(trackerFConf);
     tracker.systemDirectory = new Path(TEST_ROOT_DIR.getAbsolutePath());
     tracker.setLocalFileSystem(tracker.systemFS);
     
-    taskTrackerUGI = UserGroupInformation.login(trackerFConf);
+    taskTrackerUGI = UserGroupInformation.getCurrentUser();
 
     // Set up the task to be localized
     String jtIdentifier = "200907202331";
@@ -145,6 +144,7 @@ public class TestTaskTrackerLocalization
     task =
         new MapTask(jobConfFile.toURI().toString(), taskId, 1, null, 1);
     task.setConf(jobConf); // Set conf. Set user name in particular.
+    task.setUser(UserGroupInformation.getCurrentUser().getUserName());
 
     // create jobTokens file
     uploadJobTokensFile();
@@ -350,7 +350,7 @@ public class TestTaskTrackerLocalization
    * @throws IOException
    */
   public void testJobLocalization()
-      throws IOException {
+      throws Exception {
     if (!canRun()) {
       return;
     }
@@ -446,7 +446,7 @@ public class TestTaskTrackerLocalization
    * @throws IOException
    */
   public void testTaskLocalization()
-      throws IOException {
+      throws Exception {
     if (!canRun()) {
       return;
     }
@@ -619,7 +619,7 @@ public class TestTaskTrackerLocalization
    * @throws IOException
    */
   public void testTaskCleanup()
-      throws IOException {
+      throws Exception {
     if (!canRun()) {
       return;
     }
@@ -631,7 +631,7 @@ public class TestTaskTrackerLocalization
    * @throws IOException
    */
   public void testFailedTaskCleanup()
-  throws IOException {
+  throws Exception {
     if (!canRun()) {
       return;
     }
@@ -643,7 +643,7 @@ public class TestTaskTrackerLocalization
    * @throws IOException
    */
   public void testTaskCleanupWithJvmUse()
-      throws IOException {
+      throws Exception {
     if (!canRun()) {
       return;
     }
@@ -654,7 +654,7 @@ public class TestTaskTrackerLocalization
    * Validates if task cleanup is done properly
    */
   private void testTaskCleanup(boolean needCleanup, boolean jvmReuse)
-      throws IOException {
+      throws Exception {
     // Localize job and localize task.
     tracker.getLocalizer().initializeUserDirs(task.getUser());
     localizedJobConf = tracker.localizeJobFiles(task);

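The signature churn in TestTaskTrackerLocalization (throws IOException becoming throws Exception) follows from the new API: UserGroupInformation.doAs(PrivilegedExceptionAction) declares both IOException and InterruptedException, so test helpers that end up calling it either widen their throws clause or unwrap exceptions locally. A sketch of the widened shape (the helper name is invented):

    import java.io.IOException;
    import java.security.PrivilegedExceptionAction;

    import org.apache.hadoop.security.UserGroupInformation;

    public class ThrowsSketch {
      // "throws Exception" keeps the call site one line instead of
      // handling InterruptedException separately in every helper.
      static String currentUserVia(UserGroupInformation ugi) throws Exception {
        return ugi.doAs(new PrivilegedExceptionAction<String>() {
          public String run() throws IOException {
            return UserGroupInformation.getCurrentUser().getUserName();
          }
        });
      }

      public static void main(String[] args) throws Exception {
        System.out.println(currentUserVia(UserGroupInformation.getCurrentUser()));
      }
    }
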
Modified: hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/security/TestGroupMappingServiceRefresh.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/security/TestGroupMappingServiceRefresh.java?rev=1077137&r1=1077136&r2=1077137&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/security/TestGroupMappingServiceRefresh.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/security/TestGroupMappingServiceRefresh.java Fri Mar  4 03:44:54 2011
@@ -46,6 +46,7 @@ public class TestGroupMappingServiceRefr
     
     @Override
     public List<String> getGroups(String user) throws IOException {
+      System.err.println("Getting groups in MockUnixGroupsMapping");
       String g1 = user + (10 * i + 1);
       String g2 = user + (10 * i + 2);
       List<String> l = new ArrayList<String>(2);
@@ -63,6 +64,7 @@ public class TestGroupMappingServiceRefr
         TestGroupMappingServiceRefresh.MockUnixGroupsMapping.class,
         GroupMappingServiceProvider.class);
     config.setLong("hadoop.security.groups.cache.secs", groupRefreshTimeoutSec);
+    Groups.getUserToGroupsMappingService(config);
     
     FileSystem.setDefaultUri(config, "hdfs://localhost:" + "0");
     cluster = new MiniDFSCluster(0, config, 1, true, true, true,  null, null, null, null);
@@ -80,8 +82,8 @@ public class TestGroupMappingServiceRefr
   public void testGroupMappingRefresh() throws Exception {
     DFSAdmin admin = new DFSAdmin(config);
     String [] args =  new String[]{"-refreshUserToGroupsMappings"};
-    Groups groups = SecurityUtil.getUserToGroupsMappingService(config);
-    String user = UnixUserGroupInformation.getUnixUserName();
+    Groups groups = Groups.getUserToGroupsMappingService(config);
+    String user = UserGroupInformation.getCurrentUser().getUserName();
     System.out.println("first attempt:");
     List<String> g1 = groups.getGroups(user);
     String [] str_groups = new String [g1.size()];
@@ -101,7 +103,8 @@ public class TestGroupMappingServiceRefr
     g3.toArray(str_groups);
     System.out.println(Arrays.toString(str_groups));
     for(int i=0; i<g3.size(); i++) {
-      assertFalse("Should be different group ", g1.get(i).equals(g3.get(i)));
+      assertFalse("Should be different group: " + g1.get(i) + " and " + g3.get(i), 
+          g1.get(i).equals(g3.get(i)));
     }
     
     // test time out

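Two details in the TestGroupMappingServiceRefresh hunks are easy to miss: group lookup now goes through the Groups.getUserToGroupsMappingService(config) singleton rather than SecurityUtil, and the test warms that singleton up during setup so the mock provider is the one that gets cached (subject to hadoop.security.groups.cache.secs). A minimal provider sketch, assuming, as the mock above suggests, that getGroups is the interface's only method at this revision (the class name and group strings are invented):

    import java.io.IOException;
    import java.util.Arrays;
    import java.util.List;

    import org.apache.hadoop.security.GroupMappingServiceProvider;

    public class StaticGroupsMapping implements GroupMappingServiceProvider {
      @Override
      public List<String> getGroups(String user) throws IOException {
        // Every user resolves to the same deterministic pair of groups.
        return Arrays.asList(user + "-primary", "everyone");
      }

      public static void main(String[] args) throws IOException {
        System.out.println(new StaticGroupsMapping().getGroups("alice"));
      }
    }
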
Modified: hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/security/TestMapredGroupMappingServiceRefresh.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/security/TestMapredGroupMappingServiceRefresh.java?rev=1077137&r1=1077136&r2=1077137&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/security/TestMapredGroupMappingServiceRefresh.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/security/TestMapredGroupMappingServiceRefresh.java Fri Mar  4 03:44:54 2011
@@ -76,6 +76,7 @@ public class TestMapredGroupMappingServi
         ShellBasedUnixGroupsMapping.class,GroupMappingServiceProvider.class).
         getName());
     
+    Groups.getUserToGroupsMappingService(config);
     String namenodeUrl = "hdfs://localhost:" + "0";
     FileSystem.setDefaultUri(config, namenodeUrl);
     
@@ -102,8 +103,8 @@ public class TestMapredGroupMappingServi
     MRAdmin admin = new MRAdmin(config);
     String [] args = new String[] { "-refreshUserToGroupsMappings" };
     
-    Groups groups = SecurityUtil.getUserToGroupsMappingService(config);
-    String user = UnixUserGroupInformation.getUnixUserName();
+    Groups groups = Groups.getUserToGroupsMappingService(config);
+    String user = UserGroupInformation.getLoginUser().getShortUserName();
     System.out.println("first attempt:");
     List<String> g1 = groups.getGroups(user);
     String [] str_groups = new String [g1.size()];

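Note the asymmetry between the two refresh tests: the HDFS one resolves the user with getCurrentUser().getUserName(), the MapReduce one with getLoginUser().getShortUserName(). The short name strips any Kerberos instance and realm, which matters once principals are in play. A sketch (the principal string is invented; the expected output follows the rules pinned down in TestUserGroupInformation below):

    import org.apache.hadoop.security.UserGroupInformation;

    public class NameFormsSketch {
      public static void main(String[] args) throws Exception {
        UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
            "alice/admin@EXAMPLE.COM", new String[]{"staff"});
        System.out.println(ugi.getUserName());       // alice/admin@EXAMPLE.COM
        System.out.println(ugi.getShortUserName());  // alice
      }
    }
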
Modified: hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/security/TestPermission.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/security/TestPermission.java?rev=1077137&r1=1077136&r2=1077137&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/security/TestPermission.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/security/TestPermission.java Fri Mar  4 03:44:54 2011
@@ -22,13 +22,12 @@ import java.util.Random;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.fs.*;
 import org.apache.hadoop.fs.permission.*;
 import org.apache.hadoop.util.StringUtils;
-import org.apache.log4j.Level;
 
 import junit.framework.TestCase;
 
@@ -36,10 +35,6 @@ import junit.framework.TestCase;
 public class TestPermission extends TestCase {
   public static final Log LOG = LogFactory.getLog(TestPermission.class);
 
-  {
-    ((Log4JLogger)UserGroupInformation.LOG).getLogger().setLevel(Level.ALL);
-  }
-
   final private static Path ROOT_PATH = new Path("/data");
   final private static Path CHILD_DIR1 = new Path(ROOT_PATH, "child1");
   final private static Path CHILD_DIR2 = new Path(ROOT_PATH, "child2");
@@ -117,7 +112,7 @@ public class TestPermission extends Test
   }
 
   public void testFilePermision() throws Exception {
-    Configuration conf = new Configuration();
+    final Configuration conf = new Configuration();
     conf.setBoolean("dfs.permissions", true);
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
     cluster.waitActive();
@@ -160,11 +155,10 @@ public class TestPermission extends Test
 
       ////////////////////////////////////////////////////////////////
       // test illegal file/dir creation
-      UnixUserGroupInformation userGroupInfo = new UnixUserGroupInformation(
-          USER_NAME, GROUP_NAMES );
-      UnixUserGroupInformation.saveToConf(conf,
-          UnixUserGroupInformation.UGI_PROPERTY_NAME, userGroupInfo);
-      FileSystem userfs = FileSystem.get(conf);
+      UserGroupInformation userGroupInfo = 
+        UserGroupInformation.createUserForTesting(USER_NAME, GROUP_NAMES );
+      
+      FileSystem userfs = DFSTestUtil.getFileSystemAs(userGroupInfo, conf);
 
       // make sure mkdir of a existing directory that is not owned by 
       // this user does not throw an exception.

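DFSTestUtil.getFileSystemAs(userGroupInfo, conf) replaces the old trick of saving a UGI into the Configuration before calling FileSystem.get. Its implementation is not shown in this diff; the assumption here is that it is a thin doAs wrapper of the following shape, which is enough to reproduce the behavior the test needs, namely per-user FileSystem handles against the same conf:

    import java.io.IOException;
    import java.security.PrivilegedExceptionAction;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.security.UserGroupInformation;

    public class GetFsAsSketch {
      // Assumed shape of DFSTestUtil.getFileSystemAs, not its actual source.
      static FileSystem getFileSystemAs(UserGroupInformation ugi,
          final Configuration conf) throws IOException, InterruptedException {
        return ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
          public FileSystem run() throws IOException {
            // On the security branch the FileSystem cache keys on the UGI,
            // so this handle should act as that user from here on.
            return FileSystem.get(conf);
          }
        });
      }

      public static void main(String[] args) throws Exception {
        UserGroupInformation ugi = UserGroupInformation
            .createUserForTesting("bob", new String[]{"users"});
        System.out.println(getFileSystemAs(ugi, new Configuration()).getUri());
      }
    }
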
Added: hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/security/TestUserGroupInformation.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/security/TestUserGroupInformation.java?rev=1077137&view=auto
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/security/TestUserGroupInformation.java (added)
+++ hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/security/TestUserGroupInformation.java Fri Mar  4 03:44:54 2011
@@ -0,0 +1,189 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.security;
+
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import static org.mockito.Mockito.mock;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.junit.Test;
+
+public class TestUserGroupInformation {
+  final private static String USER_NAME = "user1@HADOOP.APACHE.ORG";
+  final private static String GROUP1_NAME = "group1";
+  final private static String GROUP2_NAME = "group2";
+  final private static String GROUP3_NAME = "group3";
+  final private static String[] GROUP_NAMES = 
+    new String[]{GROUP1_NAME, GROUP2_NAME, GROUP3_NAME};
+
+  /**
+   * Given a user name, get all of its groups.
+   * Needs to happen before creating the test users.
+   */
+  @Test
+  public void testGetServerSideGroups() throws IOException,
+                                               InterruptedException {
+    // get the user name
+    Process pp = Runtime.getRuntime().exec("whoami");
+    BufferedReader br = new BufferedReader
+                          (new InputStreamReader(pp.getInputStream()));
+    String userName = br.readLine().trim();
+    // get the groups
+    pp = Runtime.getRuntime().exec("id -Gn");
+    br = new BufferedReader(new InputStreamReader(pp.getInputStream()));
+    String line = br.readLine();
+    System.out.println(userName + ":" + line);
+   
+    List<String> groups = new ArrayList<String> ();    
+    for(String s: line.split("[\\s]")) {
+      groups.add(s);
+    }
+    
+    final UserGroupInformation login = UserGroupInformation.getCurrentUser();
+    assertEquals(userName, login.getShortUserName());
+    String[] gi = login.getGroupNames();
+    assertEquals(groups.size(), gi.length);
+    for(int i=0; i < gi.length; i++) {
+      assertEquals(groups.get(i), gi[i]);
+    }
+    
+    final UserGroupInformation fakeUser = 
+      UserGroupInformation.createRemoteUser("foo.bar");
+    fakeUser.doAs(new PrivilegedExceptionAction<Object>(){
+      @Override
+      public Object run() throws IOException {
+        UserGroupInformation current = UserGroupInformation.getCurrentUser();
+        assertFalse(current.equals(login));
+        assertEquals(current, fakeUser);
+        assertEquals(0, current.getGroupNames().length);
+        return null;
+      }});
+  }
+
+  /** Test login method */
+  @Test
+  public void testLogin() throws Exception {
+    // login from unix
+    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+    assertEquals(UserGroupInformation.getCurrentUser(),
+                 UserGroupInformation.getLoginUser());
+    assertTrue(ugi.getGroupNames().length >= 1);
+
+    // ensure that doAs works correctly
+    UserGroupInformation userGroupInfo = 
+      UserGroupInformation.createUserForTesting(USER_NAME, GROUP_NAMES);
+    UserGroupInformation curUGI = 
+      userGroupInfo.doAs(new PrivilegedExceptionAction<UserGroupInformation>(){
+        public UserGroupInformation run() throws IOException {
+          return UserGroupInformation.getCurrentUser();
+        }});
+    // make sure in the scope of the doAs, the right user is current
+    assertEquals(curUGI, userGroupInfo);
+    // make sure it is not the same as the login user
+    assertFalse(curUGI.equals(UserGroupInformation.getLoginUser()));
+  }
+
+  /** test constructor */
+  @Test
+  public void testConstructor() throws Exception {
+    UserGroupInformation ugi = 
+      UserGroupInformation.createUserForTesting("user2/cron@HADOOP.APACHE.ORG", 
+                                                GROUP_NAMES);
+    // make sure the short and full user names are correct
+    assertEquals("user2/cron@HADOOP.APACHE.ORG", ugi.getUserName());
+    assertEquals("user2", ugi.getShortUserName());
+    ugi = UserGroupInformation.createUserForTesting(USER_NAME, GROUP_NAMES);
+    assertEquals("user1", ugi.getShortUserName());
+    
+    // failure test
+    testConstructorFailures(null);
+    testConstructorFailures("");
+  }
+
+  private void testConstructorFailures(String userName) {
+    boolean gotException = false;
+    try {
+      UserGroupInformation.createRemoteUser(userName);
+    } catch (Exception e) {
+      gotException = true;
+    }
+    assertTrue(gotException);
+  }
+
+  @Test
+  public void testEquals() throws Exception {
+    UserGroupInformation uugi = 
+      UserGroupInformation.createUserForTesting(USER_NAME, GROUP_NAMES);
+
+    assertEquals(uugi, uugi);
+    // The subjects should be equal, so this should work
+    assertTrue(uugi.equals(
+                 UserGroupInformation.createUserForTesting
+                   (USER_NAME, GROUP_NAMES)));
+    // ensure that different UGI with the same subject are equal
+    assertEquals(uugi, new UserGroupInformation(uugi.getSubject()));
+  }
+  
+  @Test
+  public void testGettingGroups() throws Exception {
+    UserGroupInformation uugi = 
+      UserGroupInformation.createUserForTesting(USER_NAME, GROUP_NAMES);
+    assertEquals(USER_NAME, uugi.getUserName());
+    assertArrayEquals(new String[]{GROUP1_NAME, GROUP2_NAME, GROUP3_NAME},
+                      uugi.getGroupNames());
+  }
+  
+  @SuppressWarnings("unchecked") // from Mockito mocks
+  @Test
+  public void testUGITokens() {
+    UserGroupInformation ugi = 
+      UserGroupInformation.createUserForTesting("TheDoctor", 
+                                                new String [] { "TheTARDIS"});
+    Token t1 = mock(Token.class);
+    Token t2 = mock(Token.class);
+    
+    ugi.addToken(t1);
+    ugi.addToken(t2);
+    
+    Collection<Token<? extends TokenIdentifier>> z = ugi.getTokens();
+    assertTrue(z.contains(t1));
+    assertTrue(z.contains(t2));
+    assertEquals(2, z.size());
+    
+    try {
+      z.remove(t1);
+      fail("Shouldn't be able to modify token collection from UGI");
+    } catch(UnsupportedOperationException uoe) {
+      // Can't modify tokens
+    }
+  }
+}

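The last test in the new file pins down that getTokens() hands back a read-only view: tokens are added through ugi.addToken, never through the returned collection. The JDK-only sketch below shows the same contract; whether UGI actually uses Collections.unmodifiableCollection internally is an assumption, the observable behavior is what the test asserts:

    import java.util.ArrayList;
    import java.util.Collection;
    import java.util.Collections;
    import java.util.List;

    public class UnmodifiableSketch {
      public static void main(String[] args) {
        List<String> tokens = new ArrayList<String>();
        tokens.add("t1");
        Collection<String> view = Collections.unmodifiableCollection(tokens);
        try {
          view.remove("t1");  // must throw on the read-only wrapper
        } catch (UnsupportedOperationException expected) {
          System.out.println("read-only, as TestUserGroupInformation expects");
        }
      }
    }
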
Added: hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/security/authorize/TestAccessControlList.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/security/authorize/TestAccessControlList.java?rev=1077137&view=auto
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/security/authorize/TestAccessControlList.java (added)
+++ hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/security/authorize/TestAccessControlList.java Fri Mar  4 03:44:54 2011
@@ -0,0 +1,105 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.security.authorize;
+
+import java.util.Iterator;
+import java.util.Set;
+
+import org.apache.hadoop.security.authorize.AccessControlList;
+
+
+import junit.framework.TestCase;
+
+public class TestAccessControlList extends TestCase {
+  
+  public void testWildCardAccessControlList() throws Exception {
+    AccessControlList acl;
+    
+    acl = new AccessControlList("*");
+    assertTrue(acl.isAllAllowed());
+    
+    acl = new AccessControlList("  * ");
+    assertTrue(acl.isAllAllowed());
+    
+    acl = new AccessControlList(" *");
+    assertTrue(acl.isAllAllowed());
+    
+    acl = new AccessControlList("*  ");
+    assertTrue(acl.isAllAllowed());
+  }
+  
+  public void testAccessControlList() throws Exception {
+    AccessControlList acl;
+    Set<String> users;
+    Set<String> groups;
+    
+    acl = new AccessControlList("drwho tardis");
+    users = acl.getUsers();
+    assertEquals(users.size(), 1);
+    assertEquals(users.iterator().next(), "drwho");
+    groups = acl.getGroups();
+    assertEquals(groups.size(), 1);
+    assertEquals(groups.iterator().next(), "tardis");
+    
+    acl = new AccessControlList("drwho");
+    users = acl.getUsers();
+    assertEquals(users.size(), 1);
+    assertEquals(users.iterator().next(), "drwho");
+    groups = acl.getGroups();
+    assertEquals(groups.size(), 0);
+    
+    acl = new AccessControlList("drwho ");
+    users = acl.getUsers();
+    assertEquals(users.size(), 1);
+    assertEquals(users.iterator().next(), "drwho");
+    groups = acl.getGroups();
+    assertEquals(groups.size(), 0);
+    
+    acl = new AccessControlList(" tardis");
+    users = acl.getUsers();
+    assertEquals(users.size(), 0);
+    groups = acl.getGroups();
+    assertEquals(groups.size(), 1);
+    assertEquals(groups.iterator().next(), "tardis");
+
+    Iterator<String> iter;
+    acl = new AccessControlList("drwho,joe tardis,users");
+    users = acl.getUsers();
+    assertEquals(users.size(), 2);
+    iter = users.iterator();
+    assertEquals(iter.next(), "drwho");
+    assertEquals(iter.next(), "joe");
+    groups = acl.getGroups();
+    assertEquals(groups.size(), 2);
+    iter = groups.iterator();
+    assertEquals(iter.next(), "tardis");
+    assertEquals(iter.next(), "users");
+    
+    acl = new AccessControlList("drwho,joe tardis, users");
+    users = acl.getUsers();
+    assertEquals(users.size(), 2);
+    iter = users.iterator();
+    assertEquals(iter.next(), "drwho");
+    assertEquals(iter.next(), "joe");
+    groups = acl.getGroups();
+    assertEquals(groups.size(), 2);
+    iter = groups.iterator();
+    assertEquals(iter.next(), "tardis");
+    assertEquals(iter.next(), "users");
+  }
+}

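Read together, the new assertions define the ACL string grammar: a lone "*" (whitespace tolerated) grants everyone; otherwise the string is "users groups", where either comma-separated list may be empty and a leading space means groups only. A from-scratch parser sketch that satisfies every case above (this is not the AccessControlList internals, just the grammar it must implement):

    import java.util.LinkedHashSet;
    import java.util.Set;

    public class AclParseSketch {
      public static void main(String[] args) {
        for (String acl : new String[]{"  * ", "drwho tardis", " tardis",
                                       "drwho,joe tardis, users"}) {
          if (acl.trim().equals("*")) {            // wildcard: all allowed
            System.out.println("[" + acl + "] -> all allowed");
            continue;
          }
          String[] parts = acl.split("\\s+", 2);   // users part, groups part
          Set<String> users = csv(parts[0]);
          Set<String> groups = parts.length > 1 ? csv(parts[1])
                                                : new LinkedHashSet<String>();
          System.out.println("[" + acl + "] -> users=" + users
              + " groups=" + groups);
        }
      }

      private static Set<String> csv(String s) {
        Set<String> out = new LinkedHashSet<String>();
        for (String t : s.split(",")) {
          if (t.trim().length() > 0) {
            out.add(t.trim());
          }
        }
        return out;
      }
    }
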
Modified: hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/security/authorize/TestServiceLevelAuthorization.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/security/authorize/TestServiceLevelAuthorization.java?rev=1077137&r1=1077136&r2=1077137&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/security/authorize/TestServiceLevelAuthorization.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/security/authorize/TestServiceLevelAuthorization.java Fri Mar  4 03:44:54 2011
@@ -20,6 +20,7 @@ package org.apache.hadoop.security.autho
 import java.io.File;
 import java.io.FileWriter;
 import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -32,7 +33,7 @@ import org.apache.hadoop.ipc.RemoteExcep
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.MiniMRCluster;
 import org.apache.hadoop.mapred.TestMiniMRWithDFS;
-import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
 
 import junit.framework.TestCase;
@@ -105,7 +106,7 @@ public class TestServiceLevelAuthorizati
       final int slaves = 4;
 
       // Turn on service-level authorization
-      Configuration conf = new Configuration();
+      final Configuration conf = new Configuration();
       conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG, 
                     HDFSPolicyProvider.class, PolicyProvider.class);
       conf.setBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, 
@@ -120,12 +121,13 @@ public class TestServiceLevelAuthorizati
       // Simulate an 'edit' of hadoop-policy.xml
       String confDir = System.getProperty("test.build.extraconf", 
                                           "build/test/extraconf");
-      File policyFile = new File(confDir, ConfiguredPolicy.HADOOP_POLICY_FILE);
-      String policyFileCopy = ConfiguredPolicy.HADOOP_POLICY_FILE + ".orig";
+      String HADOOP_POLICY_FILE = System.getProperty("hadoop.policy.file");
+      File policyFile = new File(confDir, HADOOP_POLICY_FILE);
+      String policyFileCopy = HADOOP_POLICY_FILE + ".orig";
       FileUtil.copy(policyFile, FileSystem.getLocal(conf),   // first save original 
                     new Path(confDir, policyFileCopy), false, conf);
       rewriteHadoopPolicyFile(                               // rewrite the file
-          new File(confDir, ConfiguredPolicy.HADOOP_POLICY_FILE));
+          new File(confDir, HADOOP_POLICY_FILE));
       
       // Refresh the service level authorization policy
       refreshPolicy(conf);
@@ -135,17 +137,23 @@ public class TestServiceLevelAuthorizati
       try {
         // Note: hadoop-policy.xml for tests has 
         // security.refresh.policy.protocol.acl = ${user.name}
-        conf.set(UnixUserGroupInformation.UGI_PROPERTY_NAME, UNKNOWN_USER);
-        refreshPolicy(conf);
+        UserGroupInformation unknownUser = 
+          UserGroupInformation.createRemoteUser("unknown");
+        unknownUser.doAs(new PrivilegedExceptionAction<Void>() {
+          public Void run() throws IOException {
+            refreshPolicy(conf);
+            return null;
+          }
+        });
         fail("Refresh of NameNode's policy file cannot be successful!");
-      } catch (RemoteException re) {
+      } catch (Exception re) {
         System.out.println("Good, refresh worked... refresh failed with: " + 
-                           StringUtils.stringifyException(re.unwrapRemoteException()));
+                           StringUtils.stringifyException(re));
       } finally {
         // Reset to original hadoop-policy.xml
         FileUtil.fullyDelete(new File(confDir, 
-            ConfiguredPolicy.HADOOP_POLICY_FILE));
-        FileUtil.replaceFile(new File(confDir, policyFileCopy), new File(confDir, ConfiguredPolicy.HADOOP_POLICY_FILE));
+            HADOOP_POLICY_FILE));
+        FileUtil.replaceFile(new File(confDir, policyFileCopy), new File(confDir, HADOOP_POLICY_FILE));
       }
     } finally {
       if (dfs != null) { dfs.shutdown(); }

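Two things change shape in TestServiceLevelAuthorization: the policy file name now comes from the hadoop.policy.file system property instead of a ConfiguredPolicy constant, and the negative test runs refreshPolicy under a createRemoteUser UGI, catching any Exception since the failure may no longer surface as a RemoteException once it crosses doAs. The expect-denial pattern in isolation (refreshPolicy here is a stand-in stub, not the real admin RPC):

    import java.io.IOException;
    import java.security.PrivilegedExceptionAction;

    import org.apache.hadoop.security.UserGroupInformation;

    public class ExpectDenialSketch {
      // Stand-in for the admin RPC that the real test issues.
      static void refreshPolicy() throws IOException {
        throw new IOException("access denied for current user");
      }

      public static void main(String[] args) throws Exception {
        UserGroupInformation unknown =
            UserGroupInformation.createRemoteUser("unknown");
        try {
          unknown.doAs(new PrivilegedExceptionAction<Void>() {
            public Void run() throws IOException {
              refreshPolicy();
              return null;
            }
          });
          // Like fail(): an Error, so the catch below cannot swallow it.
          throw new AssertionError("refresh must not succeed as 'unknown'");
        } catch (Exception expected) {
          System.out.println("denied as expected: " + expected.getMessage());
        }
      }
    }
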
Modified: hadoop/common/branches/branch-0.20-security-patches/src/tools/org/apache/hadoop/tools/DistCh.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/tools/org/apache/hadoop/tools/DistCh.java?rev=1077137&r1=1077136&r2=1077137&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/tools/org/apache/hadoop/tools/DistCh.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/tools/org/apache/hadoop/tools/DistCh.java Fri Mar  4 03:44:54 2011
@@ -425,8 +425,12 @@ public class DistCh extends DistTool {
     final String randomId = getRandomId();
     JobClient jClient = new JobClient(jobconf);
     Path stagingArea;
-    stagingArea = JobSubmissionFiles.getStagingDir(
-                     jClient, jobconf);
+    try {
+      stagingArea = JobSubmissionFiles.getStagingDir(
+                       jClient, jobconf);
+    } catch (InterruptedException e) {
+      throw new IOException(e);
+    }
     Path jobdir = new Path(stagingArea + NAME + "_" + randomId);
     FsPermission mapredSysPerms =
       new FsPermission(JobSubmissionFiles.JOB_DIR_PERMISSION);
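
The DistCh hunk shows the other side of the API change: JobSubmissionFiles.getStagingDir now declares InterruptedException, and callers whose contract is IOException-only wrap rather than widen. A sketch of that wrap-and-rethrow shape (restoring the interrupt flag is a common refinement, not something this hunk does):

    import java.io.IOException;

    public class WrapInterruptSketch {
      static void mayBlock() throws InterruptedException {
        Thread.sleep(1);
      }

      static void ioOnly() throws IOException {
        try {
          mayBlock();
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();  // keep the interrupt visible
          throw new IOException(e);            // preserve the cause chain
        }
      }

      public static void main(String[] args) throws IOException {
        ioOnly();
        System.out.println("done");
      }
    }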