Posted to mapreduce-commits@hadoop.apache.org by at...@apache.org on 2011/11/15 03:39:22 UTC

svn commit: r1202013 [3/4] - in /hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project: ./ conf/ hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/ hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/mai...

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java?rev=1202013&r1=1202012&r2=1202013&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java Tue Nov 15 02:39:13 2011
@@ -63,13 +63,15 @@ import org.apache.hadoop.yarn.event.Drai
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat;
+import org.apache.hadoop.yarn.logaggregation.ContainerLogsRetentionPolicy;
+import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogKey;
+import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogReader;
 import org.apache.hadoop.yarn.server.nodemanager.CMgrCompletedAppsEvent;
 import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerManagerTest;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEventType;
-import org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation.AggregatedLogFormat.LogKey;
-import org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation.AggregatedLogFormat.LogReader;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerAppFinishedEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerAppStartedEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerContainerFinishedEvent;

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/TestNonAggregatingLogHandler.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/TestNonAggregatingLogHandler.java?rev=1202013&r1=1202012&r2=1202013&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/TestNonAggregatingLogHandler.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/TestNonAggregatingLogHandler.java Tue Nov 15 02:39:13 2011
@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler;
 
 import static org.mockito.Matchers.any;
@@ -18,10 +35,10 @@ import org.apache.hadoop.yarn.conf.YarnC
 import org.apache.hadoop.yarn.event.Dispatcher;
 import org.apache.hadoop.yarn.event.DrainDispatcher;
 import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.logaggregation.ContainerLogsRetentionPolicy;
 import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEventType;
-import org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation.ContainerLogsRetentionPolicy;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerAppFinishedEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerAppStartedEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerContainerFinishedEvent;

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestProcessIdFileReader.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestProcessIdFileReader.java?rev=1202013&r1=1202012&r2=1202013&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestProcessIdFileReader.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestProcessIdFileReader.java Tue Nov 15 02:39:13 2011
@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.hadoop.yarn.server.nodemanager.util;
 
 import static org.junit.Assert.*;

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java?rev=1202013&r1=1202012&r2=1202013&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java Tue Nov 15 02:39:13 2011
@@ -278,7 +278,8 @@ public class RMAppManager implements Eve
       // Setup tokens for renewal
       if (UserGroupInformation.isSecurityEnabled()) {
         this.rmContext.getDelegationTokenRenewer().addApplication(
-            applicationId,parseCredentials(submissionContext)
+            applicationId,parseCredentials(submissionContext),
+            submissionContext.getCancelTokensWhenComplete()
             );
       }      
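
The new third argument comes straight from the application's submission
context, so a client whose delegation tokens must outlive the job (a
long-running workflow service, for example) opts out of cancellation at
submission time. A minimal sketch, assuming the record-style setter that
pairs with the getCancelTokensWhenComplete() getter used above:

    // Hypothetical client-side submission: keep delegation tokens valid
    // after the application completes (names are illustrative).
    ApplicationSubmissionContext submissionContext =
        recordFactory.newRecordInstance(ApplicationSubmissionContext.class);
    submissionContext.setCancelTokensWhenComplete(false); // assumed setter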
       

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java?rev=1202013&r1=1202012&r2=1202013&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java Tue Nov 15 02:39:13 2011
@@ -595,8 +595,13 @@ public class RMAppAttemptImpl implements
           AM_CONTAINER_PRIORITY, "*", appAttempt.submissionContext
               .getAMContainerSpec().getResource(), 1);
 
-      appAttempt.scheduler.allocate(appAttempt.applicationAttemptId,
-          Collections.singletonList(request), EMPTY_CONTAINER_RELEASE_LIST);
+      Allocation amContainerAllocation = 
+          appAttempt.scheduler.allocate(appAttempt.applicationAttemptId,
+              Collections.singletonList(request), EMPTY_CONTAINER_RELEASE_LIST);
+      if (amContainerAllocation != null
+          && amContainerAllocation.getContainers() != null) {
+        assert(amContainerAllocation.getContainers().size() == 0);
+      }
     }
   }
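
The assert documents the scheduler contract relied on here: the first
allocate() call merely registers the AM's ResourceRequest, and containers are
assigned asynchronously on later node updates, so an empty list is expected.
A minimal sketch of that contract, with illustrative names:

    // The first call only registers the request; allocation happens on
    // subsequent node heartbeats, so no containers should come back yet.
    Allocation first = scheduler.allocate(attemptId,
        Collections.singletonList(amRequest), EMPTY_CONTAINER_RELEASE_LIST);
    assert first.getContainers() == null || first.getContainers().isEmpty();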
 

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java?rev=1202013&r1=1202012&r2=1202013&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java Tue Nov 15 02:39:13 2011
@@ -245,16 +245,45 @@ public class LeafQueue implements CSQueu
       aclsString.append(e.getKey() + ":" + e.getValue().getAclString());
     }
 
-    LOG.info("Initializing " + queueName +
-        ", capacity=" + capacity + 
-        ", asboluteCapacity=" + absoluteCapacity + 
-        ", maxCapacity=" + maxCapacity +
-        ", asboluteMaxCapacity=" + absoluteMaxCapacity +
-        ", userLimit=" + userLimit + ", userLimitFactor=" + userLimitFactor + 
-        ", maxApplications=" + maxApplications + 
-        ", maxApplicationsPerUser=" + maxApplicationsPerUser + 
-        ", state=" + state +
-        ", acls=" + aclsString);
+    LOG.info("Initializing " + queueName + "\n" +
+        "capacity = " + capacity +
+        " [= (float) configuredCapacity / 100 ]" + "\n" + 
+        "asboluteCapacity = " + absoluteCapacity +
+        " [= parentAbsoluteCapacity * capacity ]" + "\n" +
+        "maxCapacity = " + maxCapacity +
+        " [= configuredMaxCapacity ]" + "\n" +
+        "absoluteMaxCapacity = " + absoluteMaxCapacity +
+        " [= Float.MAX_VALUE if maximumCapacity undefined, " +
+        "(parentAbsoluteCapacity * maximumCapacity) / 100 otherwise ]" + "\n" +
+        "userLimit = " + userLimit +
+        " [= configuredUserLimit ]" + "\n" +
+        "userLimitFactor = " + userLimitFactor +
+        " [= configuredUserLimitFactor ]" + "\n" +
+        "maxApplications = " + maxApplications +
+        " [= (int)(configuredMaximumSystemApplications * absoluteCapacity) ]" + "\n" +
+        "maxApplicationsPerUser = " + maxApplicationsPerUser +
+        " [= (int)(maxApplications * (userLimit / 100.0f) * userLimitFactor) ]" + "\n" +
+        "maxActiveApplications = " + maxActiveApplications +
+        " [= max(" + 
+        "(int)((clusterResourceMemory / (float)DEFAULT_AM_RESOURCE) *" + 
+        "maxAMResourcePercent * absoluteCapacity)," + 
+        "1) ]" + "\n" +
+        "maxActiveApplicationsPerUser = " + maxActiveApplicationsPerUser +
+        " [= (int)(maxActiveApplications * (userLimit / 100.0f) * userLimitFactor) ]" + "\n" +
+        "utilization = " + utilization +
+        " [= usedResourcesMemory / queueLimit ]" + "\n" +
+        "usedCapacity = " + usedCapacity +
+        " [= usedResourcesMemory / (clusterResourceMemory * capacity) ]" + "\n" +
+        "maxAMResourcePercent = " + maxAMResourcePercent +
+        " [= configuredMaximumAMResourcePercent ]" + "\n" +
+        "minimumAllocationFactor = " + minimumAllocationFactor +
+        " [= (float)(maximumAllocationMemory - minimumAllocationMemory) / maximumAllocationMemory ]" + "\n" +
+        "numContainers = " + numContainers +
+        " [= currentNumContainers ]" + "\n" +
+        "state = " + state +
+        " [= configuredState ]" + "\n" +
+        "acls = " + aclsString +
+        " [= configuredAcls ]" + "\n");
   }
   
   @Override
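
To make the bracketed formulas concrete, a worked example under assumed
numbers: a leaf queue configured at 50 (percent) under a parent whose
absolute capacity is 0.4, with maximumCapacity 80, userLimit 25,
userLimitFactor 1, and the documented 10000 system-wide application default:

    capacity               = 50 / 100                        = 0.5
    absoluteCapacity       = 0.4 * 0.5                       = 0.2
    absoluteMaxCapacity    = (0.4 * 80) / 100                = 0.32
    maxApplications        = (int)(10000 * 0.2)              = 2000
    maxApplicationsPerUser = (int)(2000 * (25 / 100.0) * 1)  = 500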

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java?rev=1202013&r1=1202012&r2=1202013&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java Tue Nov 15 02:39:13 2011
@@ -236,28 +236,30 @@ public class FifoScheduler implements Re
           RMContainerEventType.RELEASED);
     }
 
-    if (!ask.isEmpty()) {
-      LOG.debug("allocate: pre-update" +
-          " applicationId=" + applicationAttemptId + 
-          " application=" + application);
-      application.showRequests();
-
-      // Update application requests
-      application.updateResourceRequests(ask);
-
-      LOG.debug("allocate: post-update" +
-          " applicationId=" + applicationAttemptId + 
-          " application=" + application);
-      application.showRequests();
+    synchronized (application) {
+      if (!ask.isEmpty()) {
+        LOG.debug("allocate: pre-update" +
+            " applicationId=" + applicationAttemptId + 
+            " application=" + application);
+        application.showRequests();
+
+        // Update application requests
+        application.updateResourceRequests(ask);
+
+        LOG.debug("allocate: post-update" +
+            " applicationId=" + applicationAttemptId + 
+            " application=" + application);
+        application.showRequests();
+
+        LOG.debug("allocate:" +
+            " applicationId=" + applicationAttemptId + 
+            " #ask=" + ask.size());
+      }
 
-      LOG.debug("allocate:" +
-          " applicationId=" + applicationAttemptId + 
-          " #ask=" + ask.size());
+      return new Allocation(
+          application.pullNewlyAllocatedContainers(), 
+          application.getHeadroom());
     }
-
-    return new Allocation(
-        application.pullNewlyAllocatedContainers(), 
-        application.getHeadroom());
   }
 
   private SchedulerApp getApplication(
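
The restructuring is a lock-then-drain pattern: updating the pending asks and
pulling the newly allocated containers now happen under the same
per-application monitor, so a concurrent node-update thread cannot slip
containers in (or out) between the two steps. Schematically:

    synchronized (application) {                 // one monitor per SchedulerApp
      application.updateResourceRequests(ask);   // mutate pending asks
      return new Allocation(                     // drain results atomically
          application.pullNewlyAllocatedContainers(),
          application.getHeadroom());
    }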

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java?rev=1202013&r1=1202012&r2=1202013&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java Tue Nov 15 02:39:13 2011
@@ -108,15 +108,17 @@ public class DelegationTokenRenewer exte
     public final Configuration conf;
     public long expirationDate;
     public TimerTask timerTask;
+    public final boolean shouldCancelAtEnd;
     
     public DelegationTokenToRenew(
         ApplicationId jId, Token<?> token, 
-        Configuration conf, long expirationDate) {
+        Configuration conf, long expirationDate, boolean shouldCancelAtEnd) {
       this.token = token;
       this.applicationId = jId;
       this.conf = conf;
       this.expirationDate = expirationDate;
       this.timerTask = null;
+      this.shouldCancelAtEnd = shouldCancelAtEnd;
       if (this.token==null || this.applicationId==null || this.conf==null) {
         throw new IllegalArgumentException("Invalid params to renew token" +
             ";token=" + this.token +
@@ -218,10 +220,12 @@ public class DelegationTokenRenewer exte
    * Add application tokens for renewal.
    * @param applicationId added application
    * @param ts tokens
+   * @param shouldCancelAtEnd true if tokens should be canceled when the
+   * application finishes, false otherwise.
    * @throws IOException
    */
   public synchronized void addApplication(
-      ApplicationId applicationId, Credentials ts) 
+      ApplicationId applicationId, Credentials ts, boolean shouldCancelAtEnd) 
   throws IOException {
     if (ts == null) {
       return; //nothing to add
@@ -239,7 +243,8 @@ public class DelegationTokenRenewer exte
       // first renew happens immediately
       if (token.isManaged()) {
         DelegationTokenToRenew dtr = 
-          new DelegationTokenToRenew(applicationId, token, getConfig(), now); 
+          new DelegationTokenToRenew(applicationId, token, getConfig(), now, 
+              shouldCancelAtEnd); 
 
         addTokenToList(dtr);
       
@@ -317,7 +322,11 @@ public class DelegationTokenRenewer exte
 
   // cancel a token
   private void cancelToken(DelegationTokenToRenew t) {
-    dtCancelThread.cancelToken(t.token, t.conf);
+    if(t.shouldCancelAtEnd) {
+      dtCancelThread.cancelToken(t.token, t.conf);
+    } else {
+      LOG.info("Did not cancel "+t);
+    }
   }
   
   /**
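
Taken with the RMAppManager change above, the flag flows end to end; a sketch
of the registration call as a caller now sees it:

    // Register an application's tokens for renewal; the boolean decides
    // whether they are canceled once the application finishes.
    renewer.addApplication(applicationId,
        parseCredentials(submissionContext),
        submissionContext.getCancelTokensWhenComplete());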

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java?rev=1202013&r1=1202012&r2=1202013&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java Tue Nov 15 02:39:13 2011
@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
 
 import static org.junit.Assert.*;

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java?rev=1202013&r1=1202012&r2=1202013&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java Tue Nov 15 02:39:13 2011
@@ -21,6 +21,7 @@ package org.apache.hadoop.yarn.server.re
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.net.URI;
@@ -243,16 +244,16 @@ public class TestDelegationTokenRenewer 
   /**
    * Basic idea of the test:
    * 1. create tokens.
-   * 2. Mark one of them to be renewed in 2 seconds (istead of
-   * 24 hourse)
+   * 2. Mark one of them to be renewed in 2 seconds (instead of
+   * 24 hours)
    * 3. register them for renewal
    * 4. sleep for 3 seconds
   * 5. count number of renewals (should be 3 initial ones + one extra)
    * 6. register another token for 2 seconds 
    * 7. cancel it immediately
    * 8. Sleep and check that the 2 seconds renew didn't happen 
-   * (totally 5 reneals)
-   * 9. check cancelation
+   * (totally 5 renewals)
+   * 9. check cancellation
    * @throws IOException
    * @throws URISyntaxException
    */
@@ -287,7 +288,7 @@ public class TestDelegationTokenRenewer 
     // register the tokens for renewal
     ApplicationId applicationId_0 = 
         BuilderUtils.newApplicationId(0, 0);
-    delegationTokenRenewer.addApplication(applicationId_0, ts);
+    delegationTokenRenewer.addApplication(applicationId_0, ts, true);
     
     // first 3 initial renewals + 1 real
     int numberOfExpectedRenewals = 3+1; 
@@ -326,7 +327,7 @@ public class TestDelegationTokenRenewer 
     
 
     ApplicationId applicationId_1 = BuilderUtils.newApplicationId(0, 1);
-    delegationTokenRenewer.addApplication(applicationId_1, ts);
+    delegationTokenRenewer.addApplication(applicationId_1, ts, true);
     delegationTokenRenewer.removeApplication(applicationId_1);
     
     numberOfExpectedRenewals = Renewer.counter; // number of renewals so far
@@ -347,4 +348,49 @@ public class TestDelegationTokenRenewer 
       //expected
     }
   }
+  
+  /**
+   * Basic idea of the test:
+   * 1. register a token for renewal with shouldCancelAtEnd = false
+   * 2. remove the application immediately
+   * 3. Sleep and check that no further renewal happened
+   * 4. check that the token was not canceled (renewing it still succeeds)
+   * @throws IOException
+   * @throws URISyntaxException
+   */
+  @Test
+  public void testDTRenewalWithNoCancel () throws Exception {
+    MyFS dfs = (MyFS)FileSystem.get(conf);
+    LOG.info("dfs="+(Object)dfs.hashCode() + ";conf="+conf.hashCode());
+
+    Credentials ts = new Credentials();
+    MyToken token1 = dfs.getDelegationToken(new Text("user1"));
+    
+    //to cause this one to be set for renew in 2 secs
+    Renewer.tokenToRenewIn2Sec = token1; 
+    LOG.info("token="+token1+" should be renewed for 2 secs");
+    
+    String nn1 = DelegationTokenRenewer.SCHEME + "://host1:0";
+    ts.addToken(new Text(nn1), token1);
+    
+
+    ApplicationId applicationId_1 = BuilderUtils.newApplicationId(0, 1);
+    delegationTokenRenewer.addApplication(applicationId_1, ts, false);
+    delegationTokenRenewer.removeApplication(applicationId_1);
+    
+    int numberOfExpectedRenewals = Renewer.counter; // number of renewals so far
+    try {
+      Thread.sleep(6*1000); // sleep 6 seconds; long enough for a renewal to fire if one were still scheduled
+    } catch (InterruptedException e) {}
+    LOG.info("Counter = " + Renewer.counter + ";t="+ Renewer.lastRenewed);
+    
+    // counter and the token should still be the old ones
+    assertEquals("renew wasn't called as many times as expected",
+        numberOfExpectedRenewals, Renewer.counter);
+    
+    // renewing the token should still succeed, because with
+    // shouldCancelAtEnd set to false it was never actually canceled
+    token1.renew(conf);
+  }
 }

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/CapacityScheduler.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/CapacityScheduler.apt.vm?rev=1202013&r1=1202012&r2=1202013&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/CapacityScheduler.apt.vm (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/CapacityScheduler.apt.vm Tue Nov 15 02:39:13 2011
@@ -50,7 +50,7 @@ Hadoop MapReduce Next Generation - Capac
   that the available resources in the Hadoop cluster are shared among multiple 
   organizations who collectively fund the cluster based on their computing 
   needs. There is an added benefit that an organization can access 
-  any excess capacity no being used by others. This provides elasticity for 
+  any excess capacity not being used by others. This provides elasticity for 
   the organizations in a cost-effective manner.
    
   Sharing clusters across organizations necessitates strong support for
@@ -58,7 +58,7 @@ Hadoop MapReduce Next Generation - Capac
  safe-guards to ensure the shared cluster is impervious to a single rogue 
   application or user or sets thereof. The <<<CapacityScheduler>>> provides a 
   stringent set of limits to ensure that a single application or user or queue 
-  cannot consume dispropotionate amount of resources in the cluster. Also, the 
+  cannot consume a disproportionate amount of resources in the cluster. Also, the 
   <<<CapacityScheduler>>> provides limits on initialized/pending applications 
   from a single user and queue to ensure fairness and stability of the cluster.
    
@@ -67,7 +67,7 @@ Hadoop MapReduce Next Generation - Capac
   economics of the shared cluster. 
   
   To provide further control and predictability on sharing of resources, the 
-  <<<CapacityScheduler>>> supports <heirarchical queues> to ensure 
+  <<<CapacityScheduler>>> supports <hierarchical queues> to ensure 
   resources are shared among the sub-queues of an organization before other 
   queues are allowed to use free resources, there-by providing <affinity> 
   for sharing free resources among applications of a given organization.
@@ -76,7 +76,7 @@ Hadoop MapReduce Next Generation - Capac
 
   The <<<CapacityScheduler>>> supports the following features:
   
-  * Heirarchical Queues - Heirarchy of queues is supported to ensure resources 
+  * Hierarchical Queues - Hierarchy of queues is supported to ensure resources 
     are shared among the sub-queues of an organization before other 
     queues are allowed to use free resources, there-by providing more control
     and predictability.
@@ -96,12 +96,12 @@ Hadoop MapReduce Next Generation - Capac
     capacity. When there is demand for these resources from queues running below 
     capacity at a future point in time, as tasks scheduled on these resources 
     complete, they will be assigned to applications on queues running below the
-    capacity. This ensures that resources are available in a predictable and 
-    elastic manner to queues, thus preventing artifical silos of resources in 
-    the cluster which helps utilization.
+    capacity (pre-emption is not supported). This ensures that resources are available 
+    in a predictable and elastic manner to queues, thus preventing artificial silos 
+    of resources in the cluster which helps utilization.
     
   * Multi-tenancy - Comprehensive set of limits are provided to prevent a 
-    single application, user and queue from monpolizing resources of the queue 
+    single application, user and queue from monopolizing resources of the queue 
     or the cluster as a whole to ensure that the cluster isn't overwhelmed.
     
   * Operability
@@ -110,8 +110,8 @@ Hadoop MapReduce Next Generation - Capac
       capacity, ACLs can be changed, at runtime, by administrators in a secure 
       manner to minimize disruption to users. Also, a console is provided for 
       users and administrators to view current allocation of resources to 
-      various queues in the system. Administrators can also 
-      <add additional queues> at runtime.
+      various queues in the system. Administrators can <add additional queues> 
+      at runtime, but queues cannot be <deleted> at runtime.
       
     * Drain applications - Administrators can <stop> queues
       at runtime to ensure that while existing applications run to completion,
@@ -139,7 +139,7 @@ Hadoop MapReduce Next Generation - Capac
 || Property                            || Value                                |
 *--------------------------------------+--------------------------------------+
 | <<<yarn.resourcemanager.scheduler.class>>> | |
-| | <<<org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.CapacityScheduler>>> |
+| | <<<org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler>>> |
 *--------------------------------------+--------------------------------------+
 
   * Setting up <queues>
@@ -155,13 +155,14 @@ Hadoop MapReduce Next Generation - Capac
     child queues.
     
     The configuration for <<<CapacityScheduler>>> uses a concept called
-    <queue path> to configure the heirarchy of queues. The <queue path> is the
-    full path of the queue's heirarcy, starting at <root>, with . (dot) as the 
+    <queue path> to configure the hierarchy of queues. The <queue path> is the
+    full path of the queue's hierarchy, starting at <root>, with . (dot) as the 
     delimiter.
     
     A given queue's children can be defined with the configuration knob:
-    <<<yarn.scheduler.capacity.<queue-path>.queues>>>
-     
+    <<<yarn.scheduler.capacity.<queue-path>.queues>>>. Children do not 
+    inherit properties directly from the parent.
+
     Here is an example with three top-level child-queues <<<a>>>, <<<b>>> and 
     <<<c>>> and some sub-queues for <<<a>>> and <<<b>>>:
      
@@ -197,52 +198,59 @@ Hadoop MapReduce Next Generation - Capac
 *--------------------------------------+--------------------------------------+
 | <<<yarn.scheduler.capacity.<queue-path>.capacity>>> | |
 | | Queue <capacity> in percentage (%). | 
-| | The sum of capacities for all queues, at each level, should be less than | 
-| | or equal to 100. | 
+| | The sum of capacities for all queues, at each level, must be equal |
+| | to 100. | 
 | | Applications in the queue may consume more resources than the queue's | 
 | | capacity if there are free resources, providing elasticity. |
 *--------------------------------------+--------------------------------------+
 | <<<yarn.scheduler.capacity.<queue-path>.maximum-capacity>>> |   | 
 | | Maximum queue capacity in percentage (%). |
 | | This limits the <elasticity> for applications in the queue. |
+| | Defaults to -1 which disables it. |
 *--------------------------------------+--------------------------------------+
 | <<<yarn.scheduler.capacity.<queue-path>.minimum-user-limit-percent>>> |   | 
 | | Each queue enforces a limit on the percentage of resources allocated to a | 
 | | user at any given time, if there is demand for resources. The user limit | 
-| | can vary between a minimum and maximum value. The former depends on the | 
-| | number of users who have submitted applications, and the latter is set to | 
-| | this property value. For e.g., suppose the value of this property is 25. | 
+| | can vary between a minimum and maximum value. The former |
+| | (the minimum value) is set to this property value and the latter |
+| | (the maximum value) depends on the number of users who have submitted |
+| | applications. For e.g., suppose the value of this property is 25. | 
 | | If two users have submitted applications to a queue, no single user can |
 | | use more than 50% of the queue resources. If a third user submits an | 
 | | application, no single user can use more than 33% of the queue resources. |
 | | With 4 or more users, no user can use more than 25% of the queues |
-| | resources. A value of 100 implies no user limits are imposed. |
+| | resources. A value of 100 implies no user limits are imposed. The default |
+| | is 100. |
 *--------------------------------------+--------------------------------------+
 | <<<yarn.scheduler.capacity.<queue-path>.user-limit-factor>>> |   | 
 | | The multiple of the queue capacity which can be configured to allow a | 
 | | single user to acquire more resources. By default this is set to 1 which | 
 | | ensures that a single user can never take more than the queue's configured | 
-| | capacity irrespective of how idle th cluster is. |
+| | capacity irrespective of how idle the cluster is. Value is specified as |
+| | a float. |
 *--------------------------------------+--------------------------------------+
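
Put together, a single queue's resource profile under these knobs might look
like the following sketch (queue name and values are assumed; capacities at
each level must still sum to 100 across siblings):

+---+
  <property>
    <name>yarn.scheduler.capacity.root.a.capacity</name>
    <value>60</value>
  </property>

  <property>
    <name>yarn.scheduler.capacity.root.a.maximum-capacity</name>
    <value>80</value>
  </property>

  <property>
    <name>yarn.scheduler.capacity.root.a.minimum-user-limit-percent</name>
    <value>25</value>
  </property>

  <property>
    <name>yarn.scheduler.capacity.root.a.user-limit-factor</name>
    <value>2</value>
  </property>
+---+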
 
     * Running and Pending Application Limits
     
     
     The <<<CapacityScheduler>>> supports the following parameters to control 
-    the runnign and pending applications:
+    the running and pending applications:
     
 
 *--------------------------------------+--------------------------------------+
 || Property                            || Description                         |
 *--------------------------------------+--------------------------------------+
 | <<<yarn.scheduler.capacity.maximum-applications>>> | |
-| | Maximum number of jobs in the system which can be concurently active |
-| | both running and pending. Limits on each queue are directly proportional |
-| | to their queue capacities. |
+| | Maximum number of applications in the system which can be concurrently |
+| | active, both running and pending. Limits on each queue are directly |
+| | proportional to their queue capacities and user limits. This is a |
+| | hard limit and any applications submitted when this limit is reached will |
+| | be rejected. Default is 10000. |
 *--------------------------------------+--------------------------------------+
 | yarn.scheduler.capacity.maximum-am-resource-percent | |
 | | Maximum percent of resources in the cluster which can be used to run |
 | | application masters - controls number of concurrent running applications. |
+| | Specified as a float, i.e. 0.5 = 50%. Default is 10%. |
 *--------------------------------------+--------------------------------------+
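
A sketch of the two system-wide limits, with their documented defaults
spelled out explicitly:

+---+
  <property>
    <name>yarn.scheduler.capacity.maximum-applications</name>
    <value>10000</value>
  </property>

  <property>
    <name>yarn.scheduler.capacity.maximum-am-resource-percent</name>
    <value>0.1</value>
  </property>
+---+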
 
     * Queue Administration & Permissions
@@ -257,7 +265,7 @@ Hadoop MapReduce Next Generation - Capac
 | <<<yarn.scheduler.capacity.<queue-path>.state>>> | |
 | | The <state> of the queue. Can be one of <<<RUNNING>>> or <<<STOPPED>>>. |
 | | If a queue is in <<<STOPPED>>> state, new applications cannot be |
-| | submitted to <itself> or <any of its child queueus>. | 
+| | submitted to <itself> or <any of its child queues>. | 
 | | Thus, if the <root> queue is <<<STOPPED>>> no applications can be | 
 | | submitted to the entire cluster. |
 | | Existing applications continue to completion, thus the queue can be 
@@ -276,7 +284,7 @@ Hadoop MapReduce Next Generation - Capac
     
     <Note:> An <ACL> is of the form <user1>, <user2><space><group1>, <group2>.
     The special value of <<*>> implies <anyone>. The special value of <space>
-    implies <no one>.
+    implies <no one>. The default is <<*>> if not specified.
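
For illustration, assuming the acl_submit_applications key from the shipped
capacity-scheduler.xml, an ACL granting submit access to user1, user2 and
group1 would be:

+---+
  <property>
    <name>yarn.scheduler.capacity.root.a.acl_submit_applications</name>
    <value>user1,user2 group1</value>
  </property>
+---+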
      
     * Reviewing the configuration of the CapacityScheduler
 
@@ -295,14 +303,14 @@ Hadoop MapReduce Next Generation - Capac
 * {Changing Queue Configuration}
 
   Changing queue properties and adding new queues is very simple. You need to
-  edit <<conf/capacity-scheduler.xml>> and run <rmadmin -refreshQueues>.
+  edit <<conf/capacity-scheduler.xml>> and run <yarn rmadmin -refreshQueues>.
   
 ----
 $ vi $HADOOP_CONF_DIR/capacity-scheduler.xml
-$ $YARN_HOME/bin/rmadmin -refreshQueues
+$ $YARN_HOME/bin/yarn rmadmin -refreshQueues
 ----  
 
   <Note:> Queues cannot be <deleted>, only addition of new queues is supported -
   the updated queue configuration should be a valid one i.e. queue-capacity at
   each <level> should be equal to 100%.
-  
\ No newline at end of file
+

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/SingleCluster.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/SingleCluster.apt.vm?rev=1202013&r1=1202012&r2=1202013&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/SingleCluster.apt.vm (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/SingleCluster.apt.vm Tue Nov 15 02:39:13 2011
@@ -41,7 +41,7 @@ $ mvn clean install assembly:assembly -P
 * Setting up the environment.
 
   Assuming you have installed hadoop-common/hadoop-hdfs and exported
-  <<$HADOOP_COMMON_HOME>>/<<$HADOOP_COMMON_HOME>>, untar hadoop mapreduce 
+  <<$HADOOP_COMMON_HOME>>/<<$HADOOP_HDFS_HOME>>, untar hadoop mapreduce 
   tarball and set environment variable <<$HADOOP_MAPRED_HOME>> to the 
   untarred directory. Set <<$YARN_HOME>> the same as <<$HADOOP_MAPRED_HOME>>. 
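
A sketch of the resulting environment, with assumed install paths:

----
$ export HADOOP_COMMON_HOME=/opt/hadoop-common
$ export HADOOP_HDFS_HOME=/opt/hadoop-hdfs
$ export HADOOP_MAPRED_HOME=/opt/hadoop-mapreduce
$ export YARN_HOME=$HADOOP_MAPRED_HOME
----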
  
@@ -79,15 +79,15 @@ $ mvn clean install assembly:assembly -P
 Add the following configs to your <<<yarn-site.xml>>>
 
 +---+
- <property>
+  <property>
     <name>yarn.resourcemanager.resource-tracker.address</name>
     <value>host:port</value>
     <description>host is the hostname of the resource manager and 
     port is the port on which the NodeManagers contact the Resource Manager.
     </description>
- </property>
+  </property>
 
- <property>
+  <property>
     <name>yarn.resourcemanager.scheduler.address</name>
     <value>host:port</value>
     <description>host is the hostname of the resourcemanager and port is the port
@@ -145,6 +145,32 @@ Add the following configs to your <<<yar
   </property>
 +---+
 
+** Setting up <<<capacity-scheduler.xml>>>
+
+   Make sure you populate the root queues in <<<capacity-scheduler.xml>>>.
+
++---+
+  <property>
+    <name>yarn.scheduler.capacity.root.queues</name>
+    <value>unfunded,default</value>
+  </property>
+  
+  <property>
+    <name>yarn.scheduler.capacity.root.capacity</name>
+    <value>100</value>
+  </property>
+  
+  <property>
+    <name>yarn.scheduler.capacity.root.unfunded.capacity</name>
+    <value>50</value>
+  </property>
+  
+  <property>
+    <name>yarn.scheduler.capacity.root.default.capacity</name>
+    <value>50</value>
+  </property>
++---+
+
 * Create Symlinks.
 
   You will have to create the following symlinks:

Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/c++/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Nov 15 02:39:13 2011
@@ -1,3 +1,3 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/c++:1159757-1196451
+/hadoop/common/trunk/hadoop-mapreduce-project/src/c++:1159757-1202009
 /hadoop/core/branches/branch-0.19/mapred/src/c++:713112
 /hadoop/core/trunk/src/c++:776175-784663

Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Nov 15 02:39:13 2011
@@ -1,3 +1,3 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib:1152502-1196451
+/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib:1152502-1202009
 /hadoop/core/branches/branch-0.19/mapred/src/contrib:713112
 /hadoop/core/trunk/src/contrib:784664-785643

Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/block_forensics/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Nov 15 02:39:13 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/block_forensics:1152502-1196451
+/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/block_forensics:1152502-1202009
 /hadoop/core/branches/branch-0.19/hdfs/src/contrib/block_forensics:713112
 /hadoop/core/branches/branch-0.19/mapred/src/contrib/block_forensics:713112
 /hadoop/core/trunk/src/contrib/block_forensics:784664-785643

Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/build-contrib.xml
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Nov 15 02:39:13 2011
@@ -1,3 +1,3 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/build-contrib.xml:1161333-1196451
+/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/build-contrib.xml:1161333-1202009
 /hadoop/core/branches/branch-0.19/mapred/src/contrib/build-contrib.xml:713112
 /hadoop/core/trunk/src/contrib/build-contrib.xml:776175-786373

Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/build.xml
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Nov 15 02:39:13 2011
@@ -1,3 +1,3 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/build.xml:1161333-1196451
+/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/build.xml:1161333-1202009
 /hadoop/core/branches/branch-0.19/mapred/src/contrib/build.xml:713112
 /hadoop/core/trunk/src/contrib/build.xml:776175-786373

Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/data_join/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Nov 15 02:39:13 2011
@@ -1,3 +1,3 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/data_join:1159757-1196451
+/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/data_join:1159757-1202009
 /hadoop/core/branches/branch-0.19/mapred/src/contrib/data_join:713112
 /hadoop/core/trunk/src/contrib/data_join:776175-786373

Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/eclipse-plugin/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Nov 15 02:39:13 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/eclipse-plugin:1159757-1196451
+/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/eclipse-plugin:1159757-1202009
 /hadoop/core/branches/branch-0.19/core/src/contrib/eclipse-plugin:713112
 /hadoop/core/branches/branch-0.19/mapred/src/contrib/eclipse-plugin:713112
 /hadoop/core/trunk/src/contrib/eclipse-plugin:776175-785643

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/Gridmix.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/Gridmix.java?rev=1202013&r1=1202012&r2=1202013&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/Gridmix.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/Gridmix.java Tue Nov 15 02:39:13 2011
@@ -123,7 +123,7 @@ public class Gridmix extends Configured 
     summarizer = new Summarizer(args);
   }
   
-  Gridmix() {
+  public Gridmix() {
     summarizer = new Summarizer();
   }
   

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridMixConfig.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridMixConfig.java?rev=1202013&r1=1202012&r2=1202013&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridMixConfig.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridMixConfig.java Tue Nov 15 02:39:13 2011
@@ -23,6 +23,7 @@ import org.apache.hadoop.mapred.gridmix.
 import org.apache.hadoop.mapreduce.MRConfig;
 import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
+import org.apache.hadoop.mapred.gridmix.emulators.resourceusage.*;
 
 /**
  * Gridmix system tests configurations. 
@@ -218,4 +219,67 @@ public class GridMixConfig {
    */
   public static final String CLUSTER_MAX_REDUCE_MEMORY = 
       JTConfig.JT_MAX_REDUCEMEMORY_MB;
+
+ /**
+  * Gridmix cpu emulation.
+  */
+ public static final String GRIDMIX_CPU_EMULATON =
+     ResourceUsageMatcher.RESOURCE_USAGE_EMULATION_PLUGINS;
+
+ /**
+  *  Gridmix cpu usage emulation plugin.
+  */
+ public  static final String GRIDMIX_CPU_USAGE_PLUGIN =
+     CumulativeCpuUsageEmulatorPlugin.class.getName();
+
+ /**
+  * Gridmix cpu emulation custom interval.
+  */
+ public static final String GRIDMIX_CPU_CUSTOM_INTERVAL =
+     CumulativeCpuUsageEmulatorPlugin.CPU_EMULATION_PROGRESS_INTERVAL;
+
+ /**
+  * Gridmix cpu emulation lower limit.
+  */
+ public static int GRIDMIX_CPU_EMULATION_LOWER_LIMIT = 55;
+
+ /**
+  * Gridmix cpu emulation upper limit.
+  */
+ public static int GRIDMIX_CPU_EMULATION_UPPER_LIMIT = 130;
+
+ /**
+  * Gridmix heap memory custom interval
+  */
+ public static final String GRIDMIX_HEAP_MEMORY_CUSTOM_INTRVL = 
+     TotalHeapUsageEmulatorPlugin.HEAP_EMULATION_PROGRESS_INTERVAL;
+  
+ /**
+  *  Gridmix heap free memory ratio
+  */
+ public static final String GRIDMIX_HEAP_FREE_MEMORY_RATIO =
+     TotalHeapUsageEmulatorPlugin.MIN_HEAP_FREE_RATIO;
+  
+ /**
+  *  Gridmix memory emulation plugin
+  */
+ public static final String GRIDMIX_MEMORY_EMULATION_PLUGIN = 
+     TotalHeapUsageEmulatorPlugin.class.getName();
+  
+ /**
+  *  Gridmix memory emulation
+  */
+ public static final String GRIDMIX_MEMORY_EMULATON = 
+     ResourceUsageMatcher.RESOURCE_USAGE_EMULATION_PLUGINS;
+  
+ /**
+  *  Gridmix memory emulation lower limit.
+  */
+ public static int GRIDMIX_MEMORY_EMULATION_LOWER_LIMIT = 55;
+  
+ /**
+  * Gridmix memory emulation upper limit. 
+  */
+ public static int GRIDMIX_MEMORY_EMULATION_UPPER_LIMIT = 130;
+
 }
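
These constants feed the verification code below; a sketch of how a test run
might switch both emulations on (wiring assumed; both *_EMULATON constants
resolve to the same plugin-list key):

    // Enable cpu and heap emulation by listing both plugin classes under
    // the resource-usage emulation key.
    Configuration conf = new Configuration();
    conf.set(GridMixConfig.GRIDMIX_CPU_EMULATON,
        GridMixConfig.GRIDMIX_CPU_USAGE_PLUGIN + ","
        + GridMixConfig.GRIDMIX_MEMORY_EMULATION_PLUGIN);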

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridmixJobVerification.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridmixJobVerification.java?rev=1202013&r1=1202012&r2=1202013&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridmixJobVerification.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridmixJobVerification.java Tue Nov 15 02:39:13 2011
@@ -38,6 +38,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.TaskCounter;
 import org.apache.hadoop.mapreduce.Counters;
 import org.apache.hadoop.mapreduce.Counter;
 import org.apache.hadoop.mapreduce.CounterGroup;
@@ -105,7 +106,7 @@ public class GridmixJobVerification {
   * @throws ParseException - if a parse error occurs.
    */
   public void verifyGridmixJobsWithJobStories(List<JobID> jobids) 
-      throws IOException, ParseException {
+      throws Exception {
 
     SortedMap <Long, String> origSubmissionTime = new TreeMap <Long, String>();
     SortedMap <Long, String> simuSubmissionTime = new TreeMap<Long, String>();
@@ -147,6 +148,8 @@ public class GridmixJobVerification {
       setJobDistributedCacheInfo(simuJobId.toString(), simuJobConf, 
          zombieJob.getJobConf());
       verifyHighRamMemoryJobs(zombieJob, simuJobConf);
+      verifyCPUEmulationOfJobs(zombieJob, jhInfo, simuJobConf);
+      verifyMemoryEmulationOfJobs(zombieJob, jhInfo, simuJobConf);
       LOG.info("Done.");
     }
     verifyDistributedCacheBetweenJobs(simuAndOrigJobsInfo);
@@ -353,6 +356,229 @@ public class GridmixJobVerification {
       fs.close();
     }
   }
+
+  /**
+   * It verifies the heap memory resource usage of gridmix jobs with
+   * corresponding original job in the trace.
+   * @param zombieJob - Original job history.
+   * @param jhInfo - Simulated job history.
+   * @param simuJobConf - simulated job configuration.
+   */
+  public void verifyMemoryEmulationOfJobs(ZombieJob zombieJob,
+                 JobHistoryParser.JobInfo jhInfo,
+                                 JobConf simuJobConf) throws Exception {
+    long origJobMapsTHU = 0;
+    long origJobReducesTHU = 0;
+    long simuJobMapsTHU = 0;
+    long simuJobReducesTHU = 0;
+    boolean isMemEmulOn = false;
+    if (simuJobConf.get(GridMixConfig.GRIDMIX_MEMORY_EMULATON) != null) {
+      isMemEmulOn = 
+          simuJobConf.get(GridMixConfig.GRIDMIX_MEMORY_EMULATON).
+              contains(GridMixConfig.GRIDMIX_MEMORY_EMULATION_PLUGIN);
+    }
+
+    if (isMemEmulOn) {
+      for (int index = 0; index < zombieJob.getNumberMaps(); index ++) {
+        TaskInfo mapTask = zombieJob.getTaskInfo(TaskType.MAP, index);
+        if (mapTask.getResourceUsageMetrics().getHeapUsage() > 0) {
+          origJobMapsTHU += 
+                  mapTask.getResourceUsageMetrics().getHeapUsage();
+        }
+      }
+      LOG.info("Original Job Maps Total Heap Usage: " + origJobMapsTHU);
+
+      for (int index = 0; index < zombieJob.getNumberReduces(); index ++) {
+        TaskInfo reduceTask = zombieJob.getTaskInfo(TaskType.REDUCE, index);
+        if (reduceTask.getResourceUsageMetrics().getHeapUsage() > 0) {
+          origJobReducesTHU += 
+                  reduceTask.getResourceUsageMetrics().getHeapUsage();
+        }
+      }
+      LOG.info("Original Job Reduces Total Heap Usage: " + origJobReducesTHU);
+
+      simuJobMapsTHU = 
+          getCounterValue(jhInfo.getMapCounters(), 
+                          TaskCounter.COMMITTED_HEAP_BYTES.toString());
+      LOG.info("Simulated Job Maps Total Heap Usage: " + simuJobMapsTHU);
+
+      simuJobReducesTHU = 
+          getCounterValue(jhInfo.getReduceCounters(), 
+                          TaskCounter.COMMITTED_HEAP_BYTES.toString());
+      LOG.info("Simulated Jobs Reduces Total Heap Usage: " + simuJobReducesTHU);
+
+      long mapCount = jhInfo.getTotalMaps();
+      long reduceCount = jhInfo.getTotalReduces();
+
+      String strHeapRatio =
+          simuJobConf.get(GridMixConfig.GRIDMIX_HEAP_FREE_MEMORY_RATIO);
+      if (strHeapRatio == null) {
+        strHeapRatio = "0.3F";
+      }
+
+      if (mapCount > 0) {
+        double mapEmulFactor = (simuJobMapsTHU * 100.0) / origJobMapsTHU;
+        long mapEmulAccuracy = Math.round(mapEmulFactor);
+        LOG.info("Maps memory emulation accuracy of a job:" 
+                + mapEmulAccuracy + "%");
+        Assert.assertTrue("Map phase total memory emulation had crossed the "
+                         + "configured max limit.", mapEmulAccuracy 
+                         <= GridMixConfig.GRIDMIX_MEMORY_EMULATION_UPPER_LIMIT);
+        Assert.assertTrue("Map phase total memory emulation had not crossed " 
+                         + "the configured min limit.", mapEmulAccuracy 
+                         >= GridMixConfig.GRIDMIX_MEMORY_EMULATION_LOWER_LIMIT);
+        double expHeapRatio = Double.parseDouble(strHeapRatio);
+        LOG.info("expHeapRatio for maps:" + expHeapRatio);
+        double actHeapRatio = 
+                ((double) Math.abs(origJobMapsTHU - simuJobMapsTHU));
+        actHeapRatio /= origJobMapsTHU;
+        LOG.info("actHeapRatio for maps:" + actHeapRatio);
+        Assert.assertTrue("Simulated job maps heap ratio not matched.",
+                          actHeapRatio <= expHeapRatio); 
+      }
+
+      if (reduceCount > 0) {
+        double reduceEmulFactor = (simuJobReducesTHU * 100.0) / origJobReducesTHU;
+        long reduceEmulAccuracy = Math.round(reduceEmulFactor);
+        LOG.info("Reduces memory emulation accuracy of a job:"
+                + reduceEmulAccuracy + "%");
+        Assert.assertTrue("Reduce phase total memory emulation has crossed "
+                         + "the configured max limit.", reduceEmulAccuracy
+                         <= GridMixConfig.GRIDMIX_MEMORY_EMULATION_UPPER_LIMIT);
+        Assert.assertTrue("Reduce phase total memory emulation has fallen "
+                         + "below the configured min limit.", reduceEmulAccuracy
+                         >= GridMixConfig.GRIDMIX_MEMORY_EMULATION_LOWER_LIMIT);
+        double expHeapRatio = Double.parseDouble(strHeapRatio);
+        LOG.info("expHeapRatio for reduces:" + expHeapRatio);
+        double actHeapRatio =
+                ((double) Math.abs(origJobReducesTHU - simuJobReducesTHU));
+        actHeapRatio /= origJobReducesTHU;
+        LOG.info("actHeapRatio for reduces:" + actHeapRatio);
+        Assert.assertTrue("Simulated job reduces heap ratio not matched.",
+                          actHeapRatio <= expHeapRatio);
+      }
+    }
+  }
+
+  /**
+   * Verifies the CPU resource usage of a gridmix job against
+   * its original job.
+   * @param origJobHistory - original job history.
+   * @param simuJobHistoryInfo - simulated job history.
+   * @param simuJobConf - simulated job configuration.
+   */
+  public void verifyCPUEmulationOfJobs(ZombieJob origJobHistory,
+       JobHistoryParser.JobInfo simuJobHistoryInfo,
+       JobConf simuJobConf) throws Exception {
+
+    boolean isCpuEmulOn = false;
+    if (simuJobConf.get(GridMixConfig.GRIDMIX_CPU_EMULATON) != null) {
+      isCpuEmulOn = 
+          simuJobConf.get(GridMixConfig.GRIDMIX_CPU_EMULATON).
+              contains(GridMixConfig.GRIDMIX_CPU_USAGE_PLUGIN);
+    }
+
+    if (isCpuEmulOn) {
+      Map<String,Long> origJobMetrics =
+                       getOriginalJobCPUMetrics(origJobHistory);
+      Map<String,Long> simuJobMetrics =
+                       getSimulatedJobCPUMetrics(simuJobHistoryInfo);
+
+      long origMapUsage = origJobMetrics.get("MAP");
+      LOG.info("Maps cpu usage of original job:" + origMapUsage);
+
+      long origReduceUsage = origJobMetrics.get("REDUCE");
+      LOG.info("Reduces cpu usage of original job:" + origReduceUsage);
+
+      long simuMapUsage = simuJobMetrics.get("MAP");
+      LOG.info("Maps cpu usage of simulated job:" + simuMapUsage);
+
+      long simuReduceUsage = simuJobMetrics.get("REDUCE");
+      LOG.info("Reduces cpu usage of simulated job:"+ simuReduceUsage);
+
+      long mapCount = simuJobHistoryInfo.getTotalMaps(); 
+      long reduceCount = simuJobHistoryInfo.getTotalReduces(); 
+
+      if (mapCount > 0) {
+        double mapEmulFactor = (simuMapUsage * 100.0) / origMapUsage;
+        long mapEmulAccuracy = Math.round(mapEmulFactor);
+        LOG.info("CPU emulation accuracy for maps in job " +
+                 simuJobHistoryInfo.getJobId() +
+                 ": " + mapEmulAccuracy + "%");
+        Assert.assertTrue("Map-side cpu emulation inaccurate!" +
+                          " Actual cpu usage: " + simuMapUsage +
+                          " Expected cpu usage: " + origMapUsage, mapEmulAccuracy
+                          >= GridMixConfig.GRIDMIX_CPU_EMULATION_LOWER_LIMIT
+                          && mapEmulAccuracy
+                          <= GridMixConfig.GRIDMIX_CPU_EMULATION_UPPER_LIMIT);
+      }
+
+      if (reduceCount > 0) {
+        double reduceEmulFactor = (simuReduceUsage * 100.0) / origReduceUsage;
+        long reduceEmulAccuracy = Math.round(reduceEmulFactor);
+        LOG.info("CPU emulation accuracy for reduces in job " +
+                 simuJobHistoryInfo.getJobId() +
+                 ": " + reduceEmulAccuracy + "%");
+        Assert.assertTrue("Reduce-side cpu emulation inaccurate!" +
+                          " Actual cpu usage: " + simuReduceUsage +
+                          " Expected cpu usage: " + origReduceUsage,
+                          reduceEmulAccuracy
+                          >= GridMixConfig.GRIDMIX_CPU_EMULATION_LOWER_LIMIT
+                          && reduceEmulAccuracy
+                          <= GridMixConfig.GRIDMIX_CPU_EMULATION_UPPER_LIMIT);
+      }
+    }
+  }
+
+  /**
+   * Get the simulated job cpu metrics.
+   * @param jhInfo - simulated job history.
+   * @return - cpu metrics as a map.
+   * @throws Exception - if an error occurs.
+   */
+  private Map<String,Long> getSimulatedJobCPUMetrics(
+          JobHistoryParser.JobInfo jhInfo) throws Exception {
+    Map<String, Long> resourceMetrics = new HashMap<String, Long>();
+    long mapCPUUsage = 
+        getCounterValue(jhInfo.getMapCounters(), 
+                        TaskCounter.CPU_MILLISECONDS.toString());
+    resourceMetrics.put("MAP", mapCPUUsage);
+    long reduceCPUUsage = 
+        getCounterValue(jhInfo.getReduceCounters(), 
+                        TaskCounter.CPU_MILLISECONDS.toString());
+    resourceMetrics.put("REDUCE", reduceCPUUsage);
+    return resourceMetrics;
+  }
+
+  /**
+   * Get the original job cpu metrics.
+   * @param zombieJob - original job history.
+   * @return - cpu metrics as a map.
+   */
+  private Map<String, Long> getOriginalJobCPUMetrics(ZombieJob zombieJob) {
+    long mapTotalCPUUsage = 0;
+    long reduceTotalCPUUsage = 0;
+    Map<String,Long> resourceMetrics = new HashMap<String,Long>();
+
+    for (int index = 0; index < zombieJob.getNumberMaps(); index++) {
+      TaskInfo mapTask = zombieJob.getTaskInfo(TaskType.MAP, index);
+      if (mapTask.getResourceUsageMetrics().getCumulativeCpuUsage() > 0) {
+        mapTotalCPUUsage += 
+            mapTask.getResourceUsageMetrics().getCumulativeCpuUsage();
+      }
+    }
+    resourceMetrics.put("MAP", mapTotalCPUUsage); 
+    
+    for (int index = 0; index < zombieJob.getNumberReduces(); index++) {
+      TaskInfo reduceTask = zombieJob.getTaskInfo(TaskType.REDUCE, index);
+      if (reduceTask.getResourceUsageMetrics().getCumulativeCpuUsage() > 0) {
+        reduceTotalCPUUsage += 
+            reduceTask.getResourceUsageMetrics().getCumulativeCpuUsage();
+      }
+    }
+    resourceMetrics.put("REDUCE", reduceTotalCPUUsage);
+    return resourceMetrics;
+  }
   
   /**
    * Get the user resolver of a job.

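A note on the two verification methods added above: both reduce to the same
arithmetic. The simulated job's total usage is expressed as a percentage of the
original job's total usage and must fall inside a configured accuracy band; the
memory path additionally bounds the relative deviation by the configured
heap-free ratio. The standalone sketch below illustrates that check; all names
and values (origUsage, simuUsage, the limits) are illustrative stand-ins, not
part of this patch:

    // Illustrative sketch of the accuracy-band check used in
    // verifyMemoryEmulationOfJobs/verifyCPUEmulationOfJobs above.
    public class EmulationAccuracyCheck {
      public static void main(String[] args) {
        long origUsage  = 1000L;  // total usage aggregated from the trace
        long simuUsage  = 850L;   // total usage from the simulated job's counters
        long lowerLimit = 80L;    // stands in for GRIDMIX_*_EMULATION_LOWER_LIMIT
        long upperLimit = 120L;   // stands in for GRIDMIX_*_EMULATION_UPPER_LIMIT

        // 100.0 forces floating-point division; (simuUsage * 100) / origUsage
        // would truncate to a whole long before Math.round() sees a fraction.
        long accuracy = Math.round((simuUsage * 100.0) / origUsage);
        System.out.println("accuracy = " + accuracy + "%");            // 85%
        System.out.println("in band  = "
            + (accuracy >= lowerLimit && accuracy <= upperLimit));     // true

        // Memory-only check: relative deviation vs. the heap-free ratio.
        double actHeapRatio = Math.abs(origUsage - simuUsage) / (double) origUsage;
        System.out.println("deviation = " + actHeapRatio);             // 0.15
      }
    }
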
Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/index/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Nov 15 02:39:13 2011
@@ -1,3 +1,3 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/index:1159757-1196451
+/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/index:1159757-1202009
 /hadoop/core/branches/branch-0.19/mapred/src/contrib/index:713112
 /hadoop/core/trunk/src/contrib/index:776175-786373

Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/streaming/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Nov 15 02:39:13 2011
@@ -1,3 +1,3 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/streaming:1159757-1196451
+/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/streaming:1159757-1202009
 /hadoop/core/branches/branch-0.19/mapred/src/contrib/streaming:713112
 /hadoop/core/trunk/src/contrib/streaming:776175-786373

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamJob.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamJob.java?rev=1202013&r1=1202012&r2=1202013&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamJob.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamJob.java Tue Nov 15 02:39:13 2011
@@ -984,19 +984,6 @@ public class StreamJob implements Tool {
     return jobConf_.get(JTConfig.JT_IPC_ADDRESS);
   }
 
-  protected void jobInfo() {
-    if (isLocalHadoop()) {
-      LOG.info("Job running in-process (local Hadoop)");
-    } else {
-      String hp = getJobTrackerHostPort();
-      LOG.info("To kill this job, run:");
-      LOG.info(getHadoopClientHome() + "/bin/hadoop job  -D" + JTConfig.JT_IPC_ADDRESS + "=" + hp + " -kill "
-               + jobId_);
-      //LOG.info("Job file: " + running_.getJobFile());
-      LOG.info("Tracking URL: " + StreamUtil.qualifyHost(running_.getTrackingURL()));
-    }
-  }
-
   // Based on JobClient
   public int submitAndMonitorJob() throws IOException {
 
@@ -1012,7 +999,6 @@ public class StreamJob implements Tool {
     try {
       running_ = jc_.submitJob(jobConf_);
       jobId_ = running_.getID();
-      jobInfo();
       if (background_) {
         LOG.info("Job is running in background.");
       } else if (!jc_.monitorAndPrintJob(jobConf_, running_)) {

Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/contrib/vaidya/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Nov 15 02:39:13 2011
@@ -1,3 +1,3 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/vaidya:1159757-1196451
+/hadoop/common/trunk/hadoop-mapreduce-project/src/contrib/vaidya:1159757-1202009
 /hadoop/core/branches/branch-0.19/mapred/src/contrib/vaidya:713112
 /hadoop/core/trunk/src/contrib/vaidya:776175-786373

Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/examples/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Nov 15 02:39:13 2011
@@ -1,3 +1,3 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/examples:1152502-1196451
+/hadoop/common/trunk/hadoop-mapreduce-project/src/examples:1152502-1202009
 /hadoop/core/branches/branch-0.19/mapred/src/examples:713112
 /hadoop/core/trunk/src/examples:776175-784663

Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/java/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Nov 15 02:39:13 2011
@@ -1,3 +1,3 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/java:1152502-1196451
+/hadoop/common/trunk/hadoop-mapreduce-project/src/java:1152502-1202009
 /hadoop/core/branches/branch-0.19/mapred/src/java:713112
 /hadoop/core/trunk/src/mapred:776175-785643

Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/test/mapred/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Nov 15 02:39:13 2011
@@ -1,3 +1,3 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/test/mapred:1152502-1196451
+/hadoop/common/trunk/hadoop-mapreduce-project/src/test/mapred:1152502-1202009
 /hadoop/core/branches/branch-0.19/mapred/src/test/mapred:713112
 /hadoop/core/trunk/src/test/mapred:776175-785643

Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/fs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Nov 15 02:39:13 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/fs:1159757-1196451
+/hadoop/common/trunk/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/fs:1159757-1202009
 /hadoop/core/branches/branch-0.19/mapred/src/test/mapred/org/apache/hadoop/fs:713112
 /hadoop/core/trunk/src/test/mapred/org/apache/hadoop/fs:776175-785643
 /hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs:817878-835934

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/fs/slive/SlivePartitioner.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/fs/slive/SlivePartitioner.java?rev=1202013&r1=1202012&r2=1202013&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/fs/slive/SlivePartitioner.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/fs/slive/SlivePartitioner.java Tue Nov 15 02:39:13 2011
@@ -34,6 +34,6 @@ public class SlivePartitioner implements
   @Override // Partitioner
   public int getPartition(Text key, Text value, int numPartitions) {
     OperationOutput oo = new OperationOutput(key, value);
-    return oo.getOperationType().hashCode() % numPartitions;
+    return (oo.getOperationType().hashCode() & Integer.MAX_VALUE) % numPartitions;
   }
 }
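
The one-line change above fixes a classic partitioner pitfall: a Java hash code
can be negative, and the % operator keeps the sign of its left operand, so the
old expression could hand the framework a negative partition number. ANDing
with Integer.MAX_VALUE clears the sign bit before the modulo. A minimal
demonstration (the hash value is a made-up stand-in):

    public class PartitionSign {
      public static void main(String[] args) {
        int hash = -123456;  // stands in for a key with a negative hashCode()
        int numPartitions = 7;
        System.out.println(hash % numPartitions);  // -4: an invalid partition
        System.out.println((hash & Integer.MAX_VALUE) % numPartitions);  // 5
      }
    }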

Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Nov 15 02:39:13 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/hdfs:1152502-1196451
+/hadoop/common/trunk/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/hdfs:1152502-1202009
 /hadoop/core/branches/branch-0.19/mapred/src/test/mapred/org/apache/hadoop/hdfs:713112
 /hadoop/core/trunk/src/test/mapred/org/apache/hadoop/hdfs:776175-785643
 /hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/hdfs:817878-835934

Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/io/FileBench.java
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Nov 15 02:39:13 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/io/FileBench.java:1161333-1196451
+/hadoop/common/trunk/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/io/FileBench.java:1161333-1202009
 /hadoop/core/branches/branch-0.19/mapred/src/test/mapred/org/apache/hadoop/io/FileBench.java:713112
 /hadoop/core/trunk/src/test/mapred/org/apache/hadoop/io/FileBench.java:776175-785643
 /hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/io/FileBench.java:817878-835934

Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/io/TestSequenceFileMergeProgress.java
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Nov 15 02:39:13 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/io/TestSequenceFileMergeProgress.java:1161333-1196451
+/hadoop/common/trunk/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/io/TestSequenceFileMergeProgress.java:1161333-1202009
 /hadoop/core/branches/branch-0.19/mapred/src/test/mapred/org/apache/hadoop/io/TestSequenceFileMergeProgress.java:713112
 /hadoop/core/trunk/src/test/mapred/org/apache/hadoop/io/TestSequenceFileMergeProgress.java:776175-785643
 /hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/io/TestSequenceFileMergeProgress.java:817878-835934

Propchange: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/ipc/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Nov 15 02:39:13 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/ipc:1159757-1196451
+/hadoop/common/trunk/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/ipc:1159757-1202009
 /hadoop/core/branches/branch-0.19/hdfs/src/test/hdfs-with-mr/org/apache/hadoop/ipc:713112
 /hadoop/core/branches/branch-0.19/mapred/src/test/mapred/org/apache/hadoop/ipc:713112
 /hadoop/core/trunk/src/test/hdfs-with-mr/org/apache/hadoop/ipc:776175-784663

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestAuditLogger.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestAuditLogger.java?rev=1202013&r1=1202012&r2=1202013&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestAuditLogger.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestAuditLogger.java Tue Nov 15 02:39:13 2011
@@ -21,6 +21,7 @@ import java.net.InetAddress;
 import java.net.InetSocketAddress;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ipc.ProtocolInfo;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.ipc.TestRPC.TestImpl;
@@ -123,6 +124,7 @@ public class TestAuditLogger extends Tes
    * A special extension of {@link TestImpl} RPC server with
    * {@link TestImpl#ping()} testing the audit logs.
    */
+  @ProtocolInfo(protocolName = "org.apache.hadoop.ipc.TestRPC$TestProtocol")
   private class MyTestRPCServer extends TestImpl {
     @Override
     public void ping() {
@@ -135,10 +137,8 @@ public class TestAuditLogger extends Tes
   /**
    * Test {@link AuditLogger} with IP set.
    */
+  @SuppressWarnings("deprecation")
   public void testAuditLoggerWithIP() throws Exception {
-    /*
-    // TODO
-    // Disable test to address build failures.
     Configuration conf = new Configuration();
     // start the IPC server
     Server server = RPC.getServer(new MyTestRPCServer(), "0.0.0.0", 0, conf);
@@ -153,6 +153,5 @@ public class TestAuditLogger extends Tes
     proxy.ping();
 
     server.stop();
-    */
   }
 }

Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestSubmitJob.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestSubmitJob.java?rev=1202013&r1=1202012&r2=1202013&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestSubmitJob.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestSubmitJob.java Tue Nov 15 02:39:13 2011
@@ -17,44 +17,33 @@
  */
 package org.apache.hadoop.mapred;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
-import java.io.DataOutputStream;
 import java.io.IOException;
 import java.net.URI;
 import java.security.PrivilegedExceptionAction;
-import java.util.HashMap;
-import java.util.Map;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.protocolR23Compatible.ClientNamenodeWireProtocol;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.io.DataOutputBuffer;
-import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.mapred.lib.IdentityMapper;
-import org.apache.hadoop.mapred.lib.IdentityReducer;
 import org.apache.hadoop.mapreduce.MRConfig;
 import org.apache.hadoop.mapreduce.SleepJob;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.mapreduce.protocol.ClientProtocol;
 import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
-import org.apache.hadoop.mapreduce.split.JobSplit.SplitMetaInfo;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.ToolRunner;
-
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Ignore;
 import org.junit.Test;
 
 /**
@@ -68,42 +57,10 @@ import org.junit.Test;
 public class TestSubmitJob {
   static final Log LOG = LogFactory.getLog(TestSubmitJob.class);
   
-  private MiniMRCluster mrCluster;
-
-  private MiniDFSCluster dfsCluster;
-  private JobTracker jt;
-  private FileSystem fs;
   private static Path TEST_DIR = 
     new Path(System.getProperty("test.build.data","/tmp"), 
              "job-submission-testing");
-  private static int numSlaves = 1;
 
-  @Before
-  public void startCluster() throws Exception {
-    Configuration conf = new Configuration();
-    dfsCluster = new MiniDFSCluster(conf, numSlaves, true, null);
-    JobConf jConf = new JobConf(conf);
-    jConf.setLong("mapred.job.submission.expiry.interval", 6 * 1000);
-    mrCluster = new MiniMRCluster(0, 0, numSlaves, 
-        dfsCluster.getFileSystem().getUri().toString(), 1, null, null, null, 
-        jConf);
-    jt = mrCluster.getJobTrackerRunner().getJobTracker();
-    fs = FileSystem.get(mrCluster.createJobConf());
-  }
-  
-  @After
-  public void stopCluster() throws Exception {
-    if (mrCluster != null) {
-      mrCluster.shutdown();
-      mrCluster = null;
-    }
-    if (dfsCluster != null) {
-      dfsCluster.shutdown();
-      dfsCluster = null;
-    }
-    jt = null;
-    fs = null;
-  }
 
   /**
    * Test to verify that jobs with invalid memory requirements are killed at the
@@ -111,51 +68,53 @@ public class TestSubmitJob {
    * 
    * @throws Exception
    */
+  @SuppressWarnings("deprecation")
   @Test
-  public void testJobWithInvalidMemoryReqs()
-      throws Exception {
-    JobConf jtConf = new JobConf();
-    jtConf
-        .setLong(MRConfig.MAPMEMORY_MB, 1 * 1024L);
-    jtConf.setLong(MRConfig.REDUCEMEMORY_MB,
-        2 * 1024L);
-    jtConf.setLong(JTConfig.JT_MAX_MAPMEMORY_MB,
-        3 * 1024L);
-    jtConf.setLong(JTConfig.JT_MAX_REDUCEMEMORY_MB,
-        4 * 1024L);
-
-    mrCluster = new MiniMRCluster(0, "file:///", 0, null, null, jtConf);
-
-    JobConf clusterConf = mrCluster.createJobConf();
-
-    // No map-memory configuration
-    JobConf jobConf = new JobConf(clusterConf);
-    jobConf.setMemoryForReduceTask(1 * 1024L);
-    runJobAndVerifyFailure(jobConf, JobConf.DISABLED_MEMORY_LIMIT, 1 * 1024L,
-        "Invalid job requirements.");
-
-    // No reduce-memory configuration
-    jobConf = new JobConf(clusterConf);
-    jobConf.setMemoryForMapTask(1 * 1024L);
-    runJobAndVerifyFailure(jobConf, 1 * 1024L, JobConf.DISABLED_MEMORY_LIMIT,
-        "Invalid job requirements.");
-
-    // Invalid map-memory configuration
-    jobConf = new JobConf(clusterConf);
-    jobConf.setMemoryForMapTask(4 * 1024L);
-    jobConf.setMemoryForReduceTask(1 * 1024L);
-    runJobAndVerifyFailure(jobConf, 4 * 1024L, 1 * 1024L,
-        "Exceeds the cluster's max-memory-limit.");
-
-    // No reduce-memory configuration
-    jobConf = new JobConf(clusterConf);
-    jobConf.setMemoryForMapTask(1 * 1024L);
-    jobConf.setMemoryForReduceTask(5 * 1024L);
-    runJobAndVerifyFailure(jobConf, 1 * 1024L, 5 * 1024L,
-        "Exceeds the cluster's max-memory-limit.");
-    
+  public void testJobWithInvalidMemoryReqs() throws Exception {
+    MiniMRCluster mrCluster = null;
+    try {
+      JobConf jtConf = new JobConf();
+      jtConf.setLong(MRConfig.MAPMEMORY_MB, 1 * 1024L);
+      jtConf.setLong(MRConfig.REDUCEMEMORY_MB, 2 * 1024L);
+      jtConf.setLong(JTConfig.JT_MAX_MAPMEMORY_MB, 3 * 1024L);
+      jtConf.setLong(JTConfig.JT_MAX_REDUCEMEMORY_MB, 4 * 1024L);
+
+      mrCluster = new MiniMRCluster(0, "file:///", 0, null, null, jtConf);
+
+      JobConf clusterConf = mrCluster.createJobConf();
+
+      // No map-memory configuration
+      JobConf jobConf = new JobConf(clusterConf);
+      jobConf.setMemoryForReduceTask(1 * 1024L);
+      runJobAndVerifyFailure(jobConf, JobConf.DISABLED_MEMORY_LIMIT, 1 * 1024L,
+          "Invalid job requirements.");
+
+      // No reduce-memory configuration
+      jobConf = new JobConf(clusterConf);
+      jobConf.setMemoryForMapTask(1 * 1024L);
+      runJobAndVerifyFailure(jobConf, 1 * 1024L, JobConf.DISABLED_MEMORY_LIMIT,
+          "Invalid job requirements.");
+
+      // Invalid map-memory configuration
+      jobConf = new JobConf(clusterConf);
+      jobConf.setMemoryForMapTask(4 * 1024L);
+      jobConf.setMemoryForReduceTask(1 * 1024L);
+      runJobAndVerifyFailure(jobConf, 4 * 1024L, 1 * 1024L,
+          "Exceeds the cluster's max-memory-limit.");
+
+      // No reduce-memory configuration
+      jobConf = new JobConf(clusterConf);
+      jobConf.setMemoryForMapTask(1 * 1024L);
+      jobConf.setMemoryForReduceTask(5 * 1024L);
+      runJobAndVerifyFailure(jobConf, 1 * 1024L, 5 * 1024L,
+          "Exceeds the cluster's max-memory-limit.");
+    } finally {
+      if (mrCluster != null)
+        mrCluster.shutdown();
+    }
   }
   
+  @SuppressWarnings("deprecation")
   private void runJobAndVerifyFailure(JobConf jobConf, long memForMapTasks,
       long memForReduceTasks, String expectedMsg)
       throws Exception,
@@ -180,6 +139,7 @@ public class TestSubmitJob {
         .contains(overallExpectedMsg));
   }
   
+  @SuppressWarnings("deprecation")
   static ClientProtocol getJobSubmitClient(JobConf conf, 
                                            UserGroupInformation ugi) 
   throws IOException {
@@ -188,24 +148,23 @@ public class TestSubmitJob {
         conf, NetUtils.getSocketFactory(conf, ClientProtocol.class));
   }
 
-  static org.apache.hadoop.hdfs.protocol.ClientProtocol getDFSClient(
+  static ClientNamenodeWireProtocol getDFSClient(
       Configuration conf, UserGroupInformation ugi) 
   throws IOException {
-    return (org.apache.hadoop.hdfs.protocol.ClientProtocol) 
-      RPC.getProxy(org.apache.hadoop.hdfs.protocol.ClientProtocol.class, 
-        org.apache.hadoop.hdfs.protocol.ClientProtocol.versionID, 
+    return (ClientNamenodeWireProtocol) 
+      RPC.getProxy(ClientNamenodeWireProtocol.class, 
+          ClientNamenodeWireProtocol.versionID, 
         NameNode.getAddress(conf), ugi, 
         conf, 
         NetUtils.getSocketFactory(conf, 
-            org.apache.hadoop.hdfs.protocol.ClientProtocol.class));
+            ClientNamenodeWireProtocol.class));
   }
   
   /**
    * Submit a job and check if the files are accessible to other users.
-   * TODO fix testcase
    */
+  @SuppressWarnings("deprecation")
   @Test
-  @Ignore
   public void testSecureJobExecution() throws Exception {
     LOG.info("Testing secure job submission/execution");
     MiniMRCluster mr = null;
@@ -227,7 +186,6 @@ public class TestSubmitJob {
       mr = new MiniMRCluster(0, 0, 1, dfs.getFileSystem().getUri().toString(),
                              1, null, null, MR_UGI);
       JobTracker jt = mr.getJobTrackerRunner().getJobTracker();
-      String jobTrackerName = "localhost:" + mr.getJobTrackerPort();
 
       // cleanup
       dfs.getFileSystem().delete(TEST_DIR, true);
@@ -268,7 +226,7 @@ public class TestSubmitJob {
       UserGroupInformation user2 = 
         TestMiniMRWithDFSWithDistinctUsers.createUGI("user2", false);
       JobConf conf_other = mr.createJobConf();
-      org.apache.hadoop.hdfs.protocol.ClientProtocol client = 
+      ClientNamenodeWireProtocol client = 
         getDFSClient(conf_other, user2);
 
       // try accessing mapred.system.dir/jobid/*

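The restructured test above exercises the JobTracker's memory-requirement
validation: once the cluster sets MAPMEMORY_MB/REDUCEMEMORY_MB and the
JT_MAX_*MEMORY_MB caps, a submitted job must set memory for both task types,
and neither value may exceed the corresponding cap. A minimal sketch of the
job-side settings against the old mapred API (the values are illustrative):

    import org.apache.hadoop.mapred.JobConf;

    public class MemoryReqSketch {
      public static void main(String[] args) {
        JobConf jobConf = new JobConf();
        // Setting both task memory requirements within the cluster caps
        // (3 GB for maps, 4 GB for reduces in the test above) is accepted;
        // leaving either at JobConf.DISABLED_MEMORY_LIMIT, or exceeding a
        // cap, makes submission fail with the messages asserted above.
        jobConf.setMemoryForMapTask(1 * 1024L);     // 1 GB
        jobConf.setMemoryForReduceTask(2 * 1024L);  // 2 GB
        System.out.println(jobConf.getMemoryForMapTask());     // 1024
        System.out.println(jobConf.getMemoryForReduceTask());  // 2048
      }
    }
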
Modified: hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapreduce/TestNoJobSetupCleanup.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapreduce/TestNoJobSetupCleanup.java?rev=1202013&r1=1202012&r2=1202013&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapreduce/TestNoJobSetupCleanup.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapreduce/TestNoJobSetupCleanup.java Tue Nov 15 02:39:13 2011
@@ -56,16 +56,15 @@ public class TestNoJobSetupCleanup exten
     FileSystem fs = FileSystem.get(conf);
     assertTrue("Job output directory doesn't exit!", fs.exists(outDir));
 
-    // TODO
-    /*
-    // Disabling check for now to address builds until we fix underlying issue
-    // output still in temporary as job commit only seems
-    // to be called during job cleanup 
-    FileStatus[] list = fs.listStatus(outDir, new OutputFilter());
+    // job commit is done only during job cleanup, so the output
+    // should still be in the temporary location
+    String tempWorkingPathStr = outDir + Path.SEPARATOR + "_temporary"
+        + Path.SEPARATOR + "0";
+    Path tempWorkingPath = new Path(tempWorkingPathStr);
+    FileStatus[] list = fs.listStatus(tempWorkingPath, new OutputFilter());
     int numPartFiles = numReds == 0 ? numMaps : numReds;
     assertTrue("Number of part-files is " + list.length + " and not "
         + numPartFiles, list.length == numPartFiles);
-    */
     return job;
   }
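
The re-enabled assertion depends on the FileOutputCommitter directory layout:
until job commit runs, completed task output sits under a _temporary
subdirectory of the job output path, further keyed by the attempt id ("0"
here). A small sketch of the path the test lists (outDir is an illustrative
value):

    import org.apache.hadoop.fs.Path;

    public class TempOutputPath {
      public static void main(String[] args) {
        Path outDir = new Path("/user/test/out");  // illustrative output dir
        // Mirrors the tempWorkingPathStr construction in the test above.
        Path tempWorkingPath = new Path(
            outDir + Path.SEPARATOR + "_temporary" + Path.SEPARATOR + "0");
        System.out.println(tempWorkingPath);  // /user/test/out/_temporary/0
      }
    }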