Posted to yarn-commits@hadoop.apache.org by sz...@apache.org on 2013/01/24 03:45:52 UTC

svn commit: r1437843 [4/4] - in /hadoop/common/branches/HDFS-2802/hadoop-yarn-project: ./ hadoop-yarn/ hadoop-yarn/conf/ hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/ hadoop-yarn/hadoop-yarn-api/src/main/...

Modified: hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestClientTokens.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestClientTokens.java?rev=1437843&r1=1437842&r2=1437843&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestClientTokens.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestClientTokens.java Thu Jan 24 02:45:45 2013
@@ -52,8 +52,10 @@ import org.apache.hadoop.yarn.api.protoc
 import org.apache.hadoop.yarn.api.protocolrecords.StartContainerResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.StopContainerRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.StopContainerResponse;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.api.records.ClientToken;
 import org.apache.hadoop.yarn.event.Dispatcher;
 import org.apache.hadoop.yarn.event.DrainDispatcher;
 import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
@@ -67,6 +69,7 @@ import org.apache.hadoop.yarn.server.res
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.service.AbstractService;
 import org.apache.hadoop.yarn.util.BuilderUtils;
+import org.apache.hadoop.yarn.util.ProtoUtils;
 import org.apache.hadoop.yarn.util.Records;
 import org.junit.Test;
 
@@ -106,14 +109,14 @@ public class TestClientTokens {
   private static class CustomAM extends AbstractService implements
       CustomProtocol {
 
-    private final ApplicationId appId;
+    private final ApplicationAttemptId appAttemptId;
     private final String secretKey;
     private InetSocketAddress address;
     private boolean pinged = false;
 
-    public CustomAM(ApplicationId appId, String secretKeyStr) {
+    public CustomAM(ApplicationAttemptId appId, String secretKeyStr) {
       super("CustomAM");
-      this.appId = appId;
+      this.appAttemptId = appId;
       this.secretKey = secretKeyStr;
     }
 
@@ -128,7 +131,7 @@ public class TestClientTokens {
 
       ClientToAMTokenSecretManager secretManager = null;
       byte[] bytes = Base64.decodeBase64(this.secretKey);
-      secretManager = new ClientToAMTokenSecretManager(this.appId, bytes);
+      secretManager = new ClientToAMTokenSecretManager(this.appAttemptId, bytes);
       Server server;
       try {
         server =
@@ -216,7 +219,7 @@ public class TestClientTokens {
     GetApplicationReportResponse reportResponse =
         rm.getClientRMService().getApplicationReport(request);
     ApplicationReport appReport = reportResponse.getApplicationReport();
-    String clientTokenEncoded = appReport.getClientToken();
+    ClientToken clientToken = appReport.getClientToken();
 
     // Wait till AM is 'launched'
     int waitTime = 0;
@@ -226,9 +229,11 @@ public class TestClientTokens {
     Assert.assertNotNull(containerManager.clientTokensSecret);
 
     // Start the AM with the correct shared-secret.
+    ApplicationAttemptId appAttemptId =
+        app.getAppAttempts().keySet().iterator().next();
+    Assert.assertNotNull(appAttemptId);
     final CustomAM am =
-        new CustomAM(app.getApplicationId(),
-          containerManager.clientTokensSecret);
+        new CustomAM(appAttemptId, containerManager.clientTokensSecret);
     am.init(conf);
     am.start();
 
@@ -249,21 +254,19 @@ public class TestClientTokens {
 
     // Verify denial for a malicious user
     UserGroupInformation ugi = UserGroupInformation.createRemoteUser("me");
-    Token<ClientTokenIdentifier> clientToken =
-        new Token<ClientTokenIdentifier>();
-    clientToken.decodeFromUrlString(clientTokenEncoded);
-    // RPC layer client expects ip:port as service for tokens
-    SecurityUtil.setTokenService(clientToken, am.address);
+    Token<ClientTokenIdentifier> token =
+        ProtoUtils.convertFromProtoFormat(clientToken, am.address);
 
     // Malicious user, messes with appId
     ClientTokenIdentifier maliciousID =
-        new ClientTokenIdentifier(BuilderUtils.newApplicationId(app
-          .getApplicationId().getClusterTimestamp(), 42));
+        new ClientTokenIdentifier(BuilderUtils.newApplicationAttemptId(
+          BuilderUtils.newApplicationId(app.getApplicationId()
+            .getClusterTimestamp(), 42), 43));
 
     Token<ClientTokenIdentifier> maliciousToken =
         new Token<ClientTokenIdentifier>(maliciousID.getBytes(),
-          clientToken.getPassword(), clientToken.getKind(),
-          clientToken.getService());
+          token.getPassword(), token.getKind(),
+          token.getService());
     ugi.addToken(maliciousToken);
 
     try {
@@ -297,7 +300,7 @@ public class TestClientTokens {
 
     // Now for an authenticated user
     ugi = UserGroupInformation.createRemoteUser("me");
-    ugi.addToken(clientToken);
+    ugi.addToken(token);
 
     ugi.doAs(new PrivilegedExceptionAction<Void>() {
       @Override
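
For context, the client-side flow the updated test exercises is: fetch the ApplicationReport from the ResourceManager, take its ClientToken record, convert it to an RPC Token bound to the AM's address via ProtoUtils, and attach it to the calling UserGroupInformation before invoking the AM's protocol. A minimal sketch of that flow, assuming the package locations on this branch; the class name, method name, and user name below are illustrative only, and the actual AM protocol call is elided:

import java.net.InetSocketAddress;
import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.ClientToken;
import org.apache.hadoop.yarn.security.client.ClientTokenIdentifier;
import org.apache.hadoop.yarn.util.ProtoUtils;

public class ClientTokenFlowSketch {

  static void attachClientToken(ApplicationReport report,
      InetSocketAddress amAddress) throws Exception {
    // The report now carries a ClientToken record rather than an
    // encoded string, so no decodeFromUrlString step is needed.
    ClientToken clientToken = report.getClientToken();

    // convertFromProtoFormat binds the token to the AM's ip:port;
    // the RPC layer uses that service name to pick the token for the
    // connection (previously done via SecurityUtil.setTokenService).
    Token<ClientTokenIdentifier> token =
        ProtoUtils.convertFromProtoFormat(clientToken, amAddress);

    UserGroupInformation ugi =
        UserGroupInformation.createRemoteUser("client-user");
    ugi.addToken(token);

    ugi.doAs(new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        // Open an RPC proxy to the AM's client-facing protocol here;
        // on the AM side the ClientToAMTokenSecretManager is now keyed
        // by ApplicationAttemptId rather than ApplicationId.
        return null;
      }
    });
  }
}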

Modified: hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer?rev=1437843&r1=1437842&r2=1437843&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer Thu Jan 24 02:45:45 2013
@@ -1 +1,14 @@
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
 org.apache.hadoop.yarn.server.resourcemanager.security.TestDelegationTokenRenewer$Renewer
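
The resource above is a standard java.util.ServiceLoader registration file: the Hadoop security layer loads TokenRenewer implementations listed under META-INF/services and picks the one whose handleKind() matches a token's kind when renew() or cancel() is invoked on that token. A minimal sketch of such a renewer, using a hypothetical token kind (EXAMPLE_TOKEN) purely for illustration; the real renewer registered here is the nested Renewer class inside TestDelegationTokenRenewer:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenRenewer;

// Registered by listing the class name in
// META-INF/services/org.apache.hadoop.security.token.TokenRenewer,
// exactly as the test resource above does.
public class ExampleTokenRenewer extends TokenRenewer {

  // Hypothetical token kind used only for illustration.
  private static final Text KIND = new Text("EXAMPLE_TOKEN");

  @Override
  public boolean handleKind(Text kind) {
    return KIND.equals(kind);
  }

  @Override
  public boolean isManaged(Token<?> token) {
    return true;
  }

  @Override
  public long renew(Token<?> token, Configuration conf) {
    // Return the new expiration time; a real renewer would call out
    // to the service that issued the token.
    return Long.MAX_VALUE;
  }

  @Override
  public void cancel(Token<?> token, Configuration conf) {
    // No-op in this sketch.
  }
}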

Modified: hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxy.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxy.java?rev=1437843&r1=1437842&r2=1437843&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxy.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxy.java Thu Jan 24 02:45:45 2013
@@ -114,4 +114,13 @@ public class WebAppProxy extends Abstrac
     }
     super.stop();
   }
+
+  public void join() {
+    if(proxyServer != null) {
+      try {
+        proxyServer.join();
+      } catch (InterruptedException e) {
+      }
+    }
+  }
 }

Modified: hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServer.java?rev=1437843&r1=1437842&r2=1437843&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServer.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServer.java Thu Jan 24 02:45:45 2013
@@ -73,6 +73,14 @@ public class WebAppProxyServer extends C
         YarnConfiguration.PROXY_PRINCIPAL);
   }
 
+  /**
+   * Wait for service to finish.
+   * (Normally, it runs forever.)
+   */
+  private void join() {
+    proxy.join();
+  }
+
   public static void main(String[] args) {
     Thread.setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler());
     StringUtils.startupShutdownMessage(WebAppProxyServer.class, args, LOG);
@@ -84,6 +92,7 @@ public class WebAppProxyServer extends C
       YarnConfiguration conf = new YarnConfiguration();
       proxy.init(conf);
       proxy.start();
+      proxy.join();
     } catch (Throwable t) {
       LOG.fatal("Error starting Proxy server", t);
       System.exit(-1);
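
The join() additions above give the proxy the same standalone-daemon shape as the other YARN servers: main() initializes and starts the service, then blocks on the embedded server so the process keeps running while the proxy is serving. A self-contained sketch of that pattern, using a plain Thread in place of the real embedded HTTP server; all names in it are illustrative rather than WebAppProxy internals:

public class DaemonJoinSketch {

  // Stands in for the embedded HTTP server the real proxy starts.
  private Thread serverThread;

  public void start() {
    serverThread = new Thread(new Runnable() {
      @Override
      public void run() {
        try {
          // Serve requests until interrupted; the sleep just keeps
          // this stand-in thread alive.
          Thread.sleep(Long.MAX_VALUE);
        } catch (InterruptedException e) {
          // Exit on interruption, e.g. when the service is stopped.
        }
      }
    }, "proxy-server");
    // Daemon thread: the JVM would exit as soon as main() returns
    // unless something blocks, which is what join() below provides.
    serverThread.setDaemon(true);
    serverThread.start();
  }

  // Mirrors WebAppProxy.join(): block until the server thread ends,
  // swallowing the interrupt so callers simply fall through.
  public void join() {
    if (serverThread != null) {
      try {
        serverThread.join();
      } catch (InterruptedException e) {
        // Ignored, matching the patch above.
      }
    }
  }

  public static void main(String[] args) {
    DaemonJoinSketch proxy = new DaemonJoinSketch();
    proxy.start();
    proxy.join(); // Keeps the process running until the server exits.
  }
}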

Modified: hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm?rev=1437843&r1=1437842&r2=1437843&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm Thu Jan 24 02:45:45 2013
@@ -132,20 +132,45 @@ Hadoop MapReduce Next Generation - Fair 
     * Whether to use the username associated with the allocation as the default 
       queue name, in the event that a queue name is not specified. If this is set 
       to "false" or unset, all jobs have a shared default queue, called "default".
+      Defaults to true.
 
  * <<<yarn.scheduler.fair.preemption>>>
 
     * Whether to use preemption. Note that preemption is experimental in the current
-      version.
+      version. Defaults to false.
 
  * <<<yarn.scheduler.fair.sizebasedweight>>>
   
     * Whether to assign shares to individual apps based on their size, rather than
-      providing an equal share to all apps regardless of size.
+      providing an equal share to all apps regardless of size. Defaults to false.
 
  * <<<yarn.scheduler.fair.assignmultiple>>>
 
-    * Whether to allow multiple container assignments in one heartbeat.
+    * Whether to allow multiple container assignments in one heartbeat. Defaults
+      to false.
+
+ * <<<yarn.scheduler.fair.max.assign>>>
+
+    * If assignmultiple is true, the maximum number of containers that can be
+      assigned in one heartbeat. Defaults to -1, which sets no limit.
+
+ * <<<locality.threshold.node>>>
+
+    * For applications that request containers on particular nodes, the number of
+      scheduling opportunities since the last container assignment to wait before
+      accepting a placement on another node. Expressed as a float between 0 and 1,
+      which, as a fraction of the cluster size, is the number of scheduling
+      opportunities to pass up. The default value of -1.0 means don't pass up any
+      scheduling opportunities.
+
+ * <<<locality.threshold.rack>>>
+
+    * For applications that request containers on particular racks, the number of
+      scheduling opportunities since the last container assignment to wait before
+      accepting a placement on another rack. Expressed as a float between 0 and 1,
+      which, as a fraction of the cluster size, is the number of scheduling
+      opportunities to pass up. The default value of -1.0 means don't pass up any
+      scheduling opportunities.
 
 Allocation file format
 
@@ -166,6 +191,14 @@ Allocation file format
    * schedulingMode: either "fifo" or "fair" depending on the in-queue scheduling
      policy desired
 
+   * aclSubmitApps: a list of users that can submit apps to the queue. A (default)
+     value of "*" means that any user can submit apps. A queue inherits the ACL of
+     its parent, so if a queue2 descends from queue1, and user1 is in queue1's ACL,
+     and user2 is in queue2's ACL, then both users may submit to queue2.
+
+   * minSharePreemptionTimeout: number of seconds the queue is under its minimum share
+     before it will try to preempt containers to take resources from other queues.
+
  * <<User elements>>, which represent settings governing the behavior of individual 
      users. They can contain a single property: maxRunningApps, a limit on the 
      number of running apps for a particular user.
@@ -173,6 +206,10 @@ Allocation file format
  * <<A userMaxAppsDefault element>>, which sets the default running app limit 
    for any users whose limit is not otherwise specified.
 
+ * <<A fairSharePreemptionTimeout element>>, which sets the number of seconds a queue is under
+   its fair share before it will try to preempt containers to take resources from
+   other queues.
+
   An example allocation file is given here:
 
 ---
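
The scheduler properties documented above are ordinary configuration keys read from yarn-site.xml through the Configuration API. A minimal sketch that sets a few of them programmatically; the property names are taken verbatim from the section above, while the values are purely illustrative:

import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class FairSchedulerConfSketch {

  public static YarnConfiguration exampleConf() {
    YarnConfiguration conf = new YarnConfiguration();

    // Allow several container assignments per heartbeat, capped at
    // three (illustrative values, not recommendations).
    conf.setBoolean("yarn.scheduler.fair.assignmultiple", true);
    conf.setInt("yarn.scheduler.fair.max.assign", 3);

    // Pass up to half the cluster's worth of scheduling opportunities
    // while waiting for a node-local or rack-local placement.
    conf.setFloat("locality.threshold.node", 0.5f);
    conf.setFloat("locality.threshold.rack", 0.5f);

    // Preemption is documented as experimental and off by default.
    conf.setBoolean("yarn.scheduler.fair.preemption", false);

    return conf;
  }
}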

Modified: hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/pom.xml?rev=1437843&r1=1437842&r2=1437843&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/pom.xml (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/pom.xml Thu Jan 24 02:45:45 2013
@@ -149,9 +149,10 @@
         <groupId>org.apache.rat</groupId>
         <artifactId>apache-rat-plugin</artifactId>
         <configuration>
-          <includes>
-            <include>pom.xml</include>
-          </includes>
+          <excludes>
+            <exclude>conf/slaves</exclude>
+            <exclude>conf/container-executor.cfg</exclude>
+          </excludes>
         </configuration>
       </plugin>
       <plugin>

Modified: hadoop/common/branches/HDFS-2802/hadoop-yarn-project/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-yarn-project/pom.xml?rev=1437843&r1=1437842&r2=1437843&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-yarn-project/pom.xml (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-yarn-project/pom.xml Thu Jan 24 02:45:45 2013
@@ -213,9 +213,9 @@
         <groupId>org.apache.rat</groupId>
         <artifactId>apache-rat-plugin</artifactId>
         <configuration>
-          <includes>
-            <include>pom.xml</include>
-          </includes>
+          <excludes>
+            <exclude>CHANGES.txt</exclude>
+          </excludes>
         </configuration>
       </plugin>
     </plugins>