Posted to common-commits@hadoop.apache.org by om...@apache.org on 2011/03/04 04:46:20 UTC

svn commit: r1077150 [2/2] - in /hadoop/common/branches/branch-0.20-security-patches/src: core/org/apache/hadoop/fs/ core/org/apache/hadoop/ipc/ core/org/apache/hadoop/security/ core/org/apache/hadoop/security/token/ hdfs/org/apache/hadoop/hdfs/ hdfs/o...

Added: hadoop/common/branches/branch-0.20-security-patches/src/core/org/apache/hadoop/security/token/TokenInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/core/org/apache/hadoop/security/token/TokenInfo.java?rev=1077150&view=auto
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/core/org/apache/hadoop/security/token/TokenInfo.java (added)
+++ hadoop/common/branches/branch-0.20-security-patches/src/core/org/apache/hadoop/security/token/TokenInfo.java Fri Mar  4 03:46:18 2011
@@ -0,0 +1,31 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.security.token;
+
+import java.lang.annotation.*;
+
+/**
+ * Indicates token-related information to be used
+ */
+@Retention(RetentionPolicy.RUNTIME)
+@Target(ElementType.TYPE)
+public @interface TokenInfo {
+  /** The type of TokenSelector to be used */
+  Class<? extends TokenSelector<? extends TokenIdentifier>> value();
+}
\ No newline at end of file
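
For orientation, a protocol interface opts into token authentication by carrying this annotation and naming the selector that can locate its tokens. A minimal sketch (MyProtocol and MyTokenSelector are hypothetical placeholders; the real usage on ClientProtocol appears further down in this commit):

    import org.apache.hadoop.ipc.VersionedProtocol;
    import org.apache.hadoop.security.token.TokenInfo;

    // Hypothetical protocol; mirrors how ClientProtocol is annotated below.
    @TokenInfo(MyTokenSelector.class)
    public interface MyProtocol extends VersionedProtocol {
      long versionID = 1L;

      String echo(String msg) throws java.io.IOException;
    }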

Added: hadoop/common/branches/branch-0.20-security-patches/src/core/org/apache/hadoop/security/token/TokenSelector.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/core/org/apache/hadoop/security/token/TokenSelector.java?rev=1077150&view=auto
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/core/org/apache/hadoop/security/token/TokenSelector.java (added)
+++ hadoop/common/branches/branch-0.20-security-patches/src/core/org/apache/hadoop/security/token/TokenSelector.java Fri Mar  4 03:46:18 2011
@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.security.token;
+
+import java.util.Collection;
+
+import org.apache.hadoop.io.Text;
+
+/**
+ * Select a token of type T from the given tokens, for use with the named service.
+ * 
+ * @param <T>
+ *          T extends TokenIdentifier
+ */
+public interface TokenSelector<T extends TokenIdentifier> {
+  Token<T> selectToken(Text service,
+      Collection<Token<? extends TokenIdentifier>> tokens);
+}
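
On the client side, the selector named by @TokenInfo is recovered reflectively and asked for a token matching the server's service name. A sketch of that lookup, reusing the hypothetical MyProtocol from above and assuming UserGroupInformation.getTokens() as the token source (reflection exception handling elided; the real SASL client plumbing is more involved):

    TokenInfo info = MyProtocol.class.getAnnotation(TokenInfo.class);
    TokenSelector<? extends TokenIdentifier> selector = info.value().newInstance();
    Token<? extends TokenIdentifier> token = selector.selectToken(
        new Text("127.0.0.1:8020"),   // service name, typically host:port
        UserGroupInformation.getCurrentUser().getTokens());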

Added: hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/DFSConfigKeys.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/DFSConfigKeys.java?rev=1077150&view=auto
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/DFSConfigKeys.java (added)
+++ hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/DFSConfigKeys.java Fri Mar  4 03:46:18 2011
@@ -0,0 +1,197 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+
+/** 
+ * This class contains constants for configuration keys used
+ * in HDFS.
+ *
+ */
+
+public class DFSConfigKeys extends CommonConfigurationKeys {
+
+  public static final String  DFS_BLOCK_SIZE_KEY = "dfs.blocksize";
+  public static final long    DFS_BLOCK_SIZE_DEFAULT = 64*1024*1024;
+  public static final String  DFS_REPLICATION_KEY = "dfs.replication";
+  public static final short   DFS_REPLICATION_DEFAULT = 3;
+  public static final String  DFS_STREAM_BUFFER_SIZE_KEY = "dfs.stream-buffer-size";
+  public static final int     DFS_STREAM_BUFFER_SIZE_DEFAULT = 4096;
+  public static final String  DFS_BYTES_PER_CHECKSUM_KEY = "dfs.bytes-per-checksum";
+  public static final int     DFS_BYTES_PER_CHECKSUM_DEFAULT = 512;
+  public static final String  DFS_CLIENT_WRITE_PACKET_SIZE_KEY = "dfs.client-write-packet-size";
+  public static final int     DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT = 64*1024;
+  
+  public static final String  DFS_NAMENODE_BACKUP_ADDRESS_KEY = "dfs.namenode.backup.address";
+  public static final String  DFS_NAMENODE_BACKUP_ADDRESS_DEFAULT = "localhost:50100";
+  public static final String  DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY = "dfs.namenode.backup.http-address";
+  public static final String  DFS_NAMENODE_BACKUP_HTTP_ADDRESS_DEFAULT = "0.0.0.0:50105";
+  public static final String  DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY = "dfs.datanode.balance.bandwidthPerSec";
+  public static final long    DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_DEFAULT = 1024*1024;
+  public static final String  DFS_NAMENODE_HTTP_ADDRESS_KEY = "dfs.namenode.http-address";
+  public static final String  DFS_NAMENODE_HTTP_ADDRESS_DEFAULT = "0.0.0.0:50070";
+  public static final String  DFS_NAMENODE_MAX_OBJECTS_KEY = "dfs.namenode.max.objects";
+  public static final long    DFS_NAMENODE_MAX_OBJECTS_DEFAULT = 0;
+  public static final String  DFS_NAMENODE_SAFEMODE_EXTENSION_KEY = "dfs.namenode.safemode.extension";
+  public static final int     DFS_NAMENODE_SAFEMODE_EXTENSION_DEFAULT = 30000;
+  public static final String  DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY = "dfs.namenode.safemode.threshold-pct";
+  public static final float   DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_DEFAULT = 0.999f;
+  public static final String  DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY = "dfs.namenode.secondary.http-address";
+  public static final String  DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_DEFAULT = "0.0.0.0:50090";
+  public static final String  DFS_NAMENODE_CHECKPOINT_PERIOD_KEY = "dfs.namenode.checkpoint.period";
+  public static final long    DFS_NAMENODE_CHECKPOINT_PERIOD_DEFAULT = 3600;
+  public static final String  DFS_NAMENODE_CHECKPOINT_SIZE_KEY = "dfs.namenode.checkpoint.size";
+  public static final long    DFS_NAMENODE_CHECKPOINT_SIZE_DEFAULT = 4194304;
+  public static final String  DFS_NAMENODE_UPGRADE_PERMISSION_KEY = "dfs.namenode.upgrade.permission";
+  public static final int     DFS_NAMENODE_UPGRADE_PERMISSION_DEFAULT = 00777;
+  public static final String  DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY = "dfs.namenode.heartbeat.recheck-interval";
+  public static final int     DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT = 5*60*1000;
+  public static final String  DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY = "dfs.client.https.keystore.resource";
+  public static final String  DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_DEFAULT = "ssl-client.xml";
+  public static final String  DFS_CLIENT_HTTPS_NEED_AUTH_KEY = "dfs.client.https.need-auth";
+  public static final boolean DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT = false;
+  public static final String  DFS_NAMENODE_ACCESSTIME_PRECISION_KEY = "dfs.namenode.accesstime.precision";
+  public static final long    DFS_NAMENODE_ACCESSTIME_PRECISION_DEFAULT = 3600000;
+  public static final String  DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY = "dfs.namenode.replication.considerLoad";
+  public static final boolean DFS_NAMENODE_REPLICATION_CONSIDERLOAD_DEFAULT = true;
+  public static final String  DFS_NAMENODE_REPLICATION_INTERVAL_KEY = "dfs.namenode.replication.interval";
+  public static final int     DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT = 3;
+  public static final String  DFS_NAMENODE_REPLICATION_MIN_KEY = "dfs.namenode.replication.min";
+  public static final int     DFS_NAMENODE_REPLICATION_MIN_DEFAULT = 1;
+  public static final String  DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY = "dfs.namenode.replication.pending.timeout-sec";
+  public static final int     DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_DEFAULT = -1;
+  public static final String  DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY = "dfs.namenode.replication.max-streams";
+  public static final int     DFS_NAMENODE_REPLICATION_MAX_STREAMS_DEFAULT = 2;
+  public static final String  DFS_PERMISSIONS_ENABLED_KEY = "dfs.permissions.enabled";
+  public static final boolean DFS_PERMISSIONS_ENABLED_DEFAULT = true;
+  public static final String  DFS_PERMISSIONS_SUPERUSERGROUP_KEY = "dfs.permissions.superusergroup";
+  public static final String  DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT = "supergroup";
+  public static final String  DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY = "dfs.https.server.keystore.resource";
+  public static final String  DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT = "ssl-server.xml";
+  public static final String  DFS_NAMENODE_NAME_DIR_RESTORE_KEY = "dfs.namenode.name.dir.restore";
+  public static final boolean DFS_NAMENODE_NAME_DIR_RESTORE_DEFAULT = false;
+
+  //Delegation token related keys
+  public static final String  DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_KEY = "dfs.namenode.delegation.key.update-interval";
+  public static final long    DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT = 86400;
+  public static final String  DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY = "dfs.namenode.delegation.token.renew-interval";
+  public static final long    DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_DEFAULT = 86400;
+  public static final String  DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY = "dfs.namenode.delegation.token.max-lifetime";
+  public static final long    DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT = 604800;
+
+  //Following keys have no defaults
+  public static final String  DFS_DATANODE_DATA_DIR_KEY = "dfs.datanode.data.dir";
+  public static final String  DFS_NAMENODE_HTTPS_ADDRESS_KEY = "dfs.namenode.https-address";
+  public static final String  DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT = "0.0.0.0:50470";
+  public static final String  DFS_NAMENODE_NAME_DIR_KEY = "dfs.namenode.name.dir";
+  public static final String  DFS_NAMENODE_EDITS_DIR_KEY = "dfs.namenode.edits.dir";
+  public static final String  DFS_CLIENT_READ_PREFETCH_SIZE_KEY = "dfs.client.read.prefetch.size"; 
+  public static final String  DFS_CLIENT_RETRY_WINDOW_BASE= "dfs.client.retry.window.base";
+  public static final String  DFS_METRICS_SESSION_ID_KEY = "dfs.metrics.session-id";
+  public static final String  DFS_DATANODE_HOST_NAME_KEY = "dfs.datanode.hostname";
+  public static final String  DFS_DATANODE_STORAGEID_KEY = "dfs.datanode.StorageId";
+  public static final String  DFS_NAMENODE_HOSTS_KEY = "dfs.namenode.hosts";
+  public static final String  DFS_NAMENODE_HOSTS_EXCLUDE_KEY = "dfs.namenode.hosts.exclude";
+  public static final String  DFS_CLIENT_SOCKET_TIMEOUT_KEY = "dfs.client.socket-timeout";
+  public static final String  DFS_NAMENODE_CHECKPOINT_DIR_KEY = "dfs.namenode.checkpoint.dir";
+  public static final String  DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY = "dfs.namenode.checkpoint.edits.dir";
+
+  //Code in hdfs is not updated to use these keys.
+  public static final String  DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY = "dfs.client.block.write.locateFollowingBlock.retries";
+  public static final int     DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_DEFAULT = 5;
+  public static final String  DFS_CLIENT_BLOCK_WRITE_RETRIES_KEY = "dfs.client.block.write.retries";
+  public static final int     DFS_CLIENT_BLOCK_WRITE_RETRIES_DEFAULT = 3;
+  public static final String  DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY = "dfs.client.max.block.acquire.failures";
+  public static final int     DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT = 3;
+  public static final String  DFS_BALANCER_MOVEDWINWIDTH_KEY = "dfs.balancer.movedWinWidth";
+  public static final int     DFS_BALANCER_MOVEDWINWIDTH_DEFAULT = 5400*1000;
+  public static final String  DFS_DATANODE_ADDRESS_KEY = "dfs.datanode.address";
+  public static final String  DFS_DATANODE_ADDRESS_DEFAULT = "0.0.0.0:50010";
+  public static final String  DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY = "dfs.datanode.directoryscan.interval";
+  public static final int     DFS_DATANODE_DIRECTORYSCAN_INTERVAL_DEFAULT = 21600;
+  public static final String  DFS_DATANODE_DNS_INTERFACE_KEY = "dfs.datanode.dns.interface";
+  public static final String  DFS_DATANODE_DNS_INTERFACE_DEFAULT = "default";
+  public static final String  DFS_DATANODE_DNS_NAMESERVER_KEY = "dfs.datanode.dns.nameserver";
+  public static final String  DFS_DATANODE_DNS_NAMESERVER_DEFAULT = "default";
+  public static final String  DFS_DATANODE_DU_RESERVED_KEY = "dfs.datanode.du.reserved";
+  public static final long    DFS_DATANODE_DU_RESERVED_DEFAULT = 0;
+  public static final String  DFS_DATANODE_HANDLER_COUNT_KEY = "dfs.datanode.handler.count";
+  public static final int     DFS_DATANODE_HANDLER_COUNT_DEFAULT = 3;
+  public static final String  DFS_DATANODE_HTTP_ADDRESS_KEY = "dfs.datanode.http.address";
+  public static final String  DFS_DATANODE_HTTP_ADDRESS_DEFAULT = "0.0.0.0:50075";
+  public static final String  DFS_DATANODE_MAX_XCIEVERS_KEY = "dfs.datanode.max.xcievers";
+  public static final int     DFS_DATANODE_MAX_XCIEVERS_DEFAULT = 256;
+  public static final String  DFS_DATANODE_NUMBLOCKS_KEY = "dfs.datanode.numblocks";
+  public static final int     DFS_DATANODE_NUMBLOCKS_DEFAULT = 64;
+  public static final String  DFS_DATANODE_SCAN_PERIOD_HOURS_KEY = "dfs.datanode.scan.period.hours";
+  public static final int     DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT = 0;
+  public static final String  DFS_DATANODE_SIMULATEDDATASTORAGE_KEY = "dfs.datanode.simulateddatastorage";
+  public static final boolean DFS_DATANODE_SIMULATEDDATASTORAGE_DEFAULT = false;
+  public static final String  DFS_DATANODE_SIMULATEDDATASTORAGE_CAPACITY_KEY = "dfs.datanode.simulateddatastorage.capacity";
+  public static final long    DFS_DATANODE_SIMULATEDDATASTORAGE_CAPACITY_DEFAULT = 2L<<40;
+  public static final String  DFS_DATANODE_TRANSFERTO_ALLOWED_KEY = "dfs.datanode.transferTo.allowed";
+  public static final boolean DFS_DATANODE_TRANSFERTO_ALLOWED_DEFAULT = true;
+  public static final String  DFS_HEARTBEAT_INTERVAL_KEY = "dfs.heartbeat.interval";
+  public static final long    DFS_HEARTBEAT_INTERVAL_DEFAULT = 3;
+  public static final String  DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY = "dfs.namenode.decommission.interval";
+  public static final int     DFS_NAMENODE_DECOMMISSION_INTERVAL_DEFAULT = 30;
+  public static final String  DFS_NAMENODE_DECOMMISSION_NODES_PER_INTERVAL_KEY = "dfs.namenode.decommission.nodes.per.interval";
+  public static final int     DFS_NAMENODE_DECOMMISSION_NODES_PER_INTERVAL_DEFAULT = 5;
+  public static final String  DFS_NAMENODE_HANDLER_COUNT_KEY = "dfs.namenode.handler.count";
+  public static final int     DFS_NAMENODE_HANDLER_COUNT_DEFAULT = 10;
+  public static final String  DFS_SUPPORT_APPEND_KEY = "dfs.support.append";
+  public static final boolean DFS_SUPPORT_APPEND_DEFAULT = false;
+  public static final String  DFS_HTTPS_ENABLE_KEY = "dfs.https.enable";
+  public static final boolean DFS_HTTPS_ENABLE_DEFAULT = false;
+  public static final String  DFS_DEFAULT_CHUNK_VIEW_SIZE_KEY = "dfs.default.chunk.view.size";
+  public static final int     DFS_DEFAULT_CHUNK_VIEW_SIZE_DEFAULT = 32*1024;
+  public static final String  DFS_DATANODE_HTTPS_ADDRESS_KEY = "dfs.datanode.https.address";
+  public static final String  DFS_DATANODE_HTTPS_ADDRESS_DEFAULT = "0.0.0.0:50475";
+  public static final String  DFS_DATANODE_IPC_ADDRESS_KEY = "dfs.datanode.ipc.address";
+  public static final String  DFS_DATANODE_IPC_ADDRESS_DEFAULT = "0.0.0.0:50020";
+
+  public static final String  DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY = "dfs.block.access.token.enable";
+  public static final boolean DFS_BLOCK_ACCESS_TOKEN_ENABLE_DEFAULT = false;
+  public static final String  DFS_BLOCK_ACCESS_KEY_UPDATE_INTERVAL_KEY = "dfs.block.access.key.update.interval";
+  public static final long    DFS_BLOCK_ACCESS_KEY_UPDATE_INTERVAL_DEFAULT = 600L;
+  public static final String  DFS_BLOCK_ACCESS_TOKEN_LIFETIME_KEY = "dfs.block.access.token.lifetime";
+  public static final long    DFS_BLOCK_ACCESS_TOKEN_LIFETIME_DEFAULT = 600L;
+
+  public static final String  DFS_REPLICATION_MAX_KEY = "dfs.replication.max";
+  public static final int     DFS_REPLICATION_MAX_DEFAULT = 512;
+  public static final String  DFS_DF_INTERVAL_KEY = "dfs.df.interval";
+  public static final int     DFS_DF_INTERVAL_DEFAULT = 60000;
+  public static final String  DFS_BLOCKREPORT_INTERVAL_MSEC_KEY = "dfs.blockreport.intervalMsec";
+  public static final long    DFS_BLOCKREPORT_INTERVAL_MSEC_DEFAULT = 21600000;
+  public static final String  DFS_BLOCKREPORT_INITIAL_DELAY_KEY = "dfs.blockreport.initialDelay";
+  public static final int     DFS_BLOCKREPORT_INITIAL_DELAY_DEFAULT = 0;
+
+  //Keys with no defaults
+  public static final String  DFS_DATANODE_PLUGINS_KEY = "dfs.datanode.plugins";
+  public static final String  DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY = "dfs.datanode.socket.write.timeout";
+  public static final String  DFS_DATANODE_STARTUP_KEY = "dfs.datanode.startup";
+  public static final String  DFS_NAMENODE_PLUGINS_KEY = "dfs.namenode.plugins";
+  public static final String  DFS_WEB_UGI_KEY = "dfs.web.ugi";
+  public static final String  DFS_NAMENODE_STARTUP_KEY = "dfs.namenode.startup";
+  public static final String  DFS_DATANODE_KEYTAB_FILE_KEY = "dfs.datanode.keytab.file";
+  public static final String  DFS_DATANODE_USER_NAME_KEY = "dfs.datanode.user.name.key";
+  public static final String  DFS_NAMENODE_KEYTAB_FILE_KEY = "dfs.namenode.keytab.file";
+  public static final String  DFS_NAMENODE_USER_NAME_KEY = "dfs.namenode.user.name.key";
+}
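
The KEY/DEFAULT pairs above are meant to be passed together to the typed Configuration getters, so call sites never repeat literal key strings or magic numbers. A sketch:

    Configuration conf = new Configuration();
    long blockSize = conf.getLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
        DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
    int handlerCount = conf.getInt(DFSConfigKeys.DFS_NAMENODE_HANDLER_COUNT_KEY,
        DFSConfigKeys.DFS_NAMENODE_HANDLER_COUNT_DEFAULT);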

Modified: hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientProtocol.java?rev=1077150&r1=1077149&r2=1077150&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientProtocol.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientProtocol.java Fri Mar  4 03:46:18 2011
@@ -23,12 +23,16 @@ import org.apache.hadoop.ipc.VersionedPr
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction;
 import org.apache.hadoop.hdfs.security.token.DelegationTokenIdentifier;
+import org.apache.hadoop.hdfs.security.token.DelegationTokenSelector;
 import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
 import org.apache.hadoop.fs.permission.*;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.KerberosInfo;
 import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenInfo;
 
 /**********************************************************************
  * ClientProtocol is used by user code via 
@@ -37,6 +41,8 @@ import org.apache.hadoop.security.token.
  * as well as open/close file streams, etc.
  *
  **********************************************************************/
+@KerberosInfo(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY)
+@TokenInfo(DelegationTokenSelector.class)
 public interface ClientProtocol extends VersionedProtocol {
 
   /**
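
ClientProtocol now advertises both the Kerberos identity of its server and the selector for its delegation tokens. Note that the @KerberosInfo value is itself a configuration key ("dfs.namenode.user.name.key" per DFSConfigKeys above), so resolving the expected server principal takes one extra indirection. A sketch; KerberosInfo's definition is not part of this commit, but the single-element annotation syntax implies a value() accessor:

    KerberosInfo krb = ClientProtocol.class.getAnnotation(KerberosInfo.class);
    // krb.value() is the configuration key, not the principal itself.
    String serverPrincipal = conf.get(krb.value());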

Modified: hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/security/token/DelegationTokenSecretManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/security/token/DelegationTokenSecretManager.java?rev=1077150&r1=1077149&r2=1077150&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/security/token/DelegationTokenSecretManager.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/security/token/DelegationTokenSecretManager.java Fri Mar  4 03:46:18 2011
@@ -267,6 +267,15 @@ public class DelegationTokenSecretManage
   public static SecretKey createSecretKey(byte[] key) {
     return SecretManager.createSecretKey(key);
   }
+  
+  /**
+   * Create an empty delegation token identifier
+   * @return a newly created empty delegation token identifier
+   */
+  @Override
+  public DelegationTokenIdentifier createIdentifier() {
+    return new DelegationTokenIdentifier();
+  }
 
 
   /** Utility class to encapsulate a token's renew date and password. */
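
The new createIdentifier() override lets generic secret-manager code materialize an empty, concrete identifier and then populate it from the raw bytes a Token carries over the wire, roughly this deserialization pattern (a sketch; the real call site is in the RPC/SASL server, and retrievePassword() is assumed from the SecretManager base class):

    DelegationTokenIdentifier id = secretManager.createIdentifier();
    id.readFields(new DataInputStream(
        new ByteArrayInputStream(token.getIdentifier())));
    byte[] password = secretManager.retrievePassword(id);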

Added: hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/security/token/DelegationTokenSelector.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/security/token/DelegationTokenSelector.java?rev=1077150&view=auto
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/security/token/DelegationTokenSelector.java (added)
+++ hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/security/token/DelegationTokenSelector.java Fri Mar  4 03:46:18 2011
@@ -0,0 +1,50 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.security.token;
+
+import java.util.Collection;
+
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.security.token.TokenSelector;
+
+/**
+ * Look through tokens to find the first delegation token that matches the
+ * service and return it.
+ */
+public class DelegationTokenSelector implements
+    TokenSelector<DelegationTokenIdentifier> {
+
+  @SuppressWarnings("unchecked")
+  @Override
+  public Token<DelegationTokenIdentifier> selectToken(Text service,
+      Collection<Token<? extends TokenIdentifier>> tokens) {
+    if (service == null) {
+      return null;
+    }
+    for (Token<? extends TokenIdentifier> token : tokens) {
+      if (DelegationTokenIdentifier.KIND_NAME.equals(token.getKind())
+          && service.equals(token.getService())) {
+        return (Token<DelegationTokenIdentifier>) token;
+      }
+    }
+    return null;
+  }
+}

Modified: hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1077150&r1=1077149&r2=1077150&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Fri Mar  4 03:46:18 2011
@@ -310,7 +310,6 @@ public class FSNamesystem implements FSC
       close();
       throw e;
     }
-    dtSecretManager.startThreads();
   }
 
   /**
@@ -320,6 +319,7 @@ public class FSNamesystem implements FSC
     this.systemStart = now();
     setConfigurationParameters(conf);
     dtSecretManager = createDelegationTokenSecretManager(conf);
+    dtSecretManager.startThreads();
 
     this.nameNodeAddress = nn.getNameNodeAddress();
     this.registerMBean(conf); // register the MBean for the FSNamesystemStatus

Modified: hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=1077150&r1=1077149&r2=1077150&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java Fri Mar  4 03:46:18 2011
@@ -192,19 +192,22 @@ public class NameNode implements ClientP
             ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
       ServiceAuthorizationManager.refresh(conf, new HDFSPolicyProvider());
     }
+    
+    myMetrics = new NameNodeMetrics(conf, this);
+    this.namesystem = new FSNamesystem(this, conf);
 
     // create rpc server 
-    this.server = RPC.getServer(this, socAddr.getHostName(), socAddr.getPort(),
-                                handlerCount, false, conf);
+    this.server = RPC.getServer(this, socAddr.getHostName(),
+        socAddr.getPort(), handlerCount, false, conf, namesystem
+        .getDelegationTokenSecretManager());
 
     // The rpc-server port can be ephemeral... ensure we have the correct info
     this.serverAddress = this.server.getListenerAddress(); 
     FileSystem.setDefaultUri(conf, getUri(serverAddress));
     LOG.info("Namenode up at: " + this.serverAddress);
 
-    myMetrics = new NameNodeMetrics(conf, this);
+    
 
-    this.namesystem = new FSNamesystem(this, conf);
     startHttpServer(conf);
     this.server.start();  //start RPC server   
     startTrashEmptier(conf);

Modified: hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java?rev=1077150&r1=1077149&r2=1077150&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java Fri Mar  4 03:46:18 2011
@@ -20,11 +20,14 @@ package org.apache.hadoop.hdfs.server.pr
 
 import java.io.*;
 
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.ipc.VersionedProtocol;
 
+import org.apache.hadoop.security.KerberosInfo;
+
 /**********************************************************************
  * Protocol that a DFS datanode uses to communicate with the NameNode.
  * It's used to upload current load information and block reports.
@@ -33,6 +36,7 @@ import org.apache.hadoop.ipc.VersionedPr
  * returning values from these functions.
  *
  **********************************************************************/
+@KerberosInfo(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY)
 public interface DatanodeProtocol extends VersionedProtocol {
   /**
    * 20: SendHeartbeat may return KeyUpdateCommand

Modified: hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/protocol/InterDatanodeProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/protocol/InterDatanodeProtocol.java?rev=1077150&r1=1077149&r2=1077150&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/protocol/InterDatanodeProtocol.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/protocol/InterDatanodeProtocol.java Fri Mar  4 03:46:18 2011
@@ -22,11 +22,14 @@ import java.io.IOException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.ipc.VersionedProtocol;
+import org.apache.hadoop.security.KerberosInfo;
 
 /** An inter-datanode protocol for updating generation stamp
  */
+@KerberosInfo(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY)
 public interface InterDatanodeProtocol extends VersionedProtocol {
   public static final Log LOG = LogFactory.getLog(InterDatanodeProtocol.class);
 

Modified: hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java?rev=1077150&r1=1077149&r2=1077150&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java Fri Mar  4 03:46:18 2011
@@ -20,15 +20,18 @@ package org.apache.hadoop.hdfs.server.pr
 
 import java.io.IOException;
 
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.security.ExportedAccessKeys;
 import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
 import org.apache.hadoop.ipc.VersionedProtocol;
+import org.apache.hadoop.security.KerberosInfo;
 
 /*****************************************************************************
  * Protocol that a secondary NameNode uses to communicate with the NameNode.
  * It's used to get part of the name node state
  *****************************************************************************/
+@KerberosInfo(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY)
 public interface NamenodeProtocol extends VersionedProtocol {
   /**
    * 3: new method added: getAccessKeys()

Modified: hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/AdminOperationsProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/AdminOperationsProtocol.java?rev=1077150&r1=1077149&r2=1077150&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/AdminOperationsProtocol.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/AdminOperationsProtocol.java Fri Mar  4 03:46:18 2011
@@ -21,11 +21,14 @@ package org.apache.hadoop.mapred;
 import java.io.IOException;
 
 import org.apache.hadoop.ipc.VersionedProtocol;
+import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.security.KerberosInfo;
 
 /**
  * Protocol for admin operations. This is a framework-public interface and is
  * NOT_TO_BE_USED_BY_USERS_DIRECTLY.
  */
+@KerberosInfo(JobContext.JOB_JOBTRACKER_ID)
 public interface AdminOperationsProtocol extends VersionedProtocol {
   
   /**

Modified: hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/Child.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/Child.java?rev=1077150&r1=1077149&r2=1077150&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/Child.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/Child.java Fri Mar  4 03:46:18 2011
@@ -28,16 +28,17 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FSError;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.mapreduce.security.TokenCache;
-import org.apache.hadoop.security.TokenStorage;
-import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
 import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager;
 import org.apache.hadoop.metrics.MetricsContext;
 import org.apache.hadoop.metrics.MetricsUtil;
 import org.apache.hadoop.metrics.jvm.JvmMetrics;
+import org.apache.hadoop.security.TokenStorage;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.log4j.LogManager;
@@ -73,6 +74,12 @@ class Child {
       TokenCache.loadTaskTokenStorage(jobTokenFile, defaultConf);
     LOG.debug("loading token. # keys =" +ts.numberOfSecretKeys() + 
         "; from file=" + jobTokenFile);
+    
+    Token<JobTokenIdentifier> jt = TokenCache.getJobToken(ts);
+    jt.setService(new Text(address.getAddress().getHostAddress() + ":"
+        + address.getPort()));
+    UserGroupInformation current = UserGroupInformation.getCurrentUser();
+    current.addToken(jt);
 
     TaskUmbilicalProtocol umbilical =
       (TaskUmbilicalProtocol)RPC.getProxy(TaskUmbilicalProtocol.class,
@@ -151,8 +158,6 @@ class Child {
         TaskLog.syncLogs(firstTaskid, taskid, isCleanup);
         JobConf job = new JobConf(task.getJobFile());
 
-        // set job shuffle token
-        Token<? extends TokenIdentifier> jt = TokenCache.getJobToken(ts);
         // set the jobTokenFile into task
         task.setJobTokenSecret(JobTokenSecretManager.
             createSecretKey(jt.getPassword()));

Modified: hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/InterTrackerProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/InterTrackerProtocol.java?rev=1077150&r1=1077149&r2=1077150&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/InterTrackerProtocol.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/InterTrackerProtocol.java Fri Mar  4 03:46:18 2011
@@ -21,11 +21,14 @@ package org.apache.hadoop.mapred;
 import java.io.IOException;
 
 import org.apache.hadoop.ipc.VersionedProtocol;
+import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.security.KerberosInfo;
 
 /** 
  * Protocol that a TaskTracker and the central JobTracker use to communicate.
  * The JobTracker is the Server, which implements this protocol.
  */ 
+@KerberosInfo(JobContext.JOB_JOBTRACKER_ID)
 interface InterTrackerProtocol extends VersionedProtocol {
   /**
    * version 3 introduced to replace 

Modified: hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/JvmTask.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/JvmTask.java?rev=1077150&r1=1077149&r2=1077150&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/JvmTask.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/JvmTask.java Fri Mar  4 03:46:18 2011
@@ -23,7 +23,7 @@ import java.io.DataOutput;
 import java.io.IOException;
 import org.apache.hadoop.io.Writable;
 
-class JvmTask implements Writable {
+public class JvmTask implements Writable {
   Task t;
   boolean shouldDie;
   public JvmTask(Task t, boolean shouldDie) {

Modified: hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/MapTaskCompletionEventsUpdate.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/MapTaskCompletionEventsUpdate.java?rev=1077150&r1=1077149&r2=1077150&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/MapTaskCompletionEventsUpdate.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/MapTaskCompletionEventsUpdate.java Fri Mar  4 03:46:18 2011
@@ -28,7 +28,7 @@ import org.apache.hadoop.io.Writable;
  * tasks w.r.t the map task completion events. It also indicates whether the 
  * child task should reset its events index.
  */
-class MapTaskCompletionEventsUpdate implements Writable {
+public class MapTaskCompletionEventsUpdate implements Writable {
   TaskCompletionEvent[] events;
   boolean reset;
 

Modified: hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/TaskTracker.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/TaskTracker.java?rev=1077150&r1=1077149&r2=1077150&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/TaskTracker.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/TaskTracker.java Fri Mar  4 03:46:18 2011
@@ -668,8 +668,8 @@ public class TaskTracker 
                        maxMapSlots : maxReduceSlots;
     //set the num handlers to max*2 since canCommit may wait for the duration
     //of a heartbeat RPC
-    this.taskReportServer =
-      RPC.getServer(this, bindAddress, tmpPort, 2 * max, false, this.fConf);
+    this.taskReportServer = RPC.getServer(this, bindAddress,
+        tmpPort, 2 * max, false, this.fConf, this.jobTokenSecretManager);
     this.taskReportServer.start();
 
     // get the assigned address
@@ -984,7 +984,6 @@ public class TaskTracker 
    *         job as a starting point.
    * @throws IOException
    */
-  @SuppressWarnings("unchecked")
   JobConf localizeJobFiles(Task t, RunningJob rjob)
       throws IOException, InterruptedException {
     JobID jobId = t.getJobID();
@@ -1003,8 +1002,7 @@ public class TaskTracker 
     
     
     TokenStorage ts = TokenCache.loadTokens(localJobTokenFile, fConf);
-    Token<JobTokenIdentifier> jt = 
-      (Token<JobTokenIdentifier>)TokenCache.getJobToken(ts);
+    Token<JobTokenIdentifier> jt = TokenCache.getJobToken(ts);
     if (jt != null) { //could be null in the case of some unit tests
       getJobTokenSecretManager().addTokenForJob(jobId.toString(), jt);
     }

Modified: hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/TaskUmbilicalProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/TaskUmbilicalProtocol.java?rev=1077150&r1=1077149&r2=1077150&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/TaskUmbilicalProtocol.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapred/TaskUmbilicalProtocol.java Fri Mar  4 03:46:18 2011
@@ -22,12 +22,15 @@ import java.io.IOException;
 
 import org.apache.hadoop.ipc.VersionedProtocol;
 import org.apache.hadoop.mapred.JvmTask;
+import org.apache.hadoop.mapreduce.security.token.JobTokenSelector;
+import org.apache.hadoop.security.token.TokenInfo;
 
 /** Protocol that task child process uses to contact its parent process.  The
 * parent is a daemon which polls the central master for a new map or
  * reduce task and runs it as a child process.  All communication between child
- * and parent is via this protocol. */ 
-interface TaskUmbilicalProtocol extends VersionedProtocol {
+ * and parent is via this protocol. */
+@TokenInfo(JobTokenSelector.class)
+public interface TaskUmbilicalProtocol extends VersionedProtocol {
 
   /** 
    * Changed the version to 2, since we have a new method getMapOutputs 
@@ -141,7 +144,7 @@ interface TaskUmbilicalProtocol extends 
    * task-tracker has changed or not. This will trigger some action at the 
    * child-process.
    *
-   * @param taskId the reduce task id
+   * @param jobId the reducer job id
    * @param fromIndex the index starting from which the locations should be 
    * fetched
    * @param maxLocs the max number of locations to fetch

Modified: hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapreduce/security/TokenCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapreduce/security/TokenCache.java?rev=1077150&r1=1077149&r2=1077150&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapreduce/security/TokenCache.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapreduce/security/TokenCache.java Fri Mar  4 03:46:18 2011
@@ -34,6 +34,7 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.TokenStorage;
 import org.apache.hadoop.security.token.Token;
@@ -230,8 +231,9 @@ public class TokenCache {
    * @return job token
    */
   //@InterfaceAudience.Private
-  public static Token<? extends TokenIdentifier> getJobToken(TokenStorage ts) {
-    return ts.getToken(JOB_TOKEN);
+  @SuppressWarnings("unchecked")
+  public static Token<JobTokenIdentifier> getJobToken(TokenStorage ts) {
+    return (Token<JobTokenIdentifier>) ts.getToken(JOB_TOKEN);
   }
 
   /**

Modified: hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapreduce/security/token/JobTokenIdentifier.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapreduce/security/token/JobTokenIdentifier.java?rev=1077150&r1=1077149&r2=1077150&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapreduce/security/token/JobTokenIdentifier.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapreduce/security/token/JobTokenIdentifier.java Fri Mar  4 03:46:18 2011
@@ -33,6 +33,13 @@ public class JobTokenIdentifier extends 
   final static Text KIND_NAME = new Text("mapreduce.job");
   
   /**
+   * Default constructor
+   */
+  public JobTokenIdentifier() {
+    this.jobid = new Text();
+  }
+
+  /**
    * Create a job token identifier from a jobid
    * @param jobid the jobid to use
    */
@@ -46,6 +53,12 @@ public class JobTokenIdentifier extends 
     return KIND_NAME;
   }
   
+  /** {@inheritDoc} */
+  @Override
+  public Text getUsername() {
+    return getJobId();
+  }
+
   /**
    * Get the jobid
    * @return the jobid

Modified: hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapreduce/security/token/JobTokenSecretManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapreduce/security/token/JobTokenSecretManager.java?rev=1077150&r1=1077149&r2=1077150&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapreduce/security/token/JobTokenSecretManager.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapreduce/security/token/JobTokenSecretManager.java Fri Mar  4 03:46:18 2011
@@ -121,5 +121,13 @@ public class JobTokenSecretManager exten
       throws InvalidToken {
     return retrieveTokenSecret(identifier.getJobId().toString()).getEncoded();
   }
-
+  
+  /**
+   * Create an empty job token identifier
+   * @return a newly created empty job token identifier
+   */
+  @Override
+  public JobTokenIdentifier createIdentifier() {
+    return new JobTokenIdentifier();
+  }
 }

Added: hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapreduce/security/token/JobTokenSelector.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapreduce/security/token/JobTokenSelector.java?rev=1077150&view=auto
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapreduce/security/token/JobTokenSelector.java (added)
+++ hadoop/common/branches/branch-0.20-security-patches/src/mapred/org/apache/hadoop/mapreduce/security/token/JobTokenSelector.java Fri Mar  4 03:46:18 2011
@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapreduce.security.token;
+
+import java.util.Collection;
+
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.security.token.TokenSelector;
+
+/**
+ * Look through tokens to find the first job token that matches the service
+ * and return it.
+ */
+public class JobTokenSelector implements TokenSelector<JobTokenIdentifier> {
+
+  @SuppressWarnings("unchecked")
+  @Override
+  public Token<JobTokenIdentifier> selectToken(Text service,
+      Collection<Token<? extends TokenIdentifier>> tokens) {
+    if (service == null) {
+      return null;
+    }
+    for (Token<? extends TokenIdentifier> token : tokens) {
+      if (JobTokenIdentifier.KIND_NAME.equals(token.getKind())
+          && service.equals(token.getService())) {
+        return (Token<JobTokenIdentifier>) token;
+      }
+    }
+    return null;
+  }
+}
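
This selector is the counterpart of the Child.java change earlier in this commit: the child stamps the job token with the TaskTracker's host:port before connecting, and that is exactly the service string matched here. A condensed sketch (addr and ugi stand in for the corresponding values in Child.java; getTokens() is assumed from UserGroupInformation):

    // Child side: tag the job token with the umbilical server's address.
    Text service = new Text(addr.getAddress().getHostAddress() + ":" + addr.getPort());
    jt.setService(service);
    ugi.addToken(jt);

    // Connection side: recover the token for that same service.
    Token<JobTokenIdentifier> found =
        new JobTokenSelector().selectToken(service, ugi.getTokens());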

Modified: hadoop/common/branches/branch-0.20-security-patches/src/test/core-site.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/test/core-site.xml?rev=1077150&r1=1077149&r2=1077150&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/test/core-site.xml (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/test/core-site.xml Fri Mar  4 03:46:18 2011
@@ -47,4 +47,10 @@
   <description>The name of the s3n file system for testing.</description>
 </property>
 
+<!-- Turn security off for tests by default -->
+<property>
+  <name>hadoop.security.authentication</name>
+  <value>simple</value>
+</property>
+
 </configuration>

Modified: hadoop/common/branches/branch-0.20-security-patches/src/test/findbugsExcludeFile.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/test/findbugsExcludeFile.xml?rev=1077150&r1=1077149&r2=1077150&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/test/findbugsExcludeFile.xml (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/test/findbugsExcludeFile.xml Fri Mar  4 03:46:18 2011
@@ -32,6 +32,15 @@
        <Field name="out" />
        <Bug pattern="IS2_INCONSISTENT_SYNC" />
      </Match>
+     <!-- 
+       Accesses to Client.Connection.saslRpcClient are in fact
+       synchronized (inside synchronized methods).
+     --> 
+     <Match>
+       <Class name="org.apache.hadoop.ipc.Client$Connection" />
+       <Field name="saslRpcClient" />
+       <Bug pattern="IS2_INCONSISTENT_SYNC" />
+     </Match>
      <Match>
        <Class name="org.apache.hadoop.mapred.OutputCommitter" />
        <Or>

Added: hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/security/TestClientProtocolWithDelegationToken.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/security/TestClientProtocolWithDelegationToken.java?rev=1077150&view=auto
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/security/TestClientProtocolWithDelegationToken.java (added)
+++ hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/security/TestClientProtocolWithDelegationToken.java Fri Mar  4 03:46:18 2011
@@ -0,0 +1,122 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.security;
+
+import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION;
+import static org.mockito.Matchers.anyLong;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.net.InetSocketAddress;
+import java.security.PrivilegedExceptionAction;
+
+import org.apache.commons.logging.*;
+import org.apache.commons.logging.impl.Log4JLogger;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Text;
+
+import org.apache.hadoop.ipc.Client;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ipc.Server;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.security.token.DelegationTokenIdentifier;
+import org.apache.hadoop.hdfs.security.token.DelegationTokenSecretManager;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.SaslInputStream;
+import org.apache.hadoop.security.SaslRpcClient;
+import org.apache.hadoop.security.SaslRpcServer;
+import org.apache.hadoop.security.UserGroupInformation;
+
+import org.apache.log4j.Level;
+import org.junit.Test;
+
+/** Unit tests for using Delegation Token over RPC. */
+public class TestClientProtocolWithDelegationToken {
+  private static final String ADDRESS = "0.0.0.0";
+
+  public static final Log LOG = LogFactory
+      .getLog(TestClientProtocolWithDelegationToken.class);
+
+  private static Configuration conf;
+  static {
+    conf = new Configuration();
+    conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
+    UserGroupInformation.setConfiguration(conf);
+  }
+
+  static {
+    ((Log4JLogger) Client.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger) Server.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger) SaslRpcClient.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger) SaslRpcServer.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger) SaslInputStream.LOG).getLogger().setLevel(Level.ALL);
+  }
+
+  @Test
+  public void testDelegationTokenRpc() throws Exception {
+    ClientProtocol mockNN = mock(ClientProtocol.class);
+    when(mockNN.getProtocolVersion(anyString(), anyLong())).thenReturn(
+        ClientProtocol.versionID);
+    DelegationTokenSecretManager sm = new DelegationTokenSecretManager(
+        DFSConfigKeys.DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT,
+        DFSConfigKeys.DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT,
+        DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT,
+        3600000);
+    sm.startThreads();
+    final Server server = RPC.getServer(mockNN, ADDRESS,
+        0, 5, true, conf, sm);
+
+    server.start();
+
+    final UserGroupInformation current = UserGroupInformation.getCurrentUser();
+    final InetSocketAddress addr = NetUtils.getConnectAddress(server);
+    String user = current.getUserName();
+    Text owner = new Text(user);
+    DelegationTokenIdentifier dtId = new DelegationTokenIdentifier(owner, owner);
+    Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>(
+        dtId, sm);
+    Text host = new Text(addr.getAddress().getHostAddress() + ":"
+        + addr.getPort());
+    token.setService(host);
+    LOG.info("Service IP address for token is " + host);
+    current.addToken(token);
+    current.doAs(new PrivilegedExceptionAction<Object>() {
+      @Override
+      public Object run() throws Exception {
+        ClientProtocol proxy = null;
+        try {
+          proxy = (ClientProtocol) RPC.getProxy(ClientProtocol.class,
+              ClientProtocol.versionID, addr, conf);
+          proxy.getStats();
+        } finally {
+          server.stop();
+          if (proxy != null) {
+            RPC.stopProxy(proxy);
+          }
+        }
+        return null;
+      }
+    });
+  }
+
+}
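
For reference, the "service" the test attaches to the token is nothing more
than the "ip:port" of the RPC endpoint; that string is what the client-side
token selector later compares against. A minimal standalone sketch of the
derivation (TokenServiceExample and buildService are illustrative names, not
API added by this patch):

    import java.net.InetSocketAddress;

    import org.apache.hadoop.io.Text;

    public class TokenServiceExample {
      // Build the "ip:port" service key exactly as the test does inline.
      static Text buildService(InetSocketAddress addr) {
        return new Text(addr.getAddress().getHostAddress() + ":" + addr.getPort());
      }

      public static void main(String[] args) {
        System.out.println(buildService(new InetSocketAddress("127.0.0.1", 8020)));
        // prints 127.0.0.1:8020
      }
    }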

Modified: hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/ipc/TestRPC.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/ipc/TestRPC.java?rev=1077150&r1=1077149&r2=1077150&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/ipc/TestRPC.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/ipc/TestRPC.java Fri Mar  4 03:46:18 2011
@@ -68,7 +68,7 @@ public class TestRPC extends TestCase {
     int[] exchange(int[] values) throws IOException;
   }
 
-  public class TestImpl implements TestProtocol {
+  public static class TestImpl implements TestProtocol {
     int fastPingCounter = 0;
     
     public long getProtocolVersion(String protocol, long clientVersion) {

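This one-word change is load-bearing for the new TestSaslRPC below:
TestSaslImpl extends TestRPC.TestImpl from another class, and only a static
member class can be subclassed without an enclosing instance. A minimal
illustration (all names here are illustrative):

    public class Outer {
      public class Inner {}           // non-static: needs an Outer instance
      public static class Nested {}   // static: behaves like a top-level class
    }

    class FromElsewhere extends Outer.Nested {}  // compiles
    // class Broken extends Outer.Inner {}       // rejected: requires an
    //                                           // enclosing Outer instance
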
Added: hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/ipc/TestSaslRPC.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/ipc/TestSaslRPC.java?rev=1077150&view=auto
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/ipc/TestSaslRPC.java (added)
+++ hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/ipc/TestSaslRPC.java Fri Mar  4 03:46:18 2011
@@ -0,0 +1,216 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ipc;
+
+import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.Collection;
+
+import org.apache.commons.logging.*;
+import org.apache.commons.logging.impl.Log4JLogger;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.KerberosInfo;
+import org.apache.hadoop.security.token.SecretManager;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.security.token.TokenInfo;
+import org.apache.hadoop.security.token.TokenSelector;
+import org.apache.hadoop.security.SaslInputStream;
+import org.apache.hadoop.security.SaslRpcClient;
+import org.apache.hadoop.security.SaslRpcServer;
+import org.apache.hadoop.security.UserGroupInformation;
+
+import org.apache.log4j.Level;
+import org.junit.Test;
+
+/** Unit tests for using SASL over RPC. */
+public class TestSaslRPC {
+  private static final String ADDRESS = "0.0.0.0";
+
+  public static final Log LOG =
+    LogFactory.getLog(TestSaslRPC.class);
+  
+  static final String SERVER_PRINCIPAL_KEY = "test.ipc.server.principal";
+  private static Configuration conf;
+  static {
+    conf = new Configuration();
+    conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
+    UserGroupInformation.setConfiguration(conf);
+  }
+
+  static {
+    ((Log4JLogger) Client.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger) Server.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger) SaslRpcClient.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger) SaslRpcServer.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger) SaslInputStream.LOG).getLogger().setLevel(Level.ALL);
+  }
+
+  public static class TestTokenIdentifier extends TokenIdentifier {
+    private Text tokenid;
+    final static Text KIND_NAME = new Text("test.token");
+    
+    public TestTokenIdentifier() {
+      this.tokenid = new Text();
+    }
+    public TestTokenIdentifier(Text tokenid) {
+      this.tokenid = tokenid;
+    }
+    @Override
+    public Text getKind() {
+      return KIND_NAME;
+    }
+    @Override
+    public Text getUsername() {
+      return tokenid;
+    }
+    @Override
+    public void readFields(DataInput in) throws IOException {
+      tokenid.readFields(in);
+    }
+    @Override
+    public void write(DataOutput out) throws IOException {
+      tokenid.write(out);
+    }
+  }
+  
+  public static class TestTokenSecretManager extends
+      SecretManager<TestTokenIdentifier> {
+    public byte[] createPassword(TestTokenIdentifier id) {
+      return id.getBytes();
+    }
+
+    public byte[] retrievePassword(TestTokenIdentifier id) 
+        throws InvalidToken {
+      return id.getBytes();
+    }
+    
+    public TestTokenIdentifier createIdentifier() {
+      return new TestTokenIdentifier();
+    }
+  }
+
+  public static class TestTokenSelector implements
+      TokenSelector<TestTokenIdentifier> {
+    @SuppressWarnings("unchecked")
+    @Override
+    public Token<TestTokenIdentifier> selectToken(Text service,
+        Collection<Token<? extends TokenIdentifier>> tokens) {
+      if (service == null) {
+        return null;
+      }
+      for (Token<? extends TokenIdentifier> token : tokens) {
+        if (TestTokenIdentifier.KIND_NAME.equals(token.getKind())
+            && service.equals(token.getService())) {
+          return (Token<TestTokenIdentifier>) token;
+        }
+      }
+      return null;
+    }
+  }
+  
+  @KerberosInfo(SERVER_PRINCIPAL_KEY)
+  @TokenInfo(TestTokenSelector.class)
+  public interface TestSaslProtocol extends TestRPC.TestProtocol {
+  }
+  
+  public static class TestSaslImpl extends TestRPC.TestImpl implements
+      TestSaslProtocol {
+  }
+
+  @Test
+  public void testDigestRpc() throws Exception {
+    TestTokenSecretManager sm = new TestTokenSecretManager();
+    final Server server = RPC.getServer(
+        new TestSaslImpl(), ADDRESS, 0, 5, true, conf, sm);
+
+    server.start();
+
+    final UserGroupInformation current = UserGroupInformation.getCurrentUser();
+    final InetSocketAddress addr = NetUtils.getConnectAddress(server);
+    TestTokenIdentifier tokenId = new TestTokenIdentifier(new Text(current
+        .getUserName()));
+    Token<TestTokenIdentifier> token = new Token<TestTokenIdentifier>(tokenId,
+        sm);
+    Text host = new Text(addr.getAddress().getHostAddress() + ":"
+        + addr.getPort());
+    token.setService(host);
+    LOG.info("Service IP address for token is " + host);
+    current.addToken(token);
+
+    TestSaslProtocol proxy = null;
+    try {
+      proxy = (TestSaslProtocol) RPC.getProxy(TestSaslProtocol.class,
+          TestSaslProtocol.versionID, addr, conf);
+      proxy.ping();
+    } finally {
+      server.stop();
+      if (proxy != null) {
+        RPC.stopProxy(proxy);
+      }
+    }
+  }
+  
+  static void testKerberosRpc(String principal, String keytab) throws Exception {
+    final Configuration newConf = new Configuration(conf);
+    newConf.set(SERVER_PRINCIPAL_KEY, principal);
+    UserGroupInformation.loginUserFromKeytab(principal, keytab);
+    UserGroupInformation current = UserGroupInformation.getCurrentUser();
+    System.out.println("UGI: " + current);
+
+    Server server = RPC.getServer(new TestSaslImpl(),
+        ADDRESS, 0, 5, true, newConf, null);
+    TestSaslProtocol proxy = null;
+
+    server.start();
+
+    InetSocketAddress addr = NetUtils.getConnectAddress(server);
+    try {
+      proxy = (TestSaslProtocol) RPC.getProxy(TestSaslProtocol.class,
+          TestSaslProtocol.versionID, addr, newConf);
+      proxy.ping();
+    } finally {
+      server.stop();
+      if (proxy != null) {
+        RPC.stopProxy(proxy);
+      }
+    }
+  }
+  
+  public static void main(String[] args) throws Exception {
+    System.out.println("Testing Kerberos authentication over RPC");
+    if (args.length != 2) {
+      System.err
+          .println("Usage: java <options> org.apache.hadoop.ipc.TestSaslRPC"
+              + " <serverPrincipal> <keytabFile>");
+      System.exit(-1);
+    }
+    String principal = args[0];
+    String keytab = args[1];
+    testKerberosRpc(principal, keytab);
+  }
+
+}
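
Two reading notes on the file above. First, testKerberosRpc is deliberately
not a @Test: it needs a live KDC and keytab, and is run by hand via
java <options> org.apache.hadoop.ipc.TestSaslRPC <serverPrincipal> <keytabFile>.
Second, the @TokenInfo annotation is what ties testDigestRpc together: at
connection setup the SASL client reads the protocol's annotation,
instantiates the declared selector, and asks it for a token matching the
connection's service key. A sketch of that mechanism (TokenLookupSketch and
lookup are illustrative names; this is not the actual SaslRpcClient code):

    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hadoop.security.token.Token;
    import org.apache.hadoop.security.token.TokenIdentifier;
    import org.apache.hadoop.security.token.TokenInfo;
    import org.apache.hadoop.security.token.TokenSelector;

    public class TokenLookupSketch {
      // Returns the token the SASL layer would use for this protocol/service,
      // or null if the protocol declares no @TokenInfo or no token matches.
      static Token<? extends TokenIdentifier> lookup(Class<?> protocol,
          Text service, UserGroupInformation ugi) throws Exception {
        TokenInfo info = protocol.getAnnotation(TokenInfo.class);
        if (info == null) {
          return null;
        }
        TokenSelector<? extends TokenIdentifier> selector =
            info.value().newInstance();
        return selector.selectToken(service, ugi.getTokens());
      }
    }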

Added: hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/mapreduce/security/TestUmbilicalProtocolWithJobToken.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/mapreduce/security/TestUmbilicalProtocolWithJobToken.java?rev=1077150&view=auto
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/mapreduce/security/TestUmbilicalProtocolWithJobToken.java (added)
+++ hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/mapreduce/security/TestUmbilicalProtocolWithJobToken.java Fri Mar  4 03:46:18 2011
@@ -0,0 +1,117 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapreduce.security;
+
+import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION;
+import static org.mockito.Matchers.anyLong;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.net.InetSocketAddress;
+import java.security.PrivilegedExceptionAction;
+
+import org.apache.commons.logging.*;
+import org.apache.commons.logging.impl.Log4JLogger;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Text;
+
+import org.apache.hadoop.ipc.Client;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ipc.Server;
+import org.apache.hadoop.mapred.TaskUmbilicalProtocol;
+import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
+import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.SaslInputStream;
+import org.apache.hadoop.security.SaslRpcClient;
+import org.apache.hadoop.security.SaslRpcServer;
+import org.apache.hadoop.security.UserGroupInformation;
+
+import org.apache.log4j.Level;
+import org.junit.Test;
+
+/** Unit tests for using Job Token over RPC. */
+public class TestUmbilicalProtocolWithJobToken {
+  private static final String ADDRESS = "0.0.0.0";
+
+  public static final Log LOG = LogFactory
+      .getLog(TestUmbilicalProtocolWithJobToken.class);
+
+  private static Configuration conf;
+  static {
+    conf = new Configuration();
+    conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
+    UserGroupInformation.setConfiguration(conf);
+  }
+
+  static {
+    ((Log4JLogger) Client.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger) Server.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger) SaslRpcClient.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger) SaslRpcServer.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger) SaslInputStream.LOG).getLogger().setLevel(Level.ALL);
+  }
+
+  @Test
+  public void testJobTokenRpc() throws Exception {
+    TaskUmbilicalProtocol mockTT = mock(TaskUmbilicalProtocol.class);
+    when(mockTT.getProtocolVersion(anyString(), anyLong())).thenReturn(
+        TaskUmbilicalProtocol.versionID);
+
+    JobTokenSecretManager sm = new JobTokenSecretManager();
+    final Server server = RPC.getServer(mockTT,
+        ADDRESS, 0, 5, true, conf, sm);
+
+    server.start();
+
+    final UserGroupInformation current = UserGroupInformation.getCurrentUser();
+    final InetSocketAddress addr = NetUtils.getConnectAddress(server);
+    String jobId = current.getUserName();
+    JobTokenIdentifier tokenId = new JobTokenIdentifier(new Text(jobId));
+    Token<JobTokenIdentifier> token = new Token<JobTokenIdentifier>(tokenId, sm);
+    sm.addTokenForJob(jobId, token);
+    Text host = new Text(addr.getAddress().getHostAddress() + ":"
+        + addr.getPort());
+    token.setService(host);
+    LOG.info("Service IP address for token is " + host);
+    current.addToken(token);
+    current.doAs(new PrivilegedExceptionAction<Object>() {
+      @Override
+      public Object run() throws Exception {
+        TaskUmbilicalProtocol proxy = null;
+        try {
+          proxy = (TaskUmbilicalProtocol) RPC.getProxy(
+              TaskUmbilicalProtocol.class, TaskUmbilicalProtocol.versionID,
+              addr, conf);
+          proxy.ping(null);
+        } finally {
+          server.stop();
+          if (proxy != null) {
+            RPC.stopProxy(proxy);
+          }
+        }
+        return null;
+      }
+    });
+  }
+
+}
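
What makes testJobTokenRpc pass is that both sides share the same
JobTokenSecretManager: the client proves possession of the token's password,
and the server resolves the identifier back to that password via the mapping
addTokenForJob established. A minimal sketch of that server-side lookup
(JobTokenRoundTrip and the job id are illustrative; this assumes
retrievePassword resolves by the identifier's job id, which is exactly the
mapping the test sets up):

    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
    import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager;
    import org.apache.hadoop.security.token.Token;

    public class JobTokenRoundTrip {
      public static void main(String[] args) throws Exception {
        JobTokenSecretManager sm = new JobTokenSecretManager();
        JobTokenIdentifier id = new JobTokenIdentifier(new Text("job_0001"));
        Token<JobTokenIdentifier> token = new Token<JobTokenIdentifier>(id, sm);
        sm.addTokenForJob("job_0001", token);        // as the test does
        byte[] serverSide = sm.retrievePassword(id); // what DIGEST-MD5 verifies
        System.out.println("recovered " + serverSide.length + " password bytes");
      }
    }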

Modified: hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/security/TestUserGroupInformation.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/security/TestUserGroupInformation.java?rev=1077150&r1=1077149&r2=1077150&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/security/TestUserGroupInformation.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/security/TestUserGroupInformation.java Fri Mar  4 03:46:18 2011
@@ -27,13 +27,11 @@ import static org.mockito.Mockito.mock;
 import java.io.BufferedReader;
 import java.io.IOException;
 import java.io.InputStreamReader;
-import java.security.PrivilegedAction;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
 
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.junit.Test;
@@ -175,7 +173,7 @@ public class TestUserGroupInformation {
     ugi.addToken(t1);
     ugi.addToken(t2);
     
-    Collection<Token<T>> z = ugi.getTokens();
+    Collection<Token<? extends TokenIdentifier>> z = ugi.getTokens();
     assertTrue(z.contains(t1));
     assertTrue(z.contains(t2));
     assertEquals(2, z.size());
@@ -188,9 +186,9 @@ public class TestUserGroupInformation {
     }
     
     // ensure that the tokens are passed through doAs
-    Collection<Token<T>> otherSet = 
-      ugi.doAs(new PrivilegedExceptionAction<Collection<Token<T>>>(){
-        public Collection<Token<T>> run() throws IOException {
+    Collection<Token<? extends TokenIdentifier>> otherSet = 
+      ugi.doAs(new PrivilegedExceptionAction<Collection<Token<?>>>(){
+        public Collection<Token<?>> run() throws IOException {
           return UserGroupInformation.getCurrentUser().getTokens();
         }
       });
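
The hunk above trades the unbound T for a wildcard because a single UGI can
carry tokens with different identifier types at once (say, an HDFS delegation
token and a job token), so no single T fits the collection. Typical caller
code after this change looks like the following sketch (ListTokens is an
illustrative name):

    import java.util.Collection;

    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hadoop.security.token.Token;
    import org.apache.hadoop.security.token.TokenIdentifier;

    public class ListTokens {
      public static void main(String[] args) throws Exception {
        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
        Collection<Token<? extends TokenIdentifier>> tokens = ugi.getTokens();
        for (Token<? extends TokenIdentifier> t : tokens) {
          // kind and service together identify heterogeneous tokens
          System.out.println(t.getKind() + " @ " + t.getService());
        }
      }
    }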