Posted to common-commits@hadoop.apache.org by vi...@apache.org on 2014/11/04 13:08:50 UTC
[1/9] git commit: YARN-2730. DefaultContainerExecutor runs only one
localizer at a time. Contributed by Siqi Li
Repository: hadoop
Updated Branches:
refs/heads/HDFS-EC 67f13b58e -> 2bb327eb9
YARN-2730. DefaultContainerExecutor runs only one localizer at a time. Contributed by Siqi Li
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6157ace5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6157ace5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6157ace5
Branch: refs/heads/HDFS-EC
Commit: 6157ace5475fff8d2513fd3cd99134b532b0b406
Parents: 67f13b5
Author: Jason Lowe <jl...@apache.org>
Authored: Mon Nov 3 20:37:47 2014 +0000
Committer: Jason Lowe <jl...@apache.org>
Committed: Mon Nov 3 20:37:47 2014 +0000
----------------------------------------------------------------------
hadoop-yarn-project/CHANGES.txt | 3 +++
.../nodemanager/DefaultContainerExecutor.java | 19 ++++++++++++-------
2 files changed, 15 insertions(+), 7 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6157ace5/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 6f91bd8..1bb116b 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -839,6 +839,9 @@ Release 2.6.0 - UNRELEASED
YARN-2785. Fixed intermittent TestContainerResourceUsage failure. (Varun Vasudev
via zjshen)
+ YARN-2730. DefaultContainerExecutor runs only one localizer at a time
+ (Siqi Li via jlowe)
+
Release 2.5.1 - 2014-09-05
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6157ace5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
index 834b138..cc2de99 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
@@ -94,7 +94,7 @@ public class DefaultContainerExecutor extends ContainerExecutor {
}
@Override
- public synchronized void startLocalizer(Path nmPrivateContainerTokensPath,
+ public void startLocalizer(Path nmPrivateContainerTokensPath,
InetSocketAddress nmAddr, String user, String appId, String locId,
LocalDirsHandlerService dirsHandler)
throws IOException, InterruptedException {
@@ -102,10 +102,6 @@ public class DefaultContainerExecutor extends ContainerExecutor {
List<String> localDirs = dirsHandler.getLocalDirs();
List<String> logDirs = dirsHandler.getLogDirs();
- ContainerLocalizer localizer =
- new ContainerLocalizer(lfs, user, appId, locId, getPaths(localDirs),
- RecordFactoryProvider.getRecordFactory(getConf()));
-
createUserLocalDirs(localDirs, user);
createUserCacheDirs(localDirs, user);
createAppDirs(localDirs, user, appId);
@@ -118,8 +114,17 @@ public class DefaultContainerExecutor extends ContainerExecutor {
Path tokenDst = new Path(appStorageDir, tokenFn);
copyFile(nmPrivateContainerTokensPath, tokenDst, user);
LOG.info("Copying from " + nmPrivateContainerTokensPath + " to " + tokenDst);
- lfs.setWorkingDirectory(appStorageDir);
- LOG.info("CWD set to " + appStorageDir + " = " + lfs.getWorkingDirectory());
+
+
+ FileContext localizerFc = FileContext.getFileContext(
+ lfs.getDefaultFileSystem(), getConf());
+ localizerFc.setUMask(lfs.getUMask());
+ localizerFc.setWorkingDirectory(appStorageDir);
+ LOG.info("Localizer CWD set to " + appStorageDir + " = "
+ + localizerFc.getWorkingDirectory());
+ ContainerLocalizer localizer =
+ new ContainerLocalizer(localizerFc, user, appId, locId,
+ getPaths(localDirs), RecordFactoryProvider.getRecordFactory(getConf()));
// TODO: DO it over RPC for maintaining similarity?
localizer.runLocalization(nmAddr);
}
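Why dropping `synchronized` is safe here: the working directory lives on the FileContext, and the NodeManager's shared `lfs` instance made it process-wide mutable state, which is what forced the old code to serialize localizers. Giving each localizer a private FileContext over the same underlying file system removes the shared state. A minimal sketch of that pattern, with hypothetical class and method names (only the FileContext calls are taken from the patch above):

  import java.io.IOException;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileContext;
  import org.apache.hadoop.fs.Path;

  public class PerCallFileContextSketch {
    // Shared FileContext: its working directory is shared mutable state,
    // so concurrent callers must not set it per request.
    private final FileContext lfs;

    public PerCallFileContextSketch(FileContext lfs) {
      this.lfs = lfs;
    }

    // Each call gets a private FileContext over the same underlying
    // AbstractFileSystem, so setWorkingDirectory cannot race across threads.
    public FileContext newLocalizerContext(Configuration conf, Path appStorageDir)
        throws IOException {
      FileContext localizerFc =
          FileContext.getFileContext(lfs.getDefaultFileSystem(), conf);
      localizerFc.setUMask(lfs.getUMask());
      localizerFc.setWorkingDirectory(appStorageDir);
      return localizerFc;
    }
  }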
[5/9] git commit: YARN-2795. Fixed ResourceManager to not crash
loading node-label data from HDFS in secure mode. Contributed by Wangda Tan.
Posted by vi...@apache.org.
YARN-2795. Fixed ResourceManager to not crash loading node-label data from HDFS in secure mode. Contributed by Wangda Tan.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ec6cbece
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ec6cbece
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ec6cbece
Branch: refs/heads/HDFS-EC
Commit: ec6cbece8e7772868ce8ad996135d3136bd32245
Parents: 237890f
Author: Vinod Kumar Vavilapalli <vi...@apache.org>
Authored: Mon Nov 3 13:44:06 2014 -0800
Committer: Vinod Kumar Vavilapalli <vi...@apache.org>
Committed: Mon Nov 3 13:44:06 2014 -0800
----------------------------------------------------------------------
hadoop-yarn-project/CHANGES.txt | 3 ++
.../server/resourcemanager/ResourceManager.java | 32 +++++++++-----------
.../yarn/server/resourcemanager/TestRMHA.java | 9 +++---
.../resourcemanager/TestResourceManager.java | 8 ++++-
.../security/TestDelegationTokenRenewer.java | 7 ++++-
5 files changed, 36 insertions(+), 23 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec6cbece/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 9566458..25d03f4 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -849,6 +849,9 @@ Release 2.6.0 - UNRELEASED
that were caused when adding log-upload-time via YARN-2703. (Xuan Gong via
vinodkv)
+ YARN-2795. Fixed ResourceManager to not crash loading node-label data from
+ HDFS in secure mode. (Wangda Tan via vinodkv)
+
Release 2.5.1 - 2014-09-05
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec6cbece/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
index 57c8fce..642c732 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
@@ -23,9 +23,7 @@ import java.io.InputStream;
import java.net.InetSocketAddress;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
-import java.util.HashMap;
import java.util.List;
-import java.util.Map;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
@@ -195,7 +193,17 @@ public class ResourceManager extends CompositeService implements Recoverable {
protected void serviceInit(Configuration conf) throws Exception {
this.conf = conf;
this.rmContext = new RMContextImpl();
-
+
+ // Set UGI and do login
+ // If security is enabled, use login user
+ // If security is not enabled, use current user
+ this.rmLoginUGI = UserGroupInformation.getCurrentUser();
+ try {
+ doSecureLogin();
+ } catch(IOException ie) {
+ throw new YarnRuntimeException("Failed to login", ie);
+ }
+
this.configurationProvider =
ConfigurationProviderFactory.getConfigurationProvider(conf);
this.configurationProvider.init(this.conf);
@@ -242,14 +250,13 @@ public class ResourceManager extends CompositeService implements Recoverable {
if (this.rmContext.isHAEnabled()) {
HAUtil.verifyAndSetConfiguration(this.conf);
}
+
createAndInitActiveServices();
webAppAddress = WebAppUtils.getWebAppBindURL(this.conf,
YarnConfiguration.RM_BIND_HOST,
WebAppUtils.getRMWebAppURLWithoutScheme(this.conf));
- this.rmLoginUGI = UserGroupInformation.getCurrentUser();
-
super.serviceInit(this.conf);
}
@@ -1019,17 +1026,13 @@ public class ResourceManager extends CompositeService implements Recoverable {
}
synchronized void transitionToActive() throws Exception {
- if (rmContext.getHAServiceState() ==
- HAServiceProtocol.HAServiceState.ACTIVE) {
+ if (rmContext.getHAServiceState() == HAServiceProtocol.HAServiceState.ACTIVE) {
LOG.info("Already in active state");
return;
}
LOG.info("Transitioning to active state");
- // use rmLoginUGI to startActiveServices.
- // in non-secure model, rmLoginUGI will be current UGI
- // in secure model, rmLoginUGI will be LoginUser UGI
this.rmLoginUGI.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
@@ -1071,12 +1074,6 @@ public class ResourceManager extends CompositeService implements Recoverable {
@Override
protected void serviceStart() throws Exception {
- try {
- doSecureLogin();
- } catch(IOException ie) {
- throw new YarnRuntimeException("Failed to login", ie);
- }
-
if (this.rmContext.isHAEnabled()) {
transitionToStandby(true);
} else {
@@ -1084,7 +1081,8 @@ public class ResourceManager extends CompositeService implements Recoverable {
}
startWepApp();
- if (getConfig().getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)) {
+ if (getConfig().getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER,
+ false)) {
int port = webApp.port();
WebAppUtils.setRMWebAppPort(conf, port);
}
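The ordering change matters because active services may load node-label data from HDFS while they are being initialized, so the Kerberos login has to happen before createAndInitActiveServices() rather than in serviceStart(). Capturing rmLoginUGI up front and wrapping privileged work in doAs() keeps a single code path for both modes: in secure mode it is the login user, otherwise just the current user. A small self-contained sketch of that login-then-doAs pattern (class name and printed message are illustrative):

  import java.security.PrivilegedExceptionAction;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.security.UserGroupInformation;

  public class LoginThenDoAsSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      UserGroupInformation.setConfiguration(conf);
      // Secure mode: the Kerberos login user. Non-secure: the OS user.
      UserGroupInformation loginUgi = UserGroupInformation.getCurrentUser();
      // Privileged work (e.g. loading state from HDFS) runs as that user.
      loginUgi.doAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          System.out.println("running as "
              + UserGroupInformation.getCurrentUser().getUserName());
          return null;
        }
      });
    }
  }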
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec6cbece/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java
index e30ca29..8cef4c9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java
@@ -28,8 +28,6 @@ import java.net.InetSocketAddress;
import javax.ws.rs.core.MediaType;
-import org.junit.Assert;
-
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
@@ -37,10 +35,10 @@ import org.apache.hadoop.ha.HAServiceProtocol;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
import org.apache.hadoop.ha.HealthCheckFailedException;
-import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.yarn.conf.HAUtil;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -54,6 +52,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptS
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
import org.codehaus.jettison.json.JSONException;
import org.codehaus.jettison.json.JSONObject;
+import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
@@ -64,7 +63,7 @@ import com.sun.jersey.api.client.config.DefaultClientConfig;
public class TestRMHA {
private Log LOG = LogFactory.getLog(TestRMHA.class);
- private final Configuration configuration = new YarnConfiguration();
+ private Configuration configuration;
private MockRM rm = null;
private RMApp app = null;
private RMAppAttempt attempt = null;
@@ -82,6 +81,8 @@ public class TestRMHA {
@Before
public void setUp() throws Exception {
+ configuration = new Configuration();
+ UserGroupInformation.setConfiguration(configuration);
configuration.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
configuration.set(YarnConfiguration.RM_HA_IDS, RM1_NODE_ID + ","
+ RM2_NODE_ID);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec6cbece/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java
index 1117fbe..6735575 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java
@@ -57,6 +57,7 @@ public class TestResourceManager {
@Before
public void setUp() throws Exception {
Configuration conf = new YarnConfiguration();
+ UserGroupInformation.setConfiguration(conf);
resourceManager = new ResourceManager();
resourceManager.init(conf);
resourceManager.getRMContext().getContainerTokenSecretManager().rollMasterKey();
@@ -254,7 +255,12 @@ public class TestResourceManager {
AuthenticationFilterInitializer.class.getName() + ", "
+ this.getClass().getName() };
for (String filterInitializer : filterInitializers) {
- resourceManager = new ResourceManager();
+ resourceManager = new ResourceManager() {
+ @Override
+ protected void doSecureLogin() throws IOException {
+ // Skip the login.
+ }
+ };
Configuration conf = new YarnConfiguration();
conf.set(filterInitializerConfKey, filterInitializer);
conf.set("hadoop.security.authentication", "kerberos");
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec6cbece/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java
index b824df7..7275089 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java
@@ -808,7 +808,12 @@ public class TestDelegationTokenRenewer {
CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
"kerberos");
UserGroupInformation.setConfiguration(conf);
- MockRM rm = new MockRM(conf);
+ MockRM rm = new MockRM(conf) {
+ @Override
+ protected void doSecureLogin() throws IOException {
+ // Skip the login.
+ }
+ };
ByteBuffer tokens = ByteBuffer.wrap("BOGUS".getBytes());
ContainerLaunchContext amContainer =
ContainerLaunchContext.newInstance(
[7/9] git commit: HDFS-7147. Update archival storage user
documentation. Contributed by Tsz Wo Nicholas Sze.
Posted by vi...@apache.org.
HDFS-7147. Update archival storage user documentation. Contributed by Tsz Wo Nicholas Sze.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/35d353e0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/35d353e0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/35d353e0
Branch: refs/heads/HDFS-EC
Commit: 35d353e0f66b424508e2dd93bd036718cc4d5876
Parents: 734eeb4
Author: Haohui Mai <wh...@apache.org>
Authored: Mon Nov 3 15:10:22 2014 -0800
Committer: Haohui Mai <wh...@apache.org>
Committed: Mon Nov 3 15:10:22 2014 -0800
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +
.../BlockStoragePolicySuite.java | 6 +-
.../resources/blockStoragePolicy-default.xml | 118 -----------
.../src/site/apt/ArchivalStorage.apt.vm | 209 +++++++------------
hadoop-project/src/site/site.xml | 2 +-
5 files changed, 79 insertions(+), 259 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/35d353e0/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 16040ed..dfe8f4d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -993,6 +993,9 @@ Release 2.6.0 - UNRELEASED
HADOOP-11233. hadoop.security.kms.client.encrypted.key.cache.expiry
property spelled wrong in core-default. (Stephen Chu via yliu)
+ HDFS-7147. Update archival storage user documentation.
+ (Tsz Wo Nicholas Sze via wheat9)
+
BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
HDFS-6387. HDFS CLI admin tool for creating & deleting an
http://git-wip-us.apache.org/repos/asf/hadoop/blob/35d353e0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
index 13e9cff..ce87b06 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
@@ -18,7 +18,9 @@
package org.apache.hadoop.hdfs.server.blockmanagement;
import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
+
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.hdfs.StorageType;
import org.apache.hadoop.hdfs.XAttrHelper;
@@ -104,9 +106,11 @@ public class BlockStoragePolicySuite {
}
public BlockStoragePolicy getPolicy(String policyName) {
+ Preconditions.checkNotNull(policyName);
+
if (policies != null) {
for (BlockStoragePolicy policy : policies) {
- if (policy != null && policy.getName().equals(policyName)) {
+ if (policy != null && policy.getName().equalsIgnoreCase(policyName)) {
return policy;
}
}
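The equalsIgnoreCase change means callers no longer have to match the canonical casing of a policy name, and the new Preconditions guard turns a null name into an immediate error rather than a silent miss. A tiny usage sketch, assuming the createDefaultSuite() factory this class exposes in the 2.6 line:

  import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;

  public class PolicyLookupSketch {
    public static void main(String[] args) {
      BlockStoragePolicySuite suite = BlockStoragePolicySuite.createDefaultSuite();
      // After this patch both lookups resolve to the same policy object.
      System.out.println(suite.getPolicy("COLD") == suite.getPolicy("cold")); // true
    }
  }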
http://git-wip-us.apache.org/repos/asf/hadoop/blob/35d353e0/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/blockStoragePolicy-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/blockStoragePolicy-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/blockStoragePolicy-default.xml
deleted file mode 100644
index 891909b..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/blockStoragePolicy-default.xml
+++ /dev/null
@@ -1,118 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-
-<!-- Do not modify this file directly. Instead, copy entries that you wish -->
-<!-- to modify from this file into blockStoragePolicy-site.xml and change -->
-<!-- there. If blockStoragePolicy-site.xml does not exist, create it. -->
-
-<configuration>
-<property>
- <name>dfs.block.storage.policies</name>
- <value>HOT:12, WARM:8, COLD:4</value>
- <description>
- A list of block storage policy names and IDs. The syntax is
-
- NAME_1:ID_1, NAME_2:ID_2, ..., NAME_n:ID_n
-
- where ID is an integer in the range [1,15] and NAME is case insensitive.
- The first element is the default policy. Empty list is not allowed.
- </description>
-</property>
-
-<!-- Block Storage Policy HOT:12 -->
-<property>
- <name>dfs.block.storage.policy.12</name>
- <value>DISK</value>
- <description>
- A list of storage types for storing the block replicas such as
-
- STORAGE_TYPE_1, STORAGE_TYPE_2, ..., STORAGE_TYPE_n
-
- When creating a block, the i-th replica is stored using i-th storage type
- for i less than or equal to n, and
- the j-th replica is stored using n-th storage type for j greater than n.
-
- Empty list is not allowed.
-
- Examples:
- DISK : all replicas stored using DISK.
- DISK, ARCHIVE : the first replica is stored using DISK and all the
- remaining replicas are stored using ARCHIVE.
- </description>
-</property>
-
-<property>
- <name>dfs.block.storage.policy.creation-fallback.12</name>
- <value></value>
- <description>
- A list of storage types for creation fallback storage.
-
- STORAGE_TYPE_1, STORAGE_TYPE_2, ..., STORAGE_TYPE_n
-
- When creating a block, if a particular storage type specified in the policy
- is unavailable, the fallback STORAGE_TYPE_1 is used. Further, if
- STORAGE_TYPE_i is also unavailable, the fallback STORAGE_TYPE_(i+1) is used.
- In case that all fallback storages are unavailabe, the block will be created
- with number of replicas less than the specified replication factor.
-
- An empty list indicates that there is no fallback storage.
- </description>
-</property>
-
-<property>
- <name>dfs.block.storage.policy.replication-fallback.12</name>
- <value>ARCHIVE</value>
- <description>
- Similar to dfs.block.storage.policy.creation-fallback.x but for replication.
- </description>
-</property>
-
-<!-- Block Storage Policy WARM:8 -->
-<property>
- <name>dfs.block.storage.policy.8</name>
- <value>DISK, ARCHIVE</value>
-</property>
-
-<property>
- <name>dfs.block.storage.policy.creation-fallback.8</name>
- <value>DISK, ARCHIVE</value>
-</property>
-
-<property>
- <name>dfs.block.storage.policy.replication-fallback.8</name>
- <value>DISK, ARCHIVE</value>
-</property>
-
-<!-- Block Storage Policy COLD:4 -->
-<property>
- <name>dfs.block.storage.policy.4</name>
- <value>ARCHIVE</value>
-</property>
-
-<property>
- <name>dfs.block.storage.policy.creation-fallback.4</name>
- <value></value>
-</property>
-
-<property>
- <name>dfs.block.storage.policy.replication-fallback.4</name>
- <value></value>
-</property>
-</configuration>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/35d353e0/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/ArchivalStorage.apt.vm
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/ArchivalStorage.apt.vm b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/ArchivalStorage.apt.vm
index 5301d52..69674c7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/ArchivalStorage.apt.vm
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/ArchivalStorage.apt.vm
@@ -11,12 +11,12 @@
~~ limitations under the License. See accompanying LICENSE file.
---
- HDFS Archival Storage
+ Archival Storage, SSD & Memory
---
---
${maven.build.timestamp}
-HDFS Archival Storage
+Archival Storage, SSD & Memory
%{toc|section=1|fromDepth=0}
@@ -29,9 +29,13 @@ HDFS Archival Storage
Adding more nodes to the cold storage can grow the storage independent of the compute capacity
in the cluster.
+ The frameworks provided by Heterogeneous Storage and Archival Storage generalize the HDFS architecture
+ to include other kinds of storage media including <SSD> and <memory>.
+ Users may choose to store their data in SSD or memory for better performance.
+
* {Storage Types and Storage Policies}
-** {Storage Types: DISK, SSD and ARCHIVE}
+** {Storage Types: ARCHIVE, DISK, SSD and RAM_DISK}
The first phase of
{{{https://issues.apache.org/jira/browse/HDFS-2832}Heterogeneous Storage (HDFS-2832)}}
@@ -45,7 +49,9 @@ HDFS Archival Storage
which has high storage density (petabyte of storage) but little compute power,
is added for supporting archival storage.
-** {Storage Policies: Hot, Warm and Cold}
+ Another new storage type <RAM_DISK> is added to support writing single replica files in memory.
+
+** {Storage Policies: Hot, Warm, Cold, All_SSD, One_SSD and Lazy_Persist}
A new concept of storage policies is introduced in order to allow files to be stored
in different storage types according to the storage policy.
@@ -65,6 +71,14 @@ HDFS Archival Storage
When a block is warm, some of its replicas are stored in DISK
and the remaining replicas are stored in ARCHIVE.
+ * <<All_SSD>> - for storing all replicas in SSD.
+
+ * <<One_SSD>> - for storing one of the replicas in SSD.
+ The remaining replicas are stored in DISK.
+
+ * <<Lazy_Persist>> - for writing blocks with a single replica in memory.
+ The replica is first written in RAM_DISK and then it is lazily persisted in DISK.
+
[]
More formally, a storage policy consists of the following fields:
@@ -89,149 +103,54 @@ HDFS Archival Storage
The following is a typical storage policy table.
-*--------+---------------+-------------------------+-----------------------+-----------------------+
-| <<Policy>> | <<Policy>>| <<Block Placement>> | <<Fallback storages>> | <<Fallback storages>> |
-| <<ID>> | <<Name>> | <<(n\ replicas)>> | <<for creation>> | <<for replication>> |
-*--------+---------------+-------------------------+-----------------------+-----------------------+
-| 12 | Hot (default) | DISK: <n> | \<none\> | ARCHIVE |
-*--------+---------------+-------------------------+-----------------------+-----------------------+
-| 8 | Warm | DISK: 1, ARCHIVE: <n>-1 | ARCHIVE, DISK | ARCHIVE, DISK |
-*--------+---------------+-------------------------+-----------------------+-----------------------+
-| 4 | Cold | ARCHIVE: <n> | \<none\> | \<none\> |
-*--------+---------------+-------------------------+-----------------------+-----------------------+
-
- Note that cluster administrators may change the storage policy table
- according to the characteristic of the cluster.
- For example, in order to prevent losing archival data,
- administrators may want to use DISK as fallback storage for replication in the Cold policy.
- A drawback of such setting is that the DISK storages could be filled up with archival data.
- As a result, the entire cluster may become full and cannot serve hot data anymore.
-
-** {Configurations}
-
-*** {Setting The List of All Storage Policies}
-
- * <<dfs.block.storage.policies>>
- - a list of block storage policy names and IDs.
- The syntax is
-
- NAME_1:ID_1, NAME_2:ID_2, ..., NAME_<n>:ID_<n>
-
- where ID is an integer in the closed range [1,15] and NAME is case insensitive.
- The first element is the <default policy>. Empty list is not allowed.
-
- The default value is shown below.
-
-+------------------------------------------+
-<property>
- <name>dfs.block.storage.policies</name>
- <value>HOT:12, WARM:8, COLD:4</value>
-</property>
-+------------------------------------------+
+*--------+---------------+--------------------------+-----------------------+-----------------------+
+| <<Policy>> | <<Policy>>| <<Block Placement>> | <<Fallback storages>> | <<Fallback storages>> |
+| <<ID>> | <<Name>> | <<(n\ replicas)>> | <<for creation>> | <<for replication>> |
+*--------+---------------+--------------------------+-----------------------+-----------------------+
+| 15 | Lazy_Persist | RAM_DISK: 1, DISK: <n>-1 | DISK | DISK |
+*--------+---------------+--------------------------+-----------------------+-----------------------+
+| 12 | All_SSD | SSD: <n> | DISK | DISK |
+*--------+---------------+--------------------------+-----------------------+-----------------------+
+| 10 | One_SSD | SSD: 1, DISK: <n>-1 | SSD, DISK | SSD, DISK |
+*--------+---------------+--------------------------+-----------------------+-----------------------+
+| 7 | Hot (default) | DISK: <n> | \<none\> | ARCHIVE |
+*--------+---------------+--------------------------+-----------------------+-----------------------+
+| 5 | Warm | DISK: 1, ARCHIVE: <n>-1 | ARCHIVE, DISK | ARCHIVE, DISK |
+*--------+---------------+--------------------------+-----------------------+-----------------------+
+| 2 | Cold | ARCHIVE: <n> | \<none\> | \<none\> |
+*--------+---------------+--------------------------+-----------------------+-----------------------+
+
+ Note that the Lazy_Persist policy is useful only for single replica blocks.
+ For blocks with more than one replica, all the replicas will be written to DISK
+ since writing only one of the replicas to RAM_DISK does not improve the overall performance.
+
+** {Storage Policy Resolution}
+
+ When a file or directory is created, its storage policy is <unspecified>.
+ The storage policy can be specified using
+ the "<<<{{{Set Storage Policy}dfsadmin -setStoragePolicy}}>>>" command.
+ The effective storage policy of a file or directory is resolved by the following rules.
+
+ [[1]] If the file or directory is specified with a storage policy, return it.
+
+ [[2]] For an unspecified file or directory,
+ if it is the root directory, return the <default storage policy>.
+ Otherwise, return its parent's effective storage policy.
[]
-*** {Setting Storage Policy Details}
-
- The following configuration properties are for setting the details of each storage policy,
- where <<<\<ID\>>>> is the actual policy ID.
-
- * <<dfs.block.storage.policy.\<ID\>>>
- - a list of storage types for storing the block replicas.
- The syntax is
-
- STORAGE_TYPE_1, STORAGE_TYPE_2, ..., STORAGE_TYPE_<n>
-
- When creating a block, the <i>-th replica is stored using <i>-th storage type
- for <i> less than or equal to <n>, and
- the <j>-th replica is stored using <n>-th storage type for <j> greater than <n>.
-
- Empty list is not allowed.
-
- Examples:
-
-+------------------------------------------+
-DISK : all replicas stored using DISK.
-DISK, ARCHIVE : the first replica is stored using DISK and all the
- remaining replicas are stored using ARCHIVE.
-+------------------------------------------+
-
- * <<dfs.block.storage.policy.creation-fallback.\<ID\>>>
- - a list of storage types for creation fallback storage.
- The syntax is
+ The effective storage policy can be retrieved by
+ the "<<<{{{Set Storage Policy}dfsadmin -getStoragePolicy}}>>>" command.
- STORAGE_TYPE_1, STORAGE_TYPE_2, ..., STORAGE_TYPE_n
-
- When creating a block, if a particular storage type specified in the policy
- is unavailable, the fallback STORAGE_TYPE_1 is used. Further, if
- STORAGE_TYPE_<i> is also unavailable, the fallback STORAGE_TYPE_<(i+1)> is used.
- In case all fallback storages are unavailable, the block will be created
- with number of replicas less than the specified replication factor.
- An empty list indicates that there is no fallback storage.
+** {Configuration}
- * <<dfs.block.storage.policy.replication-fallback.\<ID\>>>
- - a list of storage types for replication fallback storage.
- The usage of this configuration property is similar to
- <<<dfs.block.storage.policy.creation-fallback.\<ID\>>>>
- except that it takes effect on replication but not block creation.
+ * <<dfs.storage.policy.enabled>>
+ - for enabling/disabling the storage policy feature.
+ The default value is <<<true>>>.
[]
- The following are the default configuration values for Hot, Warm and Cold storage policies.
-
- * Block Storage Policy <<HOT:12>>
-
-+------------------------------------------+
-<property>
- <name>dfs.block.storage.policy.12</name>
- <value>DISK</value>
-</property>
-<property>
- <name>dfs.block.storage.policy.creation-fallback.12</name>
- <value></value>
-</property>
-<property>
- <name>dfs.block.storage.policy.replication-fallback.12</name>
- <value>ARCHIVE</value>
-</property>
-+------------------------------------------+
-
- * Block Storage Policy <<WARM:8>>
-
-+------------------------------------------+
-<property>
- <name>dfs.block.storage.policy.8</name>
- <value>DISK, ARCHIVE</value>
-</property>
-<property>
- <name>dfs.block.storage.policy.creation-fallback.8</name>
- <value>DISK, ARCHIVE</value>
-</property>
-<property>
- <name>dfs.block.storage.policy.replication-fallback.8</name>
- <value>DISK, ARCHIVE</value>
-</property>
-+------------------------------------------+
-
- * Block Storage Policy <<COLD:4>>
-
-+------------------------------------------+
-<property>
- <name>dfs.block.storage.policy.4</name>
- <value>ARCHIVE</value>
-</property>
-<property>
- <name>dfs.block.storage.policy.creation-fallback.4</name>
- <value></value>
-</property>
-<property>
- <name>dfs.block.storage.policy.replication-fallback.4</name>
- <value></value>
-</property>
-+------------------------------------------+
-
- []
* {Mover - A New Data Migration Tool}
@@ -261,7 +180,19 @@ hdfs mover [-p <files/dirs> | -f <local file name>]
[]
-* {<<<DFSAdmin>>> Commands}
+* {Storage Policy Commands}
+
+** {List Storage Policies}
+
+ List out all the storage policies.
+
+ * Command:
+
++------------------------------------------+
+hdfs storagepolicies
++------------------------------------------+
+
+ * Arguments: none.
** {Set Storage Policy}
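Taken together with the Mover section above, and with dfs.storage.policy.enabled left at its default of true, a typical session under the rewritten page might look like the following (the path is a placeholder; the commands are the ones the page itself names):

  hdfs storagepolicies                                # list all storage policies
  hdfs dfsadmin -setStoragePolicy /archive/2013 COLD  # tag a directory
  hdfs dfsadmin -getStoragePolicy /archive/2013       # confirm the effective policy
  hdfs mover -p /archive/2013                         # migrate existing blocks to match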
http://git-wip-us.apache.org/repos/asf/hadoop/blob/35d353e0/hadoop-project/src/site/site.xml
----------------------------------------------------------------------
diff --git a/hadoop-project/src/site/site.xml b/hadoop-project/src/site/site.xml
index 11cd3c6..e1d4c92 100644
--- a/hadoop-project/src/site/site.xml
+++ b/hadoop-project/src/site/site.xml
@@ -92,7 +92,7 @@
<item name="Extended Attributes" href="hadoop-project-dist/hadoop-hdfs/ExtendedAttributes.html"/>
<item name="Transparent Encryption" href="hadoop-project-dist/hadoop-hdfs/TransparentEncryption.html"/>
<item name="HDFS Support for Multihoming" href="hadoop-project-dist/hadoop-hdfs/HdfsMultihoming.html"/>
- <item name="Archival Storage" href="hadoop-project-dist/hadoop-hdfs/ArchivalStorage.html"/>
+ <item name="Archival Storage, SSD & Memory" href="hadoop-project-dist/hadoop-hdfs/ArchivalStorage.html"/>
</menu>
<menu name="MapReduce" inherit="top">
[8/9] git commit: YARN-1922. Fixed NodeManager to kill process-trees
correctly in the presence of races between the launch and the stop-container
call and when root processes crash. Contributed by Billie Rinaldi.
Posted by vi...@apache.org.
YARN-1922. Fixed NodeManager to kill process-trees correctly in the presence of races between the launch and the stop-container call and when root processes crash. Contributed by Billie Rinaldi.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c5a46d4c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c5a46d4c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c5a46d4c
Branch: refs/heads/HDFS-EC
Commit: c5a46d4c8ca236ff641a309f256bbbdf4dd56db5
Parents: 35d353e
Author: Vinod Kumar Vavilapalli <vi...@apache.org>
Authored: Mon Nov 3 16:38:55 2014 -0800
Committer: Vinod Kumar Vavilapalli <vi...@apache.org>
Committed: Mon Nov 3 16:38:55 2014 -0800
----------------------------------------------------------------------
hadoop-yarn-project/CHANGES.txt | 4 +
.../launcher/ContainerLaunch.java | 5 +-
.../launcher/TestContainerLaunch.java | 111 +++++++++++++++++++
3 files changed, 117 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5a46d4c/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 25d03f4..f7c0dfa 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -852,6 +852,10 @@ Release 2.6.0 - UNRELEASED
YARN-2795. Fixed ResourceManager to not crash loading node-label data from
HDFS in secure mode. (Wangda Tan via vinodkv)
+ YARN-1922. Fixed NodeManager to kill process-trees correctly in the presence
+ of races between the launch and the stop-container call and when root
+ processes crash. (Billie Rinaldi via vinodkv)
+
Release 2.5.1 - 2014-09-05
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5a46d4c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
index 434cb4e..57e3bb9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
@@ -461,9 +461,8 @@ public class ContainerLaunch implements Callable<Integer> {
final int sleepInterval = 100;
// loop waiting for pid file to show up
- // until either the completed flag is set which means something bad
- // happened or our timer expires in which case we admit defeat
- while (!completed.get()) {
+ // until our timer expires in which case we admit defeat
+ while (true) {
processId = ProcessIdFileReader.getProcessId(pidFilePath);
if (processId != null) {
LOG.debug("Got pid " + processId + " for container "
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5a46d4c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
index 186788d..001643b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
@@ -75,6 +75,7 @@ import org.apache.hadoop.yarn.event.Event;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
import org.apache.hadoop.yarn.server.api.protocolrecords.NMContainerStatus;
+import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor.ExitCode;
import org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerManagerTest;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
@@ -969,4 +970,114 @@ public class TestContainerLaunch extends BaseContainerManagerTest {
assertThat(e.getMessage(), containsString(expectedMessage));
}
}
+
+ @Test
+ public void testKillProcessGroup() throws Exception {
+ Assume.assumeTrue(Shell.isSetsidAvailable);
+ containerManager.start();
+
+ // Construct the Container-id
+ ApplicationId appId = ApplicationId.newInstance(2, 2);
+ ApplicationAttemptId appAttemptId =
+ ApplicationAttemptId.newInstance(appId, 1);
+ ContainerId cId = ContainerId.newInstance(appAttemptId, 0);
+ File processStartFile =
+ new File(tmpDir, "pid.txt").getAbsoluteFile();
+ File childProcessStartFile =
+ new File(tmpDir, "child_pid.txt").getAbsoluteFile();
+
+ // setup a script that can handle sigterm gracefully
+ File scriptFile = Shell.appendScriptExtension(tmpDir, "testscript");
+ PrintWriter writer = new PrintWriter(new FileOutputStream(scriptFile));
+ writer.println("#!/bin/bash\n\n");
+ writer.println("echo \"Running testscript for forked process\"");
+ writer.println("umask 0");
+ writer.println("echo $$ >> " + processStartFile);
+ writer.println("while true;\ndo sleep 1s;\ndone > /dev/null 2>&1 &");
+ writer.println("echo $! >> " + childProcessStartFile);
+ writer.println("while true;\ndo sleep 1s;\ndone");
+ writer.close();
+ FileUtil.setExecutable(scriptFile, true);
+
+ ContainerLaunchContext containerLaunchContext =
+ recordFactory.newRecordInstance(ContainerLaunchContext.class);
+
+ // upload the script file so that the container can run it
+ URL resource_alpha =
+ ConverterUtils.getYarnUrlFromPath(localFS
+ .makeQualified(new Path(scriptFile.getAbsolutePath())));
+ LocalResource rsrc_alpha =
+ recordFactory.newRecordInstance(LocalResource.class);
+ rsrc_alpha.setResource(resource_alpha);
+ rsrc_alpha.setSize(-1);
+ rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION);
+ rsrc_alpha.setType(LocalResourceType.FILE);
+ rsrc_alpha.setTimestamp(scriptFile.lastModified());
+ String destinationFile = "dest_file.sh";
+ Map<String, LocalResource> localResources =
+ new HashMap<String, LocalResource>();
+ localResources.put(destinationFile, rsrc_alpha);
+ containerLaunchContext.setLocalResources(localResources);
+
+ // set up the rest of the container
+ List<String> commands = Arrays.asList(Shell.getRunScriptCommand(scriptFile));
+ containerLaunchContext.setCommands(commands);
+ Priority priority = Priority.newInstance(10);
+ long createTime = 1234;
+ Token containerToken = createContainerToken(cId, priority, createTime);
+
+ StartContainerRequest scRequest =
+ StartContainerRequest.newInstance(containerLaunchContext,
+ containerToken);
+ List<StartContainerRequest> list = new ArrayList<StartContainerRequest>();
+ list.add(scRequest);
+ StartContainersRequest allRequests =
+ StartContainersRequest.newInstance(list);
+ containerManager.startContainers(allRequests);
+
+ int timeoutSecs = 0;
+ while (!processStartFile.exists() && timeoutSecs++ < 20) {
+ Thread.sleep(1000);
+ LOG.info("Waiting for process start-file to be created");
+ }
+ Assert.assertTrue("ProcessStartFile doesn't exist!",
+ processStartFile.exists());
+
+ BufferedReader reader =
+ new BufferedReader(new FileReader(processStartFile));
+ // Get the pid of the process
+ String pid = reader.readLine().trim();
+ // No more lines
+ Assert.assertEquals(null, reader.readLine());
+ reader.close();
+
+ reader =
+ new BufferedReader(new FileReader(childProcessStartFile));
+ // Get the pid of the child process
+ String child = reader.readLine().trim();
+ // No more lines
+ Assert.assertEquals(null, reader.readLine());
+ reader.close();
+
+ LOG.info("Manually killing pid " + pid + ", but not child pid " + child);
+ Shell.execCommand(new String[]{"kill", "-9", pid});
+
+ BaseContainerManagerTest.waitForContainerState(containerManager, cId,
+ ContainerState.COMPLETE);
+
+ Assert.assertFalse("Process is still alive!",
+ DefaultContainerExecutor.containerIsAlive(pid));
+
+ List<ContainerId> containerIds = new ArrayList<ContainerId>();
+ containerIds.add(cId);
+
+ GetContainerStatusesRequest gcsRequest =
+ GetContainerStatusesRequest.newInstance(containerIds);
+
+ ContainerStatus containerStatus =
+ containerManager.getContainerStatuses(gcsRequest)
+ .getContainerStatuses().get(0);
+ Assert.assertEquals(ExitCode.FORCE_KILLED.getExitCode(),
+ containerStatus.getExitStatus());
+ }
}
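The Assume.assumeTrue(Shell.isSetsidAvailable) gate is what makes this test meaningful: with setsid, the container's launch script becomes a process-group leader, so the executor can signal the whole group and reap the sleeping child even after the root script has been killed with -9. A hedged sketch of group signalling with a plain external kill (the NodeManager itself goes through its ContainerExecutor, not a helper like this):

  public class KillProcessGroupSketch {
    // A negative pid addresses the whole process group. Assumes the
    // target was started as a group leader, e.g. via setsid.
    static void killGroup(String groupLeaderPid) throws Exception {
      new ProcessBuilder("kill", "-9", "--", "-" + groupLeaderPid)
          .inheritIO().start().waitFor();
    }

    public static void main(String[] args) throws Exception {
      killGroup(args[0]);
    }
  }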
[6/9] git commit: HADOOP-11248. Add hadoop configuration to disable
Azure Filesystem metrics collection. Contributed by Shanyu Zhao.
Posted by vi...@apache.org.
HADOOP-11248. Add hadoop configuration to disable Azure Filesystem metrics collection. Contributed by Shanyu Zhao.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/734eeb4f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/734eeb4f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/734eeb4f
Branch: refs/heads/HDFS-EC
Commit: 734eeb4f357ad3210355a0d3fdbc80706a770d61
Parents: ec6cbec
Author: cnauroth <cn...@apache.org>
Authored: Mon Nov 3 14:29:18 2014 -0800
Committer: cnauroth <cn...@apache.org>
Committed: Mon Nov 3 14:29:18 2014 -0800
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
.../hadoop/fs/azure/NativeAzureFileSystem.java | 20 +++++++++++++-------
.../TestNativeAzureFileSystemMetricsSystem.java | 11 +++++++++++
3 files changed, 27 insertions(+), 7 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/734eeb4f/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 93e58f6..eb91dcb 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -339,6 +339,9 @@ Trunk (Unreleased)
HADOOP-11022. User replaced functions get lost 2-3 levels deep (e.g.,
sbin) (aw)
+ HADOOP-11248. Add hadoop configuration to disable Azure Filesystem metrics
+ collection. (Shanyu Zhao via cnauroth)
+
OPTIMIZATIONS
HADOOP-7761. Improve the performance of raw comparisons. (todd)
http://git-wip-us.apache.org/repos/asf/hadoop/blob/734eeb4f/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
index 076c48a..ad2e2e6 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
@@ -641,6 +641,8 @@ public class NativeAzureFileSystem extends FileSystem {
static final String AZURE_OUTPUT_STREAM_BUFFER_SIZE_PROPERTY_NAME =
"fs.azure.output.stream.buffer.size";
+ public static final String SKIP_AZURE_METRICS_PROPERTY_NAME = "fs.azure.skip.metrics";
+
private class NativeAzureFsInputStream extends FSInputStream {
private InputStream in;
private final String key;
@@ -1035,13 +1037,15 @@ public class NativeAzureFileSystem extends FileSystem {
store = createDefaultStore(conf);
}
- // Make sure the metrics system is available before interacting with Azure
- AzureFileSystemMetricsSystem.fileSystemStarted();
- metricsSourceName = newMetricsSourceName();
- String sourceDesc = "Azure Storage Volume File System metrics";
instrumentation = new AzureFileSystemInstrumentation(conf);
- AzureFileSystemMetricsSystem.registerSource(metricsSourceName, sourceDesc,
+ if(!conf.getBoolean(SKIP_AZURE_METRICS_PROPERTY_NAME, false)) {
+ // Make sure the metrics system is available before interacting with Azure
+ AzureFileSystemMetricsSystem.fileSystemStarted();
+ metricsSourceName = newMetricsSourceName();
+ String sourceDesc = "Azure Storage Volume File System metrics";
+ AzureFileSystemMetricsSystem.registerSource(metricsSourceName, sourceDesc,
instrumentation);
+ }
store.initialize(uri, conf, instrumentation);
setConf(conf);
@@ -2207,8 +2211,10 @@ public class NativeAzureFileSystem extends FileSystem {
long startTime = System.currentTimeMillis();
- AzureFileSystemMetricsSystem.unregisterSource(metricsSourceName);
- AzureFileSystemMetricsSystem.fileSystemClosed();
+ if(!getConf().getBoolean(SKIP_AZURE_METRICS_PROPERTY_NAME, false)) {
+ AzureFileSystemMetricsSystem.unregisterSource(metricsSourceName);
+ AzureFileSystemMetricsSystem.fileSystemClosed();
+ }
if (LOG.isDebugEnabled()) {
LOG.debug("Submitting metrics when file system closed took "
http://git-wip-us.apache.org/repos/asf/hadoop/blob/734eeb4f/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/TestNativeAzureFileSystemMetricsSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/TestNativeAzureFileSystemMetricsSystem.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/TestNativeAzureFileSystemMetricsSystem.java
index f44613d..7820b7e 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/TestNativeAzureFileSystemMetricsSystem.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/TestNativeAzureFileSystemMetricsSystem.java
@@ -74,4 +74,15 @@ public class TestNativeAzureFileSystemMetricsSystem {
assertTrue(name2.startsWith("AzureFileSystemMetrics"));
assertTrue(!name1.equals(name2));
}
+
+ @Test
+ public void testSkipMetricsCollection() throws Exception {
+ AzureBlobStorageTestAccount a;
+ a = AzureBlobStorageTestAccount.createMock();
+ a.getFileSystem().getConf().setBoolean(
+ NativeAzureFileSystem.SKIP_AZURE_METRICS_PROPERTY_NAME, true);
+ a.getFileSystem().create(new Path("/foo")).close();
+ a.closeFileSystem(); // Causes the file system to close, which publishes metrics
+ assertEquals(0, getFilesCreated(a));
+ }
}
[3/9] git commit: YARN-2788. Fixed backwards compatibility issues with
log-aggregation feature that were caused when adding log-upload-time via
YARN-2703. Contributed by Xuan Gong.
Posted by vi...@apache.org.
YARN-2788. Fixed backwards compatibility issues with log-aggregation feature that were caused when adding log-upload-time via YARN-2703. Contributed by Xuan Gong.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/58e9f24e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/58e9f24e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/58e9f24e
Branch: refs/heads/HDFS-EC
Commit: 58e9f24e0f06efede21085b7ffe36af042fa7b38
Parents: 71fbb47
Author: Vinod Kumar Vavilapalli <vi...@apache.org>
Authored: Mon Nov 3 13:16:29 2014 -0800
Committer: Vinod Kumar Vavilapalli <vi...@apache.org>
Committed: Mon Nov 3 13:16:29 2014 -0800
----------------------------------------------------------------------
hadoop-yarn-project/CHANGES.txt | 4 +
.../hadoop/yarn/client/cli/TestLogsCLI.java | 58 ++++++++-
.../logaggregation/AggregatedLogFormat.java | 124 ++++++++++---------
.../yarn/logaggregation/LogCLIHelpers.java | 20 ++-
.../yarn/webapp/log/AggregatedLogsBlock.java | 8 +-
.../logaggregation/TestAggregatedLogFormat.java | 26 +++-
.../TestLogAggregationService.java | 15 +--
7 files changed, 172 insertions(+), 83 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/58e9f24e/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index c3ee0b0..9566458 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -845,6 +845,10 @@ Release 2.6.0 - UNRELEASED
YARN-2798. Fixed YarnClient to populate the renewer correctly for Timeline
delegation tokens. (Zhijie Shen via vinodkv)
+ YARN-2788. Fixed backwards compatibility issues with log-aggregation feature
+ that were caused when adding log-upload-time via YARN-2703. (Xuan Gong via
+ vinodkv)
+
Release 2.5.1 - 2014-09-05
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/58e9f24e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java
index 2e9e92d..5ed8398 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java
@@ -25,6 +25,7 @@ import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;
import java.io.ByteArrayOutputStream;
+import java.io.DataOutputStream;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
@@ -33,6 +34,7 @@ import java.io.PrintWriter;
import java.io.Writer;
import java.util.Arrays;
import java.util.HashMap;
+import java.util.HashSet;
import java.util.List;
import java.util.Map;
@@ -170,9 +172,9 @@ public class TestLogsCLI {
ApplicationId appId = ApplicationIdPBImpl.newInstance(0, 1);
ApplicationAttemptId appAttemptId =
ApplicationAttemptIdPBImpl.newInstance(appId, 1);
+ ContainerId containerId0 = ContainerIdPBImpl.newInstance(appAttemptId, 0);
ContainerId containerId1 = ContainerIdPBImpl.newInstance(appAttemptId, 1);
ContainerId containerId2 = ContainerIdPBImpl.newInstance(appAttemptId, 2);
-
NodeId nodeId = NodeId.newInstance("localhost", 1234);
// create local logs
@@ -201,7 +203,15 @@ public class TestLogsCLI {
fs.delete(path, true);
}
assertTrue(fs.mkdirs(path));
+
// upload container logs into remote directory
+ // The first two logs are empty. When we try to read the first two logs,
+ // we will hit an EOF exception, but it will not impact the other logs.
+ // Other logs should be read successfully.
+ uploadEmptyContainerLogIntoRemoteDir(ugi, configuration, rootLogDirs, nodeId,
+ containerId0, path, fs);
+ uploadEmptyContainerLogIntoRemoteDir(ugi, configuration, rootLogDirs, nodeId,
+ containerId1, path, fs);
uploadContainerLogIntoRemoteDir(ugi, configuration, rootLogDirs, nodeId,
containerId1, path, fs);
uploadContainerLogIntoRemoteDir(ugi, configuration, rootLogDirs, nodeId,
@@ -220,6 +230,9 @@ public class TestLogsCLI {
"Hello container_0_0001_01_000002!"));
sysOutStream.reset();
+ // Uploaded two logs for container1: the first is empty, the second
+ // is not. We should still be able to read container1's logs
+ // successfully.
exitCode =
cli.run(new String[] { "-applicationId", appId.toString(),
"-nodeAddress", nodeId.toString(), "-containerId",
@@ -227,7 +240,23 @@ public class TestLogsCLI {
assertTrue(exitCode == 0);
assertTrue(sysOutStream.toString().contains(
"Hello container_0_0001_01_000001!"));
- assertTrue(sysOutStream.toString().contains("LogUploadTime"));
+ assertTrue(sysOutStream.toString().contains("Log Upload Time"));
+ assertTrue(!sysOutStream.toString().contains(
+ "Logs for container " + containerId1.toString()
+ + " are not present in this log-file."));
+ sysOutStream.reset();
+
+ // Only an empty log was uploaded for container0, so we should see
+ // the message saying that the logs for container0 are not present
+ // in this log-file.
+ exitCode =
+ cli.run(new String[] { "-applicationId", appId.toString(),
+ "-nodeAddress", nodeId.toString(), "-containerId",
+ containerId0.toString() });
+ assertTrue(exitCode == -1);
+ assertTrue(sysOutStream.toString().contains(
+ "Logs for container " + containerId0.toString()
+ + " are not present in this log-file."));
fs.delete(new Path(remoteLogRootDir), true);
fs.delete(new Path(rootLogDir), true);
@@ -266,6 +295,31 @@ public class TestLogsCLI {
writer.close();
}
+ private static void uploadEmptyContainerLogIntoRemoteDir(UserGroupInformation ugi,
+ Configuration configuration, List<String> rootLogDirs, NodeId nodeId,
+ ContainerId containerId, Path appDir, FileSystem fs) throws Exception {
+ Path path =
+ new Path(appDir, LogAggregationUtils.getNodeString(nodeId)
+ + System.currentTimeMillis());
+ AggregatedLogFormat.LogWriter writer =
+ new AggregatedLogFormat.LogWriter(configuration, path, ugi);
+ writer.writeApplicationOwner(ugi.getUserName());
+
+ Map<ApplicationAccessType, String> appAcls =
+ new HashMap<ApplicationAccessType, String>();
+ appAcls.put(ApplicationAccessType.VIEW_APP, ugi.getUserName());
+ writer.writeApplicationACLs(appAcls);
+ DataOutputStream out = writer.getWriter().prepareAppendKey(-1);
+ new AggregatedLogFormat.LogKey(containerId).write(out);
+ out.close();
+ out = writer.getWriter().prepareAppendValue(-1);
+ new AggregatedLogFormat.LogValue(rootLogDirs, containerId,
+ UserGroupInformation.getCurrentUser().getShortUserName()).write(out,
+ new HashSet<File>());
+ out.close();
+ writer.close();
+ }
+
private YarnClient createMockYarnClient(YarnApplicationState appState)
throws YarnException, IOException {
YarnClient mockClient = mock(YarnClient.class);
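To make the empty-entry behavior concrete, here is a minimal sketch (not part of the patch) of reading back an entry like the ones the helper above writes; the class name, conf, and remoteNodeFilePath are assumptions:

import java.io.DataInputStream;
import java.io.EOFException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat;
import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogKey;
import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogReader;

public class EmptyEntryReadSketch {
  public static void readFirstEntry(Configuration conf, Path remoteNodeFilePath)
      throws Exception {
    AggregatedLogFormat.LogReader reader =
        new AggregatedLogFormat.LogReader(conf, remoteNodeFilePath);
    try {
      LogKey key = new LogKey();
      // Positions the stream at the first container's (empty) value.
      DataInputStream valueStream = reader.next(key);
      try {
        LogReader.readAContainerLogsForALogType(valueStream, System.out);
      } catch (EOFException e) {
        // An empty value hits EOF on the very first read. With this patch
        // the CLI reports the container's logs as "not present" and moves
        // on, instead of failing the whole dump.
      }
    } finally {
      reader.close();
    }
  }
}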
http://git-wip-us.apache.org/repos/asf/hadoop/blob/58e9f24e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
index 22219be..a434ef5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
@@ -27,6 +27,7 @@ import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
+import java.io.OutputStream;
import java.io.PrintStream;
import java.io.Writer;
import java.security.PrivilegedExceptionAction;
@@ -44,6 +45,7 @@ import java.util.Set;
import java.util.regex.Pattern;
import org.apache.commons.io.input.BoundedInputStream;
+import org.apache.commons.io.output.WriterOutputStream;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
@@ -233,9 +235,6 @@ public class AggregatedLogFormat {
// Write the logFile Type
out.writeUTF(logFile.getName());
- // Write the uploaded TimeStamp
- out.writeLong(System.currentTimeMillis());
-
// Write the log length as UTF so that it is printable
out.writeUTF(String.valueOf(fileLength));
@@ -400,6 +399,11 @@ public class AggregatedLogFormat {
writeVersion();
}
+ @VisibleForTesting
+ public TFile.Writer getWriter() {
+ return this.writer;
+ }
+
private void writeVersion() throws IOException {
DataOutputStream out = this.writer.prepareAppendKey(-1);
VERSION_KEY.write(out);
@@ -639,70 +643,55 @@ public class AggregatedLogFormat {
* Writes all logs for a single container to the provided writer.
* @param valueStream
* @param writer
+ * @param logUploadedTime
* @throws IOException
*/
public static void readAcontainerLogs(DataInputStream valueStream,
- Writer writer) throws IOException {
- int bufferSize = 65536;
- char[] cbuf = new char[bufferSize];
- String fileType;
- long uploadTime;
- String fileLengthStr;
- long fileLength;
-
- while (true) {
- try {
- fileType = valueStream.readUTF();
- } catch (EOFException e) {
- // EndOfFile
- return;
- }
- uploadTime = valueStream.readLong();
- fileLengthStr = valueStream.readUTF();
- fileLength = Long.parseLong(fileLengthStr);
- writer.write("\n\nLogType:");
- writer.write(fileType);
- writer.write("\nLogUploadTime:");
- writer.write(String.valueOf(uploadTime));
- writer.write("\nLogLength:");
- writer.write(fileLengthStr);
- writer.write("\nLog Contents:\n");
- // ByteLevel
- BoundedInputStream bis =
- new BoundedInputStream(valueStream, fileLength);
- InputStreamReader reader = new InputStreamReader(bis);
- int currentRead = 0;
- int totalRead = 0;
- while ((currentRead = reader.read(cbuf, 0, bufferSize)) != -1) {
- writer.write(cbuf, 0, currentRead);
- totalRead += currentRead;
+ Writer writer, long logUploadedTime) throws IOException {
+ OutputStream os = null;
+ PrintStream ps = null;
+ try {
+ os = new WriterOutputStream(writer);
+ ps = new PrintStream(os);
+ while (true) {
+ try {
+ readContainerLogs(valueStream, ps, logUploadedTime);
+ } catch (EOFException e) {
+ // EndOfFile
+ return;
+ }
}
+ } finally {
+ IOUtils.cleanup(LOG, ps);
+ IOUtils.cleanup(LOG, os);
}
}
/**
- * Keep calling this till you get a {@link EOFException} for getting logs of
- * all types for a single container.
- *
+ * Writes all logs for a single container to the provided writer.
* @param valueStream
- * @param out
+ * @param writer
* @throws IOException
*/
- public static void readAContainerLogsForALogType(
- DataInputStream valueStream, PrintStream out)
- throws IOException {
+ public static void readAcontainerLogs(DataInputStream valueStream,
+ Writer writer) throws IOException {
+ readAcontainerLogs(valueStream, writer, -1);
+ }
+ private static void readContainerLogs(DataInputStream valueStream,
+ PrintStream out, long logUploadedTime) throws IOException {
byte[] buf = new byte[65535];
String fileType = valueStream.readUTF();
- long uploadTime = valueStream.readLong();
String fileLengthStr = valueStream.readUTF();
long fileLength = Long.parseLong(fileLengthStr);
- out.print("LogType: ");
+ out.print("LogType:");
out.println(fileType);
- out.print("LogUploadTime: ");
- out.println(Times.format(uploadTime));
- out.print("LogLength: ");
+ if (logUploadedTime != -1) {
+ out.print("Log Upload Time:");
+ out.println(Times.format(logUploadedTime));
+ }
+ out.print("LogLength:");
out.println(fileLengthStr);
out.println("Log Contents:");
@@ -723,6 +712,35 @@ public class AggregatedLogFormat {
out.println("");
}
+ /**
+ * Keep calling this until it throws an {@link EOFException} to read the
+ * logs of all types for a single container.
+ *
+ * @param valueStream
+ * @param out
+ * @param logUploadedTime
+ * @throws IOException
+ */
+ public static void readAContainerLogsForALogType(
+ DataInputStream valueStream, PrintStream out, long logUploadedTime)
+ throws IOException {
+ readContainerLogs(valueStream, out, logUploadedTime);
+ }
+
+ /**
+ * Keep calling this until it throws an {@link EOFException} to read the
+ * logs of all types for a single container.
+ *
+ * @param valueStream
+ * @param out
+ * @throws IOException
+ */
+ public static void readAContainerLogsForALogType(
+ DataInputStream valueStream, PrintStream out)
+ throws IOException {
+ readAContainerLogsForALogType(valueStream, out, -1);
+ }
+
public void close() {
IOUtils.cleanup(LOG, scanner, reader, fsDataIStream);
}
@@ -732,7 +750,6 @@ public class AggregatedLogFormat {
public static class ContainerLogsReader {
private DataInputStream valueStream;
private String currentLogType = null;
- private long currentLogUpLoadTime = 0;
private long currentLogLength = 0;
private BoundedInputStream currentLogData = null;
private InputStreamReader currentLogISR;
@@ -753,14 +770,12 @@ public class AggregatedLogFormat {
}
currentLogType = null;
- currentLogUpLoadTime = 0;
currentLogLength = 0;
currentLogData = null;
currentLogISR = null;
try {
String logType = valueStream.readUTF();
- long logUpLoadTime = valueStream.readLong();
String logLengthStr = valueStream.readUTF();
currentLogLength = Long.parseLong(logLengthStr);
currentLogData =
@@ -768,7 +783,6 @@ public class AggregatedLogFormat {
currentLogData.setPropagateClose(false);
currentLogISR = new InputStreamReader(currentLogData);
currentLogType = logType;
- currentLogUpLoadTime = logUpLoadTime;
} catch (EOFException e) {
}
@@ -779,10 +793,6 @@ public class AggregatedLogFormat {
return currentLogType;
}
- public long getCurrentLogUpLoadTime() {
- return currentLogUpLoadTime;
- }
-
public long getCurrentLogLength() {
return currentLogLength;
}
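Putting the new overload together, here is a hedged sketch (not from the patch) of a full dump loop, with the upload time taken from the aggregated file's modification time as the CLI and web UI now do; conf, fs, nodeFilePath, and the class name are assumptions:

import java.io.DataInputStream;
import java.io.EOFException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat;
import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogKey;
import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogReader;

public class DumpAllContainersSketch {
  public static void dump(Configuration conf, FileSystem fs, Path nodeFilePath)
      throws Exception {
    // The per-entry timestamps are gone from the stream; the file's
    // modification time stands in for the upload time.
    long logUploadedTime = fs.getFileStatus(nodeFilePath).getModificationTime();
    AggregatedLogFormat.LogReader reader =
        new AggregatedLogFormat.LogReader(conf, nodeFilePath);
    try {
      LogKey key = new LogKey();
      DataInputStream valueStream = reader.next(key);
      while (valueStream != null) {
        while (true) {
          try {
            LogReader.readAContainerLogsForALogType(valueStream, System.out,
                logUploadedTime);
          } catch (EOFException e) {
            break; // no more log types for this container
          }
        }
        key = new LogKey();
        valueStream = reader.next(key);
      }
    } finally {
      reader.close();
    }
  }
}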
http://git-wip-us.apache.org/repos/asf/hadoop/blob/58e9f24e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogCLIHelpers.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogCLIHelpers.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogCLIHelpers.java
index de06d48..1546ece 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogCLIHelpers.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogCLIHelpers.java
@@ -78,7 +78,8 @@ public class LogCLIHelpers implements Configurable {
reader =
new AggregatedLogFormat.LogReader(getConf(),
thisNodeFile.getPath());
- if (dumpAContainerLogs(containerId, reader, System.out) > -1) {
+ if (dumpAContainerLogs(containerId, reader, System.out,
+ thisNodeFile.getModificationTime()) > -1) {
foundContainerLogs = true;
}
} finally {
@@ -97,7 +98,8 @@ public class LogCLIHelpers implements Configurable {
@Private
public int dumpAContainerLogs(String containerIdStr,
- AggregatedLogFormat.LogReader reader, PrintStream out) throws IOException {
+ AggregatedLogFormat.LogReader reader, PrintStream out,
+ long logUploadedTime) throws IOException {
DataInputStream valueStream;
LogKey key = new LogKey();
valueStream = reader.next(key);
@@ -112,14 +114,20 @@ public class LogCLIHelpers implements Configurable {
return -1;
}
+ boolean foundContainerLogs = false;
while (true) {
try {
- LogReader.readAContainerLogsForALogType(valueStream, out);
+ LogReader.readAContainerLogsForALogType(valueStream, out,
+ logUploadedTime);
+ foundContainerLogs = true;
} catch (EOFException eof) {
break;
}
}
- return 0;
+ if (foundContainerLogs) {
+ return 0;
+ }
+ return -1;
}
@Private
@@ -157,13 +165,15 @@ public class LogCLIHelpers implements Configurable {
valueStream = reader.next(key);
while (valueStream != null) {
+
String containerString =
"\n\nContainer: " + key + " on " + thisNodeFile.getPath().getName();
out.println(containerString);
out.println(StringUtils.repeat("=", containerString.length()));
while (true) {
try {
- LogReader.readAContainerLogsForALogType(valueStream, out);
+ LogReader.readAContainerLogsForALogType(valueStream, out,
+ thisNodeFile.getModificationTime());
foundAnyLogs = true;
} catch (EOFException eof) {
break;
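dumpAContainerLogs previously returned 0 whenever the container key existed, even if nothing was readable. A brief sketch, under the assumption that the caller already has an open reader, a container id string, and the file's modification time, of reacting to the new -1 result:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat;
import org.apache.hadoop.yarn.logaggregation.LogCLIHelpers;

public class DumpReturnCodeSketch {
  public static void dumpOne(Configuration conf,
      AggregatedLogFormat.LogReader reader, String containerIdStr,
      long modTime) throws Exception {
    LogCLIHelpers helpers = new LogCLIHelpers();
    helpers.setConf(conf);
    int rc = helpers.dumpAContainerLogs(containerIdStr, reader,
        System.out, modTime);
    if (rc == -1) {
      // -1 now also covers "key present but every entry hit EOF at once",
      // i.e. only empty uploads exist for this container.
      System.err.println("Logs for container " + containerIdStr
          + " are not present in this log-file.");
    }
  }
}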
http://git-wip-us.apache.org/repos/asf/hadoop/blob/58e9f24e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/log/AggregatedLogsBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/log/AggregatedLogsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/log/AggregatedLogsBlock.java
index bba3258..3e9f7a2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/log/AggregatedLogsBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/log/AggregatedLogsBlock.java
@@ -126,6 +126,7 @@ public class AggregatedLogsBlock extends HtmlBlock {
.endsWith(LogAggregationUtils.TMP_FILE_SUFFIX)) {
continue;
}
+ long logUploadedTime = thisNodeFile.getModificationTime();
reader =
new AggregatedLogFormat.LogReader(conf, thisNodeFile.getPath());
@@ -164,7 +165,7 @@ public class AggregatedLogsBlock extends HtmlBlock {
}
foundLog = readContainerLogs(html, logReader, logLimits,
- desiredLogType);
+ desiredLogType, logUploadedTime);
} catch (IOException ex) {
LOG.error("Error getting logs for " + logEntity, ex);
continue;
@@ -189,7 +190,7 @@ public class AggregatedLogsBlock extends HtmlBlock {
private boolean readContainerLogs(Block html,
AggregatedLogFormat.ContainerLogsReader logReader, LogLimits logLimits,
- String desiredLogType) throws IOException {
+ String desiredLogType, long logUpLoadTime) throws IOException {
int bufferSize = 65536;
char[] cbuf = new char[bufferSize];
@@ -199,13 +200,12 @@ public class AggregatedLogsBlock extends HtmlBlock {
if (desiredLogType == null || desiredLogType.isEmpty()
|| desiredLogType.equals(logType)) {
long logLength = logReader.getCurrentLogLength();
- long logUpLoadTime = logReader.getCurrentLogUpLoadTime();
if (foundLog) {
html.pre()._("\n\n")._();
}
html.p()._("Log Type: " + logType)._();
- html.p()._("Log UpLoadTime: " + Times.format(logUpLoadTime))._();
+ html.p()._("Log Upload Time: " + Times.format(logUpLoadTime))._();
html.p()._("Log Length: " + Long.toString(logLength))._();
long start = logLimits.start < 0
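The web UI makes the same substitution: the upload time shown per log is now captured once per aggregated node file rather than read from each entry. A hedged sketch of that pattern, with fs, remoteAppLogDir, and the class name as assumptions:

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class UploadTimeSketch {
  public static void scan(FileSystem fs, Path remoteAppLogDir) throws Exception {
    FileStatus[] nodeFiles = fs.listStatus(remoteAppLogDir);
    for (FileStatus thisNodeFile : nodeFiles) {
      // One timestamp per aggregated file, instead of one per log entry;
      // this is what keeps old readers compatible with new files and
      // vice versa.
      long logUploadedTime = thisNodeFile.getModificationTime();
      System.out.println(thisNodeFile.getPath() + " uploaded at "
          + logUploadedTime);
    }
  }
}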
http://git-wip-us.apache.org/repos/asf/hadoop/blob/58e9f24e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java
index bc0485e..405cb3d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java
@@ -57,6 +57,7 @@ import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogKey;
import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogReader;
import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogValue;
import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogWriter;
+import org.apache.hadoop.yarn.util.Times;
import org.junit.After;
import org.junit.Assume;
import org.junit.Before;
@@ -178,9 +179,16 @@ public class TestAggregatedLogFormat {
logWriter.close();
}
- //Verify the output generated by readAContainerLogs(DataInputStream, Writer)
@Test
public void testReadAcontainerLogs1() throws Exception {
+ // Verify the output generated by readAcontainerLogs(DataInputStream, Writer, long logUploadedTime)
+ testReadAcontainerLog(true);
+
+ // Verify the output generated by readAcontainerLogs(DataInputStream, Writer)
+ testReadAcontainerLog(false);
+ }
+
+ private void testReadAcontainerLog(boolean logUploadedTime) throws Exception {
Configuration conf = new Configuration();
File workDir = new File(testWorkDir, "testReadAcontainerLogs1");
Path remoteAppLogFile =
@@ -233,17 +241,23 @@ public class TestAggregatedLogFormat {
LogKey rLogKey = new LogKey();
DataInputStream dis = logReader.next(rLogKey);
Writer writer = new StringWriter();
- LogReader.readAcontainerLogs(dis, writer);
-
+
+ if (logUploadedTime) {
+ LogReader.readAcontainerLogs(dis, writer, System.currentTimeMillis());
+ } else {
+ LogReader.readAcontainerLogs(dis, writer);
+ }
+
// We should only do the log aggregation for stdout.
// Since we could not open the fileInputStream for stderr, this file is not
// aggregated.
String s = writer.toString();
int expectedLength =
- "\n\nLogType:stdout".length()
- + ("\nLogUploadTime:" + System.currentTimeMillis()).length()
+ "LogType:stdout".length()
+ + (logUploadedTime ? ("\nLog Upload Time:" + Times.format(System
+ .currentTimeMillis())).length() : 0)
+ ("\nLogLength:" + numChars).length()
- + "\nLog Contents:\n".length() + numChars;
+ + "\nLog Contents:\n".length() + numChars + "\n".length();
Assert.assertTrue("LogType not matched", s.contains("LogType:stdout"));
Assert.assertTrue("log file:stderr should not be aggregated.", !s.contains("LogType:stderr"));
Assert.assertTrue("log file:logs should not be aggregated.", !s.contains("LogType:logs"));
http://git-wip-us.apache.org/repos/asf/hadoop/blob/58e9f24e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java
index 419de88..7d911e9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java
@@ -767,30 +767,27 @@ public class TestLogAggregationService extends BaseContainerManagerTest {
try {
ByteArrayOutputStream baos = new ByteArrayOutputStream();
PrintStream ps = new PrintStream(baos);
+
LogReader.readAContainerLogsForALogType(valueStream, ps);
String writtenLines[] = baos.toString().split(
System.getProperty("line.separator"));
Assert.assertEquals("LogType:", writtenLines[0].substring(0, 8));
- String fileType = writtenLines[0].substring(9);
-
- Assert.assertEquals("LogUploadTime:", writtenLines[1].substring(0, 14));
- String fileUploadedTimeStr = writtenLines[1].substring(15);
+ String fileType = writtenLines[0].substring(8);
- Assert.assertEquals("LogLength:", writtenLines[2].substring(0, 10));
- String fileLengthStr = writtenLines[2].substring(11);
+ Assert.assertEquals("LogLength:", writtenLines[1].substring(0, 10));
+ String fileLengthStr = writtenLines[1].substring(10);
long fileLength = Long.parseLong(fileLengthStr);
Assert.assertEquals("Log Contents:",
- writtenLines[3].substring(0, 13));
+ writtenLines[2].substring(0, 13));
String logContents = StringUtils.join(
- Arrays.copyOfRange(writtenLines, 4, writtenLines.length), "\n");
+ Arrays.copyOfRange(writtenLines, 3, writtenLines.length), "\n");
perContainerMap.put(fileType, logContents);
LOG.info("LogType:" + fileType);
- LOG.info("LogUploadTime:" + fileUploadedTimeStr);
LOG.info("LogLength:" + fileLength);
LOG.info("Log Contents:\n" + perContainerMap.get(fileType));
} catch (EOFException eof) {
[2/9] git commit: YARN-2798. Fixed YarnClient to populate the renewer
correctly for Timeline delegation tokens. Contributed by Zhijie Shen.
Posted by vi...@apache.org.
YARN-2798. Fixed YarnClient to populate the renewer correctly for Timeline delegation tokens. Contributed by Zhijie Shen.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/71fbb474
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/71fbb474
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/71fbb474
Branch: refs/heads/HDFS-EC
Commit: 71fbb474f531f60c5d908cf724f18f90dfd5fa9f
Parents: 6157ace
Author: Vinod Kumar Vavilapalli <vi...@apache.org>
Authored: Mon Nov 3 12:36:44 2014 -0800
Committer: Vinod Kumar Vavilapalli <vi...@apache.org>
Committed: Mon Nov 3 12:49:42 2014 -0800
----------------------------------------------------------------------
hadoop-yarn-project/CHANGES.txt | 3 +++
.../yarn/client/api/impl/YarnClientImpl.java | 22 ++++++++++++++------
.../yarn/client/api/impl/TestYarnClient.java | 20 +++++++++++++++++-
.../yarn/security/TestYARNTokenIdentifier.java | 19 +++++++++++++++++
4 files changed, 57 insertions(+), 7 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/71fbb474/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 1bb116b..c3ee0b0 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -842,6 +842,9 @@ Release 2.6.0 - UNRELEASED
YARN-2730. DefaultContainerExecutor runs only one localizer at a time
(Siqi Li via jlowe)
+ YARN-2798. Fixed YarnClient to populate the renewer correctly for Timeline
+ delegation tokens. (Zhijie Shen via vinodkv)
+
Release 2.5.1 - 2014-09-05
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/71fbb474/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
index 1193cb4..e4f31f2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
@@ -36,7 +36,7 @@ import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.security.Credentials;
-import org.apache.hadoop.security.HadoopKerberosName;
+import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
@@ -51,7 +51,6 @@ import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportRequest;
@@ -124,6 +123,8 @@ public class YarnClientImpl extends YarnClient {
protected TimelineClient timelineClient;
@VisibleForTesting
Text timelineService;
+ @VisibleForTesting
+ String timelineDTRenewer;
protected boolean timelineServiceEnabled;
private static final String ROOT = "root";
@@ -161,6 +162,7 @@ public class YarnClientImpl extends YarnClient {
timelineServiceEnabled = true;
timelineClient = TimelineClient.createTimelineClient();
timelineClient.init(conf);
+ timelineDTRenewer = getTimelineDelegationTokenRenewer(conf);
timelineService = TimelineUtils.buildTimelineTokenService(conf);
}
super.serviceInit(conf);
@@ -320,14 +322,22 @@ public class YarnClientImpl extends YarnClient {
@VisibleForTesting
org.apache.hadoop.security.token.Token<TimelineDelegationTokenIdentifier>
getTimelineDelegationToken() throws IOException, YarnException {
+ return timelineClient.getDelegationToken(timelineDTRenewer);
+ }
+
+ private static String getTimelineDelegationTokenRenewer(Configuration conf)
+ throws IOException, YarnException {
// Parse the RM daemon user if it exists in the config
- String rmPrincipal = getConfig().get(YarnConfiguration.RM_PRINCIPAL);
+ String rmPrincipal = conf.get(YarnConfiguration.RM_PRINCIPAL);
String renewer = null;
if (rmPrincipal != null && rmPrincipal.length() > 0) {
- HadoopKerberosName renewerKrbName = new HadoopKerberosName(rmPrincipal);
- renewer = renewerKrbName.getShortName();
+ String rmHost = conf.getSocketAddr(
+ YarnConfiguration.RM_ADDRESS,
+ YarnConfiguration.DEFAULT_RM_ADDRESS,
+ YarnConfiguration.DEFAULT_RM_PORT).getHostName();
+ renewer = SecurityUtil.getServerPrincipal(rmPrincipal, rmHost);
}
- return timelineClient.getDelegationToken(renewer);
+ return renewer;
}
@Private
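The effect of the new renewer parsing, as a hedged standalone sketch; the host and principal values and the class name are assumptions, and SecurityUtil.getServerPrincipal substitutes _HOST with the given hostname:

import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class RenewerSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new YarnConfiguration();
    conf.set(YarnConfiguration.RM_PRINCIPAL, "rm/_HOST@EXAMPLE.COM");
    conf.set(YarnConfiguration.RM_ADDRESS, "rm-host.example.com:8032");
    InetSocketAddress rmAddr = conf.getSocketAddr(
        YarnConfiguration.RM_ADDRESS,
        YarnConfiguration.DEFAULT_RM_ADDRESS,
        YarnConfiguration.DEFAULT_RM_PORT);
    // _HOST is replaced by the RM host, yielding the full server principal
    // instead of the short name the old HadoopKerberosName path produced.
    String renewer = SecurityUtil.getServerPrincipal(
        conf.get(YarnConfiguration.RM_PRINCIPAL), rmAddr.getHostName());
    System.out.println(renewer); // rm/rm-host.example.com@EXAMPLE.COM
  }
}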
http://git-wip-us.apache.org/repos/asf/hadoop/blob/71fbb474/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
index d7bea7a..ca7c50a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
@@ -852,7 +852,25 @@ public class TestYarnClient {
client.stop();
}
}
-
+
+ @Test
+ public void testParseTimelineDelegationTokenRenewer() throws Exception {
+ // Client side
+ YarnClientImpl client = (YarnClientImpl) YarnClient.createYarnClient();
+ Configuration conf = new YarnConfiguration();
+ conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
+ conf.set(YarnConfiguration.RM_PRINCIPAL, "rm/_HOST@EXAMPLE.COM");
+ conf.set(
+ YarnConfiguration.RM_ADDRESS, "localhost:8188");
+ try {
+ client.init(conf);
+ client.start();
+ Assert.assertEquals("rm/localhost@EXAMPLE.COM", client.timelineDTRenewer);
+ } finally {
+ client.stop();
+ }
+ }
+
@Test
public void testReservationAPIs() {
// initialize
http://git-wip-us.apache.org/repos/asf/hadoop/blob/71fbb474/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/security/TestYARNTokenIdentifier.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/security/TestYARNTokenIdentifier.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/security/TestYARNTokenIdentifier.java
index 2052c23..dc4f9e2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/security/TestYARNTokenIdentifier.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/security/TestYARNTokenIdentifier.java
@@ -19,14 +19,18 @@ package org.apache.hadoop.yarn.security;
import java.io.IOException;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.HadoopKerberosName;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.security.client.ClientToAMTokenIdentifier;
import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier;
import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;
@@ -299,4 +303,19 @@ public class TestYARNTokenIdentifier {
anotherToken.getMasterKeyId(), masterKeyId);
}
+ @Test
+ public void testParseTimelineDelegationTokenIdentifierRenewer() throws IOException {
+ // Server side, when generating a timeline DT
+ Configuration conf = new YarnConfiguration();
+ conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTH_TO_LOCAL,
+ "RULE:[2:$1@$0]([nr]m@.*EXAMPLE.COM)s/.*/yarn/");
+ HadoopKerberosName.setConfiguration(conf);
+ Text owner = new Text("owner");
+ Text renewer = new Text("rm/localhost@EXAMPLE.COM");
+ Text realUser = new Text("realUser");
+ TimelineDelegationTokenIdentifier token =
+ new TimelineDelegationTokenIdentifier(owner, renewer, realUser);
+ Assert.assertEquals(new Text("yarn"), token.getRenewer());
+ }
+
}
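The test's auth_to_local rule maps any two-component rm/... or nm/... principal in EXAMPLE.COM to the local user yarn. A brief sketch of the same mapping outside the token class; the rule string is taken from the test above, the class name is an assumption:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.security.HadoopKerberosName;

public class RenewerMappingSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTH_TO_LOCAL,
        "RULE:[2:$1@$0]([nr]m@.*EXAMPLE.COM)s/.*/yarn/");
    HadoopKerberosName.setConfiguration(conf);
    // The token identifier applies the same mapping to its renewer field,
    // which is why the full principal round-trips to the short name "yarn".
    String shortName =
        new HadoopKerberosName("rm/localhost@EXAMPLE.COM").getShortName();
    System.out.println(shortName); // yarn
  }
}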
[4/9] git commit: HDFS-7324. haadmin command usage prints incorrect
command name. Contributed by Brahma Reddy Battula.
Posted by vi...@apache.org.
HDFS-7324. haadmin command usage prints incorrect command name. Contributed by Brahma Reddy Battula.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/237890fe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/237890fe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/237890fe
Branch: refs/heads/HDFS-EC
Commit: 237890feabc809ade4e7542039634e04219d0bcb
Parents: 58e9f24
Author: Suresh Srinivas <su...@yahoo-inc.com>
Authored: Mon Nov 3 13:15:14 2014 -0800
Committer: Suresh Srinivas <su...@yahoo-inc.com>
Committed: Mon Nov 3 13:27:09 2014 -0800
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
.../src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java | 2 +-
2 files changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/237890fe/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 6c11c9f..16040ed 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -384,6 +384,9 @@ Release 2.7.0 - UNRELEASED
HDFS-7315. DFSTestUtil.readFileBuffer opens extra FSDataInputStream.
(Plamen Jeliazkov via wheat9)
+ HDFS-7324. haadmin command usage prints incorrect command name.
+ (Brahma Reddy Battula via suresh)
+
Release 2.6.0 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/237890fe/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java
index 5c4b49d..1ec6d35 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java
@@ -89,7 +89,7 @@ public class DFSHAAdmin extends HAAdmin {
@Override
protected String getUsageString() {
- return "Usage: DFSHAAdmin [-ns <nameserviceId>]";
+ return "Usage: haadmin";
}
@Override
[9/9] git commit: HDFS-7328. TestTraceAdmin assumes Unix line
endings. Contributed by Chris Nauroth.
Posted by vi...@apache.org.
HDFS-7328. TestTraceAdmin assumes Unix line endings. Contributed by Chris Nauroth.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2bb327eb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2bb327eb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2bb327eb
Branch: refs/heads/HDFS-EC
Commit: 2bb327eb939f57626d3dac10f7016ed634375d94
Parents: c5a46d4
Author: cnauroth <cn...@apache.org>
Authored: Mon Nov 3 19:59:52 2014 -0800
Committer: cnauroth <cn...@apache.org>
Committed: Mon Nov 3 19:59:52 2014 -0800
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++
.../java/org/apache/hadoop/tracing/TestTraceAdmin.java | 13 +++++++------
2 files changed, 9 insertions(+), 6 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bb327eb/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index dfe8f4d..be7b9bf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1356,6 +1356,8 @@ Release 2.6.0 - UNRELEASED
HDFS-7274. Disable SSLv3 in HttpFS. (Robert Kanter via kasha)
+ HDFS-7328. TestTraceAdmin assumes Unix line endings. (cnauroth)
+
Release 2.5.1 - 2014-09-05
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bb327eb/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTraceAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTraceAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTraceAdmin.java
index 79a1f39..77860ba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTraceAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTraceAdmin.java
@@ -28,6 +28,7 @@ import java.io.File;
import java.io.PrintStream;
public class TestTraceAdmin {
+ private static final String NEWLINE = System.getProperty("line.separator");
private String runTraceCommand(TraceAdmin trace, String... cmd)
throws Exception {
@@ -66,10 +67,10 @@ public class TestTraceAdmin {
try {
TraceAdmin trace = new TraceAdmin();
trace.setConf(conf);
- Assert.assertEquals("ret:0, [no span receivers found]\n",
+ Assert.assertEquals("ret:0, [no span receivers found]" + NEWLINE,
runTraceCommand(trace, "-list", "-host", getHostPortForNN(cluster)));
Assert.assertEquals("ret:0, Added trace span receiver 1 with " +
- "configuration local-file-span-receiver.path = " + tracePath + "\n",
+ "configuration local-file-span-receiver.path = " + tracePath + NEWLINE,
runTraceCommand(trace, "-add", "-host", getHostPortForNN(cluster),
"-class", "org.htrace.impl.LocalFileSpanReceiver",
"-Clocal-file-span-receiver.path=" + tracePath));
@@ -77,17 +78,17 @@ public class TestTraceAdmin {
runTraceCommand(trace, "-list", "-host", getHostPortForNN(cluster));
Assert.assertTrue(list.startsWith("ret:0"));
Assert.assertTrue(list.contains("1 org.htrace.impl.LocalFileSpanReceiver"));
- Assert.assertEquals("ret:0, Removed trace span receiver 1\n",
+ Assert.assertEquals("ret:0, Removed trace span receiver 1" + NEWLINE,
runTraceCommand(trace, "-remove", "1", "-host",
getHostPortForNN(cluster)));
- Assert.assertEquals("ret:0, [no span receivers found]\n",
+ Assert.assertEquals("ret:0, [no span receivers found]" + NEWLINE,
runTraceCommand(trace, "-list", "-host", getHostPortForNN(cluster)));
Assert.assertEquals("ret:0, Added trace span receiver 2 with " +
- "configuration local-file-span-receiver.path = " + tracePath + "\n",
+ "configuration local-file-span-receiver.path = " + tracePath + NEWLINE,
runTraceCommand(trace, "-add", "-host", getHostPortForNN(cluster),
"-class", "LocalFileSpanReceiver",
"-Clocal-file-span-receiver.path=" + tracePath));
- Assert.assertEquals("ret:0, Removed trace span receiver 2\n",
+ Assert.assertEquals("ret:0, Removed trace span receiver 2" + NEWLINE,
runTraceCommand(trace, "-remove", "2", "-host",
getHostPortForNN(cluster)));
} finally {
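The portability point in one hedged snippet (the expected string is a sample from the test above; on Windows the platform separator is "\r\n", so the old hard-coded "\n" comparisons failed there):

public class NewlineSketch {
  private static final String NEWLINE = System.getProperty("line.separator");

  public static void main(String[] args) {
    // "\n" only matches Unix output; NEWLINE matches whatever separator the
    // JVM's platform uses, which is what the trace admin command emits.
    String expected = "ret:0, [no span receivers found]" + NEWLINE;
    System.out.print(expected);
  }
}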