Posted to common-commits@hadoop.apache.org by vi...@apache.org on 2014/11/06 09:04:12 UTC
[01/43] git commit: HDFS-7340. Make rollingUpgrade start/finalize idempotent. Contributed by Jing Zhao.
Repository: hadoop
Updated Branches:
refs/heads/HDFS-EC 094cfd228 -> 4ce3a132e
HDFS-7340. Make rollingUpgrade start/finalize idempotent. Contributed by Jing Zhao.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3dfd6e68
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3dfd6e68
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3dfd6e68
Branch: refs/heads/HDFS-EC
Commit: 3dfd6e68fe5028fe3766ae5056dc175c38cc97e1
Parents: 2bb327e
Author: Jing Zhao <ji...@apache.org>
Authored: Tue Nov 4 10:16:37 2014 -0800
Committer: Jing Zhao <ji...@apache.org>
Committed: Tue Nov 4 10:16:37 2014 -0800
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++
.../hadoop/hdfs/server/namenode/FSNamesystem.java | 15 ++++++++-------
.../hdfs/server/namenode/NameNodeRpcServer.java | 3 ++-
.../java/org/apache/hadoop/hdfs/tools/DFSAdmin.java | 5 +++--
.../org/apache/hadoop/hdfs/TestRollingUpgrade.java | 8 ++++----
5 files changed, 19 insertions(+), 14 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3dfd6e68/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index be7b9bf..4bc833f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -996,6 +996,8 @@ Release 2.6.0 - UNRELEASED
HDFS-7147. Update archival storage user documentation.
(Tsz Wo Nicholas Sze via wheat9)
+ HDFS-7340. Make rollingUpgrade start/finalize idempotent. (jing9)
+
BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
HDFS-6387. HDFS CLI admin tool for creating & deleting an
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3dfd6e68/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 93ff63e..2bc4ba0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -8293,6 +8293,9 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
writeLock();
try {
checkOperation(OperationCategory.WRITE);
+ if (isRollingUpgrade()) {
+ return rollingUpgradeInfo;
+ }
long startTime = now();
if (!haEnabled) { // for non-HA, we require NN to be in safemode
startRollingUpgradeInternalForNonHA(startTime);
@@ -8401,13 +8404,16 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
}
}
- RollingUpgradeInfo finalizeRollingUpgrade() throws IOException {
+ void finalizeRollingUpgrade() throws IOException {
checkSuperuserPrivilege();
checkOperation(OperationCategory.WRITE);
writeLock();
final RollingUpgradeInfo returnInfo;
try {
checkOperation(OperationCategory.WRITE);
+ if (!isRollingUpgrade()) {
+ return;
+ }
checkNameNodeSafeMode("Failed to finalize rolling upgrade");
returnInfo = finalizeRollingUpgradeInternal(now());
@@ -8431,16 +8437,11 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
if (auditLog.isInfoEnabled() && isExternalInvocation()) {
logAuditEvent(true, "finalizeRollingUpgrade", null, null, null);
}
- return returnInfo;
+ return;
}
RollingUpgradeInfo finalizeRollingUpgradeInternal(long finalizeTime)
throws RollingUpgradeException {
- if (!isRollingUpgrade()) {
- throw new RollingUpgradeException(
- "Failed to finalize rolling upgrade since there is no rolling upgrade in progress.");
- }
-
final long startTime = rollingUpgradeInfo.getStartTime();
rollingUpgradeInfo = null;
return new RollingUpgradeInfo(blockPoolId, false, startTime, finalizeTime);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3dfd6e68/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index 9504883..5b36154 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -959,7 +959,8 @@ class NameNodeRpcServer implements NamenodeProtocols {
case PREPARE:
return namesystem.startRollingUpgrade();
case FINALIZE:
- return namesystem.finalizeRollingUpgrade();
+ namesystem.finalizeRollingUpgrade();
+ return null;
default:
throw new UnsupportedActionException(action + " is not yet supported.");
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3dfd6e68/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index fed516b..484ac12 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -334,7 +334,8 @@ public class DFSAdmin extends FsShell {
out.println(info);
}
} else {
- out.println("There is no rolling upgrade in progress.");
+ out.println("There is no rolling upgrade in progress or rolling " +
+ "upgrade has already been finalized.");
}
}
@@ -356,7 +357,7 @@ public class DFSAdmin extends FsShell {
Preconditions.checkState(info.isStarted());
break;
case FINALIZE:
- Preconditions.checkState(info.isFinalized());
+ Preconditions.checkState(info == null || info.isFinalized());
break;
}
printMessage(info, System.out);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3dfd6e68/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java
index 40aa37a..8e7b4b1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java
@@ -239,9 +239,9 @@ public class TestRollingUpgrade {
Assert.assertTrue(dfs2.exists(baz));
//finalize rolling upgrade
- final RollingUpgradeInfo finalize = dfs2.rollingUpgrade(RollingUpgradeAction.FINALIZE);
- LOG.info("FINALIZE: " + finalize);
- Assert.assertEquals(info1.getStartTime(), finalize.getStartTime());
+ final RollingUpgradeInfo finalize = dfs2.rollingUpgrade(
+ RollingUpgradeAction.FINALIZE);
+ Assert.assertNull(finalize);
LOG.info("RESTART cluster 2 with regular startup option");
cluster2.getNameNodeInfos()[0].setStartOpt(StartupOption.REGULAR);
@@ -385,7 +385,7 @@ public class TestRollingUpgrade {
Assert.assertTrue(fsimage.hasRollbackFSImage());
info = dfs.rollingUpgrade(RollingUpgradeAction.FINALIZE);
- Assert.assertTrue(info.isFinalized());
+ Assert.assertNull(info);
Assert.assertTrue(dfs.exists(foo));
// Once finalized, there should be no more fsimage for rollbacks.
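With this change both rolling-upgrade actions become safe to retry: a repeated PREPARE returns the already-existing RollingUpgradeInfo instead of failing, and FINALIZE now returns void (null over RPC) and silently succeeds when no upgrade is in progress. A minimal client-side sketch of the new behavior, assuming an already-initialized DistributedFileSystem handle and the protocol classes from this branch:

    import java.io.IOException;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
    import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;

    static void retryRollingUpgrade(DistributedFileSystem dfs) throws IOException {
      // A second PREPARE is now a no-op that reports the in-progress upgrade.
      RollingUpgradeInfo first = dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
      RollingUpgradeInfo again = dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
      assert first.getStartTime() == again.getStartTime();

      // FINALIZE returns null after this patch (see the test change above), and
      // repeating it when no upgrade is in progress no longer throws
      // RollingUpgradeException.
      dfs.rollingUpgrade(RollingUpgradeAction.FINALIZE);
      dfs.rollingUpgrade(RollingUpgradeAction.FINALIZE);
    }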
[30/43] git commit: HADOOP-11267. TestSecurityUtil fails when run with JDK8 because of empty principal names. Contributed by Stephen Chu.
Posted by vi...@apache.org.
HADOOP-11267. TestSecurityUtil fails when run with JDK8 because of empty principal names. Contributed by Stephen Chu.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8549fa5d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8549fa5d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8549fa5d
Branch: refs/heads/HDFS-EC
Commit: 8549fa5dc95d3e94e49c9b92734aec0509693a2a
Parents: 834e931
Author: Haohui Mai <wh...@apache.org>
Authored: Wed Nov 5 15:25:35 2014 -0800
Committer: Haohui Mai <wh...@apache.org>
Committed: Wed Nov 5 15:34:27 2014 -0800
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
.../test/java/org/apache/hadoop/security/TestSecurityUtil.java | 4 ----
2 files changed, 3 insertions(+), 4 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8549fa5d/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index dbfb7df..422bc3e 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -403,6 +403,9 @@ Release 2.7.0 - UNRELEASED
HADOOP-11266. Remove no longer supported activation properties for packaging
from pom. (Masatake Iwasaki via wheat9)
+ HADOOP-11267. TestSecurityUtil fails when run with JDK8 because of empty
+ principal names. (Stephen Chu via wheat9)
+
Release 2.6.0 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8549fa5d/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestSecurityUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestSecurityUtil.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestSecurityUtil.java
index 8c60734..4616c90 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestSecurityUtil.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestSecurityUtil.java
@@ -56,12 +56,8 @@ public class TestSecurityUtil {
assertFalse(SecurityUtil.isTGSPrincipal
(new KerberosPrincipal("blah")));
assertFalse(SecurityUtil.isTGSPrincipal
- (new KerberosPrincipal("")));
- assertFalse(SecurityUtil.isTGSPrincipal
(new KerberosPrincipal("krbtgt/hello")));
assertFalse(SecurityUtil.isTGSPrincipal
- (new KerberosPrincipal("/@")));
- assertFalse(SecurityUtil.isTGSPrincipal
(new KerberosPrincipal("krbtgt/foo@FOO")));
}
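The two removed assertions built KerberosPrincipal instances from an empty name and from "/@". Under JDK 8 the KerberosPrincipal constructor rejects such malformed names outright, so the test aborted before SecurityUtil.isTGSPrincipal() was ever exercised; dropping those inputs keeps the remaining assertions meaningful on both JDK 7 and JDK 8. A rough, self-contained illustration of the constructor behavior the removal works around (the exact exception is an assumption; the constructor documents IllegalArgumentException for improperly formatted names):

    import javax.security.auth.kerberos.KerberosPrincipal;

    public class EmptyPrincipalCheck {
      public static void main(String[] args) {
        try {
          // Tolerated on JDK 7; rejected while parsing the name on JDK 8.
          new KerberosPrincipal("");
          System.out.println("empty principal name accepted");
        } catch (IllegalArgumentException e) {
          System.out.println("empty principal name rejected: " + e);
        }
      }
    }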
[02/43] git commit: HADOOP-11165. TestUTF8 fails when run against java 8. Contributed by Stephen Chu.
Posted by vi...@apache.org.
HADOOP-11165. TestUTF8 fails when run against java 8. Contributed by Stephen Chu.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/85da71c2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/85da71c2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/85da71c2
Branch: refs/heads/HDFS-EC
Commit: 85da71c2d3c565a8920e47fe3925e8e0bef353a5
Parents: 3dfd6e6
Author: cnauroth <cn...@apache.org>
Authored: Tue Nov 4 10:27:41 2014 -0800
Committer: cnauroth <cn...@apache.org>
Committed: Tue Nov 4 10:27:41 2014 -0800
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
.../java/org/apache/hadoop/io/TestUTF8.java | 23 ++++++++++++++------
2 files changed, 19 insertions(+), 7 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/85da71c2/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index eb91dcb..1d6adc3 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -383,6 +383,9 @@ Release 2.7.0 - UNRELEASED
HADOOP-11186. documentation should talk about
hadoop.htrace.spanreceiver.classes, not hadoop.trace.spanreceiver.classes (cmccabe)
+ HADOOP-11165. TestUTF8 fails when run against java 8.
+ (Stephen Chu via cnauroth)
+
Release 2.6.0 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/85da71c2/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestUTF8.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestUTF8.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestUTF8.java
index b387224..ede5940 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestUTF8.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestUTF8.java
@@ -19,8 +19,11 @@
package org.apache.hadoop.io;
import junit.framework.TestCase;
+import java.io.ByteArrayInputStream;
+import java.io.DataInputStream;
import java.io.IOException;
import java.io.UTFDataFormatException;
+import java.nio.ByteBuffer;
import java.util.Random;
import org.apache.hadoop.test.GenericTestUtils;
@@ -54,11 +57,22 @@ public class TestUTF8 extends TestCase {
// generate a random string
String before = getTestString();
- // check its utf8
- assertEquals(before, new String(UTF8.getBytes(before), "UTF-8"));
+ // Check that the bytes are stored correctly in Modified-UTF8 format.
+ // Note that the DataInput and DataOutput interfaces convert between
+ // bytes and Strings using the Modified-UTF8 format.
+ assertEquals(before, readModifiedUTF(UTF8.getBytes(before)));
}
}
+ private String readModifiedUTF(byte[] bytes) throws IOException {
+ final short lengthBytes = (short)2;
+ ByteBuffer bb = ByteBuffer.allocate(bytes.length + lengthBytes);
+ bb.putShort((short)bytes.length).put(bytes);
+ ByteArrayInputStream bis = new ByteArrayInputStream(bb.array());
+ DataInputStream dis = new DataInputStream(bis);
+ return dis.readUTF();
+ }
+
public void testIO() throws Exception {
DataOutputBuffer out = new DataOutputBuffer();
DataInputBuffer in = new DataInputBuffer();
@@ -80,11 +94,6 @@ public class TestUTF8 extends TestCase {
in.reset(out.getData(), out.getLength());
String after2 = in.readUTF();
assertEquals(before, after2);
-
- // test that it is compatible with Java's other decoder
- String after3 = new String(out.getData(), 2, out.getLength()-2, "UTF-8");
- assertEquals(before, after3);
-
}
}
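The readModifiedUTF() helper added above frames the raw bytes from UTF8.getBytes() with a two-byte length so DataInputStream.readUTF() can decode them; readUTF() expects exactly the modified UTF-8 form that UTF8 emits (NUL and supplementary characters are encoded differently from standard UTF-8), which is why comparing against new String(bytes, "UTF-8") was no longer a reliable check under Java 8. A self-contained sketch of the same length-prefixed framing using only JDK classes:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    public class ModifiedUtf8RoundTrip {
      public static void main(String[] args) throws IOException {
        // Includes a supplementary character, encoded as a surrogate pair.
        String before = "caf\u00e9 \uD83D\uDE00";

        // writeUTF() emits a 2-byte length followed by modified UTF-8 bytes,
        // the same framing the test helper reconstructs around UTF8.getBytes().
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        new DataOutputStream(bos).writeUTF(before);

        String after = new DataInputStream(
            new ByteArrayInputStream(bos.toByteArray())).readUTF();
        System.out.println(before.equals(after));  // true
      }
    }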
[33/43] git commit: HADOOP-10714. AmazonS3Client.deleteObjects() need to be limited to 1000 entries per call. Contributed by Juan Yu.
Posted by vi...@apache.org.
HADOOP-10714. AmazonS3Client.deleteObjects() need to be limited to 1000 entries per call. Contributed by Juan Yu.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6ba52d88
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6ba52d88
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6ba52d88
Branch: refs/heads/HDFS-EC
Commit: 6ba52d88ec11444cbac946ffadbc645acd0657de
Parents: 395275a
Author: Aaron T. Myers <at...@apache.org>
Authored: Wed Nov 5 17:17:04 2014 -0800
Committer: Aaron T. Myers <at...@apache.org>
Committed: Wed Nov 5 17:17:04 2014 -0800
----------------------------------------------------------------------
.gitignore | 1 +
hadoop-common-project/hadoop-common/CHANGES.txt | 3 +
.../src/site/markdown/filesystem/testing.md | 47 ---
.../hadoop/fs/FileSystemContractBaseTest.java | 6 +-
.../fs/contract/AbstractContractDeleteTest.java | 27 ++
.../fs/contract/AbstractContractMkdirTest.java | 19 +
.../fs/contract/AbstractContractRenameTest.java | 41 ++
.../hadoop/fs/contract/ContractOptions.java | 7 +
.../hadoop/fs/contract/ContractTestUtils.java | 139 +++++++
.../src/test/resources/contract/localfs.xml | 4 +
hadoop-tools/hadoop-aws/pom.xml | 7 +
.../org/apache/hadoop/fs/s3/S3Credentials.java | 4 +-
.../fs/s3a/BasicAWSCredentialsProvider.java | 8 +-
.../org/apache/hadoop/fs/s3a/Constants.java | 7 +-
.../org/apache/hadoop/fs/s3a/S3AFileSystem.java | 147 ++++---
.../apache/hadoop/fs/s3a/S3AInputStream.java | 38 +-
.../apache/hadoop/fs/s3a/S3AOutputStream.java | 18 +-
.../site/markdown/tools/hadoop-aws/index.md | 417 +++++++++++++++++++
.../fs/contract/s3a/TestS3AContractRename.java | 13 +-
.../fs/s3/S3FileSystemContractBaseTest.java | 11 +-
.../fs/s3a/S3AFileSystemContractBaseTest.java | 327 ---------------
.../org/apache/hadoop/fs/s3a/S3ATestUtils.java | 51 +++
.../fs/s3a/TestS3AFileSystemContract.java | 105 +++++
.../hadoop/fs/s3a/scale/S3AScaleTestBase.java | 89 ++++
.../fs/s3a/scale/TestS3ADeleteManyFiles.java | 131 ++++++
.../NativeS3FileSystemContractBaseTest.java | 11 +-
.../TestJets3tNativeFileSystemStore.java | 3 +
.../src/test/resources/contract/s3a.xml | 5 +
.../hadoop-aws/src/test/resources/core-site.xml | 51 +++
29 files changed, 1263 insertions(+), 474 deletions(-)
----------------------------------------------------------------------
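Alongside documentation, test, and logging cleanups, the change that gives this commit its title is in the rename() and delete() hunks of S3AFileSystem.java: accumulated keys are now flushed whenever a batch reaches 1000 entries, since AmazonS3Client.deleteObjects() accepts at most 1000 keys per request. A minimal sketch of that batching pattern using the same AWS SDK types as the patch (client construction and bucket name are assumed):

    import java.util.ArrayList;
    import java.util.List;
    import com.amazonaws.services.s3.AmazonS3Client;
    import com.amazonaws.services.s3.model.DeleteObjectsRequest;

    class BatchedDelete {
      // Matches MAX_ENTRIES_TO_DELETE introduced in S3AFileSystem.
      private static final int MAX_ENTRIES_TO_DELETE = 1000;

      static void deleteAll(AmazonS3Client s3, String bucket, Iterable<String> keys) {
        List<DeleteObjectsRequest.KeyVersion> batch =
            new ArrayList<DeleteObjectsRequest.KeyVersion>();
        for (String key : keys) {
          batch.add(new DeleteObjectsRequest.KeyVersion(key));
          if (batch.size() == MAX_ENTRIES_TO_DELETE) {
            // Flush a full batch before accumulating more keys.
            s3.deleteObjects(new DeleteObjectsRequest(bucket).withKeys(batch));
            batch.clear();
          }
        }
        if (!batch.isEmpty()) {
          s3.deleteObjects(new DeleteObjectsRequest(bucket).withKeys(batch));
        }
      }
    }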
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ba52d88/.gitignore
----------------------------------------------------------------------
diff --git a/.gitignore b/.gitignore
index 8b132cb..15c040c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -21,3 +21,4 @@ hadoop-common-project/hadoop-common/src/test/resources/contract-test-options.xml
hadoop-tools/hadoop-openstack/src/test/resources/contract-test-options.xml
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/tla/yarnregistry.toolbox
yarnregistry.pdf
+hadoop-tools/hadoop-aws/src/test/resources/contract-test-options.xml
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ba52d88/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 422bc3e..8567e1e 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -406,6 +406,9 @@ Release 2.7.0 - UNRELEASED
HADOOP-11267. TestSecurityUtil fails when run with JDK8 because of empty
principal names. (Stephen Chu via wheat9)
+ HADOOP-10714. AmazonS3Client.deleteObjects() need to be limited to 1000
+ entries per call. (Juan Yu via atm)
+
Release 2.6.0 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ba52d88/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/testing.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/testing.md b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/testing.md
index bc66e67..444fb60 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/testing.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/testing.md
@@ -28,53 +28,6 @@ These filesystem bindings must be defined in an XML configuration file, usually
`hadoop-common-project/hadoop-common/src/test/resources/contract-test-options.xml`.
This file is excluded should not be checked in.
-### s3://
-
-In `contract-test-options.xml`, the filesystem name must be defined in the property `fs.contract.test.fs.s3`. The standard configuration options to define the S3 authentication details must also be provided.
-
-Example:
-
- <configuration>
- <property>
- <name>fs.contract.test.fs.s3</name>
- <value>s3://tests3hdfs/</value>
- </property>
-
- <property>
- <name>fs.s3.awsAccessKeyId</name>
- <value>DONOTPCOMMITTHISKEYTOSCM</value>
- </property>
-
- <property>
- <name>fs.s3.awsSecretAccessKey</name>
- <value>DONOTEVERSHARETHISSECRETKEY!</value>
- </property>
- </configuration>
-
-### s3n://
-
-
-In `contract-test-options.xml`, the filesystem name must be defined in the property `fs.contract.test.fs.s3n`. The standard configuration options to define the S3N authentication details muse also be provided.
-
-Example:
-
-
- <configuration>
- <property>
- <name>fs.contract.test.fs.s3n</name>
- <value>s3n://tests3contract</value>
- </property>
-
- <property>
- <name>fs.s3n.awsAccessKeyId</name>
- <value>DONOTPCOMMITTHISKEYTOSCM</value>
- </property>
-
- <property>
- <name>fs.s3n.awsSecretAccessKey</name>
- <value>DONOTEVERSHARETHISSECRETKEY!</value>
- </property>
-
### ftp://
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ba52d88/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
index 5fcf10a..e2005be 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
@@ -484,10 +484,10 @@ public abstract class FileSystemContractBaseTest extends TestCase {
out.close();
}
- private void rename(Path src, Path dst, boolean renameSucceeded,
+ protected void rename(Path src, Path dst, boolean renameSucceeded,
boolean srcExists, boolean dstExists) throws IOException {
- assertEquals("mv " + src + " " + dst,renameSucceeded, fs.rename(src, dst));
- assertEquals("Source exists: " + src, srcExists, fs.exists(src));
+ assertEquals("Rename result", renameSucceeded, fs.rename(src, dst));
+ assertEquals("Source exists", srcExists, fs.exists(src));
assertEquals("Destination exists" + dst, dstExists, fs.exists(dst));
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ba52d88/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractDeleteTest.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractDeleteTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractDeleteTest.java
index c90efd1..2bd60ca 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractDeleteTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractDeleteTest.java
@@ -19,6 +19,7 @@
package org.apache.hadoop.fs.contract;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.FileSystem;
import org.junit.Test;
import java.io.IOException;
@@ -94,4 +95,30 @@ public abstract class AbstractContractDeleteTest extends
ContractTestUtils.assertPathDoesNotExist(getFileSystem(), "not deleted", file);
}
+ @Test
+ public void testDeleteDeepEmptyDir() throws Throwable {
+ mkdirs(path("testDeleteDeepEmptyDir/d1/d2/d3/d4"));
+ assertDeleted(path("testDeleteDeepEmptyDir/d1/d2/d3"), true);
+
+ FileSystem fs = getFileSystem();
+ ContractTestUtils.assertPathDoesNotExist(fs,
+ "not deleted", path("testDeleteDeepEmptyDir/d1/d2/d3/d4"));
+ ContractTestUtils.assertPathDoesNotExist(fs,
+ "not deleted", path("testDeleteDeepEmptyDir/d1/d2/d3"));
+ ContractTestUtils.assertPathExists(fs, "parent dir is deleted",
+ path("testDeleteDeepEmptyDir/d1/d2"));
+ }
+
+ @Test
+ public void testDeleteSingleFile() throws Throwable {
+ // Test delete of just a file
+ Path path = path("testDeleteSingleFile/d1/d2");
+ mkdirs(path);
+ Path file = new Path(path, "childfile");
+ ContractTestUtils.writeTextFile(getFileSystem(), file,
+ "single file to be deleted.", true);
+ ContractTestUtils.assertPathExists(getFileSystem(),
+ "single file not created", file);
+ assertDeleted(file, false);
+ }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ba52d88/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMkdirTest.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMkdirTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMkdirTest.java
index dad3b7f..86fd61f 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMkdirTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMkdirTest.java
@@ -112,4 +112,23 @@ public abstract class AbstractContractMkdirTest extends AbstractFSContractTestBa
assertPathExists("mkdir failed", path);
assertDeleted(path, true);
}
+
+ @Test
+ public void testMkdirSlashHandling() throws Throwable {
+ describe("verify mkdir slash handling");
+ FileSystem fs = getFileSystem();
+
+ // No trailing slash
+ assertTrue(fs.mkdirs(path("testmkdir/a")));
+ assertPathExists("mkdir without trailing slash failed",
+ path("testmkdir/a"));
+
+ // With trailing slash
+ assertTrue(fs.mkdirs(path("testmkdir/b/")));
+ assertPathExists("mkdir with trailing slash failed", path("testmkdir/b/"));
+
+ // Mismatched slashes
+ assertPathExists("check path existence without trailing slash failed",
+ path("testmkdir/b"));
+ }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ba52d88/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRenameTest.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRenameTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRenameTest.java
index 32f27a7..04c444d 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRenameTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRenameTest.java
@@ -182,4 +182,45 @@ public abstract class AbstractContractRenameTest extends
assertFalse(renameCreatesDestDirs);
}
}
+
+ @Test
+ public void testRenameWithNonEmptySubDir() throws Throwable {
+ final Path renameTestDir = path("testRenameWithNonEmptySubDir");
+ final Path srcDir = new Path(renameTestDir, "src1");
+ final Path srcSubDir = new Path(srcDir, "sub");
+ final Path finalDir = new Path(renameTestDir, "dest");
+ FileSystem fs = getFileSystem();
+ boolean renameRemoveEmptyDest = isSupported(RENAME_REMOVE_DEST_IF_EMPTY_DIR);
+ ContractTestUtils.rm(fs, renameTestDir, true, false);
+
+ fs.mkdirs(srcDir);
+ fs.mkdirs(finalDir);
+ ContractTestUtils.writeTextFile(fs, new Path(srcDir, "source.txt"),
+ "this is the file in src dir", false);
+ ContractTestUtils.writeTextFile(fs, new Path(srcSubDir, "subfile.txt"),
+ "this is the file in src/sub dir", false);
+
+ ContractTestUtils.assertPathExists(fs, "not created in src dir",
+ new Path(srcDir, "source.txt"));
+ ContractTestUtils.assertPathExists(fs, "not created in src/sub dir",
+ new Path(srcSubDir, "subfile.txt"));
+
+ fs.rename(srcDir, finalDir);
+ // Accept both POSIX rename behavior and CLI rename behavior
+ if (renameRemoveEmptyDest) {
+ // POSIX rename behavior
+ ContractTestUtils.assertPathExists(fs, "not renamed into dest dir",
+ new Path(finalDir, "source.txt"));
+ ContractTestUtils.assertPathExists(fs, "not renamed into dest/sub dir",
+ new Path(finalDir, "sub/subfile.txt"));
+ } else {
+ // CLI rename behavior
+ ContractTestUtils.assertPathExists(fs, "not renamed into dest dir",
+ new Path(finalDir, "src1/source.txt"));
+ ContractTestUtils.assertPathExists(fs, "not renamed into dest/sub dir",
+ new Path(finalDir, "src1/sub/subfile.txt"));
+ }
+ ContractTestUtils.assertPathDoesNotExist(fs, "not deleted",
+ new Path(srcDir, "source.txt"));
+ }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ba52d88/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractOptions.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractOptions.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractOptions.java
index 61279b0..d9427c6 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractOptions.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractOptions.java
@@ -80,6 +80,13 @@ public interface ContractOptions {
"rename-returns-false-if-source-missing";
/**
+ * Flag to indicate that the FS remove dest first if it is an empty directory
+ * mean the FS honors POSIX rename behavior.
+ * @{value}
+ */
+ String RENAME_REMOVE_DEST_IF_EMPTY_DIR = "rename-remove-dest-if-empty-dir";
+
+ /**
* Flag to indicate that append is supported
* @{value}
*/
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ba52d88/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
index cd9cc1b..3f16724 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
@@ -31,8 +31,11 @@ import org.slf4j.LoggerFactory;
import java.io.EOFException;
import java.io.FileNotFoundException;
import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
import java.util.Arrays;
import java.util.Properties;
+import java.util.UUID;
/**
* Utilities used across test cases
@@ -44,6 +47,13 @@ public class ContractTestUtils extends Assert {
public static final String IO_FILE_BUFFER_SIZE = "io.file.buffer.size";
+ // For scale testing, we can repeatedly write small chunk data to generate
+ // a large file.
+ public static final String IO_CHUNK_BUFFER_SIZE = "io.chunk.buffer.size";
+ public static final int DEFAULT_IO_CHUNK_BUFFER_SIZE = 128;
+ public static final String IO_CHUNK_MODULUS_SIZE = "io.chunk.modulus.size";
+ public static final int DEFAULT_IO_CHUNK_MODULUS_SIZE = 128;
+
/**
* Assert that a property in the property set matches the expected value
* @param props property set
@@ -755,5 +765,134 @@ public class ContractTestUtils extends Assert {
mismatch);
}
+ /**
+ * Receives test data from the given input file and checks the size of the
+ * data as well as the pattern inside the received data.
+ *
+ * @param fs FileSystem
+ * @param path Input file to be checked
+ * @param expectedSize the expected size of the data to be read from the
+ * input file in bytes
+ * @param bufferLen Pattern length
+ * @param modulus Pattern modulus
+ * @throws IOException
+ * thrown if an error occurs while reading the data
+ */
+ public static void verifyReceivedData(FileSystem fs, Path path,
+ final long expectedSize,
+ final int bufferLen,
+ final int modulus) throws IOException {
+ final byte[] testBuffer = new byte[bufferLen];
+
+ long totalBytesRead = 0;
+ int nextExpectedNumber = 0;
+ final InputStream inputStream = fs.open(path);
+ try {
+ while (true) {
+ final int bytesRead = inputStream.read(testBuffer);
+ if (bytesRead < 0) {
+ break;
+ }
+
+ totalBytesRead += bytesRead;
+
+ for (int i = 0; i < bytesRead; ++i) {
+ if (testBuffer[i] != nextExpectedNumber) {
+ throw new IOException("Read number " + testBuffer[i]
+ + " but expected " + nextExpectedNumber);
+ }
+
+ ++nextExpectedNumber;
+ if (nextExpectedNumber == modulus) {
+ nextExpectedNumber = 0;
+ }
+ }
+ }
+
+ if (totalBytesRead != expectedSize) {
+ throw new IOException("Expected to read " + expectedSize +
+ " bytes but only received " + totalBytesRead);
+ }
+ } finally {
+ inputStream.close();
+ }
+ }
+
+ /**
+ * Generates test data of the given size according to some specific pattern
+ * and writes it to the provided output file.
+ *
+ * @param fs FileSystem
+ * @param path Test file to be generated
+ * @param size The size of the test data to be generated in bytes
+ * @param bufferLen Pattern length
+ * @param modulus Pattern modulus
+ * @throws IOException
+ * thrown if an error occurs while writing the data
+ */
+ public static long generateTestFile(FileSystem fs, Path path,
+ final long size,
+ final int bufferLen,
+ final int modulus) throws IOException {
+ final byte[] testBuffer = new byte[bufferLen];
+ for (int i = 0; i < testBuffer.length; ++i) {
+ testBuffer[i] = (byte) (i % modulus);
+ }
+
+ final OutputStream outputStream = fs.create(path, false);
+ long bytesWritten = 0;
+ try {
+ while (bytesWritten < size) {
+ final long diff = size - bytesWritten;
+ if (diff < testBuffer.length) {
+ outputStream.write(testBuffer, 0, (int) diff);
+ bytesWritten += diff;
+ } else {
+ outputStream.write(testBuffer);
+ bytesWritten += testBuffer.length;
+ }
+ }
+
+ return bytesWritten;
+ } finally {
+ outputStream.close();
+ }
+ }
+
+ /**
+ * Creates and reads a file with the given size. The test file is generated
+ * according to a specific pattern so it can be easily verified even if it's
+ * a multi-GB one.
+ * During the read phase the incoming data stream is also checked against
+ * this pattern.
+ *
+ * @param fs FileSystem
+ * @param parent Test file parent dir path
+ * @throws IOException
+ * thrown if an I/O error occurs while writing or reading the test file
+ */
+ public static void createAndVerifyFile(FileSystem fs, Path parent, final long fileSize)
+ throws IOException {
+ int testBufferSize = fs.getConf()
+ .getInt(IO_CHUNK_BUFFER_SIZE, DEFAULT_IO_CHUNK_BUFFER_SIZE);
+ int modulus = fs.getConf()
+ .getInt(IO_CHUNK_MODULUS_SIZE, DEFAULT_IO_CHUNK_MODULUS_SIZE);
+
+ final String objectName = UUID.randomUUID().toString();
+ final Path objectPath = new Path(parent, objectName);
+
+ // Write test file in a specific pattern
+ assertEquals(fileSize,
+ generateTestFile(fs, objectPath, fileSize, testBufferSize, modulus));
+ assertPathExists(fs, "not created successful", objectPath);
+
+ // Now read the same file back and verify its content
+ try {
+ verifyReceivedData(fs, objectPath, fileSize, testBufferSize, modulus);
+ } finally {
+ // Delete test file
+ fs.delete(objectPath, false);
+ }
+ }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ba52d88/hadoop-common-project/hadoop-common/src/test/resources/contract/localfs.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/resources/contract/localfs.xml b/hadoop-common-project/hadoop-common/src/test/resources/contract/localfs.xml
index b8857eb..38d68b3 100644
--- a/hadoop-common-project/hadoop-common/src/test/resources/contract/localfs.xml
+++ b/hadoop-common-project/hadoop-common/src/test/resources/contract/localfs.xml
@@ -57,6 +57,10 @@ case sensitivity and permission options are determined at run time from OS type
<value>true</value>
</property>
+ <property>
+ <name>fs.contract.rename-remove-dest-if-empty-dir</name>
+ <value>true</value>
+ </property>
<!--
checksummed filesystems do not support append; see HADOOP-4292
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ba52d88/hadoop-tools/hadoop-aws/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/pom.xml b/hadoop-tools/hadoop-aws/pom.xml
index 00fd981..e2e821e 100644
--- a/hadoop-tools/hadoop-aws/pom.xml
+++ b/hadoop-tools/hadoop-aws/pom.xml
@@ -83,6 +83,13 @@
<dependencyLocationsEnabled>false</dependencyLocationsEnabled>
</configuration>
</plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-surefire-plugin</artifactId>
+ <configuration>
+ <forkedProcessTimeoutInSeconds>3600</forkedProcessTimeoutInSeconds>
+ </configuration>
+ </plugin>
</plugins>
</build>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ba52d88/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/S3Credentials.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/S3Credentials.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/S3Credentials.java
index 312bf65..6b78ad7 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/S3Credentials.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/S3Credentials.java
@@ -61,10 +61,10 @@ public class S3Credentials {
String secretAccessKeyProperty =
String.format("fs.%s.awsSecretAccessKey", scheme);
if (accessKey == null) {
- accessKey = conf.get(accessKeyProperty);
+ accessKey = conf.getTrimmed(accessKeyProperty);
}
if (secretAccessKey == null) {
- secretAccessKey = conf.get(secretAccessKeyProperty);
+ secretAccessKey = conf.getTrimmed(secretAccessKeyProperty);
}
if (accessKey == null && secretAccessKey == null) {
throw new IllegalArgumentException("AWS " +
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ba52d88/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BasicAWSCredentialsProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BasicAWSCredentialsProvider.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BasicAWSCredentialsProvider.java
index 8d45bc6..9a0adda 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BasicAWSCredentialsProvider.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BasicAWSCredentialsProvider.java
@@ -22,10 +22,11 @@ import com.amazonaws.AmazonClientException;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.auth.AWSCredentials;
+import org.apache.commons.lang.StringUtils;
public class BasicAWSCredentialsProvider implements AWSCredentialsProvider {
- private String accessKey;
- private String secretKey;
+ private final String accessKey;
+ private final String secretKey;
public BasicAWSCredentialsProvider(String accessKey, String secretKey) {
this.accessKey = accessKey;
@@ -33,10 +34,9 @@ public class BasicAWSCredentialsProvider implements AWSCredentialsProvider {
}
public AWSCredentials getCredentials() {
- if (accessKey != null && secretKey != null) {
+ if (!StringUtils.isEmpty(accessKey) && !StringUtils.isEmpty(secretKey)) {
return new BasicAWSCredentials(accessKey, secretKey);
}
-
throw new AmazonClientException(
"Access key or secret key is null");
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ba52d88/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
index 26b7ddd..ee4bf68 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
@@ -20,12 +20,7 @@ package org.apache.hadoop.fs.s3a;
public class Constants {
- // s3 access key
- public static final String ACCESS_KEY = "fs.s3a.access.key";
- // s3 secret key
- public static final String SECRET_KEY = "fs.s3a.secret.key";
-
// number of simultaneous connections to s3
public static final String MAXIMUM_CONNECTIONS = "fs.s3a.connection.maximum";
public static final int DEFAULT_MAXIMUM_CONNECTIONS = 15;
@@ -75,4 +70,6 @@ public class Constants {
"fs.s3a.server-side-encryption-algorithm";
public static final String S3N_FOLDER_SUFFIX = "_$folder$";
+ public static final String FS_S3A_BLOCK_SIZE = "fs.s3a.block.size";
+ public static final String FS_S3A = "s3a";
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ba52d88/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index f6d053c..6bdd233 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -27,6 +27,8 @@ import java.util.ArrayList;
import java.util.Date;
import java.util.List;
+import org.apache.hadoop.fs.s3.S3Credentials;
+
import com.amazonaws.AmazonClientException;
import com.amazonaws.AmazonServiceException;
import com.amazonaws.ClientConfiguration;
@@ -80,6 +82,8 @@ public class S3AFileSystem extends FileSystem {
private CannedAccessControlList cannedACL;
private String serverSideEncryptionAlgorithm;
+ // The maximum number of entries that can be deleted in any call to s3
+ private static final int MAX_ENTRIES_TO_DELETE = 1000;
/** Called after a new FileSystem instance is constructed.
* @param name a uri whose authority section names the host, port, etc.
@@ -95,22 +99,12 @@ public class S3AFileSystem extends FileSystem {
this.getWorkingDirectory());
// Try to get our credentials or just connect anonymously
- String accessKey = conf.get(ACCESS_KEY, null);
- String secretKey = conf.get(SECRET_KEY, null);
-
- String userInfo = name.getUserInfo();
- if (userInfo != null) {
- int index = userInfo.indexOf(':');
- if (index != -1) {
- accessKey = userInfo.substring(0, index);
- secretKey = userInfo.substring(index + 1);
- } else {
- accessKey = userInfo;
- }
- }
+ S3Credentials s3Credentials = new S3Credentials();
+ s3Credentials.initialize(name, conf);
AWSCredentialsProviderChain credentials = new AWSCredentialsProviderChain(
- new BasicAWSCredentialsProvider(accessKey, secretKey),
+ new BasicAWSCredentialsProvider(s3Credentials.getAccessKey(),
+ s3Credentials.getSecretAccessKey()),
new InstanceProfileCredentialsProvider(),
new AnonymousAWSCredentialsProvider()
);
@@ -295,15 +289,12 @@ public class S3AFileSystem extends FileSystem {
String dstKey = pathToKey(dst);
if (srcKey.length() == 0 || dstKey.length() == 0) {
- LOG.info("rename: src or dst are empty");
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("rename: src or dst are empty");
+ }
return false;
}
- if (srcKey.equals(dstKey)) {
- LOG.info("rename: src and dst refer to the same file");
- return true;
- }
-
S3AFileStatus srcStatus;
try {
srcStatus = getFileStatus(src);
@@ -312,20 +303,27 @@ public class S3AFileSystem extends FileSystem {
return false;
}
+ if (srcKey.equals(dstKey)) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("rename: src and dst refer to the same file or directory");
+ }
+ return srcStatus.isFile();
+ }
+
S3AFileStatus dstStatus = null;
try {
dstStatus = getFileStatus(dst);
- if (srcStatus.isFile() && dstStatus.isDirectory()) {
- LOG.info("rename: src is a file and dst is a directory");
+ if (srcStatus.isDirectory() && dstStatus.isFile()) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("rename: src is a directory and dst is a file");
+ }
return false;
}
- if (srcStatus.isDirectory() && dstStatus.isFile()) {
- LOG.info("rename: src is a directory and dst is a file");
+ if (dstStatus.isDirectory() && !dstStatus.isEmptyDirectory()) {
return false;
}
-
} catch (FileNotFoundException e) {
// Parent must exist
Path parent = dst.getParent();
@@ -346,7 +344,18 @@ public class S3AFileSystem extends FileSystem {
if (LOG.isDebugEnabled()) {
LOG.debug("rename: renaming file " + src + " to " + dst);
}
- copyFile(srcKey, dstKey);
+ if (dstStatus != null && dstStatus.isDirectory()) {
+ String newDstKey = dstKey;
+ if (!newDstKey.endsWith("/")) {
+ newDstKey = newDstKey + "/";
+ }
+ String filename =
+ srcKey.substring(pathToKey(src.getParent()).length()+1);
+ newDstKey = newDstKey + filename;
+ copyFile(srcKey, newDstKey);
+ } else {
+ copyFile(srcKey, dstKey);
+ }
delete(src, false);
} else {
if (LOG.isDebugEnabled()) {
@@ -362,12 +371,19 @@ public class S3AFileSystem extends FileSystem {
srcKey = srcKey + "/";
}
+ //Verify dest is not a child of the source directory
+ if (dstKey.startsWith(srcKey)) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("cannot rename a directory to a subdirectory of self");
+ }
+ return false;
+ }
+
List<DeleteObjectsRequest.KeyVersion> keysToDelete =
new ArrayList<DeleteObjectsRequest.KeyVersion>();
if (dstStatus != null && dstStatus.isEmptyDirectory()) {
- copyFile(srcKey, dstKey);
- statistics.incrementWriteOps(1);
- keysToDelete.add(new DeleteObjectsRequest.KeyVersion(srcKey));
+ // delete unnecessary fake directory.
+ keysToDelete.add(new DeleteObjectsRequest.KeyVersion(dstKey));
}
ListObjectsRequest request = new ListObjectsRequest();
@@ -383,23 +399,29 @@ public class S3AFileSystem extends FileSystem {
keysToDelete.add(new DeleteObjectsRequest.KeyVersion(summary.getKey()));
String newDstKey = dstKey + summary.getKey().substring(srcKey.length());
copyFile(summary.getKey(), newDstKey);
+
+ if (keysToDelete.size() == MAX_ENTRIES_TO_DELETE) {
+ DeleteObjectsRequest deleteRequest =
+ new DeleteObjectsRequest(bucket).withKeys(keysToDelete);
+ s3.deleteObjects(deleteRequest);
+ statistics.incrementWriteOps(1);
+ keysToDelete.clear();
+ }
}
if (objects.isTruncated()) {
objects = s3.listNextBatchOfObjects(objects);
statistics.incrementReadOps(1);
} else {
+ if (keysToDelete.size() > 0) {
+ DeleteObjectsRequest deleteRequest =
+ new DeleteObjectsRequest(bucket).withKeys(keysToDelete);
+ s3.deleteObjects(deleteRequest);
+ statistics.incrementWriteOps(1);
+ }
break;
}
}
-
-
- if (!keysToDelete.isEmpty()) {
- DeleteObjectsRequest deleteRequest = new DeleteObjectsRequest(bucket);
- deleteRequest.setKeys(keysToDelete);
- s3.deleteObjects(deleteRequest);
- statistics.incrementWriteOps(1);
- }
}
if (src.getParent() != dst.getParent()) {
@@ -419,7 +441,9 @@ public class S3AFileSystem extends FileSystem {
* @throws IOException
*/
public boolean delete(Path f, boolean recursive) throws IOException {
- LOG.info("Delete path " + f + " - recursive " + recursive);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Delete path " + f + " - recursive " + recursive);
+ }
S3AFileStatus status;
try {
status = getFileStatus(f);
@@ -479,18 +503,26 @@ public class S3AFileSystem extends FileSystem {
if (LOG.isDebugEnabled()) {
LOG.debug("Got object to delete " + summary.getKey());
}
- }
- DeleteObjectsRequest deleteRequest = new DeleteObjectsRequest(bucket);
- deleteRequest.setKeys(keys);
- s3.deleteObjects(deleteRequest);
- statistics.incrementWriteOps(1);
- keys.clear();
+ if (keys.size() == MAX_ENTRIES_TO_DELETE) {
+ DeleteObjectsRequest deleteRequest =
+ new DeleteObjectsRequest(bucket).withKeys(keys);
+ s3.deleteObjects(deleteRequest);
+ statistics.incrementWriteOps(1);
+ keys.clear();
+ }
+ }
if (objects.isTruncated()) {
objects = s3.listNextBatchOfObjects(objects);
statistics.incrementReadOps(1);
} else {
+ if (keys.size() > 0) {
+ DeleteObjectsRequest deleteRequest =
+ new DeleteObjectsRequest(bucket).withKeys(keys);
+ s3.deleteObjects(deleteRequest);
+ statistics.incrementWriteOps(1);
+ }
break;
}
}
@@ -530,7 +562,9 @@ public class S3AFileSystem extends FileSystem {
public FileStatus[] listStatus(Path f) throws FileNotFoundException,
IOException {
String key = pathToKey(f);
- LOG.info("List status for path: " + f);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("List status for path: " + f);
+ }
final List<FileStatus> result = new ArrayList<FileStatus>();
final FileStatus fileStatus = getFileStatus(f);
@@ -640,7 +674,10 @@ public class S3AFileSystem extends FileSystem {
// TODO: If we have created an empty file at /foo/bar and we then call
// mkdirs for /foo/bar/baz/roo what happens to the empty file /foo/bar/?
public boolean mkdirs(Path f, FsPermission permission) throws IOException {
- LOG.info("Making directory: " + f);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Making directory: " + f);
+ }
+
try {
FileStatus fileStatus = getFileStatus(f);
@@ -680,8 +717,10 @@ public class S3AFileSystem extends FileSystem {
*/
public S3AFileStatus getFileStatus(Path f) throws IOException {
String key = pathToKey(f);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Getting path status for " + f + " (" + key + ")");
+ }
- LOG.info("Getting path status for " + f + " (" + key + ")");
if (!key.isEmpty()) {
try {
@@ -723,7 +762,7 @@ public class S3AFileSystem extends FileSystem {
}
return new S3AFileStatus(true, true, f.makeQualified(uri, workingDir));
} else {
- LOG.warn("Found file (with /): real file? should not happen: " + key);
+ LOG.warn("Found file (with /): real file? should not happen: {}", key);
return new S3AFileStatus(meta.getContentLength(), dateToLong(meta.getLastModified()),
f.makeQualified(uri, workingDir));
@@ -753,7 +792,8 @@ public class S3AFileSystem extends FileSystem {
ObjectListing objects = s3.listObjects(request);
statistics.incrementReadOps(1);
- if (objects.getCommonPrefixes().size() > 0 || objects.getObjectSummaries().size() > 0) {
+ if (!objects.getCommonPrefixes().isEmpty()
+ || objects.getObjectSummaries().size() > 0) {
if (LOG.isDebugEnabled()) {
LOG.debug("Found path as directory (with /): " +
objects.getCommonPrefixes().size() + "/" +
@@ -806,8 +846,9 @@ public class S3AFileSystem extends FileSystem {
if (!overwrite && exists(dst)) {
throw new IOException(dst + " already exists");
}
-
- LOG.info("Copying local file from " + src + " to " + dst);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Copying local file from " + src + " to " + dst);
+ }
// Since we have a local file, we don't need to stream into a temporary file
LocalFileSystem local = getLocal(getConf());
@@ -992,7 +1033,7 @@ public class S3AFileSystem extends FileSystem {
@Deprecated
public long getDefaultBlockSize() {
// default to 32MB: large enough to minimize the impact of seeks
- return getConf().getLong("fs.s3a.block.size", 32 * 1024 * 1024);
+ return getConf().getLong(FS_S3A_BLOCK_SIZE, 32 * 1024 * 1024);
}
private void printAmazonServiceException(AmazonServiceException ase) {
@@ -1010,6 +1051,6 @@ public class S3AFileSystem extends FileSystem {
LOG.info("Caught an AmazonClientException, which means the client encountered " +
"a serious internal problem while trying to communicate with S3, " +
"such as not being able to access the network.");
- LOG.info("Error Message: " + ace.getMessage());
+ LOG.info("Error Message: {}" + ace, ace);
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ba52d88/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
index f65a5b0..4c56b82 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
@@ -22,6 +22,7 @@ import com.amazonaws.services.s3.AmazonS3Client;
import com.amazonaws.services.s3.model.GetObjectRequest;
import com.amazonaws.services.s3.model.S3Object;
import com.amazonaws.services.s3.model.S3ObjectInputStream;
+import org.apache.hadoop.fs.FSExceptionMessages;
import org.apache.hadoop.fs.FSInputStream;
import org.apache.hadoop.fs.FileSystem;
@@ -65,6 +66,7 @@ public class S3AInputStream extends FSInputStream {
}
private synchronized void reopen(long pos) throws IOException {
+
if (wrappedStream != null) {
if (LOG.isDebugEnabled()) {
LOG.debug("Aborting old stream to open at pos " + pos);
@@ -73,15 +75,17 @@ public class S3AInputStream extends FSInputStream {
}
if (pos < 0) {
- throw new EOFException("Trying to seek to a negative offset " + pos);
+ throw new EOFException(FSExceptionMessages.NEGATIVE_SEEK
+ +" " + pos);
}
if (contentLength > 0 && pos > contentLength-1) {
- throw new EOFException("Trying to seek to an offset " + pos +
- " past the end of the file");
+ throw new EOFException(
+ FSExceptionMessages.CANNOT_SEEK_PAST_EOF
+ + " " + pos);
}
- LOG.info("Actually opening file " + key + " at pos " + pos);
+ LOG.debug("Actually opening file " + key + " at pos " + pos);
GetObjectRequest request = new GetObjectRequest(bucket, key);
request.setRange(pos, contentLength-1);
@@ -103,11 +107,14 @@ public class S3AInputStream extends FSInputStream {
@Override
public synchronized void seek(long pos) throws IOException {
+ checkNotClosed();
+
if (this.pos == pos) {
return;
}
- LOG.info("Reopening " + this.key + " to seek to new offset " + (pos - this.pos));
+ LOG.debug(
+ "Reopening " + this.key + " to seek to new offset " + (pos - this.pos));
reopen(pos);
}
@@ -118,9 +125,7 @@ public class S3AInputStream extends FSInputStream {
@Override
public synchronized int read() throws IOException {
- if (closed) {
- throw new IOException("Stream closed");
- }
+ checkNotClosed();
openIfNeeded();
@@ -148,10 +153,8 @@ public class S3AInputStream extends FSInputStream {
}
@Override
- public synchronized int read(byte buf[], int off, int len) throws IOException {
- if (closed) {
- throw new IOException("Stream closed");
- }
+ public synchronized int read(byte[] buf, int off, int len) throws IOException {
+ checkNotClosed();
openIfNeeded();
@@ -179,6 +182,12 @@ public class S3AInputStream extends FSInputStream {
return byteRead;
}
+ private void checkNotClosed() throws IOException {
+ if (closed) {
+ throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
+ }
+ }
+
@Override
public synchronized void close() throws IOException {
super.close();
@@ -190,9 +199,8 @@ public class S3AInputStream extends FSInputStream {
@Override
public synchronized int available() throws IOException {
- if (closed) {
- throw new IOException("Stream closed");
- }
+ checkNotClosed();
+
long remaining = this.contentLength - this.pos;
if (remaining > Integer.MAX_VALUE) {
return Integer.MAX_VALUE;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ba52d88/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AOutputStream.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AOutputStream.java
index 1609b59..7783b99 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AOutputStream.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AOutputStream.java
@@ -87,7 +87,10 @@ public class S3AOutputStream extends OutputStream {
backupFile = lDirAlloc.createTmpFileForWrite("output-", LocalDirAllocator.SIZE_UNKNOWN, conf);
closed = false;
- LOG.info("OutputStream for key '" + key + "' writing to tempfile: " + this.backupFile);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("OutputStream for key '" + key + "' writing to tempfile: " +
+ this.backupFile);
+ }
this.backupStream = new BufferedOutputStream(new FileOutputStream(backupFile));
}
@@ -104,8 +107,10 @@ public class S3AOutputStream extends OutputStream {
}
backupStream.close();
- LOG.info("OutputStream for key '" + key + "' closed. Now beginning upload");
- LOG.info("Minimum upload part size: " + partSize + " threshold " + partSizeThreshold);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("OutputStream for key '" + key + "' closed. Now beginning upload");
+ LOG.debug("Minimum upload part size: " + partSize + " threshold " + partSizeThreshold);
+ }
try {
@@ -146,13 +151,14 @@ public class S3AOutputStream extends OutputStream {
throw new IOException(e);
} finally {
if (!backupFile.delete()) {
- LOG.warn("Could not delete temporary s3a file: " + backupFile);
+ LOG.warn("Could not delete temporary s3a file: {}", backupFile);
}
super.close();
closed = true;
}
-
- LOG.info("OutputStream for key '" + key + "' upload complete");
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("OutputStream for key '" + key + "' upload complete");
+ }
}
@Override
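Two logging idioms appear in this file's changes: concatenated messages wrapped in an `isDebugEnabled()` guard, and the SLF4J parameterized form used for the warning. A small illustrative sketch, using the same logger and fields as the class above:

    // Guarded concatenation: the message string is only built when debug logging is enabled.
    if (LOG.isDebugEnabled()) {
      LOG.debug("OutputStream for key '" + key + "' upload complete");
    }

    // Parameterized form: the logging framework defers formatting until the event is actually emitted.
    LOG.warn("Could not delete temporary s3a file: {}", backupFile);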
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ba52d88/hadoop-tools/hadoop-aws/src/main/site/markdown/tools/hadoop-aws/index.md
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/site/markdown/tools/hadoop-aws/index.md b/hadoop-tools/hadoop-aws/src/main/site/markdown/tools/hadoop-aws/index.md
new file mode 100644
index 0000000..4a1956a
--- /dev/null
+++ b/hadoop-tools/hadoop-aws/src/main/site/markdown/tools/hadoop-aws/index.md
@@ -0,0 +1,417 @@
+<!---
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License. See accompanying LICENSE file.
+-->
+
+# Hadoop-AWS module: Integration with Amazon Web Services
+
+The `hadoop-aws` module provides support for AWS integration. The generated
+JAR file, `hadoop-aws.jar`, also declares a transitive dependency on all
+external artifacts which are needed for this support, enabling downstream
+applications to use it easily.
+
+Features
+
+1. The "classic" `s3:` filesystem for storing objects in Amazon S3 Storage
+1. The second-generation, `s3n:` filesystem, making it easy to share
+data between hadoop and other applications via the S3 object store
+1. The third generation, `s3a:` filesystem. Designed to be a switch in
+replacement for `s3n:`, this filesystem binding supports larger files and promises
+higher performance.
+
+The specifics of using these filesystems are documented below.
+
+## Warning: Object Stores are not filesystems.
+
+Amazon S3 is an example of "an object store". In order to achieve scalability
+and especially high availability, S3 has, as many other cloud object stores
+have done, relaxed some of the constraints which classic "POSIX" filesystems
+promise.
+
+Specifically:
+
+1. Files that are newly created from the Hadoop Filesystem APIs may not be
+immediately visible.
+2. File delete and update operations may not immediately propagate. Old
+copies of the file may exist for an indeterminate time period.
+3. Directory operations: `delete()` and `rename()` are implemented by
+recursive file-by-file operations. They take time at least proportional to
+the number of files, during which time partial updates may be visible. If
+the operations are interrupted, the filesystem is left in an intermediate state.
+
+For further discussion on these topics, please consult
+[The Hadoop FileSystem API Definition](/filesystem).
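+
+One practical consequence: code which creates an object and then immediately
+lists or reads it may need to tolerate a short delay. A purely illustrative
+sketch (bucket, path and retry count are placeholders; exception handling is
+elided):
+
+    FileSystem fs = FileSystem.get(URI.create("s3n://mybucket/"), new Configuration());
+    Path p = new Path("/data/part-0000");
+    FSDataOutputStream out = fs.create(p);
+    out.writeUTF("hello");
+    out.close();
+    // The new object may not be visible straight away: poll rather than assume.
+    for (int i = 0; i < 10 && !fs.exists(p); i++) {
+      Thread.sleep(1000);
+    }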
+
+## Warning #2: your AWS credentials are valuable
+
+Your AWS credentials not only pay for services, they offer read and write
+access to the data. Anyone with the credentials can not only read your
+datasets, they can also delete them.
+
+Do not inadvertently share these credentials through means such as:
+1. Checking in Hadoop configuration files containing the credentials.
+1. Logging them to a console, as they invariably end up being seen.
+
+If you do any of these: change your credentials immediately!
+
+
+## S3
+
+### Authentication properties
+
+ <property>
+ <name>fs.s3.awsAccessKeyId</name>
+ <description>AWS access key ID</description>
+ </property>
+
+ <property>
+ <name>fs.s3.awsSecretAccessKey</name>
+ <description>AWS secret key</description>
+ </property>
+
+
+## S3N
+
+### Authentication properties
+
+ <property>
+ <name>fs.s3n.awsAccessKeyId</name>
+ <description>AWS access key ID</description>
+ </property>
+
+ <property>
+ <name>fs.s3n.awsSecretAccessKey</name>
+ <description>AWS secret key</description>
+ </property>
+
+### Other properties
+
+
+ <property>
+ <name>fs.s3n.block.size</name>
+ <value>67108864</value>
+ <description>Block size to use when reading files using the native S3
+ filesystem (s3n: URIs).</description>
+ </property>
+
+ <property>
+ <name>fs.s3n.multipart.uploads.enabled</name>
+ <value>false</value>
+ <description>Setting this property to true enables multipart uploads to
+ the native S3 filesystem. When uploading a file, it is split into blocks
+ if the size is larger than fs.s3n.multipart.uploads.block.size.
+ </description>
+ </property>
+
+ <property>
+ <name>fs.s3n.multipart.uploads.block.size</name>
+ <value>67108864</value>
+ <description>The block size for multipart uploads to native S3 filesystem.
+ Default size is 64MB.
+ </description>
+ </property>
+
+ <property>
+ <name>fs.s3n.multipart.copy.block.size</name>
+ <value>5368709120</value>
+ <description>The block size for multipart copy in native S3 filesystem.
+ Default size is 5GB.
+ </description>
+ </property>
+
+ <property>
+ <name>fs.s3n.server-side-encryption-algorithm</name>
+ <value></value>
+ <description>Specify a server-side encryption algorithm for S3.
+ The default is NULL, and the only other currently allowable value is AES256.
+ </description>
+ </property>
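+
+These settings normally live in `core-site.xml`, but they can also be supplied
+programmatically before the filesystem instance is created. A short sketch
+(the bucket name is a placeholder):
+
+    Configuration conf = new Configuration();
+    conf.setBoolean("fs.s3n.multipart.uploads.enabled", true);
+    conf.setLong("fs.s3n.multipart.uploads.block.size", 64 * 1024 * 1024);
+    FileSystem fs = FileSystem.get(URI.create("s3n://mybucket/"), conf);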
+
+## S3A
+
+
+### Authentication properties
+
+ <property>
+ <name>fs.s3a.awsAccessKeyId</name>
+ <description>AWS access key ID. Omit for Role-based authentication.</description>
+ </property>
+
+ <property>
+ <name>fs.s3a.awsSecretAccessKey</name>
+ <description>AWS secret key. Omit for Role-based authentication.</description>
+ </property>
+
+### Other properties
+
+ <property>
+ <name>fs.s3a.connection.maximum</name>
+ <value>15</value>
+ <description>Controls the maximum number of simultaneous connections to S3.</description>
+ </property>
+
+ <property>
+ <name>fs.s3a.connection.ssl.enabled</name>
+ <value>true</value>
+ <description>Enables or disables SSL connections to S3.</description>
+ </property>
+
+ <property>
+ <name>fs.s3a.attempts.maximum</name>
+ <value>10</value>
+ <description>How many times we should retry commands on transient errors.</description>
+ </property>
+
+ <property>
+ <name>fs.s3a.connection.timeout</name>
+ <value>5000</value>
+ <description>Socket connection timeout in milliseconds.</description>
+ </property>
+
+ <property>
+ <name>fs.s3a.paging.maximum</name>
+ <value>5000</value>
+ <description>How many keys to request from S3 at a time when doing
+ directory listings.</description>
+ </property>
+
+ <property>
+ <name>fs.s3a.multipart.size</name>
+ <value>104857600</value>
+ <description>How big (in bytes) to split upload or copy operations up into.</description>
+ </property>
+
+ <property>
+ <name>fs.s3a.multipart.threshold</name>
+ <value>2147483647</value>
+ <description>Threshold before uploads or copies use parallel multipart operations.</description>
+ </property>
+
+ <property>
+ <name>fs.s3a.acl.default</name>
+ <description>Set a canned ACL for newly created and copied objects. Value may be private,
+ public-read, public-read-write, authenticated-read, log-delivery-write,
+ bucket-owner-read, or bucket-owner-full-control.</description>
+ </property>
+
+ <property>
+ <name>fs.s3a.multipart.purge</name>
+ <value>false</value>
+ <description>True if you want to purge existing multipart uploads that may not have been
+ completed/aborted correctly</description>
+ </property>
+
+ <property>
+ <name>fs.s3a.multipart.purge.age</name>
+ <value>86400</value>
+ <description>Minimum age in seconds of multipart uploads to purge</description>
+ </property>
+
+ <property>
+ <name>fs.s3a.buffer.dir</name>
+ <value>${hadoop.tmp.dir}/s3a</value>
+ <description>Comma separated list of directories that will be used to buffer file
+ uploads to.</description>
+ </property>
+
+ <property>
+ <name>fs.s3a.impl</name>
+ <value>org.apache.hadoop.fs.s3a.S3AFileSystem</value>
+ <description>The implementation class of the S3A Filesystem</description>
+ </property>
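+
+With `fs.s3a.impl` set as above, the `s3a://` scheme resolves to
+`S3AFileSystem` through the normal `FileSystem.get()` lookup. A brief sketch
+(the bucket name is a placeholder):
+
+    Configuration conf = new Configuration(); // picks up core-site.xml, including fs.s3a.impl
+    FileSystem fs = FileSystem.get(URI.create("s3a://mybucket/"), conf);
+    // fs is now an org.apache.hadoop.fs.s3a.S3AFileSystem instance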
+
+
+## Testing the S3 filesystem clients
+
+To test the S3* filesystem clients, you need to provide two files
+which pass in authentication details to the test runner:
+
+1. `auth-keys.xml`
+1. `core-site.xml`
+
+These are both Hadoop XML configuration files, which must be placed into
+`hadoop-tools/hadoop-aws/src/test/resources`.
+
+
+### `auth-keys.xml`
+
+The presence of this file triggers the testing of the S3 classes.
+
+Without this file, *none of the tests in this module will be executed*.
+
+The XML file must contain all the ID/key information needed to connect
+each of the filesystem clients to the object stores, and a URL for
+each filesystem for its testing.
+
+1. `test.fs.s3n.name` : the URL of the bucket for S3n tests
+1. `test.fs.s3a.name` : the URL of the bucket for S3a tests
+1. `test.fs.s3.name` : the URL of the bucket for "S3" tests
+
+The contents of each bucket will be destroyed during the test process:
+do not use the bucket for any purpose other than testing.
+
+Example:
+
+ <configuration>
+
+ <property>
+ <name>test.fs.s3n.name</name>
+ <value>s3n://test-aws-s3n/</value>
+ </property>
+
+ <property>
+ <name>test.fs.s3a.name</name>
+ <value>s3a://test-aws-s3a/</value>
+ </property>
+
+ <property>
+ <name>test.fs.s3.name</name>
+ <value>s3://test-aws-s3/</value>
+ </property>
+
+ <property>
+ <name>fs.s3.awsAccessKeyId</name>
+ <value>DONOTCOMMITTHISKEYTOSCM</value>
+ </property>
+
+ <property>
+ <name>fs.s3.awsSecretAccessKey</name>
+ <value>DONOTEVERSHARETHISSECRETKEY!</value>
+ </property>
+
+ <property>
+ <name>fs.s3n.awsAccessKeyId</name>
+ <value>DONOTCOMMITTHISKEYTOSCM</value>
+ </property>
+
+ <property>
+ <name>fs.s3n.awsSecretAccessKey</name>
+ <value>DONOTEVERSHARETHISSECRETKEY!</value>
+ </property>
+
+ <property>
+ <name>fs.s3a.awsAccessKeyId</name>
+ <description>AWS access key ID. Omit for Role-based authentication.</description>
+ <value>DONOTCOMMITTHISKEYTOSCM</value>
+ </property>
+
+ <property>
+ <name>fs.s3a.awsSecretAccessKey</name>
+ <description>AWS secret key. Omit for Role-based authentication.</description>
+ <value>DONOTEVERSHARETHISSECRETKEY!</value>
+ </property>
+ </configuration>
+
+## File `contract-test-options.xml`
+
+The file `hadoop-tools/hadoop-aws/src/test/resources/contract-test-options.xml`
+must be created and configured for the test filesystems.
+
+If a specific `fs.contract.test.fs.*` test path is not defined for
+any of the filesystems, those tests will be skipped.
+
+The standard S3 authentication details must also be provided. This can be
+through copy-and-paste of the `auth-keys.xml` credentials, or it can be
+through direct XInclude inclusion.
+
+### s3://
+
+The filesystem name must be defined in the property `fs.contract.test.fs.s3`.
+
+
+Example:
+
+ <property>
+ <name>fs.contract.test.fs.s3</name>
+ <value>s3://test-aws-s3/</value>
+ </property>
+
+### s3n://
+
+
+In the file `src/test/resources/contract-test-options.xml`, the filesystem
+name must be defined in the property `fs.contract.test.fs.s3n`.
+The standard configuration options to define the S3N authentication details
+must also be provided.
+
+Example:
+
+ <property>
+ <name>fs.contract.test.fs.s3n</name>
+ <value>s3n://test-aws-s3n/</value>
+ </property>
+
+### s3a://
+
+
+In the file `src/test/resources/contract-test-options.xml`, the filesystem
+name must be defined in the property `fs.contract.test.fs.s3a`.
+The standard configuration options to define the S3A authentication details
+must also be provided.
+
+Example:
+
+ <property>
+ <name>fs.contract.test.fs.s3a</name>
+ <value>s3a://test-aws-s3a/</value>
+ </property>
+
+### Complete example of `contract-test-options.xml`
+
+
+
+ <?xml version="1.0"?>
+ <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+ <!--
+ ~ Licensed to the Apache Software Foundation (ASF) under one
+ ~ or more contributor license agreements. See the NOTICE file
+ ~ distributed with this work for additional information
+ ~ regarding copyright ownership. The ASF licenses this file
+ ~ to you under the Apache License, Version 2.0 (the
+ ~ "License"); you may not use this file except in compliance
+ ~ with the License. You may obtain a copy of the License at
+ ~
+ ~ http://www.apache.org/licenses/LICENSE-2.0
+ ~
+ ~ Unless required by applicable law or agreed to in writing, software
+ ~ distributed under the License is distributed on an "AS IS" BASIS,
+ ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ ~ See the License for the specific language governing permissions and
+ ~ limitations under the License.
+ -->
+
+ <configuration>
+
+ <include xmlns="http://www.w3.org/2001/XInclude"
+ href="auth-keys.xml"/>
+
+ <property>
+ <name>fs.contract.test.fs.s3</name>
+ <value>s3://test-aws-s3/</value>
+ </property>
+
+
+ <property>
+ <name>fs.contract.test.fs.s3a</name>
+ <value>s3a://test-aws-s3a/</value>
+ </property>
+
+ <property>
+ <name>fs.contract.test.fs.s3n</name>
+ <value>s3n://test-aws-s3n/</value>
+ </property>
+
+ </configuration>
+
+This example pulls in the `auth-keys.xml` file for the credentials.
+This provides a single place to keep the keys up to date, and means
+that the file `contract-test-options.xml` does not contain any
+secret credentials itself.
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ba52d88/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/TestS3AContractRename.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/TestS3AContractRename.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/TestS3AContractRename.java
index 88ed6d6..af1ed37 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/TestS3AContractRename.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/TestS3AContractRename.java
@@ -21,10 +21,10 @@ package org.apache.hadoop.fs.contract.s3a;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractRenameTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.apache.hadoop.fs.contract.AbstractFSContractTestBase;
-import org.apache.hadoop.fs.contract.ContractTestUtils;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.junit.Test;
import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset;
import static org.apache.hadoop.fs.contract.ContractTestUtils.writeDataset;
@@ -51,14 +51,11 @@ public class TestS3AContractRename extends AbstractContractRenameTest {
Path destFilePath = new Path(destDir, "dest-512.txt");
byte[] destDateset = dataset(512, 'A', 'Z');
- writeDataset(fs, destFilePath, destDateset, destDateset.length, 1024, false);
+ writeDataset(fs, destFilePath, destDateset, destDateset.length, 1024,
+ false);
assertIsFile(destFilePath);
boolean rename = fs.rename(srcDir, destDir);
- Path renamedSrcFilePath = new Path(destDir, "source-256.txt");
- assertIsFile(destFilePath);
- assertIsFile(renamedSrcFilePath);
- ContractTestUtils.verifyFileContents(fs, destFilePath, destDateset);
- assertTrue("rename returned false though the contents were copied", rename);
+ assertFalse("s3a doesn't support rename to non-empty directory", rename);
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ba52d88/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3/S3FileSystemContractBaseTest.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3/S3FileSystemContractBaseTest.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3/S3FileSystemContractBaseTest.java
index 28b0507..de106f8 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3/S3FileSystemContractBaseTest.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3/S3FileSystemContractBaseTest.java
@@ -21,13 +21,15 @@ package org.apache.hadoop.fs.s3;
import java.io.IOException;
import java.net.URI;
+import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystemContractBaseTest;
-import org.apache.hadoop.fs.Path;
+import org.junit.internal.AssumptionViolatedException;
public abstract class S3FileSystemContractBaseTest
extends FileSystemContractBaseTest {
+ public static final String KEY_TEST_FS = "test.fs.s3.name";
private FileSystemStore store;
abstract FileSystemStore getFileSystemStore() throws IOException;
@@ -37,7 +39,12 @@ public abstract class S3FileSystemContractBaseTest
Configuration conf = new Configuration();
store = getFileSystemStore();
fs = new S3FileSystem(store);
- fs.initialize(URI.create(conf.get("test.fs.s3.name")), conf);
+ String fsname = conf.get(KEY_TEST_FS);
+ if (StringUtils.isEmpty(fsname)) {
+ throw new AssumptionViolatedException(
+ "No test FS defined in :" + KEY_TEST_FS);
+ }
+ fs.initialize(URI.create(fsname), conf);
}
@Override
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ba52d88/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3AFileSystemContractBaseTest.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3AFileSystemContractBaseTest.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3AFileSystemContractBaseTest.java
deleted file mode 100644
index 8455233..0000000
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3AFileSystemContractBaseTest.java
+++ /dev/null
@@ -1,327 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.s3a;
-
-import static org.junit.Assume.*;
-
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FileSystemContractBaseTest;
-import org.apache.hadoop.fs.Path;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.net.URI;
-import java.util.UUID;
-
-/**
- * Tests a live S3 system. If you keys and bucket aren't specified, all tests
- * are marked as passed
- *
- * This uses BlockJUnit4ClassRunner because FileSystemContractBaseTest from
- * TestCase which uses the old Junit3 runner that doesn't ignore assumptions
- * properly making it impossible to skip the tests if we don't have a valid
- * bucket.
- **/
-public class S3AFileSystemContractBaseTest extends FileSystemContractBaseTest {
- private static final int TEST_BUFFER_SIZE = 128;
- private static final int MODULUS = 128;
-
- protected static final Logger LOG = LoggerFactory.getLogger(S3AFileSystemContractBaseTest.class);
-
- @Override
- public void setUp() throws Exception {
- Configuration conf = new Configuration();
-
- URI testURI = URI.create(conf.get("test.fs.s3a.name"));
-
- boolean liveTest = testURI != null && !testURI.equals("s3a:///");
-
- // This doesn't work with our JUnit 3 style test cases, so instead we'll
- // make this whole class not run by default
- assumeTrue(liveTest);
-
- fs = new S3AFileSystem();
- fs.initialize(testURI, conf);
- super.setUp();
- }
-
- @Override
- protected void tearDown() throws Exception {
- if (fs != null) {
- fs.delete(path("/tests3a"), true);
- }
- super.tearDown();
- }
-
- @Test(timeout = 10000)
- public void testMkdirs() throws IOException {
- // No trailing slash
- assertTrue(fs.mkdirs(path("/tests3a/a")));
- assertTrue(fs.exists(path("/tests3a/a")));
-
- // With trailing slash
- assertTrue(fs.mkdirs(path("/tests3a/b/")));
- assertTrue(fs.exists(path("/tests3a/b/")));
-
- // Two levels deep
- assertTrue(fs.mkdirs(path("/tests3a/c/a/")));
- assertTrue(fs.exists(path("/tests3a/c/a/")));
-
- // Mismatched slashes
- assertTrue(fs.exists(path("/tests3a/c/a")));
- }
-
-
- @Test(timeout=20000)
- public void testDelete() throws IOException {
- // Test deleting an empty directory
- assertTrue(fs.mkdirs(path("/tests3a/d")));
- assertTrue(fs.delete(path("/tests3a/d"), true));
- assertFalse(fs.exists(path("/tests3a/d")));
-
- // Test deleting a deep empty directory
- assertTrue(fs.mkdirs(path("/tests3a/e/f/g/h")));
- assertTrue(fs.delete(path("/tests3a/e/f/g"), true));
- assertFalse(fs.exists(path("/tests3a/e/f/g/h")));
- assertFalse(fs.exists(path("/tests3a/e/f/g")));
- assertTrue(fs.exists(path("/tests3a/e/f")));
-
- // Test delete of just a file
- writeFile(path("/tests3a/f/f/file"), 1000);
- assertTrue(fs.exists(path("/tests3a/f/f/file")));
- assertTrue(fs.delete(path("/tests3a/f/f/file"), false));
- assertFalse(fs.exists(path("/tests3a/f/f/file")));
-
-
- // Test delete of a path with files in various directories
- writeFile(path("/tests3a/g/h/i/file"), 1000);
- assertTrue(fs.exists(path("/tests3a/g/h/i/file")));
- writeFile(path("/tests3a/g/h/j/file"), 1000);
- assertTrue(fs.exists(path("/tests3a/g/h/j/file")));
- try {
- assertFalse(fs.delete(path("/tests3a/g/h"), false));
- fail("Expected delete to fail with recursion turned off");
- } catch (IOException e) {}
- assertTrue(fs.exists(path("/tests3a/g/h/j/file")));
- assertTrue(fs.delete(path("/tests3a/g/h"), true));
- assertFalse(fs.exists(path("/tests3a/g/h/j")));
- }
-
-
- @Test(timeout = 3600000)
- public void testOpenCreate() throws IOException {
- try {
- createAndReadFileTest(1024);
- } catch (IOException e) {
- fail(e.getMessage());
- }
-
- try {
- createAndReadFileTest(5 * 1024 * 1024);
- } catch (IOException e) {
- fail(e.getMessage());
- }
-
- try {
- createAndReadFileTest(20 * 1024 * 1024);
- } catch (IOException e) {
- fail(e.getMessage());
- }
-
- /*
- Enable to test the multipart upload
- try {
- createAndReadFileTest((long)6 * 1024 * 1024 * 1024);
- } catch (IOException e) {
- fail(e.getMessage());
- }
- */
- }
-
- @Test(timeout = 1200000)
- public void testRenameFile() throws IOException {
- Path srcPath = path("/tests3a/a/srcfile");
-
- final OutputStream outputStream = fs.create(srcPath, false);
- generateTestData(outputStream, 11 * 1024 * 1024);
- outputStream.close();
-
- assertTrue(fs.exists(srcPath));
-
- Path dstPath = path("/tests3a/b/dstfile");
-
- assertFalse(fs.rename(srcPath, dstPath));
- assertTrue(fs.mkdirs(dstPath.getParent()));
- assertTrue(fs.rename(srcPath, dstPath));
- assertTrue(fs.exists(dstPath));
- assertFalse(fs.exists(srcPath));
- assertTrue(fs.exists(srcPath.getParent()));
- }
-
-
- @Test(timeout = 10000)
- public void testRenameDirectory() throws IOException {
- Path srcPath = path("/tests3a/a");
-
- assertTrue(fs.mkdirs(srcPath));
- writeFile(new Path(srcPath, "b/testfile"), 1024);
-
- Path nonEmptyPath = path("/tests3a/nonempty");
- writeFile(new Path(nonEmptyPath, "b/testfile"), 1024);
-
- assertFalse(fs.rename(srcPath, nonEmptyPath));
-
- Path dstPath = path("/tests3a/b");
- assertTrue(fs.rename(srcPath, dstPath));
- assertFalse(fs.exists(srcPath));
- assertTrue(fs.exists(new Path(dstPath, "b/testfile")));
- }
-
-
- @Test(timeout=10000)
- public void testSeek() throws IOException {
- Path path = path("/tests3a/testfile.seek");
- writeFile(path, TEST_BUFFER_SIZE * 10);
-
-
- FSDataInputStream inputStream = fs.open(path, TEST_BUFFER_SIZE);
- inputStream.seek(inputStream.getPos() + MODULUS);
-
- testReceivedData(inputStream, TEST_BUFFER_SIZE * 10 - MODULUS);
- }
-
- /**
- * Creates and reads a file with the given size in S3. The test file is
- * generated according to a specific pattern.
- * During the read phase the incoming data stream is also checked against this pattern.
- *
- * @param fileSize
- * the size of the file to be generated in bytes
- * @throws IOException
- * thrown if an I/O error occurs while writing or reading the test file
- */
- private void createAndReadFileTest(final long fileSize) throws IOException {
- final String objectName = UUID.randomUUID().toString();
- final Path objectPath = new Path("/tests3a/", objectName);
-
- // Write test file to S3
- final OutputStream outputStream = fs.create(objectPath, false);
- generateTestData(outputStream, fileSize);
- outputStream.close();
-
- // Now read the same file back from S3
- final InputStream inputStream = fs.open(objectPath);
- testReceivedData(inputStream, fileSize);
- inputStream.close();
-
- // Delete test file
- fs.delete(objectPath, false);
- }
-
-
- /**
- * Receives test data from the given input stream and checks the size of the
- * data as well as the pattern inside the received data.
- *
- * @param inputStream
- * the input stream to read the test data from
- * @param expectedSize
- * the expected size of the data to be read from the input stream in bytes
- * @throws IOException
- * thrown if an error occurs while reading the data
- */
- private void testReceivedData(final InputStream inputStream,
- final long expectedSize) throws IOException {
- final byte[] testBuffer = new byte[TEST_BUFFER_SIZE];
-
- long totalBytesRead = 0;
- int nextExpectedNumber = 0;
- while (true) {
- final int bytesRead = inputStream.read(testBuffer);
- if (bytesRead < 0) {
- break;
- }
-
- totalBytesRead += bytesRead;
-
- for (int i = 0; i < bytesRead; ++i) {
- if (testBuffer[i] != nextExpectedNumber) {
- throw new IOException("Read number " + testBuffer[i] + " but expected "
- + nextExpectedNumber);
- }
-
- ++nextExpectedNumber;
-
- if (nextExpectedNumber == MODULUS) {
- nextExpectedNumber = 0;
- }
- }
- }
-
- if (totalBytesRead != expectedSize) {
- throw new IOException("Expected to read " + expectedSize +
- " bytes but only received " + totalBytesRead);
- }
- }
-
-
- /**
- * Generates test data of the given size according to some specific pattern
- * and writes it to the provided output stream.
- *
- * @param outputStream
- * the output stream to write the data to
- * @param size
- * the size of the test data to be generated in bytes
- * @throws IOException
- * thrown if an error occurs while writing the data
- */
- private void generateTestData(final OutputStream outputStream,
- final long size) throws IOException {
-
- final byte[] testBuffer = new byte[TEST_BUFFER_SIZE];
- for (int i = 0; i < testBuffer.length; ++i) {
- testBuffer[i] = (byte) (i % MODULUS);
- }
-
- long bytesWritten = 0;
- while (bytesWritten < size) {
-
- final long diff = size - bytesWritten;
- if (diff < testBuffer.length) {
- outputStream.write(testBuffer, 0, (int)diff);
- bytesWritten += diff;
- } else {
- outputStream.write(testBuffer);
- bytesWritten += testBuffer.length;
- }
- }
- }
-
- private void writeFile(Path name, int fileSize) throws IOException {
- final OutputStream outputStream = fs.create(name, false);
- generateTestData(outputStream, fileSize);
- outputStream.close();
- }
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ba52d88/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java
new file mode 100644
index 0000000..514647c
--- /dev/null
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.junit.internal.AssumptionViolatedException;
+
+import java.io.IOException;
+import java.net.URI;
+
+public class S3ATestUtils {
+
+ public static S3AFileSystem createTestFileSystem(Configuration conf) throws
+ IOException {
+ String fsname = conf.getTrimmed(TestS3AFileSystemContract.TEST_FS_S3A_NAME, "");
+
+
+ boolean liveTest = !StringUtils.isEmpty(fsname);
+ URI testURI = null;
+ if (liveTest) {
+ testURI = URI.create(fsname);
+ liveTest = testURI.getScheme().equals(Constants.FS_S3A);
+ }
+ if (!liveTest) {
+ // This doesn't work with our JUnit 3 style test cases, so instead we'll
+ // make this whole class not run by default
+ throw new AssumptionViolatedException(
+ "No test filesystem in " + TestS3AFileSystemContract.TEST_FS_S3A_NAME);
+ }
+ S3AFileSystem fs1 = new S3AFileSystem();
+ fs1.initialize(testURI, conf);
+ return fs1;
+ }
+}
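A test that needs a live S3A filesystem can now delegate the skip-if-unconfigured logic to this helper. A hedged sketch of such a test (the class name and path are illustrative; the `ContractTestUtils` helpers are the same ones used elsewhere in this patch):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.contract.ContractTestUtils;
    import org.apache.hadoop.fs.s3a.S3AFileSystem;
    import org.apache.hadoop.fs.s3a.S3ATestUtils;
    import org.junit.Before;
    import org.junit.Test;

    public class TestS3AExample {
      private S3AFileSystem fs;

      @Before
      public void setUp() throws Exception {
        // Skips the run (AssumptionViolatedException) when test.fs.s3a.name is unset.
        fs = S3ATestUtils.createTestFileSystem(new Configuration());
      }

      @Test
      public void testRoundTrip() throws Exception {
        Path path = new Path("/tests3a/example.txt");
        ContractTestUtils.createFile(fs, path, true, "hello".getBytes());
        ContractTestUtils.assertPathExists(fs, "file not created", path);
      }
    }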
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ba52d88/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AFileSystemContract.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AFileSystemContract.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AFileSystemContract.java
new file mode 100644
index 0000000..5c88358
--- /dev/null
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AFileSystemContract.java
@@ -0,0 +1,105 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystemContractBaseTest;
+import org.apache.hadoop.fs.Path;
+
+/**
+ * Tests a live S3 system. If your keys and bucket aren't specified, all tests
+ * are marked as passed.
+ *
+ * This uses BlockJUnit4ClassRunner because FileSystemContractBaseTest extends
+ * TestCase, which uses the old JUnit 3 runner that doesn't ignore assumptions
+ * properly, making it impossible to skip the tests if we don't have a valid
+ * bucket.
+ **/
+public class TestS3AFileSystemContract extends FileSystemContractBaseTest {
+
+ protected static final Logger LOG =
+ LoggerFactory.getLogger(TestS3AFileSystemContract.class);
+ public static final String TEST_FS_S3A_NAME = "test.fs.s3a.name";
+
+ @Override
+ public void setUp() throws Exception {
+ Configuration conf = new Configuration();
+
+ fs = S3ATestUtils.createTestFileSystem(conf);
+ super.setUp();
+ }
+
+ @Override
+ protected void tearDown() throws Exception {
+ if (fs != null) {
+ fs.delete(path("test"), true);
+ }
+ super.tearDown();
+ }
+
+ @Override
+ public void testMkdirsWithUmask() throws Exception {
+ // not supported
+ }
+
+ @Override
+ public void testRenameFileAsExistingFile() throws Exception {
+ if (!renameSupported()) return;
+
+ Path src = path("/test/hadoop/file");
+ createFile(src);
+ Path dst = path("/test/new/newfile");
+ createFile(dst);
+ // s3 doesn't support rename option
+ // rename-overwrites-dest is always allowed.
+ rename(src, dst, true, false, true);
+ }
+
+ @Override
+ public void testRenameDirectoryAsExistingDirectory() throws Exception {
+ if (!renameSupported()) {
+ return;
+ }
+
+ Path src = path("/test/hadoop/dir");
+ fs.mkdirs(src);
+ createFile(path("/test/hadoop/dir/file1"));
+ createFile(path("/test/hadoop/dir/subdir/file2"));
+
+ Path dst = path("/test/new/newdir");
+ fs.mkdirs(dst);
+ rename(src, dst, true, false, true);
+ assertFalse("Nested file1 exists",
+ fs.exists(path("/test/hadoop/dir/file1")));
+ assertFalse("Nested file2 exists",
+ fs.exists(path("/test/hadoop/dir/subdir/file2")));
+ assertTrue("Renamed nested file1 exists",
+ fs.exists(path("/test/new/newdir/file1")));
+ assertTrue("Renamed nested exists",
+ fs.exists(path("/test/new/newdir/subdir/file2")));
+ }
+
+// @Override
+ public void testMoveDirUnderParent() throws Throwable {
+ // not supported because
+ // rename fails if dst is a directory that is not empty.
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ba52d88/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/S3AScaleTestBase.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/S3AScaleTestBase.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/S3AScaleTestBase.java
new file mode 100644
index 0000000..e0cbc92
--- /dev/null
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/S3AScaleTestBase.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a.scale;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.apache.hadoop.fs.s3a.S3AFileSystem;
+import org.apache.hadoop.fs.Path;
+
+import org.apache.hadoop.fs.s3a.S3ATestUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.net.URI;
+
+import static org.junit.Assume.assumeTrue;
+
+/**
+ * Base class for scale tests; here is where the common scale configuration
+ * keys are defined
+ */
+public class S3AScaleTestBase {
+
+ public static final String SCALE_TEST = "scale.test.";
+ public static final String KEY_OPERATION_COUNT =
+ SCALE_TEST + "operation.count";
+ public static final long DEFAULT_OPERATION_COUNT = 2005;
+
+ protected S3AFileSystem fs;
+ private static final Logger LOG =
+ LoggerFactory.getLogger(S3AScaleTestBase.class);
+
+ private Configuration conf;
+
+ /**
+ * Configuration generator. May be overridden to inject
+ * some custom options
+ * @return a configuration with which to create FS instances
+ */
+ protected Configuration createConfiguration() {
+ return new Configuration();
+ }
+
+ /**
+ * Get the configuration used to set up the FS
+ * @return the configuration
+ */
+ public Configuration getConf() {
+ return conf;
+ }
+
+ @Before
+ public void setUp() throws Exception {
+ conf = createConfiguration();
+ fs = S3ATestUtils.createTestFileSystem(conf);
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ ContractTestUtils.rm(fs, getTestPath(), true, true);
+ }
+
+ protected Path getTestPath() {
+ return new Path("/tests3a");
+ }
+
+ protected long getOperationCount() {
+ return getConf().getLong(KEY_OPERATION_COUNT, DEFAULT_OPERATION_COUNT);
+ }
+}
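Subclasses obtain the filesystem from `setUp()` and can tune the workload via `createConfiguration()`. A hypothetical example that lowers the operation count for a quick smoke run:

    // Illustrative subclass; not part of the patch.
    public class TestS3AScaleSmoke extends S3AScaleTestBase {
      @Override
      protected Configuration createConfiguration() {
        Configuration conf = new Configuration();
        conf.setLong(KEY_OPERATION_COUNT, 50); // default is DEFAULT_OPERATION_COUNT (2005)
        return conf;
      }
    }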
[21/43] git commit: HDFS-7335. Redundant checkOperation() in
FSN.analyzeFileState(). Contributed by Milan Desai.
Posted by vi...@apache.org.
HDFS-7335. Redundant checkOperation() in FSN.analyzeFileState(). Contributed by Milan Desai.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6e8722e4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6e8722e4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6e8722e4
Branch: refs/heads/HDFS-EC
Commit: 6e8722e49c29a19dd13e161001d2464bb1f22189
Parents: a7fbd4e
Author: Konstantin V Shvachko <sh...@apache.org>
Authored: Wed Nov 5 09:32:32 2014 -0800
Committer: Konstantin V Shvachko <sh...@apache.org>
Committed: Wed Nov 5 09:32:32 2014 -0800
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
.../java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java | 1 -
2 files changed, 3 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e8722e4/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 707929e..75a7834 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -333,6 +333,9 @@ Release 2.7.0 - UNRELEASED
HDFS-7356. Use DirectoryListing.hasMore() directly in nfs. (Li Lu via jing9)
+ HDFS-7335. Redundant checkOperation() in FSN.analyzeFileState().
+ (Milan Desai via shv)
+
OPTIMIZATIONS
BUG FIXES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e8722e4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 76c1423..8c35315 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -3287,7 +3287,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
checkBlock(previous);
onRetryBlock[0] = null;
- checkOperation(OperationCategory.WRITE);
checkNameNodeSafeMode("Cannot add block to " + src);
// have we exceeded the configured limit of fs objects.
[41/43] git commit: HDFS-7329. Improve logging when MiniDFSCluster
fails to start. Contributed by Byron Wong.
Posted by vi...@apache.org.
HDFS-7329. Improve logging when MiniDFSCluster fails to start. Contributed by Byron Wong.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/228afed1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/228afed1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/228afed1
Branch: refs/heads/HDFS-EC
Commit: 228afed14c4b274e63002540d36b85c99eeddee8
Parents: b0b52c4
Author: Byron Wong <by...@wandisco.com>
Authored: Wed Nov 5 20:56:21 2014 -0800
Committer: Konstantin V Shvachko <sh...@apache.org>
Committed: Wed Nov 5 20:56:21 2014 -0800
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
.../src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java | 2 +-
2 files changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/228afed1/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 59f6d92..d03074b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -344,6 +344,9 @@ Release 2.7.0 - UNRELEASED
HDFS-7361. TestCheckpoint fails after change of log message related to
locking violation. (shv)
+ HDFS-7329. Improve logging when MiniDFSCluster fails to start.
+ (Byron Wong via shv)
+
OPTIMIZATIONS
BUG FIXES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/228afed1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index 178b369..e3c6fc5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -807,7 +807,7 @@ public class MiniDFSCluster {
format, startOpt, clusterId, conf);
} catch (IOException ioe) {
LOG.error("IOE creating namenodes. Permissions dump:\n" +
- createPermissionsDiagnosisString(data_dir));
+ createPermissionsDiagnosisString(data_dir), ioe);
throw ioe;
}
if (format) {
[32/43] HADOOP-10714. AmazonS3Client.deleteObjects() need to be
limited to 1000 entries per call. Contributed by Juan Yu.
Posted by vi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ba52d88/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/TestS3ADeleteManyFiles.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/TestS3ADeleteManyFiles.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/TestS3ADeleteManyFiles.java
new file mode 100644
index 0000000..c913a67
--- /dev/null
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/TestS3ADeleteManyFiles.java
@@ -0,0 +1,131 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a.scale;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorCompletionService;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+
+import static org.junit.Assert.assertEquals;
+
+public class TestS3ADeleteManyFiles extends S3AScaleTestBase {
+ private static final Logger LOG =
+ LoggerFactory.getLogger(TestS3ADeleteManyFiles.class);
+
+
+ @Rule
+ public Timeout testTimeout = new Timeout(30 * 60 * 1000);
+
+ @Test
+ public void testBulkRenameAndDelete() throws Throwable {
+ final Path scaleTestDir = getTestPath();
+ final Path srcDir = new Path(scaleTestDir, "src");
+ final Path finalDir = new Path(scaleTestDir, "final");
+ final long count = getOperationCount();
+ ContractTestUtils.rm(fs, scaleTestDir, true, false);
+
+ fs.mkdirs(srcDir);
+ fs.mkdirs(finalDir);
+
+ int testBufferSize = fs.getConf()
+ .getInt(ContractTestUtils.IO_CHUNK_BUFFER_SIZE,
+ ContractTestUtils.DEFAULT_IO_CHUNK_BUFFER_SIZE);
+ // use Executor to speed up file creation
+ ExecutorService exec = Executors.newFixedThreadPool(16);
+ final ExecutorCompletionService<Boolean> completionService =
+ new ExecutorCompletionService<Boolean>(exec);
+ try {
+ final byte[] data = ContractTestUtils.dataset(testBufferSize, 'a', 'z');
+
+ for (int i = 0; i < count; ++i) {
+ final String fileName = "foo-" + i;
+ completionService.submit(new Callable<Boolean>() {
+ @Override
+ public Boolean call() throws IOException {
+ ContractTestUtils.createFile(fs, new Path(srcDir, fileName),
+ false, data);
+ return fs.exists(new Path(srcDir, fileName));
+ }
+ });
+ }
+ for (int i = 0; i < count; ++i) {
+ final Future<Boolean> future = completionService.take();
+ try {
+ if (!future.get()) {
+ LOG.warn("cannot create file");
+ }
+ } catch (ExecutionException e) {
+ LOG.warn("Error while uploading file", e.getCause());
+ throw e;
+ }
+ }
+ } finally {
+ exec.shutdown();
+ }
+
+ int nSrcFiles = fs.listStatus(srcDir).length;
+ fs.rename(srcDir, finalDir);
+ assertEquals(nSrcFiles, fs.listStatus(finalDir).length);
+ ContractTestUtils.assertPathDoesNotExist(fs, "not deleted after rename",
+ new Path(srcDir, "foo-" + 0));
+ ContractTestUtils.assertPathDoesNotExist(fs, "not deleted after rename",
+ new Path(srcDir, "foo-" + count / 2));
+ ContractTestUtils.assertPathDoesNotExist(fs, "not deleted after rename",
+ new Path(srcDir, "foo-" + (count - 1)));
+ ContractTestUtils.assertPathExists(fs, "not renamed to dest dir",
+ new Path(finalDir, "foo-" + 0));
+ ContractTestUtils.assertPathExists(fs, "not renamed to dest dir",
+ new Path(finalDir, "foo-" + count/2));
+ ContractTestUtils.assertPathExists(fs, "not renamed to dest dir",
+ new Path(finalDir, "foo-" + (count-1)));
+
+ ContractTestUtils.assertDeleted(fs, finalDir, true, false);
+ }
+
+ @Test
+ public void testOpenCreate() throws IOException {
+ Path dir = new Path("/tests3a");
+ ContractTestUtils.createAndVerifyFile(fs, dir, 1024);
+ ContractTestUtils.createAndVerifyFile(fs, dir, 5 * 1024 * 1024);
+ ContractTestUtils.createAndVerifyFile(fs, dir, 20 * 1024 * 1024);
+
+
+ /*
+ Enable to test the multipart upload
+ try {
+ ContractTestUtils.createAndVerifyFile(fs, dir,
+ (long)6 * 1024 * 1024 * 1024);
+ } catch (IOException e) {
+ fail(e.getMessage());
+ }
+ */
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ba52d88/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java
index f6f9ae9..79ef9da 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java
@@ -22,15 +22,17 @@ import java.io.IOException;
import java.io.InputStream;
import java.net.URI;
+import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystemContractBaseTest;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.s3native.NativeS3FileSystem.NativeS3FsInputStream;
+import org.junit.internal.AssumptionViolatedException;
public abstract class NativeS3FileSystemContractBaseTest
extends FileSystemContractBaseTest {
-
+ public static final String KEY_TEST_FS = "test.fs.s3n.name";
private NativeFileSystemStore store;
abstract NativeFileSystemStore getNativeFileSystemStore() throws IOException;
@@ -40,7 +42,12 @@ public abstract class NativeS3FileSystemContractBaseTest
Configuration conf = new Configuration();
store = getNativeFileSystemStore();
fs = new NativeS3FileSystem(store);
- fs.initialize(URI.create(conf.get("test.fs.s3n.name")), conf);
+ String fsname = conf.get(KEY_TEST_FS);
+ if (StringUtils.isEmpty(fsname)) {
+ throw new AssumptionViolatedException(
+ "No test FS defined in :" + KEY_TEST_FS);
+ }
+ fs.initialize(URI.create(fsname), conf);
}
@Override
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ba52d88/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/TestJets3tNativeFileSystemStore.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/TestJets3tNativeFileSystemStore.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/TestJets3tNativeFileSystemStore.java
index b1078a4..dbd476e 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/TestJets3tNativeFileSystemStore.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/TestJets3tNativeFileSystemStore.java
@@ -117,10 +117,13 @@ public class TestJets3tNativeFileSystemStore {
writeRenameReadCompare(new Path("/test/medium"), 33554432); // 32 MB
}
+ /*
+ Enable Multipart upload to run this test
@Test
public void testExtraLargeUpload()
throws IOException, NoSuchAlgorithmException {
// Multipart upload, multipart copy
writeRenameReadCompare(new Path("/test/xlarge"), 5368709121L); // 5GB+1byte
}
+ */
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ba52d88/hadoop-tools/hadoop-aws/src/test/resources/contract/s3a.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/resources/contract/s3a.xml b/hadoop-tools/hadoop-aws/src/test/resources/contract/s3a.xml
index 4142471..4f9c081 100644
--- a/hadoop-tools/hadoop-aws/src/test/resources/contract/s3a.xml
+++ b/hadoop-tools/hadoop-aws/src/test/resources/contract/s3a.xml
@@ -48,6 +48,11 @@
</property>
<property>
+ <name>fs.contract.rename-remove-dest-if-empty-dir</name>
+ <value>true</value>
+ </property>
+
+ <property>
<name>fs.contract.supports-append</name>
<value>false</value>
</property>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ba52d88/hadoop-tools/hadoop-aws/src/test/resources/core-site.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/resources/core-site.xml b/hadoop-tools/hadoop-aws/src/test/resources/core-site.xml
new file mode 100644
index 0000000..3397769
--- /dev/null
+++ b/hadoop-tools/hadoop-aws/src/test/resources/core-site.xml
@@ -0,0 +1,51 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+ ~ Licensed to the Apache Software Foundation (ASF) under one
+ ~ or more contributor license agreements. See the NOTICE file
+ ~ distributed with this work for additional information
+ ~ regarding copyright ownership. The ASF licenses this file
+ ~ to you under the Apache License, Version 2.0 (the
+ ~ "License"); you may not use this file except in compliance
+ ~ with the License. You may obtain a copy of the License at
+ ~
+ ~ http://www.apache.org/licenses/LICENSE-2.0
+ ~
+ ~ Unless required by applicable law or agreed to in writing, software
+ ~ distributed under the License is distributed on an "AS IS" BASIS,
+ ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ ~ See the License for the specific language governing permissions and
+ ~ limitations under the License.
+ -->
+
+<!-- Values used when running unit tests. Specify any values in here that
+ should override the default values. -->
+
+<configuration>
+
+ <property>
+ <name>hadoop.tmp.dir</name>
+ <value>target/build/test</value>
+ <description>A base for other temporary directories.</description>
+ <final>true</final>
+ </property>
+
+ <!-- Turn security off for tests by default -->
+ <property>
+ <name>hadoop.security.authentication</name>
+ <value>simple</value>
+ </property>
+
+ <!--
+ To run these tests.
+
+ # Create a file auth-keys.xml - DO NOT ADD TO REVISION CONTROL
+ # add the property test.fs.s3n.name to point to an S3 filesystem URL
+ # Add the credentials for the service you are testing against
+ -->
+ <include xmlns="http://www.w3.org/2001/XInclude"
+ href="auth-keys.xml"/>
+
+
+
+</configuration>
[38/43] git commit: HDFS-7361. TestCheckpoint fails after change of
log message related to locking violation. Contributed by Konstantin Shvachko.
Posted by vi...@apache.org.
HDFS-7361. TestCheckpoint fails after change of log message related to locking violation. Contributed by Konstantin Shvachko.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/240cb59b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/240cb59b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/240cb59b
Branch: refs/heads/HDFS-EC
Commit: 240cb59b9fb6841c64a47c6797c605a454f7b47f
Parents: e4b4901
Author: Konstantin V Shvachko <sh...@apache.org>
Authored: Wed Nov 5 20:14:28 2014 -0800
Committer: Konstantin V Shvachko <sh...@apache.org>
Committed: Wed Nov 5 20:15:16 2014 -0800
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 7 +++++--
.../hadoop/hdfs/server/namenode/TestCheckpoint.java | 10 ++++++----
2 files changed, 11 insertions(+), 6 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/240cb59b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 6ec8199..c6e848f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -333,13 +333,16 @@ Release 2.7.0 - UNRELEASED
HDFS-7356. Use DirectoryListing.hasMore() directly in nfs. (Li Lu via jing9)
+ HDFS-7357. FSNamesystem.checkFileProgress should log file path.
+ (Tsz Wo Nicholas Sze via wheat9)
+
HDFS-7335. Redundant checkOperation() in FSN.analyzeFileState().
(Milan Desai via shv)
HDFS-7333. Improve logging in Storage.tryLock(). (shv)
- HDFS-7357. FSNamesystem.checkFileProgress should log file path.
- (Tsz Wo Nicholas Sze via wheat9)
+ HDFS-7361. TestCheckpoint fails after change of log message related to
+ locking violation. (shv)
OPTIMIZATIONS
http://git-wip-us.apache.org/repos/asf/hadoop/blob/240cb59b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
index d51c1cc..bb4689d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
@@ -837,7 +837,7 @@ public class TestCheckpoint {
}
/**
- * Test that, an attempt to lock a storage that is already locked by a nodename,
+ * Test that, an attempt to lock a storage that is already locked by nodename,
* logs error message that includes JVM name of the namenode that locked it.
*/
@Test
@@ -853,16 +853,18 @@ public class TestCheckpoint {
savedSd = sd;
}
- LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(LogFactory.getLog(Storage.class));
+ LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
+ LogFactory.getLog(Storage.class));
try {
// try to lock the storage that's already locked
savedSd.lock();
- fail("Namenode should not be able to lock a storage that is already locked");
+ fail("Namenode should not be able to lock a storage" +
+ " that is already locked");
} catch (IOException ioe) {
// cannot read lock file on Windows, so message cannot get JVM name
String lockingJvmName = Path.WINDOWS ? "" :
" " + ManagementFactory.getRuntimeMXBean().getName();
- String expectedLogMessage = "It appears that another namenode"
+ String expectedLogMessage = "It appears that another node "
+ lockingJvmName + " has already locked the storage directory";
assertTrue("Log output does not contain expected log message: "
+ expectedLogMessage, logs.getOutput().contains(expectedLogMessage));
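A standalone sketch of the log-capture pattern the updated test relies on, assuming GenericTestUtils.LogCapturer behaves as in the hunk above (captureLogs, getOutput, plus a stopCapturing call to detach the capture); anything not shown in the hunk is illustrative.

import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
import org.junit.Assert;
import org.junit.Test;

public class StorageLockLogCaptureSketch {
  @Test
  public void capturesLockingErrorMessage() throws Exception {
    // Capture everything logged through the Storage logger.
    LogCapturer logs = LogCapturer.captureLogs(LogFactory.getLog(Storage.class));
    try {
      // ... exercise code expected to log the locking error, e.g. a second lock() ...
    } finally {
      logs.stopCapturing();
    }
    // Assert on the captured output instead of scraping the console.
    Assert.assertTrue(logs.getOutput().contains(
        "has already locked the storage directory"));
  }
}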
[05/43] git commit: Update CHANGES.txt for committing HADOOP-10563 to
branch-2.
Posted by vi...@apache.org.
Update CHANGES.txt for committing HADOOP-10563 to branch-2.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ad21d28e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ad21d28e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ad21d28e
Branch: refs/heads/HDFS-EC
Commit: ad21d28e07cb0f4069c62323edc5e3eee0aaab7b
Parents: d0449bd
Author: Haohui Mai <wh...@apache.org>
Authored: Tue Nov 4 14:57:08 2014 -0800
Committer: Haohui Mai <wh...@apache.org>
Committed: Tue Nov 4 14:57:08 2014 -0800
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ad21d28e/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 22e9ae6..cc8048d 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -118,8 +118,6 @@ Trunk (Unreleased)
HADOOP-10342. Add a new method to UGI to use a Kerberos login subject to
build a new UGI. (Larry McCay via omalley)
- HADOOP-10563. Remove the dependency of jsp in trunk. (wheat9)
-
HADOOP-10485. Remove dead classes in hadoop-streaming. (wheat9)
HADOOP-11013. CLASSPATH handling should be consolidated, debuggable (aw)
@@ -372,6 +370,8 @@ Release 2.7.0 - UNRELEASED
HADOOP-10847. Remove the usage of sun.security.x509.* in testing code.
(Pascal Oliva via wheat9)
+ HADOOP-10563. Remove the dependency of jsp in trunk. (wheat9)
+
OPTIMIZATIONS
BUG FIXES
[39/43] git commit: HDFS-7367. HDFS short-circuit read cannot
negotiate shared memory slot and file descriptors when SASL is enabled on
DataTransferProtocol. Contributed by Chris Nauroth.
Posted by vi...@apache.org.
HDFS-7367. HDFS short-circuit read cannot negotiate shared memory slot and file descriptors when SASL is enabled on DataTransferProtocol. Contributed by Chris Nauroth.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8e33d4bd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8e33d4bd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8e33d4bd
Branch: refs/heads/HDFS-EC
Commit: 8e33d4bdb35eed8b60cf197247fad465b1912ef7
Parents: 240cb59
Author: Haohui Mai <wh...@apache.org>
Authored: Wed Nov 5 20:29:17 2014 -0800
Committer: Haohui Mai <wh...@apache.org>
Committed: Wed Nov 5 20:31:39 2014 -0800
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 6 +++++-
.../protocol/datatransfer/sasl/SaslDataTransferServer.java | 5 +++++
2 files changed, 10 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8e33d4bd/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index c6e848f..59f6d92 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1027,9 +1027,13 @@ Release 2.6.0 - UNRELEASED
HDFS-7218. FSNamesystem ACL operations should write to audit log on
failure. (clamb via yliu)
- HDFS-7199. DFSOutputStream should not silently drop data if DataStreamer
+ HDFS-7199. DFSOutputStream should not silently drop data if DataStreamer
crashes with an unchecked exception (rushabhs via cmccabe)
+ HDFS-7367. HDFS short-circuit read cannot negotiate shared memory slot and
+ file descriptors when SASL is enabled on DataTransferProtocol.
+ (Chris Nauroth via wheat9)
+
BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
HDFS-6387. HDFS CLI admin tool for creating & deleting an
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8e33d4bd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferServer.java
index 005856d..9f94534 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferServer.java
@@ -277,6 +277,11 @@ public class SaslDataTransferServer {
*/
private IOStreamPair getSaslStreams(Peer peer, OutputStream underlyingOut,
InputStream underlyingIn, final DatanodeID datanodeId) throws IOException {
+ if (peer.hasSecureChannel() ||
+ dnConf.getTrustedChannelResolver().isTrusted(getPeerAddress(peer))) {
+ return new IOStreamPair(underlyingIn, underlyingOut);
+ }
+
SaslPropertiesResolver saslPropsResolver = dnConf.getSaslPropsResolver();
Map<String, String> saslProps = saslPropsResolver.getServerProperties(
getPeerAddress(peer));
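A simplified sketch of the shape of this fix, using hypothetical stand-in types; only the guard condition mirrors the hunk above. When the peer already has a secure channel, or its address is trusted by the configured resolver, the raw streams are returned unchanged and no SASL negotiation is attempted.

import java.io.InputStream;
import java.io.OutputStream;

public class SaslGuardSketch {
  // Hypothetical stand-ins for the real HDFS types.
  static class IOStreamPair {
    final InputStream in;
    final OutputStream out;
    IOStreamPair(InputStream in, OutputStream out) { this.in = in; this.out = out; }
  }
  interface Peer { boolean hasSecureChannel(); }
  interface TrustedChannelResolver { boolean isTrusted(); }

  // Guard clause first: skip SASL wrapping for channels that are already protected.
  static IOStreamPair getStreams(Peer peer, TrustedChannelResolver resolver,
      InputStream underlyingIn, OutputStream underlyingOut) {
    if (peer.hasSecureChannel() || resolver.isTrusted()) {
      return new IOStreamPair(underlyingIn, underlyingOut); // no negotiation needed
    }
    return negotiateSasl(underlyingIn, underlyingOut);       // normal SASL path
  }

  // Placeholder for the real SASL handshake.
  static IOStreamPair negotiateSasl(InputStream in, OutputStream out) {
    return new IOStreamPair(in, out);
  }
}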
[36/43] git commit: HADOOP-11187 NameNode - KMS communication fails
after a long period of inactivity. Contributed by Arun Suresh.
Posted by vi...@apache.org.
HADOOP-11187 NameNode - KMS communication fails after a long period of inactivity. Contributed by Arun Suresh.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ef5af4f8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ef5af4f8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ef5af4f8
Branch: refs/heads/HDFS-EC
Commit: ef5af4f8de91fbe7891ae3471eb03397e74e1811
Parents: 86eb27b
Author: Aaron T. Myers <at...@apache.org>
Authored: Wed Nov 5 18:15:12 2014 -0800
Committer: Aaron T. Myers <at...@apache.org>
Committed: Wed Nov 5 18:17:49 2014 -0800
----------------------------------------------------------------------
.../server/AuthenticationFilter.java | 9 ++++++
.../server/AuthenticationHandler.java | 3 ++
.../server/KerberosAuthenticationHandler.java | 2 +-
.../server/PseudoAuthenticationHandler.java | 9 ++++--
.../client/TestPseudoAuthenticator.java | 5 +--
.../server/TestAuthenticationFilter.java | 4 ++-
.../server/TestPseudoAuthenticationHandler.java | 9 ++----
hadoop-common-project/hadoop-common/CHANGES.txt | 3 ++
.../crypto/key/kms/KMSClientProvider.java | 5 ++-
.../hadoop/crypto/key/kms/server/TestKMS.java | 32 ++++++++------------
10 files changed, 49 insertions(+), 32 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef5af4f8/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
index 58d97ca..0ac352b 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
@@ -17,6 +17,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
import org.apache.hadoop.security.authentication.util.Signer;
import org.apache.hadoop.security.authentication.util.SignerException;
import org.apache.hadoop.security.authentication.util.RandomSignerSecretProvider;
@@ -36,6 +37,7 @@ import javax.servlet.http.Cookie;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletRequestWrapper;
import javax.servlet.http.HttpServletResponse;
+
import java.io.IOException;
import java.security.Principal;
import java.text.SimpleDateFormat;
@@ -565,6 +567,13 @@ public class AuthenticationFilter implements Filter {
if (!httpResponse.isCommitted()) {
createAuthCookie(httpResponse, "", getCookieDomain(),
getCookiePath(), 0, isHttps);
+ // If response code is 401. Then WWW-Authenticate Header should be
+ // present.. reset to 403 if not found..
+ if ((errCode == HttpServletResponse.SC_UNAUTHORIZED)
+ && (!httpResponse.containsHeader(
+ KerberosAuthenticator.WWW_AUTHENTICATE))) {
+ errCode = HttpServletResponse.SC_FORBIDDEN;
+ }
if (authenticationEx == null) {
httpResponse.sendError(errCode, "Authentication required");
} else {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef5af4f8/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationHandler.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationHandler.java
index 7cafe8b..04984be 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationHandler.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationHandler.java
@@ -18,6 +18,7 @@ import org.apache.hadoop.security.authentication.client.AuthenticationException;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
+
import java.io.IOException;
import java.util.Properties;
@@ -30,6 +31,8 @@ import java.util.Properties;
*/
public interface AuthenticationHandler {
+ public static final String WWW_AUTHENTICATE = "WWW-Authenticate";
+
/**
* Returns the authentication type of the authentication handler.
* <p/>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef5af4f8/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
index 9852460..92bc57c 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
@@ -331,7 +331,7 @@ public class KerberosAuthenticationHandler implements AuthenticationHandler {
String authorization = request.getHeader(KerberosAuthenticator.AUTHORIZATION);
if (authorization == null || !authorization.startsWith(KerberosAuthenticator.NEGOTIATE)) {
- response.setHeader(KerberosAuthenticator.WWW_AUTHENTICATE, KerberosAuthenticator.NEGOTIATE);
+ response.setHeader(WWW_AUTHENTICATE, KerberosAuthenticator.NEGOTIATE);
response.setStatus(HttpServletResponse.SC_UNAUTHORIZED);
if (authorization == null) {
LOG.trace("SPNEGO starting");
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef5af4f8/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/PseudoAuthenticationHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/PseudoAuthenticationHandler.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/PseudoAuthenticationHandler.java
index 0b329e0..2c7db88 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/PseudoAuthenticationHandler.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/PseudoAuthenticationHandler.java
@@ -15,13 +15,13 @@ package org.apache.hadoop.security.authentication.server;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.client.PseudoAuthenticator;
-
import org.apache.http.client.utils.URLEncodedUtils;
import org.apache.http.NameValuePair;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
+
import java.io.IOException;
import java.nio.charset.Charset;
import java.util.List;
@@ -54,6 +54,9 @@ public class PseudoAuthenticationHandler implements AuthenticationHandler {
public static final String ANONYMOUS_ALLOWED = TYPE + ".anonymous.allowed";
private static final Charset UTF8_CHARSET = Charset.forName("UTF-8");
+
+ private static final String PSEUDO_AUTH = "PseudoAuth";
+
private boolean acceptAnonymous;
private String type;
@@ -181,7 +184,9 @@ public class PseudoAuthenticationHandler implements AuthenticationHandler {
if (getAcceptAnonymous()) {
token = AuthenticationToken.ANONYMOUS;
} else {
- throw new AuthenticationException("Anonymous requests are disallowed");
+ response.setStatus(HttpServletResponse.SC_FORBIDDEN);
+ response.setHeader(WWW_AUTHENTICATE, PSEUDO_AUTH);
+ token = null;
}
} else {
token = new AuthenticationToken(userName, userName, getType());
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef5af4f8/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestPseudoAuthenticator.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestPseudoAuthenticator.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestPseudoAuthenticator.java
index 4a33fa9..3a5764b 100644
--- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestPseudoAuthenticator.java
+++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestPseudoAuthenticator.java
@@ -63,8 +63,9 @@ public class TestPseudoAuthenticator {
URL url = new URL(auth.getBaseURL());
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
conn.connect();
- Assert.assertEquals(HttpURLConnection.HTTP_FORBIDDEN, conn.getResponseCode());
- Assert.assertEquals("Anonymous requests are disallowed", conn.getResponseMessage());
+ Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED, conn.getResponseCode());
+ Assert.assertTrue(conn.getHeaderFields().containsKey("WWW-Authenticate"));
+ Assert.assertEquals("Authentication required", conn.getResponseMessage());
} finally {
auth.stop();
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef5af4f8/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationFilter.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationFilter.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationFilter.java
index 3b6b958..c01c182 100644
--- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationFilter.java
+++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationFilter.java
@@ -537,11 +537,11 @@ public class TestAuthenticationFilter {
}
).when(chain).doFilter(Mockito.<ServletRequest>anyObject(), Mockito.<ServletResponse>anyObject());
+ Mockito.when(response.containsHeader("WWW-Authenticate")).thenReturn(true);
filter.doFilter(request, response, chain);
Mockito.verify(response).sendError(
HttpServletResponse.SC_UNAUTHORIZED, "Authentication required");
- Mockito.verify(response).setHeader("WWW-Authenticate", "dummyauth");
} finally {
filter.destroy();
}
@@ -852,6 +852,7 @@ public class TestAuthenticationFilter {
Mockito.when(request.getCookies()).thenReturn(new Cookie[]{cookie});
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
+ Mockito.when(response.containsHeader("WWW-Authenticate")).thenReturn(true);
FilterChain chain = Mockito.mock(FilterChain.class);
verifyUnauthorized(filter, request, response, chain);
@@ -930,6 +931,7 @@ public class TestAuthenticationFilter {
Mockito.when(request.getCookies()).thenReturn(new Cookie[]{cookie});
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
+ Mockito.when(response.containsHeader("WWW-Authenticate")).thenReturn(true);
FilterChain chain = Mockito.mock(FilterChain.class);
verifyUnauthorized(filter, request, response, chain);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef5af4f8/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestPseudoAuthenticationHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestPseudoAuthenticationHandler.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestPseudoAuthenticationHandler.java
index 91c1103..b52915d 100644
--- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestPseudoAuthenticationHandler.java
+++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestPseudoAuthenticationHandler.java
@@ -21,6 +21,7 @@ import org.mockito.Mockito;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
+
import java.util.Properties;
public class TestPseudoAuthenticationHandler {
@@ -74,12 +75,8 @@ public class TestPseudoAuthenticationHandler {
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
- handler.authenticate(request, response);
- Assert.fail();
- } catch (AuthenticationException ex) {
- // Expected
- } catch (Exception ex) {
- Assert.fail();
+ AuthenticationToken token = handler.authenticate(request, response);
+ Assert.assertNull(token);
} finally {
handler.destroy();
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef5af4f8/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 55ef9d3..8587f12 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -412,6 +412,9 @@ Release 2.7.0 - UNRELEASED
HADOOP-11272. Allow ZKSignerSecretProvider and
ZKDelegationTokenSecretManager to use the same curator client. (Arun Suresh via atm)
+ HADOOP-11187 NameNode - KMS communication fails after a long period of
+ inactivity. (Arun Suresh via atm)
+
Release 2.6.0 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef5af4f8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
index 4c24f58..cb03683 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
@@ -81,6 +81,8 @@ import com.google.common.base.Preconditions;
public class KMSClientProvider extends KeyProvider implements CryptoExtension,
KeyProviderDelegationTokenExtension.DelegationTokenExtension {
+ private static final String INVALID_SIGNATURE = "Invalid signature";
+
private static final String ANONYMOUS_REQUESTS_DISALLOWED = "Anonymous requests are disallowed";
public static final String TOKEN_KIND = "kms-dt";
@@ -453,7 +455,8 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
throw ex;
}
if ((conn.getResponseCode() == HttpURLConnection.HTTP_FORBIDDEN
- && conn.getResponseMessage().equals(ANONYMOUS_REQUESTS_DISALLOWED))
+ && (conn.getResponseMessage().equals(ANONYMOUS_REQUESTS_DISALLOWED) ||
+ conn.getResponseMessage().contains(INVALID_SIGNATURE)))
|| conn.getResponseCode() == HttpURLConnection.HTTP_UNAUTHORIZED) {
// Ideally, this should happen only when there is an Authentication
// failure. Unfortunately, the AuthenticationFilter returns 403 when it
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef5af4f8/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
index 9e76178..86e6484 100644
--- a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
+++ b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
@@ -900,6 +900,7 @@ public class TestKMS {
keytab.getAbsolutePath());
conf.set("hadoop.kms.authentication.kerberos.principal", "HTTP/localhost");
conf.set("hadoop.kms.authentication.kerberos.name.rules", "DEFAULT");
+ conf.set("hadoop.kms.authentication.token.validity", "1");
for (KMSACLs.Type type : KMSACLs.Type.values()) {
conf.set(type.getAclConfigKey(), type.toString());
@@ -930,11 +931,16 @@ public class TestKMS {
@Override
public Void run() throws Exception {
KMSClientProvider kp = new KMSClientProvider(uri, conf);
+
+ kp.createKey("k0", new byte[16],
+ new KeyProvider.Options(conf));
+ // This happens before rollover
kp.createKey("k1", new byte[16],
new KeyProvider.Options(conf));
- makeAuthTokenStale(kp);
+ // Atleast 2 rollovers.. so should induce signer Exception
+ Thread.sleep(3500);
kp.createKey("k2", new byte[16],
- new KeyProvider.Options(conf));
+ new KeyProvider.Options(conf));
return null;
}
});
@@ -958,15 +964,16 @@ public class TestKMS {
KMSClientProvider kp = new KMSClientProvider(uri, conf);
kp.createKey("k3", new byte[16],
new KeyProvider.Options(conf));
- makeAuthTokenStale(kp);
+ // Atleast 2 rollovers.. so should induce signer Exception
+ Thread.sleep(3500);
try {
kp.createKey("k4", new byte[16],
new KeyProvider.Options(conf));
- Assert.fail("Shoud fail since retry count == 0");
+ Assert.fail("This should not succeed..");
} catch (IOException e) {
Assert.assertTrue(
- "HTTP exception must be a 403 : " + e.getMessage(), e
- .getMessage().contains("403"));
+ "HTTP exception must be a 401 : " + e.getMessage(), e
+ .getMessage().contains("401"));
}
return null;
}
@@ -976,19 +983,6 @@ public class TestKMS {
});
}
- private void makeAuthTokenStale(KMSClientProvider kp) throws Exception {
- Field tokF = KMSClientProvider.class.getDeclaredField("authToken");
- tokF.setAccessible(true);
- DelegationTokenAuthenticatedURL.Token delToken =
- (DelegationTokenAuthenticatedURL.Token) tokF.get(kp);
- String oldTokStr = delToken.toString();
- Method setM =
- AuthenticatedURL.Token.class.getDeclaredMethod("set", String.class);
- setM.setAccessible(true);
- String newTokStr = oldTokStr.replaceAll("e=[^&]*", "e=1000");
- setM.invoke(((AuthenticatedURL.Token)delToken), newTokStr);
- }
-
@Test
public void testACLs() throws Exception {
Configuration conf = new Configuration();
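A minimal client-side sketch of the behaviour the updated TestPseudoAuthenticator asserts above: an anonymous request is now answered with 401 plus a WWW-Authenticate header, rather than 403 with a handler-specific message. The URL and port are placeholders, and the snippet assumes some AuthenticationFilter-protected service is running locally.

import java.net.HttpURLConnection;
import java.net.URL;

public class AuthHeaderProbe {
  public static void main(String[] args) throws Exception {
    // Placeholder endpoint; substitute any service fronted by AuthenticationFilter.
    URL url = new URL("http://localhost:16000/kms/v1/keys/names");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.connect();
    // Expect 401 and a WWW-Authenticate header describing how to authenticate.
    System.out.println("status = " + conn.getResponseCode());
    System.out.println("WWW-Authenticate present = "
        + conn.getHeaderFields().containsKey("WWW-Authenticate"));
  }
}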
[19/43] git commit: HADOOP-11271. Use Time.monotonicNow() in
Shell.java instead of Time.now() (Contributed by Vinayakumar B)
Posted by vi...@apache.org.
HADOOP-11271. Use Time.monotonicNow() in Shell.java instead of Time.now() (Contributed by Vinayakumar B)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8e9502e0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8e9502e0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8e9502e0
Branch: refs/heads/HDFS-EC
Commit: 8e9502e05d09e31b76fda7cc6691e78a39db8de6
Parents: 73e6012
Author: Vinayakumar B <vi...@apache.org>
Authored: Wed Nov 5 14:47:52 2014 +0530
Committer: Vinayakumar B <vi...@apache.org>
Committed: Wed Nov 5 14:47:52 2014 +0530
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
.../src/main/java/org/apache/hadoop/util/Shell.java | 4 ++--
2 files changed, 5 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8e9502e0/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 7827270..4bfe46b 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -397,6 +397,9 @@ Release 2.7.0 - UNRELEASED
HADOOP-11269. Add java 8 profile for hadoop-annotations. (Li Lu via wheat9)
+ HADOOP-11271. Use Time.monotonicNow() in Shell.java instead of Time.now()
+ (vinayakumarb)
+
Release 2.6.0 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8e9502e0/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
index bd25b9d..a44e992 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
@@ -449,7 +449,7 @@ abstract public class Shell {
/** check to see if a command needs to be executed and execute if needed */
protected void run() throws IOException {
- if (lastTime + interval > Time.now())
+ if (lastTime + interval > Time.monotonicNow())
return;
exitCode = 0; // reset for next run
runCommand();
@@ -578,7 +578,7 @@ abstract public class Shell {
LOG.warn("Error while closing the error stream", ioe);
}
process.destroy();
- lastTime = Time.now();
+ lastTime = Time.monotonicNow();
}
}
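A short illustration of why the interval check moves to a monotonic clock: wall-clock time can jump backwards or forwards when the system clock is adjusted, so a "lastTime + interval > now" check against Time.now() can misfire, while a monotonic source only moves forward. The sketch below is a hypothetical, self-contained version of the same check built on the JDK's System.nanoTime(), the same primitive Hadoop's Time.monotonicNow() is based on.

import java.util.concurrent.TimeUnit;

public class IntervalGate {
  private final long intervalMs;
  private long lastRunMs = 0; // monotonic milliseconds of the last run, 0 = never ran

  public IntervalGate(long intervalMs) {
    this.intervalMs = intervalMs;
  }

  /** Returns true at most once per interval, unaffected by wall-clock adjustments. */
  public synchronized boolean shouldRun() {
    long nowMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime());
    if (lastRunMs != 0 && lastRunMs + intervalMs > nowMs) {
      return false; // interval has not elapsed yet
    }
    lastRunMs = nowMs;
    return true;
  }
}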
[13/43] git commit: YARN-2010. Handle app-recovery failures
gracefully. (Jian He and Karthik Kambatla via kasha)
Posted by vi...@apache.org.
YARN-2010. Handle app-recovery failures gracefully. (Jian He and Karthik Kambatla via kasha)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b2cd2698
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b2cd2698
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b2cd2698
Branch: refs/heads/HDFS-EC
Commit: b2cd2698028118b6384904732dbf94942f644732
Parents: d78191a
Author: Karthik Kambatla <ka...@apache.org>
Authored: Tue Nov 4 17:44:59 2014 -0800
Committer: Karthik Kambatla <ka...@apache.org>
Committed: Tue Nov 4 17:45:24 2014 -0800
----------------------------------------------------------------------
hadoop-yarn-project/CHANGES.txt | 3 +
.../server/resourcemanager/RMAppManager.java | 55 +++--------------
.../server/resourcemanager/rmapp/RMAppImpl.java | 47 ++++++++++++++
.../rmapp/RMAppRecoverEvent.java | 36 +++++++++++
.../rmapp/attempt/RMAppAttemptImpl.java | 6 +-
.../scheduler/QueueNotFoundException.java | 32 ++++++++++
.../scheduler/capacity/CapacityScheduler.java | 7 +--
.../TestWorkPreservingRMRestart.java | 18 ++----
.../rmapp/TestRMAppTransitions.java | 65 +++++++++++++++++---
9 files changed, 196 insertions(+), 73 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2cd2698/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 118cdc4..f9fcf5b 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -856,6 +856,9 @@ Release 2.6.0 - UNRELEASED
of races between the launch and the stop-container call and when root
processes crash. (Billie Rinaldi via vinodkv)
+ YARN-2010. Handle app-recovery failures gracefully.
+ (Jian He and Karthik Kambatla via kasha)
+
Release 2.5.2 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2cd2698/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
index 63333b8..02c6d2f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEventType;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppRecoverEvent;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppRejectedEvent;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
@@ -274,12 +275,11 @@ public class RMAppManager implements EventHandler<RMAppManagerEvent>,
ApplicationId appId = submissionContext.getApplicationId();
if (UserGroupInformation.isSecurityEnabled()) {
- Credentials credentials = null;
try {
- credentials = parseCredentials(submissionContext);
this.rmContext.getDelegationTokenRenewer().addApplicationAsync(appId,
- credentials, submissionContext.getCancelTokensWhenComplete(),
- application.getUser());
+ parseCredentials(submissionContext),
+ submissionContext.getCancelTokensWhenComplete(),
+ application.getUser());
} catch (Exception e) {
LOG.warn("Unable to parse credentials.", e);
// Sending APP_REJECTED is fine, since we assume that the
@@ -299,10 +299,8 @@ public class RMAppManager implements EventHandler<RMAppManagerEvent>,
}
}
- @SuppressWarnings("unchecked")
- protected void
- recoverApplication(ApplicationState appState, RMState rmState)
- throws Exception {
+ protected void recoverApplication(ApplicationState appState, RMState rmState)
+ throws Exception {
ApplicationSubmissionContext appContext =
appState.getApplicationSubmissionContext();
ApplicationId appId = appState.getAppId();
@@ -311,33 +309,7 @@ public class RMAppManager implements EventHandler<RMAppManagerEvent>,
RMAppImpl application =
createAndPopulateNewRMApp(appContext, appState.getSubmitTime(),
appState.getUser());
- application.recover(rmState);
- if (isApplicationInFinalState(appState.getState())) {
- // We are synchronously moving the application into final state so that
- // momentarily client will not see this application in NEW state. Also
- // for finished applications we will avoid renewing tokens.
- application.handle(new RMAppEvent(appId, RMAppEventType.RECOVER));
- return;
- }
-
- if (UserGroupInformation.isSecurityEnabled()) {
- Credentials credentials = null;
- try {
- credentials = parseCredentials(appContext);
- // synchronously renew delegation token on recovery.
- rmContext.getDelegationTokenRenewer().addApplicationSync(appId,
- credentials, appContext.getCancelTokensWhenComplete(),
- application.getUser());
- application.handle(new RMAppEvent(appId, RMAppEventType.RECOVER));
- } catch (Exception e) {
- LOG.warn("Unable to parse and renew delegation tokens.", e);
- this.rmContext.getDispatcher().getEventHandler()
- .handle(new RMAppRejectedEvent(appId, e.getMessage()));
- throw e;
- }
- } else {
- application.handle(new RMAppEvent(appId, RMAppEventType.RECOVER));
- }
+ application.handle(new RMAppRecoverEvent(appId, rmState));
}
private RMAppImpl createAndPopulateNewRMApp(
@@ -416,18 +388,9 @@ public class RMAppManager implements EventHandler<RMAppManagerEvent>,
return null;
}
-
- private boolean isApplicationInFinalState(RMAppState rmAppState) {
- if (rmAppState == RMAppState.FINISHED || rmAppState == RMAppState.FAILED
- || rmAppState == RMAppState.KILLED) {
- return true;
- } else {
- return false;
- }
- }
- protected Credentials parseCredentials(ApplicationSubmissionContext application)
- throws IOException {
+ protected Credentials parseCredentials(
+ ApplicationSubmissionContext application) throws IOException {
Credentials credentials = new Credentials();
DataInputByteBuffer dibb = new DataInputByteBuffer();
ByteBuffer tokens = application.getAMContainerSpec().getTokens();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2cd2698/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
index 1994b36..9b10872 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
@@ -18,8 +18,10 @@
package org.apache.hadoop.yarn.server.resourcemanager.rmapp;
+import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
+import java.nio.ByteBuffer;
import java.util.Collection;
import java.util.Collections;
import java.util.EnumSet;
@@ -36,6 +38,8 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.DataInputByteBuffer;
+import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
@@ -825,6 +829,15 @@ public class RMAppImpl implements RMApp, Recoverable {
@Override
public RMAppState transition(RMAppImpl app, RMAppEvent event) {
+ RMAppRecoverEvent recoverEvent = (RMAppRecoverEvent) event;
+ try {
+ app.recover(recoverEvent.getRMState());
+ } catch (Exception e) {
+ String msg = app.applicationId + " failed to recover. " + e.getMessage();
+ failToRecoverApp(app, event, msg, e);
+ return RMAppState.FINAL_SAVING;
+ }
+
// The app has completed.
if (app.recoveredFinalState != null) {
app.recoverAppAttempts();
@@ -832,6 +845,20 @@ public class RMAppImpl implements RMApp, Recoverable {
return app.recoveredFinalState;
}
+ if (UserGroupInformation.isSecurityEnabled()) {
+ // synchronously renew delegation token on recovery.
+ try {
+ app.rmContext.getDelegationTokenRenewer().addApplicationSync(
+ app.getApplicationId(), app.parseCredentials(),
+ app.submissionContext.getCancelTokensWhenComplete(), app.getUser());
+ } catch (Exception e) {
+ String msg = "Failed to renew delegation token on recovery for "
+ + app.applicationId + e.getMessage();
+ failToRecoverApp(app, event, msg, e);
+ return RMAppState.FINAL_SAVING;
+ }
+ }
+
// No existent attempts means the attempt associated with this app was not
// started or started but not yet saved.
if (app.attempts.isEmpty()) {
@@ -865,6 +892,14 @@ public class RMAppImpl implements RMApp, Recoverable {
// Thus we return ACCECPTED state on recovery.
return RMAppState.ACCEPTED;
}
+
+ private void failToRecoverApp(RMAppImpl app, RMAppEvent event, String msg,
+ Exception e) {
+ app.diagnostics.append(msg);
+ LOG.error(msg, e);
+ app.rememberTargetTransitionsAndStoreState(event, new FinalTransition(
+ RMAppState.FAILED), RMAppState.FAILED, RMAppState.FAILED);
+ }
}
private static final class AddApplicationToSchedulerTransition extends
@@ -1296,4 +1331,16 @@ public class RMAppImpl implements RMApp, Recoverable {
public ReservationId getReservationId() {
return submissionContext.getReservationID();
}
+
+ protected Credentials parseCredentials() throws IOException {
+ Credentials credentials = new Credentials();
+ DataInputByteBuffer dibb = new DataInputByteBuffer();
+ ByteBuffer tokens = submissionContext.getAMContainerSpec().getTokens();
+ if (tokens != null) {
+ dibb.reset(tokens);
+ credentials.readTokenStorageStream(dibb);
+ tokens.rewind();
+ }
+ return credentials;
+ }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2cd2698/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppRecoverEvent.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppRecoverEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppRecoverEvent.java
new file mode 100644
index 0000000..b8c91a9
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppRecoverEvent.java
@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.rmapp;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.RMState;
+
+public class RMAppRecoverEvent extends RMAppEvent {
+
+ private final RMState state;
+
+ public RMAppRecoverEvent(ApplicationId appId, RMState state) {
+ super(appId, RMAppEventType.RECOVER);
+ this.state = state;
+ }
+
+ public RMState getRMState() {
+ return state;
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2cd2698/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
index b5a6237..ae11b07 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
@@ -833,8 +833,10 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable {
if (UserGroupInformation.isSecurityEnabled()) {
byte[] clientTokenMasterKeyBytes = appAttemptTokens.getSecretKey(
RMStateStore.AM_CLIENT_TOKEN_MASTER_KEY_NAME);
- clientTokenMasterKey = rmContext.getClientToAMTokenSecretManager()
- .registerMasterKey(applicationAttemptId, clientTokenMasterKeyBytes);
+ if (clientTokenMasterKeyBytes != null) {
+ clientTokenMasterKey = rmContext.getClientToAMTokenSecretManager()
+ .registerMasterKey(applicationAttemptId, clientTokenMasterKeyBytes);
+ }
}
this.amrmToken =
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2cd2698/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueNotFoundException.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueNotFoundException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueNotFoundException.java
new file mode 100644
index 0000000..35a1d66
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueNotFoundException.java
@@ -0,0 +1,32 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+
+@Private
+public class QueueNotFoundException extends YarnRuntimeException {
+
+ private static final long serialVersionUID = 187239430L;
+
+ public QueueNotFoundException(String message) {
+ super(message);
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2cd2698/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index 9332228..c383e43 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -80,6 +80,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnSched
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Allocation;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.PreemptableResourceScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueNotFoundException;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplication;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.QueueMapping;
@@ -676,15 +677,13 @@ public class CapacityScheduler extends
//During a restart, this indicates a queue was removed, which is
//not presently supported
if (isAppRecovering) {
- //throwing RuntimeException because some other exceptions are caught
- //(including YarnRuntimeException) and we want this to force an exit
- String queueErrorMsg = "Queue named " + queueName
+ String queueErrorMsg = "Queue named " + queueName
+ " missing during application recovery."
+ " Queue removal during recovery is not presently supported by the"
+ " capacity scheduler, please restart with all queues configured"
+ " which were present before shutdown/restart.";
LOG.fatal(queueErrorMsg);
- throw new RuntimeException(queueErrorMsg);
+ throw new QueueNotFoundException(queueErrorMsg);
}
String message = "Application " + applicationId +
" submitted by user " + user + " to unknown queue: " + queueName;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2cd2698/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java
index 85d3895..536dbd7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java
@@ -37,6 +37,7 @@ import java.util.Set;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.service.Service;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
@@ -61,6 +62,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerStat
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeImpl;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueNotFoundException;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplication;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt;
@@ -570,10 +572,10 @@ public class TestWorkPreservingRMRestart {
// submission
//2. Remove one of the queues, restart the RM
//3. Verify that the expected exception was thrown
- @Test (timeout = 30000)
+ @Test (timeout = 30000, expected = QueueNotFoundException.class)
public void testCapacitySchedulerQueueRemovedRecovery() throws Exception {
if (!schedulerClass.equals(CapacityScheduler.class)) {
- return;
+ throw new QueueNotFoundException("Dummy");
}
conf.setBoolean(CapacitySchedulerConfiguration.ENABLE_USER_METRICS, true);
conf.set(CapacitySchedulerConfiguration.RESOURCE_CALCULATOR_CLASS,
@@ -614,17 +616,7 @@ public class TestWorkPreservingRMRestart {
new CapacitySchedulerConfiguration(conf);
setupQueueConfigurationOnlyA(csConf);
rm2 = new MockRM(csConf, memStore);
- boolean runtimeThrown = false;
- try {
- rm2.start();
- } catch (RuntimeException e) {
- //we're catching it because we want to verify the message
- //and we don't want to set it as an expected exception for the
- //test because we only want it to happen here
- assertTrue(e.getMessage().contains(B + " missing"));
- runtimeThrown = true;
- }
- assertTrue(runtimeThrown);
+ rm2.start();
}
private void checkParentQueue(ParentQueue parentQueue, int numContainers,
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2cd2698/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java
index 6a66385..ecb6b5c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java
@@ -28,6 +28,7 @@ import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import java.io.IOException;
+import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.Collection;
import java.util.Map;
@@ -35,6 +36,8 @@ import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.DataOutputBuffer;
+import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
@@ -43,6 +46,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationSubmissionContextPBImpl;
@@ -73,9 +77,11 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEv
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEventType;
import org.apache.hadoop.yarn.server.resourcemanager.security.AMRMTokenSecretManager;
import org.apache.hadoop.yarn.server.resourcemanager.security.ClientToAMTokenSecretManagerInRM;
+import org.apache.hadoop.yarn.server.resourcemanager.security.DelegationTokenRenewer;
import org.apache.hadoop.yarn.server.resourcemanager.security.NMTokenSecretManagerInRM;
import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager;
import org.apache.hadoop.yarn.server.utils.BuilderUtils;
+import org.apache.hadoop.yarn.util.Records;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
@@ -199,10 +205,11 @@ public class TestRMAppTransitions {
AMLivelinessMonitor amFinishingMonitor = mock(AMLivelinessMonitor.class);
store = mock(RMStateStore.class);
writer = mock(RMApplicationHistoryWriter.class);
+ DelegationTokenRenewer renewer = mock(DelegationTokenRenewer.class);
RMContext realRMContext =
new RMContextImpl(rmDispatcher,
containerAllocationExpirer, amLivelinessMonitor, amFinishingMonitor,
- null, new AMRMTokenSecretManager(conf, this.rmContext),
+ renewer, new AMRMTokenSecretManager(conf, this.rmContext),
new RMContainerTokenSecretManager(conf),
new NMTokenSecretManagerInRM(conf),
new ClientToAMTokenSecretManagerInRM(),
@@ -387,8 +394,12 @@ public class TestRMAppTransitions {
ApplicationSubmissionContext submissionContext) throws IOException {
RMApp application = createNewTestApp(submissionContext);
// NEW => SUBMITTED event RMAppEventType.RECOVER
+ RMState state = new RMState();
+ ApplicationState appState = new ApplicationState(123, 123, null, "user");
+ state.getApplicationState().put(application.getApplicationId(), appState);
RMAppEvent event =
- new RMAppEvent(application.getApplicationId(), RMAppEventType.RECOVER);
+ new RMAppRecoverEvent(application.getApplicationId(), state);
+
application.handle(event);
assertStartTimeSet(application);
assertAppState(RMAppState.SUBMITTED, application);
@@ -514,7 +525,46 @@ public class TestRMAppTransitions {
@Test (timeout = 30000)
public void testAppRecoverPath() throws IOException {
LOG.info("--- START: testAppRecoverPath ---");
- testCreateAppSubmittedRecovery(null);
+ ApplicationSubmissionContext sub =
+ Records.newRecord(ApplicationSubmissionContext.class);
+ ContainerLaunchContext clc =
+ Records.newRecord(ContainerLaunchContext.class);
+ Credentials credentials = new Credentials();
+ DataOutputBuffer dob = new DataOutputBuffer();
+ credentials.writeTokenStorageToStream(dob);
+ ByteBuffer securityTokens =
+ ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
+ clc.setTokens(securityTokens);
+ sub.setAMContainerSpec(clc);
+ testCreateAppSubmittedRecovery(sub);
+ }
+
+ @Test (timeout = 30000)
+ public void testAppRecoverToFailed() throws IOException {
+ LOG.info("--- START: testAppRecoverToFailed ---");
+ ApplicationSubmissionContext sub =
+ Records.newRecord(ApplicationSubmissionContext.class);
+ ContainerLaunchContext clc =
+ Records.newRecord(ContainerLaunchContext.class);
+ Credentials credentials = new Credentials();
+ DataOutputBuffer dob = new DataOutputBuffer();
+ credentials.writeTokenStorageToStream(dob);
+ ByteBuffer securityTokens =
+ ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
+ clc.setTokens(securityTokens);
+ sub.setAMContainerSpec(clc);
+
+ RMApp application = createNewTestApp(sub);
+ // NEW => FINAL_SAVING, event RMAppEventType.RECOVER
+ RMState state = new RMState();
+ RMAppEvent event =
+ new RMAppRecoverEvent(application.getApplicationId(), state);
+ // NPE will throw on recovery.
+ application.handle(event);
+ assertAppState(RMAppState.FINAL_SAVING, application);
+ sendAppUpdateSavedEvent(application);
+ rmDispatcher.await();
+ assertAppState(RMAppState.FAILED, application);
}
@Test (timeout = 30000)
@@ -917,7 +967,6 @@ public class TestRMAppTransitions {
}
}
- @SuppressWarnings("deprecation")
public void testRecoverApplication(ApplicationState appState, RMState rmState)
throws Exception {
ApplicationSubmissionContext submissionContext =
@@ -932,15 +981,15 @@ public class TestRMAppTransitions {
RMAppAttemptImpl.AM_CONTAINER_PRIORITY, ResourceRequest.ANY,
submissionContext.getResource(), 1));
Assert.assertEquals(RMAppState.NEW, application.getState());
- application.recover(rmState);
+ RMAppEvent recoverEvent =
+ new RMAppRecoverEvent(application.getApplicationId(), rmState);
+ // Trigger RECOVER event.
+ application.handle(recoverEvent);
// Application final status looked from recoveredFinalStatus
Assert.assertTrue("Application is not in recoveredFinalStatus.",
RMAppImpl.isAppInFinalState(application));
- // Trigger RECOVER event.
- application.handle(new RMAppEvent(appState.getAppId(),
- RMAppEventType.RECOVER));
rmDispatcher.await();
RMAppState finalState = appState.getState();
Assert.assertEquals("Application is not in finalState.", finalState,
[28/43] git commit: HDFS-7359. NameNode in secured HA cluster fails
to start if dfs.namenode.secondary.http-address cannot be interpreted as a
network address. Contributed by Chris Nauroth.
Posted by vi...@apache.org.
HDFS-7359. NameNode in secured HA cluster fails to start if dfs.namenode.secondary.http-address cannot be interpreted as a network address. Contributed by Chris Nauroth.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ba1d4ad2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ba1d4ad2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ba1d4ad2
Branch: refs/heads/HDFS-EC
Commit: ba1d4ad25b301f7247f3f23df15e7f800e50feed
Parents: bc80251
Author: cnauroth <cn...@apache.org>
Authored: Wed Nov 5 15:09:22 2014 -0800
Committer: cnauroth <cn...@apache.org>
Committed: Wed Nov 5 15:09:22 2014 -0800
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 4 +
.../qjournal/server/GetJournalEditServlet.java | 21 +-
.../hdfs/server/namenode/ImageServlet.java | 18 +-
.../hadoop/hdfs/qjournal/TestNNWithQJM.java | 2 +-
.../hdfs/qjournal/TestSecureNNWithQJM.java | 216 +++++++++++++++++++
5 files changed, 254 insertions(+), 7 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba1d4ad2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index bb4f194..b0655cf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1384,6 +1384,10 @@ Release 2.6.0 - UNRELEASED
HDFS-7328. TestTraceAdmin assumes Unix line endings. (cnauroth)
+ HDFS-7359. NameNode in secured HA cluster fails to start if
+ dfs.namenode.secondary.http-address cannot be interpreted as a network
+ address. (cnauroth)
+
Release 2.5.2 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba1d4ad2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/GetJournalEditServlet.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/GetJournalEditServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/GetJournalEditServlet.java
index 070e669..2335ea5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/GetJournalEditServlet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/GetJournalEditServlet.java
@@ -91,10 +91,23 @@ public class GetJournalEditServlet extends HttpServlet {
Set<String> validRequestors = new HashSet<String>();
validRequestors.addAll(DFSUtil.getAllNnPrincipals(conf));
- validRequestors.add(
- SecurityUtil.getServerPrincipal(conf
- .get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_KERBEROS_PRINCIPAL_KEY),
- SecondaryNameNode.getHttpAddress(conf).getHostName()));
+ try {
+ validRequestors.add(
+ SecurityUtil.getServerPrincipal(conf
+ .get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_KERBEROS_PRINCIPAL_KEY),
+ SecondaryNameNode.getHttpAddress(conf).getHostName()));
+ } catch (Exception e) {
+ // Don't halt if SecondaryNameNode principal could not be added.
+ LOG.debug("SecondaryNameNode principal could not be added", e);
+ String msg = String.format(
+ "SecondaryNameNode principal not considered, %s = %s, %s = %s",
+ DFSConfigKeys.DFS_SECONDARY_NAMENODE_KERBEROS_PRINCIPAL_KEY,
+ conf.get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_KERBEROS_PRINCIPAL_KEY),
+ DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
+ conf.get(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
+ DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_DEFAULT));
+ LOG.warn(msg);
+ }
// Check the full principal name of all the configured valid requestors.
for (String v : validRequestors) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba1d4ad2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageServlet.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageServlet.java
index 0495ca1..d10aacc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageServlet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageServlet.java
@@ -237,9 +237,23 @@ public class ImageServlet extends HttpServlet {
validRequestors.add(SecurityUtil.getServerPrincipal(conf
.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY),
NameNode.getAddress(conf).getHostName()));
- validRequestors.add(SecurityUtil.getServerPrincipal(
+ try {
+ validRequestors.add(
+ SecurityUtil.getServerPrincipal(conf
+ .get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_KERBEROS_PRINCIPAL_KEY),
+ SecondaryNameNode.getHttpAddress(conf).getHostName()));
+ } catch (Exception e) {
+ // Don't halt if SecondaryNameNode principal could not be added.
+ LOG.debug("SecondaryNameNode principal could not be added", e);
+ String msg = String.format(
+ "SecondaryNameNode principal not considered, %s = %s, %s = %s",
+ DFSConfigKeys.DFS_SECONDARY_NAMENODE_KERBEROS_PRINCIPAL_KEY,
conf.get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_KERBEROS_PRINCIPAL_KEY),
- SecondaryNameNode.getHttpAddress(conf).getHostName()));
+ DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
+ conf.get(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
+ DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_DEFAULT));
+ LOG.warn(msg);
+ }
if (HAUtil.isHAEnabled(conf, DFSUtil.getNamenodeNameServiceId(conf))) {
Configuration otherNnConf = HAUtil.getConfForOtherNode(conf);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba1d4ad2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestNNWithQJM.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestNNWithQJM.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestNNWithQJM.java
index 8a12eea..7e81b67 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestNNWithQJM.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestNNWithQJM.java
@@ -42,7 +42,7 @@ public class TestNNWithQJM {
final Configuration conf = new HdfsConfiguration();
private MiniJournalCluster mjc = null;
private final Path TEST_PATH = new Path("/test-dir");
- private final Path TEST_PATH_2 = new Path("/test-dir");
+ private final Path TEST_PATH_2 = new Path("/test-dir-2");
@Before
public void resetSystemExit() {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba1d4ad2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestSecureNNWithQJM.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestSecureNNWithQJM.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestSecureNNWithQJM.java
new file mode 100644
index 0000000..1da92a1
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestSecureNNWithQJM.java
@@ -0,0 +1,216 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.qjournal;
+
+import static org.junit.Assert.*;
+
+import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HTTP_POLICY_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_JOURNALNODE_HTTPS_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_JOURNALNODE_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_JOURNALNODE_KERBEROS_PRINCIPAL_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_JOURNALNODE_KEYTAB_FILE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Properties;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.http.HttpConfig;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.minikdc.MiniKdc;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
+import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+
+public class TestSecureNNWithQJM {
+
+ private static final Path TEST_PATH = new Path("/test-dir");
+ private static final Path TEST_PATH_2 = new Path("/test-dir-2");
+
+ private static HdfsConfiguration baseConf;
+ private static File baseDir;
+ private static MiniKdc kdc;
+
+ private MiniDFSCluster cluster;
+ private HdfsConfiguration conf;
+ private FileSystem fs;
+ private MiniJournalCluster mjc;
+
+ @Rule
+ public Timeout timeout = new Timeout(30000);
+
+ @BeforeClass
+ public static void init() throws Exception {
+ baseDir = new File(System.getProperty("test.build.dir", "target/test-dir"),
+ TestSecureNNWithQJM.class.getSimpleName());
+ FileUtil.fullyDelete(baseDir);
+ assertTrue(baseDir.mkdirs());
+
+ Properties kdcConf = MiniKdc.createConf();
+ kdc = new MiniKdc(kdcConf, baseDir);
+ kdc.start();
+
+ baseConf = new HdfsConfiguration();
+ SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS,
+ baseConf);
+ UserGroupInformation.setConfiguration(baseConf);
+ assertTrue("Expected configuration to enable security",
+ UserGroupInformation.isSecurityEnabled());
+
+ String userName = UserGroupInformation.getLoginUser().getShortUserName();
+ File keytabFile = new File(baseDir, userName + ".keytab");
+ String keytab = keytabFile.getAbsolutePath();
+ // Windows will not reverse name lookup "127.0.0.1" to "localhost".
+ String krbInstance = Path.WINDOWS ? "127.0.0.1" : "localhost";
+ kdc.createPrincipal(keytabFile,
+ userName + "/" + krbInstance,
+ "HTTP/" + krbInstance);
+ String hdfsPrincipal = userName + "/" + krbInstance + "@" + kdc.getRealm();
+ String spnegoPrincipal = "HTTP/" + krbInstance + "@" + kdc.getRealm();
+
+ baseConf.set(DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
+ baseConf.set(DFS_NAMENODE_KEYTAB_FILE_KEY, keytab);
+ baseConf.set(DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
+ baseConf.set(DFS_DATANODE_KEYTAB_FILE_KEY, keytab);
+ baseConf.set(DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, spnegoPrincipal);
+ baseConf.set(DFS_JOURNALNODE_KEYTAB_FILE_KEY, keytab);
+ baseConf.set(DFS_JOURNALNODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
+ baseConf.set(DFS_JOURNALNODE_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY,
+ spnegoPrincipal);
+ baseConf.setBoolean(DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
+ baseConf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, "authentication");
+ baseConf.set(DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
+ baseConf.set(DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
+ baseConf.set(DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
+ baseConf.set(DFS_JOURNALNODE_HTTPS_ADDRESS_KEY, "localhost:0");
+ baseConf.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY, 10);
+
+ String keystoresDir = baseDir.getAbsolutePath();
+ String sslConfDir = KeyStoreTestUtil.getClasspathDir(
+ TestSecureNNWithQJM.class);
+ KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, baseConf, false);
+ }
+
+ @AfterClass
+ public static void destroy() {
+ if (kdc != null) {
+ kdc.stop();
+ }
+ FileUtil.fullyDelete(baseDir);
+ }
+
+ @Before
+ public void setup() throws Exception {
+ conf = new HdfsConfiguration(baseConf);
+ }
+
+ @After
+ public void shutdown() throws IOException {
+ IOUtils.cleanup(null, fs);
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ if (mjc != null) {
+ mjc.shutdown();
+ }
+ }
+
+ @Test
+ public void testSecureMode() throws Exception {
+ doNNWithQJMTest();
+ }
+
+ @Test
+ public void testSecondaryNameNodeHttpAddressNotNeeded() throws Exception {
+ conf.set(DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "null");
+ doNNWithQJMTest();
+ }
+
+ /**
+ * Tests use of QJM with the defined cluster.
+ *
+ * @throws IOException if there is an I/O error
+ */
+ private void doNNWithQJMTest() throws IOException {
+ startCluster();
+ assertTrue(fs.mkdirs(TEST_PATH));
+
+ // Restart the NN and make sure the edit was persisted
+ // and loaded again
+ restartNameNode();
+
+ assertTrue(fs.exists(TEST_PATH));
+ assertTrue(fs.mkdirs(TEST_PATH_2));
+
+ // Restart the NN again and make sure both edits are persisted.
+ restartNameNode();
+ assertTrue(fs.exists(TEST_PATH));
+ assertTrue(fs.exists(TEST_PATH_2));
+ }
+
+ /**
+ * Restarts the NameNode and obtains a new FileSystem.
+ *
+ * @throws IOException if there is an I/O error
+ */
+ private void restartNameNode() throws IOException {
+ IOUtils.cleanup(null, fs);
+ cluster.restartNameNode();
+ fs = cluster.getFileSystem();
+ }
+
+ /**
+ * Starts a cluster using QJM with the defined configuration.
+ *
+ * @throws IOException if there is an I/O error
+ */
+ private void startCluster() throws IOException {
+ mjc = new MiniJournalCluster.Builder(conf)
+ .build();
+ conf.set(DFS_NAMENODE_EDITS_DIR_KEY,
+ mjc.getQuorumJournalURI("myjournal").toString());
+ cluster = new MiniDFSCluster.Builder(conf)
+ .build();
+ cluster.waitActive();
+ fs = cluster.getFileSystem();
+ }
+}
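Note on the change above: the servlet hunks make the SecondaryNameNode principal a best-effort entry. If dfs.namenode.secondary.http-address cannot be parsed as a network address (the new test deliberately sets it to "null"), the lookup failure is logged and startup continues instead of aborting. A rough, self-contained sketch of that guard pattern in plain JDK code, not the HDFS classes:

import java.net.InetSocketAddress;
import java.util.HashSet;
import java.util.Set;

public class OptionalRequestorSketch {

  // Minimal host:port parser; throws if the value is not an address.
  static InetSocketAddress parseAddr(String addr) {
    int i = addr.lastIndexOf(':');
    if (i < 0) {
      throw new IllegalArgumentException("expected host:port, got: " + addr);
    }
    return new InetSocketAddress(addr.substring(0, i),
        Integer.parseInt(addr.substring(i + 1)));
  }

  static Set<String> buildValidRequestors(String secondaryHttpAddress) {
    Set<String> validRequestors = new HashSet<String>();
    validRequestors.add("nn/primary@EXAMPLE.COM");   // always-required entry
    try {
      InetSocketAddress a = parseAddr(secondaryHttpAddress);
      validRequestors.add("nn/" + a.getHostName() + "@EXAMPLE.COM");
    } catch (Exception e) {
      // optional entry could not be resolved; log and keep starting up
      System.err.println("secondary principal not considered: " + e);
    }
    return validRequestors;
  }

  public static void main(String[] args) {
    System.out.println(buildValidRequestors("0.0.0.0:50090")); // both entries
    System.out.println(buildValidRequestors("null"));          // primary only
  }
}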
[07/43] git commit: Update CHANGES.txt for committing HADOOP-10717 to
branch-2.
Posted by vi...@apache.org.
Update CHANGES.txt for committing HADOOP-10717 to branch-2.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2e1d9a41
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2e1d9a41
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2e1d9a41
Branch: refs/heads/HDFS-EC
Commit: 2e1d9a41273d2374b1f3f707bd44bdf2b7ce69a8
Parents: 27f106e
Author: Haohui Mai <wh...@apache.org>
Authored: Tue Nov 4 15:07:04 2014 -0800
Committer: Haohui Mai <wh...@apache.org>
Committed: Tue Nov 4 15:07:46 2014 -0800
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e1d9a41/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index cc8048d..ede1148 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -306,9 +306,6 @@ Trunk (Unreleased)
HADOOP-10625. Trim configuration names when putting/getting them
to properties. (Wangda Tan via xgong)
- HADOOP-10717. HttpServer2 should load jsp DTD from local jars instead of
- going remote. (Dapeng Sun via wheat9)
-
HADOOP-10689. InputStream is not closed in
AzureNativeFileSystemStore#retrieve(). (Chen He via cnauroth)
@@ -389,6 +386,9 @@ Release 2.7.0 - UNRELEASED
HADOOP-11165. TestUTF8 fails when run against java 8.
(Stephen Chu via cnauroth)
+ HADOOP-10717. HttpServer2 should load jsp DTD from local jars instead of
+ going remote. (Dapeng Sun via wheat9)
+
Release 2.6.0 - UNRELEASED
INCOMPATIBLE CHANGES
[40/43] git commit: YARN-2812. TestApplicationHistoryServer is likely
to fail on less powerful machine. Contributed by Zhijie Shen
Posted by vi...@apache.org.
YARN-2812. TestApplicationHistoryServer is likely to fail on less powerful machine. Contributed by Zhijie Shen
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b0b52c4e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b0b52c4e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b0b52c4e
Branch: refs/heads/HDFS-EC
Commit: b0b52c4e11336ca2ad6a02d64c0b5d5a8f1339ae
Parents: 8e33d4b
Author: Xuan <xg...@apache.org>
Authored: Wed Nov 5 20:42:38 2014 -0800
Committer: Xuan <xg...@apache.org>
Committed: Wed Nov 5 20:42:38 2014 -0800
----------------------------------------------------------------------
hadoop-yarn-project/CHANGES.txt | 3 +
.../TestApplicationHistoryServer.java | 77 ++++++++++++--------
2 files changed, 48 insertions(+), 32 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0b52c4e/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index d65860c..aefc59f 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -874,6 +874,9 @@ Release 2.6.0 - UNRELEASED
YARN-2813. Fixed NPE from MemoryTimelineStore.getDomains. (Zhijie Shen via xgong)
+ YARN-2812. TestApplicationHistoryServer is likely to fail on less powerful machine.
+ (Zhijie Shen via xgong)
+
Release 2.5.2 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0b52c4e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryServer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryServer.java
index b11cda7..7a4062d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryServer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryServer.java
@@ -29,6 +29,8 @@ import org.apache.hadoop.service.Service.STATE;
import org.apache.hadoop.util.ExitUtil;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.applicationhistoryservice.webapp.AHSWebApp;
+import org.apache.hadoop.yarn.server.timeline.MemoryTimelineStore;
+import org.apache.hadoop.yarn.server.timeline.TimelineStore;
import org.apache.hadoop.yarn.server.timeline.security.TimelineAuthenticationFilterInitializer;
import org.junit.After;
import org.junit.Test;
@@ -38,45 +40,56 @@ import java.util.Map;
public class TestApplicationHistoryServer {
- ApplicationHistoryServer historyServer = null;
-
// simple test init/start/stop ApplicationHistoryServer. Status should change.
- @Test(timeout = 50000)
+ @Test(timeout = 60000)
public void testStartStopServer() throws Exception {
- historyServer = new ApplicationHistoryServer();
+ ApplicationHistoryServer historyServer = new ApplicationHistoryServer();
Configuration config = new YarnConfiguration();
- historyServer.init(config);
- assertEquals(STATE.INITED, historyServer.getServiceState());
- assertEquals(5, historyServer.getServices().size());
- ApplicationHistoryClientService historyService =
- historyServer.getClientService();
- assertNotNull(historyServer.getClientService());
- assertEquals(STATE.INITED, historyService.getServiceState());
+ config.setClass(YarnConfiguration.TIMELINE_SERVICE_STORE,
+ MemoryTimelineStore.class, TimelineStore.class);
+ config.set(YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS, "localhost:0");
+ try {
+ historyServer.init(config);
+ assertEquals(STATE.INITED, historyServer.getServiceState());
+ assertEquals(5, historyServer.getServices().size());
+ ApplicationHistoryClientService historyService =
+ historyServer.getClientService();
+ assertNotNull(historyServer.getClientService());
+ assertEquals(STATE.INITED, historyService.getServiceState());
- historyServer.start();
- assertEquals(STATE.STARTED, historyServer.getServiceState());
- assertEquals(STATE.STARTED, historyService.getServiceState());
- historyServer.stop();
- assertEquals(STATE.STOPPED, historyServer.getServiceState());
+ historyServer.start();
+ assertEquals(STATE.STARTED, historyServer.getServiceState());
+ assertEquals(STATE.STARTED, historyService.getServiceState());
+ historyServer.stop();
+ assertEquals(STATE.STOPPED, historyServer.getServiceState());
+ } finally {
+ historyServer.stop();
+ }
}
// test launch method
@Test(timeout = 60000)
public void testLaunch() throws Exception {
-
ExitUtil.disableSystemExit();
+ ApplicationHistoryServer historyServer = null;
try {
+ // Not able to modify the config of this test case,
+ // but others have been customized to avoid conflicts
historyServer =
ApplicationHistoryServer.launchAppHistoryServer(new String[0]);
} catch (ExitUtil.ExitException e) {
assertEquals(0, e.status);
ExitUtil.resetFirstExitException();
fail();
+ } finally {
+ if (historyServer != null) {
+ historyServer.stop();
+ }
}
}
- @Test(timeout = 50000)
- public void testFilteOverrides() throws Exception {
+ @Test(timeout = 240000)
+ public void testFilterOverrides() throws Exception {
HashMap<String, String> driver = new HashMap<String, String>();
driver.put("", TimelineAuthenticationFilterInitializer.class.getName());
@@ -97,21 +110,21 @@ public class TestApplicationHistoryServer {
for (Map.Entry<String, String> entry : driver.entrySet()) {
String filterInitializer = entry.getKey();
String expectedValue = entry.getValue();
- historyServer = new ApplicationHistoryServer();
+ ApplicationHistoryServer historyServer = new ApplicationHistoryServer();
Configuration config = new YarnConfiguration();
- config.set("hadoop.http.filter.initializers", filterInitializer);
- historyServer.init(config);
- historyServer.start();
- Configuration tmp = historyServer.getConfig();
- assertEquals(expectedValue, tmp.get("hadoop.http.filter.initializers"));
- historyServer.stop();
+ config.setClass(YarnConfiguration.TIMELINE_SERVICE_STORE,
+ MemoryTimelineStore.class, TimelineStore.class);
+ config.set(YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS, "localhost:0");
+ try {
+ config.set("hadoop.http.filter.initializers", filterInitializer);
+ historyServer.init(config);
+ historyServer.start();
+ Configuration tmp = historyServer.getConfig();
+ assertEquals(expectedValue, tmp.get("hadoop.http.filter.initializers"));
+ } finally {
+ historyServer.stop();
+ }
}
}
- @After
- public void stop() {
- if (historyServer != null) {
- historyServer.stop();
- }
- }
}
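Note on the change above: two of the moves are general test hygiene. Each test now owns its ApplicationHistoryServer and stops it in finally, and the timeline webapp binds to localhost:0 so an ephemeral port is chosen and concurrent or leftover servers cannot collide. A minimal, self-contained illustration with a plain ServerSocket, not the YARN server:

import java.net.ServerSocket;

public class EphemeralPortSketch {
  public static void main(String[] args) throws Exception {
    ServerSocket server = new ServerSocket(0);   // "localhost:0" style binding
    try {
      System.out.println("bound to ephemeral port " + server.getLocalPort());
      // ... exercise the server under test here ...
    } finally {
      server.close();                            // mirrors historyServer.stop()
    }
  }
}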
[43/43] git commit: Merge remote-tracking branch 'origin/trunk' into
HDFS-EC
Posted by vi...@apache.org.
Merge remote-tracking branch 'origin/trunk' into HDFS-EC
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4ce3a132
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4ce3a132
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4ce3a132
Branch: refs/heads/HDFS-EC
Commit: 4ce3a132e3a0fc0c7b9cd0017df758862c3e237e
Parents: 094cfd22 80d7d18
Author: Vinayakumar B <vi...@apache.org>
Authored: Thu Nov 6 13:29:04 2014 +0530
Committer: Vinayakumar B <vi...@apache.org>
Committed: Thu Nov 6 13:29:04 2014 +0530
----------------------------------------------------------------------
.gitignore | 1 +
BUILDING.txt | 13 -
.../hadoop-annotations/pom.xml | 15 +
.../server/AuthenticationFilter.java | 30 +-
.../server/AuthenticationHandler.java | 3 +
.../server/KerberosAuthenticationHandler.java | 2 +-
.../server/PseudoAuthenticationHandler.java | 9 +-
.../client/TestPseudoAuthenticator.java | 5 +-
.../server/TestAuthenticationFilter.java | 6 +-
.../server/TestPseudoAuthenticationHandler.java | 9 +-
hadoop-common-project/hadoop-common/CHANGES.txt | 64 ++-
hadoop-common-project/hadoop-common/pom.xml | 5 +
.../hadoop-common/src/main/bin/hadoop.cmd | 12 +-
.../crypto/key/kms/KMSClientProvider.java | 5 +-
.../org/apache/hadoop/http/HttpServer2.java | 3 +-
.../security/ssl/SslSocketConnectorSecure.java | 58 +++
.../ZKDelegationTokenSecretManager.java | 6 +-
.../DelegationTokenAuthenticationFilter.java | 22 +-
.../main/java/org/apache/hadoop/util/Shell.java | 4 +-
.../src/site/markdown/filesystem/testing.md | 47 ---
.../hadoop/fs/FileSystemContractBaseTest.java | 6 +-
.../fs/contract/AbstractContractDeleteTest.java | 27 ++
.../fs/contract/AbstractContractMkdirTest.java | 19 +
.../fs/contract/AbstractContractRenameTest.java | 41 ++
.../hadoop/fs/contract/ContractOptions.java | 7 +
.../hadoop/fs/contract/ContractTestUtils.java | 139 +++++++
.../java/org/apache/hadoop/io/TestUTF8.java | 23 +-
.../hadoop/security/TestSecurityUtil.java | 4 -
.../hadoop/security/ssl/KeyStoreTestUtil.java | 73 ++--
.../src/test/resources/contract/localfs.xml | 4 +
hadoop-common-project/hadoop-kms/pom.xml | 6 +
.../hadoop/crypto/key/kms/server/MiniKMS.java | 5 +-
.../hadoop/crypto/key/kms/server/TestKMS.java | 133 +++++-
hadoop-dist/pom.xml | 2 +-
hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml | 6 +
.../org/apache/hadoop/test/TestJettyHelper.java | 3 +-
.../hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java | 6 +-
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 62 ++-
hadoop-hdfs-project/hadoop-hdfs/pom.xml | 6 +
.../java/org/apache/hadoop/hdfs/DFSClient.java | 2 +-
.../org/apache/hadoop/hdfs/DFSInputStream.java | 7 +-
.../org/apache/hadoop/hdfs/DFSOutputStream.java | 2 +
.../sasl/SaslDataTransferServer.java | 5 +
.../qjournal/server/GetJournalEditServlet.java | 21 +-
.../hdfs/server/blockmanagement/BlockInfo.java | 4 +-
.../BlockInfoUnderConstruction.java | 8 +-
.../server/blockmanagement/BlockManager.java | 2 +-
.../hadoop/hdfs/server/common/Storage.java | 7 +-
.../namenode/EditLogFileOutputStream.java | 6 +-
.../hdfs/server/namenode/FSEditLogLoader.java | 4 +-
.../hdfs/server/namenode/FSNamesystem.java | 96 +++--
.../hdfs/server/namenode/ImageServlet.java | 18 +-
.../hdfs/server/namenode/NameNodeRpcServer.java | 6 +-
.../org/apache/hadoop/hdfs/tools/DFSAdmin.java | 5 +-
.../org/apache/hadoop/hdfs/MiniDFSCluster.java | 2 +-
.../apache/hadoop/hdfs/TestRollingUpgrade.java | 8 +-
.../hadoop/hdfs/qjournal/TestNNWithQJM.java | 2 +-
.../hdfs/qjournal/TestSecureNNWithQJM.java | 216 ++++++++++
.../server/blockmanagement/TestBlockInfo.java | 6 +-
.../TestBlockInfoUnderConstruction.java | 2 +-
.../blockmanagement/TestBlockManager.java | 2 +-
.../blockmanagement/TestDatanodeDescriptor.java | 4 +-
.../blockmanagement/TestHeartbeatHandling.java | 6 +-
.../blockmanagement/TestReplicationPolicy.java | 4 +-
.../datanode/TestDataNodeVolumeFailure.java | 5 +
.../hdfs/server/namenode/TestAuditLogger.java | 95 +++++
.../hdfs/server/namenode/TestCheckpoint.java | 14 +-
.../TestCommitBlockSynchronization.java | 6 +-
hadoop-mapreduce-project/CHANGES.txt | 16 +
.../hadoop-mapreduce-client-jobclient/pom.xml | 6 +
.../hadoop/mapred/TestJavaSerialization.java | 34 +-
hadoop-project/pom.xml | 6 +
hadoop-tools/hadoop-aws/pom.xml | 7 +
.../org/apache/hadoop/fs/s3/S3Credentials.java | 4 +-
.../fs/s3a/BasicAWSCredentialsProvider.java | 8 +-
.../org/apache/hadoop/fs/s3a/Constants.java | 7 +-
.../org/apache/hadoop/fs/s3a/S3AFileSystem.java | 147 ++++---
.../apache/hadoop/fs/s3a/S3AInputStream.java | 38 +-
.../apache/hadoop/fs/s3a/S3AOutputStream.java | 18 +-
.../site/markdown/tools/hadoop-aws/index.md | 417 +++++++++++++++++++
.../fs/contract/s3a/TestS3AContractRename.java | 13 +-
.../fs/s3/S3FileSystemContractBaseTest.java | 11 +-
.../fs/s3a/S3AFileSystemContractBaseTest.java | 327 ---------------
.../org/apache/hadoop/fs/s3a/S3ATestUtils.java | 51 +++
.../fs/s3a/TestS3AFileSystemContract.java | 105 +++++
.../hadoop/fs/s3a/scale/S3AScaleTestBase.java | 89 ++++
.../fs/s3a/scale/TestS3ADeleteManyFiles.java | 131 ++++++
.../NativeS3FileSystemContractBaseTest.java | 11 +-
.../TestJets3tNativeFileSystemStore.java | 3 +
.../src/test/resources/contract/s3a.xml | 5 +
.../hadoop-aws/src/test/resources/core-site.xml | 51 +++
hadoop-yarn-project/CHANGES.txt | 34 ++
.../api/records/timeline/TimelineEntity.java | 49 ++-
.../api/records/timeline/TimelineEvent.java | 17 +-
.../hadoop/yarn/client/TestRMFailover.java | 8 +-
.../records/timeline/TestTimelineRecords.java | 128 +++++-
.../pom.xml | 7 +
.../server/timeline/MemoryTimelineStore.java | 4 +
.../timeline/webapp/TimelineWebServices.java | 9 +-
.../TestApplicationHistoryServer.java | 77 ++--
.../server/timeline/TimelineStoreTestUtils.java | 4 +
.../server/resourcemanager/RMAppManager.java | 55 +--
.../resourcemanager/RMFatalEventType.java | 1 -
.../server/resourcemanager/ResourceManager.java | 60 ++-
.../resourcemanager/recovery/RMStateStore.java | 24 +-
.../server/resourcemanager/rmapp/RMAppImpl.java | 47 +++
.../rmapp/RMAppRecoverEvent.java | 36 ++
.../rmapp/attempt/RMAppAttemptImpl.java | 6 +-
.../scheduler/QueueNotFoundException.java | 32 ++
.../scheduler/capacity/CapacityScheduler.java | 7 +-
.../yarn/server/resourcemanager/TestRMHA.java | 62 +++
.../TestWorkPreservingRMRestart.java | 18 +-
.../rmapp/TestRMAppTransitions.java | 65 ++-
...tRMWebServicesHttpStaticUserPermissions.java | 195 +++++++++
114 files changed, 3023 insertions(+), 883 deletions(-)
----------------------------------------------------------------------
[22/43] git commit: HDFS-7333. Improve logging in Storage.tryLock().
Contributed by Konstantin Shvachko.
Posted by vi...@apache.org.
HDFS-7333. Improve logging in Storage.tryLock(). Contributed by Konstantin Shvachko.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/203c6303
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/203c6303
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/203c6303
Branch: refs/heads/HDFS-EC
Commit: 203c63030f625866e220656a8efdf05109dc7627
Parents: 6e8722e
Author: Konstantin V Shvachko <sh...@apache.org>
Authored: Wed Nov 5 09:53:23 2014 -0800
Committer: Konstantin V Shvachko <sh...@apache.org>
Committed: Wed Nov 5 09:53:23 2014 -0800
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++
.../java/org/apache/hadoop/hdfs/server/common/Storage.java | 7 ++++---
2 files changed, 6 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/203c6303/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 75a7834..be672a6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -336,6 +336,8 @@ Release 2.7.0 - UNRELEASED
HDFS-7335. Redundant checkOperation() in FSN.analyzeFileState().
(Milan Desai via shv)
+ HDFS-7333. Improve logging in Storage.tryLock(). (shv)
+
OPTIMIZATIONS
BUG FIXES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/203c6303/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
index f83cf3b..31fdb84 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
@@ -716,12 +716,13 @@ public abstract class Storage extends StorageInfo {
} catch(OverlappingFileLockException oe) {
// Cannot read from the locked file on Windows.
String lockingJvmName = Path.WINDOWS ? "" : (" " + file.readLine());
- LOG.error("It appears that another namenode" + lockingJvmName
- + " has already locked the storage directory");
+ LOG.error("It appears that another node " + lockingJvmName
+ + " has already locked the storage directory: " + root, oe);
file.close();
return null;
} catch(IOException e) {
- LOG.error("Failed to acquire lock on " + lockF + ". If this storage directory is mounted via NFS, "
+ LOG.error("Failed to acquire lock on " + lockF
+ + ". If this storage directory is mounted via NFS, "
+ "ensure that the appropriate nfs lock services are running.", e);
file.close();
throw e;
[20/43] git commit: HADOOP-11265. Credential and Key Shell Commands
not available on Windows. Contributed by Larry McCay.
Posted by vi...@apache.org.
HADOOP-11265. Credential and Key Shell Commands not available on Windows. Contributed by Larry McCay.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a7fbd4e6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a7fbd4e6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a7fbd4e6
Branch: refs/heads/HDFS-EC
Commit: a7fbd4e633c18aeeda93c0f013c0a1fcd5963556
Parents: 8e9502e
Author: cnauroth <cn...@apache.org>
Authored: Wed Nov 5 08:38:50 2014 -0800
Committer: cnauroth <cn...@apache.org>
Committed: Wed Nov 5 08:38:50 2014 -0800
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 2 ++
.../hadoop-common/src/main/bin/hadoop.cmd | 12 +++++++++++-
2 files changed, 13 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7fbd4e6/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 4bfe46b..fd8528e 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1065,6 +1065,8 @@ Release 2.6.0 - UNRELEASED
HADOOP-11241. Fixed intermittent TestNMSimulator failure due to timing issue.
(Varun Vasudev via zjshen)
+ HADOOP-11265. Credential and Key Shell Commands not available on Windows.
+ (Larry McCay via cnauroth)
Release 2.5.2 - UNRELEASED
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7fbd4e6/hadoop-common-project/hadoop-common/src/main/bin/hadoop.cmd
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop.cmd b/hadoop-common-project/hadoop-common/src/main/bin/hadoop.cmd
index f9cfe14..2e3e86f 100644
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop.cmd
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop.cmd
@@ -142,7 +142,7 @@ call :updatepath %HADOOP_BIN_PATH%
)
)
- set corecommands=fs version jar checknative distcp daemonlog archive classpath
+ set corecommands=fs version jar checknative distcp daemonlog archive classpath credential key
for %%i in ( %corecommands% ) do (
if %hadoop-command% == %%i set corecommand=true
)
@@ -202,6 +202,14 @@ call :updatepath %HADOOP_BIN_PATH%
set CLASS=org.apache.hadoop.util.Classpath
goto :eof
+:credential
+ set CLASS=org.apache.hadoop.security.alias.CredentialShell
+ goto :eof
+
+:key
+ set CLASS=org.apache.hadoop.crypto.key.KeyShell
+ goto :eof
+
:updatepath
set path_to_add=%*
set current_path_comparable=%path%
@@ -258,6 +266,8 @@ call :updatepath %HADOOP_BIN_PATH%
@echo archive -archiveName NAME -p ^<parent path^> ^<src^>* ^<dest^> create a hadoop archive
@echo classpath prints the class path needed to get the
@echo Hadoop jar and the required libraries
+ @echo credential interact with credential providers
+ @echo key manage keys via the KeyProvider
@echo daemonlog get/set the log level for each daemon
@echo or
@echo CLASSNAME run the class named CLASSNAME
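Note on the change above: with the :credential and :key dispatch targets in place, the commands available through the Unix hadoop script should behave the same from cmd.exe, for example hadoop credential list or hadoop key list against whatever provider is configured (e.g. via hadoop.security.credential.provider.path). These invocations are illustrative and are not taken from this commit.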
[24/43] git commit: HDFS-7199. DFSOutputStream should not silently
drop data if DataStreamer crashes with an unchecked exception (rushabhs via
cmccabe)
Posted by vi...@apache.org.
HDFS-7199. DFSOutputStream should not silently drop data if DataStreamer crashes with an unchecked exception (rushabhs via cmccabe)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/56257fab
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/56257fab
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/56257fab
Branch: refs/heads/HDFS-EC
Commit: 56257fab1d5a7f66bebd9149c7df0436c0a57adb
Parents: 1831280
Author: Colin Patrick Mccabe <cm...@cloudera.com>
Authored: Wed Nov 5 10:51:49 2014 -0800
Committer: Colin Patrick Mccabe <cm...@cloudera.com>
Committed: Wed Nov 5 10:51:49 2014 -0800
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
.../src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java | 2 ++
2 files changed, 5 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/56257fab/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 4a120e9..cf9616b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -397,6 +397,9 @@ Release 2.7.0 - UNRELEASED
HDFS-7324. haadmin command usage prints incorrect command name.
(Brahma Reddy Battula via suresh)
+ HDFS-7199. DFSOutputStream should not silently drop data if DataStreamer
+ crashes with an unchecked exception (rushabhs via cmccabe)
+
Release 2.6.0 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/56257fab/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index 0e1f206..17942f2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -692,6 +692,8 @@ public class DFSOutputStream extends FSOutputSummer
}
if (e instanceof IOException) {
setLastException((IOException)e);
+ } else {
+ setLastException(new IOException("DataStreamer Exception: ",e));
}
hasError = true;
if (errorIndex == -1 && restartingNodeIndex == -1) {
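Note on the change above: the two added lines matter because DataStreamer runs on its own thread. An unchecked exception there previously left lastException unset, so the writing client never learned the pipeline had died and data was silently dropped. A rough, self-contained sketch of the intended behaviour, using plain JDK types rather than the real DFSOutputStream:

import java.io.IOException;
import java.util.concurrent.atomic.AtomicReference;

public class StreamerFailureSketch {
  private final AtomicReference<IOException> lastException =
      new AtomicReference<IOException>();

  void streamerBody() {
    try {
      throw new NullPointerException("simulated DataStreamer crash");
    } catch (Throwable e) {
      if (e instanceof IOException) {
        lastException.compareAndSet(null, (IOException) e);
      } else {
        // previously dropped on the floor; now surfaced to the writer
        lastException.compareAndSet(null,
            new IOException("DataStreamer Exception: ", e));
      }
    }
  }

  void write() throws IOException {
    IOException e = lastException.get();
    if (e != null) {
      throw e;                     // caller learns the pipeline is dead
    }
    // ... enqueue packet ...
  }

  public static void main(String[] args) {
    StreamerFailureSketch s = new StreamerFailureSketch();
    s.streamerBody();
    try {
      s.write();
    } catch (IOException expected) {
      System.out.println("write failed as expected: " + expected);
    }
  }
}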
[37/43] git commit: YARN-2813. Fixed NPE from
MemoryTimelineStore.getDomains. Contributed by Zhijie Shen
Posted by vi...@apache.org.
YARN-2813. Fixed NPE from MemoryTimelineStore.getDomains. Contributed by Zhijie Shen
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e4b4901d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e4b4901d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e4b4901d
Branch: refs/heads/HDFS-EC
Commit: e4b4901d36875faa98ec8628e22e75499e0741ab
Parents: ef5af4f
Author: Xuan <xg...@apache.org>
Authored: Wed Nov 5 18:24:41 2014 -0800
Committer: Xuan <xg...@apache.org>
Committed: Wed Nov 5 18:24:41 2014 -0800
----------------------------------------------------------------------
hadoop-yarn-project/CHANGES.txt | 2 ++
.../apache/hadoop/yarn/server/timeline/MemoryTimelineStore.java | 4 ++++
.../hadoop/yarn/server/timeline/TimelineStoreTestUtils.java | 4 ++++
3 files changed, 10 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4b4901d/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 887d1d4..d65860c 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -872,6 +872,8 @@ Release 2.6.0 - UNRELEASED
FatalEventDispatcher try to transition RM to StandBy at the same time.
(Rohith Sharmaks via jianhe)
+ YARN-2813. Fixed NPE from MemoryTimelineStore.getDomains. (Zhijie Shen via xgong)
+
Release 2.5.2 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4b4901d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/MemoryTimelineStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/MemoryTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/MemoryTimelineStore.java
index 2d126b4..af714b1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/MemoryTimelineStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/MemoryTimelineStore.java
@@ -241,6 +241,10 @@ public class MemoryTimelineStore
public TimelineDomains getDomains(String owner)
throws IOException {
List<TimelineDomain> domains = new ArrayList<TimelineDomain>();
+ Set<TimelineDomain> domainsOfOneOwner = domainsByOwner.get(owner);
+ if (domainsOfOneOwner == null) {
+ return new TimelineDomains();
+ }
for (TimelineDomain domain : domainsByOwner.get(owner)) {
TimelineDomain domainToReturn = createTimelineDomain(
domain.getId(),
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4b4901d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/TimelineStoreTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/TimelineStoreTestUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/TimelineStoreTestUtils.java
index 868838e..242478c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/TimelineStoreTestUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/TimelineStoreTestUtils.java
@@ -946,6 +946,10 @@ public class TimelineStoreTestUtils {
assertEquals(2, actualDomains.getDomains().size());
verifyDomainInfo(domain3, actualDomains.getDomains().get(0));
verifyDomainInfo(domain1, actualDomains.getDomains().get(1));
+
+ // owner without any domain
+ actualDomains = store.getDomains("owner_4");
+ assertEquals(0, actualDomains.getDomains().size());
}
private static void verifyDomainInfo(
[09/43] git commit: HDFS-7233. NN logs unnecessary
org.apache.hadoop.hdfs.protocol.UnresolvedPathException. Contributed by
Rushabh S Shah.
Posted by vi...@apache.org.
HDFS-7233. NN logs unnecessary org.apache.hadoop.hdfs.protocol.UnresolvedPathException. Contributed by Rushabh S Shah.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5bd3a569
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5bd3a569
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5bd3a569
Branch: refs/heads/HDFS-EC
Commit: 5bd3a569f941ffcfc425a55288bec78a37a75aa1
Parents: 99d7103
Author: Jing Zhao <ji...@apache.org>
Authored: Tue Nov 4 16:02:07 2014 -0800
Committer: Jing Zhao <ji...@apache.org>
Committed: Tue Nov 4 16:02:07 2014 -0800
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
.../org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java | 3 ++-
2 files changed, 5 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5bd3a569/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index c896ead..3644ce8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -694,6 +694,9 @@ Release 2.6.0 - UNRELEASED
HDFS-7276. Limit the number of byte arrays used by DFSOutputStream and
provide a mechanism for recycling arrays. (szetszwo)
+ HDFS-7233. NN logs unnecessary org.apache.hadoop.hdfs.protocol.UnresolvedPathException.
+ (Rushabh S Shah via jing9)
+
OPTIMIZATIONS
HDFS-6690. Deduplicate xattr names in memory. (wang)
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5bd3a569/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index 5b36154..0432526 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -399,7 +399,8 @@ class NameNodeRpcServer implements NamenodeProtocols {
DSQuotaExceededException.class,
AclException.class,
FSLimitException.PathComponentTooLongException.class,
- FSLimitException.MaxDirectoryItemsExceededException.class);
+ FSLimitException.MaxDirectoryItemsExceededException.class,
+ UnresolvedPathException.class);
}
/** Allow access to the client RPC server for testing */
[10/43] git commit: HADOOP-11260. Patch up Jetty to disable SSLv3.
(Mike Yoder via kasha)
Posted by vi...@apache.org.
HADOOP-11260. Patch up Jetty to disable SSLv3. (Mike Yoder via kasha)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dbf30e3c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dbf30e3c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dbf30e3c
Branch: refs/heads/HDFS-EC
Commit: dbf30e3c0e1522e6588aecac71c990c0b01fd8fb
Parents: 5bd3a56
Author: Karthik Kambatla <ka...@apache.org>
Authored: Tue Nov 4 16:18:24 2014 -0800
Committer: Karthik Kambatla <ka...@apache.org>
Committed: Tue Nov 4 16:18:24 2014 -0800
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 2 +
.../org/apache/hadoop/http/HttpServer2.java | 3 +-
.../security/ssl/SslSocketConnectorSecure.java | 58 ++++++++++++++++++++
.../hadoop/crypto/key/kms/server/MiniKMS.java | 5 +-
.../org/apache/hadoop/test/TestJettyHelper.java | 3 +-
5 files changed, 67 insertions(+), 4 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dbf30e3c/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index ede1148..7f01207 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1056,6 +1056,8 @@ Release 2.6.0 - UNRELEASED
HADOOP-11241. Fixed intermittent TestNMSimulator failure due to timing issue.
(Varun Vasudev via zjshen)
+ HADOOP-11260. Patch up Jetty to disable SSLv3. (Mike Yoder via kasha)
+
Release 2.5.1 - 2014-09-05
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dbf30e3c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
index 8aa777b..168fd77 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
@@ -53,6 +53,7 @@ import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.ConfServlet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.security.ssl.SslSocketConnectorSecure;
import org.apache.hadoop.jmx.JMXJsonServlet;
import org.apache.hadoop.log.LogLevel;
import org.apache.hadoop.metrics.MetricsServlet;
@@ -306,7 +307,7 @@ public final class HttpServer2 implements FilterContainer {
if ("http".equals(scheme)) {
listener = HttpServer2.createDefaultChannelConnector();
} else if ("https".equals(scheme)) {
- SslSocketConnector c = new SslSocketConnector();
+ SslSocketConnector c = new SslSocketConnectorSecure();
c.setNeedClientAuth(needsClientAuth);
c.setKeyPassword(keyPassword);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dbf30e3c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SslSocketConnectorSecure.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SslSocketConnectorSecure.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SslSocketConnectorSecure.java
new file mode 100644
index 0000000..52ab7ad
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SslSocketConnectorSecure.java
@@ -0,0 +1,58 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.security.ssl;
+
+import org.mortbay.jetty.security.SslSocketConnector;
+
+import javax.net.ssl.SSLServerSocket;
+import java.io.IOException;
+import java.net.ServerSocket;
+import java.util.ArrayList;
+
+/**
+ * This subclass of the Jetty SslSocketConnector exists solely to control
+ * the TLS protocol versions allowed. This is fallout from the POODLE
+ * vulnerability (CVE-2014-3566), which requires that SSLv3 be disabled.
+ * Only TLS 1.0 and later protocols are allowed.
+ */
+public class SslSocketConnectorSecure extends SslSocketConnector {
+
+ public SslSocketConnectorSecure() {
+ super();
+ }
+
+ /**
+ * Create a new ServerSocket that will not accept SSLv3 connections,
+ * but will accept TLSv1.x connections.
+ */
+ protected ServerSocket newServerSocket(String host, int port,int backlog)
+ throws IOException {
+ SSLServerSocket socket = (SSLServerSocket)
+ super.newServerSocket(host, port, backlog);
+ ArrayList<String> nonSSLProtocols = new ArrayList<String>();
+ for (String p : socket.getEnabledProtocols()) {
+ if (!p.contains("SSLv3")) {
+ nonSSLProtocols.add(p);
+ }
+ }
+ socket.setEnabledProtocols(nonSSLProtocols.toArray(
+ new String[nonSSLProtocols.size()]));
+ return socket;
+ }
+}
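
As a standalone illustration (not part of the patch), the snippet below applies the same getEnabledProtocols/setEnabledProtocols filtering that SslSocketConnectorSecure.newServerSocket performs above to a plain JSSE server socket and prints the protocols that survive. It assumes only the default JSSE provider and an ephemeral port.

import javax.net.ssl.SSLServerSocket;
import javax.net.ssl.SSLServerSocketFactory;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class DisableSslv3Demo {
  public static void main(String[] args) throws Exception {
    SSLServerSocketFactory factory =
        (SSLServerSocketFactory) SSLServerSocketFactory.getDefault();
    // Bind an ephemeral port; no TLS handshake is performed in this demo.
    SSLServerSocket socket = (SSLServerSocket) factory.createServerSocket(0);
    List<String> allowed = new ArrayList<String>();
    for (String protocol : socket.getEnabledProtocols()) {
      if (!protocol.contains("SSLv3")) {   // same predicate as newServerSocket above
        allowed.add(protocol);
      }
    }
    socket.setEnabledProtocols(allowed.toArray(new String[allowed.size()]));
    System.out.println(Arrays.toString(socket.getEnabledProtocols()));
    socket.close();
  }
}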
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dbf30e3c/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/MiniKMS.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/MiniKMS.java b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/MiniKMS.java
index 51cc026..4f802cc 100644
--- a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/MiniKMS.java
+++ b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/MiniKMS.java
@@ -22,6 +22,7 @@ import org.apache.commons.io.IOUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.kms.KMSRESTConstants;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.security.ssl.SslSocketConnectorSecure;
import org.mortbay.jetty.Connector;
import org.mortbay.jetty.Server;
import org.mortbay.jetty.security.SslSocketConnector;
@@ -56,7 +57,7 @@ public class MiniKMS {
server.getConnectors()[0].setHost(host);
server.getConnectors()[0].setPort(port);
} else {
- SslSocketConnector c = new SslSocketConnector();
+ SslSocketConnector c = new SslSocketConnectorSecure();
c.setHost(host);
c.setPort(port);
c.setNeedClientAuth(false);
@@ -74,7 +75,7 @@ public class MiniKMS {
private static URL getJettyURL(Server server) {
boolean ssl = server.getConnectors()[0].getClass()
- == SslSocketConnector.class;
+ == SslSocketConnectorSecure.class;
try {
String scheme = (ssl) ? "https" : "http";
return new URL(scheme + "://" +
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dbf30e3c/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestJettyHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestJettyHelper.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestJettyHelper.java
index 5dd6124..50b4327 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestJettyHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestJettyHelper.java
@@ -24,6 +24,7 @@ import java.net.ServerSocket;
import java.net.URL;
import java.net.UnknownHostException;
+import org.apache.hadoop.security.ssl.SslSocketConnectorSecure;
import org.junit.Test;
import org.junit.rules.MethodRule;
import org.junit.runners.model.FrameworkMethod;
@@ -92,7 +93,7 @@ public class TestJettyHelper implements MethodRule {
server.getConnectors()[0].setHost(host);
server.getConnectors()[0].setPort(port);
} else {
- SslSocketConnector c = new SslSocketConnector();
+ SslSocketConnector c = new SslSocketConnectorSecure();
c.setHost(host);
c.setPort(port);
c.setNeedClientAuth(false);
[34/43] git commit: HADOOP-11272. Allow ZKSignerSecretProvider and
ZKDelegationTokenSecretManager to use the same curator client. Contributed by
Arun Suresh.
HADOOP-11272. Allow ZKSignerSecretProvider and ZKDelegationTokenSecretManager to use the same curator client. Contributed by Arun Suresh.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8a261e68
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8a261e68
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8a261e68
Branch: refs/heads/HDFS-EC
Commit: 8a261e68e4177b47be01ceae7310ea56aeb7ca38
Parents: 6ba52d8
Author: Aaron T. Myers <at...@apache.org>
Authored: Wed Nov 5 17:47:22 2014 -0800
Committer: Aaron T. Myers <at...@apache.org>
Committed: Wed Nov 5 17:47:22 2014 -0800
----------------------------------------------------------------------
.../server/AuthenticationFilter.java | 21 +++-
.../server/TestAuthenticationFilter.java | 2 +
hadoop-common-project/hadoop-common/CHANGES.txt | 3 +
.../ZKDelegationTokenSecretManager.java | 6 +-
.../DelegationTokenAuthenticationFilter.java | 22 ++--
.../hadoop/crypto/key/kms/server/TestKMS.java | 101 +++++++++++++++++++
6 files changed, 142 insertions(+), 13 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a261e68/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
index a070345..58d97ca 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
@@ -219,6 +219,19 @@ public class AuthenticationFilter implements Filter {
authHandlerClassName = authHandlerName;
}
+ validity = Long.parseLong(config.getProperty(AUTH_TOKEN_VALIDITY, "36000"))
+ * 1000; //10 hours
+ initializeSecretProvider(filterConfig);
+
+ initializeAuthHandler(authHandlerClassName, filterConfig);
+
+
+ cookieDomain = config.getProperty(COOKIE_DOMAIN, null);
+ cookiePath = config.getProperty(COOKIE_PATH, null);
+ }
+
+ protected void initializeAuthHandler(String authHandlerClassName, FilterConfig filterConfig)
+ throws ServletException {
try {
Class<?> klass = Thread.currentThread().getContextClassLoader().loadClass(authHandlerClassName);
authHandler = (AuthenticationHandler) klass.newInstance();
@@ -230,9 +243,10 @@ public class AuthenticationFilter implements Filter {
} catch (IllegalAccessException ex) {
throw new ServletException(ex);
}
+ }
- validity = Long.parseLong(config.getProperty(AUTH_TOKEN_VALIDITY, "36000"))
- * 1000; //10 hours
+ protected void initializeSecretProvider(FilterConfig filterConfig)
+ throws ServletException {
secretProvider = (SignerSecretProvider) filterConfig.getServletContext().
getAttribute(SIGNER_SECRET_PROVIDER_ATTRIBUTE);
if (secretProvider == null) {
@@ -254,9 +268,6 @@ public class AuthenticationFilter implements Filter {
customSecretProvider = true;
}
signer = new Signer(secretProvider);
-
- cookieDomain = config.getProperty(COOKIE_DOMAIN, null);
- cookiePath = config.getProperty(COOKIE_PATH, null);
}
@SuppressWarnings("unchecked")
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a261e68/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationFilter.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationFilter.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationFilter.java
index 5d93fcf..3b6b958 100644
--- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationFilter.java
+++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationFilter.java
@@ -283,6 +283,8 @@ public class TestAuthenticationFilter {
filter = new AuthenticationFilter();
try {
FilterConfig config = Mockito.mock(FilterConfig.class);
+ ServletContext sc = Mockito.mock(ServletContext.class);
+ Mockito.when(config.getServletContext()).thenReturn(sc);
Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn("kerberos");
Mockito.when(config.getInitParameterNames()).thenReturn(
new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE)).elements());
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a261e68/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 8567e1e..55ef9d3 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -409,6 +409,9 @@ Release 2.7.0 - UNRELEASED
HADOOP-10714. AmazonS3Client.deleteObjects() need to be limited to 1000
entries per call. (Juan Yu via atm)
+ HADOOP-11272. Allow ZKSignerSecretProvider and
+ ZKDelegationTokenSecretManager to use the same curator client. (Arun Suresh via atm)
+
Release 2.6.0 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a261e68/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java
index 82dd2da..ebc45a5 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java
@@ -136,7 +136,11 @@ public abstract class ZKDelegationTokenSecretManager<TokenIdent extends Abstract
conf.getLong(DelegationTokenManager.REMOVAL_SCAN_INTERVAL,
DelegationTokenManager.REMOVAL_SCAN_INTERVAL_DEFAULT) * 1000);
if (CURATOR_TL.get() != null) {
- zkClient = CURATOR_TL.get();
+ zkClient =
+ CURATOR_TL.get().usingNamespace(
+ conf.get(ZK_DTSM_ZNODE_WORKING_PATH,
+ ZK_DTSM_ZNODE_WORKING_PATH_DEAFULT)
+ + "/" + ZK_DTSM_NAMESPACE);
isExternalClient = true;
} else {
String connString = conf.get(ZK_DTSM_ZK_CONNECTION_STRING);
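
The re-rooting above matters because an externally supplied curator client used to be taken as-is; usingNamespace gives the delegation-token secret manager its own subtree under the configured working path instead of writing wherever the shared client was rooted. A minimal sketch of the same Curator call, using a placeholder connect string and namespace rather than the real defaults:

import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.retry.RetryOneTime;

public class NamespaceDemo {
  public static void main(String[] args) {
    // Placeholder connect string; a running ZooKeeper (or curator TestingServer) is assumed.
    CuratorFramework shared = CuratorFrameworkFactory.newClient(
        "localhost:2181", new RetryOneTime(1000));
    shared.start();
    // Same idea as the patch: a namespaced view of the shared client, so every
    // path created through dtsmView lives under /testZKPath/ZKDTSMRoot.
    CuratorFramework dtsmView = shared.usingNamespace("testZKPath/ZKDTSMRoot");
    System.out.println(dtsmView.getNamespace());
    shared.close();
  }
}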
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a261e68/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationFilter.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationFilter.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationFilter.java
index aa9ec99..fbd1129 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationFilter.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationFilter.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.security.token.delegation.web;
import com.google.common.annotations.VisibleForTesting;
+
import org.apache.curator.framework.CuratorFramework;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@@ -46,6 +47,7 @@ import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletRequestWrapper;
import javax.servlet.http.HttpServletResponse;
+
import java.io.IOException;
import java.io.Writer;
import java.nio.charset.Charset;
@@ -156,14 +158,7 @@ public class DelegationTokenAuthenticationFilter
@Override
public void init(FilterConfig filterConfig) throws ServletException {
- // A single CuratorFramework should be used for a ZK cluster.
- // If the ZKSignerSecretProvider has already created it, it has to
- // be set here... to be used by the ZKDelegationTokenSecretManager
- ZKDelegationTokenSecretManager.setCurator((CuratorFramework)
- filterConfig.getServletContext().getAttribute(ZKSignerSecretProvider.
- ZOOKEEPER_SIGNER_SECRET_PROVIDER_CURATOR_CLIENT_ATTRIBUTE));
super.init(filterConfig);
- ZKDelegationTokenSecretManager.setCurator(null);
AuthenticationHandler handler = getAuthenticationHandler();
AbstractDelegationTokenSecretManager dtSecretManager =
(AbstractDelegationTokenSecretManager) filterConfig.getServletContext().
@@ -188,6 +183,19 @@ public class DelegationTokenAuthenticationFilter
ProxyUsers.refreshSuperUserGroupsConfiguration(conf, PROXYUSER_PREFIX);
}
+ @Override
+ protected void initializeAuthHandler(String authHandlerClassName,
+ FilterConfig filterConfig) throws ServletException {
+ // A single CuratorFramework should be used for a ZK cluster.
+ // If the ZKSignerSecretProvider has already created it, it has to
+ // be set here... to be used by the ZKDelegationTokenSecretManager
+ ZKDelegationTokenSecretManager.setCurator((CuratorFramework)
+ filterConfig.getServletContext().getAttribute(ZKSignerSecretProvider.
+ ZOOKEEPER_SIGNER_SECRET_PROVIDER_CURATOR_CLIENT_ATTRIBUTE));
+ super.initializeAuthHandler(authHandlerClassName, filterConfig);
+ ZKDelegationTokenSecretManager.setCurator(null);
+ }
+
protected void setHandlerAuthMethod(SaslRpcServer.AuthMethod authMethod) {
this.handlerAuthMethod = authMethod;
}
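
Taken together, these two diffs turn filter initialization into a small template method: the secret provider is set up first, then the overridable initializeAuthHandler hook runs, and DelegationTokenAuthenticationFilter uses that hook to stage the shared curator client around handler construction. A stripped-down sketch of the ordering; the class and method names here are stand-ins, not the real Hadoop ones:

public class HookOrderingDemo {
  static Object sharedCurator;                 // stand-in for the thread-local setCurator()

  static class BaseFilter {
    void init() {
      initializeSecretProvider();              // may create the ZK-backed signer client
      initializeAuthHandler();                 // subclass hook runs after the provider exists
    }
    void initializeSecretProvider() { System.out.println("secret provider ready"); }
    void initializeAuthHandler()    { System.out.println("handler sees curator=" + sharedCurator); }
  }

  static class TokenFilter extends BaseFilter {
    @Override
    void initializeAuthHandler() {
      sharedCurator = "client-from-signer";    // hand the signer's client to the DT manager
      super.initializeAuthHandler();           // handler (and its secret manager) initialize here
      sharedCurator = null;                    // clear again, mirroring setCurator(null)
    }
  }

  public static void main(String[] args) {
    new TokenFilter().init();
  }
}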
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a261e68/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
index 4628e36..9e76178 100644
--- a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
+++ b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.crypto.key.kms.server;
+import org.apache.curator.test.TestingServer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.kms.server.KeyAuthorizationKeyProvider;
import org.apache.hadoop.crypto.key.KeyProvider;
@@ -1585,6 +1586,106 @@ public class TestKMS {
}
@Test
+ public void testKMSWithZKSigner() throws Exception {
+ doKMSWithZK(true, false);
+ }
+
+ @Test
+ public void testKMSWithZKDTSM() throws Exception {
+ doKMSWithZK(false, true);
+ }
+
+ @Test
+ public void testKMSWithZKSignerAndDTSM() throws Exception {
+ doKMSWithZK(true, true);
+ }
+
+ public void doKMSWithZK(boolean zkDTSM, boolean zkSigner) throws Exception {
+ TestingServer zkServer = null;
+ try {
+ zkServer = new TestingServer();
+ zkServer.start();
+
+ Configuration conf = new Configuration();
+ conf.set("hadoop.security.authentication", "kerberos");
+ UserGroupInformation.setConfiguration(conf);
+ final File testDir = getTestDir();
+ conf = createBaseKMSConf(testDir);
+ conf.set("hadoop.kms.authentication.type", "kerberos");
+ conf.set("hadoop.kms.authentication.kerberos.keytab", keytab.getAbsolutePath());
+ conf.set("hadoop.kms.authentication.kerberos.principal", "HTTP/localhost");
+ conf.set("hadoop.kms.authentication.kerberos.name.rules", "DEFAULT");
+
+ if (zkSigner) {
+ conf.set("hadoop.kms.authentication.signer.secret.provider", "zookeeper");
+ conf.set("hadoop.kms.authentication.signer.secret.provider.zookeeper.path","/testKMSWithZKDTSM");
+ conf.set("hadoop.kms.authentication.signer.secret.provider.zookeeper.connection.string",zkServer.getConnectString());
+ }
+
+ if (zkDTSM) {
+ conf.set("hadoop.kms.authentication.zk-dt-secret-manager.enable", "true");
+ }
+ if (zkDTSM && !zkSigner) {
+ conf.set("hadoop.kms.authentication.zk-dt-secret-manager.zkConnectionString", zkServer.getConnectString());
+ conf.set("hadoop.kms.authentication.zk-dt-secret-manager.znodeWorkingPath", "testZKPath");
+ conf.set("hadoop.kms.authentication.zk-dt-secret-manager.zkAuthType", "none");
+ }
+
+ for (KMSACLs.Type type : KMSACLs.Type.values()) {
+ conf.set(type.getAclConfigKey(), type.toString());
+ }
+ conf.set(KMSACLs.Type.CREATE.getAclConfigKey(),
+ KMSACLs.Type.CREATE.toString() + ",SET_KEY_MATERIAL");
+
+ conf.set(KMSACLs.Type.ROLLOVER.getAclConfigKey(),
+ KMSACLs.Type.ROLLOVER.toString() + ",SET_KEY_MATERIAL");
+
+ conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k0.ALL", "*");
+ conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k1.ALL", "*");
+ conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k2.ALL", "*");
+ conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k3.ALL", "*");
+
+ writeConf(testDir, conf);
+
+ KMSCallable<KeyProvider> c =
+ new KMSCallable<KeyProvider>() {
+ @Override
+ public KeyProvider call() throws Exception {
+ final Configuration conf = new Configuration();
+ conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME, 128);
+ final URI uri = createKMSUri(getKMSUrl());
+
+ final KeyProvider kp =
+ doAs("SET_KEY_MATERIAL",
+ new PrivilegedExceptionAction<KeyProvider>() {
+ @Override
+ public KeyProvider run() throws Exception {
+ KMSClientProvider kp = new KMSClientProvider(uri, conf);
+ kp.createKey("k1", new byte[16],
+ new KeyProvider.Options(conf));
+ kp.createKey("k2", new byte[16],
+ new KeyProvider.Options(conf));
+ kp.createKey("k3", new byte[16],
+ new KeyProvider.Options(conf));
+ return kp;
+ }
+ });
+ return kp;
+ }
+ };
+
+ runServer(null, null, testDir, c);
+ } finally {
+ if (zkServer != null) {
+ zkServer.stop();
+ zkServer.close();
+ }
+ }
+
+ }
+
+
+ @Test
public void testProxyUserKerb() throws Exception {
doProxyUserTest(true);
}
[03/43] git commit: HADOOP-10847. Remove the usage of
sun.security.x509.* in testing code. Contributed by Pascal Oliva.
HADOOP-10847. Remove the usage of sun.security.x509.* in testing code. Contributed by Pascal Oliva.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1eed1020
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1eed1020
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1eed1020
Branch: refs/heads/HDFS-EC
Commit: 1eed1020234b8b5e5444bbc88299bc6689e6b015
Parents: 85da71c
Author: Haohui Mai <wh...@apache.org>
Authored: Tue Nov 4 10:52:05 2014 -0800
Committer: Haohui Mai <wh...@apache.org>
Committed: Tue Nov 4 10:52:05 2014 -0800
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 3 +
hadoop-common-project/hadoop-common/pom.xml | 5 ++
.../hadoop/security/ssl/KeyStoreTestUtil.java | 73 ++++++++------------
hadoop-project/pom.xml | 6 ++
4 files changed, 43 insertions(+), 44 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1eed1020/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 1d6adc3..22e9ae6 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -369,6 +369,9 @@ Release 2.7.0 - UNRELEASED
HADOOP-6857. FsShell should report raw disk usage including replication
factor. (Byron Wong via shv)
+ HADOOP-10847. Remove the usage of sun.security.x509.* in testing code.
+ (Pascal Oliva via wheat9)
+
OPTIMIZATIONS
BUG FIXES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1eed1020/hadoop-common-project/hadoop-common/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml
index df8cd58..6a63ccd 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -280,6 +280,11 @@
<groupId>org.apache.commons</groupId>
<artifactId>commons-compress</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.bouncycastle</groupId>
+ <artifactId>bcprov-jdk16</artifactId>
+ <scope>test</scope>
+ </dependency>
</dependencies>
<build>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1eed1020/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/KeyStoreTestUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/KeyStoreTestUtil.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/KeyStoreTestUtil.java
index b2a839c..07cae8b 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/KeyStoreTestUtil.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/KeyStoreTestUtil.java
@@ -24,18 +24,6 @@ import org.apache.hadoop.security.alias.CredentialProvider;
import org.apache.hadoop.security.alias.CredentialProviderFactory;
import org.apache.hadoop.security.alias.JavaKeyStoreProvider;
-import sun.security.x509.AlgorithmId;
-import sun.security.x509.CertificateAlgorithmId;
-import sun.security.x509.CertificateIssuerName;
-import sun.security.x509.CertificateSerialNumber;
-import sun.security.x509.CertificateSubjectName;
-import sun.security.x509.CertificateValidity;
-import sun.security.x509.CertificateVersion;
-import sun.security.x509.CertificateX509Key;
-import sun.security.x509.X500Name;
-import sun.security.x509.X509CertImpl;
-import sun.security.x509.X509CertInfo;
-
import java.io.File;
import java.io.FileOutputStream;
import java.io.FileWriter;
@@ -57,6 +45,15 @@ import java.util.Date;
import java.util.HashMap;
import java.util.Map;
+import java.security.InvalidKeyException;
+import java.security.NoSuchProviderException;
+import java.security.SignatureException;
+import java.security.cert.CertificateEncodingException;
+import java.security.cert.CertificateException;
+import java.security.cert.CertificateFactory;
+import javax.security.auth.x500.X500Principal;
+import org.bouncycastle.x509.X509V1CertificateGenerator;
+
public class KeyStoreTestUtil {
public static String getClasspathDir(Class klass) throws Exception {
@@ -68,50 +65,38 @@ public class KeyStoreTestUtil {
return baseDir;
}
+ @SuppressWarnings("deprecation")
/**
* Create a self-signed X.509 Certificate.
- * From http://bfo.com/blog/2011/03/08/odds_and_ends_creating_a_new_x_509_certificate.html.
*
* @param dn the X.509 Distinguished Name, eg "CN=Test, L=London, C=GB"
* @param pair the KeyPair
* @param days how many days from now the Certificate is valid for
* @param algorithm the signing algorithm, eg "SHA1withRSA"
* @return the self-signed certificate
- * @throws IOException thrown if an IO error ocurred.
- * @throws GeneralSecurityException thrown if an Security error ocurred.
*/
- public static X509Certificate generateCertificate(String dn, KeyPair pair,
- int days, String algorithm)
- throws GeneralSecurityException, IOException {
- PrivateKey privkey = pair.getPrivate();
- X509CertInfo info = new X509CertInfo();
+ public static X509Certificate generateCertificate(String dn, KeyPair pair, int days, String algorithm)
+ throws CertificateEncodingException,
+ InvalidKeyException,
+ IllegalStateException,
+ NoSuchProviderException, NoSuchAlgorithmException, SignatureException{
+
Date from = new Date();
Date to = new Date(from.getTime() + days * 86400000l);
- CertificateValidity interval = new CertificateValidity(from, to);
BigInteger sn = new BigInteger(64, new SecureRandom());
- X500Name owner = new X500Name(dn);
-
- info.set(X509CertInfo.VALIDITY, interval);
- info.set(X509CertInfo.SERIAL_NUMBER, new CertificateSerialNumber(sn));
- info.set(X509CertInfo.SUBJECT, new CertificateSubjectName(owner));
- info.set(X509CertInfo.ISSUER, new CertificateIssuerName(owner));
- info.set(X509CertInfo.KEY, new CertificateX509Key(pair.getPublic()));
- info
- .set(X509CertInfo.VERSION, new CertificateVersion(CertificateVersion.V3));
- AlgorithmId algo = new AlgorithmId(AlgorithmId.md5WithRSAEncryption_oid);
- info.set(X509CertInfo.ALGORITHM_ID, new CertificateAlgorithmId(algo));
-
- // Sign the cert to identify the algorithm that's used.
- X509CertImpl cert = new X509CertImpl(info);
- cert.sign(privkey, algorithm);
-
- // Update the algorith, and resign.
- algo = (AlgorithmId) cert.get(X509CertImpl.SIG_ALG);
- info
- .set(CertificateAlgorithmId.NAME + "." + CertificateAlgorithmId.ALGORITHM,
- algo);
- cert = new X509CertImpl(info);
- cert.sign(privkey, algorithm);
+ KeyPair keyPair = pair;
+ X509V1CertificateGenerator certGen = new X509V1CertificateGenerator();
+ X500Principal dnName = new X500Principal(dn);
+
+ certGen.setSerialNumber(sn);
+ certGen.setIssuerDN(dnName);
+ certGen.setNotBefore(from);
+ certGen.setNotAfter(to);
+ certGen.setSubjectDN(dnName);
+ certGen.setPublicKey(keyPair.getPublic());
+ certGen.setSignatureAlgorithm(algorithm);
+
+ X509Certificate cert = certGen.generate(pair.getPrivate());
return cert;
}
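
The rewritten helper keeps its original call shape, so callers only pick up the new BouncyCastle test dependency. A hedged usage sketch follows; it assumes the hadoop-common test classes and bcprov-jdk16 are on the classpath, and it registers the BouncyCastle provider explicitly in case the default providers cannot service the generator:

import java.security.KeyPair;
import java.security.KeyPairGenerator;
import java.security.Security;
import java.security.cert.X509Certificate;
import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
import org.bouncycastle.jce.provider.BouncyCastleProvider;

public class SelfSignedCertDemo {
  public static void main(String[] args) throws Exception {
    Security.addProvider(new BouncyCastleProvider());   // used by the BC certificate generator
    KeyPairGenerator kpg = KeyPairGenerator.getInstance("RSA");
    kpg.initialize(2048);
    KeyPair pair = kpg.generateKeyPair();
    // Same arguments as before the patch: DN, key pair, validity in days, algorithm.
    X509Certificate cert = KeyStoreTestUtil.generateCertificate(
        "CN=Test, L=London, C=GB", pair, 30, "SHA1withRSA");
    System.out.println(cert.getSubjectDN() + " valid until " + cert.getNotAfter());
  }
}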
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1eed1020/hadoop-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index cac900f..d3c404e 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -900,6 +900,12 @@
<artifactId>curator-test</artifactId>
<version>2.6.0</version>
</dependency>
+ <dependency>
+ <groupId>org.bouncycastle</groupId>
+ <artifactId>bcprov-jdk16</artifactId>
+ <version>1.46</version>
+ <scope>test</scope>
+ </dependency>
</dependencies>
</dependencyManagement>
[35/43] git commit: HDFS-7366. BlockInfo should take replication as
a short in the constructor. Contributed by Li Lu.
HDFS-7366. BlockInfo should take replication as a short in the constructor. Contributed by Li Lu.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/86eb27ba
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/86eb27ba
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/86eb27ba
Branch: refs/heads/HDFS-EC
Commit: 86eb27ba1deef24f0cbd282e453971027b0bfa36
Parents: 8a261e6
Author: Haohui Mai <wh...@apache.org>
Authored: Wed Nov 5 17:50:01 2014 -0800
Committer: Haohui Mai <wh...@apache.org>
Committed: Wed Nov 5 18:14:18 2014 -0800
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
.../apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java | 4 ++--
.../server/blockmanagement/BlockInfoUnderConstruction.java | 4 ++--
.../hadoop/hdfs/server/blockmanagement/BlockManager.java | 2 +-
.../hadoop/hdfs/server/blockmanagement/TestBlockInfo.java | 6 +++---
.../server/blockmanagement/TestBlockInfoUnderConstruction.java | 2 +-
.../hadoop/hdfs/server/blockmanagement/TestBlockManager.java | 2 +-
.../hdfs/server/blockmanagement/TestDatanodeDescriptor.java | 4 ++--
.../hdfs/server/blockmanagement/TestHeartbeatHandling.java | 6 +++---
.../hdfs/server/blockmanagement/TestReplicationPolicy.java | 4 ++--
.../hdfs/server/namenode/TestCommitBlockSynchronization.java | 6 +++---
11 files changed, 23 insertions(+), 20 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/86eb27ba/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b0655cf..6ec8199 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -397,6 +397,9 @@ Release 2.7.0 - UNRELEASED
HDFS-7324. haadmin command usage prints incorrect command name.
(Brahma Reddy Battula via suresh)
+ HDFS-7366. BlockInfo should take replication as an short in the constructor.
+ (Li Lu via wheat9)
+
Release 2.6.0 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/86eb27ba/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
index 0ce1121..f547b1a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
@@ -56,12 +56,12 @@ public class BlockInfo extends Block implements LightWeightGSet.LinkedElement {
* Construct an entry for blocksmap
* @param replication the block's replication factor
*/
- public BlockInfo(int replication) {
+ public BlockInfo(short replication) {
this.triplets = new Object[3*replication];
this.bc = null;
}
- public BlockInfo(Block blk, int replication) {
+ public BlockInfo(Block blk, short replication) {
super(blk);
this.triplets = new Object[3*replication];
this.bc = null;
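
The (short) casts added at every call site in the tests below follow from a Java language rule rather than any behavioral change: an int argument is never implicitly narrowed to a short parameter, so tightening the constructor forces each caller to spell out the conversion. A tiny standalone illustration:

public class NarrowingDemo {
  static void takesShort(short replication) {
    System.out.println("replication = " + replication);
  }
  public static void main(String[] args) {
    // takesShort(3);        // would not compile: possible lossy conversion from int to short
    takesShort((short) 3);   // explicit cast required, exactly as in the updated call sites
  }
}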
http://git-wip-us.apache.org/repos/asf/hadoop/blob/86eb27ba/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
index 0ea7c2a..f19ad1c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
@@ -153,14 +153,14 @@ public class BlockInfoUnderConstruction extends BlockInfo {
* Create block and set its state to
* {@link BlockUCState#UNDER_CONSTRUCTION}.
*/
- public BlockInfoUnderConstruction(Block blk, int replication) {
+ public BlockInfoUnderConstruction(Block blk, short replication) {
this(blk, replication, BlockUCState.UNDER_CONSTRUCTION, null);
}
/**
* Create a block that is currently being constructed.
*/
- public BlockInfoUnderConstruction(Block blk, int replication,
+ public BlockInfoUnderConstruction(Block blk, short replication,
BlockUCState state,
DatanodeStorageInfo[] targets) {
super(blk, replication);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/86eb27ba/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 94e1c20..5531400 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1987,7 +1987,7 @@ public class BlockManager {
// place a delimiter in the list which separates blocks
// that have been reported from those that have not
- BlockInfo delimiter = new BlockInfo(new Block(), 1);
+ BlockInfo delimiter = new BlockInfo(new Block(), (short) 1);
boolean added = storageInfo.addBlock(delimiter);
assert added : "Delimiting block cannot be present in the node";
int headIndex = 0; //currently the delimiter is in the head of the list
http://git-wip-us.apache.org/repos/asf/hadoop/blob/86eb27ba/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java
index 61094df..f8c583a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java
@@ -48,7 +48,7 @@ public class TestBlockInfo {
@Test
public void testAddStorage() throws Exception {
- BlockInfo blockInfo = new BlockInfo(3);
+ BlockInfo blockInfo = new BlockInfo((short) 3);
final DatanodeStorageInfo storage = DFSTestUtil.createDatanodeStorageInfo("storageID", "127.0.0.1");
@@ -70,7 +70,7 @@ public class TestBlockInfo {
// Create a few dummy blocks and add them to the first storage.
for (int i = 0; i < NUM_BLOCKS; ++i) {
- blockInfos[i] = new BlockInfo(3);
+ blockInfos[i] = new BlockInfo((short) 3);
storage1.addBlock(blockInfos[i]);
}
@@ -95,7 +95,7 @@ public class TestBlockInfo {
LOG.info("Building block list...");
for (int i = 0; i < MAX_BLOCKS; i++) {
blockList.add(new Block(i, 0, GenerationStamp.LAST_RESERVED_STAMP));
- blockInfoList.add(new BlockInfo(blockList.get(i), 3));
+ blockInfoList.add(new BlockInfo(blockList.get(i), (short) 3));
dd.addBlock(blockInfoList.get(i));
// index of the datanode should be 0
http://git-wip-us.apache.org/repos/asf/hadoop/blob/86eb27ba/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoUnderConstruction.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoUnderConstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoUnderConstruction.java
index 703d344..4c36448 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoUnderConstruction.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoUnderConstruction.java
@@ -41,7 +41,7 @@ public class TestBlockInfoUnderConstruction {
dd1.isAlive = dd2.isAlive = dd3.isAlive = true;
BlockInfoUnderConstruction blockInfo = new BlockInfoUnderConstruction(
new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP),
- 3,
+ (short) 3,
BlockUCState.UNDER_CONSTRUCTION,
new DatanodeStorageInfo[] {s1, s2, s3});
http://git-wip-us.apache.org/repos/asf/hadoop/blob/86eb27ba/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index b444ccc..14f2b59 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -385,7 +385,7 @@ public class TestBlockManager {
private BlockInfo blockOnNodes(long blkId, List<DatanodeDescriptor> nodes) {
Block block = new Block(blkId);
- BlockInfo blockInfo = new BlockInfo(block, 3);
+ BlockInfo blockInfo = new BlockInfo(block, (short) 3);
for (DatanodeDescriptor dn : nodes) {
for (DatanodeStorageInfo storage : dn.getStorageInfos()) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/86eb27ba/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java
index 2d7eaf3..e00a4c3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java
@@ -57,8 +57,8 @@ public class TestDatanodeDescriptor {
public void testBlocksCounter() throws Exception {
DatanodeDescriptor dd = BlockManagerTestUtil.getLocalDatanodeDescriptor(true);
assertEquals(0, dd.numBlocks());
- BlockInfo blk = new BlockInfo(new Block(1L), 1);
- BlockInfo blk1 = new BlockInfo(new Block(2L), 2);
+ BlockInfo blk = new BlockInfo(new Block(1L), (short) 1);
+ BlockInfo blk1 = new BlockInfo(new Block(2L), (short) 2);
DatanodeStorageInfo[] storages = dd.getStorageInfos();
assertTrue(storages.length > 0);
final String storageID = storages[0].getStorageID();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/86eb27ba/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java
index 510f159..988a0ed 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java
@@ -172,7 +172,7 @@ public class TestHeartbeatHandling {
dd2.getStorageInfos()[0],
dd3.getStorageInfos()[0]};
BlockInfoUnderConstruction blockInfo = new BlockInfoUnderConstruction(
- new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP), 3,
+ new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP), (short) 3,
BlockUCState.UNDER_RECOVERY, storages);
dd1.addBlockToBeRecovered(blockInfo);
DatanodeCommand[] cmds =
@@ -194,7 +194,7 @@ public class TestHeartbeatHandling {
dd2.setLastUpdate(System.currentTimeMillis() - 40 * 1000);
dd3.setLastUpdate(System.currentTimeMillis());
blockInfo = new BlockInfoUnderConstruction(
- new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP), 3,
+ new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP), (short) 3,
BlockUCState.UNDER_RECOVERY, storages);
dd1.addBlockToBeRecovered(blockInfo);
cmds = NameNodeAdapter.sendHeartBeat(nodeReg1, dd1, namesystem).getCommands();
@@ -215,7 +215,7 @@ public class TestHeartbeatHandling {
dd2.setLastUpdate(System.currentTimeMillis() - 40 * 1000);
dd3.setLastUpdate(System.currentTimeMillis() - 80 * 1000);
blockInfo = new BlockInfoUnderConstruction(
- new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP), 3,
+ new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP), (short) 3,
BlockUCState.UNDER_RECOVERY, storages);
dd1.addBlockToBeRecovered(blockInfo);
cmds = NameNodeAdapter.sendHeartBeat(nodeReg1, dd1, namesystem).getCommands();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/86eb27ba/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
index 34f1b9d..4febd28 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
@@ -1170,7 +1170,7 @@ public class TestReplicationPolicy {
// block under construction, the BlockManager will realize the expected
// replication has been achieved and remove it from the under-replicated
// queue.
- BlockInfoUnderConstruction info = new BlockInfoUnderConstruction(block1, 1);
+ BlockInfoUnderConstruction info = new BlockInfoUnderConstruction(block1, (short) 1);
BlockCollection bc = mock(BlockCollection.class);
when(bc.getBlockReplication()).thenReturn((short)1);
bm.addBlockCollection(info, bc);
@@ -1214,7 +1214,7 @@ public class TestReplicationPolicy {
chosenBlocks = underReplicatedBlocks.chooseUnderReplicatedBlocks(1);
assertTheChosenBlocks(chosenBlocks, 1, 0, 0, 0, 0);
- final BlockInfo info = new BlockInfo(block1, 1);
+ final BlockInfo info = new BlockInfo(block1, (short) 1);
final BlockCollection mbc = mock(BlockCollection.class);
when(mbc.getLastBlock()).thenReturn(info);
when(mbc.getPreferredBlockSize()).thenReturn(block1.getNumBytes() + 1);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/86eb27ba/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java
index bd71870..d0502b3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java
@@ -63,7 +63,7 @@ public class TestCommitBlockSynchronization {
FSNamesystem namesystemSpy = spy(namesystem);
BlockInfoUnderConstruction blockInfo = new BlockInfoUnderConstruction(
- block, 1, HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION, targets);
+ block, (short) 1, HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION, targets);
blockInfo.setBlockCollection(file);
blockInfo.setGenerationStamp(genStamp);
blockInfo.initializeBlockRecovery(genStamp);
@@ -100,7 +100,7 @@ public class TestCommitBlockSynchronization {
lastBlock, genStamp, length, false, false, newTargets, null);
// Simulate 'completing' the block.
- BlockInfo completedBlockInfo = new BlockInfo(block, 1);
+ BlockInfo completedBlockInfo = new BlockInfo(block, (short) 1);
completedBlockInfo.setBlockCollection(file);
completedBlockInfo.setGenerationStamp(genStamp);
doReturn(completedBlockInfo).when(namesystemSpy)
@@ -171,7 +171,7 @@ public class TestCommitBlockSynchronization {
namesystemSpy.commitBlockSynchronization(
lastBlock, genStamp, length, true, false, newTargets, null);
- BlockInfo completedBlockInfo = new BlockInfo(block, 1);
+ BlockInfo completedBlockInfo = new BlockInfo(block, (short) 1);
completedBlockInfo.setBlockCollection(file);
completedBlockInfo.setGenerationStamp(genStamp);
doReturn(completedBlockInfo).when(namesystemSpy)
[27/43] git commit: HADOOP-11266. Remove no longer supported
activation properties for packaging from pom. Contributed by Masatake
Iwasaki.
HADOOP-11266. Remove no longer supported activation properties for packaging from pom. Contributed by Masatake Iwasaki.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bc80251b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bc80251b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bc80251b
Branch: refs/heads/HDFS-EC
Commit: bc80251b1212eca286b48c27b2d1ec1094d20a6a
Parents: b4c951a
Author: Haohui Mai <wh...@apache.org>
Authored: Wed Nov 5 11:15:28 2014 -0800
Committer: Haohui Mai <wh...@apache.org>
Committed: Wed Nov 5 11:15:28 2014 -0800
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
hadoop-dist/pom.xml | 2 +-
2 files changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc80251b/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index fd8528e..dbfb7df 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -400,6 +400,9 @@ Release 2.7.0 - UNRELEASED
HADOOP-11271. Use Time.monotonicNow() in Shell.java instead of Time.now()
(vinayakumarb)
+ HADOOP-11266. Remove no longer supported activation properties for packaging
+ from pom. (Masatake Iwasaki via wheat9)
+
Release 2.6.0 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc80251b/hadoop-dist/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-dist/pom.xml b/hadoop-dist/pom.xml
index 679ae1d..0c82332 100644
--- a/hadoop-dist/pom.xml
+++ b/hadoop-dist/pom.xml
@@ -77,7 +77,7 @@
<activation>
<activeByDefault>false</activeByDefault>
<property>
- <name>tar|rpm|deb</name>
+ <name>tar</name>
</property>
</activation>
<build>
[11/43] git commit: Adding release 2.5.2 to CHANGES.txt
Adding release 2.5.2 to CHANGES.txt
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4de56d27
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4de56d27
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4de56d27
Branch: refs/heads/HDFS-EC
Commit: 4de56d27fbed566f143b286aa74b16ef94260190
Parents: dbf30e3
Author: Karthik Kambatla <ka...@apache.org>
Authored: Tue Nov 4 16:50:40 2014 -0800
Committer: Karthik Kambatla <ka...@apache.org>
Committed: Tue Nov 4 16:50:40 2014 -0800
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 18 ++++++++++++++++--
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 15 ++++++++++++++-
hadoop-mapreduce-project/CHANGES.txt | 13 +++++++++++++
hadoop-yarn-project/CHANGES.txt | 13 +++++++++++++
4 files changed, 56 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4de56d27/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 7f01207..41951e9 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1042,8 +1042,6 @@ Release 2.6.0 - UNRELEASED
HADOOP-11217. Disable SSLv3 in KMS. (Robert Kanter via kasha)
- HADOOP-11243. SSLFactory shouldn't allow SSLv3. (Wei Yan via kasha)
-
HADOOP-11068. Match hadoop.auth cookie format to jetty output.
(Gregory Chanan via cnauroth)
@@ -1056,8 +1054,24 @@ Release 2.6.0 - UNRELEASED
HADOOP-11241. Fixed intermittent TestNMSimulator failure due to timing issue.
(Varun Vasudev via zjshen)
+
+Release 2.5.2 - UNRELEASED
+
+ INCOMPATIBLE CHANGES
+
+ NEW FEATURES
+
+ IMPROVEMENTS
+
+ OPTIMIZATIONS
+
+ BUG FIXES
+
+ HADOOP-11243. SSLFactory shouldn't allow SSLv3. (Wei Yan via kasha)
+
HADOOP-11260. Patch up Jetty to disable SSLv3. (Mike Yoder via kasha)
+
Release 2.5.1 - 2014-09-05
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4de56d27/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3644ce8..098d4ba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1368,9 +1368,22 @@ Release 2.6.0 - UNRELEASED
HDFS-7291. Persist in-memory replicas with appropriate unbuffered copy API
on POSIX and Windows. (Xiaoyu Yao via cnauroth)
+ HDFS-7328. TestTraceAdmin assumes Unix line endings. (cnauroth)
+
+Release 2.5.2 - UNRELEASED
+
+ INCOMPATIBLE CHANGES
+
+ NEW FEATURES
+
+ IMPROVEMENTS
+
+ OPTIMIZATIONS
+
+ BUG FIXES
+
HDFS-7274. Disable SSLv3 in HttpFS. (Robert Kanter via kasha)
- HDFS-7328. TestTraceAdmin assumes Unix line endings. (cnauroth)
Release 2.5.1 - 2014-09-05
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4de56d27/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index deb2311..dce4778 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -456,6 +456,19 @@ Release 2.6.0 - UNRELEASED
MAPREDUCE-6022. map_input_file is missing from streaming job environment.
(jlowe via kihwal)
+Release 2.5.2 - UNRELEASED
+
+ INCOMPATIBLE CHANGES
+
+ NEW FEATURES
+
+ IMPROVEMENTS
+
+ OPTIMIZATIONS
+
+ BUG FIXES
+
+
Release 2.5.1 - 2014-09-05
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4de56d27/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index f7c0dfa..118cdc4 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -856,6 +856,19 @@ Release 2.6.0 - UNRELEASED
of races between the launch and the stop-container call and when root
processes crash. (Billie Rinaldi via vinodkv)
+Release 2.5.2 - UNRELEASED
+
+ INCOMPATIBLE CHANGES
+
+ NEW FEATURES
+
+ IMPROVEMENTS
+
+ OPTIMIZATIONS
+
+ BUG FIXES
+
+
Release 2.5.1 - 2014-09-05
INCOMPATIBLE CHANGES
[12/43] git commit: HADOOP-11268. Update BUILDING.txt to remove the
workaround for tools.jar. Contributed by Li Lu.
HADOOP-11268. Update BUILDING.txt to remove the workaround for tools.jar. Contributed by Li Lu.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d78191a7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d78191a7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d78191a7
Branch: refs/heads/HDFS-EC
Commit: d78191a716738135169bf42c366fa5f8b9f89f23
Parents: 4de56d2
Author: Haohui Mai <wh...@apache.org>
Authored: Tue Nov 4 16:16:02 2014 -0800
Committer: Haohui Mai <wh...@apache.org>
Committed: Tue Nov 4 16:51:11 2014 -0800
----------------------------------------------------------------------
BUILDING.txt | 13 -------------
hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
2 files changed, 3 insertions(+), 13 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d78191a7/BUILDING.txt
----------------------------------------------------------------------
diff --git a/BUILDING.txt b/BUILDING.txt
index 621a221..06bef1f 100644
--- a/BUILDING.txt
+++ b/BUILDING.txt
@@ -198,19 +198,6 @@ export MAVEN_OPTS="-Xms256m -Xmx512m"
----------------------------------------------------------------------------------
-Building on OS/X
-
-----------------------------------------------------------------------------------
-
-A one-time manual step is required to enable building Hadoop OS X with Java 7
-every time the JDK is updated.
-see: https://issues.apache.org/jira/browse/HADOOP-9350
-
-$ sudo mkdir `/usr/libexec/java_home`/Classes
-$ sudo ln -s `/usr/libexec/java_home`/lib/tools.jar `/usr/libexec/java_home`/Classes/classes.jar
-
-----------------------------------------------------------------------------------
-
Building on Windows
----------------------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d78191a7/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 41951e9..af21a16 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -389,6 +389,9 @@ Release 2.7.0 - UNRELEASED
HADOOP-10717. HttpServer2 should load jsp DTD from local jars instead of
going remote. (Dapeng Sun via wheat9)
+ HADOOP-11268. Update BUILDING.txt to remove the workaround for tools.jar.
+ (Li Lu via wheat9)
+
Release 2.6.0 - UNRELEASED
INCOMPATIBLE CHANGES
[18/43] git commit: HDFS-7218. FSNamesystem ACL operations should
write to audit log on failure. (clamb via yliu)
HDFS-7218. FSNamesystem ACL operations should write to audit log on failure. (clamb via yliu)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/73e60125
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/73e60125
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/73e60125
Branch: refs/heads/HDFS-EC
Commit: 73e601259fed0646f115b09112995b51ffef3468
Parents: 73068f6
Author: yliu <yl...@apache.org>
Authored: Wed Nov 5 15:49:37 2014 +0800
Committer: yliu <yl...@apache.org>
Committed: Wed Nov 5 15:49:37 2014 +0800
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +
.../hdfs/server/namenode/FSNamesystem.java | 26 +++++-
.../hdfs/server/namenode/TestAuditLogger.java | 95 ++++++++++++++++++++
3 files changed, 123 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/73e60125/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 098d4ba..707929e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1010,6 +1010,9 @@ Release 2.6.0 - UNRELEASED
fails on Windows, because we cannot deny access to the file owner.
(Chris Nauroth via wheat9)
+ HDFS-7218. FSNamesystem ACL operations should write to audit log on
+ failure. (clamb via yliu)
+
BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
HDFS-6387. HDFS CLI admin tool for creating & deleting an
http://git-wip-us.apache.org/repos/asf/hadoop/blob/73e60125/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 2bc4ba0..76c1423 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -7862,6 +7862,11 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
public FSDirectory getFSDirectory() {
return dir;
}
+ /** Set the FSDirectory. */
+ @VisibleForTesting
+ public void setFSDirectory(FSDirectory dir) {
+ this.dir = dir;
+ }
/** @return the cache manager. */
public CacheManager getCacheManager() {
return cacheManager;
@@ -8728,6 +8733,9 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
List<AclEntry> newAcl = dir.modifyAclEntries(src, aclSpec);
getEditLog().logSetAcl(src, newAcl);
resultingStat = getAuditFileInfo(src, false);
+ } catch (AccessControlException e) {
+ logAuditEvent(false, "modifyAclEntries", srcArg);
+ throw e;
} finally {
writeUnlock();
}
@@ -8752,6 +8760,9 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
List<AclEntry> newAcl = dir.removeAclEntries(src, aclSpec);
getEditLog().logSetAcl(src, newAcl);
resultingStat = getAuditFileInfo(src, false);
+ } catch (AccessControlException e) {
+ logAuditEvent(false, "removeAclEntries", srcArg);
+ throw e;
} finally {
writeUnlock();
}
@@ -8775,6 +8786,9 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
List<AclEntry> newAcl = dir.removeDefaultAcl(src);
getEditLog().logSetAcl(src, newAcl);
resultingStat = getAuditFileInfo(src, false);
+ } catch (AccessControlException e) {
+ logAuditEvent(false, "removeDefaultAcl", srcArg);
+ throw e;
} finally {
writeUnlock();
}
@@ -8798,6 +8812,9 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
dir.removeAcl(src);
getEditLog().logSetAcl(src, AclFeature.EMPTY_ENTRY_LIST);
resultingStat = getAuditFileInfo(src, false);
+ } catch (AccessControlException e) {
+ logAuditEvent(false, "removeAcl", srcArg);
+ throw e;
} finally {
writeUnlock();
}
@@ -8821,6 +8838,9 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
List<AclEntry> newAcl = dir.setAcl(src, aclSpec);
getEditLog().logSetAcl(src, newAcl);
resultingStat = getAuditFileInfo(src, false);
+ } catch (AccessControlException e) {
+ logAuditEvent(false, "setAcl", srcArg);
+ throw e;
} finally {
writeUnlock();
}
@@ -8833,6 +8853,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
FSPermissionChecker pc = getPermissionChecker();
checkOperation(OperationCategory.READ);
byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
+ boolean success = false;
readLock();
try {
checkOperation(OperationCategory.READ);
@@ -8840,9 +8861,12 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
if (isPermissionEnabled) {
checkPermission(pc, src, false, null, null, null, null);
}
- return dir.getAclStatus(src);
+ final AclStatus ret = dir.getAclStatus(src);
+ success = true;
+ return ret;
} finally {
readUnlock();
+ logAuditEvent(success, "getAclStatus", src);
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/73e60125/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java
index 29fee68..e1e1c67 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java
@@ -19,30 +19,39 @@
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_AUDIT_LOGGERS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY;
+import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
+import static org.mockito.Matchers.anyListOf;
+import static org.mockito.Matchers.anyString;
import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.InetAddress;
import java.net.URI;
import java.net.URISyntaxException;
+import java.util.List;
+import com.google.common.collect.Lists;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.web.resources.GetOpParam;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.authorize.ProxyServers;
import org.apache.hadoop.security.authorize.ProxyUsers;
import org.junit.Before;
import org.junit.Test;
+import org.mockito.Mockito;
/**
* Tests for the {@link AuditLogger} custom audit logging interface.
@@ -166,6 +175,87 @@ public class TestAuditLogger {
}
}
+ @Test
+ public void testAuditLogWithAclFailure() throws Exception {
+ final Configuration conf = new HdfsConfiguration();
+ conf.setBoolean(DFS_NAMENODE_ACLS_ENABLED_KEY, true);
+ conf.set(DFS_NAMENODE_AUDIT_LOGGERS_KEY,
+ DummyAuditLogger.class.getName());
+ final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+ try {
+ cluster.waitClusterUp();
+ final FSDirectory dir = cluster.getNamesystem().getFSDirectory();
+ // Set up mock FSDirectory to test FSN audit logging during failure
+ final FSDirectory mockedDir = Mockito.spy(dir);
+ Mockito.doThrow(new AccessControlException("mock setAcl exception")).
+ when(mockedDir).
+ setAcl(anyString(), anyListOf(AclEntry.class));
+ Mockito.doThrow(new AccessControlException("mock getAclStatus exception")).
+ when(mockedDir).
+ getAclStatus(anyString());
+ Mockito.doThrow(new AccessControlException("mock removeAcl exception")).
+ when(mockedDir).
+ removeAcl(anyString());
+ Mockito.doThrow(new AccessControlException("mock removeDefaultAcl exception")).
+ when(mockedDir).
+ removeDefaultAcl(anyString());
+ Mockito.doThrow(new AccessControlException("mock removeAclEntries exception")).
+ when(mockedDir).
+ removeAclEntries(anyString(), anyListOf(AclEntry.class));
+ Mockito.doThrow(new AccessControlException("mock modifyAclEntries exception")).
+ when(mockedDir).
+ modifyAclEntries(anyString(), anyListOf(AclEntry.class));
+ // Replace the FSD with the mock FSD.
+ cluster.getNamesystem().setFSDirectory(mockedDir);
+ assertTrue(DummyAuditLogger.initialized);
+ DummyAuditLogger.resetLogCount();
+
+ final FileSystem fs = cluster.getFileSystem();
+ final Path p = new Path("/");
+ final List<AclEntry> acls = Lists.newArrayList();
+
+ try {
+ fs.getAclStatus(p);
+ } catch (AccessControlException e) {
+ assertExceptionContains("mock getAclStatus exception", e);
+ }
+
+ try {
+ fs.setAcl(p, acls);
+ } catch (AccessControlException e) {
+ assertExceptionContains("mock setAcl exception", e);
+ }
+
+ try {
+ fs.removeAcl(p);
+ } catch (AccessControlException e) {
+ assertExceptionContains("mock removeAcl exception", e);
+ }
+
+ try {
+ fs.removeDefaultAcl(p);
+ } catch (AccessControlException e) {
+ assertExceptionContains("mock removeDefaultAcl exception", e);
+ }
+
+ try {
+ fs.removeAclEntries(p, acls);
+ } catch (AccessControlException e) {
+ assertExceptionContains("mock removeAclEntries exception", e);
+ }
+
+ try {
+ fs.modifyAclEntries(p, acls);
+ } catch (AccessControlException e) {
+ assertExceptionContains("mock modifyAclEntries exception", e);
+ }
+ assertEquals(6, DummyAuditLogger.logCount);
+ assertEquals(6, DummyAuditLogger.unsuccessfulCount);
+ } finally {
+ cluster.shutdown();
+ }
+ }
+
/**
* Tests that a broken audit logger causes requests to fail.
*/
@@ -194,6 +284,7 @@ public class TestAuditLogger {
static boolean initialized;
static int logCount;
+ static int unsuccessfulCount;
static short foundPermission;
static String remoteAddr;
@@ -203,6 +294,7 @@ public class TestAuditLogger {
public static void resetLogCount() {
logCount = 0;
+ unsuccessfulCount = 0;
}
public void logAuditEvent(boolean succeeded, String userName,
@@ -210,6 +302,9 @@ public class TestAuditLogger {
FileStatus stat) {
remoteAddr = addr.getHostAddress();
logCount++;
+ if (!succeeded) {
+ unsuccessfulCount++;
+ }
if (stat != null) {
foundPermission = stat.getPermission().toShort();
}
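All five write-path hunks above follow the same shape: catch the AccessControlException, emit a failed audit record for that specific operation, and rethrow so the caller still sees the error; getAclStatus does the read-path equivalent with a success flag logged from finally. A small self-contained sketch of that pattern follows; AuditSink, AclStore and SecurityException are illustrative stand-ins, not the actual FSNamesystem/AuditLogger types.

    // Sketch only: the catch-log-rethrow audit pattern this commit applies to
    // modifyAclEntries, removeAclEntries, removeDefaultAcl, removeAcl and setAcl.
    public class AuditedAclOps {
      interface AuditSink { void logAuditEvent(boolean succeeded, String cmd, String src); }
      interface AclStore  { void setAcl(String src); String getAclStatus(String src); }

      private final AuditSink audit;
      private final AclStore store;

      AuditedAclOps(AuditSink audit, AclStore store) {
        this.audit = audit;
        this.store = store;
      }

      // Write path: audit the failure, then rethrow unchanged.
      void setAcl(String src) {
        try {
          store.setAcl(src);                         // may throw on a permission failure
        } catch (SecurityException e) {
          audit.logAuditEvent(false, "setAcl", src); // failures now reach the audit log
          throw e;                                   // caller still sees the original error
        }
        audit.logAuditEvent(true, "setAcl", src);    // success path audited as before
      }

      // Read path (as in getAclStatus): audit from finally with a success flag.
      String getAclStatus(String src) {
        boolean success = false;
        try {
          String ret = store.getAclStatus(src);
          success = true;
          return ret;
        } finally {
          audit.logAuditEvent(success, "getAclStatus", src);
        }
      }
    }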
[16/43] git commit: HADOOP-11269. Add java 8 profile for
hadoop-annotations. Contributed by Li Lu.
Posted by vi...@apache.org.
HADOOP-11269. Add java 8 profile for hadoop-annotations. Contributed by Li Lu.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0762b4a3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0762b4a3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0762b4a3
Branch: refs/heads/HDFS-EC
Commit: 0762b4a3fbb8820d38b3c0d1adc0261a156433aa
Parents: b761798
Author: Haohui Mai <wh...@apache.org>
Authored: Tue Nov 4 17:57:31 2014 -0800
Committer: Haohui Mai <wh...@apache.org>
Committed: Tue Nov 4 18:01:14 2014 -0800
----------------------------------------------------------------------
hadoop-common-project/hadoop-annotations/pom.xml | 15 +++++++++++++++
hadoop-common-project/hadoop-common/CHANGES.txt | 2 ++
2 files changed, 17 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0762b4a3/hadoop-common-project/hadoop-annotations/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-annotations/pom.xml b/hadoop-common-project/hadoop-annotations/pom.xml
index 4428cac..84a106e 100644
--- a/hadoop-common-project/hadoop-annotations/pom.xml
+++ b/hadoop-common-project/hadoop-annotations/pom.xml
@@ -71,6 +71,21 @@
</dependency>
</dependencies>
</profile>
+ <profile>
+ <id>jdk1.8</id>
+ <activation>
+ <jdk>1.8</jdk>
+ </activation>
+ <dependencies>
+ <dependency>
+ <groupId>jdk.tools</groupId>
+ <artifactId>jdk.tools</artifactId>
+ <version>1.8</version>
+ <scope>system</scope>
+ <systemPath>${java.home}/../lib/tools.jar</systemPath>
+ </dependency>
+ </dependencies>
+ </profile>
</profiles>
</project>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0762b4a3/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 8195ef8..7827270 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -395,6 +395,8 @@ Release 2.7.0 - UNRELEASED
HADOOP-11230. Add missing dependency of bouncycastle for kms, httpfs, hdfs, MR
and YARN. (Robert Kanter via wheat9)
+ HADOOP-11269. Add java 8 profile for hadoop-annotations. (Li Lu via wheat9)
+
Release 2.6.0 - UNRELEASED
INCOMPATIBLE CHANGES
[23/43] git commit: HDFS-7357. FSNamesystem.checkFileProgress should
log file path. Contributed by Tsz Wo Nicholas Sze.
Posted by vi...@apache.org.
HDFS-7357. FSNamesystem.checkFileProgress should log file path. Contributed by Tsz Wo Nicholas Sze.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/18312804
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/18312804
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/18312804
Branch: refs/heads/HDFS-EC
Commit: 18312804e9c86c0ea6a259e288994fea6fa366ef
Parents: 203c630
Author: Haohui Mai <wh...@apache.org>
Authored: Tue Nov 4 18:03:39 2014 -0800
Committer: Haohui Mai <wh...@apache.org>
Committed: Wed Nov 5 10:14:30 2014 -0800
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 ++
.../BlockInfoUnderConstruction.java | 4 +-
.../namenode/EditLogFileOutputStream.java | 6 +--
.../hdfs/server/namenode/FSEditLogLoader.java | 4 +-
.../hdfs/server/namenode/FSNamesystem.java | 54 +++++++++++---------
5 files changed, 39 insertions(+), 32 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/18312804/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index be672a6..4a120e9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -338,6 +338,9 @@ Release 2.7.0 - UNRELEASED
HDFS-7333. Improve logging in Storage.tryLock(). (shv)
+ HDFS-7357. FSNamesystem.checkFileProgress should log file path.
+ (Tsz Wo Nicholas Sze via wheat9)
+
OPTIMIZATIONS
BUG FIXES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/18312804/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
index dd3593f..0ea7c2a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
@@ -141,7 +141,7 @@ public class BlockInfoUnderConstruction extends BlockInfo {
@Override
public void appendStringTo(StringBuilder sb) {
- sb.append("ReplicaUnderConstruction[")
+ sb.append("ReplicaUC[")
.append(expectedLocation)
.append("|")
.append(state)
@@ -370,7 +370,7 @@ public class BlockInfoUnderConstruction extends BlockInfo {
}
private void appendUCParts(StringBuilder sb) {
- sb.append("{blockUCState=").append(blockUCState)
+ sb.append("{UCState=").append(blockUCState)
.append(", primaryNodeIndex=").append(primaryNodeIndex)
.append(", replicas=[");
if (replicas != null) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/18312804/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java
index e9f47b9..830814c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java
@@ -161,7 +161,7 @@ public class EditLogFileOutputStream extends EditLogOutputStream {
fp.close();
fp = null;
} finally {
- IOUtils.cleanup(FSNamesystem.LOG, fc, fp);
+ IOUtils.cleanup(LOG, fc, fp);
doubleBuf = null;
fc = null;
fp = null;
@@ -233,8 +233,8 @@ public class EditLogFileOutputStream extends EditLogOutputStream {
size += fillCapacity;
total += fillCapacity;
}
- if(FSNamesystem.LOG.isDebugEnabled()) {
- FSNamesystem.LOG.debug("Preallocated " + total + " bytes at the end of " +
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Preallocated " + total + " bytes at the end of " +
"the edit log (offset " + oldSize + ")");
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/18312804/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index 1b2b4ae..492a5ac 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -329,8 +329,8 @@ public class FSEditLogLoader {
AddCloseOp addCloseOp = (AddCloseOp)op;
final String path =
renameReservedPathsOnUpgrade(addCloseOp.path, logVersion);
- if (FSNamesystem.LOG.isDebugEnabled()) {
- FSNamesystem.LOG.debug(op.opCode + ": " + path +
+ if (LOG.isDebugEnabled()) {
+ LOG.debug(op.opCode + ": " + path +
" numblocks : " + addCloseOp.blocks.length +
" clientHolder " + addCloseOp.clientName +
" clientMachine " + addCloseOp.clientMachine);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/18312804/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 8c35315..52c12c0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -3151,7 +3151,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
String clientMachine = null;
if(NameNode.stateChangeLog.isDebugEnabled()) {
- NameNode.stateChangeLog.debug("BLOCK* NameSystem.getAdditionalBlock: "
+ NameNode.stateChangeLog.debug("BLOCK* getAdditionalBlock: "
+ src + " inodeId " + fileId + " for " + clientName);
}
@@ -3374,7 +3374,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
}
// Check if the penultimate block is minimally replicated
- if (!checkFileProgress(pendingFile, false)) {
+ if (!checkFileProgress(src, pendingFile, false)) {
throw new NotReplicatedYetException("Not replicated yet: " + src);
}
return new FileState(pendingFile, src);
@@ -3622,14 +3622,14 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
}
// Check the state of the penultimate block. It should be completed
// before attempting to complete the last one.
- if (!checkFileProgress(pendingFile, false)) {
+ if (!checkFileProgress(src, pendingFile, false)) {
return false;
}
// commit the last block and complete it if it has minimum replicas
commitOrCompleteLastBlock(pendingFile, last);
- if (!checkFileProgress(pendingFile, true)) {
+ if (!checkFileProgress(src, pendingFile, true)) {
return false;
}
@@ -3653,8 +3653,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
throws IOException {
assert hasWriteLock();
BlockInfo b = dir.addBlock(src, inodesInPath, newBlock, targets);
- NameNode.stateChangeLog.info("BLOCK* allocateBlock: " + src + ". "
- + getBlockPoolId() + " " + b);
+ NameNode.stateChangeLog.info("BLOCK* allocate " + b + " for " + src);
DatanodeStorageInfo.incrementBlocksScheduled(targets);
return b;
}
@@ -3675,30 +3674,21 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
* replicated. If not, return false. If checkall is true, then check
* all blocks, otherwise check only penultimate block.
*/
- boolean checkFileProgress(INodeFile v, boolean checkall) {
+ private boolean checkFileProgress(String src, INodeFile v, boolean checkall) {
readLock();
try {
if (checkall) {
- //
// check all blocks of the file.
- //
for (BlockInfo block: v.getBlocks()) {
- if (!block.isComplete()) {
- LOG.info("BLOCK* checkFileProgress: " + block
- + " has not reached minimal replication "
- + blockManager.minReplication);
+ if (!isCompleteBlock(src, block, blockManager.minReplication)) {
return false;
}
}
} else {
- //
// check the penultimate block of this file
- //
BlockInfo b = v.getPenultimateBlock();
- if (b != null && !b.isComplete()) {
- LOG.warn("BLOCK* checkFileProgress: " + b
- + " has not reached minimal replication "
- + blockManager.minReplication);
+ if (b != null
+ && !isCompleteBlock(src, b, blockManager.minReplication)) {
return false;
}
}
@@ -3708,6 +3698,19 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
}
}
+ private static boolean isCompleteBlock(String src, BlockInfo b, int minRepl) {
+ if (!b.isComplete()) {
+ final BlockInfoUnderConstruction uc = (BlockInfoUnderConstruction)b;
+ final int numNodes = b.numNodes();
+ LOG.info("BLOCK* " + b + " is not COMPLETE (ucState = "
+ + uc.getBlockUCState() + ", replication# = " + numNodes
+ + (numNodes < minRepl? " < ": " >= ")
+ + " minimum = " + minRepl + ") in file " + src);
+ return false;
+ }
+ return true;
+ }
+
////////////////////////////////////////////////////////////////
// Here's how to handle block-copy failure during client write:
// -- As usual, the client's write should result in a streaming
@@ -5152,9 +5155,9 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
if(!nameNodeHasResourcesAvailable()) {
String lowResourcesMsg = "NameNode low on available disk space. ";
if (!isInSafeMode()) {
- FSNamesystem.LOG.warn(lowResourcesMsg + "Entering safe mode.");
+ LOG.warn(lowResourcesMsg + "Entering safe mode.");
} else {
- FSNamesystem.LOG.warn(lowResourcesMsg + "Already in safe mode.");
+ LOG.warn(lowResourcesMsg + "Already in safe mode.");
}
enterSafeMode(true);
}
@@ -7014,11 +7017,11 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
if (cacheEntry != null && cacheEntry.isSuccess()) {
return; // Return previous response
}
- LOG.info("updatePipeline(block=" + oldBlock
- + ", newGenerationStamp=" + newBlock.getGenerationStamp()
+ LOG.info("updatePipeline(" + oldBlock.getLocalBlock()
+ + ", newGS=" + newBlock.getGenerationStamp()
+ ", newLength=" + newBlock.getNumBytes()
+ ", newNodes=" + Arrays.asList(newNodes)
- + ", clientName=" + clientName
+ + ", client=" + clientName
+ ")");
waitForLoadingFSImage();
writeLock();
@@ -7036,7 +7039,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
RetryCache.setState(cacheEntry, success);
}
getEditLog().logSync();
- LOG.info("updatePipeline(" + oldBlock + ") successfully to " + newBlock);
+ LOG.info("updatePipeline(" + oldBlock.getLocalBlock() + " => "
+ + newBlock.getLocalBlock() + ") success");
}
/**
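Beyond adding the file path to checkFileProgress messages, the hunks above also switch EditLogFileOutputStream and FSEditLogLoader from FSNamesystem.LOG to their own LOG, so each message is attributed to the class that emitted it. A hedged sketch of that per-class logger convention with commons-logging (the logging API Hadoop used at the time); the class and method below are illustrative only.

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;

    public class EditLogWriterExample {
      // Each class declares its own logger so messages carry the right category
      // instead of borrowing FSNamesystem's.
      private static final Log LOG = LogFactory.getLog(EditLogWriterExample.class);

      void preallocate(long total, long oldSize) {
        // Guard debug formatting so the string is only built when debug is on.
        if (LOG.isDebugEnabled()) {
          LOG.debug("Preallocated " + total + " bytes at the end of "
              + "the edit log (offset " + oldSize + ")");
        }
      }
    }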
[06/43] git commit: HDFS-7356. Use DirectoryListing.hasMore()
directly in nfs. Contributed by Li Lu.
Posted by vi...@apache.org.
HDFS-7356. Use DirectoryListing.hasMore() directly in nfs. Contributed by Li Lu.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/27f106e2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/27f106e2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/27f106e2
Branch: refs/heads/HDFS-EC
Commit: 27f106e2261d0dfdb04e3d08dfd84ca4fdfad244
Parents: ad21d28
Author: Jing Zhao <ji...@apache.org>
Authored: Tue Nov 4 15:04:26 2014 -0800
Committer: Jing Zhao <ji...@apache.org>
Committed: Tue Nov 4 15:04:26 2014 -0800
----------------------------------------------------------------------
.../java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java | 6 ++----
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++
2 files changed, 4 insertions(+), 4 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/27f106e2/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
index 91d066e..d96babf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
@@ -1584,8 +1584,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
*/
HdfsFileStatus[] fstatus = dlisting.getPartialListing();
int n = (int) Math.min(fstatus.length, count-2);
- boolean eof = (n < fstatus.length) ? false : (dlisting
- .getRemainingEntries() == 0);
+ boolean eof = (n >= fstatus.length) && !dlisting.hasMore();
Entry3[] entries;
if (cookie == 0) {
@@ -1733,8 +1732,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
// Set up the dirents in the response
HdfsFileStatus[] fstatus = dlisting.getPartialListing();
int n = (int) Math.min(fstatus.length, dirCount-2);
- boolean eof = (n < fstatus.length) ? false : (dlisting
- .getRemainingEntries() == 0);
+ boolean eof = (n >= fstatus.length) && !dlisting.hasMore();
READDIRPLUS3Response.EntryPlus3[] entries;
if (cookie == 0) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/27f106e2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 8790951..b4e0fa1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -331,6 +331,8 @@ Release 2.7.0 - UNRELEASED
HDFS-6917. Add an hdfs debug command to validate blocks, call recoverlease,
etc. (cmccabe)
+ HDFS-7356. Use DirectoryListing.hasMore() directly in nfs. (Li Lu via jing9)
+
OPTIMIZATIONS
BUG FIXES
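The two READDIR/READDIRPLUS hunks above replace a ternary with a plain boolean expression; the forms are logically equivalent, with hasMore() standing in for getRemainingEntries() != 0. A small illustrative check (method and variable names are mine, not from the patch):

    public class EofCheckExample {
      // Both expressions answer: "did we return every entry, with nothing left upstream?"
      static boolean eofOld(int n, int total, int remainingEntries) {
        return (n < total) ? false : (remainingEntries == 0);
      }

      static boolean eofNew(int n, int total, boolean hasMore) {
        return (n >= total) && !hasMore;
      }

      public static void main(String[] args) {
        // Spot-check over small values: the forms agree whenever
        // hasMore == (remainingEntries != 0). Run with -ea to enable asserts.
        for (int n = 0; n <= 3; n++)
          for (int total = 0; total <= 3; total++)
            for (int remaining = 0; remaining <= 1; remaining++)
              assert eofOld(n, total, remaining) == eofNew(n, total, remaining != 0);
        System.out.println("old and new eof checks agree");
      }
    }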
[08/43] git commit: HDFS-7355. TestDataNodeVolumeFailure#testUnderReplicationAfterVolFailure
fails on Windows, because we cannot deny access to the file owner. Contributed by Chris Nauroth.
Posted by vi...@apache.org.
HDFS-7355. TestDataNodeVolumeFailure#testUnderReplicationAfterVolFailure fails on Windows, because we cannot deny access to the file owner. Contributed by Chris Nauroth.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/99d71034
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/99d71034
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/99d71034
Branch: refs/heads/HDFS-EC
Commit: 99d710348a20ff99044207df4b92ab3bff31bd69
Parents: 2e1d9a4
Author: Haohui Mai <wh...@apache.org>
Authored: Tue Nov 4 15:18:51 2014 -0800
Committer: Haohui Mai <wh...@apache.org>
Committed: Tue Nov 4 15:18:51 2014 -0800
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 4 ++++
.../hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java | 5 +++++
2 files changed, 9 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/99d71034/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b4e0fa1..c896ead 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1003,6 +1003,10 @@ Release 2.6.0 - UNRELEASED
HDFS-7334. Fix periodic failures of TestCheckpoint
#testTooManyEditReplayFailures. (Charles Lamb via wheat9)
+ HDFS-7355. TestDataNodeVolumeFailure#testUnderReplicationAfterVolFailure
+ fails on Windows, because we cannot deny access to the file owner.
+ (Chris Nauroth via wheat9)
+
BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
HDFS-6387. HDFS CLI admin tool for creating & deleting an
http://git-wip-us.apache.org/repos/asf/hadoop/blob/99d71034/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
index 8429055..6b9c4b1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
@@ -205,6 +205,11 @@ public class TestDataNodeVolumeFailure {
*/
@Test
public void testUnderReplicationAfterVolFailure() throws Exception {
+ // This test relies on denying access to data volumes to simulate data volume
+ // failure. This doesn't work on Windows, because an owner of an object
+ // always has the ability to read and change permissions on the object.
+ assumeTrue(!Path.WINDOWS);
+
// Bring up one more datanode
cluster.startDataNodes(conf, 1, true, null, null);
cluster.waitActive();
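The guard added above relies on JUnit's assumption mechanism: when the assumption does not hold, the test is reported as skipped rather than failed, which is the appropriate outcome for a platform limitation instead of a regression. A minimal hedged example of the same idiom; the os.name check below is illustrative, while the commit itself keys off Path.WINDOWS.

    import static org.junit.Assume.assumeTrue;
    import org.junit.Test;

    public class PlatformSpecificTest {
      @Test
      public void testPosixOnlyBehaviour() {
        // Skip (not fail) on Windows, where an owner can always re-grant access.
        assumeTrue(!System.getProperty("os.name").toLowerCase().startsWith("windows"));
        // ... the rest of the test runs only where denying owner access is possible
      }
    }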
[42/43] git commit: HDFS-7336. Unused member
DFSInputStream.buffersize. Contributed by Milan Desai.
Posted by vi...@apache.org.
HDFS-7336. Unused member DFSInputStream.buffersize. Contributed by Milan Desai.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/80d7d183
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/80d7d183
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/80d7d183
Branch: refs/heads/HDFS-EC
Commit: 80d7d183cd4052d6e6d412ff6588d26471c85d6d
Parents: 228afed
Author: Milan Desai <mi...@wandisco.com>
Authored: Wed Nov 5 21:00:28 2014 -0800
Committer: Konstantin V Shvachko <sh...@apache.org>
Committed: Wed Nov 5 21:00:28 2014 -0800
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++
.../src/main/java/org/apache/hadoop/hdfs/DFSClient.java | 2 +-
.../src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java | 7 ++-----
3 files changed, 5 insertions(+), 6 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/80d7d183/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index d03074b..1cdfeb1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -347,6 +347,8 @@ Release 2.7.0 - UNRELEASED
HDFS-7329. Improve logging when MiniDFSCluster fails to start.
(Byron Wong via shv)
+ HDFS-7336. Unused member DFSInputStream.buffersize. (Milan Desai via shv)
+
OPTIMIZATIONS
BUG FIXES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/80d7d183/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 9a080bc2..056a1b3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -1503,7 +1503,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
throws IOException, UnresolvedLinkException {
checkOpen();
// Get block info from namenode
- return new DFSInputStream(this, src, buffersize, verifyChecksum);
+ return new DFSInputStream(this, src, verifyChecksum);
}
/**
http://git-wip-us.apache.org/repos/asf/hadoop/blob/80d7d183/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 008cc54..9794eec 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -41,7 +41,6 @@ import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicLong;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.classification.InterfaceAudience;
@@ -215,19 +214,17 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
* parallel accesses to DFSInputStream (through ptreads) properly */
private final ConcurrentHashMap<DatanodeInfo, DatanodeInfo> deadNodes =
new ConcurrentHashMap<DatanodeInfo, DatanodeInfo>();
- private int buffersize = 1;
-
+
private final byte[] oneByteBuf = new byte[1]; // used for 'int read()'
void addToDeadNodes(DatanodeInfo dnInfo) {
deadNodes.put(dnInfo, dnInfo);
}
- DFSInputStream(DFSClient dfsClient, String src, int buffersize, boolean verifyChecksum
+ DFSInputStream(DFSClient dfsClient, String src, boolean verifyChecksum
) throws IOException, UnresolvedLinkException {
this.dfsClient = dfsClient;
this.verifyChecksum = verifyChecksum;
- this.buffersize = buffersize;
this.src = src;
this.cachingStrategy =
dfsClient.getDefaultReadCachingStrategy();
[17/43] git commit: MAPREDUCE-6048. Fixed TestJavaSerialization
failure. Contributed by Varun Vasudev
Posted by vi...@apache.org.
MAPREDUCE-6048. Fixed TestJavaSerialization failure. Contributed by Varun Vasudev
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/73068f67
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/73068f67
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/73068f67
Branch: refs/heads/HDFS-EC
Commit: 73068f677bc45029743ba2e0b3c0256a8069c13e
Parents: 0762b4a
Author: Jian He <ji...@apache.org>
Authored: Tue Nov 4 20:19:54 2014 -0800
Committer: Jian He <ji...@apache.org>
Committed: Tue Nov 4 20:19:54 2014 -0800
----------------------------------------------------------------------
hadoop-mapreduce-project/CHANGES.txt | 3 ++
.../hadoop/mapred/TestJavaSerialization.java | 34 +++++++++++++-------
2 files changed, 26 insertions(+), 11 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/73068f67/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index dce4778..bbe96c2 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -456,6 +456,9 @@ Release 2.6.0 - UNRELEASED
MAPREDUCE-6022. map_input_file is missing from streaming job environment.
(jlowe via kihwal)
+ MAPREDUCE-6048. Fixed TestJavaSerialization failure. (Varun Vasudev via
+ jianhe)
+
Release 2.5.2 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/73068f67/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestJavaSerialization.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestJavaSerialization.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestJavaSerialization.java
index 3ab6f56..4dea0d7 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestJavaSerialization.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestJavaSerialization.java
@@ -17,11 +17,9 @@
*/
package org.apache.hadoop.mapred;
-import java.io.BufferedReader;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
-import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.Writer;
@@ -30,6 +28,7 @@ import java.util.StringTokenizer;
import junit.framework.TestCase;
+import org.apache.commons.io.FileUtils;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
@@ -56,7 +55,10 @@ public class TestJavaSerialization extends TestCase {
throws IOException {
StringTokenizer st = new StringTokenizer(value.toString());
while (st.hasMoreTokens()) {
- output.collect(st.nextToken(), 1L);
+ String token = st.nextToken();
+ assertTrue("Invalid token; expected 'a' or 'b', got " + token,
+ token.equals("a") || token.equals("b"));
+ output.collect(token, 1L);
}
}
@@ -116,18 +118,28 @@ public class TestJavaSerialization extends TestCase {
FileOutputFormat.setOutputPath(conf, OUTPUT_DIR);
+ String inputFileContents =
+ FileUtils.readFileToString(new File(INPUT_FILE.toUri().getPath()));
+ assertTrue("Input file contents not as expected; contents are '"
+ + inputFileContents + "', expected \"b a\n\" ",
+ inputFileContents.equals("b a\n"));
+
JobClient.runJob(conf);
- Path[] outputFiles = FileUtil.stat2Paths(
- fs.listStatus(OUTPUT_DIR,
- new Utils.OutputFileUtils.OutputFilesFilter()));
+ Path[] outputFiles =
+ FileUtil.stat2Paths(fs.listStatus(OUTPUT_DIR,
+ new Utils.OutputFileUtils.OutputFilesFilter()));
assertEquals(1, outputFiles.length);
InputStream is = fs.open(outputFiles[0]);
- BufferedReader reader = new BufferedReader(new InputStreamReader(is));
- assertEquals("a\t1", reader.readLine());
- assertEquals("b\t1", reader.readLine());
- assertNull(reader.readLine());
- reader.close();
+ String reduceOutput = org.apache.commons.io.IOUtils.toString(is);
+ String[] lines = reduceOutput.split(System.getProperty("line.separator"));
+ assertEquals("Unexpected output; received output '" + reduceOutput + "'",
+ "a\t1", lines[0]);
+ assertEquals("Unexpected output; received output '" + reduceOutput + "'",
+ "b\t1", lines[1]);
+ assertEquals("Reduce output has extra lines; output is '" + reduceOutput
+ + "'", 2, lines.length);
+ is.close();
}
/**
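The rewritten assertions above read the whole reducer output up front and then compare line by line, so a mismatch reports the complete output instead of a single unexpected readLine() value. A hedged standalone sketch of the same idea with commons-io; the expected values and charset handling below are illustrative.

    import java.io.ByteArrayInputStream;
    import java.io.IOException;
    import java.io.InputStream;
    import java.nio.charset.StandardCharsets;
    import org.apache.commons.io.IOUtils;

    public class ReadAllThenAssertExample {
      public static void main(String[] args) throws IOException {
        InputStream is = new ByteArrayInputStream(
            "a\t1\nb\t1\n".getBytes(StandardCharsets.UTF_8));
        // Read everything first so any mismatch can report the full output.
        String reduceOutput = IOUtils.toString(is, StandardCharsets.UTF_8);
        String[] lines = reduceOutput.split("\n");
        if (lines.length != 2 || !"a\t1".equals(lines[0]) || !"b\t1".equals(lines[1])) {
          throw new AssertionError("Unexpected output: '" + reduceOutput + "'");
        }
        is.close();
        System.out.println("output matched");
      }
    }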
[25/43] git commit: CHANGES.txt. Move HDFS-7199 to branch-2.6
Posted by vi...@apache.org.
CHANGES.txt. Move HDFS-7199 to branch-2.6
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7b07acb0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7b07acb0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7b07acb0
Branch: refs/heads/HDFS-EC
Commit: 7b07acb0a51d20550f62ba29bf09120684b4097b
Parents: 56257fa
Author: Colin Patrick Mccabe <cm...@cloudera.com>
Authored: Wed Nov 5 10:57:15 2014 -0800
Committer: Colin Patrick Mccabe <cm...@cloudera.com>
Committed: Wed Nov 5 10:57:15 2014 -0800
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b07acb0/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index cf9616b..bb4f194 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -397,9 +397,6 @@ Release 2.7.0 - UNRELEASED
HDFS-7324. haadmin command usage prints incorrect command name.
(Brahma Reddy Battula via suresh)
- HDFS-7199. DFSOutputStream should not silently drop data if DataStreamer
- crashes with an unchecked exception (rushabhs via cmccabe)
-
Release 2.6.0 - UNRELEASED
INCOMPATIBLE CHANGES
@@ -1024,6 +1021,9 @@ Release 2.6.0 - UNRELEASED
HDFS-7218. FSNamesystem ACL operations should write to audit log on
failure. (clamb via yliu)
+ HDFS-7199. DFSOutputStream should not silently drop data if DataStreamer
+ crashes with an unchecked exception (rushabhs via cmccabe)
+
BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
HDFS-6387. HDFS CLI admin tool for creating & deleting an
[04/43] git commit: HDFS-7334. Fix periodic failures of
TestCheckpoint#testTooManyEditReplayFailures. Contributed by Charles Lamb.
Posted by vi...@apache.org.
HDFS-7334. Fix periodic failures of TestCheckpoint#testTooManyEditReplayFailures. Contributed by Charles Lamb.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d0449bd2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d0449bd2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d0449bd2
Branch: refs/heads/HDFS-EC
Commit: d0449bd2fd0b03765bef78b2d7952b799f06575b
Parents: 1eed102
Author: Haohui Mai <wh...@apache.org>
Authored: Tue Nov 4 14:34:52 2014 -0800
Committer: Haohui Mai <wh...@apache.org>
Committed: Tue Nov 4 14:34:52 2014 -0800
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
.../org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java | 4 ++--
2 files changed, 5 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0449bd2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 4bc833f..8790951 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -998,6 +998,9 @@ Release 2.6.0 - UNRELEASED
HDFS-7340. Make rollingUpgrade start/finalize idempotent. (jing9)
+ HDFS-7334. Fix periodic failures of TestCheckpoint
+ #testTooManyEditReplayFailures. (Charles Lamb via wheat9)
+
BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
HDFS-6387. HDFS CLI admin tool for creating & deleting an
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0449bd2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
index 34a314c..d51c1cc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
@@ -289,8 +289,8 @@ public class TestCheckpoint {
@Test(timeout=30000)
public void testTooManyEditReplayFailures() throws IOException {
Configuration conf = new HdfsConfiguration();
- conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_MAX_RETRIES_KEY, "1");
- conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY, "1");
+ conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_MAX_RETRIES_KEY, 1);
+ conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, 1);
FSDataOutputStream fos = null;
SecondaryNameNode secondary = null;
[26/43] git commit: YARN-2767. Added a test case to verify that http
static user cannot kill or submit apps in the secure mode. Contributed by
Varun Vasudev.
Posted by vi...@apache.org.
YARN-2767. Added a test case to verify that http static user cannot kill or submit apps in the secure mode. Contributed by Varun Vasudev.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b4c951ab
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b4c951ab
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b4c951ab
Branch: refs/heads/HDFS-EC
Commit: b4c951ab832f85189d815fb6df57eda4121c0199
Parents: 7b07acb
Author: Zhijie Shen <zj...@apache.org>
Authored: Wed Nov 5 10:56:39 2014 -0800
Committer: Zhijie Shen <zj...@apache.org>
Committed: Wed Nov 5 10:57:38 2014 -0800
----------------------------------------------------------------------
hadoop-yarn-project/CHANGES.txt | 3 +
...tRMWebServicesHttpStaticUserPermissions.java | 195 +++++++++++++++++++
2 files changed, 198 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b4c951ab/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index b5aec42..2870583 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -862,6 +862,9 @@ Release 2.6.0 - UNRELEASED
YARN-2804. Fixed Timeline service to not fill the logs with JAXB bindings
exceptions. (Zhijie Shen via vinodkv)
+ YARN-2767. Added a test case to verify that http static user cannot kill or submit
+ apps in the secure mode. (Varun Vasudev via zjshen)
+
Release 2.5.2 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b4c951ab/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesHttpStaticUserPermissions.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesHttpStaticUserPermissions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesHttpStaticUserPermissions.java
new file mode 100644
index 0000000..3d47233
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesHttpStaticUserPermissions.java
@@ -0,0 +1,195 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.net.HttpURLConnection;
+import java.net.URL;
+import java.util.Map;
+import java.util.HashMap;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.minikdc.MiniKdc;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.KerberosTestUtils;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ApplicationSubmissionContextInfo;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import com.sun.jersey.api.client.ClientResponse.Status;
+
+public class TestRMWebServicesHttpStaticUserPermissions {
+
+ private static final File testRootDir = new File("target",
+ TestRMWebServicesHttpStaticUserPermissions.class.getName() + "-root");
+ private static File spnegoKeytabFile = new File(
+ KerberosTestUtils.getKeytabFile());
+
+ private static String spnegoPrincipal = KerberosTestUtils
+ .getServerPrincipal();
+
+ private static MiniKdc testMiniKDC;
+ private static MockRM rm;
+
+ static class Helper {
+ String method;
+ String requestBody;
+
+ Helper(String method, String requestBody) {
+ this.method = method;
+ this.requestBody = requestBody;
+ }
+ }
+
+ @BeforeClass
+ public static void setUp() {
+ try {
+ testMiniKDC = new MiniKdc(MiniKdc.createConf(), testRootDir);
+ setupKDC();
+ setupAndStartRM();
+ } catch (Exception e) {
+ fail("Couldn't create MiniKDC");
+ }
+ }
+
+ @AfterClass
+ public static void tearDown() {
+ if (testMiniKDC != null) {
+ testMiniKDC.stop();
+ }
+ if (rm != null) {
+ rm.stop();
+ }
+ }
+
+ public TestRMWebServicesHttpStaticUserPermissions() throws Exception {
+ super();
+ }
+
+ private static void setupAndStartRM() throws Exception {
+ Configuration rmconf = new Configuration();
+ rmconf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
+ YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
+ rmconf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class,
+ ResourceScheduler.class);
+ rmconf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true);
+ rmconf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
+ "kerberos");
+ rmconf.set("yarn.resourcemanager.principal", spnegoPrincipal);
+ rmconf.set("yarn.resourcemanager.keytab",
+ spnegoKeytabFile.getAbsolutePath());
+ rmconf.setBoolean("mockrm.webapp.enabled", true);
+ UserGroupInformation.setConfiguration(rmconf);
+ rm = new MockRM(rmconf);
+ rm.start();
+
+ }
+
+ private static void setupKDC() throws Exception {
+ testMiniKDC.start();
+ testMiniKDC.createPrincipal(spnegoKeytabFile, "HTTP/localhost", "client",
+ UserGroupInformation.getLoginUser().getShortUserName(), "client2");
+ }
+
+ // Test that the http static user can't submit or kill apps
+ // when secure mode is turned on
+
+ @Test
+ public void testWebServiceAccess() throws Exception {
+
+ ApplicationSubmissionContextInfo app =
+ new ApplicationSubmissionContextInfo();
+ String appid = "application_123_0";
+ app.setApplicationId(appid);
+ String submitAppRequestBody =
+ TestRMWebServicesDelegationTokenAuthentication
+ .getMarshalledAppInfo(app);
+
+ URL url = new URL("http://localhost:8088/ws/v1/cluster/apps");
+ HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+
+ // we should be access the apps page with the static user
+ TestRMWebServicesDelegationTokenAuthentication.setupConn(conn, "GET", "",
+ "");
+ try {
+ conn.getInputStream();
+ assertEquals(Status.OK.getStatusCode(), conn.getResponseCode());
+ } catch (IOException e) {
+ fail("Got " + conn.getResponseCode() + " instead of 200 accessing "
+ + url.toString());
+ }
+ conn.disconnect();
+
+ // new-application, submit app and kill should fail with
+ // forbidden
+ Map<String, Helper> urlRequestMap = new HashMap<String, Helper>();
+ String killAppRequestBody =
+ "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>\n"
+ + "<appstate>\n" + " <state>KILLED</state>\n" + "</appstate>";
+
+ urlRequestMap.put("http://localhost:8088/ws/v1/cluster/apps", new Helper(
+ "POST", submitAppRequestBody));
+ urlRequestMap.put(
+ "http://localhost:8088/ws/v1/cluster/apps/new-application", new Helper(
+ "POST", ""));
+ urlRequestMap.put(
+ "http://localhost:8088/ws/v1/cluster/apps/app_123_1/state", new Helper(
+ "PUT", killAppRequestBody));
+
+ for (Map.Entry<String, Helper> entry : urlRequestMap.entrySet()) {
+ URL reqURL = new URL(entry.getKey());
+ conn = (HttpURLConnection) reqURL.openConnection();
+ String method = entry.getValue().method;
+ String body = entry.getValue().requestBody;
+ TestRMWebServicesDelegationTokenAuthentication.setupConn(conn, method,
+ "application/xml", body);
+ try {
+ conn.getInputStream();
+ fail("Request " + entry.getKey() + "succeeded but should have failed");
+ } catch (IOException e) {
+ assertEquals(Status.FORBIDDEN.getStatusCode(), conn.getResponseCode());
+ InputStream errorStream = conn.getErrorStream();
+ String error = "";
+ BufferedReader reader =
+ new BufferedReader(new InputStreamReader(errorStream, "UTF8"));
+ for (String line; (line = reader.readLine()) != null;) {
+ error += line;
+ }
+ reader.close();
+ errorStream.close();
+ assertEquals(
+ "The default static user cannot carry out this operation.", error);
+ }
+ conn.disconnect();
+ }
+ }
+}
[15/43] git commit: YARN-2804. Fixed Timeline service to not fill the
logs with JAXB bindings exceptions. Contributed by Zhijie Shen.
Posted by vi...@apache.org.
YARN-2804. Fixed Timeline service to not fill the logs with JAXB bindings exceptions. Contributed by Zhijie Shen.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b7617989
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b7617989
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b7617989
Branch: refs/heads/HDFS-EC
Commit: b76179895dd2ef4d56e8de31e9f673375faa2afa
Parents: d794f78
Author: Vinod Kumar Vavilapalli <vi...@apache.org>
Authored: Tue Nov 4 17:45:46 2014 -0800
Committer: Vinod Kumar Vavilapalli <vi...@apache.org>
Committed: Tue Nov 4 18:00:06 2014 -0800
----------------------------------------------------------------------
hadoop-yarn-project/CHANGES.txt | 3 +
.../api/records/timeline/TimelineEntity.java | 49 +++++--
.../api/records/timeline/TimelineEvent.java | 17 ++-
.../records/timeline/TestTimelineRecords.java | 128 ++++++++++++++++++-
.../timeline/webapp/TimelineWebServices.java | 9 +-
5 files changed, 182 insertions(+), 24 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b7617989/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index f9fcf5b..b5aec42 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -859,6 +859,9 @@ Release 2.6.0 - UNRELEASED
YARN-2010. Handle app-recovery failures gracefully.
(Jian He and Karthik Kambatla via kasha)
+ YARN-2804. Fixed Timeline service to not fill the logs with JAXB bindings
+ exceptions. (Zhijie Shen via vinodkv)
+
Release 2.5.2 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b7617989/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timeline/TimelineEntity.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timeline/TimelineEntity.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timeline/TimelineEntity.java
index d66c253..03ed4af 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timeline/TimelineEntity.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timeline/TimelineEntity.java
@@ -31,6 +31,7 @@ import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlRootElement;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
@@ -58,11 +59,11 @@ public class TimelineEntity implements Comparable<TimelineEntity> {
private String entityId;
private Long startTime;
private List<TimelineEvent> events = new ArrayList<TimelineEvent>();
- private Map<String, Set<String>> relatedEntities =
+ private HashMap<String, Set<String>> relatedEntities =
new HashMap<String, Set<String>>();
- private Map<String, Set<Object>> primaryFilters =
+ private HashMap<String, Set<Object>> primaryFilters =
new HashMap<String, Set<Object>>();
- private Map<String, Object> otherInfo =
+ private HashMap<String, Object> otherInfo =
new HashMap<String, Object>();
private String domainId;
@@ -175,11 +176,17 @@ public class TimelineEntity implements Comparable<TimelineEntity> {
*
* @return the related entities
*/
- @XmlElement(name = "relatedentities")
public Map<String, Set<String>> getRelatedEntities() {
return relatedEntities;
}
+ // Required by JAXB
+ @Private
+ @XmlElement(name = "relatedentities")
+ public HashMap<String, Set<String>> getRelatedEntitiesJAXB() {
+ return relatedEntities;
+ }
+
/**
* Add an entity to the existing related entity map
*
@@ -224,7 +231,11 @@ public class TimelineEntity implements Comparable<TimelineEntity> {
*/
public void setRelatedEntities(
Map<String, Set<String>> relatedEntities) {
- this.relatedEntities = relatedEntities;
+ if (relatedEntities != null && !(relatedEntities instanceof HashMap)) {
+ this.relatedEntities = new HashMap<String, Set<String>>(relatedEntities);
+ } else {
+ this.relatedEntities = (HashMap<String, Set<String>>) relatedEntities;
+ }
}
/**
@@ -232,11 +243,17 @@ public class TimelineEntity implements Comparable<TimelineEntity> {
*
* @return the primary filters
*/
- @XmlElement(name = "primaryfilters")
public Map<String, Set<Object>> getPrimaryFilters() {
return primaryFilters;
}
+ // Required by JAXB
+ @Private
+ @XmlElement(name = "primaryfilters")
+ public HashMap<String, Set<Object>> getPrimaryFiltersJAXB() {
+ return primaryFilters;
+ }
+
/**
* Add a single piece of primary filter to the existing primary filter map
*
@@ -280,7 +297,11 @@ public class TimelineEntity implements Comparable<TimelineEntity> {
* a map of primary filters
*/
public void setPrimaryFilters(Map<String, Set<Object>> primaryFilters) {
- this.primaryFilters = primaryFilters;
+ if (primaryFilters != null && !(primaryFilters instanceof HashMap)) {
+ this.primaryFilters = new HashMap<String, Set<Object>>(primaryFilters);
+ } else {
+ this.primaryFilters = (HashMap<String, Set<Object>>) primaryFilters;
+ }
}
/**
@@ -288,11 +309,17 @@ public class TimelineEntity implements Comparable<TimelineEntity> {
*
* @return the other information of the entity
*/
- @XmlElement(name = "otherinfo")
public Map<String, Object> getOtherInfo() {
return otherInfo;
}
+ // Required by JAXB
+ @Private
+ @XmlElement(name = "otherinfo")
+ public HashMap<String, Object> getOtherInfoJAXB() {
+ return otherInfo;
+ }
+
/**
* Add one piece of other information of the entity to the existing other info
* map
@@ -323,7 +350,11 @@ public class TimelineEntity implements Comparable<TimelineEntity> {
* a map of other information
*/
public void setOtherInfo(Map<String, Object> otherInfo) {
- this.otherInfo = otherInfo;
+ if (otherInfo != null && !(otherInfo instanceof HashMap)) {
+ this.otherInfo = new HashMap<String, Object>(otherInfo);
+ } else {
+ this.otherInfo = (HashMap<String, Object>) otherInfo;
+ }
}
/**
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b7617989/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timeline/TimelineEvent.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timeline/TimelineEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timeline/TimelineEvent.java
index aa49538..0a09e78 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timeline/TimelineEvent.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timeline/TimelineEvent.java
@@ -26,6 +26,7 @@ import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlRootElement;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
@@ -43,7 +44,7 @@ public class TimelineEvent implements Comparable<TimelineEvent> {
private long timestamp;
private String eventType;
- private Map<String, Object> eventInfo = new HashMap<String, Object>();
+ private HashMap<String, Object> eventInfo = new HashMap<String, Object>();
public TimelineEvent() {
}
@@ -93,11 +94,17 @@ public class TimelineEvent implements Comparable<TimelineEvent> {
*
* @return the information of the event
*/
- @XmlElement(name = "eventinfo")
public Map<String, Object> getEventInfo() {
return eventInfo;
}
+ // Required by JAXB
+ @Private
+ @XmlElement(name = "eventinfo")
+ public HashMap<String, Object> getEventInfoJAXB() {
+ return eventInfo;
+ }
+
/**
* Add one piece of the information of the event to the existing information
* map
@@ -128,7 +135,11 @@ public class TimelineEvent implements Comparable<TimelineEvent> {
* a map of the information of the event
*/
public void setEventInfo(Map<String, Object> eventInfo) {
- this.eventInfo = eventInfo;
+ if (eventInfo != null && !(eventInfo instanceof HashMap)) {
+ this.eventInfo = new HashMap<String, Object>(eventInfo);
+ } else {
+ this.eventInfo = (HashMap<String, Object>) eventInfo;
+ }
}
@Override
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b7617989/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/records/timeline/TestTimelineRecords.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/records/timeline/TestTimelineRecords.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/records/timeline/TestTimelineRecords.java
index 2b59ff5..9d16edb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/records/timeline/TestTimelineRecords.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/records/timeline/TestTimelineRecords.java
@@ -19,19 +19,19 @@
package org.apache.hadoop.yarn.api.records.timeline;
import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
import java.util.List;
-
-import org.junit.Assert;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+import java.util.WeakHashMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.yarn.api.records.timeline.TimelineEntities;
-import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
-import org.apache.hadoop.yarn.api.records.timeline.TimelineEvent;
-import org.apache.hadoop.yarn.api.records.timeline.TimelineEvents;
-import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse;
import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse.TimelinePutError;
import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
+import org.junit.Assert;
import org.junit.Test;
public class TestTimelineRecords {
@@ -202,4 +202,118 @@ public class TestTimelineRecords {
}
}
+ @Test
+ public void testMapInterfaceOrTimelineRecords() throws Exception {
+ TimelineEntity entity = new TimelineEntity();
+ List<Map<String, Set<Object>>> primaryFiltersList =
+ new ArrayList<Map<String, Set<Object>>>();
+ primaryFiltersList.add(
+ Collections.singletonMap("pkey", Collections.singleton((Object) "pval")));
+ Map<String, Set<Object>> primaryFilters = new TreeMap<String, Set<Object>>();
+ primaryFilters.put("pkey1", Collections.singleton((Object) "pval1"));
+ primaryFilters.put("pkey2", Collections.singleton((Object) "pval2"));
+ primaryFiltersList.add(primaryFilters);
+ entity.setPrimaryFilters(null);
+ for (Map<String, Set<Object>> primaryFiltersToSet : primaryFiltersList) {
+ entity.setPrimaryFilters(primaryFiltersToSet);
+ assertPrimaryFilters(entity);
+
+ Map<String, Set<Object>> primaryFiltersToAdd =
+ new WeakHashMap<String, Set<Object>>();
+ primaryFiltersToAdd.put("pkey3", Collections.singleton((Object) "pval3"));
+ entity.addPrimaryFilters(primaryFiltersToAdd);
+ assertPrimaryFilters(entity);
+ }
+
+ List<Map<String, Set<String>>> relatedEntitiesList =
+ new ArrayList<Map<String, Set<String>>>();
+ relatedEntitiesList.add(
+ Collections.singletonMap("rkey", Collections.singleton("rval")));
+ Map<String, Set<String>> relatedEntities = new TreeMap<String, Set<String>>();
+ relatedEntities.put("rkey1", Collections.singleton("rval1"));
+ relatedEntities.put("rkey2", Collections.singleton("rval2"));
+ relatedEntitiesList.add(relatedEntities);
+ entity.setRelatedEntities(null);
+ for (Map<String, Set<String>> relatedEntitiesToSet : relatedEntitiesList) {
+ entity.setRelatedEntities(relatedEntitiesToSet);
+ assertRelatedEntities(entity);
+
+ Map<String, Set<String>> relatedEntitiesToAdd =
+ new WeakHashMap<String, Set<String>>();
+ relatedEntitiesToAdd.put("rkey3", Collections.singleton("rval3"));
+ entity.addRelatedEntities(relatedEntitiesToAdd);
+ assertRelatedEntities(entity);
+ }
+
+ List<Map<String, Object>> otherInfoList =
+ new ArrayList<Map<String, Object>>();
+ otherInfoList.add(Collections.singletonMap("okey", (Object) "oval"));
+ Map<String, Object> otherInfo = new TreeMap<String, Object>();
+ otherInfo.put("okey1", "oval1");
+ otherInfo.put("okey2", "oval2");
+ otherInfoList.add(otherInfo);
+ entity.setOtherInfo(null);
+ for (Map<String, Object> otherInfoToSet : otherInfoList) {
+ entity.setOtherInfo(otherInfoToSet);
+ assertOtherInfo(entity);
+
+ Map<String, Object> otherInfoToAdd = new WeakHashMap<String, Object>();
+ otherInfoToAdd.put("okey3", "oval3");
+ entity.addOtherInfo(otherInfoToAdd);
+ assertOtherInfo(entity);
+ }
+
+ TimelineEvent event = new TimelineEvent();
+ List<Map<String, Object>> eventInfoList =
+ new ArrayList<Map<String, Object>>();
+ eventInfoList.add(Collections.singletonMap("ekey", (Object) "eval"));
+ Map<String, Object> eventInfo = new TreeMap<String, Object>();
+ eventInfo.put("ekey1", "eval1");
+ eventInfo.put("ekey2", "eval2");
+ eventInfoList.add(eventInfo);
+ event.setEventInfo(null);
+ for (Map<String, Object> eventInfoToSet : eventInfoList) {
+ event.setEventInfo(eventInfoToSet);
+ assertEventInfo(event);
+
+ Map<String, Object> eventInfoToAdd = new WeakHashMap<String, Object>();
+ eventInfoToAdd.put("ekey3", "eval3");
+ event.addEventInfo(eventInfoToAdd);
+ assertEventInfo(event);
+ }
+ }
+
+ private static void assertPrimaryFilters(TimelineEntity entity) {
+ Assert.assertNotNull(entity.getPrimaryFilters());
+ Assert.assertNotNull(entity.getPrimaryFiltersJAXB());
+ Assert.assertTrue(entity.getPrimaryFilters() instanceof HashMap);
+ Assert.assertTrue(entity.getPrimaryFiltersJAXB() instanceof HashMap);
+ Assert.assertEquals(
+ entity.getPrimaryFilters(), entity.getPrimaryFiltersJAXB());
+ }
+
+ private static void assertRelatedEntities(TimelineEntity entity) {
+ Assert.assertNotNull(entity.getRelatedEntities());
+ Assert.assertNotNull(entity.getRelatedEntitiesJAXB());
+ Assert.assertTrue(entity.getRelatedEntities() instanceof HashMap);
+ Assert.assertTrue(entity.getRelatedEntitiesJAXB() instanceof HashMap);
+ Assert.assertEquals(
+ entity.getRelatedEntities(), entity.getRelatedEntitiesJAXB());
+ }
+
+ private static void assertOtherInfo(TimelineEntity entity) {
+ Assert.assertNotNull(entity.getOtherInfo());
+ Assert.assertNotNull(entity.getOtherInfoJAXB());
+ Assert.assertTrue(entity.getOtherInfo() instanceof HashMap);
+ Assert.assertTrue(entity.getOtherInfoJAXB() instanceof HashMap);
+ Assert.assertEquals(entity.getOtherInfo(), entity.getOtherInfoJAXB());
+ }
+
+ private static void assertEventInfo(TimelineEvent event) {
+ Assert.assertNotNull(event);
+ Assert.assertNotNull(event.getEventInfoJAXB());
+ Assert.assertTrue(event.getEventInfo() instanceof HashMap);
+ Assert.assertTrue(event.getEventInfoJAXB() instanceof HashMap);
+ Assert.assertEquals(event.getEventInfo(), event.getEventInfoJAXB());
+ }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b7617989/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/webapp/TimelineWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/webapp/TimelineWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/webapp/TimelineWebServices.java
index f290930..0907f2c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/webapp/TimelineWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/webapp/TimelineWebServices.java
@@ -42,7 +42,6 @@ import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
-import javax.ws.rs.core.Response.Status;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
@@ -53,11 +52,11 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.api.records.timeline.TimelineDomain;
+import org.apache.hadoop.yarn.api.records.timeline.TimelineDomains;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEntities;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEvents;
-import org.apache.hadoop.yarn.api.records.timeline.TimelineDomain;
-import org.apache.hadoop.yarn.api.records.timeline.TimelineDomains;
import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.server.timeline.EntityIdentifier;
@@ -272,7 +271,7 @@ public class TimelineWebServices {
@PUT
@Path("/domain")
@Consumes({ MediaType.APPLICATION_JSON /* , MediaType.APPLICATION_XML */})
- public Response putDomain(
+ public TimelinePutResponse putDomain(
@Context HttpServletRequest req,
@Context HttpServletResponse res,
TimelineDomain domain) {
@@ -295,7 +294,7 @@ public class TimelineWebServices {
throw new WebApplicationException(e,
Response.Status.INTERNAL_SERVER_ERROR);
}
- return Response.status(Status.OK).build();
+ return new TimelinePutResponse();
}
/**
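
Taken together, the TimelineEntity and TimelineEvent changes above follow a single pattern: keep the backing field a concrete HashMap, leave the public getter returning the Map interface, move the @XmlElement annotation onto a @Private getter that returns the HashMap so JAXB binds a concrete, instantiable type, and have the setter copy any other Map implementation into a HashMap. The following is a minimal, self-contained sketch of that pattern; the class and member names are illustrative only, not the real timeline records:

import java.util.HashMap;
import java.util.Map;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlRootElement;

@XmlRootElement
@XmlAccessorType(XmlAccessType.NONE) // only annotated members are bound, as in the real records
public class RecordSketch {

  // Backing field is a concrete HashMap so JAXB has an instantiable type.
  private HashMap<String, Object> info = new HashMap<String, Object>();

  // Public API getter keeps exposing the Map interface; no XML binding here.
  public Map<String, Object> getInfo() {
    return info;
  }

  // JAXB-only getter carries the XML binding and returns the concrete type.
  @XmlElement(name = "info")
  public HashMap<String, Object> getInfoJAXB() {
    return info;
  }

  // Setter normalizes whatever Map implementation the caller passes into a HashMap.
  public void setInfo(Map<String, Object> info) {
    if (info != null && !(info instanceof HashMap)) {
      this.info = new HashMap<String, Object>(info);
    } else {
      this.info = (HashMap<String, Object>) info;
    }
  }
}

The commit message attributes the log noise to JAXB binding exceptions; routing serialization through a dedicated HashMap-typed getter avoids handing JAXB interface-typed properties or foreign Map implementations, while user-facing getters and setters keep their Map signatures. The new testMapInterfaceOrTimelineRecords test above exercises exactly this by setting TreeMap, WeakHashMap and singleton maps and asserting the stored values are HashMaps.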
[14/43] git commit: HADOOP-11230. Add missing dependency of
bouncycastle for kms, httpfs, hdfs, MR and YARN. Contributed by Robert Kanter.
Posted by vi...@apache.org.
HADOOP-11230. Add missing dependency of bouncycastle for kms, httpfs, hdfs, MR and YARN. Contributed by Robert Kanter.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d794f785
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d794f785
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d794f785
Branch: refs/heads/HDFS-EC
Commit: d794f785defe4b49905f90542cd210aa2f119dfd
Parents: b2cd269
Author: Haohui Mai <wh...@apache.org>
Authored: Tue Nov 4 17:52:03 2014 -0800
Committer: Haohui Mai <wh...@apache.org>
Committed: Tue Nov 4 17:52:03 2014 -0800
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
hadoop-common-project/hadoop-kms/pom.xml | 6 ++++++
hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml | 6 ++++++
hadoop-hdfs-project/hadoop-hdfs/pom.xml | 6 ++++++
.../hadoop-mapreduce-client-jobclient/pom.xml | 6 ++++++
.../hadoop-yarn-server-applicationhistoryservice/pom.xml | 7 +++++++
6 files changed, 34 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d794f785/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index af21a16..8195ef8 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -392,6 +392,9 @@ Release 2.7.0 - UNRELEASED
HADOOP-11268. Update BUILDING.txt to remove the workaround for tools.jar.
(Li Lu via wheat9)
+ HADOOP-11230. Add missing dependency of bouncycastle for kms, httpfs, hdfs, MR
+ and YARN. (Robert Kanter via wheat9)
+
Release 2.6.0 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d794f785/hadoop-common-project/hadoop-kms/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-kms/pom.xml b/hadoop-common-project/hadoop-kms/pom.xml
index e6b21aa..fda25aa 100644
--- a/hadoop-common-project/hadoop-kms/pom.xml
+++ b/hadoop-common-project/hadoop-kms/pom.xml
@@ -192,6 +192,12 @@
<artifactId>curator-test</artifactId>
<scope>test</scope>
</dependency>
+ <!-- 'mvn dependency:analyze' fails to detect use of this dependency -->
+ <dependency>
+ <groupId>org.bouncycastle</groupId>
+ <artifactId>bcprov-jdk16</artifactId>
+ <scope>test</scope>
+ </dependency>
</dependencies>
<build>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d794f785/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
index a44f686..0bb6d4b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
@@ -210,6 +210,12 @@
<artifactId>slf4j-log4j12</artifactId>
<scope>runtime</scope>
</dependency>
+ <!-- 'mvn dependency:analyze' fails to detect use of this dependency -->
+ <dependency>
+ <groupId>org.bouncycastle</groupId>
+ <artifactId>bcprov-jdk16</artifactId>
+ <scope>test</scope>
+ </dependency>
</dependencies>
<build>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d794f785/hadoop-hdfs-project/hadoop-hdfs/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 84b7e68..2aab073 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -198,6 +198,12 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
<type>test-jar</type>
<scope>test</scope>
</dependency>
+ <!-- 'mvn dependency:analyze' fails to detect use of this dependency -->
+ <dependency>
+ <groupId>org.bouncycastle</groupId>
+ <artifactId>bcprov-jdk16</artifactId>
+ <scope>test</scope>
+ </dependency>
</dependencies>
<build>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d794f785/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml
index 00eb909..e92d02b 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml
@@ -102,6 +102,12 @@
<artifactId>hsqldb</artifactId>
<scope>test</scope>
</dependency>
+ <!-- 'mvn dependency:analyze' fails to detect use of this dependency -->
+ <dependency>
+ <groupId>org.bouncycastle</groupId>
+ <artifactId>bcprov-jdk16</artifactId>
+ <scope>test</scope>
+ </dependency>
</dependencies>
<profiles>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d794f785/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml
index cc2d65d..968f21f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml
@@ -173,5 +173,12 @@
<scope>test</scope>
<type>test-jar</type>
</dependency>
+
+ <!-- 'mvn dependency:analyze' fails to detect use of this dependency -->
+ <dependency>
+ <groupId>org.bouncycastle</groupId>
+ <artifactId>bcprov-jdk16</artifactId>
+ <scope>test</scope>
+ </dependency>
</dependencies>
</project>
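
The added dependency is test-scope only, and the comment in each pom explains why it is easy to lose: 'mvn dependency:analyze' cannot see the usage because it happens inside test helper code rather than in the module's own classes. A common example of that kind of use is registering the BouncyCastle JCE provider before generating self-signed certificates for HTTPS-enabled mini-clusters. The snippet below is a hypothetical illustration of such a test-only use, not code from these modules; it simply shows why bcprov-jdk16 must be on the test classpath at runtime:

import java.security.Security;
import org.bouncycastle.jce.provider.BouncyCastleProvider;

public class BouncyCastleSetup {
  // Register the BouncyCastle provider once so certificate and keystore
  // helpers can use its algorithms; fails with ClassNotFoundException if
  // bcprov-jdk16 is missing from the test classpath.
  public static void register() {
    if (Security.getProvider(BouncyCastleProvider.PROVIDER_NAME) == null) {
      Security.addProvider(new BouncyCastleProvider());
    }
  }
}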
[31/43] git commit: YARN-2579. Fixed a deadlock issue when
EmbeddedElectorService and FatalEventDispatcher try to transition RM to
StandBy at the same time. Contributed by Rohith Sharmaks
Posted by vi...@apache.org.
YARN-2579. Fixed a deadlock issue when EmbeddedElectorService and FatalEventDispatcher try to transition RM to StandBy at the same time. Contributed by Rohith Sharmaks
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/395275af
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/395275af
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/395275af
Branch: refs/heads/HDFS-EC
Commit: 395275af8622c780b9071c243422b0780e096202
Parents: 8549fa5
Author: Jian He <ji...@apache.org>
Authored: Wed Nov 5 16:59:54 2014 -0800
Committer: Jian He <ji...@apache.org>
Committed: Wed Nov 5 16:59:54 2014 -0800
----------------------------------------------------------------------
hadoop-yarn-project/CHANGES.txt | 4 ++
.../hadoop/yarn/client/TestRMFailover.java | 8 +--
.../resourcemanager/RMFatalEventType.java | 1 -
.../server/resourcemanager/ResourceManager.java | 49 +++++++---------
.../resourcemanager/recovery/RMStateStore.java | 24 ++++++--
.../yarn/server/resourcemanager/TestRMHA.java | 62 ++++++++++++++++++++
6 files changed, 109 insertions(+), 39 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/395275af/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 8adde9b..887d1d4 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -868,6 +868,10 @@ Release 2.6.0 - UNRELEASED
YARN-2805. Fixed ResourceManager to load HA configs correctly before kerberos
login. (Wangda Tan via vinodkv)
+ YARN-2579. Fixed a deadlock issue when EmbeddedElectorService and
+ FatalEventDispatcher try to transition RM to StandBy at the same time.
+ (Rohith Sharmaks via jianhe)
+
Release 2.5.2 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/395275af/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
index 0440f1d..0634cc3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
@@ -43,8 +43,6 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.server.MiniYARNCluster;
import org.apache.hadoop.yarn.server.resourcemanager.AdminService;
-import org.apache.hadoop.yarn.server.resourcemanager.RMFatalEvent;
-import org.apache.hadoop.yarn.server.resourcemanager.RMFatalEventType;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.apache.hadoop.yarn.server.webproxy.WebAppProxyServer;
import org.junit.After;
@@ -173,7 +171,6 @@ public class TestRMFailover extends ClientBaseWithFixes {
verifyConnections();
}
- @SuppressWarnings("unchecked")
@Test
public void testAutomaticFailover()
throws YarnException, InterruptedException, IOException {
@@ -196,10 +193,7 @@ public class TestRMFailover extends ClientBaseWithFixes {
// so it transitions to standby.
ResourceManager rm = cluster.getResourceManager(
cluster.getActiveRMIndex());
- RMFatalEvent event =
- new RMFatalEvent(RMFatalEventType.STATE_STORE_FENCED,
- "Fake RMFatalEvent");
- rm.getRMContext().getDispatcher().getEventHandler().handle(event);
+ rm.handleTransitionToStandBy();
int maxWaitingAttempts = 2000;
while (maxWaitingAttempts-- > 0 ) {
if (rm.getRMContext().getHAServiceState() == HAServiceState.STANDBY) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/395275af/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMFatalEventType.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMFatalEventType.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMFatalEventType.java
index 0629c70..789c018 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMFatalEventType.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMFatalEventType.java
@@ -23,7 +23,6 @@ import org.apache.hadoop.classification.InterfaceAudience;
@InterfaceAudience.Private
public enum RMFatalEventType {
// Source <- Store
- STATE_STORE_FENCED,
STATE_STORE_OP_FAILED,
// Source <- Embedded Elector
http://git-wip-us.apache.org/repos/asf/hadoop/blob/395275af/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
index 6adc73a..4051054 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
@@ -269,6 +269,7 @@ public class ResourceManager extends CompositeService implements Recoverable {
@VisibleForTesting
protected void setRMStateStore(RMStateStore rmStore) {
rmStore.setRMDispatcher(rmDispatcher);
+ rmStore.setResourceManager(this);
rmContext.setStateStore(rmStore);
}
@@ -397,11 +398,12 @@ public class ResourceManager extends CompositeService implements Recoverable {
private EventHandler<SchedulerEvent> schedulerDispatcher;
private ApplicationMasterLauncher applicationMasterLauncher;
private ContainerAllocationExpirer containerAllocationExpirer;
-
+ private ResourceManager rm;
private boolean recoveryEnabled;
- RMActiveServices() {
+ RMActiveServices(ResourceManager rm) {
super("RMActiveServices");
+ this.rm = rm;
}
@Override
@@ -449,6 +451,7 @@ public class ResourceManager extends CompositeService implements Recoverable {
try {
rmStore.init(conf);
rmStore.setRMDispatcher(rmDispatcher);
+ rmStore.setResourceManager(rm);
} catch (Exception e) {
// the Exception from stateStore.init() needs to be handled for
// HA and we need to give up master status if we got fenced
@@ -729,39 +732,31 @@ public class ResourceManager extends CompositeService implements Recoverable {
@Private
public static class RMFatalEventDispatcher
implements EventHandler<RMFatalEvent> {
- private final RMContext rmContext;
- private final ResourceManager rm;
-
- public RMFatalEventDispatcher(
- RMContext rmContext, ResourceManager resourceManager) {
- this.rmContext = rmContext;
- this.rm = resourceManager;
- }
@Override
public void handle(RMFatalEvent event) {
LOG.fatal("Received a " + RMFatalEvent.class.getName() + " of type " +
event.getType().name() + ". Cause:\n" + event.getCause());
- if (event.getType() == RMFatalEventType.STATE_STORE_FENCED) {
- LOG.info("RMStateStore has been fenced");
- if (rmContext.isHAEnabled()) {
- try {
- // Transition to standby and reinit active services
- LOG.info("Transitioning RM to Standby mode");
- rm.transitionToStandby(true);
- rm.adminService.resetLeaderElection();
- return;
- } catch (Exception e) {
- LOG.fatal("Failed to transition RM to Standby mode.");
- }
- }
- }
-
ExitUtil.terminate(1, event.getCause());
}
}
+ public void handleTransitionToStandBy() {
+ if (rmContext.isHAEnabled()) {
+ try {
+ // Transition to standby and reinit active services
+ LOG.info("Transitioning RM to Standby mode");
+ transitionToStandby(true);
+ adminService.resetLeaderElection();
+ return;
+ } catch (Exception e) {
+ LOG.fatal("Failed to transition RM to Standby mode.");
+ ExitUtil.terminate(1, e);
+ }
+ }
+ }
+
@Private
public static final class ApplicationEventDispatcher implements
EventHandler<RMAppEvent> {
@@ -990,7 +985,7 @@ public class ResourceManager extends CompositeService implements Recoverable {
* @throws Exception
*/
protected void createAndInitActiveServices() throws Exception {
- activeServices = new RMActiveServices();
+ activeServices = new RMActiveServices(this);
activeServices.init(conf);
}
@@ -1227,7 +1222,7 @@ public class ResourceManager extends CompositeService implements Recoverable {
private Dispatcher setupDispatcher() {
Dispatcher dispatcher = createDispatcher();
dispatcher.register(RMFatalEventType.class,
- new ResourceManager.RMFatalEventDispatcher(this.rmContext, this));
+ new ResourceManager.RMFatalEventDispatcher());
return dispatcher;
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/395275af/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
index 973fe54..beac403 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
@@ -49,6 +49,7 @@ import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier;
import org.apache.hadoop.yarn.server.records.Version;
import org.apache.hadoop.yarn.server.resourcemanager.RMFatalEvent;
import org.apache.hadoop.yarn.server.resourcemanager.RMFatalEventType;
+import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.AMRMTokenSecretManagerState;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationAttemptStateData;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationStateData;
@@ -87,6 +88,7 @@ public abstract class RMStateStore extends AbstractService {
"AMRMTokenSecretManagerRoot";
protected static final String VERSION_NODE = "RMVersionNode";
protected static final String EPOCH_NODE = "EpochNode";
+ private ResourceManager resourceManager;
public static final Log LOG = LogFactory.getLog(RMStateStore.class);
@@ -818,13 +820,15 @@ public abstract class RMStateStore extends AbstractService {
* @param failureCause the exception due to which the operation failed
*/
protected void notifyStoreOperationFailed(Exception failureCause) {
- RMFatalEventType type;
if (failureCause instanceof StoreFencedException) {
- type = RMFatalEventType.STATE_STORE_FENCED;
+ Thread standByTransitionThread =
+ new Thread(new StandByTransitionThread());
+ standByTransitionThread.setName("StandByTransitionThread Handler");
+ standByTransitionThread.start();
} else {
- type = RMFatalEventType.STATE_STORE_OP_FAILED;
+ rmDispatcher.getEventHandler().handle(
+ new RMFatalEvent(RMFatalEventType.STATE_STORE_OP_FAILED, failureCause));
}
- rmDispatcher.getEventHandler().handle(new RMFatalEvent(type, failureCause));
}
@SuppressWarnings("unchecked")
@@ -866,4 +870,16 @@ public abstract class RMStateStore extends AbstractService {
* @throws Exception
*/
public abstract void deleteStore() throws Exception;
+
+ public void setResourceManager(ResourceManager rm) {
+ this.resourceManager = rm;
+ }
+
+ private class StandByTransitionThread implements Runnable {
+ @Override
+ public void run() {
+ LOG.info("RMStateStore has been fenced");
+ resourceManager.handleTransitionToStandBy();
+ }
+ }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/395275af/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java
index 8cef4c9..c6d7d09 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java
@@ -46,6 +46,7 @@ import org.apache.hadoop.yarn.event.Dispatcher;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.MemoryRMStateStore;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.StoreFencedException;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
@@ -451,6 +452,67 @@ public class TestRMHA {
checkActiveRMFunctionality();
}
+ @Test(timeout = 90000)
+ public void testTransitionedToStandbyShouldNotHang() throws Exception {
+ configuration.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED, false);
+ Configuration conf = new YarnConfiguration(configuration);
+
+ MemoryRMStateStore memStore = new MemoryRMStateStore() {
+ @Override
+ public synchronized void updateApplicationState(ApplicationState appState) {
+ notifyStoreOperationFailed(new StoreFencedException());
+ }
+ };
+ memStore.init(conf);
+ rm = new MockRM(conf, memStore) {
+ @Override
+ void stopActiveServices() throws Exception {
+ Thread.sleep(10000);
+ super.stopActiveServices();
+ }
+ };
+ rm.init(conf);
+ final StateChangeRequestInfo requestInfo =
+ new StateChangeRequestInfo(
+ HAServiceProtocol.RequestSource.REQUEST_BY_USER);
+
+ assertEquals(STATE_ERR, HAServiceState.INITIALIZING, rm.adminService
+ .getServiceStatus().getState());
+ assertFalse("RM is ready to become active before being started",
+ rm.adminService.getServiceStatus().isReadyToBecomeActive());
+ checkMonitorHealth();
+
+ rm.start();
+ checkMonitorHealth();
+ checkStandbyRMFunctionality();
+
+ // 2. Transition to Active.
+ rm.adminService.transitionToActive(requestInfo);
+
+ // 3. Try Transition to standby
+ Thread t = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ rm.transitionToStandby(true);
+ } catch (IOException e) {
+ e.printStackTrace();
+ } catch (Exception e) {
+ // TODO Auto-generated catch block
+ e.printStackTrace();
+ }
+ }
+ });
+ t.start();
+
+ rm.getRMContext().getStateStore().updateApplicationState(null);
+ t.join(); // wait for thread to finish
+
+ rm.adminService.transitionToStandby(requestInfo);
+ checkStandbyRMFunctionality();
+ rm.stop();
+ }
+
public void innerTestHAWithRMHostName(boolean includeBindHost) {
//this is run two times, with and without a bind host configured
if (includeBindHost) {
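
The shape of the fix above is worth spelling out: previously a StoreFencedException was turned into a STATE_STORE_FENCED RMFatalEvent and handled synchronously on the dispatcher thread, which could deadlock against EmbeddedElectorService while both tried to transition the RM to standby. After the change, RMStateStore.notifyStoreOperationFailed starts a dedicated StandByTransitionThread that calls ResourceManager.handleTransitionToStandBy, and only non-fencing failures remain fatal events. A stripped-down sketch of that hand-off pattern, with hypothetical names standing in for the RM classes:

// Sketch only: a store that hands off "fenced" handling to its own thread
// instead of blocking the event dispatcher. Names are illustrative.
public class FencingAwareStore {
  private final StandbyCapable owner;

  public FencingAwareStore(StandbyCapable owner) {
    this.owner = owner;
  }

  // Called when a store write discovers another master has fenced us.
  void notifyStoreOperationFailed(Exception cause) {
    if (cause instanceof FencedException) {
      // Run the standby transition off the dispatcher thread to avoid the
      // lock ordering that produced the YARN-2579 deadlock.
      Thread t = new Thread(new Runnable() {
        public void run() {
          owner.transitionToStandby();
        }
      });
      t.setName("StandByTransitionThread");
      t.start();
    } else {
      // Non-fencing failures stay fatal and go through the normal path.
      owner.handleFatal(cause);
    }
  }
}

interface StandbyCapable {
  void transitionToStandby();
  void handleFatal(Exception cause);
}

class FencedException extends Exception {
}

The new testTransitionedToStandbyShouldNotHang test above drives this path by making updateApplicationState report a StoreFencedException while stopActiveServices is deliberately slowed down, and verifies the RM still reaches STANDBY.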
[29/43] git commit: YARN-2805. Fixed ResourceManager to load HA
configs correctly before kerberos login. Contributed by Wangda Tan.
Posted by vi...@apache.org.
YARN-2805. Fixed ResourceManager to load HA configs correctly before kerberos login. Contributed by Wangda Tan.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/834e931d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/834e931d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/834e931d
Branch: refs/heads/HDFS-EC
Commit: 834e931d8efe4d806347b266e7e62929ce05389b
Parents: ba1d4ad
Author: Vinod Kumar Vavilapalli <vi...@apache.org>
Authored: Wed Nov 5 15:29:55 2014 -0800
Committer: Vinod Kumar Vavilapalli <vi...@apache.org>
Committed: Wed Nov 5 15:30:33 2014 -0800
----------------------------------------------------------------------
hadoop-yarn-project/CHANGES.txt | 3 +++
.../yarn/server/resourcemanager/ResourceManager.java | 11 ++++++-----
2 files changed, 9 insertions(+), 5 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/834e931d/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 2870583..8adde9b 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -865,6 +865,9 @@ Release 2.6.0 - UNRELEASED
YARN-2767. Added a test case to verify that http static user cannot kill or submit
apps in the secure mode. (Varun Vasudev via zjshen)
+ YARN-2805. Fixed ResourceManager to load HA configs correctly before kerberos
+ login. (Wangda Tan via vinodkv)
+
Release 2.5.2 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/834e931d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
index 642c732..6adc73a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
@@ -194,6 +194,12 @@ public class ResourceManager extends CompositeService implements Recoverable {
this.conf = conf;
this.rmContext = new RMContextImpl();
+ // Set HA configuration should be done before login
+ this.rmContext.setHAEnabled(HAUtil.isHAEnabled(this.conf));
+ if (this.rmContext.isHAEnabled()) {
+ HAUtil.verifyAndSetConfiguration(this.conf);
+ }
+
// Set UGI and do login
// If security is enabled, use login user
// If security is not enabled, use current user
@@ -245,11 +251,6 @@ public class ResourceManager extends CompositeService implements Recoverable {
adminService = createAdminService();
addService(adminService);
rmContext.setRMAdminService(adminService);
-
- this.rmContext.setHAEnabled(HAUtil.isHAEnabled(this.conf));
- if (this.rmContext.isHAEnabled()) {
- HAUtil.verifyAndSetConfiguration(this.conf);
- }
createAndInitActiveServices();
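
The diff moves the HA setup (setHAEnabled plus HAUtil.verifyAndSetConfiguration) ahead of the UGI login block in serviceInit. The likely reason, as the commit message suggests, is that verifyAndSetConfiguration materializes the per-RM-id keys such as the RM hostname and addresses, and the kerberos login that follows may resolve _HOST in the configured principal against those values, so logging in before they are set can pick up the wrong host in a secure HA cluster. A sketch of the resulting initialization order; every method name below is an illustrative stand-in, not the actual ResourceManager code:

import org.apache.hadoop.conf.Configuration;

public class HaInitOrderSketch {
  void init(Configuration conf) throws Exception {
    // 1. Resolve HA settings first so the per-RM-id keys are in place.
    if (conf.getBoolean("yarn.resourcemanager.ha.enabled", false)) {
      applyPerRmIdConfiguration(conf);
    }
    // 2. Only then log in, so principal/_HOST resolution sees the HA addresses.
    doSecureLogin(conf);
    // 3. Continue with the remaining services.
    createAndInitActiveServices(conf);
  }

  void applyPerRmIdConfiguration(Configuration conf) { /* stand-in */ }
  void doSecureLogin(Configuration conf) throws Exception { /* stand-in */ }
  void createAndInitActiveServices(Configuration conf) { /* stand-in */ }
}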