Posted to common-commits@hadoop.apache.org by xy...@apache.org on 2017/09/20 04:58:05 UTC
[01/18] hadoop git commit: MAPREDUCE-6958. Shuffle audit logger should log size of shuffle transfer. Contributed by Jason Lowe
Repository: hadoop
Updated Branches:
refs/heads/HDFS-7240 14e184a28 -> d77c8107f
MAPREDUCE-6958. Shuffle audit logger should log size of shuffle transfer. Contributed by Jason Lowe
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b3d61304
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b3d61304
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b3d61304
Branch: refs/heads/HDFS-7240
Commit: b3d61304f2fa4a99526f7a60ccaac9f262083079
Parents: 1ee2527
Author: Jason Lowe <jl...@apache.org>
Authored: Mon Sep 18 17:04:43 2017 -0500
Committer: Jason Lowe <jl...@apache.org>
Committed: Mon Sep 18 17:04:43 2017 -0500
----------------------------------------------------------------------
.../org/apache/hadoop/mapred/ShuffleHandler.java | 18 +++++++++++-------
1 file changed, 11 insertions(+), 7 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b3d61304/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
index 863da7e..06a3e42 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
@@ -992,13 +992,6 @@ public class ShuffleHandler extends AuxiliaryService {
return;
}
- // this audit log is disabled by default,
- // to turn it on please enable this audit log
- // on log4j.properties by uncommenting the setting
- if (AUDITLOG.isDebugEnabled()) {
- AUDITLOG.debug("shuffle for " + jobQ.get(0) + " mappers: " + mapIds +
- " reducer " + reduceQ.get(0));
- }
int reduceId;
String jobId;
try {
@@ -1183,6 +1176,17 @@ public class ShuffleHandler extends AuxiliaryService {
// Now set the response headers.
setResponseHeaders(response, keepAliveParam, contentLength);
+
+ // this audit log is disabled by default,
+ // to turn it on please enable this audit log
+ // on log4j.properties by uncommenting the setting
+ if (AUDITLOG.isDebugEnabled()) {
+ StringBuilder sb = new StringBuilder("shuffle for ").append(jobId);
+ sb.append(" mappers: ").append(mapIds);
+ sb.append(" reducer ").append(reduce);
+ sb.append(" length ").append(contentLength);
+ AUDITLOG.debug(sb.toString());
+ }
}
protected void setResponseHeaders(HttpResponse response,
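Worth spelling out, since the patch's whole point rides on it: the audit line moves to after setResponseHeaders() so that contentLength is known when the message is built, and the isDebugEnabled() guard ensures the StringBuilder work is skipped entirely when the audit log is off. A minimal, self-contained sketch of that guard-then-build idiom (slf4j is assumed here for illustration and the types are simplified; in the real handler mapIds is a list and AUDITLOG is the class's own audit logger):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class ShuffleAuditSketch {
      // Stand-in for ShuffleHandler's AUDITLOG.
      private static final Logger AUDITLOG =
          LoggerFactory.getLogger("ShuffleAudit");

      static void logShuffle(String jobId, String mapIds, int reduce,
          long contentLength) {
        // Level check first: when audit debug is off, no string is built.
        if (AUDITLOG.isDebugEnabled()) {
          StringBuilder sb = new StringBuilder("shuffle for ").append(jobId);
          sb.append(" mappers: ").append(mapIds);
          sb.append(" reducer ").append(reduce);
          sb.append(" length ").append(contentLength);
          AUDITLOG.debug(sb.toString());
        }
      }

      public static void main(String[] args) {
        logShuffle("job_0001", "[attempt_000000]", 0, 4096L);
      }
    }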
[02/18] hadoop git commit: HADOOP-14835. mvn site build throws SAX errors. Contributed by Andrew Wang and Sean Mackrory.
HADOOP-14835. mvn site build throws SAX errors. Contributed by Andrew Wang and Sean Mackrory.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3cf3540f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3cf3540f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3cf3540f
Branch: refs/heads/HDFS-7240
Commit: 3cf3540f19b5fd1a174690db9f1b7be2977d96ba
Parents: b3d6130
Author: Andrew Wang <wa...@apache.org>
Authored: Mon Sep 18 15:13:42 2017 -0700
Committer: Andrew Wang <wa...@apache.org>
Committed: Mon Sep 18 15:13:42 2017 -0700
----------------------------------------------------------------------
BUILDING.txt | 2 ++
dev-support/bin/create-release | 1 +
.../hadoop-mapreduce-client/pom.xml | 17 ++++++++++++++++-
hadoop-project-dist/pom.xml | 17 ++++++++++++++++-
hadoop-project/pom.xml | 2 ++
hadoop-yarn-project/hadoop-yarn/pom.xml | 17 ++++++++++++++++-
6 files changed, 53 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3cf3540f/BUILDING.txt
----------------------------------------------------------------------
diff --git a/BUILDING.txt b/BUILDING.txt
index 14deec8..47aaab4 100644
--- a/BUILDING.txt
+++ b/BUILDING.txt
@@ -308,6 +308,8 @@ Create a local staging version of the website (in /tmp/hadoop-site)
$ mvn clean site -Preleasedocs; mvn site:stage -DstagingDirectory=/tmp/hadoop-site
+Note that the site needs to be built in a second pass after other artifacts.
+
----------------------------------------------------------------------------------
Installing Hadoop
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3cf3540f/dev-support/bin/create-release
----------------------------------------------------------------------
diff --git a/dev-support/bin/create-release b/dev-support/bin/create-release
index b22e90b..b98c058 100755
--- a/dev-support/bin/create-release
+++ b/dev-support/bin/create-release
@@ -564,6 +564,7 @@ function makearelease
"${MVN}" "${MVN_ARGS[@]}" install \
site site:stage \
-DskipTests \
+ -DskipShade \
-Pdist,src \
"${DOCFLAGS}"
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3cf3540f/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml
index aa7c7b1..274a821 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml
@@ -196,6 +196,13 @@
<jdiff.stability>-unstable</jdiff.stability>
<jdiff.javadoc.maxmemory>512m</jdiff.javadoc.maxmemory>
</properties>
+ <dependencies>
+ <dependency>
+ <groupId>xerces</groupId>
+ <artifactId>xercesImpl</artifactId>
+ <version>${xerces.jdiff.version}</version>
+ </dependency>
+ </dependencies>
<build>
<plugins>
<plugin>
@@ -238,6 +245,14 @@
<outputDirectory>${project.build.directory}</outputDirectory>
<destFileName>hadoop-annotations.jar</destFileName>
</artifactItem>
+ <artifactItem>
+ <groupId>xerces</groupId>
+ <artifactId>xercesImpl</artifactId>
+ <version>${xerces.version.jdiff}</version>
+ <overWrite>false</overWrite>
+ <outputDirectory>${project.build.directory}</outputDirectory>
+ <destFileName>xerces.jar</destFileName>
+ </artifactItem>
</artifactItems>
</configuration>
</execution>
@@ -275,7 +290,7 @@
sourceFiles="${dev-support.relative.dir}/jdiff/Null.java"
maxmemory="${jdiff.javadoc.maxmemory}">
<doclet name="org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet"
- path="${project.build.directory}/hadoop-annotations.jar:${project.build.directory}/jdiff.jar">
+ path="${project.build.directory}/hadoop-annotations.jar:${project.build.directory}/jdiff.jar:${project.build.directory}/xerces.jar">
<param name="-oldapi" value="${project.name} ${jdiff.stable.api}"/>
<param name="-newapi" value="${project.name} ${project.version}"/>
<param name="-oldapidir" value="${basedir}/${dev-support.relative.dir}/jdiff"/>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3cf3540f/hadoop-project-dist/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project-dist/pom.xml b/hadoop-project-dist/pom.xml
index addc2a5..8815dd4 100644
--- a/hadoop-project-dist/pom.xml
+++ b/hadoop-project-dist/pom.xml
@@ -152,6 +152,13 @@
<jdiff.compatibility></jdiff.compatibility>
<jdiff.javadoc.maxmemory>512m</jdiff.javadoc.maxmemory>
</properties>
+ <dependencies>
+ <dependency>
+ <groupId>xerces</groupId>
+ <artifactId>xercesImpl</artifactId>
+ <version>${xerces.jdiff.version}</version>
+ </dependency>
+ </dependencies>
<build>
<plugins>
<plugin>
@@ -194,6 +201,14 @@
<outputDirectory>${project.build.directory}</outputDirectory>
<destFileName>hadoop-annotations.jar</destFileName>
</artifactItem>
+ <artifactItem>
+ <groupId>xerces</groupId>
+ <artifactId>xercesImpl</artifactId>
+ <version>${xerces.jdiff.version}</version>
+ <overWrite>false</overWrite>
+ <outputDirectory>${project.build.directory}</outputDirectory>
+ <destFileName>xerces.jar</destFileName>
+ </artifactItem>
</artifactItems>
</configuration>
</execution>
@@ -259,7 +274,7 @@
sourceFiles="${basedir}/dev-support/jdiff/Null.java"
maxmemory="${jdiff.javadoc.maxmemory}">
<doclet name="org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet"
- path="${project.build.directory}/hadoop-annotations.jar:${project.build.directory}/jdiff.jar">
+ path="${project.build.directory}/hadoop-annotations.jar:${project.build.directory}/jdiff.jar:${project.build.directory}/xerces.jar">
<param name="-oldapi" value="${project.name} ${jdiff.stable.api}"/>
<param name="-newapi" value="${project.name} ${project.version}"/>
<param name="-oldapidir" value="${basedir}/dev-support/jdiff"/>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3cf3540f/hadoop-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index b8fb961..a698126 100755
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -45,6 +45,8 @@
<!-- These 2 versions are defined here because they are used -->
<!-- JDIFF generation from embedded ant in the antrun plugin -->
<jdiff.version>1.0.9</jdiff.version>
+ <!-- Version number for xerces used by JDiff -->
+ <xerces.jdiff.version>2.11.0</xerces.jdiff.version>
<kafka.version>0.8.2.1</kafka.version>
<hbase.version>1.2.6</hbase.version>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3cf3540f/hadoop-yarn-project/hadoop-yarn/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/pom.xml b/hadoop-yarn-project/hadoop-yarn/pom.xml
index be435d5..288f4bc 100644
--- a/hadoop-yarn-project/hadoop-yarn/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/pom.xml
@@ -113,6 +113,13 @@
<jdiff.compatibility></jdiff.compatibility>
<jdiff.javadoc.maxmemory>512m</jdiff.javadoc.maxmemory>
</properties>
+ <dependencies>
+ <dependency>
+ <groupId>xerces</groupId>
+ <artifactId>xercesImpl</artifactId>
+ <version>${xerces.jdiff.version}</version>
+ </dependency>
+ </dependencies>
<build>
<plugins>
<plugin>
@@ -155,6 +162,14 @@
<outputDirectory>${project.build.directory}</outputDirectory>
<destFileName>hadoop-annotations.jar</destFileName>
</artifactItem>
+ <artifactItem>
+ <groupId>xerces</groupId>
+ <artifactId>xercesImpl</artifactId>
+ <version>${xerces.version.jdiff}</version>
+ <overWrite>false</overWrite>
+ <outputDirectory>${project.build.directory}</outputDirectory>
+ <destFileName>xerces.jar</destFileName>
+ </artifactItem>
</artifactItems>
</configuration>
</execution>
@@ -193,7 +208,7 @@
sourceFiles="${dev-support.relative.dir}/jdiff/Null.java"
maxmemory="${jdiff.javadoc.maxmemory}">
<doclet name="org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet"
- path="${project.build.directory}/hadoop-annotations.jar:${project.build.directory}/jdiff.jar">
+ path="${project.build.directory}/hadoop-annotations.jar:${project.build.directory}/jdiff.jar:${project.build.directory}/xerces.jar">
<param name="-oldapi" value="${project.name} ${jdiff.stable.api}"/>
<param name="-newapi" value="${project.name} ${project.version}"/>
<param name="-oldapidir" value="${basedir}/${dev-support.relative.dir}/jdiff"/>
[15/18] hadoop git commit: HDFS-12437. Fix test setup in TestLeaseRecoveryStriped.
HDFS-12437. Fix test setup in TestLeaseRecoveryStriped.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/12d9d7bc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/12d9d7bc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/12d9d7bc
Branch: refs/heads/HDFS-7240
Commit: 12d9d7bc509bca82b8f40301e3dc5ca764be45eb
Parents: 51edaac
Author: Andrew Wang <wa...@apache.org>
Authored: Tue Sep 19 16:42:20 2017 -0700
Committer: Andrew Wang <wa...@apache.org>
Committed: Tue Sep 19 16:42:20 2017 -0700
----------------------------------------------------------------------
.../hadoop/hdfs/TestLeaseRecoveryStriped.java | 156 ++++++++++++++-----
1 file changed, 113 insertions(+), 43 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/12d9d7bc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java
index 2846dbf..36ac8b3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java
@@ -19,8 +19,7 @@ package org.apache.hadoop.hdfs;
import com.google.common.base.Preconditions;
import com.google.common.base.Supplier;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.apache.commons.lang.builder.ToStringBuilder;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
@@ -28,6 +27,7 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.util.StripedBlockUtil;
import org.apache.hadoop.io.IOUtils;
@@ -40,34 +40,41 @@ import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.mockito.internal.util.reflection.Whitebox;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
+import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Random;
+import java.util.Set;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeoutException;
public class TestLeaseRecoveryStriped {
- public static final Log LOG = LogFactory
- .getLog(TestLeaseRecoveryStriped.class);
+ public static final Logger LOG = LoggerFactory
+ .getLogger(TestLeaseRecoveryStriped.class);
private final ErasureCodingPolicy ecPolicy =
StripedFileTestUtil.getDefaultECPolicy();
private final int dataBlocks = ecPolicy.getNumDataUnits();
private final int parityBlocks = ecPolicy.getNumParityUnits();
private final int cellSize = ecPolicy.getCellSize();
- private final int stripSize = dataBlocks * cellSize;
- private final int stripesPerBlock = 15;
+ private final int stripeSize = dataBlocks * cellSize;
+ private final int stripesPerBlock = 4;
private final int blockSize = cellSize * stripesPerBlock;
private final int blockGroupSize = blockSize * dataBlocks;
private static final int bytesPerChecksum = 512;
static {
GenericTestUtils.setLogLevel(DataNode.LOG, Level.ALL);
+ GenericTestUtils.setLogLevel(DFSStripedOutputStream.LOG, Level.DEBUG);
+ GenericTestUtils.setLogLevel(BlockRecoveryWorker.LOG, Level.DEBUG);
+ GenericTestUtils.setLogLevel(DataStreamer.LOG, Level.DEBUG);
}
static private final String fakeUsername = "fakeUser1";
@@ -83,7 +90,7 @@ public class TestLeaseRecoveryStriped {
public void setup() throws IOException {
conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
- conf.setLong(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 6000L);
+ conf.setLong(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 60000L);
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
false);
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
@@ -104,78 +111,118 @@ public class TestLeaseRecoveryStriped {
}
}
- private int[][][] getBlockLengthsSuite() {
+ private static class BlockLengths {
+ private final int[] blockLengths;
+ private final long safeLength;
+
+ BlockLengths(ErasureCodingPolicy policy, int[] blockLengths) {
+ this.blockLengths = blockLengths;
+ long[] longArray = Arrays.stream(blockLengths).asLongStream().toArray();
+ this.safeLength = StripedBlockUtil.getSafeLength(policy, longArray);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringBuilder(this)
+ .append("blockLengths", getBlockLengths())
+ .append("safeLength", getSafeLength())
+ .toString();
+ }
+
+ /**
+ * Length of each block in a block group.
+ */
+ public int[] getBlockLengths() {
+ return blockLengths;
+ }
+
+ /**
+ * Safe length, calculated by the block lengths.
+ */
+ public long getSafeLength() {
+ return safeLength;
+ }
+ }
+
+ private BlockLengths[] getBlockLengthsSuite() {
final int groups = 4;
- final int minNumCell = 3;
- final int maxNumCell = 11;
+ final int minNumCell = 1;
+ final int maxNumCell = stripesPerBlock;
final int minNumDelta = -4;
final int maxNumDelta = 2;
- int delta = 0;
- int[][][] blkLenSuite = new int[groups][][];
+ BlockLengths[] suite = new BlockLengths[groups];
Random random = ThreadLocalRandom.current();
- for (int i = 0; i < blkLenSuite.length; i++) {
- if (i == blkLenSuite.length - 1) {
- delta = bytesPerChecksum;
- }
- int[][] suite = new int[2][];
- int[] lens = new int[dataBlocks + parityBlocks];
- long[] lenInLong = new long[lens.length];
- for (int j = 0; j < lens.length; j++) {
+ for (int i = 0; i < groups; i++) {
+ int[] blockLengths = new int[dataBlocks + parityBlocks];
+ for (int j = 0; j < blockLengths.length; j++) {
+ // Choose a random number of cells for the block
int numCell = random.nextInt(maxNumCell - minNumCell + 1) + minNumCell;
- int numDelta = j < dataBlocks ?
- random.nextInt(maxNumDelta - minNumDelta + 1) + minNumDelta : 0;
- lens[j] = cellSize * numCell + delta * numDelta;
- lenInLong[j] = lens[j];
+ // For data blocks, jitter the length a bit
+ int numDelta = 0;
+ if (i == groups - 1 && j < dataBlocks) {
+ numDelta = random.nextInt(maxNumDelta - minNumDelta + 1) +
+ minNumDelta;
+ }
+ blockLengths[j] = (cellSize * numCell) + (bytesPerChecksum * numDelta);
}
- suite[0] = lens;
- suite[1] = new int[]{
- (int) StripedBlockUtil.getSafeLength(ecPolicy, lenInLong)};
- blkLenSuite[i] = suite;
+ suite[i] = new BlockLengths(ecPolicy, blockLengths);
}
- return blkLenSuite;
+ return suite;
}
- private final int[][][] blockLengthsSuite = getBlockLengthsSuite();
+ private final BlockLengths[] blockLengthsSuite = getBlockLengthsSuite();
@Test
public void testLeaseRecovery() throws Exception {
+ LOG.info("blockLengthsSuite: " +
+ Arrays.toString(blockLengthsSuite));
for (int i = 0; i < blockLengthsSuite.length; i++) {
- int[] blockLengths = blockLengthsSuite[i][0];
- int safeLength = blockLengthsSuite[i][1][0];
+ BlockLengths blockLengths = blockLengthsSuite[i];
try {
- runTest(blockLengths, safeLength);
+ runTest(blockLengths.getBlockLengths(), blockLengths.getSafeLength());
} catch (Throwable e) {
String msg = "failed testCase at i=" + i + ", blockLengths="
- + Arrays.toString(blockLengths) + "\n"
+ + blockLengths + "\n"
+ StringUtils.stringifyException(e);
Assert.fail(msg);
}
}
}
- private void runTest(int[] blockLengths, int safeLength) throws Exception {
+ private void runTest(int[] blockLengths, long safeLength) throws Exception {
writePartialBlocks(blockLengths);
recoverLease();
List<Long> oldGS = new ArrayList<>();
oldGS.add(1001L);
- StripedFileTestUtil.checkData(dfs, p, safeLength,
+ StripedFileTestUtil.checkData(dfs, p, (int)safeLength,
new ArrayList<DatanodeInfo>(), oldGS, blockGroupSize);
// After recovery, storages are reported by primary DN. we should verify
// storages reported by blockReport.
cluster.restartNameNode(true);
cluster.waitFirstBRCompleted(0, 10000);
- StripedFileTestUtil.checkData(dfs, p, safeLength,
+ StripedFileTestUtil.checkData(dfs, p, (int)safeLength,
new ArrayList<DatanodeInfo>(), oldGS, blockGroupSize);
}
+ /**
+ * Write a file with blocks of different lengths.
+ *
+ * This method depends on completing before the DFS socket timeout.
+ * Otherwise, the client will mark timed-out streamers as failed, and the
+ * write will fail if there are too many failed streamers.
+ *
+ * @param blockLengths lengths of blocks to write
+ * @throws Exception
+ */
private void writePartialBlocks(int[] blockLengths) throws Exception {
final FSDataOutputStream out = dfs.create(p);
final DFSStripedOutputStream stripedOut = (DFSStripedOutputStream) out
.getWrappedStream();
- int length = (stripesPerBlock - 1) * stripSize;
+ int length = (stripesPerBlock - 1) * stripeSize;
int[] posToKill = getPosToKill(blockLengths);
int checkingPos = nextCheckingPos(posToKill, 0);
+ Set<Integer> stoppedStreamerIndexes = new HashSet<>();
try {
for (int pos = 0; pos < length; pos++) {
out.write(StripedFileTestUtil.getByte(pos));
@@ -183,15 +230,31 @@ public class TestLeaseRecoveryStriped {
for (int index : getIndexToStop(posToKill, pos)) {
out.flush();
stripedOut.enqueueAllCurrentPackets();
+ LOG.info("Stopping block stream idx {} at file offset {} block " +
+ "length {}", index, pos, blockLengths[index]);
StripedDataStreamer s = stripedOut.getStripedDataStreamer(index);
waitStreamerAllAcked(s);
waitByteSent(s, blockLengths[index]);
stopBlockStream(s);
+ stoppedStreamerIndexes.add(index);
}
checkingPos = nextCheckingPos(posToKill, pos);
}
}
} finally {
+ // Flush everything
+ out.flush();
+ stripedOut.enqueueAllCurrentPackets();
+ // Wait for streamers that weren't killed above to be written out
+ for (int i=0; i< blockLengths.length; i++) {
+ if (stoppedStreamerIndexes.contains(i)) {
+ continue;
+ }
+ StripedDataStreamer s = stripedOut.getStripedDataStreamer(i);
+ LOG.info("Waiting for block stream idx {} to reach length {}", i,
+ blockLengths[i]);
+ waitStreamerAllAcked(s);
+ }
DFSTestUtil.abortStream(stripedOut);
}
}
@@ -210,7 +273,7 @@ public class TestLeaseRecoveryStriped {
int[] posToKill = new int[dataBlocks + parityBlocks];
for (int i = 0; i < dataBlocks; i++) {
int numStripe = (blockLengths[i] - 1) / cellSize;
- posToKill[i] = numStripe * stripSize + i * cellSize
+ posToKill[i] = numStripe * stripeSize + i * cellSize
+ blockLengths[i] % cellSize;
if (blockLengths[i] % cellSize == 0) {
posToKill[i] += cellSize;
@@ -220,7 +283,7 @@ public class TestLeaseRecoveryStriped {
+ parityBlocks; i++) {
Preconditions.checkArgument(blockLengths[i] % cellSize == 0);
int numStripe = (blockLengths[i]) / cellSize;
- posToKill[i] = numStripe * stripSize;
+ posToKill[i] = numStripe * stripeSize;
}
return posToKill;
}
@@ -243,13 +306,20 @@ public class TestLeaseRecoveryStriped {
public Boolean get() {
return s.bytesSent >= byteSent;
}
- }, 100, 3000);
+ }, 100, 30000);
} catch (TimeoutException e) {
throw new IOException("Timeout waiting for streamer " + s + ". Sent="
+ s.bytesSent + ", expected=" + byteSent);
}
}
+ /**
+ * Stop the block stream without immediately inducing a hard failure.
+ * Packets can continue to be queued until the streamer hits a socket timeout.
+ *
+ * @param s
+ * @throws Exception
+ */
private void stopBlockStream(StripedDataStreamer s) throws Exception {
IOUtils.NullOutputStream nullOutputStream = new IOUtils.NullOutputStream();
Whitebox.setInternalState(s, "blockStream",
@@ -257,8 +327,8 @@ public class TestLeaseRecoveryStriped {
}
private void recoverLease() throws Exception {
- final DistributedFileSystem dfs2 = (DistributedFileSystem) getFSAsAnotherUser(
- conf);
+ final DistributedFileSystem dfs2 =
+ (DistributedFileSystem) getFSAsAnotherUser(conf);
try {
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
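The randomized test setup is easier to follow pulled out of the diff. The sketch below mirrors the new getBlockLengthsSuite() logic: every internal block gets a whole number of cells between 1 and stripesPerBlock, and only the last group jitters its data-block lengths off cell boundaries by up to a few checksum chunks. RS(6,3) with 1 MB cells is assumed for the constants; the real test feeds each array to StripedBlockUtil.getSafeLength(), which is omitted here to keep the sketch dependency-free:

    import java.util.Arrays;
    import java.util.Random;
    import java.util.concurrent.ThreadLocalRandom;

    public class BlockLengthSuiteSketch {
      public static void main(String[] args) {
        final int dataBlocks = 6, parityBlocks = 3; // assumed RS(6,3)
        final int cellSize = 1024 * 1024;           // assumed 1 MB cells
        final int stripesPerBlock = 4, bytesPerChecksum = 512;
        final int groups = 4;
        final int minNumCell = 1, maxNumCell = stripesPerBlock;
        final int minNumDelta = -4, maxNumDelta = 2;
        Random random = ThreadLocalRandom.current();
        for (int i = 0; i < groups; i++) {
          int[] blockLengths = new int[dataBlocks + parityBlocks];
          for (int j = 0; j < blockLengths.length; j++) {
            // A random whole number of cells for this internal block.
            int numCell = random.nextInt(maxNumCell - minNumCell + 1)
                + minNumCell;
            // Only the last group's data blocks get jittered lengths.
            int numDelta = 0;
            if (i == groups - 1 && j < dataBlocks) {
              numDelta = random.nextInt(maxNumDelta - minNumDelta + 1)
                  + minNumDelta;
            }
            blockLengths[j] = (cellSize * numCell)
                + (bytesPerChecksum * numDelta);
          }
          // The real test computes the group's safe (recoverable) length
          // from this array via StripedBlockUtil.getSafeLength().
          System.out.println("group " + i + ": "
              + Arrays.toString(blockLengths));
        }
      }
    }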
[14/18] hadoop git commit: HDFS-12445. Correct spellings of choosen to chosen. Contributed by hu xiaodong.
HDFS-12445. Correct spellings of choosen to chosen. Contributed by hu xiaodong.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/51edaacd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/51edaacd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/51edaacd
Branch: refs/heads/HDFS-7240
Commit: 51edaacd09d86419f99ca96545a1393db1f43f73
Parents: 59830ca
Author: Andrew Wang <wa...@apache.org>
Authored: Tue Sep 19 13:48:23 2017 -0700
Committer: Andrew Wang <wa...@apache.org>
Committed: Tue Sep 19 13:48:23 2017 -0700
----------------------------------------------------------------------
.../apache/hadoop/hdfs/server/blockmanagement/BlockManager.java | 4 ++--
.../org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java | 2 +-
.../java/org/apache/hadoop/examples/dancing/DancingLinks.java | 2 +-
.../org/apache/hadoop/examples/dancing/DistributedPentomino.java | 4 ++--
.../main/java/org/apache/hadoop/examples/dancing/Pentomino.java | 2 +-
5 files changed, 7 insertions(+), 7 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/51edaacd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index f33ec63..0545bb2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -3551,8 +3551,8 @@ public class BlockManager implements BlockStatsMXBean {
List<DatanodeStorageInfo> replicasToDelete = replicator
.chooseReplicasToDelete(nonExcess, nonExcess, replication, excessTypes,
addedNode, delNodeHint);
- for (DatanodeStorageInfo choosenReplica : replicasToDelete) {
- processChosenExcessRedundancy(nonExcess, choosenReplica, storedBlock);
+ for (DatanodeStorageInfo chosenReplica : replicasToDelete) {
+ processChosenExcessRedundancy(nonExcess, chosenReplica, storedBlock);
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/51edaacd/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
index b6c1318..1860565 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
@@ -178,7 +178,7 @@ public class TestDeadDatanode {
clientNode, new HashSet<>(), 256 * 1024 * 1024L, null, (byte) 7,
BlockType.CONTIGUOUS, null, null);
for (DatanodeStorageInfo datanodeStorageInfo : results) {
- assertFalse("Dead node should not be choosen", datanodeStorageInfo
+ assertFalse("Dead node should not be chosen", datanodeStorageInfo
.getDatanodeDescriptor().equals(clientNode));
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/51edaacd/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DancingLinks.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DancingLinks.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DancingLinks.java
index 537b4d4..eef4461 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DancingLinks.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DancingLinks.java
@@ -368,7 +368,7 @@ public class DancingLinks<ColumnName> {
/**
* Make one move from a prefix
- * @param goalRow the row that should be choosen
+ * @param goalRow the row that should be chosen
* @return the row that was found
*/
private Node<ColumnName> advance(int goalRow) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/51edaacd/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DistributedPentomino.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DistributedPentomino.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DistributedPentomino.java
index 29f1eb2..d4fe6dc 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DistributedPentomino.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DistributedPentomino.java
@@ -39,8 +39,8 @@ import com.google.common.base.Charsets;
* Launch a distributed pentomino solver.
* It generates a complete list of prefixes of length N with each unique prefix
* as a separate line. A prefix is a sequence of N integers that denote the
- * index of the row that is choosen for each column in order. Note that the
- * next column is heuristically choosen by the solver, so it is dependant on
+ * index of the row that is chosen for each column in order. Note that the
+ * next column is heuristically chosen by the solver, so it is dependant on
* the previous choice. That file is given as the input to
* map/reduce. The output key/value are the move prefix/solution as Text/Text.
*/
http://git-wip-us.apache.org/repos/asf/hadoop/blob/51edaacd/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/Pentomino.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/Pentomino.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/Pentomino.java
index 2485728..a30d62c 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/Pentomino.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/Pentomino.java
@@ -411,7 +411,7 @@ public class Pentomino {
/**
* Find all of the solutions that start with the given prefix. The printer
* is given each solution as it is found.
- * @param split a list of row indexes that should be choosen for each row
+ * @param split a list of row indexes that should be chosen for each row
* in order
* @return the number of solutions found
*/
[09/18] hadoop git commit: Revert "MAPREDUCE-6958. Shuffle audit logger should log size of shuffle transfer. Contributed by Jason Lowe"
Revert "MAPREDUCE-6958. Shuffle audit logger should log size of shuffle transfer. Contributed by Jason Lowe"
This reverts commit b3d61304f2fa4a99526f7a60ccaac9f262083079.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ea845ba5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ea845ba5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ea845ba5
Branch: refs/heads/HDFS-7240
Commit: ea845ba58c585647c4be8d30d9b814f098e34a12
Parents: aa6e8d2
Author: Jason Lowe <jl...@apache.org>
Authored: Tue Sep 19 08:45:05 2017 -0500
Committer: Jason Lowe <jl...@apache.org>
Committed: Tue Sep 19 08:45:05 2017 -0500
----------------------------------------------------------------------
.../org/apache/hadoop/mapred/ShuffleHandler.java | 18 +++++++-----------
1 file changed, 7 insertions(+), 11 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ea845ba5/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
index 06a3e42..863da7e 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
@@ -992,6 +992,13 @@ public class ShuffleHandler extends AuxiliaryService {
return;
}
+ // this audit log is disabled by default,
+ // to turn it on please enable this audit log
+ // on log4j.properties by uncommenting the setting
+ if (AUDITLOG.isDebugEnabled()) {
+ AUDITLOG.debug("shuffle for " + jobQ.get(0) + " mappers: " + mapIds +
+ " reducer " + reduceQ.get(0));
+ }
int reduceId;
String jobId;
try {
@@ -1176,17 +1183,6 @@ public class ShuffleHandler extends AuxiliaryService {
// Now set the response headers.
setResponseHeaders(response, keepAliveParam, contentLength);
-
- // this audit log is disabled by default,
- // to turn it on please enable this audit log
- // on log4j.properties by uncommenting the setting
- if (AUDITLOG.isDebugEnabled()) {
- StringBuilder sb = new StringBuilder("shuffle for ").append(jobId);
- sb.append(" mappers: ").append(mapIds);
- sb.append(" reducer ").append(reduce);
- sb.append(" length ").append(contentLength);
- AUDITLOG.debug(sb.toString());
- }
}
protected void setResponseHeaders(HttpResponse response,
[16/18] hadoop git commit: YARN-6499. Remove the doc about Schedulable#redistributeShare(). (Contributed by Chetna Chaudhari via Yufei Gu)
YARN-6499. Remove the doc about Schedulable#redistributeShare(). (Contributed by Chetna Chaudhari via Yufei Gu)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a9019e1f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a9019e1f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a9019e1f
Branch: refs/heads/HDFS-7240
Commit: a9019e1fb753f15c1927e3f9355996fd6544c14f
Parents: 647b752
Author: Yufei Gu <yu...@apache.org>
Authored: Tue Sep 19 18:27:37 2017 -0700
Committer: Yufei Gu <yu...@apache.org>
Committed: Tue Sep 19 18:28:31 2017 -0700
----------------------------------------------------------------------
.../yarn/server/resourcemanager/scheduler/fair/Schedulable.java | 4 ----
1 file changed, 4 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9019e1f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/Schedulable.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/Schedulable.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/Schedulable.java
index 4d6af98..bd1ff7a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/Schedulable.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/Schedulable.java
@@ -41,10 +41,6 @@ import org.apache.hadoop.yarn.api.records.Resource;
* - updateDemand() is called periodically to compute the demand of the various
* jobs and queues, which may be expensive (e.g. jobs must iterate through all
* their tasks to count failed tasks, tasks that can be speculated, etc).
- * - redistributeShare() is called after demands are updated and a Schedulable's
- * fair share has been set by its parent to let it distribute its share among
- * the other Schedulables within it (e.g. for queues that want to perform fair
- * sharing among their jobs).
*/
@Private
@Unstable
[07/18] hadoop git commit: HDFS-12479. Some misuses of lock in DFSStripedOutputStream. Contributed by Huafeng Wang
HDFS-12479. Some misuses of lock in DFSStripedOutputStream. Contributed by Huafeng Wang
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dba7a7dd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dba7a7dd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dba7a7dd
Branch: refs/heads/HDFS-7240
Commit: dba7a7dd9d70adfab36a78eb55059c54e553a5cb
Parents: 2018538
Author: Kai Zheng <ka...@intel.com>
Authored: Tue Sep 19 17:45:41 2017 +0800
Committer: Kai Zheng <ka...@intel.com>
Committed: Tue Sep 19 17:45:41 2017 +0800
----------------------------------------------------------------------
.../java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java | 9 ++++-----
1 file changed, 4 insertions(+), 5 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dba7a7dd/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
index 44db3a6..66eec7a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
@@ -63,6 +63,7 @@ import java.util.Set;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletionService;
+import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
@@ -85,11 +86,10 @@ public class DFSStripedOutputStream extends DFSOutputStream
private final List<BlockingQueue<T>> queues;
MultipleBlockingQueue(int numQueue, int queueSize) {
- List<BlockingQueue<T>> list = new ArrayList<>(numQueue);
+ queues = new ArrayList<>(numQueue);
for (int i = 0; i < numQueue; i++) {
- list.add(new LinkedBlockingQueue<T>(queueSize));
+ queues.add(new LinkedBlockingQueue<T>(queueSize));
}
- queues = Collections.synchronizedList(list);
}
void offer(int i, T object) {
@@ -156,8 +156,7 @@ public class DFSStripedOutputStream extends DFSOutputStream
followingBlocks = new MultipleBlockingQueue<>(numAllBlocks, 1);
endBlocks = new MultipleBlockingQueue<>(numAllBlocks, 1);
newBlocks = new MultipleBlockingQueue<>(numAllBlocks, 1);
- updateStreamerMap = Collections.synchronizedMap(
- new HashMap<StripedDataStreamer, Boolean>(numAllBlocks));
+ updateStreamerMap = new ConcurrentHashMap<>(numAllBlocks);
streamerUpdateResult = new MultipleBlockingQueue<>(numAllBlocks, 1);
}
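The rationale behind dropping the synchronized wrappers: a collection that is fully populated in the constructor and never structurally modified afterwards is safely published through its final field, and per-slot thread safety already comes from the LinkedBlockingQueue elements. updateStreamerMap, by contrast, is mutated concurrently, so it moves to ConcurrentHashMap instead. A compact sketch of the resulting shape (offer's precondition handling and take() are illustrative additions, not taken from the class):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.LinkedBlockingQueue;

    class MultipleBlockingQueueSketch<T> {
      // Written once in the constructor, read-only afterwards: the final
      // field gives safe publication, so no synchronizedList is needed.
      private final List<BlockingQueue<T>> queues;

      MultipleBlockingQueueSketch(int numQueue, int queueSize) {
        queues = new ArrayList<>(numQueue);
        for (int i = 0; i < numQueue; i++) {
          queues.add(new LinkedBlockingQueue<T>(queueSize));
        }
      }

      void offer(int i, T object) {
        // Bounded queues can reject; callers here expect capacity.
        boolean accepted = queues.get(i).offer(object);
        assert accepted;
      }

      T take(int i) throws InterruptedException {
        return queues.get(i).take();
      }
    }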
[10/18] hadoop git commit: MAPREDUCE-6958. Shuffle audit logger should log size of shuffle transfer. Contributed by Jason Lowe
MAPREDUCE-6958. Shuffle audit logger should log size of shuffle transfer. Contributed by Jason Lowe
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3a20debd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3a20debd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3a20debd
Branch: refs/heads/HDFS-7240
Commit: 3a20debddeac69596ceb5b36f8413529ea8570e6
Parents: ea845ba
Author: Jason Lowe <jl...@apache.org>
Authored: Tue Sep 19 09:13:17 2017 -0500
Committer: Jason Lowe <jl...@apache.org>
Committed: Tue Sep 19 09:13:17 2017 -0500
----------------------------------------------------------------------
.../org/apache/hadoop/mapred/ShuffleHandler.java | 18 +++++++++++-------
1 file changed, 11 insertions(+), 7 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a20debd/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
index 863da7e..b7f2c6d 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
@@ -992,13 +992,6 @@ public class ShuffleHandler extends AuxiliaryService {
return;
}
- // this audit log is disabled by default,
- // to turn it on please enable this audit log
- // on log4j.properties by uncommenting the setting
- if (AUDITLOG.isDebugEnabled()) {
- AUDITLOG.debug("shuffle for " + jobQ.get(0) + " mappers: " + mapIds +
- " reducer " + reduceQ.get(0));
- }
int reduceId;
String jobId;
try {
@@ -1183,6 +1176,17 @@ public class ShuffleHandler extends AuxiliaryService {
// Now set the response headers.
setResponseHeaders(response, keepAliveParam, contentLength);
+
+ // this audit log is disabled by default,
+ // to turn it on please enable this audit log
+ // on log4j.properties by uncommenting the setting
+ if (AUDITLOG.isDebugEnabled()) {
+ StringBuilder sb = new StringBuilder("shuffle for ");
+ sb.append(jobId).append(" reducer ").append(reduce);
+ sb.append(" length ").append(contentLength);
+ sb.append(" mappers: ").append(mapIds);
+ AUDITLOG.debug(sb.toString());
+ }
}
protected void setResponseHeaders(HttpResponse response,
[11/18] hadoop git commit: MAPREDUCE-6960. Shuffle Handler prints disk error stack traces for every read failure.
MAPREDUCE-6960. Shuffle Handler prints disk error stack traces for every read failure.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/595d4784
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/595d4784
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/595d4784
Branch: refs/heads/HDFS-7240
Commit: 595d478408104bdfe1f08efd79930e18862fafbb
Parents: 3a20deb
Author: Eric Payne <ep...@apache.org>
Authored: Tue Sep 19 10:35:15 2017 -0500
Committer: Eric Payne <ep...@apache.org>
Committed: Tue Sep 19 10:35:15 2017 -0500
----------------------------------------------------------------------
.../main/java/org/apache/hadoop/mapred/ShuffleHandler.java | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/595d4784/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
index b7f2c6d..0eeae19 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
@@ -80,6 +80,7 @@ import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
import org.apache.hadoop.security.ssl.SSLFactory;
import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.util.DiskChecker;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.concurrent.HadoopExecutors;
import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -1088,7 +1089,11 @@ public class ShuffleHandler extends AuxiliaryService {
}
nextMap.addListener(new ReduceMapFileCount(reduceContext));
} catch (IOException e) {
- LOG.error("Shuffle error :", e);
+ if (e instanceof DiskChecker.DiskErrorException) {
+ LOG.error("Shuffle error :" + e);
+ } else {
+ LOG.error("Shuffle error :", e);
+ }
String errorMessage = getErrorMessage(e);
sendError(reduceContext.getCtx(), errorMessage,
INTERNAL_SERVER_ERROR);
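The one-character-looking change is actually a different logging overload: LOG.error(msg, throwable) prints the message plus the full stack trace, while LOG.error(msg + e) concatenates e.toString() into the message and prints no trace at all. That keeps expected, recurring failures like DiskErrorException to one line each while preserving full traces for everything else. A tiny sketch of the distinction (slf4j shown for illustration):

    import java.io.IOException;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class LogTraceSketch {
      private static final Logger LOG =
          LoggerFactory.getLogger(LogTraceSketch.class);

      public static void main(String[] args) {
        IOException e = new IOException("disk check failed");
        // String concatenation: one line, message + e.toString(), no trace.
        LOG.error("Shuffle error :" + e);
        // Throwable argument: message followed by the full stack trace.
        LOG.error("Shuffle error :", e);
      }
    }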
[13/18] hadoop git commit: HDFS-12444. Reduce runtime of TestWriteReadStripedFile. Contributed by Huafeng Wang and Andrew Wang.
HDFS-12444. Reduce runtime of TestWriteReadStripedFile. Contributed by Huafeng Wang and Andrew Wang.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/59830ca7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/59830ca7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/59830ca7
Branch: refs/heads/HDFS-7240
Commit: 59830ca772dfb5dcc8b3e5281ca482dea5a5fa3e
Parents: 7bbeacb
Author: Andrew Wang <wa...@apache.org>
Authored: Tue Sep 19 13:44:42 2017 -0700
Committer: Andrew Wang <wa...@apache.org>
Committed: Tue Sep 19 13:44:42 2017 -0700
----------------------------------------------------------------------
.../apache/hadoop/hdfs/StripedFileTestUtil.java | 13 +++++++----
.../hadoop/hdfs/TestWriteReadStripedFile.java | 24 ++++++++++++--------
.../hdfs/TestWriteStripedFileWithFailure.java | 3 ++-
3 files changed, 25 insertions(+), 15 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/59830ca7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
index 1489e48..c771d21 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
@@ -79,10 +79,15 @@ public class StripedFileTestUtil {
assertEquals("File length should be the same", fileLength, status.getLen());
}
- static void verifyPread(FileSystem fs, Path srcPath, int fileLength,
- byte[] expected, byte[] buf) throws IOException {
- final ErasureCodingPolicy ecPolicy =
- ((DistributedFileSystem)fs).getErasureCodingPolicy(srcPath);
+ static void verifyPread(DistributedFileSystem fs, Path srcPath,
+ int fileLength, byte[] expected, byte[] buf) throws IOException {
+ final ErasureCodingPolicy ecPolicy = fs.getErasureCodingPolicy(srcPath);
+ verifyPread(fs, srcPath, fileLength, expected, buf, ecPolicy);
+ }
+
+ static void verifyPread(FileSystem fs, Path srcPath, int fileLength,
+ byte[] expected, byte[] buf, ErasureCodingPolicy ecPolicy)
+ throws IOException {
try (FSDataInputStream in = fs.open(srcPath)) {
int[] startOffsets = {0, 1, ecPolicy.getCellSize() - 102,
ecPolicy.getCellSize(), ecPolicy.getCellSize() + 102,
http://git-wip-us.apache.org/repos/asf/hadoop/blob/59830ca7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java
index f27c978..805bcea 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.web.WebHdfsConstants;
@@ -47,12 +48,13 @@ import java.util.Random;
public class TestWriteReadStripedFile {
public static final Log LOG = LogFactory.getLog(TestWriteReadStripedFile.class);
private final ErasureCodingPolicy ecPolicy =
- StripedFileTestUtil.getDefaultECPolicy();
+ SystemErasureCodingPolicies.getByID(
+ SystemErasureCodingPolicies.RS_3_2_POLICY_ID);
private final int cellSize = ecPolicy.getCellSize();
private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
private final int numDNs = dataBlocks + parityBlocks;
- private final int stripesPerBlock = 4;
+ private final int stripesPerBlock = 2;
private final int blockSize = stripesPerBlock * cellSize;
private final int blockGroupSize = blockSize * dataBlocks;
@@ -78,11 +80,10 @@ public class TestWriteReadStripedFile {
false);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
fs = cluster.getFileSystem();
- fs.enableErasureCodingPolicy(
- StripedFileTestUtil.getDefaultECPolicy().getName());
+ fs.enableErasureCodingPolicy(ecPolicy.getName());
fs.mkdirs(new Path("/ec"));
cluster.getFileSystem().getClient().setErasureCodingPolicy("/ec",
- StripedFileTestUtil.getDefaultECPolicy().getName());
+ ecPolicy.getName());
}
@After
@@ -225,7 +226,8 @@ public class TestWriteReadStripedFile {
byte[] smallBuf = new byte[1024];
byte[] largeBuf = new byte[fileLength + 100];
- StripedFileTestUtil.verifyPread(fs, srcPath, fileLength, expected, largeBuf);
+ StripedFileTestUtil.verifyPread(fs, srcPath, fileLength, expected,
+ largeBuf);
StripedFileTestUtil.verifyStatefulRead(fs, srcPath, fileLength, expected,
largeBuf);
@@ -268,13 +270,15 @@ public class TestWriteReadStripedFile {
byte[] smallBuf = new byte[1024];
byte[] largeBuf = new byte[fileLength + 100];
- // TODO: HDFS-8797
- //StripedFileTestUtil.verifyPread(fs, srcPath, fileLength, expected, largeBuf);
+ StripedFileTestUtil
+ .verifyPread(fs, srcPath, fileLength, expected, largeBuf, ecPolicy);
- StripedFileTestUtil.verifyStatefulRead(fs, srcPath, fileLength, expected, largeBuf);
+ StripedFileTestUtil
+ .verifyStatefulRead(fs, srcPath, fileLength, expected, largeBuf);
StripedFileTestUtil.verifySeek(fs, srcPath, fileLength, ecPolicy,
blockGroupSize);
- StripedFileTestUtil.verifyStatefulRead(fs, srcPath, fileLength, expected, smallBuf);
+ StripedFileTestUtil
+ .verifyStatefulRead(fs, srcPath, fileLength, expected, smallBuf);
// webhdfs doesn't support bytebuffer read
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/59830ca7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteStripedFileWithFailure.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteStripedFileWithFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteStripedFileWithFailure.java
index 03e9e10..c859b71 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteStripedFileWithFailure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteStripedFileWithFailure.java
@@ -160,7 +160,8 @@ public class TestWriteStripedFileWithFailure {
blockSize * dataBlocks);
StripedFileTestUtil.verifyStatefulRead(fs, srcPath, fileLength, expected,
smallBuf);
- StripedFileTestUtil.verifyPread(fs, srcPath, fileLength, expected, largeBuf);
+ StripedFileTestUtil.verifyPread((DistributedFileSystem)fs, srcPath,
+ fileLength, expected, largeBuf);
// delete the file
fs.delete(srcPath, true);
[03/18] hadoop git commit: YARN-7203. Add container ExecutionType into ContainerReport. (Botong Huang via asuresh)
YARN-7203. Add container ExecutionType into ContainerReport. (Botong Huang via asuresh)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/56ef5279
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/56ef5279
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/56ef5279
Branch: refs/heads/HDFS-7240
Commit: 56ef5279c1db93d03b2f1e04badbfe804f548918
Parents: 3cf3540
Author: Arun Suresh <as...@apache.org>
Authored: Mon Sep 18 15:49:31 2017 -0700
Committer: Arun Suresh <as...@apache.org>
Committed: Mon Sep 18 15:49:31 2017 -0700
----------------------------------------------------------------------
.../yarn/api/records/ContainerReport.java | 26 ++++++++++++++++++++
.../src/main/proto/yarn_protos.proto | 1 +
.../yarn/client/api/impl/TestYarnClient.java | 1 +
.../records/impl/pb/ContainerReportPBImpl.java | 20 +++++++++++++++
.../rmcontainer/RMContainerImpl.java | 2 +-
5 files changed, 49 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/56ef5279/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerReport.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerReport.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerReport.java
index 11d7bca..31d2812 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerReport.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerReport.java
@@ -52,6 +52,18 @@ public abstract class ContainerReport {
long creationTime, long finishTime, String diagnosticInfo, String logUrl,
int containerExitStatus, ContainerState containerState,
String nodeHttpAddress) {
+ return newInstance(containerId, allocatedResource, assignedNode, priority,
+ creationTime, finishTime, diagnosticInfo, logUrl, containerExitStatus,
+ containerState, nodeHttpAddress, ExecutionType.GUARANTEED);
+ }
+
+ @Private
+ @Unstable
+ public static ContainerReport newInstance(ContainerId containerId,
+ Resource allocatedResource, NodeId assignedNode, Priority priority,
+ long creationTime, long finishTime, String diagnosticInfo, String logUrl,
+ int containerExitStatus, ContainerState containerState,
+ String nodeHttpAddress, ExecutionType executionType) {
ContainerReport report = Records.newRecord(ContainerReport.class);
report.setContainerId(containerId);
report.setAllocatedResource(allocatedResource);
@@ -64,6 +76,7 @@ public abstract class ContainerReport {
report.setContainerExitStatus(containerExitStatus);
report.setContainerState(containerState);
report.setNodeHttpAddress(nodeHttpAddress);
+ report.setExecutionType(executionType);
return report;
}
@@ -209,4 +222,17 @@ public abstract class ContainerReport {
@Private
@Unstable
public abstract void setNodeHttpAddress(String nodeHttpAddress);
+
+ /**
+ * Get the execution type of the container.
+ *
+ * @return the execution type of the container
+ */
+ @Public
+ @Unstable
+ public abstract ExecutionType getExecutionType();
+
+ @Private
+ @Unstable
+ public abstract void setExecutionType(ExecutionType executionType);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/56ef5279/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
index 066441c..fb340d1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
@@ -137,6 +137,7 @@ message ContainerReportProto {
optional int32 container_exit_status = 9;
optional ContainerStateProto container_state = 10;
optional string node_http_address = 11;
+ optional ExecutionTypeProto executionType = 12 [default = GUARANTEED];
}
enum YarnApplicationStateProto {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/56ef5279/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
index cd0e472..4e5d8cd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
@@ -601,6 +601,7 @@ public class TestYarnClient {
Assert.assertEquals(report.getContainerId().toString(),
(ContainerId.newContainerId(expectedReports.get(0)
.getCurrentApplicationAttemptId(), 3)).toString());
+ Assert.assertNotNull(report.getExecutionType());
client.stop();
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/56ef5279/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerReportPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerReportPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerReportPBImpl.java
index 5d435da..2b58c70 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerReportPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerReportPBImpl.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.yarn.api.records.impl.pb;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerReport;
import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.api.records.ExecutionType;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
@@ -355,4 +356,23 @@ public class ContainerReportPBImpl extends ContainerReport {
}
builder.setNodeHttpAddress(nodeHttpAddress);
}
+
+ @Override
+ public ExecutionType getExecutionType() {
+ ContainerReportProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasExecutionType()) {
+ return ExecutionType.GUARANTEED; // default value
+ }
+ return ProtoUtils.convertFromProtoFormat(p.getExecutionType());
+ }
+
+ @Override
+ public void setExecutionType(ExecutionType executionType) {
+ maybeInitBuilder();
+ if (executionType == null) {
+ builder.clearExecutionType();
+ return;
+ }
+ builder.setExecutionType(ProtoUtils.convertToProtoFormat(executionType));
+ }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/56ef5279/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
index 8c165de..a43459c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
@@ -756,7 +756,7 @@ public class RMContainerImpl implements RMContainer {
this.getAllocatedSchedulerKey().getPriority(), this.getCreationTime(),
this.getFinishTime(), this.getDiagnosticsInfo(), this.getLogURL(),
this.getContainerExitStatus(), this.getContainerState(),
- this.getNodeHttpAddress());
+ this.getNodeHttpAddress(), this.getExecutionType());
} finally {
this.readLock.unlock();
}
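To illustrate the consumer side of this change, here is a hedged sketch of reading the new field through the YARN client API; yarnClient and attemptId are assumed to be initialized elsewhere, and exception handling is omitted.

    // Sketch: list containers for an application attempt and inspect their
    // execution types. Reports persisted before this change carry no field
    // and fall back to GUARANTEED, per the proto default and the PBImpl
    // getter shown above.
    List<ContainerReport> reports = yarnClient.getContainers(attemptId);
    for (ContainerReport report : reports) {
      if (report.getExecutionType() == ExecutionType.OPPORTUNISTIC) {
        System.out.println(report.getContainerId() + " ran opportunistically");
      }
    }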
[06/18] hadoop git commit: HDFS-11799. Introduce a config to allow setting up write pipeline with fewer nodes than replication factor. Contributed by Brahma Reddy Battula
Posted by xy...@apache.org.
HDFS-11799. Introduce a config to allow setting up write pipeline with fewer nodes than replication factor. Contributed by Brahma Reddy Battula
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fda1221c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fda1221c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fda1221c
Branch: refs/heads/HDFS-7240
Commit: fda1221c55101d97ac62e1ee4e3ddf9a915d5363
Parents: 31b5840
Author: Brahma Reddy Battula <br...@apache.org>
Authored: Tue Sep 19 11:25:45 2017 +0530
Committer: Brahma Reddy Battula <br...@apache.org>
Committed: Tue Sep 19 11:25:45 2017 +0530
----------------------------------------------------------------------
.../java/org/apache/hadoop/hdfs/DFSClient.java | 13 +-
.../org/apache/hadoop/hdfs/DataStreamer.java | 31 +-
.../hdfs/client/HdfsClientConfigKeys.java | 2 +
.../src/main/resources/hdfs-default.xml | 17 ++
.../TestReplaceDatanodeFailureReplication.java | 291 +++++++++++++++++++
.../hadoop/tools/TestHdfsConfigFields.java | 4 +-
6 files changed, 354 insertions(+), 4 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fda1221c/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 772049d..7e8e95b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -223,6 +223,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
final String clientName;
final SocketFactory socketFactory;
final ReplaceDatanodeOnFailure dtpReplaceDatanodeOnFailure;
+ final short dtpReplaceDatanodeOnFailureReplication;
private final FileSystem.Statistics stats;
private final URI namenodeUri;
private final Random r = new Random();
@@ -305,7 +306,17 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
this.socketFactory = NetUtils.getSocketFactory(conf, ClientProtocol.class);
this.dtpReplaceDatanodeOnFailure = ReplaceDatanodeOnFailure.get(conf);
this.smallBufferSize = DFSUtilClient.getSmallBufferSize(conf);
-
+ this.dtpReplaceDatanodeOnFailureReplication = (short) conf
+ .getInt(HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.
+ MIN_REPLICATION,
+ HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.
+ MIN_REPLICATION_DEFAULT);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug(
+ "Sets " + HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.
+ MIN_REPLICATION + " to "
+ + dtpReplaceDatanodeOnFailureReplication);
+ }
this.ugi = UserGroupInformation.getCurrentUser();
this.namenodeUri = nameNodeUri;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fda1221c/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
index 4eafca1..99fa5f3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
@@ -1384,7 +1384,36 @@ class DataStreamer extends Daemon {
setPipeline(lb);
//find the new datanode
- final int d = findNewDatanode(original);
+ final int d;
+ try {
+ d = findNewDatanode(original);
+ } catch (IOException ioe) {
+      // check the minimum number of nodes available to decide whether to
+      // continue the write.
+
+      // If the number of datanodes holding live replicas is greater than
+      // or equal to the HdfsClientConfigKeys.BlockWrite.
+      // ReplaceDatanodeOnFailure.MIN_REPLICATION threshold, continue
+      // writing to the remaining nodes. Otherwise rethrow the exception.
+      //
+      // If HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.
+      // MIN_REPLICATION is set to zero or less, an exception is thrown
+      // whenever a replacement datanode cannot be found.
+
+ if (dfsClient.dtpReplaceDatanodeOnFailureReplication > 0 && nodes.length
+ >= dfsClient.dtpReplaceDatanodeOnFailureReplication) {
+ DFSClient.LOG.warn(
+ "Failed to find a new datanode to add to the write pipeline, "
+ + " continue to write to the pipeline with " + nodes.length
+ + " nodes since it's no less than minimum replication: "
+ + dfsClient.dtpReplaceDatanodeOnFailureReplication
+ + " configured by "
+ + BlockWrite.ReplaceDatanodeOnFailure.MIN_REPLICATION
+ + ".", ioe);
+ return;
+ }
+ throw ioe;
+ }
//transfer replica. pick a source from the original nodes
final DatanodeInfo src = original[tried % original.length];
final DatanodeInfo[] targets = {nodes[d]};
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fda1221c/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
index e99b099..97cb68b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
@@ -320,6 +320,8 @@ public interface HdfsClientConfigKeys {
String POLICY_DEFAULT = "DEFAULT";
String BEST_EFFORT_KEY = PREFIX + "best-effort";
boolean BEST_EFFORT_DEFAULT = false;
+ String MIN_REPLICATION = PREFIX + "min-replication";
+ short MIN_REPLICATION_DEFAULT = 0;
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fda1221c/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index af40a34..9327a2c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -658,6 +658,23 @@
</description>
</property>
+ <property>
+ <name>dfs.client.block.write.replace-datanode-on-failure.min-replication</name>
+ <value>0</value>
+ <description>
+    The minimum number of replications needed in order not to fail the
+    write pipeline when new datanodes cannot be found to replace failed
+    datanodes in the write pipeline (for example, due to network failure).
+    If the number of remaining datanodes in the write pipeline is greater
+    than or equal to this property value, the client continues writing to
+    the remaining nodes; otherwise an exception is thrown.
+
+    If this is set to 0, an exception is thrown whenever a replacement
+    cannot be found.
+    See also dfs.client.block.write.replace-datanode-on-failure.policy.
+ </description>
+ </property>
+
<property>
<name>dfs.blockreport.intervalMsec</name>
<value>21600000</value>
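For completeness, a hedged sketch of setting the new key from client code; the value 2 here is only an example, not a recommendation from the patch.

    // Sketch: allow the write to continue as long as at least 2 datanodes
    // remain in the pipeline when no replacement datanode can be found.
    Configuration conf = new HdfsConfiguration();
    conf.setInt(
        "dfs.client.block.write.replace-datanode-on-failure.min-replication",
        2);
    // The replace-datanode-on-failure.policy key still controls when a
    // replacement is attempted; min-replication only governs the fallback
    // behaviour when that attempt fails.
    FileSystem fs = FileSystem.get(conf);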
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fda1221c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeFailureReplication.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeFailureReplication.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeFailureReplication.java
new file mode 100644
index 0000000..9591cb4
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeFailureReplication.java
@@ -0,0 +1,291 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.concurrent.TimeoutException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
+import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure;
+import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure.Policy;
+import org.apache.hadoop.io.IOUtils;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Verify the behaviours of HdfsClientConfigKeys.BlockWrite.
+ * ReplaceDatanodeOnFailure.MIN_REPLICATION: if the number of live block
+ * location datanodes is greater than or equal to the
+ * 'dfs.client.block.write.replace-datanode-on-failure.min-replication'
+ * threshold value, writing continues on the remaining nodes; otherwise
+ * an exception is thrown.
+ * <p>
+ * If HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.
+ * MIN_REPLICATION is set to zero or less, an exception is thrown whenever
+ * a replacement datanode cannot be found.
+ */
+public class TestReplaceDatanodeFailureReplication {
+ static final Log LOG = LogFactory
+ .getLog(TestReplaceDatanodeFailureReplication.class);
+
+ static final String DIR =
+ "/" + TestReplaceDatanodeFailureReplication.class.getSimpleName() + "/";
+ static final short REPLICATION = 3;
+ final private static String RACK0 = "/rack0";
+
+ /**
+ * Test fail last datanode in the pipeline.
+ */
+ @Test
+ public void testLastDatanodeFailureInPipeline() throws Exception {
+ testWriteFileAndVerifyAfterDNStop(2, 1, 10, false);
+ }
+
+ /**
+ * Test fail first datanode in the pipeline.
+ */
+ @Test
+ public void testFirstDatanodeFailureInPipeline() throws Exception {
+ testWriteFileAndVerifyAfterDNStop(2, 0, 10, false);
+ }
+
+ /**
+ * Test fail all the datanodes except first in the pipeline.
+ */
+ @Test
+ public void testWithOnlyFirstDatanodeIsAlive() throws Exception {
+ testWriteFileAndVerifyAfterDNStop(1, 1, 1, true);
+ }
+
+ /**
+ * Test fail all the datanodes except lastnode in the pipeline.
+ */
+ @Test
+ public void testWithOnlyLastDatanodeIsAlive() throws Exception {
+ testWriteFileAndVerifyAfterDNStop(1, 0, 1, true);
+ }
+
+ /**
+   * Test when the number of live nodes is less than
+   * "dfs.client.block.write.replace-datanode-on-failure.min-replication".
+ */
+ @Test
+ public void testLessNumberOfLiveDatanodesThanWriteReplaceDatanodeOnFailureRF()
+ throws Exception {
+ final MiniDFSCluster cluster = setupCluster(2);
+
+ try {
+ final DistributedFileSystem fs = cluster.getFileSystem();
+ final Path dir = new Path(DIR);
+
+ final SlowWriter[] slowwriters = new SlowWriter[1];
+ for (int i = 1; i <= slowwriters.length; i++) {
+        // create slow writers at different speeds
+ slowwriters[i - 1] = new SlowWriter(fs, new Path(dir, "file" + i),
+ i * 200L);
+ }
+
+ for (SlowWriter s : slowwriters) {
+ s.start();
+ }
+
+ // Let slow writers write something.
+      // Some of them are so slow that they will not have started yet.
+ sleepSeconds(1);
+
+      // stop two of the datanodes
+ cluster.stopDataNode(0);
+ cluster.stopDataNode(0);
+
+      // Let the slow writers write for a few more seconds.
+ // Everyone should have written something.
+ sleepSeconds(20);
+
+ // check replication and interrupt.
+ for (SlowWriter s : slowwriters) {
+ try {
+ s.out.getCurrentBlockReplication();
+          Assert.fail(
+              "Expected an exception: no replacement datanode could be "
+                  + "found and the remaining nodes are below min-replication");
+ } catch (IOException e) {
+ // expected
+ }
+ s.interruptRunning();
+ }
+
+ // close files
+ for (SlowWriter s : slowwriters) {
+ s.joinAndClose();
+ }
+
+ // Verify the file
+ verifyFileContent(fs, slowwriters);
+ } finally {
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
+ }
+
+ private MiniDFSCluster setupCluster(int failRF) throws IOException {
+ final Configuration conf = new HdfsConfiguration();
+ conf.setInt(HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.
+ MIN_REPLICATION, failRF);
+ // always replace a datanode
+ ReplaceDatanodeOnFailure.write(Policy.ALWAYS, false, conf);
+
+ final String[] racks = new String[REPLICATION];
+ Arrays.fill(racks, RACK0);
+ return new MiniDFSCluster.Builder(conf).racks(racks)
+ .numDataNodes(REPLICATION).build();
+ }
+
+ private void testWriteFileAndVerifyAfterDNStop(int failRF, int dnindex,
+ int slowWrites, boolean failPipeLine)
+ throws IOException, InterruptedException, TimeoutException {
+ final MiniDFSCluster cluster = setupCluster(failRF);
+ try {
+ final DistributedFileSystem fs = cluster.getFileSystem();
+ final Path dir = new Path(DIR);
+
+ final SlowWriter[] slowwriters = new SlowWriter[slowWrites];
+ for (int i = 1; i <= slowwriters.length; i++) {
+        // create slow writers at different speeds
+ slowwriters[i - 1] = new SlowWriter(fs, new Path(dir, "file" + i),
+ i * 200L);
+ }
+
+ for (SlowWriter s : slowwriters) {
+ s.start();
+ }
+
+ // Let slow writers write something.
+      // Some of them are so slow that they will not have started yet.
+ sleepSeconds(3);
+
+      // stop a datanode
+ cluster.stopDataNode(dnindex);
+ if (failPipeLine) {
+ cluster.stopDataNode(dnindex);
+ }
+
+      // Let the slow writers write for a few more seconds.
+ // Everyone should have written something.
+ sleepSeconds(5);
+ cluster.waitFirstBRCompleted(0, 10000);
+ // check replication and interrupt.
+ for (SlowWriter s : slowwriters) {
+ Assert.assertEquals(failRF, s.out.getCurrentBlockReplication());
+ s.interruptRunning();
+ }
+
+ // close files
+ for (SlowWriter s : slowwriters) {
+ s.joinAndClose();
+ }
+
+ // Verify the file
+ verifyFileContent(fs, slowwriters);
+ } finally {
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
+ }
+
+ private void verifyFileContent(DistributedFileSystem fs,
+ SlowWriter[] slowwriters) throws IOException {
+ LOG.info("Verify the file");
+ for (int i = 0; i < slowwriters.length; i++) {
+ LOG.info(slowwriters[i].filepath + ": length=" + fs
+ .getFileStatus(slowwriters[i].filepath).getLen());
+ FSDataInputStream in = null;
+ try {
+ in = fs.open(slowwriters[i].filepath);
+ for (int j = 0, x;; j++) {
+ x = in.read();
+          if (x != -1) {
+            Assert.assertEquals(j, x);
+          } else {
+            break; // EOF for this file; go on to verify the next writer
+ }
+ }
+ } finally {
+ IOUtils.closeStream(in);
+ }
+ }
+ }
+
+ static void sleepSeconds(final int waittime) throws InterruptedException {
+ LOG.info("Wait " + waittime + " seconds");
+ Thread.sleep(waittime * 1000L);
+ }
+
+ static class SlowWriter extends Thread {
+ private final Path filepath;
+ private final HdfsDataOutputStream out;
+ private final long sleepms;
+ private volatile boolean running = true;
+
+ SlowWriter(DistributedFileSystem fs, Path filepath, final long sleepms)
+ throws IOException {
+ super(SlowWriter.class.getSimpleName() + ":" + filepath);
+ this.filepath = filepath;
+ this.out = (HdfsDataOutputStream) fs.create(filepath, REPLICATION);
+ this.sleepms = sleepms;
+ }
+
+ @Override public void run() {
+ int i = 0;
+ try {
+ sleep(sleepms);
+ for (; running; i++) {
+ LOG.info(getName() + " writes " + i);
+ out.write(i);
+ out.hflush();
+ sleep(sleepms);
+ }
+ } catch (InterruptedException e) {
+ LOG.info(getName() + " interrupted:" + e);
+ } catch (IOException e) {
+ throw new RuntimeException(getName(), e);
+ } finally {
+ LOG.info(getName() + " terminated: i=" + i);
+ }
+ }
+
+ void interruptRunning() {
+ running = false;
+ interrupt();
+ }
+
+ void joinAndClose() throws InterruptedException {
+ LOG.info(getName() + " join and close");
+ join();
+ IOUtils.closeStream(out);
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fda1221c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
index 233dc5a..47db565 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
@@ -41,8 +41,8 @@ public class TestHdfsConfigFields extends TestConfigurationFieldsBase {
public void initializeMemberVariables() {
xmlFilename = new String("hdfs-default.xml");
configurationClasses = new Class[] { HdfsClientConfigKeys.class,
- HdfsClientConfigKeys.StripedRead.class,
- DFSConfigKeys.class};
+ HdfsClientConfigKeys.StripedRead.class, DFSConfigKeys.class,
+ HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.class };
// Set error modes
errorIfMissingConfigProps = true;
[05/18] hadoop git commit: HDFS-12480. TestNameNodeMetrics#testTransactionAndCheckpointMetrics Fails in trunk. Contributed by Hanisha Koneru
Posted by xy...@apache.org.
HDFS-12480. TestNameNodeMetrics#testTransactionAndCheckpointMetrics Fails in trunk. Contributed by Hanisha Koneru
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/31b58406
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/31b58406
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/31b58406
Branch: refs/heads/HDFS-7240
Commit: 31b58406ac369716ef1665b7d60a3409117bdf9d
Parents: 595d478
Author: Brahma Reddy Battula <br...@apache.org>
Authored: Tue Sep 19 10:37:07 2017 +0530
Committer: Brahma Reddy Battula <br...@apache.org>
Committed: Tue Sep 19 10:37:07 2017 +0530
----------------------------------------------------------------------
.../namenode/metrics/TestNameNodeMetrics.java | 18 +++++++++---------
1 file changed, 9 insertions(+), 9 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/31b58406/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
index 077a5f8..db9adbe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
@@ -851,22 +851,22 @@ public class TestNameNodeMetrics {
getMetrics(NS_METRICS));
assertGauge("LastCheckpointTime", lastCkptTime, getMetrics(NS_METRICS));
- assertGauge("LastWrittenTransactionId", 3L, getMetrics(NS_METRICS));
- assertGauge("TransactionsSinceLastCheckpoint", 3L, getMetrics(NS_METRICS));
- assertGauge("TransactionsSinceLastLogRoll", 3L, getMetrics(NS_METRICS));
+ assertGauge("LastWrittenTransactionId", 4L, getMetrics(NS_METRICS));
+ assertGauge("TransactionsSinceLastCheckpoint", 4L, getMetrics(NS_METRICS));
+ assertGauge("TransactionsSinceLastLogRoll", 4L, getMetrics(NS_METRICS));
fs.mkdirs(new Path(TEST_ROOT_DIR_PATH, "/tmp"));
assertGauge("LastCheckpointTime", lastCkptTime, getMetrics(NS_METRICS));
- assertGauge("LastWrittenTransactionId", 4L, getMetrics(NS_METRICS));
- assertGauge("TransactionsSinceLastCheckpoint", 4L, getMetrics(NS_METRICS));
- assertGauge("TransactionsSinceLastLogRoll", 4L, getMetrics(NS_METRICS));
+ assertGauge("LastWrittenTransactionId", 5L, getMetrics(NS_METRICS));
+ assertGauge("TransactionsSinceLastCheckpoint", 5L, getMetrics(NS_METRICS));
+ assertGauge("TransactionsSinceLastLogRoll", 5L, getMetrics(NS_METRICS));
cluster.getNameNodeRpc().rollEditLog();
assertGauge("LastCheckpointTime", lastCkptTime, getMetrics(NS_METRICS));
- assertGauge("LastWrittenTransactionId", 6L, getMetrics(NS_METRICS));
- assertGauge("TransactionsSinceLastCheckpoint", 6L, getMetrics(NS_METRICS));
+ assertGauge("LastWrittenTransactionId", 7L, getMetrics(NS_METRICS));
+ assertGauge("TransactionsSinceLastCheckpoint", 7L, getMetrics(NS_METRICS));
assertGauge("TransactionsSinceLastLogRoll", 1L, getMetrics(NS_METRICS));
cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
@@ -876,7 +876,7 @@ public class TestNameNodeMetrics {
long newLastCkptTime = MetricsAsserts.getLongGauge("LastCheckpointTime",
getMetrics(NS_METRICS));
assertTrue(lastCkptTime < newLastCkptTime);
- assertGauge("LastWrittenTransactionId", 8L, getMetrics(NS_METRICS));
+ assertGauge("LastWrittenTransactionId", 9L, getMetrics(NS_METRICS));
assertGauge("TransactionsSinceLastCheckpoint", 1L, getMetrics(NS_METRICS));
assertGauge("TransactionsSinceLastLogRoll", 1L, getMetrics(NS_METRICS));
}
[17/18] hadoop git commit: YARN-7186. Fix finicky TestContainerManager tests. Contributed by Arun Suresh.
Posted by xy...@apache.org.
YARN-7186. Fix finicky TestContainerManager tests. Contributed by Arun Suresh.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/647b7527
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/647b7527
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/647b7527
Branch: refs/heads/HDFS-7240
Commit: 647b7527a9cdf4717e7dcbbb660e5812b67a17f1
Parents: 12d9d7b
Author: Junping Du <ju...@apache.org>
Authored: Tue Sep 19 18:31:15 2017 -0700
Committer: Junping Du <ju...@apache.org>
Committed: Tue Sep 19 18:31:15 2017 -0700
----------------------------------------------------------------------
.../containermanager/TestContainerManager.java | 128 -------------------
.../TestContainerSchedulerQueuing.java | 70 ++++++++++
2 files changed, 70 insertions(+), 128 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/647b7527/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java
index 6eea77b..38df208 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java
@@ -70,7 +70,6 @@ import org.apache.hadoop.yarn.api.records.ContainerRetryContext;
import org.apache.hadoop.yarn.api.records.ContainerRetryPolicy;
import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
-import org.apache.hadoop.yarn.api.records.ExecutionType;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.LocalResourceType;
import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
@@ -105,7 +104,6 @@ import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerSignalContext
import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerStartContext;
import org.apache.hadoop.yarn.server.utils.BuilderUtils;
import org.junit.Assert;
-import org.junit.Before;
import org.junit.Test;
import org.mockito.ArgumentCaptor;
import org.mockito.Mockito;
@@ -142,14 +140,6 @@ public class TestContainerManager extends BaseContainerManagerTest {
exec.setConf(conf);
return spy(exec);
}
-
- @Override
- @Before
- public void setup() throws IOException {
- conf.setInt(
- YarnConfiguration.NM_OPPORTUNISTIC_CONTAINERS_MAX_QUEUE_LENGTH, 0);
- super.setup();
- }
@Override
protected ContainerManagerImpl
@@ -1945,122 +1935,4 @@ public class TestContainerManager extends BaseContainerManagerTest {
Assert.assertTrue(response.getFailedRequests().get(cId).getMessage()
.contains("Null resource visibility for local resource"));
}
-
- @Test
- public void testContainerUpdateExecTypeOpportunisticToGuaranteed()
- throws IOException, YarnException, InterruptedException {
- delayContainers = true;
- containerManager.start();
- // Construct the Container-id
- ContainerId cId = createContainerId(0);
- ContainerLaunchContext containerLaunchContext =
- recordFactory.newRecordInstance(ContainerLaunchContext.class);
-
- StartContainerRequest scRequest =
- StartContainerRequest.newInstance(
- containerLaunchContext,
- createContainerToken(cId, DUMMY_RM_IDENTIFIER,
- context.getNodeId(), user, BuilderUtils.newResource(512, 1),
- context.getContainerTokenSecretManager(), null,
- ExecutionType.OPPORTUNISTIC));
- List<StartContainerRequest> list = new ArrayList<>();
- list.add(scRequest);
- StartContainersRequest allRequests =
- StartContainersRequest.newInstance(list);
- containerManager.startContainers(allRequests);
- // Make sure the container reaches RUNNING state
- BaseContainerManagerTest.waitForNMContainerState(containerManager, cId,
- org.apache.hadoop.yarn.server.nodemanager.
- containermanager.container.ContainerState.RUNNING);
- // Construct container resource increase request,
- List<Token> updateTokens = new ArrayList<>();
- Token containerToken =
- createContainerToken(cId, 1, DUMMY_RM_IDENTIFIER, context.getNodeId(),
- user, BuilderUtils.newResource(512, 1),
- context.getContainerTokenSecretManager(), null,
- ExecutionType.GUARANTEED);
- updateTokens.add(containerToken);
- ContainerUpdateRequest updateRequest =
- ContainerUpdateRequest.newInstance(updateTokens);
- ContainerUpdateResponse updateResponse =
- containerManager.updateContainer(updateRequest);
-
- Assert.assertEquals(
- 1, updateResponse.getSuccessfullyUpdatedContainers().size());
- Assert.assertTrue(updateResponse.getFailedRequests().isEmpty());
-
- //Make sure the container is running
- List<ContainerId> statList = new ArrayList<ContainerId>();
- statList.add(cId);
- GetContainerStatusesRequest statRequest =
- GetContainerStatusesRequest.newInstance(statList);
- List<ContainerStatus> containerStatuses = containerManager
- .getContainerStatuses(statRequest).getContainerStatuses();
- Assert.assertEquals(1, containerStatuses.size());
- for (ContainerStatus status : containerStatuses) {
- Assert.assertEquals(
- org.apache.hadoop.yarn.api.records.ContainerState.RUNNING,
- status.getState());
- Assert.assertEquals(ExecutionType.GUARANTEED, status.getExecutionType());
- }
- }
-
- @Test
- public void testContainerUpdateExecTypeGuaranteedToOpportunistic()
- throws IOException, YarnException, InterruptedException {
- delayContainers = true;
- containerManager.start();
- // Construct the Container-id
- ContainerId cId = createContainerId(0);
- ContainerLaunchContext containerLaunchContext =
- recordFactory.newRecordInstance(ContainerLaunchContext.class);
-
- StartContainerRequest scRequest =
- StartContainerRequest.newInstance(
- containerLaunchContext,
- createContainerToken(cId, DUMMY_RM_IDENTIFIER,
- context.getNodeId(), user, BuilderUtils.newResource(512, 1),
- context.getContainerTokenSecretManager(), null));
- List<StartContainerRequest> list = new ArrayList<>();
- list.add(scRequest);
- StartContainersRequest allRequests =
- StartContainersRequest.newInstance(list);
- containerManager.startContainers(allRequests);
- // Make sure the container reaches RUNNING state
- BaseContainerManagerTest.waitForNMContainerState(containerManager, cId,
- org.apache.hadoop.yarn.server.nodemanager.
- containermanager.container.ContainerState.RUNNING);
- // Construct container resource increase request,
- List<Token> updateTokens = new ArrayList<>();
- Token containerToken =
- createContainerToken(cId, 1, DUMMY_RM_IDENTIFIER, context.getNodeId(),
- user, BuilderUtils.newResource(512, 1),
- context.getContainerTokenSecretManager(), null,
- ExecutionType.OPPORTUNISTIC);
- updateTokens.add(containerToken);
- ContainerUpdateRequest updateRequest =
- ContainerUpdateRequest.newInstance(updateTokens);
- ContainerUpdateResponse updateResponse =
- containerManager.updateContainer(updateRequest);
-
- Assert.assertEquals(
- 1, updateResponse.getSuccessfullyUpdatedContainers().size());
- Assert.assertTrue(updateResponse.getFailedRequests().isEmpty());
-
- //Make sure the container is running
- List<ContainerId> statList = new ArrayList<ContainerId>();
- statList.add(cId);
- GetContainerStatusesRequest statRequest =
- GetContainerStatusesRequest.newInstance(statList);
- List<ContainerStatus> containerStatuses = containerManager
- .getContainerStatuses(statRequest).getContainerStatuses();
- Assert.assertEquals(1, containerStatuses.size());
- for (ContainerStatus status : containerStatuses) {
- Assert.assertEquals(
- org.apache.hadoop.yarn.api.records.ContainerState.RUNNING,
- status.getState());
- Assert
- .assertEquals(ExecutionType.OPPORTUNISTIC, status.getExecutionType());
- }
- }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/647b7527/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java
index f3fc724..7c74049 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java
@@ -1147,7 +1147,77 @@ public class TestContainerSchedulerQueuing extends BaseContainerManagerTest {
waitForContainerState(containerManager, createContainerId(1),
org.apache.hadoop.yarn.api.records.ContainerState.RUNNING);
+ containerStatuses = containerManager
+ .getContainerStatuses(statRequest).getContainerStatuses();
+ Assert.assertEquals(1, containerStatuses.size());
+ for (ContainerStatus status : containerStatuses) {
+ if (org.apache.hadoop.yarn.api.records.ContainerState.RUNNING ==
+ status.getState()) {
+ Assert.assertEquals(
+ ExecutionType.GUARANTEED, status.getExecutionType());
+ }
+ }
+
// Ensure no containers are queued.
Assert.assertEquals(0, containerScheduler.getNumQueuedContainers());
}
+
+ @Test
+ public void testContainerUpdateExecTypeGuaranteedToOpportunistic()
+ throws IOException, YarnException, InterruptedException {
+ delayContainers = true;
+ containerManager.start();
+ // Construct the Container-id
+ ContainerId cId = createContainerId(0);
+ ContainerLaunchContext containerLaunchContext =
+ recordFactory.newRecordInstance(ContainerLaunchContext.class);
+
+ StartContainerRequest scRequest =
+ StartContainerRequest.newInstance(
+ containerLaunchContext,
+ createContainerToken(cId, DUMMY_RM_IDENTIFIER,
+ context.getNodeId(), user, BuilderUtils.newResource(512, 1),
+ context.getContainerTokenSecretManager(), null));
+ List<StartContainerRequest> list = new ArrayList<>();
+ list.add(scRequest);
+ StartContainersRequest allRequests =
+ StartContainersRequest.newInstance(list);
+ containerManager.startContainers(allRequests);
+ // Make sure the container reaches RUNNING state
+ BaseContainerManagerTest.waitForNMContainerState(containerManager, cId,
+ org.apache.hadoop.yarn.server.nodemanager.
+ containermanager.container.ContainerState.RUNNING);
+      // Construct a container update request changing the execution type
+ List<Token> updateTokens = new ArrayList<>();
+ Token containerToken =
+ createContainerToken(cId, 1, DUMMY_RM_IDENTIFIER, context.getNodeId(),
+ user, BuilderUtils.newResource(512, 1),
+ context.getContainerTokenSecretManager(), null,
+ ExecutionType.OPPORTUNISTIC);
+ updateTokens.add(containerToken);
+ ContainerUpdateRequest updateRequest =
+ ContainerUpdateRequest.newInstance(updateTokens);
+ ContainerUpdateResponse updateResponse =
+ containerManager.updateContainer(updateRequest);
+
+ Assert.assertEquals(
+ 1, updateResponse.getSuccessfullyUpdatedContainers().size());
+ Assert.assertTrue(updateResponse.getFailedRequests().isEmpty());
+
+ //Make sure the container is running
+ List<ContainerId> statList = new ArrayList<ContainerId>();
+ statList.add(cId);
+ GetContainerStatusesRequest statRequest =
+ GetContainerStatusesRequest.newInstance(statList);
+ List<ContainerStatus> containerStatuses = containerManager
+ .getContainerStatuses(statRequest).getContainerStatuses();
+ Assert.assertEquals(1, containerStatuses.size());
+ for (ContainerStatus status : containerStatuses) {
+ Assert.assertEquals(
+ org.apache.hadoop.yarn.api.records.ContainerState.RUNNING,
+ status.getState());
+ Assert
+ .assertEquals(ExecutionType.OPPORTUNISTIC, status.getExecutionType());
+ }
+ }
}
[12/18] hadoop git commit: HDFS-12449. TestReconstructStripedFile.testNNSendsErasureCodingTasks randomly cannot finish in 60s. (SammiChen via lei)
Posted by xy...@apache.org.
HDFS-12449. TestReconstructStripedFile.testNNSendsErasureCodingTasks randomly cannot finish in 60s. (SammiChen via lei)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7bbeacb7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7bbeacb7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7bbeacb7
Branch: refs/heads/HDFS-7240
Commit: 7bbeacb75e93261dbda0e8efcde510e5fcf83efb
Parents: fda1221
Author: Lei Xu <le...@apache.org>
Authored: Tue Sep 19 11:50:01 2017 -0700
Committer: Lei Xu <le...@apache.org>
Committed: Tue Sep 19 11:50:01 2017 -0700
----------------------------------------------------------------------
.../java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7bbeacb7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
index 72b1412..713a10b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
@@ -456,8 +456,8 @@ public class TestReconstructStripedFile {
ErasureCodingPolicy policy = StripedFileTestUtil.getDefaultECPolicy();
fs.getClient().setErasureCodingPolicy("/", policy.getName());
- final int fileLen = cellSize * ecPolicy.getNumDataUnits() * 2;
- for (int i = 0; i < 100; i++) {
+ final int fileLen = cellSize * ecPolicy.getNumDataUnits();
+ for (int i = 0; i < 50; i++) {
writeFile(fs, "/ec-file-" + i, fileLen);
}
[18/18] hadoop git commit: Merge branch 'trunk' of https://git-wip-us.apache.org/repos/asf/hadoop into HDFS-7240
Posted by xy...@apache.org.
Merge branch 'trunk' of https://git-wip-us.apache.org/repos/asf/hadoop into HDFS-7240
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d77c8107
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d77c8107
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d77c8107
Branch: refs/heads/HDFS-7240
Commit: d77c8107fe89f1d56852ed9b1101bc33032ac279
Parents: 14e184a a9019e1
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Tue Sep 19 21:56:47 2017 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Tue Sep 19 21:56:47 2017 -0700
----------------------------------------------------------------------
BUILDING.txt | 2 +
dev-support/bin/create-release | 1 +
hadoop-client-modules/hadoop-client/pom.xml | 31 --
.../java/org/apache/hadoop/hdfs/DFSClient.java | 13 +-
.../hadoop/hdfs/DFSStripedOutputStream.java | 9 +-
.../org/apache/hadoop/hdfs/DataStreamer.java | 31 +-
.../hdfs/client/HdfsClientConfigKeys.java | 2 +
.../server/blockmanagement/BlockManager.java | 4 +-
.../src/main/resources/hdfs-default.xml | 17 ++
.../apache/hadoop/hdfs/StripedFileTestUtil.java | 13 +-
.../hadoop/hdfs/TestLeaseRecoveryStriped.java | 156 +++++++---
.../hadoop/hdfs/TestReconstructStripedFile.java | 4 +-
.../TestReplaceDatanodeFailureReplication.java | 291 +++++++++++++++++++
.../hadoop/hdfs/TestWriteReadStripedFile.java | 24 +-
.../hdfs/TestWriteStripedFileWithFailure.java | 3 +-
.../hdfs/server/namenode/TestDeadDatanode.java | 2 +-
.../namenode/metrics/TestNameNodeMetrics.java | 18 +-
.../hadoop/tools/TestHdfsConfigFields.java | 4 +-
.../apache/hadoop/mapred/ShuffleHandler.java | 25 +-
.../hadoop-mapreduce-client/pom.xml | 17 +-
.../hadoop/examples/BaileyBorweinPlouffe.java | 7 +-
.../apache/hadoop/examples/DBCountPageView.java | 7 +-
.../hadoop/examples/dancing/DancingLinks.java | 9 +-
.../examples/dancing/DistributedPentomino.java | 4 +-
.../hadoop/examples/dancing/Pentomino.java | 2 +-
.../org/apache/hadoop/examples/pi/DistSum.java | 6 +-
.../hadoop/examples/terasort/TeraGen.java | 6 +-
.../examples/terasort/TeraOutputFormat.java | 7 +-
.../hadoop/examples/terasort/TeraScheduler.java | 7 +-
.../hadoop/examples/terasort/TeraSort.java | 6 +-
.../hadoop/examples/terasort/TestTeraSort.java | 6 +-
hadoop-project-dist/pom.xml | 17 +-
hadoop-project/pom.xml | 2 +
.../yarn/api/records/ContainerReport.java | 26 ++
.../src/main/proto/yarn_protos.proto | 1 +
.../yarn/client/api/impl/TestYarnClient.java | 1 +
.../records/impl/pb/ContainerReportPBImpl.java | 20 ++
.../containermanager/TestContainerManager.java | 128 --------
.../TestContainerSchedulerQueuing.java | 70 +++++
.../rmcontainer/RMContainerImpl.java | 2 +-
.../scheduler/fair/Schedulable.java | 4 -
hadoop-yarn-project/hadoop-yarn/pom.xml | 17 +-
42 files changed, 734 insertions(+), 288 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d77c8107/hadoop-project/pom.xml
----------------------------------------------------------------------
[08/18] hadoop git commit: Revert "HADOOP-14771. hadoop-client does not include hadoop-yarn-client. (Ajay Kumar via Haibo Chen)" HADOOP-14879 Build failure due to failing hadoop-client-check-invariants. This reverts commit 1ee25278c891e95ba2ab142e5b78aebd752ea163.
Posted by xy...@apache.org.
Revert "HADOOP-14771. hadoop-client does not include hadoop-yarn-client. (Ajay Kumar via Haibo Chen)"
HADOOP-14879 Build failure due to failing hadoop-client-check-invariants
This reverts commit 1ee25278c891e95ba2ab142e5b78aebd752ea163.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aa6e8d2d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aa6e8d2d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aa6e8d2d
Branch: refs/heads/HDFS-7240
Commit: aa6e8d2dff533c3d0c86776567c860548723c21c
Parents: dba7a7d
Author: Steve Loughran <st...@apache.org>
Authored: Tue Sep 19 11:53:11 2017 +0100
Committer: Steve Loughran <st...@apache.org>
Committed: Tue Sep 19 11:53:11 2017 +0100
----------------------------------------------------------------------
hadoop-client-modules/hadoop-client/pom.xml | 31 ------------------------
1 file changed, 31 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa6e8d2d/hadoop-client-modules/hadoop-client/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-client-modules/hadoop-client/pom.xml b/hadoop-client-modules/hadoop-client/pom.xml
index 6500ebf..bed3f5c 100644
--- a/hadoop-client-modules/hadoop-client/pom.xml
+++ b/hadoop-client-modules/hadoop-client/pom.xml
@@ -179,37 +179,6 @@
<dependency>
<groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-yarn-client</artifactId>
- <scope>compile</scope>
- <exclusions>
- <!--Excluding hadoop-yarn-api & hadoop-annotations as they are already
- included as direct dependencies. Guava,commons-cli and log4j are
- transitive dependencies -->
- <exclusion>
- <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-yarn-api</artifactId>
- </exclusion>
- <exclusion>
- <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-annotations</artifactId>
- </exclusion>
- <exclusion>
- <groupId>com.google.guava</groupId>
- <artifactId>guava</artifactId>
- </exclusion>
- <exclusion>
- <groupId>commons-cli</groupId>
- <artifactId>commons-cli</artifactId>
- </exclusion>
- <exclusion>
- <groupId>log4j</groupId>
- <artifactId>log4j</artifactId>
- </exclusion>
- </exclusions>
- </dependency>
-
- <dependency>
- <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-mapreduce-client-core</artifactId>
<scope>compile</scope>
<exclusions>
[04/18] hadoop git commit: MAPREDUCE-6947. Moving logging APIs over to slf4j in hadoop-mapreduce-examples. Contributed by Gergely Novák.
Posted by xy...@apache.org.
MAPREDUCE-6947. Moving logging APIs over to slf4j in hadoop-mapreduce-examples. Contributed by Gergely Novák.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2018538f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2018538f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2018538f
Branch: refs/heads/HDFS-7240
Commit: 2018538fdba1a95a6556187569e872fce7f9e1c3
Parents: 56ef527
Author: Akira Ajisaka <aa...@apache.org>
Authored: Tue Sep 19 11:05:54 2017 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Tue Sep 19 11:05:54 2017 +0900
----------------------------------------------------------------------
.../java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java | 7 ++++---
.../main/java/org/apache/hadoop/examples/DBCountPageView.java | 7 ++++---
.../java/org/apache/hadoop/examples/dancing/DancingLinks.java | 7 +++----
.../src/main/java/org/apache/hadoop/examples/pi/DistSum.java | 6 +++---
.../java/org/apache/hadoop/examples/terasort/TeraGen.java | 6 +++---
.../org/apache/hadoop/examples/terasort/TeraOutputFormat.java | 7 ++++---
.../org/apache/hadoop/examples/terasort/TeraScheduler.java | 7 ++++---
.../java/org/apache/hadoop/examples/terasort/TeraSort.java | 6 +++---
.../org/apache/hadoop/examples/terasort/TestTeraSort.java | 6 +++---
9 files changed, 31 insertions(+), 28 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2018538f/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
index 7e98d7d..da4ec79 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
@@ -29,8 +29,6 @@ import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
@@ -51,6 +49,8 @@ import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import com.google.common.base.Charsets;
@@ -83,7 +83,8 @@ public class BaileyBorweinPlouffe extends Configured implements Tool {
private static final String DIGIT_SIZE_PROPERTY = NAME + ".digit.size";
private static final String DIGIT_PARTS_PROPERTY = NAME + ".digit.parts";
- private static final Log LOG = LogFactory.getLog(BaileyBorweinPlouffe.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(BaileyBorweinPlouffe.class);
/** Mapper class computing digits of Pi. */
public static class BbpMapper extends
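The rest of the patch repeats the same mechanical migration. Below is a hedged before/after sketch of the pattern (the class name is hypothetical), including the parameterized logging that makes explicit isDebugEnabled() guards unnecessary for cheap arguments.

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class SomeExample { // hypothetical class for illustration
      private static final Logger LOG =
          LoggerFactory.getLogger(SomeExample.class);

      void work(int digits) {
        // commons-logging style forced concatenation plus a guard:
        //   if (LOG.isDebugEnabled()) { LOG.debug("digits = " + digits); }
        // slf4j renders the placeholder only when debug is enabled:
        LOG.debug("digits = {}", digits);
      }
    }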
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2018538f/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java
index 8dec39d..7b73820 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java
@@ -29,8 +29,6 @@ import java.sql.SQLException;
import java.sql.Statement;
import java.util.Random;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.io.LongWritable;
@@ -49,6 +47,8 @@ import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.hsqldb.server.Server;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* This is a demonstrative program, which uses DBInputFormat for reading
@@ -77,7 +77,8 @@ import org.hsqldb.server.Server;
*/
public class DBCountPageView extends Configured implements Tool {
- private static final Log LOG = LogFactory.getLog(DBCountPageView.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(DBCountPageView.class);
private Connection connection;
private boolean initialized = false;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2018538f/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DancingLinks.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DancingLinks.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DancingLinks.java
index 94d2c83..537b4d4 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DancingLinks.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DancingLinks.java
@@ -19,8 +19,8 @@ package org.apache.hadoop.examples.dancing;
import java.util.*;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* A generic solver for tile laying problems using Knuth's dancing link
@@ -35,8 +35,7 @@ import org.apache.commons.logging.LogFactory;
* The type parameter ColumnName is the class of application's column names.
*/
public class DancingLinks<ColumnName> {
- private static final Log LOG =
- LogFactory.getLog(DancingLinks.class.getName());
+ private static final Logger LOG = LoggerFactory.getLogger(DancingLinks.class);
/**
* A cell in the table with up/down and left/right links that form doubly
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2018538f/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/DistSum.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/DistSum.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/DistSum.java
index 99f7c24..ffe63fe 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/DistSum.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/DistSum.java
@@ -28,8 +28,6 @@ import java.util.List;
import java.util.Map;
import java.util.concurrent.Callable;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.examples.pi.math.Summation;
@@ -55,6 +53,8 @@ import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* The main class for computing sums using map/reduce jobs.
@@ -66,7 +66,7 @@ import org.apache.hadoop.util.ToolRunner;
* a mix-type job may be executed on either side.
*/
public final class DistSum extends Configured implements Tool {
- private static final Log LOG = LogFactory.getLog(DistSum.class);
+ private static final Logger LOG = LoggerFactory.getLogger(DistSum.class);
private static final String NAME = DistSum.class.getSimpleName();
private static final String N_PARTS = "mapreduce.pi." + NAME + ".nParts";
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2018538f/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java
index 53bbdc4..ef20c4c 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java
@@ -25,8 +25,6 @@ import java.util.ArrayList;
import java.util.List;
import java.util.zip.Checksum;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
@@ -49,6 +47,8 @@ import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.PureJavaCrc32;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Generate the official GraySort input data set.
@@ -66,7 +66,7 @@ import org.apache.hadoop.util.ToolRunner;
* <b>bin/hadoop jar hadoop-*-examples.jar teragen 10000000000 in-dir</b>
*/
public class TeraGen extends Configured implements Tool {
- private static final Log LOG = LogFactory.getLog(TeraGen.class);
+ private static final Logger LOG = LoggerFactory.getLogger(TeraGen.class);
public enum Counters {CHECKSUM}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2018538f/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraOutputFormat.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraOutputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraOutputFormat.java
index 96580b1..e0ce36c 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraOutputFormat.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraOutputFormat.java
@@ -21,8 +21,6 @@ package org.apache.hadoop.examples.terasort;
import java.io.FileNotFoundException;
import java.io.IOException;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
@@ -38,12 +36,15 @@ import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.security.TokenCache;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* An output format that writes the key and value appended together.
*/
public class TeraOutputFormat extends FileOutputFormat<Text,Text> {
- private static final Log LOG = LogFactory.getLog(TeraOutputFormat.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(TeraOutputFormat.class);
private OutputCommitter committer = null;
/**
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2018538f/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraScheduler.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraScheduler.java
index 3e12a3d..7a9e44b 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraScheduler.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraScheduler.java
@@ -21,17 +21,18 @@ package org.apache.hadoop.examples.terasort;
import java.io.*;
import java.util.*;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import com.google.common.base.Charsets;
class TeraScheduler {
- private static final Log LOG = LogFactory.getLog(TeraScheduler.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(TeraScheduler.class);
private Split[] splits;
private List<Host> hosts = new ArrayList<Host>();
private int slotsPerHost;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2018538f/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraSort.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraSort.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraSort.java
index 040d13f..8b698e3 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraSort.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraSort.java
@@ -23,8 +23,6 @@ import java.io.IOException;
import java.io.PrintStream;
import java.net.URI;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
@@ -38,6 +36,8 @@ import org.apache.hadoop.mapreduce.Partitioner;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Generates the sampled split points, launches the job, and waits for it to
@@ -47,7 +47,7 @@ import org.apache.hadoop.util.ToolRunner;
* <b>bin/hadoop jar hadoop-*-examples.jar terasort in-dir out-dir</b>
*/
public class TeraSort extends Configured implements Tool {
- private static final Log LOG = LogFactory.getLog(TeraSort.class);
+ private static final Logger LOG = LoggerFactory.getLogger(TeraSort.class);
/**
* A partitioner that splits text keys into roughly equal partitions
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2018538f/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/terasort/TestTeraSort.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/terasort/TestTeraSort.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/terasort/TestTeraSort.java
index b835a3b..b301659 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/terasort/TestTeraSort.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/terasort/TestTeraSort.java
@@ -20,8 +20,6 @@ package org.apache.hadoop.examples.terasort;
import java.io.File;
import java.io.IOException;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.FileAlreadyExistsException;
@@ -29,12 +27,14 @@ import org.apache.hadoop.mapred.HadoopTestCase;
import org.apache.hadoop.util.ToolRunner;
import org.junit.After;
import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
public class TestTeraSort extends HadoopTestCase {
- private static Log LOG = LogFactory.getLog(TestTeraSort.class);
+ private static final Logger LOG = LoggerFactory.getLogger(TestTeraSort.class);
public TestTeraSort()
throws IOException {
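Beyond the import and declaration swap shown above, SLF4J's parameterized messages make the level-guard idiom common under commons-logging largely unnecessary. A sketch of the contrast, with hypothetical variable names; note that none of the hunks in this commit change call sites this way, only the imports and LOG declarations.

// commons-logging idiom: guard to avoid building the string eagerly
if (LOG.isDebugEnabled()) {
  LOG.debug("processed " + count + " records");
}

// SLF4J idiom: the {} placeholder defers formatting until needed
LOG.debug("processed {} records", count);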