Posted to common-commits@hadoop.apache.org by ae...@apache.org on 2017/05/09 05:46:44 UTC
[01/50] [abbrv] hadoop git commit: YARN-6517. Fix warnings from Spotbugs in hadoop-yarn-common. Contributed by Weiwei Yang
Repository: hadoop
Updated Branches:
refs/heads/HDFS-7240 f713452e7 -> 86414507f
YARN-6517. Fix warnings from Spotbugs in hadoop-yarn-common. Contributed by Weiwei Yang
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4b5bd73a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4b5bd73a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4b5bd73a
Branch: refs/heads/HDFS-7240
Commit: 4b5bd73ac53f7de4899b5b70078249ad20216048
Parents: f378621
Author: Naganarasimha <na...@apache.org>
Authored: Mon May 1 16:31:16 2017 +0530
Committer: Naganarasimha <na...@apache.org>
Committed: Mon May 1 16:31:16 2017 +0530
----------------------------------------------------------------------
.../logaggregation/AggregatedLogFormat.java | 4 ++++
.../yarn/util/ProcfsBasedProcessTree.java | 23 +++++++++++---------
2 files changed, 17 insertions(+), 10 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b5bd73a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
index 0aa318c..e5cb255 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
@@ -310,6 +310,10 @@ public class AggregatedLogFormat {
}
private Set<File> getPendingLogFilesToUpload(File containerLogDir) {
+ if(containerLogDir == null ||
+ containerLogDir.listFiles() == null) {
+ return new HashSet<>(0);
+ }
Set<File> candidates =
new HashSet<File>(Arrays.asList(containerLogDir.listFiles()));
for (File logFile : candidates) {
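The Spotbugs complaint here is the usual one about File.listFiles(), which returns null rather than an empty array when the path is not a readable directory. Note that the committed guard ends up calling listFiles() twice, once in the null check and once when building the set; an equivalent guard that reads the directory a single time could look like the following (a minimal sketch, not the committed code):

private Set<File> getPendingLogFilesToUpload(File containerLogDir) {
  // listFiles() returns null when the path does not denote a readable
  // directory or an I/O error occurs, so capture the result once.
  File[] files =
      containerLogDir == null ? null : containerLogDir.listFiles();
  if (files == null) {
    return new HashSet<>(0);
  }
  Set<File> candidates = new HashSet<>(Arrays.asList(files));
  // ... filtering continues as in the method body above ...
}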
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b5bd73a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
index d54611e..7f81c5b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
@@ -481,18 +481,21 @@ public class ProcfsBasedProcessTree extends ResourceCalculatorProcessTree {
* Get the list of all processes in the system.
*/
private List<String> getProcessList() {
- String[] processDirs = (new File(procfsDir)).list();
List<String> processList = new ArrayList<String>();
-
- for (String dir : processDirs) {
- Matcher m = numberPattern.matcher(dir);
- if (!m.matches()) continue;
- try {
- if ((new File(procfsDir, dir)).isDirectory()) {
- processList.add(dir);
+ String[] processDirs = (new File(procfsDir)).list();
+ if (processDirs != null) {
+ for (String dir : processDirs) {
+ Matcher m = numberPattern.matcher(dir);
+ if (!m.matches()) {
+ continue;
+ }
+ try {
+ if ((new File(procfsDir, dir)).isDirectory()) {
+ processList.add(dir);
+ }
+ } catch (SecurityException s) {
+ // skip this process
}
- } catch (SecurityException s) {
- // skip this process
}
}
return processList;
[31/50] [abbrv] hadoop git commit: HADOOP-14380. Make the Guava version that Hadoop builds with configurable. Contributed by Steve Loughran
Posted by ae...@apache.org.
HADOOP-14380. Make the Guava version that Hadoop builds with configurable. Contributed by Steve Loughran
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/61858a5c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/61858a5c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/61858a5c
Branch: refs/heads/HDFS-7240
Commit: 61858a5c378da75aff9cde84d418af46d718d08b
Parents: 25f5d9a
Author: Jason Lowe <jl...@yahoo-inc.com>
Authored: Thu May 4 15:17:46 2017 -0500
Committer: Jason Lowe <jl...@yahoo-inc.com>
Committed: Thu May 4 15:17:46 2017 -0500
----------------------------------------------------------------------
hadoop-project/pom.xml | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/61858a5c/hadoop-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index b282065..fdb4fb1 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -90,6 +90,7 @@
<findbugs.version>3.0.0</findbugs.version>
<spotbugs.version>3.1.0-RC1</spotbugs.version>
+ <guava.version>21.0</guava.version>
<guice.version>4.0</guice.version>
<joda-time.version>2.9.4</joda-time.version>
@@ -519,7 +520,7 @@
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
- <version>21.0</version>
+ <version>${guava.version}</version>
</dependency>
<dependency>
<groupId>com.google.code.gson</groupId>
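With the version hoisted into a property, a downstream build can select a different Guava release from the command line instead of patching the POM, for example mvn install -Dguava.version=21.0 (an illustrative invocation; 21.0 remains the default declared above). Any module that needs the Guava version can now reference ${guava.version} rather than repeating the literal.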
[12/50] [abbrv] hadoop git commit: YARN-6542. Fix the logger in TestAlignedPlanner and TestGreedyReservationAgent.
Posted by ae...@apache.org.
YARN-6542. Fix the logger in TestAlignedPlanner and TestGreedyReservationAgent.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e514fc43
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e514fc43
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e514fc43
Branch: refs/heads/HDFS-7240
Commit: e514fc432a424c57bdce16cad1d5b1d2e010f34e
Parents: 1058b40
Author: Subru Krishnan <su...@apache.org>
Authored: Mon May 1 18:10:07 2017 -0700
Committer: Subru Krishnan <su...@apache.org>
Committed: Mon May 1 18:10:07 2017 -0700
----------------------------------------------------------------------
.../reservation/planning/TestAlignedPlanner.java | 8 ++++++--
.../reservation/planning/TestGreedyReservationAgent.java | 8 ++++++--
2 files changed, 12 insertions(+), 4 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e514fc43/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/TestAlignedPlanner.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/TestAlignedPlanner.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/TestAlignedPlanner.java
index 7207d71..fd187fc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/TestAlignedPlanner.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/TestAlignedPlanner.java
@@ -52,12 +52,16 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
import org.apache.hadoop.yarn.util.resource.Resources;
-import org.eclipse.jetty.util.log.Log;
import org.junit.Before;
import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class TestAlignedPlanner {
+ private static final Logger LOG = LoggerFactory
+ .getLogger(TestAlignedPlanner.class);
+
private ReservationAgent agentRight;
private ReservationAgent agentLeft;
private InMemoryPlan plan;
@@ -1030,7 +1034,7 @@ public class TestAlignedPlanner {
// Initialize random seed
long seed = rand.nextLong();
rand.setSeed(seed);
- Log.getLog().info("Running with seed: " + seed);
+ LOG.info("Running with seed: " + seed);
// Set cluster parameters
long timeWindow = 1000000L;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e514fc43/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/TestGreedyReservationAgent.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/TestGreedyReservationAgent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/TestGreedyReservationAgent.java
index 46bfa80..51b971b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/TestGreedyReservationAgent.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/TestGreedyReservationAgent.java
@@ -55,16 +55,20 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.Capacity
import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
import org.apache.hadoop.yarn.util.resource.Resources;
-import org.eclipse.jetty.util.log.Log;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
@RunWith(Parameterized.class)
public class TestGreedyReservationAgent {
+ private static final Logger LOG = LoggerFactory
+ .getLogger(TestGreedyReservationAgent.class);
+
ReservationAgent agent;
InMemoryPlan plan;
Resource minAlloc = Resource.newInstance(1024, 1);
@@ -89,7 +93,7 @@ public class TestGreedyReservationAgent {
long seed = rand.nextLong();
rand.setSeed(seed);
- Log.getLog().info("Running with seed: " + seed);
+ LOG.info("Running with seed: " + seed);
// setting completely loose quotas
long timeWindow = 1000000L;
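Both tests previously logged through Jetty's org.eclipse.jetty.util.log.Log, coupling test output to a servlet-container API; switching to SLF4J matches the logging facade used across the project. If these call sites are revisited, SLF4J's parameterized form would also avoid the string concatenation (a possible follow-up, not part of this patch):

LOG.info("Running with seed: {}", seed);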
[46/50] [abbrv] hadoop git commit: YARN-6457. Allow custom SSL configuration to be supplied in WebApps. (Sanjay M Pujare via Haibo Chen)
Posted by ae...@apache.org.
YARN-6457. Allow custom SSL configuration to be supplied in WebApps. (Sanjay M Pujare via Haibo Chen)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1769b12a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1769b12a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1769b12a
Branch: refs/heads/HDFS-7240
Commit: 1769b12a773dc6c83f13663a77da33fa78878730
Parents: ff5ec3b
Author: Haibo Chen <ha...@cloudera.com>
Authored: Mon May 8 15:43:20 2017 -0700
Committer: Haibo Chen <ha...@cloudera.com>
Committed: Mon May 8 15:46:12 2017 -0700
----------------------------------------------------------------------
.../hadoop/yarn/webapp/util/WebAppUtils.java | 17 +++++++++++------
1 file changed, 11 insertions(+), 6 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1769b12a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
index 64a4b2b..eabd9b3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
@@ -375,16 +375,21 @@ public class WebAppUtils {
/**
* Load the SSL keystore / truststore into the HttpServer builder.
* @param builder the HttpServer2.Builder to populate with ssl config
- * @param sslConf the Configuration instance to use during loading of SSL conf
+ * @param conf the Configuration instance to load custom SSL config from
+ *
+ * @return HttpServer2.Builder instance (passed in as the first parameter)
+ * after loading SSL stores
*/
public static HttpServer2.Builder loadSslConfiguration(
- HttpServer2.Builder builder, Configuration sslConf) {
- if (sslConf == null) {
- sslConf = new Configuration(false);
+ HttpServer2.Builder builder, Configuration conf) {
+
+ Configuration sslConf = new Configuration(false);
+
+ sslConf.addResource(YarnConfiguration.YARN_SSL_SERVER_RESOURCE_DEFAULT);
+ if (conf != null) {
+ sslConf.addResource(conf);
}
boolean needsClientAuth = YarnConfiguration.YARN_SSL_CLIENT_HTTPS_NEED_AUTH_DEFAULT;
- sslConf.addResource(YarnConfiguration.YARN_SSL_SERVER_RESOURCE_DEFAULT);
-
return builder
.needsClientAuth(needsClientAuth)
.keyPassword(getPassword(sslConf, WEB_APP_KEY_PASSWORD_KEY))
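After this change the method no longer mutates the caller's Configuration: it always builds a fresh sslConf from the YARN default ssl-server resource and overlays the supplied conf on top, so caller-provided keys win while unspecified keys fall back to the defaults. A hypothetical caller might look like this (the keystore key and the setName() call are assumptions for illustration, not part of the diff):

Configuration custom = new Configuration(false);
// Only the keys set here override the defaults that the method loads from
// YarnConfiguration.YARN_SSL_SERVER_RESOURCE_DEFAULT.
custom.set("ssl.server.keystore.location", "/etc/security/webapp.jks");
HttpServer2.Builder builder = new HttpServer2.Builder().setName("webapp");
WebAppUtils.loadSslConfiguration(builder, custom);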
[37/50] [abbrv] hadoop git commit: HDFS-9807. Add an optional StorageID to writes. Contributed by Ewan Higgs
Posted by ae...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3954cca/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
index e7f0228..75baf84 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
@@ -81,10 +81,11 @@ class FsVolumeList {
return Collections.unmodifiableList(volumes);
}
- private FsVolumeReference chooseVolume(List<FsVolumeImpl> list, long blockSize)
- throws IOException {
+ private FsVolumeReference chooseVolume(List<FsVolumeImpl> list,
+ long blockSize, String storageId) throws IOException {
while (true) {
- FsVolumeImpl volume = blockChooser.chooseVolume(list, blockSize);
+ FsVolumeImpl volume = blockChooser.chooseVolume(list, blockSize,
+ storageId);
try {
return volume.obtainReference();
} catch (ClosedChannelException e) {
@@ -100,18 +101,20 @@ class FsVolumeList {
* Get next volume.
*
* @param blockSize free space needed on the volume
- * @param storageType the desired {@link StorageType}
+ * @param storageType the desired {@link StorageType}
+ * @param storageId the storage id which may or may not be used by
+ * the VolumeChoosingPolicy.
* @return next volume to store the block in.
*/
- FsVolumeReference getNextVolume(StorageType storageType, long blockSize)
- throws IOException {
+ FsVolumeReference getNextVolume(StorageType storageType, String storageId,
+ long blockSize) throws IOException {
final List<FsVolumeImpl> list = new ArrayList<>(volumes.size());
for(FsVolumeImpl v : volumes) {
if (v.getStorageType() == storageType) {
list.add(v);
}
}
- return chooseVolume(list, blockSize);
+ return chooseVolume(list, blockSize, storageId);
}
/**
@@ -129,7 +132,7 @@ class FsVolumeList {
list.add(v);
}
}
- return chooseVolume(list, blockSize);
+ return chooseVolume(list, blockSize, null);
}
long getDfsUsed() throws IOException {
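The extra String parameter threads a client-requested storage ID down to the pluggable VolumeChoosingPolicy; callers with no preference pass null, as the legacy path above does. A policy is free to ignore the hint, but a sketch of one that honors it might look like this (an illustration against the post-patch chooseVolume(List, long, String) signature, using the fsdataset types, not code from this change):

class StorageIdPreferringPolicy<V extends FsVolumeSpi>
    implements VolumeChoosingPolicy<V> {
  private final VolumeChoosingPolicy<V> fallback =
      new RoundRobinVolumeChoosingPolicy<>();

  @Override
  public V chooseVolume(List<V> volumes, long blockSize, String storageId)
      throws IOException {
    if (storageId != null) {
      for (V v : volumes) {
        // Prefer the exact volume the client asked for, provided it can
        // still hold the replica.
        if (storageId.equals(v.getStorageID())
            && v.getAvailable() >= blockSize) {
          return v;
        }
      }
    }
    return fallback.chooseVolume(volumes, blockSize, storageId);
  }
}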
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3954cca/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index 74cdeae..c98a336 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -1018,7 +1018,8 @@ public class DFSTestUtil {
// send the request
new Sender(out).transferBlock(b, new Token<BlockTokenIdentifier>(),
dfsClient.clientName, new DatanodeInfo[]{datanodes[1]},
- new StorageType[]{StorageType.DEFAULT});
+ new StorageType[]{StorageType.DEFAULT},
+ new String[0]);
out.flush();
return BlockOpResponseProto.parseDelimitedFrom(in);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3954cca/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
index b6884da..3a8fb59 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
@@ -1448,12 +1448,33 @@ public class TestBlockStoragePolicy {
testStorageTypeCheckAccessResult(new StorageType[]{StorageType.RAM_DISK,
StorageType.SSD, StorageType.ARCHIVE},
new StorageType[]{StorageType.DISK}, false);
+
+ testStorageTypeCheckAccessResult(
+ new StorageType[]{StorageType.DISK, StorageType.SSD},
+ new StorageType[]{StorageType.SSD},
+ true);
+
+ testStorageTypeCheckAccessResult(new StorageType[]{StorageType.RAM_DISK},
+ new StorageType[]{StorageType.DISK}, false);
+
+ testStorageTypeCheckAccessResult(
+ new StorageType[]{StorageType.RAM_DISK, StorageType.SSD,
+ StorageType.ARCHIVE},
+ new StorageType[]{StorageType.DISK},
+ false);
+
+ testStorageTypeCheckAccessResult(
+ new StorageType[]{StorageType.RAM_DISK, StorageType.SSD,
+ StorageType.ARCHIVE},
+ new StorageType[]{StorageType.DISK},
+ false);
+
}
private void testStorageTypeCheckAccessResult(StorageType[] requested,
StorageType[] allowed, boolean expAccess) {
try {
- BlockTokenSecretManager.checkAccess(requested, allowed);
+ BlockTokenSecretManager.checkAccess(requested, allowed, "StorageTypes");
if (!expAccess) {
fail("No expected access with allowed StorageTypes "
+ Arrays.toString(allowed) + " and requested StorageTypes "
@@ -1467,4 +1488,56 @@ public class TestBlockStoragePolicy {
}
}
}
+
+ @Test
+ public void testStorageIDCheckAccess() {
+ testStorageIDCheckAccessResult(
+ new String[]{"DN1-Storage1"},
+ new String[]{"DN1-Storage1"}, true);
+
+ testStorageIDCheckAccessResult(new String[]{"DN1-Storage1", "DN2-Storage1"},
+ new String[]{"DN1-Storage1"},
+ true);
+
+ testStorageIDCheckAccessResult(new String[]{"DN1-Storage1", "DN2-Storage1"},
+ new String[]{"DN1-Storage1", "DN1-Storage2"}, false);
+
+ testStorageIDCheckAccessResult(
+ new String[]{"DN1-Storage1", "DN1-Storage2"},
+ new String[]{"DN1-Storage1"}, true);
+
+ testStorageIDCheckAccessResult(
+ new String[]{"DN1-Storage1", "DN1-Storage2"},
+ new String[]{"DN2-Storage1"}, false);
+
+ testStorageIDCheckAccessResult(
+ new String[]{"DN1-Storage2", "DN2-Storage2"},
+ new String[]{"DN1-Storage1", "DN2-Storage1"}, false);
+
+ testStorageIDCheckAccessResult(new String[0], new String[0], false);
+
+ testStorageIDCheckAccessResult(new String[0], new String[]{"DN1-Storage1"},
+ true);
+
+ testStorageIDCheckAccessResult(new String[]{"DN1-Storage1"}, new String[0],
+ false);
+ }
+
+ private void testStorageIDCheckAccessResult(String[] requested,
+ String[] allowed, boolean expAccess) {
+ try {
+ BlockTokenSecretManager.checkAccess(requested, allowed, "StorageIDs");
+ if (!expAccess) {
+ fail("No expected access with allowed StorageIDs"
+ + Arrays.toString(allowed) + " and requested StorageIDs"
+ + Arrays.toString(requested));
+ }
+ } catch (SecretManager.InvalidToken e) {
+ if (expAccess) {
+ fail("Expected access with allowed StorageIDs "
+ + Arrays.toString(allowed) + " and requested StorageIDs"
+ + Arrays.toString(requested));
+ }
+ }
+ }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3954cca/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
index 3f4fe28..7a2ac1b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
@@ -559,6 +559,7 @@ public class TestDataTransferProtocol {
BlockTokenSecretManager.DUMMY_TOKEN, "cl",
new DatanodeInfo[1], new StorageType[1], null, stage,
0, block.getNumBytes(), block.getNumBytes(), newGS,
- checksum, CachingStrategy.newDefaultStrategy(), false, false, null);
+ checksum, CachingStrategy.newDefaultStrategy(), false, false,
+ null, null, new String[0]);
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3954cca/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteBlockGetsBlockLengthHint.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteBlockGetsBlockLengthHint.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteBlockGetsBlockLengthHint.java
index 5c1b38f..e159914 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteBlockGetsBlockLengthHint.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteBlockGetsBlockLengthHint.java
@@ -98,11 +98,11 @@ public class TestWriteBlockGetsBlockLengthHint {
* correctly propagate the hint to FsDatasetSpi.
*/
@Override
- public synchronized ReplicaHandler createRbw(
- StorageType storageType, ExtendedBlock b, boolean allowLazyPersist)
+ public synchronized ReplicaHandler createRbw(StorageType storageType,
+ String storageId, ExtendedBlock b, boolean allowLazyPersist)
throws IOException {
assertThat(b.getLocalBlock().getNumBytes(), is(EXPECTED_BLOCK_LENGTH));
- return super.createRbw(storageType, b, allowLazyPersist);
+ return super.createRbw(storageType, storageId, b, allowLazyPersist);
}
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3954cca/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
index e98207f..747f295 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
@@ -151,7 +151,7 @@ public class TestBlockToken {
assertTrue("Received BlockTokenIdentifier is wrong", ident.equals(id));
sm.checkAccess(id, null, PBHelperClient.convert(req.getBlock()),
BlockTokenIdentifier.AccessMode.WRITE,
- new StorageType[]{StorageType.DEFAULT});
+ new StorageType[]{StorageType.DEFAULT}, null);
result = id.getBlockId();
}
return GetReplicaVisibleLengthResponseProto.newBuilder()
@@ -160,11 +160,11 @@ public class TestBlockToken {
}
private BlockTokenIdentifier generateTokenId(BlockTokenSecretManager sm,
- ExtendedBlock block,
- EnumSet<BlockTokenIdentifier.AccessMode> accessModes,
- StorageType... storageTypes) throws IOException {
+ ExtendedBlock block, EnumSet<BlockTokenIdentifier.AccessMode> accessModes,
+ StorageType[] storageTypes, String[] storageIds)
+ throws IOException {
Token<BlockTokenIdentifier> token = sm.generateToken(block, accessModes,
- storageTypes);
+ storageTypes, storageIds);
BlockTokenIdentifier id = sm.createIdentifier();
id.readFields(new DataInputStream(new ByteArrayInputStream(token
.getIdentifier())));
@@ -178,29 +178,28 @@ public class TestBlockToken {
enableProtobuf);
TestWritable.testWritable(generateTokenId(sm, block3,
EnumSet.noneOf(BlockTokenIdentifier.AccessMode.class),
- StorageType.DEFAULT));
+ new StorageType[]{StorageType.DEFAULT}, null));
TestWritable.testWritable(generateTokenId(sm, block3,
EnumSet.of(BlockTokenIdentifier.AccessMode.WRITE),
- StorageType.DEFAULT));
+ new StorageType[]{StorageType.DEFAULT}, null));
TestWritable.testWritable(generateTokenId(sm, block3,
EnumSet.allOf(BlockTokenIdentifier.AccessMode.class),
- StorageType.DEFAULT));
+ new StorageType[]{StorageType.DEFAULT}, null));
TestWritable.testWritable(generateTokenId(sm, block1,
EnumSet.allOf(BlockTokenIdentifier.AccessMode.class),
- StorageType.DEFAULT));
+ new StorageType[]{StorageType.DEFAULT}, null));
TestWritable.testWritable(generateTokenId(sm, block2,
EnumSet.of(BlockTokenIdentifier.AccessMode.WRITE),
- StorageType.DEFAULT));
+ new StorageType[]{StorageType.DEFAULT}, null));
TestWritable.testWritable(generateTokenId(sm, block3,
EnumSet.noneOf(BlockTokenIdentifier.AccessMode.class),
- StorageType.DEFAULT));
+ new StorageType[]{StorageType.DEFAULT}, null));
// We must be backwards compatible when adding storageType
TestWritable.testWritable(generateTokenId(sm, block3,
- EnumSet.noneOf(BlockTokenIdentifier.AccessMode.class),
- (StorageType[]) null));
+ EnumSet.noneOf(BlockTokenIdentifier.AccessMode.class), null, null));
TestWritable.testWritable(generateTokenId(sm, block3,
EnumSet.noneOf(BlockTokenIdentifier.AccessMode.class),
- StorageType.EMPTY_ARRAY));
+ StorageType.EMPTY_ARRAY, null));
}
@Test
@@ -215,35 +214,36 @@ public class TestBlockToken {
private static void checkAccess(BlockTokenSecretManager m,
Token<BlockTokenIdentifier> t, ExtendedBlock blk,
- BlockTokenIdentifier.AccessMode mode) throws SecretManager.InvalidToken {
- m.checkAccess(t, null, blk, mode, new StorageType[]{ StorageType.DEFAULT });
+ BlockTokenIdentifier.AccessMode mode, StorageType[] storageTypes,
+ String[] storageIds) throws SecretManager.InvalidToken {
+ m.checkAccess(t, null, blk, mode, storageTypes, storageIds);
}
private void tokenGenerationAndVerification(BlockTokenSecretManager master,
- BlockTokenSecretManager slave, StorageType... storageTypes)
- throws Exception {
+ BlockTokenSecretManager slave, StorageType[] storageTypes,
+ String[] storageIds) throws Exception {
// single-mode tokens
for (BlockTokenIdentifier.AccessMode mode : BlockTokenIdentifier.AccessMode
.values()) {
// generated by master
Token<BlockTokenIdentifier> token1 = master.generateToken(block1,
- EnumSet.of(mode), storageTypes);
- checkAccess(master, token1, block1, mode);
- checkAccess(slave, token1, block1, mode);
+ EnumSet.of(mode), storageTypes, storageIds);
+ checkAccess(master, token1, block1, mode, storageTypes, storageIds);
+ checkAccess(slave, token1, block1, mode, storageTypes, storageIds);
// generated by slave
Token<BlockTokenIdentifier> token2 = slave.generateToken(block2,
- EnumSet.of(mode), storageTypes);
- checkAccess(master, token2, block2, mode);
- checkAccess(slave, token2, block2, mode);
+ EnumSet.of(mode), storageTypes, storageIds);
+ checkAccess(master, token2, block2, mode, storageTypes, storageIds);
+ checkAccess(slave, token2, block2, mode, storageTypes, storageIds);
}
// multi-mode tokens
Token<BlockTokenIdentifier> mtoken = master.generateToken(block3,
EnumSet.allOf(BlockTokenIdentifier.AccessMode.class),
- storageTypes);
+ storageTypes, storageIds);
for (BlockTokenIdentifier.AccessMode mode : BlockTokenIdentifier.AccessMode
.values()) {
- checkAccess(master, mtoken, block3, mode);
- checkAccess(slave, mtoken, block3, mode);
+ checkAccess(master, mtoken, block3, mode, storageTypes, storageIds);
+ checkAccess(slave, mtoken, block3, mode, storageTypes, storageIds);
}
}
@@ -259,18 +259,18 @@ public class TestBlockToken {
ExportedBlockKeys keys = masterHandler.exportKeys();
slaveHandler.addKeys(keys);
tokenGenerationAndVerification(masterHandler, slaveHandler,
- StorageType.DEFAULT);
- tokenGenerationAndVerification(masterHandler, slaveHandler, null);
+ new StorageType[]{StorageType.DEFAULT}, null);
+ tokenGenerationAndVerification(masterHandler, slaveHandler, null, null);
// key updating
masterHandler.updateKeys();
tokenGenerationAndVerification(masterHandler, slaveHandler,
- StorageType.DEFAULT);
- tokenGenerationAndVerification(masterHandler, slaveHandler, null);
+ new StorageType[]{StorageType.DEFAULT}, null);
+ tokenGenerationAndVerification(masterHandler, slaveHandler, null, null);
keys = masterHandler.exportKeys();
slaveHandler.addKeys(keys);
tokenGenerationAndVerification(masterHandler, slaveHandler,
- StorageType.DEFAULT);
- tokenGenerationAndVerification(masterHandler, slaveHandler, null);
+ new StorageType[]{StorageType.DEFAULT}, null);
+ tokenGenerationAndVerification(masterHandler, slaveHandler, null, null);
}
@Test
@@ -315,7 +315,7 @@ public class TestBlockToken {
enableProtobuf);
Token<BlockTokenIdentifier> token = sm.generateToken(block3,
EnumSet.allOf(BlockTokenIdentifier.AccessMode.class),
- new StorageType[]{StorageType.DEFAULT});
+ new StorageType[]{StorageType.DEFAULT}, new String[0]);
final Server server = createMockDatanode(sm, token, conf);
@@ -365,7 +365,7 @@ public class TestBlockToken {
enableProtobuf);
Token<BlockTokenIdentifier> token = sm.generateToken(block3,
EnumSet.allOf(BlockTokenIdentifier.AccessMode.class),
- new StorageType[]{StorageType.DEFAULT});
+ new StorageType[]{StorageType.DEFAULT}, new String[0]);
final Server server = createMockDatanode(sm, token, conf);
server.start();
@@ -451,19 +451,23 @@ public class TestBlockToken {
ExportedBlockKeys keys = masterHandler.exportKeys();
bpMgr.addKeys(bpid, keys);
+ String[] storageIds = new String[] {"DS-9001"};
tokenGenerationAndVerification(masterHandler, bpMgr.get(bpid),
- StorageType.DEFAULT);
- tokenGenerationAndVerification(masterHandler, bpMgr.get(bpid), null);
+ new StorageType[]{StorageType.DEFAULT}, storageIds);
+ tokenGenerationAndVerification(masterHandler, bpMgr.get(bpid), null,
+ null);
// Test key updating
masterHandler.updateKeys();
tokenGenerationAndVerification(masterHandler, bpMgr.get(bpid),
- StorageType.DEFAULT);
- tokenGenerationAndVerification(masterHandler, bpMgr.get(bpid), null);
+ new StorageType[]{StorageType.DEFAULT}, storageIds);
+ tokenGenerationAndVerification(masterHandler, bpMgr.get(bpid), null,
+ null);
keys = masterHandler.exportKeys();
bpMgr.addKeys(bpid, keys);
tokenGenerationAndVerification(masterHandler, bpMgr.get(bpid),
- StorageType.DEFAULT);
- tokenGenerationAndVerification(masterHandler, bpMgr.get(bpid), null);
+ new StorageType[]{StorageType.DEFAULT}, new String[]{"DS-9001"});
+ tokenGenerationAndVerification(masterHandler, bpMgr.get(bpid), null,
+ null);
}
}
@@ -540,7 +544,7 @@ public class TestBlockToken {
useProto);
Token<BlockTokenIdentifier> token = sm.generateToken(block1,
EnumSet.noneOf(BlockTokenIdentifier.AccessMode.class),
- new StorageType[]{StorageType.DEFAULT});
+ new StorageType[]{StorageType.DEFAULT}, new String[0]);
final byte[] tokenBytes = token.getIdentifier();
BlockTokenIdentifier legacyToken = new BlockTokenIdentifier();
BlockTokenIdentifier protobufToken = new BlockTokenIdentifier();
@@ -605,7 +609,7 @@ public class TestBlockToken {
useProto);
Token<BlockTokenIdentifier> token = sm.generateToken(block1,
EnumSet.noneOf(BlockTokenIdentifier.AccessMode.class),
- StorageType.EMPTY_ARRAY);
+ StorageType.EMPTY_ARRAY, new String[0]);
final byte[] tokenBytes = token.getIdentifier();
BlockTokenIdentifier legacyToken = new BlockTokenIdentifier();
BlockTokenIdentifier protobufToken = new BlockTokenIdentifier();
@@ -699,7 +703,8 @@ public class TestBlockToken {
*/
BlockTokenIdentifier identifier = new BlockTokenIdentifier("user",
"blockpool", 123, EnumSet.allOf(BlockTokenIdentifier.AccessMode.class),
- new StorageType[]{StorageType.DISK, StorageType.ARCHIVE}, true);
+ new StorageType[]{StorageType.DISK, StorageType.ARCHIVE},
+ new String[] {"fake-storage-id"}, true);
Calendar cal = new GregorianCalendar();
cal.set(2017, 1, 9, 0, 12, 35);
long datetime = cal.getTimeInMillis();
@@ -749,7 +754,8 @@ public class TestBlockToken {
new StorageType[]{StorageType.RAM_DISK, StorageType.SSD,
StorageType.DISK, StorageType.ARCHIVE};
BlockTokenIdentifier ident = new BlockTokenIdentifier("user", "bpool",
- 123, accessModes, storageTypes, useProto);
+ 123, accessModes, storageTypes, new String[] {"fake-storage-id"},
+ useProto);
ident.setExpiryDate(1487080345L);
BlockTokenIdentifier ret = writeAndReadBlockToken(ident);
assertEquals(ret.getExpiryDate(), 1487080345L);
@@ -760,6 +766,7 @@ public class TestBlockToken {
assertEquals(ret.getAccessModes(),
EnumSet.allOf(BlockTokenIdentifier.AccessMode.class));
assertArrayEquals(ret.getStorageTypes(), storageTypes);
+ assertArrayEquals(ret.getStorageIds(), new String[] {"fake-storage-id"});
}
@Test
@@ -767,5 +774,4 @@ public class TestBlockToken {
testBlockTokenSerialization(false);
testBlockTokenSerialization(true);
}
-
}
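Taken together, the updated tests exercise a token round trip that now pins storage IDs as well as storage types. Condensed from the calls above, the flow is (with illustrative values):

Token<BlockTokenIdentifier> token = sm.generateToken(block1,
    EnumSet.of(BlockTokenIdentifier.AccessMode.WRITE),
    new StorageType[]{StorageType.DISK}, new String[]{"DS-9001"});
// checkAccess throws SecretManager.InvalidToken when the requested mode,
// storage types, or storage IDs are not covered by the token.
sm.checkAccess(token, null, block1, BlockTokenIdentifier.AccessMode.WRITE,
    new StorageType[]{StorageType.DISK}, new String[]{"DS-9001"});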
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3954cca/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java
index 6810a0b..c9ff572 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java
@@ -389,7 +389,7 @@ public abstract class BlockReportTestBase {
// Create a bogus new block which will not be present on the namenode.
ExtendedBlock b = new ExtendedBlock(
poolId, rand.nextLong(), 1024L, rand.nextLong());
- dn.getFSDataset().createRbw(StorageType.DEFAULT, b, false);
+ dn.getFSDataset().createRbw(StorageType.DEFAULT, null, b, false);
DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
StorageBlockReport[] reports = getBlockReports(dn, poolId, false, false);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3954cca/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
index cd3befd..18b4922 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
@@ -1023,21 +1023,22 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
@Override // FsDatasetSpi
public synchronized ReplicaHandler createRbw(
- StorageType storageType, ExtendedBlock b,
+ StorageType storageType, String storageId, ExtendedBlock b,
boolean allowLazyPersist) throws IOException {
- return createTemporary(storageType, b);
+ return createTemporary(storageType, storageId, b);
}
@Override // FsDatasetSpi
public synchronized ReplicaHandler createTemporary(
- StorageType storageType, ExtendedBlock b) throws IOException {
+ StorageType storageType, String storageId, ExtendedBlock b)
+ throws IOException {
if (isValidBlock(b)) {
- throw new ReplicaAlreadyExistsException("Block " + b +
- " is valid, and cannot be written to.");
- }
+ throw new ReplicaAlreadyExistsException("Block " + b +
+ " is valid, and cannot be written to.");
+ }
if (isValidRbw(b)) {
- throw new ReplicaAlreadyExistsException("Block " + b +
- " is being written, and cannot be written to.");
+ throw new ReplicaAlreadyExistsException("Block " + b +
+ " is being written, and cannot be written to.");
}
final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
BInfo binfo = new BInfo(b.getBlockPoolId(), b.getLocalBlock(), true);
@@ -1419,7 +1420,7 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
@Override
public ReplicaInfo moveBlockAcrossStorage(ExtendedBlock block,
- StorageType targetStorageType) throws IOException {
+ StorageType targetStorageType, String storageId) throws IOException {
// TODO Auto-generated method stub
return null;
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3954cca/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
index 579252b..311d5a6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
@@ -647,7 +647,7 @@ public class TestBlockRecovery {
if(LOG.isDebugEnabled()) {
LOG.debug("Running " + GenericTestUtils.getMethodName());
}
- dn.data.createRbw(StorageType.DEFAULT, block, false);
+ dn.data.createRbw(StorageType.DEFAULT, null, block, false);
BlockRecoveryWorker.RecoveryTaskContiguous RecoveryTaskContiguous =
recoveryWorker.new RecoveryTaskContiguous(rBlock);
try {
@@ -673,7 +673,7 @@ public class TestBlockRecovery {
LOG.debug("Running " + GenericTestUtils.getMethodName());
}
ReplicaInPipeline replicaInfo = dn.data.createRbw(
- StorageType.DEFAULT, block, false).getReplica();
+ StorageType.DEFAULT, null, block, false).getReplica();
ReplicaOutputStreams streams = null;
try {
streams = replicaInfo.createStreams(true,
@@ -972,7 +972,7 @@ public class TestBlockRecovery {
// Register this thread as the writer for the recoveringBlock.
LOG.debug("slowWriter creating rbw");
ReplicaHandler replicaHandler =
- spyDN.data.createRbw(StorageType.DISK, block, false);
+ spyDN.data.createRbw(StorageType.DISK, null, block, false);
replicaHandler.close();
LOG.debug("slowWriter created rbw");
// Tell the parent thread to start progressing.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3954cca/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
index f811bd8..8992d47 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
@@ -394,7 +394,7 @@ public class TestBlockReplacement {
DataOutputStream out = new DataOutputStream(sock.getOutputStream());
new Sender(out).replaceBlock(block, targetStorageType,
BlockTokenSecretManager.DUMMY_TOKEN, source.getDatanodeUuid(),
- sourceProxy);
+ sourceProxy, null);
out.flush();
// receiveResponse
DataInputStream reply = new DataInputStream(sock.getInputStream());
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3954cca/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataXceiverLazyPersistHint.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataXceiverLazyPersistHint.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataXceiverLazyPersistHint.java
index b2bfe49..8fda664 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataXceiverLazyPersistHint.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataXceiverLazyPersistHint.java
@@ -129,7 +129,7 @@ public class TestDataXceiverLazyPersistHint {
DataChecksum.newDataChecksum(DataChecksum.Type.NULL, 0),
CachingStrategy.newDefaultStrategy(),
lazyPersist,
- false, null);
+ false, null, null, new String[0]);
}
// Helper functions to setup the mock objects.
@@ -151,7 +151,7 @@ public class TestDataXceiverLazyPersistHint {
any(BlockConstructionStage.class), anyLong(), anyLong(), anyLong(),
anyString(), any(DatanodeInfo.class), any(DataNode.class),
any(DataChecksum.class), any(CachingStrategy.class),
- captor.capture(), anyBoolean());
+ captor.capture(), anyBoolean(), any(String.class));
doReturn(mock(DataOutputStream.class)).when(xceiverSpy)
.getBufferedOutputStream();
return xceiverSpy;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3954cca/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
index cd86720..38e4287 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
@@ -167,7 +167,8 @@ public class TestDiskError {
BlockTokenSecretManager.DUMMY_TOKEN, "",
new DatanodeInfo[0], new StorageType[0], null,
BlockConstructionStage.PIPELINE_SETUP_CREATE, 1, 0L, 0L, 0L,
- checksum, CachingStrategy.newDefaultStrategy(), false, false, null);
+ checksum, CachingStrategy.newDefaultStrategy(), false, false,
+ null, null, new String[0]);
out.flush();
// close the connection before sending the content of the block
@@ -274,7 +275,7 @@ public class TestDiskError {
dn1.getDatanodeId());
dn0.transferBlock(block, new DatanodeInfo[]{dnd1},
- new StorageType[]{StorageType.DISK});
+ new StorageType[]{StorageType.DISK}, new String[0]);
// Sleep for 1 second so the DataTrasnfer daemon can start transfer.
try {
Thread.sleep(1000);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3954cca/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java
index 4e724bc7..2e69595 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java
@@ -81,7 +81,7 @@ public class TestSimulatedFSDataset {
// we pass expected len as zero, - fsdataset should use the sizeof actual
// data written
ReplicaInPipeline bInfo = fsdataset.createRbw(
- StorageType.DEFAULT, b, false).getReplica();
+ StorageType.DEFAULT, null, b, false).getReplica();
ReplicaOutputStreams out = bInfo.createStreams(true,
DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512));
try {
@@ -368,7 +368,7 @@ public class TestSimulatedFSDataset {
ExtendedBlock block = new ExtendedBlock(newbpid,1);
try {
// it will throw an exception if the block pool is not found
- fsdataset.createTemporary(StorageType.DEFAULT, block);
+ fsdataset.createTemporary(StorageType.DEFAULT, null, block);
} catch (IOException ioe) {
// JUnit does not capture exception in non-main thread,
// so cache it and then let main thread throw later.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3954cca/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java
index 62ef731..2e439d6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java
@@ -138,14 +138,15 @@ public class ExternalDatasetImpl implements FsDatasetSpi<ExternalVolumeImpl> {
}
@Override
- public ReplicaHandler createTemporary(StorageType t, ExtendedBlock b)
+ public ReplicaHandler createTemporary(StorageType t, String i,
+ ExtendedBlock b)
throws IOException {
return new ReplicaHandler(new ExternalReplicaInPipeline(), null);
}
@Override
- public ReplicaHandler createRbw(StorageType t, ExtendedBlock b, boolean tf)
- throws IOException {
+ public ReplicaHandler createRbw(StorageType storageType, String id,
+ ExtendedBlock b, boolean tf) throws IOException {
return new ReplicaHandler(new ExternalReplicaInPipeline(), null);
}
@@ -332,7 +333,8 @@ public class ExternalDatasetImpl implements FsDatasetSpi<ExternalVolumeImpl> {
}
@Override
- public ReplicaInfo moveBlockAcrossStorage(ExtendedBlock block, StorageType targetStorageType) throws IOException {
+ public ReplicaInfo moveBlockAcrossStorage(ExtendedBlock block,
+ StorageType targetStorageType, String storageId) throws IOException {
return null;
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3954cca/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/TestAvailableSpaceVolumeChoosingPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/TestAvailableSpaceVolumeChoosingPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/TestAvailableSpaceVolumeChoosingPolicy.java
index 9414a0e..24a43e7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/TestAvailableSpaceVolumeChoosingPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/TestAvailableSpaceVolumeChoosingPolicy.java
@@ -89,10 +89,12 @@ public class TestAvailableSpaceVolumeChoosingPolicy {
// than the threshold of 1MB.
volumes.add(Mockito.mock(FsVolumeSpi.class));
Mockito.when(volumes.get(1).getAvailable()).thenReturn(1024L * 1024L * 3);
-
- Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100));
- Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100));
- Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100));
+ Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100,
+ null));
+ Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100,
+ null));
+ Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100,
+ null));
}
@Test(timeout=60000)
@@ -115,21 +117,29 @@ public class TestAvailableSpaceVolumeChoosingPolicy {
// Third volume, again with 3MB free space.
volumes.add(Mockito.mock(FsVolumeSpi.class));
Mockito.when(volumes.get(2).getAvailable()).thenReturn(1024L * 1024L * 3);
-
+
// We should alternate assigning between the two volumes with a lot of free
// space.
initPolicy(policy, 1.0f);
- Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100));
- Assert.assertEquals(volumes.get(2), policy.chooseVolume(volumes, 100));
- Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100));
- Assert.assertEquals(volumes.get(2), policy.chooseVolume(volumes, 100));
+ Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100,
+ null));
+ Assert.assertEquals(volumes.get(2), policy.chooseVolume(volumes, 100,
+ null));
+ Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100,
+ null));
+ Assert.assertEquals(volumes.get(2), policy.chooseVolume(volumes, 100,
+ null));
// All writes should be assigned to the volume with the least free space.
initPolicy(policy, 0.0f);
- Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 100));
- Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 100));
- Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 100));
- Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 100));
+ Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 100,
+ null));
+ Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 100,
+ null));
+ Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 100,
+ null));
+ Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 100,
+ null));
}
@Test(timeout=60000)
@@ -156,22 +166,30 @@ public class TestAvailableSpaceVolumeChoosingPolicy {
// Fourth volume, again with 3MB free space.
volumes.add(Mockito.mock(FsVolumeSpi.class));
Mockito.when(volumes.get(3).getAvailable()).thenReturn(1024L * 1024L * 3);
-
+
// We should alternate assigning between the two volumes with a lot of free
// space.
initPolicy(policy, 1.0f);
- Assert.assertEquals(volumes.get(2), policy.chooseVolume(volumes, 100));
- Assert.assertEquals(volumes.get(3), policy.chooseVolume(volumes, 100));
- Assert.assertEquals(volumes.get(2), policy.chooseVolume(volumes, 100));
- Assert.assertEquals(volumes.get(3), policy.chooseVolume(volumes, 100));
+ Assert.assertEquals(volumes.get(2), policy.chooseVolume(volumes, 100,
+ null));
+ Assert.assertEquals(volumes.get(3), policy.chooseVolume(volumes, 100,
+ null));
+ Assert.assertEquals(volumes.get(2), policy.chooseVolume(volumes, 100,
+ null));
+ Assert.assertEquals(volumes.get(3), policy.chooseVolume(volumes, 100,
+ null));
// We should alternate assigning between the two volumes with less free
// space.
initPolicy(policy, 0.0f);
- Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 100));
- Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100));
- Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 100));
- Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100));
+ Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 100,
+ null));
+ Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100,
+ null));
+ Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 100,
+ null));
+ Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100,
+ null));
}
@Test(timeout=60000)
@@ -190,13 +208,14 @@ public class TestAvailableSpaceVolumeChoosingPolicy {
// than the threshold of 1MB.
volumes.add(Mockito.mock(FsVolumeSpi.class));
Mockito.when(volumes.get(1).getAvailable()).thenReturn(1024L * 1024L * 3);
-
+
// All writes should be assigned to the volume with the least free space.
// However, if the volume with the least free space doesn't have enough
// space to accept the replica size, and another volume does have enough
// free space, that should be chosen instead.
initPolicy(policy, 0.0f);
- Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 1024L * 1024L * 2));
+ Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes,
+ 1024L * 1024L * 2, null));
}
@Test(timeout=60000)
@@ -220,10 +239,11 @@ public class TestAvailableSpaceVolumeChoosingPolicy {
.thenReturn(1024L * 1024L * 3)
.thenReturn(1024L * 1024L * 3)
.thenReturn(1024L * 1024L * 1); // After the third check, return 1MB.
-
+
// Should still be able to get a volume for the replica even though the
// available space on the second volume changed.
- Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100));
+ Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes,
+ 100, null));
}
@Test(timeout=60000)
@@ -271,12 +291,12 @@ public class TestAvailableSpaceVolumeChoosingPolicy {
Mockito.when(volume.getAvailable()).thenReturn(1024L * 1024L * 3);
volumes.add(volume);
}
-
+
initPolicy(policy, preferencePercent);
long lowAvailableSpaceVolumeSelected = 0;
long highAvailableSpaceVolumeSelected = 0;
for (int i = 0; i < RANDOMIZED_ITERATIONS; i++) {
- FsVolumeSpi volume = policy.chooseVolume(volumes, 100);
+ FsVolumeSpi volume = policy.chooseVolume(volumes, 100, null);
for (int j = 0; j < volumes.size(); j++) {
// Note how many times the first low available volume was selected
if (volume == volumes.get(j) && j == 0) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3954cca/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/TestRoundRobinVolumeChoosingPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/TestRoundRobinVolumeChoosingPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/TestRoundRobinVolumeChoosingPolicy.java
index 9b3047f..44e2a30 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/TestRoundRobinVolumeChoosingPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/TestRoundRobinVolumeChoosingPolicy.java
@@ -50,20 +50,21 @@ public class TestRoundRobinVolumeChoosingPolicy {
// Second volume, with 200 bytes of space.
volumes.add(Mockito.mock(FsVolumeSpi.class));
Mockito.when(volumes.get(1).getAvailable()).thenReturn(200L);
-
+
// Test two rounds of round-robin choosing
- Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 0));
- Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 0));
- Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 0));
- Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 0));
+ Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 0, null));
+ Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 0, null));
+ Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 0, null));
+ Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 0, null));
// The first volume has only 100L space, so the policy should
// wisely choose the second one in case we ask for more.
- Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 150));
+ Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 150,
+ null));
// Fail if no volume can be chosen.
try {
- policy.chooseVolume(volumes, Long.MAX_VALUE);
+ policy.chooseVolume(volumes, Long.MAX_VALUE, null);
Assert.fail();
} catch (IOException e) {
// Passed.
@@ -93,7 +94,7 @@ public class TestRoundRobinVolumeChoosingPolicy {
int blockSize = 700;
try {
- policy.chooseVolume(volumes, blockSize);
+ policy.chooseVolume(volumes, blockSize, null);
Assert.fail("expected to throw DiskOutOfSpaceException");
} catch(DiskOutOfSpaceException e) {
Assert.assertEquals("Not returnig the expected message",
@@ -137,21 +138,21 @@ public class TestRoundRobinVolumeChoosingPolicy {
Mockito.when(ssdVolumes.get(1).getAvailable()).thenReturn(100L);
Assert.assertEquals(diskVolumes.get(0),
- policy.chooseVolume(diskVolumes, 0));
+ policy.chooseVolume(diskVolumes, 0, null));
// Independent Round-Robin for different storage type
Assert.assertEquals(ssdVolumes.get(0),
- policy.chooseVolume(ssdVolumes, 0));
+ policy.chooseVolume(ssdVolumes, 0, null));
// Take block size into consideration
Assert.assertEquals(ssdVolumes.get(0),
- policy.chooseVolume(ssdVolumes, 150L));
+ policy.chooseVolume(ssdVolumes, 150L, null));
Assert.assertEquals(diskVolumes.get(1),
- policy.chooseVolume(diskVolumes, 0));
+ policy.chooseVolume(diskVolumes, 0, null));
Assert.assertEquals(diskVolumes.get(0),
- policy.chooseVolume(diskVolumes, 50L));
+ policy.chooseVolume(diskVolumes, 50L, null));
try {
- policy.chooseVolume(diskVolumes, 200L);
+ policy.chooseVolume(diskVolumes, 200L, null);
Assert.fail("Should throw an DiskOutOfSpaceException before this!");
} catch (DiskOutOfSpaceException e) {
// Pass.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3954cca/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
index 905c3f0..3293561 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
@@ -259,7 +259,7 @@ public class TestFsDatasetImpl {
String bpid = BLOCK_POOL_IDS[NUM_BLOCKS % BLOCK_POOL_IDS.length];
ExtendedBlock eb = new ExtendedBlock(bpid, i);
try (ReplicaHandler replica =
- dataset.createRbw(StorageType.DEFAULT, eb, false)) {
+ dataset.createRbw(StorageType.DEFAULT, null, eb, false)) {
}
}
final String[] dataDirs =
@@ -566,7 +566,7 @@ public class TestFsDatasetImpl {
class ResponderThread extends Thread {
public void run() {
try (ReplicaHandler replica = dataset
- .createRbw(StorageType.DEFAULT, eb, false)) {
+ .createRbw(StorageType.DEFAULT, null, eb, false)) {
LOG.info("CreateRbw finished");
startFinalizeLatch.countDown();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3954cca/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java
index 83c15ca..ee3a79f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java
@@ -101,7 +101,7 @@ public class TestFsVolumeList {
}
for (int i = 0; i < 10; i++) {
try (FsVolumeReference ref =
- volumeList.getNextVolume(StorageType.DEFAULT, 128)) {
+ volumeList.getNextVolume(StorageType.DEFAULT, null, 128)) {
// volume No.2 will not be chosen.
assertNotEquals(ref.getVolume(), volumes.get(1));
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3954cca/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java
index da53cae..11525ed 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java
@@ -353,7 +353,7 @@ public class TestWriteToReplica {
}
try {
- dataSet.createRbw(StorageType.DEFAULT, blocks[FINALIZED], false);
+ dataSet.createRbw(StorageType.DEFAULT, null, blocks[FINALIZED], false);
Assert.fail("Should not have created a replica that's already " +
"finalized " + blocks[FINALIZED]);
} catch (ReplicaAlreadyExistsException e) {
@@ -371,7 +371,7 @@ public class TestWriteToReplica {
}
try {
- dataSet.createRbw(StorageType.DEFAULT, blocks[TEMPORARY], false);
+ dataSet.createRbw(StorageType.DEFAULT, null, blocks[TEMPORARY], false);
Assert.fail("Should not have created a replica that had created as " +
"temporary " + blocks[TEMPORARY]);
} catch (ReplicaAlreadyExistsException e) {
@@ -381,7 +381,7 @@ public class TestWriteToReplica {
0L, blocks[RBW].getNumBytes()); // expect to be successful
try {
- dataSet.createRbw(StorageType.DEFAULT, blocks[RBW], false);
+ dataSet.createRbw(StorageType.DEFAULT, null, blocks[RBW], false);
Assert.fail("Should not have created a replica that had created as RBW " +
blocks[RBW]);
} catch (ReplicaAlreadyExistsException e) {
@@ -397,7 +397,7 @@ public class TestWriteToReplica {
}
try {
- dataSet.createRbw(StorageType.DEFAULT, blocks[RWR], false);
+ dataSet.createRbw(StorageType.DEFAULT, null, blocks[RWR], false);
Assert.fail("Should not have created a replica that was waiting to be " +
"recovered " + blocks[RWR]);
} catch (ReplicaAlreadyExistsException e) {
@@ -413,7 +413,7 @@ public class TestWriteToReplica {
}
try {
- dataSet.createRbw(StorageType.DEFAULT, blocks[RUR], false);
+ dataSet.createRbw(StorageType.DEFAULT, null, blocks[RUR], false);
Assert.fail("Should not have created a replica that was under recovery " +
blocks[RUR]);
} catch (ReplicaAlreadyExistsException e) {
@@ -430,49 +430,49 @@ public class TestWriteToReplica {
e.getMessage().contains(ReplicaNotFoundException.NON_EXISTENT_REPLICA));
}
- dataSet.createRbw(StorageType.DEFAULT, blocks[NON_EXISTENT], false);
+ dataSet.createRbw(StorageType.DEFAULT, null, blocks[NON_EXISTENT], false);
}
private void testWriteToTemporary(FsDatasetImpl dataSet, ExtendedBlock[] blocks) throws IOException {
try {
- dataSet.createTemporary(StorageType.DEFAULT, blocks[FINALIZED]);
+ dataSet.createTemporary(StorageType.DEFAULT, null, blocks[FINALIZED]);
Assert.fail("Should not have created a temporary replica that was " +
"finalized " + blocks[FINALIZED]);
} catch (ReplicaAlreadyExistsException e) {
}
try {
- dataSet.createTemporary(StorageType.DEFAULT, blocks[TEMPORARY]);
+ dataSet.createTemporary(StorageType.DEFAULT, null, blocks[TEMPORARY]);
Assert.fail("Should not have created a replica that had created as" +
"temporary " + blocks[TEMPORARY]);
} catch (ReplicaAlreadyExistsException e) {
}
try {
- dataSet.createTemporary(StorageType.DEFAULT, blocks[RBW]);
+ dataSet.createTemporary(StorageType.DEFAULT, null, blocks[RBW]);
Assert.fail("Should not have created a replica that had created as RBW " +
blocks[RBW]);
} catch (ReplicaAlreadyExistsException e) {
}
try {
- dataSet.createTemporary(StorageType.DEFAULT, blocks[RWR]);
+ dataSet.createTemporary(StorageType.DEFAULT, null, blocks[RWR]);
Assert.fail("Should not have created a replica that was waiting to be " +
"recovered " + blocks[RWR]);
} catch (ReplicaAlreadyExistsException e) {
}
try {
- dataSet.createTemporary(StorageType.DEFAULT, blocks[RUR]);
+ dataSet.createTemporary(StorageType.DEFAULT, null, blocks[RUR]);
Assert.fail("Should not have created a replica that was under recovery " +
blocks[RUR]);
} catch (ReplicaAlreadyExistsException e) {
}
- dataSet.createTemporary(StorageType.DEFAULT, blocks[NON_EXISTENT]);
+ dataSet.createTemporary(StorageType.DEFAULT, null, blocks[NON_EXISTENT]);
try {
- dataSet.createTemporary(StorageType.DEFAULT, blocks[NON_EXISTENT]);
+ dataSet.createTemporary(StorageType.DEFAULT, null, blocks[NON_EXISTENT]);
Assert.fail("Should not have created a replica that had already been "
+ "created " + blocks[NON_EXISTENT]);
} catch (Exception e) {
@@ -485,7 +485,8 @@ public class TestWriteToReplica {
blocks[NON_EXISTENT].setGenerationStamp(newGenStamp);
try {
ReplicaInPipeline replicaInfo =
- dataSet.createTemporary(StorageType.DEFAULT, blocks[NON_EXISTENT]).getReplica();
+ dataSet.createTemporary(StorageType.DEFAULT, null,
+ blocks[NON_EXISTENT]).getReplica();
Assert.assertTrue(replicaInfo.getGenerationStamp() == newGenStamp);
Assert.assertTrue(
replicaInfo.getBlockId() == blocks[NON_EXISTENT].getBlockId());
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3954cca/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeStorageDirectives.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeStorageDirectives.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeStorageDirectives.java
new file mode 100644
index 0000000..e0f7426
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeStorageDirectives.java
@@ -0,0 +1,330 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.namenode;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.ReconfigurationException;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.*;
+import org.apache.hadoop.hdfs.protocol.*;
+import org.apache.hadoop.hdfs.server.blockmanagement.*;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.RoundRobinVolumeChoosingPolicy;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.VolumeChoosingPolicy;
+import org.apache.hadoop.net.Node;
+import org.junit.After;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.*;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Test to ensure that the StorageType and StorageID sent from Namenode
+ * to DFSClient are respected.
+ */
+public class TestNamenodeStorageDirectives {
+ public static final Logger LOG =
+ LoggerFactory.getLogger(TestNamenodeStorageDirectives.class);
+
+ private static final int BLOCK_SIZE = 512;
+
+ private MiniDFSCluster cluster;
+
+ @After
+ public void tearDown() {
+ shutdown();
+ }
+
+ private void startDFSCluster(int numNameNodes, int numDataNodes,
+ int storagePerDataNode, StorageType[][] storageTypes)
+ throws IOException {
+ startDFSCluster(numNameNodes, numDataNodes, storagePerDataNode,
+ storageTypes, RoundRobinVolumeChoosingPolicy.class,
+ BlockPlacementPolicyDefault.class);
+ }
+
+ private void startDFSCluster(int numNameNodes, int numDataNodes,
+ int storagePerDataNode, StorageType[][] storageTypes,
+ Class<? extends VolumeChoosingPolicy> volumeChoosingPolicy,
+ Class<? extends BlockPlacementPolicy> blockPlacementPolicy) throws
+ IOException {
+ shutdown();
+ Configuration conf = new Configuration();
+ conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
+
+ /*
+ * Lower the DN heartbeat, DF rate, and recheck interval to one second
+ * so state about failures and datanode death propagates faster.
+ */
+ conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
+ conf.setInt(DFSConfigKeys.DFS_DF_INTERVAL_KEY, 1000);
+ conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
+ 1000);
+ /* Allow 1 volume failure */
+ conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 1);
+ conf.setTimeDuration(DFSConfigKeys.DFS_DATANODE_DISK_CHECK_MIN_GAP_KEY,
+ 0, TimeUnit.MILLISECONDS);
+ conf.setClass(
+ DFSConfigKeys.DFS_DATANODE_FSDATASET_VOLUME_CHOOSING_POLICY_KEY,
+ volumeChoosingPolicy, VolumeChoosingPolicy.class);
+ conf.setClass(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
+ blockPlacementPolicy, BlockPlacementPolicy.class);
+
+ MiniDFSNNTopology nnTopology =
+ MiniDFSNNTopology.simpleFederatedTopology(numNameNodes);
+
+ cluster = new MiniDFSCluster.Builder(conf)
+ .nnTopology(nnTopology)
+ .numDataNodes(numDataNodes)
+ .storagesPerDatanode(storagePerDataNode)
+ .storageTypes(storageTypes)
+ .build();
+ cluster.waitActive();
+ }
+
+ private void shutdown() {
+ if (cluster != null) {
+ cluster.shutdown();
+ cluster = null;
+ }
+ }
+
+ private void createFile(Path path, int numBlocks, short replicateFactor)
+ throws IOException, InterruptedException, TimeoutException {
+ createFile(0, path, numBlocks, replicateFactor);
+ }
+
+ private void createFile(int fsIdx, Path path, int numBlocks,
+ short replicateFactor)
+ throws IOException, TimeoutException, InterruptedException {
+ final int seed = 0;
+ final DistributedFileSystem fs = cluster.getFileSystem(fsIdx);
+ DFSTestUtil.createFile(fs, path, BLOCK_SIZE * numBlocks,
+ replicateFactor, seed);
+ DFSTestUtil.waitReplication(fs, path, replicateFactor);
+ }
+
+ private boolean verifyFileReplicasOnStorageType(Path path, int numBlocks,
+ StorageType storageType) throws IOException {
+ MiniDFSCluster.NameNodeInfo info = cluster.getNameNodeInfos()[0];
+ InetSocketAddress addr = info.nameNode.getServiceRpcAddress();
+ assert addr.getPort() != 0;
+ DFSClient client = new DFSClient(addr, cluster.getConfiguration(0));
+
+ FileSystem fs = cluster.getFileSystem();
+
+ if (!fs.exists(path)) {
+ LOG.info("verifyFileReplicasOnStorageType: file {} does not exist", path);
+ return false;
+ }
+ long fileLength = client.getFileInfo(path.toString()).getLen();
+ int foundBlocks = 0;
+ LocatedBlocks locatedBlocks =
+ client.getLocatedBlocks(path.toString(), 0, fileLength);
+ for (LocatedBlock locatedBlock : locatedBlocks.getLocatedBlocks()) {
+ for (StorageType st : locatedBlock.getStorageTypes()) {
+ if (st == storageType) {
+ foundBlocks++;
+ }
+ }
+ }
+
+ LOG.info("Found {}/{} blocks on StorageType {}",
+ foundBlocks, numBlocks, storageType);
+ final boolean isValid = foundBlocks >= numBlocks;
+ return isValid;
+ }
+
+ private void testStorageTypes(StorageType[][] storageTypes,
+ String storagePolicy, StorageType[] expectedStorageTypes,
+ StorageType[] unexpectedStorageTypes) throws ReconfigurationException,
+ InterruptedException, TimeoutException, IOException {
+ final int numDataNodes = storageTypes.length;
+ final int storagePerDataNode = storageTypes[0].length;
+ startDFSCluster(1, numDataNodes, storagePerDataNode, storageTypes);
+ cluster.getFileSystem(0).setStoragePolicy(new Path("/"), storagePolicy);
+ Path testFile = new Path("/test");
+ final short replFactor = 2;
+ final int numBlocks = 10;
+ createFile(testFile, numBlocks, replFactor);
+
+ for (StorageType storageType: expectedStorageTypes) {
+ assertTrue(verifyFileReplicasOnStorageType(testFile, numBlocks,
+ storageType));
+ }
+
+ for (StorageType storageType: unexpectedStorageTypes) {
+ assertFalse(verifyFileReplicasOnStorageType(testFile, numBlocks,
+ storageType));
+ }
+ }
+
+ /**
+ * Verify that writing to SSD and DISK will write to the correct Storage
+ * Types.
+ * @throws IOException
+ */
+ @Test(timeout=60000)
+ public void testTargetStorageTypes() throws ReconfigurationException,
+ InterruptedException, TimeoutException, IOException {
+ // ONE_SSD: one replica on SSD, the rest on DISK, and nothing else.
+ testStorageTypes(new StorageType[][]{
+ {StorageType.SSD, StorageType.DISK},
+ {StorageType.SSD, StorageType.DISK}},
+ "ONE_SSD",
+ new StorageType[]{StorageType.SSD, StorageType.DISK},
+ new StorageType[]{StorageType.RAM_DISK, StorageType.ARCHIVE});
+ // only on SSD.
+ testStorageTypes(new StorageType[][]{
+ {StorageType.SSD, StorageType.DISK},
+ {StorageType.SSD, StorageType.DISK}},
+ "ALL_SSD",
+ new StorageType[]{StorageType.SSD},
+ new StorageType[]{StorageType.RAM_DISK, StorageType.DISK,
+ StorageType.ARCHIVE});
+ // only on SSD.
+ testStorageTypes(new StorageType[][]{
+ {StorageType.SSD, StorageType.DISK, StorageType.DISK},
+ {StorageType.SSD, StorageType.DISK, StorageType.DISK},
+ {StorageType.DISK, StorageType.DISK, StorageType.DISK}},
+ "ALL_SSD",
+ new StorageType[]{StorageType.SSD},
+ new StorageType[]{StorageType.RAM_DISK, StorageType.DISK,
+ StorageType.ARCHIVE});
+
+ // DISK and not anything else.
+ testStorageTypes(new StorageType[][] {
+ {StorageType.RAM_DISK, StorageType.SSD},
+ {StorageType.SSD, StorageType.DISK},
+ {StorageType.SSD, StorageType.DISK}},
+ "HOT",
+ new StorageType[]{StorageType.DISK},
+ new StorageType[] {StorageType.RAM_DISK, StorageType.SSD,
+ StorageType.ARCHIVE});
+
+ testStorageTypes(new StorageType[][] {
+ {StorageType.RAM_DISK, StorageType.SSD},
+ {StorageType.SSD, StorageType.DISK},
+ {StorageType.ARCHIVE, StorageType.ARCHIVE},
+ {StorageType.ARCHIVE, StorageType.ARCHIVE}},
+ "WARM",
+ new StorageType[]{StorageType.DISK, StorageType.ARCHIVE},
+ new StorageType[]{StorageType.RAM_DISK, StorageType.SSD});
+
+ testStorageTypes(new StorageType[][] {
+ {StorageType.RAM_DISK, StorageType.SSD},
+ {StorageType.SSD, StorageType.DISK},
+ {StorageType.ARCHIVE, StorageType.ARCHIVE},
+ {StorageType.ARCHIVE, StorageType.ARCHIVE}},
+ "COLD",
+ new StorageType[]{StorageType.ARCHIVE},
+ new StorageType[]{StorageType.RAM_DISK, StorageType.SSD,
+ StorageType.DISK});
+
+ // We wait for Lazy Persist to write to disk.
+ testStorageTypes(new StorageType[][] {
+ {StorageType.RAM_DISK, StorageType.SSD},
+ {StorageType.SSD, StorageType.DISK},
+ {StorageType.SSD, StorageType.DISK}},
+ "LAZY_PERSIST",
+ new StorageType[]{StorageType.DISK},
+ new StorageType[]{StorageType.RAM_DISK, StorageType.SSD,
+ StorageType.ARCHIVE});
+ }
+
+ /**
+ * A VolumeChoosingPolicy test stub used to verify that the storageId passed
+ * in matches the storage selected by the block placement policy.
+ * @param <V> the volume type handled by this policy.
+ */
+ private static class TestVolumeChoosingPolicy<V extends FsVolumeSpi>
+ extends RoundRobinVolumeChoosingPolicy<V> {
+ static String expectedStorageId;
+
+ @Override
+ public V chooseVolume(List<V> volumes, long replicaSize, String storageId)
+ throws IOException {
+ assertEquals(expectedStorageId, storageId);
+ return super.chooseVolume(volumes, replicaSize, storageId);
+ }
+ }
+
+ private static class TestBlockPlacementPolicy
+ extends BlockPlacementPolicyDefault {
+ static DatanodeStorageInfo[] dnStorageInfosToReturn;
+
+ @Override
+ public DatanodeStorageInfo[] chooseTarget(String srcPath, int numOfReplicas,
+ Node writer, List<DatanodeStorageInfo> chosenNodes,
+ boolean returnChosenNodes, Set<Node> excludedNodes, long blocksize,
+ final BlockStoragePolicy storagePolicy, EnumSet<AddBlockFlag> flags) {
+ return dnStorageInfosToReturn;
+ }
+ }
+
+ private DatanodeStorageInfo getDatanodeStorageInfo(int dnIndex)
+ throws UnregisteredNodeException {
+ if (cluster == null) {
+ return null;
+ }
+ DatanodeID dnId = cluster.getDataNodes().get(dnIndex).getDatanodeId();
+ DatanodeManager dnManager = cluster.getNamesystem()
+ .getBlockManager().getDatanodeManager();
+ return dnManager.getDatanode(dnId).getStorageInfos()[0];
+ }
+
+ @Test(timeout=60000)
+ public void testStorageIDBlockPlacementSpecific()
+ throws ReconfigurationException, InterruptedException, TimeoutException,
+ IOException {
+ final StorageType[][] storageTypes = {
+ {StorageType.DISK, StorageType.DISK},
+ {StorageType.DISK, StorageType.DISK},
+ {StorageType.DISK, StorageType.DISK},
+ {StorageType.DISK, StorageType.DISK},
+ {StorageType.DISK, StorageType.DISK},
+ };
+ final int numDataNodes = storageTypes.length;
+ final int storagePerDataNode = storageTypes[0].length;
+ startDFSCluster(1, numDataNodes, storagePerDataNode, storageTypes,
+ TestVolumeChoosingPolicy.class, TestBlockPlacementPolicy.class);
+ Path testFile = new Path("/test");
+ final short replFactor = 1;
+ final int numBlocks = 10;
+ DatanodeStorageInfo dnInfoToUse = getDatanodeStorageInfo(0);
+ TestBlockPlacementPolicy.dnStorageInfosToReturn =
+ new DatanodeStorageInfo[] {dnInfoToUse};
+ TestVolumeChoosingPolicy.expectedStorageId = dnInfoToUse.getStorageID();
+ // File creation invokes both BlockPlacementPolicy and VolumeChoosingPolicy,
+ // and will test that the storage ids match.
+ createFile(testFile, numBlocks, replFactor);
+ }
+}
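
For downstream implementers, a minimal sketch of how a custom policy might
honor the new storageId argument; the class name StorageIdPreferringPolicy
and the preference logic are illustrative only, not part of this patch:

    import java.io.IOException;
    import java.util.List;

    import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
    import org.apache.hadoop.hdfs.server.datanode.fsdataset.RoundRobinVolumeChoosingPolicy;

    public class StorageIdPreferringPolicy<V extends FsVolumeSpi>
        extends RoundRobinVolumeChoosingPolicy<V> {
      @Override
      public V chooseVolume(List<V> volumes, long replicaSize, String storageId)
          throws IOException {
        // Prefer the volume whose storage ID matches the namenode's hint,
        // provided it can hold the replica; otherwise fall back to round-robin.
        if (storageId != null) {
          for (V volume : volumes) {
            if (storageId.equals(volume.getStorageID())
                && volume.getAvailable() >= replicaSize) {
              return volume;
            }
          }
        }
        return super.chooseVolume(volumes, replicaSize, storageId);
      }
    }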
[28/50] [abbrv] hadoop git commit: HDFS-11722. Change Datanode file
IO profiling sampling to percentage. Contributed by Hanisha Koneru.
Posted by ae...@apache.org.
HDFS-11722. Change Datanode file IO profiling sampling to percentage. Contributed by Hanisha Koneru.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/81092b1f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/81092b1f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/81092b1f
Branch: refs/heads/HDFS-7240
Commit: 81092b1f1193cb0d4208960b51ab4ffaddeafe01
Parents: 30cd265
Author: Arpit Agarwal <ar...@apache.org>
Authored: Wed May 3 16:29:30 2017 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Wed May 3 16:29:30 2017 -0700
----------------------------------------------------------------------
.../hadoop-common/src/site/markdown/Metrics.md | 4 ++--
.../org/apache/hadoop/hdfs/DFSConfigKeys.java | 8 ++++----
.../server/blockmanagement/DatanodeManager.java | 7 ++++---
.../apache/hadoop/hdfs/server/common/Util.java | 12 ++++++------
.../hadoop/hdfs/server/datanode/DNConf.java | 7 ++++---
.../hdfs/server/datanode/FileIoProvider.java | 4 ++--
.../server/datanode/ProfilingFileIoEvents.java | 19 ++++++++++---------
.../src/main/resources/hdfs-default.xml | 11 +++++++++++
.../blockmanagement/TestSlowDiskTracker.java | 4 ++--
.../hdfs/server/datanode/TestDataNodeMXBean.java | 4 ++--
.../datanode/TestDataNodeVolumeMetrics.java | 4 ++--
.../namenode/TestNameNodeStatusMXBean.java | 5 +++--
.../hadoop/tools/TestHdfsConfigFields.java | 2 --
13 files changed, 52 insertions(+), 39 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/81092b1f/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
index a8bdbeb..336ad85 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
@@ -334,8 +334,8 @@ FsVolume
Per-volume metrics contain Datanode Volume IO related statistics. Per-volume
metrics are off by default. They can be enabled by setting `dfs.datanode
-.fileio.profiling.sampling.fraction` to a fraction between 0.0 and 1.0.
-Setting this value to 0.0 would mean profiling is not enabled. But enabling
+.fileio.profiling.sampling.percentage` to an integer value between 1 and 100.
+Setting this value to 0 disables profiling. But enabling
per-volume metrics may have a performance impact. Each metrics record
contains tags such as Hostname as additional information along with metrics.
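
As a sketch of enabling this from code (the class name EnableIoProfiling is
illustrative; the key constant is the one this patch adds to DFSConfigKeys):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;

    public class EnableIoProfiling {
      public static Configuration profiledConf() {
        Configuration conf = new Configuration();
        // Sample 50% of file IO events; 0 disables profiling and values
        // above 100 are clamped to 100.
        conf.setInt(
            DFSConfigKeys.DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_KEY,
            50);
        return conf;
      }
    }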
http://git-wip-us.apache.org/repos/asf/hadoop/blob/81092b1f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 3fa383b..0ca344c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -731,10 +731,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final boolean
DFS_DATANODE_ENABLE_FILEIO_FAULT_INJECTION_DEFAULT = false;
public static final String
- DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY =
- "dfs.datanode.fileio.profiling.sampling.fraction";
- public static final double
- DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_DEFAULT = 0.0;
+ DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_KEY =
+ "dfs.datanode.fileio.profiling.sampling.percentage";
+ public static final int
+ DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_DEFAULT = 0;
//Keys with no defaults
public static final String DFS_DATANODE_PLUGINS_KEY = "dfs.datanode.plugins";
http://git-wip-us.apache.org/repos/asf/hadoop/blob/81092b1f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index c7bdca9..a61aa78 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -215,9 +215,10 @@ public class DatanodeManager {
this.dataNodePeerStatsEnabled = conf.getBoolean(
DFSConfigKeys.DFS_DATANODE_PEER_STATS_ENABLED_KEY,
DFSConfigKeys.DFS_DATANODE_PEER_STATS_ENABLED_DEFAULT);
- this.dataNodeDiskStatsEnabled = Util.isDiskStatsEnabled(conf.getDouble(
- DFSConfigKeys.DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY,
- DFSConfigKeys.DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_DEFAULT));
+ this.dataNodeDiskStatsEnabled = Util.isDiskStatsEnabled(conf.getInt(
+ DFSConfigKeys.DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_KEY,
+ DFSConfigKeys.
+ DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_DEFAULT));
final Timer timer = new Timer();
this.slowPeerTracker = dataNodePeerStatsEnabled ?
http://git-wip-us.apache.org/repos/asf/hadoop/blob/81092b1f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java
index fdb09df..e9ceeb0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java
@@ -389,17 +389,17 @@ public final class Util {
return addrsList;
}
- public static boolean isDiskStatsEnabled(double fileIOSamplingFraction) {
+ public static boolean isDiskStatsEnabled(int fileIOSamplingPercentage) {
final boolean isEnabled;
- if (fileIOSamplingFraction < 0.000001) {
+ if (fileIOSamplingPercentage <= 0) {
LOG.info(DFSConfigKeys
- .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY + " set to "
- + fileIOSamplingFraction + ". Disabling file IO profiling");
+ .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_KEY + " set to "
+ + fileIOSamplingPercentage + ". Disabling file IO profiling");
isEnabled = false;
} else {
LOG.info(DFSConfigKeys
- .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY + " set to "
- + fileIOSamplingFraction + ". Enabling file IO profiling");
+ .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_KEY + " set to "
+ + fileIOSamplingPercentage + ". Enabling file IO profiling");
isEnabled = true;
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/81092b1f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
index 21ffccc..8e5b597 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
@@ -175,9 +175,10 @@ public class DNConf {
this.peerStatsEnabled = getConf().getBoolean(
DFSConfigKeys.DFS_DATANODE_PEER_STATS_ENABLED_KEY,
DFSConfigKeys.DFS_DATANODE_PEER_STATS_ENABLED_DEFAULT);
- this.diskStatsEnabled = Util.isDiskStatsEnabled(getConf().getDouble(
- DFSConfigKeys.DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY,
- DFSConfigKeys.DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_DEFAULT));
+ this.diskStatsEnabled = Util.isDiskStatsEnabled(getConf().getInt(
+ DFSConfigKeys.DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_KEY,
+ DFSConfigKeys.
+ DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_DEFAULT));
this.outliersReportIntervalMs = getConf().getTimeDuration(
DFS_DATANODE_OUTLIERS_REPORT_INTERVAL_KEY,
DFS_DATANODE_OUTLIERS_REPORT_INTERVAL_DEFAULT,
http://git-wip-us.apache.org/repos/asf/hadoop/blob/81092b1f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FileIoProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FileIoProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FileIoProvider.java
index 5508e0b..694eadd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FileIoProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FileIoProvider.java
@@ -62,8 +62,8 @@ import static org.apache.hadoop.hdfs.server.datanode.FileIoProvider.OPERATION.*;
*
* Behavior can be injected into these events by enabling the
* profiling and/or fault injection event hooks through
- * {@link DFSConfigKeys#DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY} and
- * {@link DFSConfigKeys#DFS_DATANODE_ENABLE_FILEIO_FAULT_INJECTION_KEY}.
+ * {@link DFSConfigKeys#DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_KEY}
+ * and {@link DFSConfigKeys#DFS_DATANODE_ENABLE_FILEIO_FAULT_INJECTION_KEY}.
* These event hooks are disabled by default.
*
* Most functions accept an optional {@link FsVolumeSpi} parameter for
http://git-wip-us.apache.org/repos/asf/hadoop/blob/81092b1f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProfilingFileIoEvents.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProfilingFileIoEvents.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProfilingFileIoEvents.java
index 35118b2..83ee5f6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProfilingFileIoEvents.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProfilingFileIoEvents.java
@@ -44,18 +44,19 @@ class ProfilingFileIoEvents {
public ProfilingFileIoEvents(@Nullable Configuration conf) {
if (conf != null) {
- double fileIOSamplingFraction = conf.getDouble(
- DFSConfigKeys.DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY,
+ int fileIOSamplingPercentage = conf.getInt(
+ DFSConfigKeys.DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_KEY,
DFSConfigKeys
- .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_DEFAULT);
- isEnabled = Util.isDiskStatsEnabled(fileIOSamplingFraction);
- if (fileIOSamplingFraction > 1) {
+ .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_DEFAULT);
+ isEnabled = Util.isDiskStatsEnabled(fileIOSamplingPercentage);
+ if (fileIOSamplingPercentage > 100) {
LOG.warn(DFSConfigKeys
- .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY +
- " value cannot be more than 1. Setting value to 1");
- fileIOSamplingFraction = 1;
+ .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_KEY +
+ " value cannot be more than 100. Setting value to 100");
+ fileIOSamplingPercentage = 100;
}
- sampleRangeMax = (int) (fileIOSamplingFraction * Integer.MAX_VALUE);
+ sampleRangeMax = (int) ((double) fileIOSamplingPercentage / 100 *
+ Integer.MAX_VALUE);
} else {
isEnabled = false;
sampleRangeMax = 0;
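
The idea behind sampleRangeMax, as a standalone sketch (class and method
names are illustrative): a uniform random int in [0, Integer.MAX_VALUE)
falls below the threshold with probability percentage/100.

    import java.util.concurrent.ThreadLocalRandom;

    public class SamplingSketch {
      static boolean sampled(int sampleRangeMax) {
        // Uniform on [0, Integer.MAX_VALUE); true with probability
        // sampleRangeMax / Integer.MAX_VALUE, i.e. percentage / 100.
        return ThreadLocalRandom.current().nextInt(Integer.MAX_VALUE)
            < sampleRangeMax;
      }
    }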
http://git-wip-us.apache.org/repos/asf/hadoop/blob/81092b1f/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 7fcea01..0f33b70 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -2022,6 +2022,17 @@
</property>
<property>
+ <name>dfs.datanode.fileio.profiling.sampling.percentage</name>
+ <value>0</value>
+ <description>
+ This setting controls the percentage of file I/O events which will be
+ profiled for DataNode disk statistics. The default value of 0 disables
+ disk statistics. Set to an integer value between 1 and 100 to enable disk
+ statistics.
+ </description>
+</property>
+
+<property>
<name>hadoop.user.group.metrics.percentiles.intervals</name>
<value></value>
<description>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/81092b1f/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSlowDiskTracker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSlowDiskTracker.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSlowDiskTracker.java
index 16dfab2..172400d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSlowDiskTracker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSlowDiskTracker.java
@@ -28,7 +28,7 @@ import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.collect.ImmutableMap;
import org.apache.hadoop.conf.Configuration;
import static org.apache.hadoop.hdfs.DFSConfigKeys
- .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY;
+ .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys
.DFS_DATANODE_OUTLIERS_REPORT_INTERVAL_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY;
@@ -80,7 +80,7 @@ public class TestSlowDiskTracker {
static {
conf = new HdfsConfiguration();
conf.setLong(DFS_HEARTBEAT_INTERVAL_KEY, 1L);
- conf.setDouble(DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY, 1.0);
+ conf.setInt(DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_KEY, 100);
conf.setTimeDuration(DFS_DATANODE_OUTLIERS_REPORT_INTERVAL_KEY,
OUTLIERS_REPORT_INTERVAL, TimeUnit.MILLISECONDS);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/81092b1f/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
index b80976a..faead18 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
@@ -218,8 +218,8 @@ public class TestDataNodeMXBean {
@Test
public void testDataNodeMXBeanSlowDisksEnabled() throws Exception {
Configuration conf = new Configuration();
- conf.setDouble(DFSConfigKeys
- .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY, 1.0);
+ conf.setInt(DFSConfigKeys
+ .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_KEY, 100);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
try {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/81092b1f/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeMetrics.java
index 03e1dee..0f41d23 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeMetrics.java
@@ -121,8 +121,8 @@ public class TestDataNodeVolumeMetrics {
private MiniDFSCluster setupClusterForVolumeMetrics() throws IOException {
Configuration conf = new HdfsConfiguration();
- conf.setDouble(DFSConfigKeys
- .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY, 1.0);
+ conf.setInt(DFSConfigKeys
+ .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_KEY, 100);
SimulatedFSDataset.setFactory(conf);
return new MiniDFSCluster.Builder(conf)
.numDataNodes(NUM_DATANODES)
http://git-wip-us.apache.org/repos/asf/hadoop/blob/81092b1f/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeStatusMXBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeStatusMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeStatusMXBean.java
index 8fe734e..f9bfc37 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeStatusMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeStatusMXBean.java
@@ -105,8 +105,9 @@ public class TestNameNodeStatusMXBean {
@Test
public void testNameNodeMXBeanSlowDisksEnabled() throws Exception {
Configuration conf = new Configuration();
- conf.setDouble(
- DFSConfigKeys.DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY, 1.0);
+ conf.setInt(
+ DFSConfigKeys.DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_KEY,
+ 100);
conf.setTimeDuration(
DFSConfigKeys.DFS_DATANODE_OUTLIERS_REPORT_INTERVAL_KEY,
1000, TimeUnit.MILLISECONDS);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/81092b1f/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
index 1fdf713..f23b266 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
@@ -105,8 +105,6 @@ public class TestHdfsConfigFields extends TestConfigurationFieldsBase {
.add(DFSConfigKeys.DFS_NAMENODE_STARTUP_KEY);
configurationPropsToSkipCompare.add(DFSConfigKeys
.DFS_DATANODE_ENABLE_FILEIO_FAULT_INJECTION_KEY);
- configurationPropsToSkipCompare.add(DFSConfigKeys
- .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY);
// Allocate
xmlPropsToSkipCompare = new HashSet<String>();
[15/50] [abbrv] hadoop git commit: HADOOP-14371. License error in
TestLoadBalancingKMSClientProvider.java. Contributed by hu xiaodong.
Posted by ae...@apache.org.
HADOOP-14371. License error in TestLoadBalancingKMSClientProvider.java. Contributed by hu xiaodong.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/20cde552
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/20cde552
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/20cde552
Branch: refs/heads/HDFS-7240
Commit: 20cde552748cc6be1c5ca590755d4e5a67baf187
Parents: b0f54ea
Author: Akira Ajisaka <aa...@apache.org>
Authored: Tue May 2 23:36:54 2017 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Tue May 2 23:36:54 2017 +0900
----------------------------------------------------------------------
.../hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/20cde552/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java
index 0499691..499b991 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java
@@ -1,4 +1,4 @@
-/** when(p1.getKMSUrl()).thenReturn("p1");
+/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
[20/50] [abbrv] hadoop git commit: HADOOP-14306. TestLocalFileSystem
tests have very low timeouts. Contributed by Eric Badger
Posted by ae...@apache.org.
HADOOP-14306. TestLocalFileSystem tests have very low timeouts. Contributed by Eric Badger
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cedaf4ca
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cedaf4ca
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cedaf4ca
Branch: refs/heads/HDFS-7240
Commit: cedaf4cab9d5013acd559dcb92d4976823ccdf85
Parents: 9f0aea0
Author: Jason Lowe <jl...@yahoo-inc.com>
Authored: Tue May 2 16:50:51 2017 -0500
Committer: Jason Lowe <jl...@yahoo-inc.com>
Committed: Tue May 2 16:50:51 2017 -0500
----------------------------------------------------------------------
.../apache/hadoop/fs/TestLocalFileSystem.java | 37 +++++++++++++-------
1 file changed, 25 insertions(+), 12 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/cedaf4ca/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
index 5da5a4a..777e5c0 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
@@ -41,7 +41,9 @@ import static org.mockito.Mockito.*;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
+import org.junit.Rule;
import org.junit.Test;
+import org.junit.rules.Timeout;
import org.mockito.internal.util.reflection.Whitebox;
@@ -57,6 +59,17 @@ public class TestLocalFileSystem {
private Configuration conf;
private LocalFileSystem fileSys;
+ /**
+ * standard test timeout: {@value}.
+ */
+ public static final int DEFAULT_TEST_TIMEOUT = 60 * 1000;
+
+ /**
+ * Set the timeout for every test.
+ */
+ @Rule
+ public Timeout testTimeout = new Timeout(DEFAULT_TEST_TIMEOUT);
+
private void cleanupFile(FileSystem fs, Path name) throws IOException {
assertTrue(fs.exists(name));
fs.delete(name, true);
@@ -82,7 +95,7 @@ public class TestLocalFileSystem {
/**
* Test the capability of setting the working directory.
*/
- @Test(timeout = 10000)
+ @Test
public void testWorkingDirectory() throws IOException {
Path origDir = fileSys.getWorkingDirectory();
Path subdir = new Path(TEST_ROOT_DIR, "new");
@@ -136,7 +149,7 @@ public class TestLocalFileSystem {
* test Syncable interface on raw local file system
* @throws IOException
*/
- @Test(timeout = 1000)
+ @Test
public void testSyncable() throws IOException {
FileSystem fs = fileSys.getRawFileSystem();
Path file = new Path(TEST_ROOT_DIR, "syncable");
@@ -169,7 +182,7 @@ public class TestLocalFileSystem {
}
}
- @Test(timeout = 10000)
+ @Test
public void testCopy() throws IOException {
Path src = new Path(TEST_ROOT_DIR, "dingo");
Path dst = new Path(TEST_ROOT_DIR, "yak");
@@ -195,7 +208,7 @@ public class TestLocalFileSystem {
}
}
- @Test(timeout = 1000)
+ @Test
public void testHomeDirectory() throws IOException {
Path home = new Path(System.getProperty("user.home"))
.makeQualified(fileSys);
@@ -203,7 +216,7 @@ public class TestLocalFileSystem {
assertEquals(home, fsHome);
}
- @Test(timeout = 1000)
+ @Test
public void testPathEscapes() throws IOException {
Path path = new Path(TEST_ROOT_DIR, "foo%bar");
writeFile(fileSys, path, 1);
@@ -212,7 +225,7 @@ public class TestLocalFileSystem {
cleanupFile(fileSys, path);
}
- @Test(timeout = 1000)
+ @Test
public void testCreateFileAndMkdirs() throws IOException {
Path test_dir = new Path(TEST_ROOT_DIR, "test_dir");
Path test_file = new Path(test_dir, "file1");
@@ -248,7 +261,7 @@ public class TestLocalFileSystem {
}
/** Test deleting a file, directory, and non-existent path */
- @Test(timeout = 1000)
+ @Test
public void testBasicDelete() throws IOException {
Path dir1 = new Path(TEST_ROOT_DIR, "dir1");
Path file1 = new Path(TEST_ROOT_DIR, "file1");
@@ -263,7 +276,7 @@ public class TestLocalFileSystem {
assertTrue("Did not delete non-empty dir", fileSys.delete(dir1));
}
- @Test(timeout = 1000)
+ @Test
public void testStatistics() throws Exception {
int fileSchemeCount = 0;
for (Statistics stats : FileSystem.getAllStatistics()) {
@@ -274,7 +287,7 @@ public class TestLocalFileSystem {
assertEquals(1, fileSchemeCount);
}
- @Test(timeout = 1000)
+ @Test
public void testHasFileDescriptor() throws IOException {
Path path = new Path(TEST_ROOT_DIR, "test-file");
writeFile(fileSys, path, 1);
@@ -288,7 +301,7 @@ public class TestLocalFileSystem {
}
}
- @Test(timeout = 1000)
+ @Test
public void testListStatusWithColons() throws IOException {
assumeNotWindows();
File colonFile = new File(TEST_ROOT_DIR, "foo:bar");
@@ -314,7 +327,7 @@ public class TestLocalFileSystem {
stats[0].getPath().toUri().getPath());
}
- @Test(timeout = 10000)
+ @Test
public void testReportChecksumFailure() throws IOException {
base.mkdirs();
assertTrue(base.exists() && base.isDirectory());
@@ -394,7 +407,7 @@ public class TestLocalFileSystem {
assertEquals(expectedAccTime, status.getAccessTime());
}
- @Test(timeout = 1000)
+ @Test
public void testSetTimes() throws Exception {
Path path = new Path(TEST_ROOT_DIR, "set-times");
writeFile(fileSys, path, 1);
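
The pattern applied above, as a standalone sketch (TimeoutRuleExample is an
illustrative name): one class-level JUnit 4 Timeout rule replaces the
brittle per-test timeout attributes.

    import org.junit.Rule;
    import org.junit.Test;
    import org.junit.rules.Timeout;

    public class TimeoutRuleExample {
      // One class-wide timeout instead of @Test(timeout=...) on each method.
      @Rule
      public Timeout testTimeout = new Timeout(60 * 1000); // milliseconds

      @Test
      public void quickTest() {
        // Runs under the 60-second class-wide timeout.
      }
    }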
[45/50] [abbrv] hadoop git commit: HADOOP-14383. Implement FileSystem
that reads from HTTP / HTTPS endpoints.
Posted by ae...@apache.org.
HADOOP-14383. Implement FileSystem that reads from HTTP / HTTPS endpoints.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ff5ec3b8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ff5ec3b8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ff5ec3b8
Branch: refs/heads/HDFS-7240
Commit: ff5ec3b841612f7f28ad8be5bbfec4168a8ac1f0
Parents: 424887e
Author: Haohui Mai <wh...@apache.org>
Authored: Thu May 4 17:27:44 2017 -0700
Committer: Haohui Mai <wh...@apache.org>
Committed: Mon May 8 15:28:45 2017 -0700
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/pom.xml | 5 +
.../hadoop/fs/http/AbstractHttpFileSystem.java | 153 +++++++++++++++++++
.../apache/hadoop/fs/http/HttpFileSystem.java | 28 ++++
.../apache/hadoop/fs/http/HttpsFileSystem.java | 28 ++++
.../org/apache/hadoop/fs/http/package-info.java | 23 +++
.../services/org.apache.hadoop.fs.FileSystem | 2 +
.../hadoop/fs/http/TestHttpFileSystem.java | 67 ++++++++
hadoop-project/pom.xml | 6 +
8 files changed, 312 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff5ec3b8/hadoop-common-project/hadoop-common/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml
index f76575d..e8b5317 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -318,6 +318,11 @@
<artifactId>aalto-xml</artifactId>
<scope>compile</scope>
</dependency>
+ <dependency>
+ <groupId>com.squareup.okhttp3</groupId>
+ <artifactId>mockwebserver</artifactId>
+ <scope>test</scope>
+ </dependency>
</dependencies>
<build>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff5ec3b8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/http/AbstractHttpFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/http/AbstractHttpFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/http/AbstractHttpFileSystem.java
new file mode 100644
index 0000000..fa0b2cf
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/http/AbstractHttpFileSystem.java
@@ -0,0 +1,153 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.http;
+
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PositionedReadable;
+import org.apache.hadoop.fs.Seekable;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.util.Progressable;
+
+import java.io.FilterInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.URI;
+import java.net.URLConnection;
+
+abstract class AbstractHttpFileSystem extends FileSystem {
+ private static final long DEFAULT_BLOCK_SIZE = 4096;
+ private static final Path WORKING_DIR = new Path("/");
+
+ private URI uri;
+
+ @Override
+ public void initialize(URI name, Configuration conf) throws IOException {
+ super.initialize(name, conf);
+ this.uri = name;
+ }
+
+ public abstract String getScheme();
+
+ @Override
+ public URI getUri() {
+ return uri;
+ }
+
+ @Override
+ public FSDataInputStream open(Path path, int bufferSize) throws IOException {
+ URLConnection conn = path.toUri().toURL().openConnection();
+ InputStream in = conn.getInputStream();
+ return new FSDataInputStream(new HttpDataInputStream(in));
+ }
+
+ @Override
+ public FSDataOutputStream create(Path path, FsPermission fsPermission,
+ boolean b, int i, short i1, long l,
+ Progressable progressable)
+ throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public FSDataOutputStream append(Path path, int i, Progressable progressable)
+ throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public boolean rename(Path path, Path path1) throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public boolean delete(Path path, boolean b) throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public FileStatus[] listStatus(Path path) throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void setWorkingDirectory(Path path) {
+ }
+
+ @Override
+ public Path getWorkingDirectory() {
+ return WORKING_DIR;
+ }
+
+ @Override
+ public boolean mkdirs(Path path, FsPermission fsPermission)
+ throws IOException {
+ return false;
+ }
+
+ @Override
+ public FileStatus getFileStatus(Path path) throws IOException {
+ return new FileStatus(-1, false, 1, DEFAULT_BLOCK_SIZE, 0, path);
+ }
+
+ private static class HttpDataInputStream extends FilterInputStream
+ implements Seekable, PositionedReadable {
+
+ HttpDataInputStream(InputStream in) {
+ super(in);
+ }
+
+ @Override
+ public int read(long position, byte[] buffer, int offset, int length)
+ throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readFully(long position, byte[] buffer, int offset, int length)
+ throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readFully(long position, byte[] buffer) throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void seek(long pos) throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public long getPos() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public boolean seekToNewSource(long targetPos) throws IOException {
+ throw new UnsupportedOperationException();
+ }
+ }
+}
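
For orientation, a minimal read-side sketch of the new filesystem. The host
and path are hypothetical; with the META-INF/services entries added below,
FileSystem.get() resolves the "http" scheme without any fs.http.impl setting.

    import java.io.InputStream;
    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.IOUtils;

    public class HttpReadExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create("http://example.org/"), conf);
        // open() streams the response body; seek() and positioned reads throw,
        // so the data must be consumed sequentially.
        try (InputStream in =
            fs.open(new Path("http://example.org/data.txt"), 4096)) {
          IOUtils.copyBytes(in, System.out, 4096, false);
        }
      }
    }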
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff5ec3b8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/http/HttpFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/http/HttpFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/http/HttpFileSystem.java
new file mode 100644
index 0000000..a4d1505
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/http/HttpFileSystem.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.http;
+
+/**
+ * A FileSystem that reads from an HTTP endpoint.
+ */
+public class HttpFileSystem extends AbstractHttpFileSystem {
+ @Override
+ public String getScheme() {
+ return "http";
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff5ec3b8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/http/HttpsFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/http/HttpsFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/http/HttpsFileSystem.java
new file mode 100644
index 0000000..88e0968
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/http/HttpsFileSystem.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.http;
+
+/**
+ * A FileSystem that reads from an HTTPS endpoint.
+ */
+public class HttpsFileSystem extends AbstractHttpFileSystem {
+ @Override
+ public String getScheme() {
+ return "https";
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff5ec3b8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/http/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/http/package-info.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/http/package-info.java
new file mode 100644
index 0000000..a5b5206
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/http/package-info.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * FileSystem implementations that allow Hadoop to read directly from
+ * HTTP / HTTPS endpoints.
+ */
+package org.apache.hadoop.fs.http;
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff5ec3b8/hadoop-common-project/hadoop-common/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem b/hadoop-common-project/hadoop-common/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
index 17ffa7f..cbf2d6d 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
+++ b/hadoop-common-project/hadoop-common/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
@@ -17,3 +17,5 @@ org.apache.hadoop.fs.LocalFileSystem
org.apache.hadoop.fs.viewfs.ViewFileSystem
org.apache.hadoop.fs.ftp.FTPFileSystem
org.apache.hadoop.fs.HarFileSystem
+org.apache.hadoop.fs.http.HttpFileSystem
+org.apache.hadoop.fs.http.HttpsFileSystem
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff5ec3b8/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/http/TestHttpFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/http/TestHttpFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/http/TestHttpFileSystem.java
new file mode 100644
index 0000000..0902c04
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/http/TestHttpFileSystem.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.http;
+
+import okhttp3.mockwebserver.MockResponse;
+import okhttp3.mockwebserver.MockWebServer;
+import okhttp3.mockwebserver.RecordedRequest;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.IOUtils;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.nio.charset.StandardCharsets;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * Testing HttpFileSystem.
+ */
+public class TestHttpFileSystem {
+ @Test
+ public void testHttpFileSystem() throws IOException, URISyntaxException,
+ InterruptedException {
+ Configuration conf = new Configuration(false);
+ conf.set("fs.http.impl", HttpFileSystem.class.getCanonicalName());
+ final String data = "foo";
+
+ try (MockWebServer server = new MockWebServer()) {
+ server.enqueue(new MockResponse().setBody(data));
+ server.start();
+ URI uri = URI.create(String.format("http://%s:%d", server.getHostName(),
+ server.getPort()));
+ FileSystem fs = FileSystem.get(uri, conf);
+ try (InputStream is = fs.open(
+ new Path(new URL(uri.toURL(), "/foo").toURI()),
+ 4096)) {
+ byte[] buf = new byte[data.length()];
+ IOUtils.readFully(is, buf, 0, buf.length);
+ assertEquals(data, new String(buf, StandardCharsets.UTF_8));
+ }
+ RecordedRequest req = server.takeRequest();
+ assertEquals("/foo", req.getPath());
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff5ec3b8/hadoop-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index fdb4fb1..c9b6522 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -146,6 +146,12 @@
<version>2.4.0</version>
</dependency>
<dependency>
+ <groupId>com.squareup.okhttp3</groupId>
+ <artifactId>mockwebserver</artifactId>
+ <version>3.7.0</version>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
<groupId>jdiff</groupId>
<artifactId>jdiff</artifactId>
<version>${jdiff.version}</version>
[18/50] [abbrv] hadoop git commit: HADOOP-14281. Fix TestKafkaMetrics#testPutMetrics. Contributed by Alison Yu.
Posted by ae...@apache.org.
HADOOP-14281. Fix TestKafkaMetrics#testPutMetrics. Contributed by Alison Yu.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b062b323
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b062b323
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b062b323
Branch: refs/heads/HDFS-7240
Commit: b062b323b7f48343c379520f5380e1a63dbcc7a4
Parents: dcc292d
Author: Andrew Wang <wa...@apache.org>
Authored: Tue May 2 11:49:19 2017 -0700
Committer: Andrew Wang <wa...@apache.org>
Committed: Tue May 2 11:49:19 2017 -0700
----------------------------------------------------------------------
.../java/org/apache/hadoop/metrics2/impl/TestKafkaMetrics.java | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b062b323/hadoop-tools/hadoop-kafka/src/test/java/org/apache/hadoop/metrics2/impl/TestKafkaMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-kafka/src/test/java/org/apache/hadoop/metrics2/impl/TestKafkaMetrics.java b/hadoop-tools/hadoop-kafka/src/test/java/org/apache/hadoop/metrics2/impl/TestKafkaMetrics.java
index 8479298..bee6aaa 100644
--- a/hadoop-tools/hadoop-kafka/src/test/java/org/apache/hadoop/metrics2/impl/TestKafkaMetrics.java
+++ b/hadoop-tools/hadoop-kafka/src/test/java/org/apache/hadoop/metrics2/impl/TestKafkaMetrics.java
@@ -155,7 +155,7 @@ public class TestKafkaMetrics {
Date currDate = new Date(timestamp);
SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd");
String date = dateFormat.format(currDate);
- SimpleDateFormat timeFormat = new SimpleDateFormat("hh:mm:ss");
+ SimpleDateFormat timeFormat = new SimpleDateFormat("HH:mm:ss");
String time = timeFormat.format(currDate);
String hostname = new String("null");
try {
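
The one-character fix matters because SimpleDateFormat's "hh" is the 12-hour
clock (01-12) while "HH" is the 24-hour clock (00-23), so a test formatting
with "hh" disagrees with the sink's 24-hour output whenever the test runs in
the afternoon. A quick illustration (UTC is pinned so the comments hold):

    import java.text.SimpleDateFormat;
    import java.util.Date;
    import java.util.TimeZone;

    SimpleDateFormat twelveHour = new SimpleDateFormat("hh:mm:ss");
    SimpleDateFormat twentyFourHour = new SimpleDateFormat("HH:mm:ss");
    twelveHour.setTimeZone(TimeZone.getTimeZone("UTC"));
    twentyFourHour.setTimeZone(TimeZone.getTimeZone("UTC"));
    Date evening = new Date(1493758800000L);        // 2017-05-02 21:00:00 UTC
    // twelveHour.format(evening)     -> "09:00:00" (no AM/PM marker, ambiguous)
    // twentyFourHour.format(evening) -> "21:00:00" (matches the sink's format)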
[50/50] [abbrv] hadoop git commit: Merge branch 'trunk' into HDFS-7240
Posted by ae...@apache.org.
Merge branch 'trunk' into HDFS-7240
Conflicts:
hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/86414507
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/86414507
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/86414507
Branch: refs/heads/HDFS-7240
Commit: 86414507f4f584ddde97ff5f1d9e38204b36f77a
Parents: f713452 54fd0e4
Author: Anu Engineer <ae...@apache.org>
Authored: Mon May 8 22:42:21 2017 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Mon May 8 22:42:21 2017 -0700
----------------------------------------------------------------------
dev-support/docker/Dockerfile | 2 +
hadoop-build-tools/pom.xml | 10 +
.../hadoop-client-minicluster/pom.xml | 4 -
hadoop-common-project/hadoop-common/pom.xml | 5 +
.../src/main/conf/log4j.properties | 14 +
.../org/apache/hadoop/conf/ConfigRedactor.java | 5 +-
.../hadoop/crypto/CryptoOutputStream.java | 4 +-
.../fs/CommonConfigurationKeysPublic.java | 17 +-
.../java/org/apache/hadoop/fs/CreateFlag.java | 8 +-
.../apache/hadoop/fs/FSDataOutputStream.java | 10 +-
.../java/org/apache/hadoop/fs/FileSystem.java | 2 +-
.../java/org/apache/hadoop/fs/FileUtil.java | 5 +-
.../apache/hadoop/fs/RawLocalFileSystem.java | 2 +-
.../apache/hadoop/fs/StreamCapabilities.java | 67 ++
.../hadoop/fs/http/AbstractHttpFileSystem.java | 153 +++
.../apache/hadoop/fs/http/HttpFileSystem.java | 28 +
.../apache/hadoop/fs/http/HttpsFileSystem.java | 28 +
.../org/apache/hadoop/fs/http/package-info.java | 23 +
.../java/org/apache/hadoop/ha/NodeFencer.java | 3 +-
.../org/apache/hadoop/ha/PowerShellFencer.java | 154 +++
.../org/apache/hadoop/http/HttpServer2.java | 48 +-
.../hadoop/io/erasurecode/CodecRegistry.java | 176 +++
.../apache/hadoop/io/erasurecode/CodecUtil.java | 82 +-
.../apache/hadoop/io/erasurecode/ECSchema.java | 40 +-
.../io/erasurecode/ErasureCodeConstants.java | 1 +
.../rawcoder/DummyRawErasureCoderFactory.java | 12 +
.../NativeRSRawErasureCoderFactory.java | 13 +
.../NativeXORRawErasureCoderFactory.java | 13 +
.../rawcoder/RSLegacyRawDecoder.java | 270 +++++
.../rawcoder/RSLegacyRawEncoder.java | 129 +++
.../RSLegacyRawErasureCoderFactory.java | 51 +
.../rawcoder/RSRawDecoderLegacy.java | 270 -----
.../rawcoder/RSRawEncoderLegacy.java | 129 ---
.../rawcoder/RSRawErasureCoderFactory.java | 13 +
.../RSRawErasureCoderFactoryLegacy.java | 38 -
.../rawcoder/RawErasureCoderFactory.java | 12 +
.../rawcoder/XORRawErasureCoderFactory.java | 13 +
.../org/apache/hadoop/ipc/CallQueueManager.java | 1 +
.../apache/hadoop/ipc/DecayRpcScheduler.java | 31 +-
.../apache/hadoop/ipc/DefaultRpcScheduler.java | 4 +
.../org/apache/hadoop/ipc/RpcScheduler.java | 2 +
.../apache/hadoop/metrics2/AbstractMetric.java | 16 +-
.../org/apache/hadoop/metrics2/MetricsTag.java | 13 +-
.../metrics2/impl/AbstractMetricsRecord.java | 16 +-
.../org/apache/hadoop/metrics2/impl/MsInfo.java | 9 +-
.../hadoop/metrics2/lib/MetricsInfoImpl.java | 11 +-
.../hadoop/metrics2/lib/MetricsRegistry.java | 20 +-
.../apache/hadoop/metrics2/package-info.java | 2 +-
.../hadoop/metrics2/source/JvmMetricsInfo.java | 9 +-
.../hadoop/metrics2/util/MetricsCache.java | 18 +-
.../org/apache/hadoop/net/NetworkTopology.java | 12 +-
.../apache/hadoop/security/ssl/SSLFactory.java | 13 +-
.../hadoop/service/ServiceStateException.java | 66 +-
.../launcher/AbstractLaunchableService.java | 78 ++
.../HadoopUncaughtExceptionHandler.java | 129 +++
.../service/launcher/InterruptEscalator.java | 216 ++++
.../hadoop/service/launcher/IrqHandler.java | 178 +++
.../service/launcher/LaunchableService.java | 95 ++
.../service/launcher/LauncherArguments.java | 59 +
.../service/launcher/LauncherExitCodes.java | 183 +++
.../launcher/ServiceLaunchException.java | 81 ++
.../service/launcher/ServiceLauncher.java | 1044 ++++++++++++++++++
.../service/launcher/ServiceShutdownHook.java | 112 ++
.../hadoop/service/launcher/package-info.java | 462 ++++++++
.../apache/hadoop/util/ExitCodeProvider.java | 35 +
.../java/org/apache/hadoop/util/ExitUtil.java | 257 ++++-
.../hadoop/util/GenericOptionsParser.java | 125 ++-
.../org/apache/hadoop/util/StringUtils.java | 52 +-
.../org/apache/hadoop/util/SysInfoWindows.java | 2 +-
.../services/org.apache.hadoop.fs.FileSystem | 2 +
....erasurecode.rawcoder.RawErasureCoderFactory | 18 +
.../src/main/resources/core-default.xml | 35 +-
.../src/site/markdown/DeprecatedProperties.md | 1 +
.../src/site/markdown/FileSystemShell.md | 1 -
.../hadoop-common/src/site/markdown/Metrics.md | 4 +-
.../src/site/markdown/RackAwareness.md | 12 +-
.../apache/hadoop/conf/TestConfigRedactor.java | 22 +-
.../kms/TestLoadBalancingKMSClientProvider.java | 2 +-
.../apache/hadoop/fs/TestLocalFileSystem.java | 37 +-
.../apache/hadoop/fs/TestSymlinkLocalFS.java | 6 +-
.../contract/rawlocal/RawlocalFSContract.java | 2 +-
.../hadoop/fs/http/TestHttpFileSystem.java | 67 ++
.../fs/viewfs/ViewFileSystemBaseTest.java | 6 +-
.../apache/hadoop/fs/viewfs/ViewFsBaseTest.java | 2 +-
.../apache/hadoop/http/TestSSLHttpServer.java | 21 +-
.../erasurecode/TestCodecRawCoderMapping.java | 20 +-
.../io/erasurecode/TestCodecRegistry.java | 170 +++
.../hadoop/io/erasurecode/TestECSchema.java | 47 +-
.../coder/TestHHXORErasureCoder.java | 2 +-
.../erasurecode/coder/TestRSErasureCoder.java | 2 +-
.../rawcoder/RawErasureCoderBenchmark.java | 2 +-
.../erasurecode/rawcoder/TestDummyRawCoder.java | 4 +-
.../rawcoder/TestNativeRSRawCoder.java | 4 +-
.../rawcoder/TestNativeXORRawCoder.java | 4 +-
.../rawcoder/TestRSLegacyRawCoder.java | 33 +
.../io/erasurecode/rawcoder/TestRSRawCoder.java | 4 +-
.../rawcoder/TestRSRawCoderInteroperable1.java | 4 +-
.../rawcoder/TestRSRawCoderInteroperable2.java | 4 +-
.../rawcoder/TestRSRawCoderLegacy.java | 33 -
.../erasurecode/rawcoder/TestRawCoderBase.java | 16 +-
.../erasurecode/rawcoder/TestXORRawCoder.java | 4 +-
.../rawcoder/TestXORRawCoderInteroperable1.java | 4 +-
.../rawcoder/TestXORRawCoderInteroperable2.java | 4 +-
.../java/org/apache/hadoop/ipc/TestIPC.java | 2 +-
.../hadoop/security/ssl/TestSSLFactory.java | 10 +-
.../apache/hadoop/service/BreakableService.java | 23 +-
.../AbstractServiceLauncherTestBase.java | 317 ++++++
.../launcher/ExitTrackingServiceLauncher.java | 59 +
.../service/launcher/TestServiceConf.java | 146 +++
.../launcher/TestServiceInterruptHandling.java | 118 ++
.../service/launcher/TestServiceLauncher.java | 213 ++++
.../TestServiceLauncherCreationFailures.java | 83 ++
.../TestServiceLauncherInnerMethods.java | 95 ++
.../ExceptionInExecuteLaunchableService.java | 96 ++
.../testservices/FailInConstructorService.java | 33 +
.../testservices/FailInInitService.java | 38 +
.../testservices/FailInStartService.java | 37 +
.../testservices/FailingStopInStartService.java | 47 +
.../testservices/FailureTestService.java | 55 +
.../InitInConstructorLaunchableService.java | 63 ++
.../testservices/LaunchableRunningService.java | 111 ++
.../testservices/NoArgsAllowedService.java | 64 ++
.../testservices/NullBindLaunchableService.java | 46 +
.../launcher/testservices/RunningService.java | 84 ++
.../StoppingInStartLaunchableService.java | 49 +
.../StringConstructorOnlyService.java | 39 +
.../apache/hadoop/test/GenericTestUtils.java | 34 +
.../src/test/resources/contract/rawlocal.xml | 32 +-
hadoop-common-project/hadoop-kms/pom.xml | 4 -
.../crypto/key/kms/server/KMSWebServer.java | 7 +-
.../java/org/apache/hadoop/hdfs/DFSClient.java | 23 +-
.../org/apache/hadoop/hdfs/DFSOutputStream.java | 15 +-
.../hadoop/hdfs/DFSStripedOutputStream.java | 43 +-
.../org/apache/hadoop/hdfs/DataStreamer.java | 35 +-
.../hadoop/hdfs/DistributedFileSystem.java | 82 +-
.../apache/hadoop/hdfs/StripedDataStreamer.java | 10 +-
.../apache/hadoop/hdfs/client/HdfsAdmin.java | 12 +
.../hdfs/client/HdfsClientConfigKeys.java | 4 +
.../hdfs/protocol/AddingECPolicyResponse.java | 66 ++
.../hdfs/protocol/ClientDatanodeProtocol.java | 5 +
.../hadoop/hdfs/protocol/ClientProtocol.java | 21 +-
.../hdfs/protocol/DatanodeVolumeInfo.java | 122 ++
.../hdfs/protocol/ErasureCodingPolicy.java | 53 +-
.../hadoop/hdfs/protocol/HdfsFileStatus.java | 22 +-
.../hdfs/protocol/IllegalECPolicyException.java | 34 +
.../datatransfer/DataTransferProtocol.java | 19 +-
.../hdfs/protocol/datatransfer/Sender.java | 29 +-
.../ClientDatanodeProtocolTranslatorPB.java | 29 +
.../ClientNamenodeProtocolTranslatorPB.java | 25 +
.../hadoop/hdfs/protocolPB/PBHelperClient.java | 47 +
.../token/block/BlockTokenIdentifier.java | 66 +-
.../apache/hadoop/hdfs/util/ECPolicyLoader.java | 325 ++++++
.../src/main/proto/ClientDatanodeProtocol.proto | 9 +
.../src/main/proto/ClientNamenodeProtocol.proto | 3 +
.../src/main/proto/datatransfer.proto | 4 +
.../src/main/proto/erasurecoding.proto | 8 +
.../src/main/proto/hdfs.proto | 21 +
.../hdfs/protocol/TestErasureCodingPolicy.java | 86 ++
.../hadoop/hdfs/util/TestECPolicyLoader.java | 313 ++++++
hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml | 8 -
.../fs/http/server/HttpFSServerWebServer.java | 7 +-
.../src/main/native/libhdfs-tests/expect.h | 18 +
.../libhdfs-tests/test_libhdfs_threaded.c | 16 +
.../src/main/native/libhdfs/exception.c | 75 +-
.../src/main/native/libhdfs/exception.h | 16 +-
.../src/main/native/libhdfs/hdfs.c | 90 ++
.../src/main/native/libhdfs/include/hdfs/hdfs.h | 61 +
.../src/main/native/libhdfs/jni_helper.c | 95 +-
.../src/main/native/libhdfs/jni_helper.h | 35 +
.../libhdfs/os/posix/thread_local_storage.c | 54 +-
.../native/libhdfs/os/thread_local_storage.h | 61 +-
.../libhdfs/os/windows/thread_local_storage.c | 50 +-
.../src/main/conf/user_ec_policies.xml.template | 71 ++
.../org/apache/hadoop/hdfs/DFSConfigKeys.java | 17 +-
.../hadoop/hdfs/net/DFSNetworkTopology.java | 24 +-
.../hadoop/hdfs/net/DFSTopologyNodeImpl.java | 137 +++
.../hdfs/protocol/datatransfer/Receiver.java | 20 +-
...tDatanodeProtocolServerSideTranslatorPB.java | 28 +
...tNamenodeProtocolServerSideTranslatorPB.java | 31 +-
.../hadoop/hdfs/qjournal/server/JNStorage.java | 25 +-
.../hadoop/hdfs/qjournal/server/Journal.java | 21 +-
.../hdfs/qjournal/server/JournalNodeSyncer.java | 104 +-
.../block/BlockPoolTokenSecretManager.java | 34 +-
.../token/block/BlockTokenSecretManager.java | 70 +-
.../hadoop/hdfs/server/balancer/Dispatcher.java | 47 +-
.../hadoop/hdfs/server/balancer/KeyManager.java | 8 +-
.../hdfs/server/balancer/NameNodeConnector.java | 13 +-
.../server/blockmanagement/BlockManager.java | 44 +-
.../BlockPlacementPolicyDefault.java | 36 +-
.../blockmanagement/DatanodeDescriptor.java | 68 +-
.../server/blockmanagement/DatanodeManager.java | 26 +-
.../blockmanagement/LowRedundancyBlocks.java | 6 +-
.../apache/hadoop/hdfs/server/common/Util.java | 12 +-
.../hdfs/server/datanode/BPOfferService.java | 3 +-
.../hdfs/server/datanode/BPServiceActor.java | 3 +
.../hdfs/server/datanode/BlockReceiver.java | 12 +-
.../hadoop/hdfs/server/datanode/DNConf.java | 7 +-
.../hadoop/hdfs/server/datanode/DataNode.java | 80 +-
.../server/datanode/DataNodeFaultInjector.java | 6 +-
.../hdfs/server/datanode/DataXceiver.java | 113 +-
.../hdfs/server/datanode/FileIoProvider.java | 4 +-
.../server/datanode/ProfilingFileIoEvents.java | 19 +-
.../erasurecode/ErasureCodingWorker.java | 3 +-
.../erasurecode/StripedBlockReader.java | 4 +-
.../erasurecode/StripedBlockWriter.java | 11 +-
.../erasurecode/StripedReconstructionInfo.java | 16 +-
.../datanode/erasurecode/StripedWriter.java | 5 +-
.../AvailableSpaceVolumeChoosingPolicy.java | 20 +-
.../server/datanode/fsdataset/FsDatasetSpi.java | 6 +-
.../RoundRobinVolumeChoosingPolicy.java | 2 +-
.../fsdataset/VolumeChoosingPolicy.java | 5 +-
.../datanode/fsdataset/impl/FsDatasetImpl.java | 21 +-
.../datanode/fsdataset/impl/FsVolumeList.java | 19 +-
.../datanode/web/SimpleHttpProxyHandler.java | 4 +-
.../namenode/ErasureCodingPolicyManager.java | 95 +-
.../server/namenode/FSDirErasureCodingOp.java | 13 +-
.../hdfs/server/namenode/FSDirSnapshotOp.java | 7 +-
.../hdfs/server/namenode/FSDirWriteFileOp.java | 31 +-
.../hdfs/server/namenode/FSEditLogLoader.java | 5 +-
.../hadoop/hdfs/server/namenode/FSImage.java | 52 +-
.../server/namenode/FSImageFormatPBINode.java | 3 +-
.../hdfs/server/namenode/FSNamesystem.java | 51 +-
.../hdfs/server/namenode/FSNamesystemLock.java | 121 +-
.../hdfs/server/namenode/INodeDirectory.java | 8 +-
.../hadoop/hdfs/server/namenode/INodeFile.java | 7 +-
.../hdfs/server/namenode/INodesInPath.java | 78 +-
.../hdfs/server/namenode/LeaseManager.java | 103 +-
.../hdfs/server/namenode/NameNodeRpcServer.java | 10 +-
.../server/namenode/ha/StandbyCheckpointer.java | 7 +-
.../snapshot/DirectorySnapshottableFeature.java | 15 +-
.../namenode/snapshot/SnapshotManager.java | 24 +-
.../org/apache/hadoop/hdfs/tools/DFSAdmin.java | 30 +-
.../org/apache/hadoop/hdfs/tools/ECAdmin.java | 63 ++
.../src/main/resources/hdfs-default.xml | 42 +-
.../src/main/webapps/hdfs/dfshealth.html | 2 +-
.../src/main/webapps/hdfs/dfshealth.js | 4 +-
.../src/site/markdown/HDFSCommands.md | 2 +
.../src/site/markdown/HDFSErasureCoding.md | 28 +-
.../org/apache/hadoop/TestRefreshCallQueue.java | 88 +-
.../org/apache/hadoop/hdfs/DFSTestUtil.java | 9 +-
.../hadoop/hdfs/TestBlockStoragePolicy.java | 143 +++
.../apache/hadoop/hdfs/TestDFSOutputStream.java | 25 +
.../hadoop/hdfs/TestDFSStripedInputStream.java | 2 +-
.../hadoop/hdfs/TestDFSStripedOutputStream.java | 37 +-
.../TestDFSStripedOutputStreamWithFailure.java | 2 +-
.../hadoop/hdfs/TestDataTransferProtocol.java | 3 +-
.../hadoop/hdfs/TestDistributedFileSystem.java | 39 +
.../apache/hadoop/hdfs/TestEncryptionZones.java | 10 +-
.../hadoop/hdfs/TestErasureCodingPolicies.java | 58 +
.../org/apache/hadoop/hdfs/TestGetBlocks.java | 77 +-
.../org/apache/hadoop/hdfs/TestHdfsAdmin.java | 33 +
.../hadoop/hdfs/TestReconstructStripedFile.java | 2 +-
.../TestUnsetAndChangeDirectoryEcPolicy.java | 2 +-
.../hdfs/TestWriteBlockGetsBlockLengthHint.java | 6 +-
.../hadoop/hdfs/protocolPB/TestPBHelper.java | 35 +
.../hdfs/qjournal/TestJournalNodeSync.java | 1 +
.../security/token/block/TestBlockToken.java | 186 +++-
.../hdfs/server/balancer/TestBalancer.java | 100 +-
.../server/balancer/TestBalancerRPCDelay.java | 32 +
.../blockmanagement/BlockManagerTestUtil.java | 21 +
.../blockmanagement/TestSlowDiskTracker.java | 4 +-
.../server/datanode/BlockReportTestBase.java | 2 +-
.../server/datanode/SimulatedFSDataset.java | 19 +-
.../hdfs/server/datanode/TestBlockRecovery.java | 6 +-
.../server/datanode/TestBlockReplacement.java | 2 +-
.../server/datanode/TestDataNodeMXBean.java | 4 +-
.../server/datanode/TestDataNodeMetrics.java | 7 +-
.../datanode/TestDataNodeVolumeMetrics.java | 4 +-
.../TestDataXceiverLazyPersistHint.java | 4 +-
.../hdfs/server/datanode/TestDiskError.java | 5 +-
.../server/datanode/TestFsDatasetCache.java | 62 +-
.../server/datanode/TestSimulatedFSDataset.java | 4 +-
.../extdataset/ExternalDatasetImpl.java | 10 +-
.../TestAvailableSpaceVolumeChoosingPolicy.java | 76 +-
.../TestRoundRobinVolumeChoosingPolicy.java | 29 +-
.../fsdataset/impl/TestFsDatasetImpl.java | 4 +-
.../fsdataset/impl/TestFsVolumeList.java | 2 +-
.../fsdataset/impl/TestWriteToReplica.java | 29 +-
.../hdfs/server/namenode/NameNodeAdapter.java | 31 +-
.../hdfs/server/namenode/TestCheckpoint.java | 4 +-
.../namenode/TestDecommissioningStatus.java | 113 +-
.../TestDefaultBlockPlacementPolicy.java | 46 +
.../server/namenode/TestEnabledECPolicies.java | 11 +-
.../server/namenode/TestFSNamesystemLock.java | 16 +-
.../hdfs/server/namenode/TestLeaseManager.java | 286 ++++-
.../namenode/TestMetadataVersionOutput.java | 4 +-
.../namenode/TestNameNodeStatusMXBean.java | 5 +-
.../namenode/TestNamenodeStorageDirectives.java | 330 ++++++
.../namenode/TestReconstructStripedBlocks.java | 2 +
.../hdfs/server/namenode/TestStartup.java | 5 +-
.../namenode/ha/TestStandbyCheckpoints.java | 60 +
.../snapshot/TestOpenFilesWithSnapshot.java | 298 +++++
.../snapshot/TestSnapshotDiffReport.java | 188 +++-
.../namenode/snapshot/TestSnapshotManager.java | 11 +-
.../shortcircuit/TestShortCircuitCache.java | 8 +-
.../apache/hadoop/hdfs/tools/TestDFSAdmin.java | 20 +
.../hadoop/tools/TestHdfsConfigFields.java | 2 -
.../test/resources/testErasureCodingConf.xml | 68 ++
.../src/test/resources/test_ec_policies.xml | 65 ++
hadoop-mapreduce-project/.gitignore | 1 -
hadoop-mapreduce-project/conf/mapred-site.xml | 21 +
.../conf/mapred-site.xml.template | 21 -
.../hadoop/mapred/LocalContainerLauncher.java | 46 +-
.../hadoop/mapreduce/v2/app/MRAppMaster.java | 16 +-
.../java/org/apache/hadoop/mapred/JVMId.java | 2 +-
.../org/apache/hadoop/mapred/Operation.java | 14 +-
.../apache/hadoop/mapreduce/MRJobConfig.java | 6 +
.../mapreduce/v2/hs/HistoryFileManager.java | 5 -
.../mapreduce/v2/hs/JobHistoryServer.java | 3 -
.../org/apache/hadoop/mapred/YARNRunner.java | 132 ++-
.../apache/hadoop/hdfs/NNBenchWithoutMR.java | 6 +-
.../mapred/TestMRTimelineEventHandling.java | 2 +-
.../apache/hadoop/mapred/TestYARNRunner.java | 167 +++
.../hadoop/mapreduce/GrowingSleepJob.java | 68 ++
.../hadoop/mapreduce/v2/MiniMRYarnCluster.java | 6 +-
.../apache/hadoop/test/MapredTestDriver.java | 3 +
.../org/apache/hadoop/examples/pi/Parser.java | 8 +-
hadoop-project/pom.xml | 24 +-
hadoop-tools/hadoop-archive-logs/pom.xml | 5 +
hadoop-tools/hadoop-aws/pom.xml | 10 +-
.../org/apache/hadoop/fs/s3a/Constants.java | 28 +-
.../hadoop/fs/s3a/S3AEncryptionMethods.java | 13 +-
.../org/apache/hadoop/fs/s3a/S3AFileSystem.java | 30 +-
.../java/org/apache/hadoop/fs/s3a/S3AUtils.java | 120 +-
.../src/site/markdown/tools/hadoop-aws/index.md | 83 +-
.../site/markdown/tools/hadoop-aws/testing.md | 4 +-
.../fs/s3a/AbstractTestS3AEncryption.java | 4 +
.../ITestS3AEncryptionAlgorithmValidation.java | 4 +-
.../hadoop/fs/s3a/ITestS3AEncryptionSSEC.java | 308 +++++-
.../hadoop/fs/s3a/TestSSEConfiguration.java | 223 ++++
.../src/site/markdown/index.md | 4 +-
.../fs/adl/live/AdlStorageConfiguration.java | 8 +-
.../hadoop/fs/azure/NativeAzureFileSystem.java | 3 -
.../fs/azure/RemoteSASKeyGeneratorImpl.java | 26 +-
.../fs/azure/RemoteWasbAuthorizerImpl.java | 22 +-
.../fs/azure/SyncableDataOutputStream.java | 12 +-
.../fs/azure/security/WasbTokenRenewer.java | 6 -
.../mapred/gridmix/TestGridmixSubmission.java | 3 +-
.../hadoop/metrics2/impl/TestKafkaMetrics.java | 10 +-
hadoop-tools/hadoop-openstack/pom.xml | 6 +-
.../SwiftAuthenticationFailedException.java | 6 +-
.../exceptions/SwiftBadRequestException.java | 6 +-
.../SwiftInvalidResponseException.java | 9 +-
.../SwiftThrottledRequestException.java | 6 +-
.../apache/hadoop/fs/swift/http/CopyMethod.java | 41 -
.../hadoop/fs/swift/http/CopyRequest.java | 41 +
.../swift/http/HttpInputStreamWithRelease.java | 25 +-
.../hadoop/fs/swift/http/SwiftRestClient.java | 805 +++++++-------
.../snative/SwiftNativeFileSystemStore.java | 11 +-
.../hadoop/fs/swift/util/HttpResponseUtils.java | 121 ++
.../TestSwiftFileSystemPartitionedUploads.java | 2 +-
.../fs/swift/http/TestSwiftRestClient.java | 2 +-
.../src/test/resources/log4j.properties | 3 -
.../org/apache/hadoop/tools/rumen/TaskInfo.java | 29 +-
.../apache/hadoop/tools/rumen/ZombieJob.java | 9 +-
hadoop-tools/hadoop-sls/pom.xml | 19 +
hadoop-tools/hadoop-sls/src/main/bin/slsrun.sh | 46 +-
.../hadoop/yarn/sls/ReservationClientUtil.java | 78 ++
.../org/apache/hadoop/yarn/sls/SLSRunner.java | 859 +++++++++-----
.../hadoop/yarn/sls/appmaster/AMSimulator.java | 131 ++-
.../yarn/sls/appmaster/MRAMSimulator.java | 9 +-
.../sls/resourcemanager/MockAMLauncher.java | 5 +
.../sls/scheduler/SLSCapacityScheduler.java | 33 +-
.../yarn/sls/scheduler/SLSFairScheduler.java | 31 +-
.../yarn/sls/scheduler/SchedulerWrapper.java | 4 +
.../hadoop/yarn/sls/scheduler/TaskRunner.java | 9 +-
.../hadoop/yarn/sls/synthetic/SynthJob.java | 306 +++++
.../yarn/sls/synthetic/SynthJobClass.java | 180 +++
.../sls/synthetic/SynthTraceJobProducer.java | 316 ++++++
.../hadoop/yarn/sls/synthetic/SynthUtils.java | 101 ++
.../yarn/sls/synthetic/SynthWorkload.java | 121 ++
.../hadoop/yarn/sls/synthetic/package-info.java | 22 +
.../apache/hadoop/yarn/sls/utils/SLSUtils.java | 52 +-
.../apache/hadoop/yarn/sls/web/SLSWebApp.java | 33 +-
.../src/site/markdown/SchedulerLoadSimulator.md | 150 ++-
.../hadoop/yarn/sls/BaseSLSRunnerTest.java | 120 ++
.../apache/hadoop/yarn/sls/TestSLSRunner.java | 90 +-
.../hadoop/yarn/sls/TestSynthJobGeneration.java | 96 ++
.../yarn/sls/appmaster/TestAMSimulator.java | 2 +-
.../yarn/sls/scheduler/TestTaskRunner.java | 2 +-
.../hadoop/yarn/sls/utils/TestSLSUtils.java | 30 +
.../src/test/resources/capacity-scheduler.xml | 10 +
.../src/test/resources/fair-scheduler.xml | 8 +-
.../hadoop-sls/src/test/resources/inputsls.json | 55 +
.../hadoop-sls/src/test/resources/nodes.json | 84 ++
.../src/test/resources/sls-runner.xml | 6 +-
.../hadoop-sls/src/test/resources/syn.json | 53 +
.../hadoop-sls/src/test/resources/yarn-site.xml | 10 +-
.../yarn/api/ContainerManagementProtocol.java | 8 +-
.../yarn/api/records/QueueConfigurations.java | 150 +++
.../hadoop/yarn/api/records/QueueInfo.java | 42 +
.../hadoop/yarn/conf/YarnConfiguration.java | 4 +
.../yarn/exceptions/ConfigurationException.java | 45 +
.../src/main/proto/yarn_protos.proto | 14 +
.../yarn/client/api/impl/YarnClientImpl.java | 10 +-
.../apache/hadoop/yarn/client/cli/LogsCLI.java | 58 +-
.../hadoop/yarn/client/ProtocolHATestBase.java | 6 +-
.../yarn/client/api/impl/TestAMRMClient.java | 2 +-
.../hadoop/yarn/client/cli/TestYarnCLI.java | 4 +-
.../impl/pb/QueueConfigurationsPBImpl.java | 137 +++
.../api/records/impl/pb/QueueInfoPBImpl.java | 98 +-
.../apache/hadoop/yarn/client/ServerProxy.java | 5 +
.../yarn/client/api/impl/TimelineConnector.java | 9 +-
.../hadoop/yarn/event/AsyncDispatcher.java | 12 +-
.../apache/hadoop/yarn/event/Dispatcher.java | 9 -
.../hadoop/yarn/event/EventDispatcher.java | 17 +-
.../logaggregation/AggregatedLogFormat.java | 4 +
.../yarn/util/ProcfsBasedProcessTree.java | 37 +-
.../util/ResourceCalculatorProcessTree.java | 54 -
.../yarn/util/WindowsBasedProcessTree.java | 12 -
.../hadoop/yarn/webapp/util/WebAppUtils.java | 17 +-
.../src/main/resources/yarn-default.xml | 22 +-
.../hadoop/yarn/api/TestPBImplRecords.java | 21 +-
.../client/api/impl/TestTimelineClient.java | 2 +
.../hadoop/yarn/event/DrainDispatcher.java | 11 +-
.../yarn/util/TestProcfsBasedProcessTree.java | 72 +-
.../yarn/util/TestWindowsBasedProcessTree.java | 12 -
.../pom.xml | 12 +-
.../server/nodemanager/ContainerExecutor.java | 4 +-
.../nodemanager/DefaultContainerExecutor.java | 7 +-
.../nodemanager/LinuxContainerExecutor.java | 79 +-
.../nodemanager/NodeHealthCheckerService.java | 54 +-
.../yarn/server/nodemanager/NodeManager.java | 5 -
.../server/nodemanager/NodeStatusUpdater.java | 6 +
.../nodemanager/NodeStatusUpdaterImpl.java | 8 +-
.../nodemanager/amrmproxy/AMRMProxyService.java | 63 +-
.../containermanager/ContainerManager.java | 2 -
.../containermanager/ContainerManagerImpl.java | 28 -
.../launcher/ContainerLaunch.java | 38 +-
.../launcher/ContainerRelaunch.java | 9 +
.../linux/resources/CGroupsHandlerImpl.java | 10 +-
.../DelegatingLinuxContainerRuntime.java | 9 +-
.../JavaSandboxLinuxContainerRuntime.java | 13 +-
.../runtime/docker/DockerCommandExecutor.java | 191 ++++
.../linux/runtime/docker/package-info.java | 26 +
.../monitor/ContainersMonitorImpl.java | 49 +-
.../util/CgroupsLCEResourcesHandler.java | 13 +-
.../impl/container-executor.c | 40 +-
.../impl/container-executor.h | 22 +-
.../nodemanager/DummyContainerManager.java | 6 +-
.../TestDefaultContainerExecutor.java | 3 +-
.../nodemanager/TestLinuxContainerExecutor.java | 9 +-
.../TestLinuxContainerExecutorWithMocks.java | 217 ++--
.../nodemanager/TestNodeHealthService.java | 11 +-
.../nodemanager/TestNodeManagerResync.java | 164 ---
.../amrmproxy/BaseAMRMProxyTest.java | 21 +-
.../amrmproxy/TestAMRMProxyService.java | 65 ++
.../TestAMRMProxyTokenSecretManager.java | 81 ++
.../BaseContainerManagerTest.java | 13 +-
.../containermanager/TestContainerManager.java | 5 -
.../TestContainerManagerRecovery.java | 16 +-
.../containermanager/TestNMProxy.java | 30 +-
.../launcher/TestContainerLaunch.java | 65 +-
.../MockPrivilegedOperationCaptor.java | 68 ++
.../linux/resources/TestCGroupsHandlerImpl.java | 37 +
.../runtime/TestDockerContainerRuntime.java | 13 +-
.../TestJavaSandboxLinuxContainerRuntime.java | 20 +-
.../docker/TestDockerCommandExecutor.java | 218 ++++
.../runtime/docker/TestDockerLoadCommand.java | 48 +
.../runtime/docker/TestDockerRunCommand.java | 63 ++
.../TestResourceLocalizationService.java | 2 -
.../TestContainersMonitorResourceChange.java | 3 +-
.../TestContainerSchedulerQueuing.java | 9 +-
.../util/TestCgroupsLCEResourcesHandler.java | 29 +
...-container-executer-with-configuration-error | 20 +
.../hadoop-yarn-server-resourcemanager/pom.xml | 1 +
.../server/resourcemanager/AdminService.java | 3 +
.../ApplicationMasterService.java | 2 +-
.../server/resourcemanager/RMAppManager.java | 1 +
.../server/resourcemanager/ResourceManager.java | 1 -
.../ProportionalCapacityPreemptionPolicy.java | 17 +-
.../invariants/InvariantViolationException.java | 35 +
.../monitor/invariants/InvariantsChecker.java | 96 ++
.../invariants/MetricsInvariantChecker.java | 195 ++++
.../monitor/invariants/package-info.java | 22 +
.../recovery/ZKRMStateStore.java | 506 +++++++--
.../reservation/InMemoryPlan.java | 11 +
.../PeriodicRLESparseResourceAllocation.java | 167 +++
.../resourcemanager/reservation/PlanView.java | 9 +
.../RLESparseResourceAllocation.java | 51 +
.../planning/AlignedPlannerWithGreedy.java | 15 +-
.../planning/GreedyReservationAgent.java | 13 +-
.../reservation/planning/IterativePlanner.java | 196 ++--
.../reservation/planning/ReservationAgent.java | 23 +-
.../planning/SimpleCapacityReplanner.java | 8 +-
.../reservation/planning/StageAllocator.java | 10 +-
.../planning/StageAllocatorGreedy.java | 4 +-
.../planning/StageAllocatorGreedyRLE.java | 4 +-
.../planning/StageAllocatorLowCostAligned.java | 279 +++--
.../planning/StageEarliestStart.java | 46 -
.../planning/StageEarliestStartByDemand.java | 106 --
.../StageEarliestStartByJobArrival.java | 39 -
.../planning/StageExecutionInterval.java | 47 +
.../StageExecutionIntervalByDemand.java | 144 +++
.../StageExecutionIntervalUnconstrained.java | 73 ++
.../server/resourcemanager/rmapp/RMAppImpl.java | 24 +-
.../rmapp/attempt/RMAppAttemptImpl.java | 6 +-
.../rmapp/attempt/RMAppAttemptMetrics.java | 2 +-
.../scheduler/AbstractYarnScheduler.java | 2 +-
.../resourcemanager/scheduler/NodeType.java | 12 +-
.../resourcemanager/scheduler/QueueMetrics.java | 21 +-
.../scheduler/capacity/AbstractCSQueue.java | 25 +
.../scheduler/capacity/CSQueueMetrics.java | 4 +-
.../capacity/CapacitySchedulerQueueManager.java | 6 +-
.../resourcemanager/scheduler/fair/FSQueue.java | 3 +
.../scheduler/fair/FSQueueMetrics.java | 4 +-
.../scheduler/fair/FSSchedulerNode.java | 8 +-
.../yarn/server/resourcemanager/MockRM.java | 10 +-
.../server/resourcemanager/TestAppManager.java | 2 +
.../resourcemanager/TestClientRMService.java | 25 +
.../resourcemanager/TestRMDispatcher.java | 1 +
.../resourcemanager/TestRMStoreCommands.java | 15 +-
.../applicationsmanager/TestAMRestart.java | 140 ++-
.../invariants/TestMetricsInvariantChecker.java | 99 ++
.../recovery/RMStateStoreTestBase.java | 10 +-
.../recovery/TestFSRMStateStore.java | 1 +
.../recovery/TestLeveldbRMStateStore.java | 1 +
.../recovery/TestZKRMStateStore.java | 573 +++++++++-
.../reservation/ReservationSystemTestUtil.java | 14 +
...TestPeriodicRLESparseResourceAllocation.java | 142 +++
.../TestRLESparseResourceAllocation.java | 56 +-
.../planning/TestAlignedPlanner.java | 417 ++++++-
.../planning/TestGreedyReservationAgent.java | 14 +-
.../planning/TestSimpleCapacityReplanner.java | 4 +-
.../scheduler/TestAbstractYarnScheduler.java | 2 +-
.../capacity/TestCapacityScheduler.java | 15 +-
.../scheduler/fair/TestFairScheduler.java | 15 +
.../src/test/resources/invariants.txt | 54 +
.../hadoop/yarn/server/MiniYARNCluster.java | 22 +-
.../server/TestContainerManagerSecurity.java | 14 +-
.../hadoop/yarn/server/TestMiniYarnCluster.java | 34 +-
.../collector/AppLevelTimelineCollector.java | 8 +-
.../collector/TimelineCollector.java | 27 +-
.../storage/FileSystemTimelineReaderImpl.java | 129 +--
.../collector/TestTimelineCollector.java | 95 +-
.../src/site/markdown/FairScheduler.md | 6 +
.../src/main/webapp/app/adapters/yarn-app.js | 2 +-
.../webapp/app/components/em-table-html-cell.js | 23 +
.../main/webapp/app/components/timeline-view.js | 13 +-
.../webapp/app/controllers/app-table-columns.js | 213 ++--
.../webapp/app/controllers/yarn-app-attempt.js | 32 +-
.../webapp/app/controllers/yarn-app-attempts.js | 23 +-
.../src/main/webapp/app/controllers/yarn-app.js | 21 +-
.../webapp/app/controllers/yarn-apps/apps.js | 9 +-
.../app/controllers/yarn-apps/services.js | 9 +-
.../webapp/app/controllers/yarn-nodes/table.js | 8 +
.../webapp/app/controllers/yarn-services.js | 79 +-
.../src/main/webapp/app/models/yarn-app.js | 6 +-
.../src/main/webapp/app/router.js | 1 +
.../src/main/webapp/app/routes/yarn-services.js | 34 +
.../src/main/webapp/app/styles/app.css | 9 +
.../main/webapp/app/templates/application.hbs | 13 +-
.../templates/components/app-attempt-table.hbs | 8 +-
.../templates/components/container-table.hbs | 2 +-
.../templates/components/em-table-html-cell.hbs | 23 +
.../app/templates/components/timeline-view.hbs | 8 +-
.../webapp/app/templates/yarn-app-attempts.hbs | 8 +-
.../src/main/webapp/app/templates/yarn-app.hbs | 12 +-
.../src/main/webapp/app/templates/yarn-apps.hbs | 4 -
.../webapp/app/templates/yarn-apps/apps.hbs | 2 +-
.../webapp/app/templates/yarn-apps/services.hbs | 2 +-
.../webapp/app/templates/yarn-nodes/table.hbs | 2 +-
.../main/webapp/app/templates/yarn-services.hbs | 86 ++
.../src/main/webapp/app/utils/converter.js | 24 +
.../components/em-table-html-cell-test.js | 43 +
.../tests/unit/routes/yarn-services-test.js | 29 +
566 files changed, 23470 insertions(+), 4541 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/86414507/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
----------------------------------------------------------------------
diff --cc hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
index 1289115,22a2117..fb7eadf
--- a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
+++ b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
@@@ -322,36 -322,16 +322,50 @@@ log4j.appender.EWMA.cleanupInterval=${y
log4j.appender.EWMA.messageAgeLimitSeconds=${yarn.ewma.messageAgeLimitSeconds}
log4j.appender.EWMA.maxUniqueMessages=${yarn.ewma.maxUniqueMessages}
+# Fair scheduler requests log on state dump
+log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler.statedump=DEBUG,FSLOGGER
+log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler.statedump=false
+log4j.appender.FSLOGGER=org.apache.log4j.RollingFileAppender
+log4j.appender.FSLOGGER.File=${hadoop.log.dir}/fairscheduler-statedump.log
+log4j.appender.FSLOGGER.layout=org.apache.log4j.PatternLayout
+log4j.appender.FSLOGGER.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.FSLOGGER.MaxFileSize=${hadoop.log.maxfilesize}
+log4j.appender.FSLOGGER.MaxBackupIndex=${hadoop.log.maxbackupindex}
+
+#
+# Add a logger for ozone that is separate from the Datanode.
+#
+log4j.logger.org.apache.hadoop.ozone=DEBUG,OZONE,FILE
+
+# Do not log into datanode logs. Remove this line to have single log.
+log4j.additivity.org.apache.hadoop.ozone=false
+
+# For development purposes, log both to console and log file.
+log4j.appender.OZONE=org.apache.log4j.ConsoleAppender
+log4j.appender.OZONE.Threshold=info
+log4j.appender.OZONE.layout=org.apache.log4j.PatternLayout
+log4j.appender.OZONE.layout.ConversionPattern=%d{ISO8601} [%t] %-5p \
+ %X{component} %X{function} %X{resource} %X{user} %X{request} - %m%n
+
+# Real ozone logger that writes to ozone.log
+log4j.appender.FILE=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.FILE.File=${hadoop.log.dir}/ozone.log
+log4j.appender.FILE.Threshold=debug
+log4j.appender.FILE.layout=org.apache.log4j.PatternLayout
+log4j.appender.FILE.layout.ConversionPattern=%d{ISO8601} [%t] %-5p \
+(%F:%L) %X{function} %X{resource} %X{user} %X{request} - \
+%m%n
+ #
+ # Fair scheduler state dump
+ #
+ # Use following logger to dump the state to a separate file
+
+ #log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler.statedump=DEBUG,FSSTATEDUMP
+ #log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler.statedump=false
+ #log4j.appender.FSSTATEDUMP=org.apache.log4j.RollingFileAppender
+ #log4j.appender.FSSTATEDUMP.File=${hadoop.log.dir}/fairscheduler-statedump.log
+ #log4j.appender.FSSTATEDUMP.layout=org.apache.log4j.PatternLayout
+ #log4j.appender.FSSTATEDUMP.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+ #log4j.appender.FSSTATEDUMP.MaxFileSize=${hadoop.log.maxfilesize}
-#log4j.appender.FSSTATEDUMP.MaxBackupIndex=${hadoop.log.maxbackupindex}
++#log4j.appender.FSSTATEDUMP.MaxBackupIndex=${hadoop.log.maxbackupindex}
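
Note that the %X{component} %X{function} %X{resource} %X{user} %X{request}
conversions read from log4j's MDC, so they render only when the emitting code
populates those keys. A hypothetical producer-side sketch (the key names come
from the pattern above; the logger name and values are illustrative):

    import org.apache.log4j.Logger;
    import org.apache.log4j.MDC;

    Logger log = Logger.getLogger("org.apache.hadoop.ozone.sample");
    MDC.put("component", "ozone-rest");
    MDC.put("function", "createVolume");
    MDC.put("user", "alice");
    try {
      log.debug("handling request");  // rendered with the MDC fields filled in
    } finally {
      MDC.remove("component");
      MDC.remove("function");
      MDC.remove("user");
    }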
http://git-wip-us.apache.org/repos/asf/hadoop/blob/86414507/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/86414507/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/86414507/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/86414507/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeMetrics.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/86414507/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/86414507/hadoop-project/pom.xml
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/86414507/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java
----------------------------------------------------------------------
diff --cc hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java
index 4effb0d,ea08b2b..eb16fb5
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java
@@@ -133,91 -122,92 +122,92 @@@ public class RemoteWasbAuthorizerImpl i
public boolean authorize(String wasbAbsolutePath, String accessType)
throws WasbAuthorizationException, IOException {
- try {
+ try {
/* Make an exception for the internal -RenamePending files */
- if (wasbAbsolutePath.endsWith(NativeAzureFileSystem.FolderRenamePending.SUFFIX)) {
- return true;
- }
-
- setDelegationToken();
- URIBuilder uriBuilder = new URIBuilder(remoteAuthorizerServiceUrl);
- uriBuilder.setPath("/" + CHECK_AUTHORIZATION_OP);
- uriBuilder.addParameter(WASB_ABSOLUTE_PATH_QUERY_PARAM_NAME,
- wasbAbsolutePath);
- uriBuilder.addParameter(ACCESS_OPERATION_QUERY_PARAM_NAME,
- accessType);
- if (isSecurityEnabled && StringUtils.isNotEmpty(delegationToken)) {
- uriBuilder.addParameter(DELEGATION_TOKEN_QUERY_PARAM_NAME,
- delegationToken);
- }
-
- String responseBody = null;
- UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
- UserGroupInformation connectUgi = ugi.getRealUser();
- if (connectUgi == null) {
- connectUgi = ugi;
- } else {
- uriBuilder.addParameter(Constants.DOAS_PARAM, ugi.getShortUserName());
- }
-
- try {
- responseBody = connectUgi
- .doAs(new PrivilegedExceptionAction<String>() {
- @Override
- public String run() throws Exception {
- AuthenticatedURL.Token token = null;
- HttpGet httpGet = new HttpGet(uriBuilder.build());
- if (isKerberosSupportEnabled && UserGroupInformation
- .isSecurityEnabled() && (delegationToken == null
- || delegationToken.isEmpty())) {
- token = new AuthenticatedURL.Token();
- final Authenticator kerberosAuthenticator = new KerberosDelegationTokenAuthenticator();
- try {
- kerberosAuthenticator
- .authenticate(uriBuilder.build().toURL(), token);
- Validate.isTrue(token.isSet(),
- "Authenticated Token is NOT present. The request cannot proceed.");
- } catch (AuthenticationException e){
- throw new IOException("Authentication failed in check authorization", e);
- }
- if (token != null) {
- httpGet.setHeader("Cookie",
- AuthenticatedURL.AUTH_COOKIE + "=" + token);
- }
+ if (wasbAbsolutePath.endsWith(NativeAzureFileSystem.FolderRenamePending.SUFFIX)) {
+ return true;
+ }
+
++ setDelegationToken();
+ URIBuilder uriBuilder = new URIBuilder(remoteAuthorizerServiceUrl);
+ uriBuilder.setPath("/" + CHECK_AUTHORIZATION_OP);
+ uriBuilder.addParameter(WASB_ABSOLUTE_PATH_QUERY_PARAM_NAME,
+ wasbAbsolutePath);
+ uriBuilder.addParameter(ACCESS_OPERATION_QUERY_PARAM_NAME,
+ accessType);
+ if (isSecurityEnabled && StringUtils.isNotEmpty(delegationToken)) {
+ uriBuilder.addParameter(DELEGATION_TOKEN_QUERY_PARAM_NAME,
+ delegationToken);
+ }
+
+ String responseBody = null;
+ UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+ UserGroupInformation connectUgi = ugi.getRealUser();
+ if (connectUgi == null) {
+ connectUgi = ugi;
+ } else {
+ uriBuilder.addParameter(Constants.DOAS_PARAM, ugi.getShortUserName());
+ }
- if (isSecurityEnabled && !connectUgi.hasKerberosCredentials()) {
- connectUgi = UserGroupInformation.getLoginUser();
- }
- connectUgi.checkTGTAndReloginFromKeytab();
+
+ try {
+ responseBody = connectUgi
+ .doAs(new PrivilegedExceptionAction<String>() {
+ @Override
+ public String run() throws Exception {
+ AuthenticatedURL.Token token = null;
+ HttpGet httpGet = new HttpGet(uriBuilder.build());
+ if (isKerberosSupportEnabled && UserGroupInformation
+ .isSecurityEnabled() && (delegationToken == null
+ || delegationToken.isEmpty())) {
+ token = new AuthenticatedURL.Token();
+ final Authenticator kerberosAuthenticator = new KerberosDelegationTokenAuthenticator();
+ try {
+ kerberosAuthenticator
+ .authenticate(uriBuilder.build().toURL(), token);
+ Validate.isTrue(token.isSet(),
+ "Authenticated Token is NOT present. The request cannot proceed.");
+ } catch (AuthenticationException e){
+ throw new IOException("Authentication failed in check authorization", e);
+ }
+ if (token != null) {
+ httpGet.setHeader("Cookie",
+ AuthenticatedURL.AUTH_COOKIE + "=" + token);
}
- return remoteCallHelper.makeRemoteGetRequest(httpGet);
}
- });
- } catch (InterruptedException e) {
- LOG.error("Error in check authorization", e);
- throw new WasbAuthorizationException("Error in check authorize", e);
- }
-
- ObjectMapper objectMapper = new ObjectMapper();
- RemoteAuthorizerResponse authorizerResponse =
- objectMapper
- .readValue(responseBody, RemoteAuthorizerResponse.class);
-
- if (authorizerResponse == null) {
- throw new WasbAuthorizationException(
- "RemoteAuthorizerResponse object null from remote call");
- } else if (authorizerResponse.getResponseCode()
- == REMOTE_CALL_SUCCESS_CODE) {
- return authorizerResponse.getAuthorizationResult();
- } else {
- throw new WasbAuthorizationException("Remote authorization"
- + " service encountered an error "
- + authorizerResponse.getResponseMessage());
- }
- } catch (URISyntaxException | WasbRemoteCallException
- | JsonParseException | JsonMappingException ex) {
- throw new WasbAuthorizationException(ex);
+ return remoteCallHelper.makeRemoteGetRequest(httpGet);
+ }
+ });
+ } catch (InterruptedException e) {
+ LOG.error("Error in check authorization", e);
+ throw new WasbAuthorizationException("Error in check authorize", e);
}
+
+ ObjectMapper objectMapper = new ObjectMapper();
+ RemoteAuthorizerResponse authorizerResponse =
+ objectMapper
+ .readValue(responseBody, RemoteAuthorizerResponse.class);
+
+ if (authorizerResponse == null) {
+ throw new WasbAuthorizationException(
+ "RemoteAuthorizerResponse object null from remote call");
+ } else if (authorizerResponse.getResponseCode()
+ == REMOTE_CALL_SUCCESS_CODE) {
+ return authorizerResponse.getAuthorizationResult();
+ } else {
+ throw new WasbAuthorizationException("Remote authorization"
+ + " service encountered an error "
+ + authorizerResponse.getResponseMessage());
+ }
+ } catch (URISyntaxException | WasbRemoteCallException
+ | JsonParseException | JsonMappingException ex) {
+ throw new WasbAuthorizationException(ex);
+ }
}
+
+ private void setDelegationToken() throws IOException {
+ this.delegationToken = SecurityUtils.getDelegationTokenFromCredentials();
+ }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/86414507/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
----------------------------------------------------------------------
diff --cc hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
index 6269f21,1b2bca3..fbb56b0
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
@@@ -1206,11 -1206,11 +1206,11 @@@ public class TestAMRMClient
// Should receive at least 1 token
Assert.assertTrue(receivedNMTokens.size() > 0
&& receivedNMTokens.size() <= nodeCount);
-
+
assertEquals(allocatedContainerCount, containersRequestedAny);
- assertEquals(2, amClient.release.size());
+ assertEquals(2, releases.size());
assertEquals(0, amClient.ask.size());
-
+
// need to tell the AMRMClient that we don't need these resources anymore
amClient.removeContainerRequest(
new ContainerRequest(capability, nodes, racks, priority));
[43/50] [abbrv] hadoop git commit: HDFS-11702. Remove indefinite caching of key provider uri in DFSClient. Contributed by Rushabh S Shah.
Posted by ae...@apache.org.
HDFS-11702. Remove indefinite caching of key provider uri in DFSClient. Contributed by Rushabh S Shah.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cef2815c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cef2815c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cef2815c
Branch: refs/heads/HDFS-7240
Commit: cef2815cf48154fe82f44082dcbdce6373c81284
Parents: a9a3d21
Author: Kihwal Lee <ki...@apache.org>
Authored: Mon May 8 08:27:37 2017 -0500
Committer: Kihwal Lee <ki...@apache.org>
Committed: Mon May 8 08:27:37 2017 -0500
----------------------------------------------------------------------
.../main/java/org/apache/hadoop/hdfs/DFSClient.java | 14 +-------------
.../org/apache/hadoop/hdfs/TestEncryptionZones.java | 9 ---------
2 files changed, 1 insertion(+), 22 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/cef2815c/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 3f1f3ea..d21b9b4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -232,7 +232,6 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
private static ThreadPoolExecutor HEDGED_READ_THREAD_POOL;
private static volatile ThreadPoolExecutor STRIPED_READ_THREAD_POOL;
private final int smallBufferSize;
- private URI keyProviderUri = null;
public DfsClientConf getConf() {
return dfsClientConf;
@@ -2901,10 +2900,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
* @throws IOException
*/
URI getKeyProviderUri() throws IOException {
- if (keyProviderUri != null) {
- return keyProviderUri;
- }
-
+ URI keyProviderUri = null;
// Lookup the secret in credentials object for namenodeuri.
Credentials credentials = ugi.getCredentials();
byte[] keyProviderUriBytes = credentials.getSecretKey(getKeyProviderMapKey());
@@ -2936,14 +2932,6 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
return clientContext.getKeyProviderCache().get(conf, getKeyProviderUri());
}
- /*
- * Should be used only for testing.
- */
- @VisibleForTesting
- public void setKeyProviderUri(URI providerUri) {
- this.keyProviderUri = providerUri;
- }
-
@VisibleForTesting
public void setKeyProvider(KeyProvider provider) {
clientContext.getKeyProviderCache().setKeyProvider(conf, provider);
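The net effect of this change is that getKeyProviderUri() is recomputed on every call rather than served from a cached field. A minimal sketch of the resolution order implied by this hunk and the TestEncryptionZones changes below; the three helper methods are hypothetical stand-ins for the DFSClient internals:

    URI resolveKeyProviderUri() throws IOException {
      // 1. A secret stashed in the UGI credentials wins outright.
      URI uri = lookupKeyProviderUriInCredentials();
      if (uri != null) {
        return uri;
      }
      // 2. Otherwise fall back to the client-side configuration.
      uri = lookupKeyProviderUriInConf();
      if (uri != null) {
        return uri;
      }
      // 3. Finally ask the namenode via getServerDefaults().
      return lookupKeyProviderUriInServerDefaults();
    }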
http://git-wip-us.apache.org/repos/asf/hadoop/blob/cef2815c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
index 093d516..8eb3b7b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
@@ -1702,7 +1702,6 @@ public class TestEncryptionZones {
credentials.addSecretKey(lookUpKey,
DFSUtilClient.string2Bytes(dummyKeyProvider));
client.ugi.addCredentials(credentials);
- client.setKeyProviderUri(null);
Assert.assertEquals("Client Key provider is different from provider in "
+ "credentials map", dummyKeyProvider,
client.getKeyProviderUri().toString());
@@ -1724,7 +1723,6 @@ public class TestEncryptionZones {
CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
dummyKeyProviderUri1);
DFSClient mockClient = Mockito.spy(cluster.getFileSystem().getClient());
- mockClient.setKeyProviderUri(null);
// Namenode returning null as keyProviderUri in FSServerDefaults.
FsServerDefaults serverDefaultsWithKeyProviderNull =
getTestServerDefaults(null);
@@ -1736,7 +1734,6 @@ public class TestEncryptionZones {
Mockito.verify(mockClient, Mockito.times(1)).getServerDefaults();
String dummyKeyProviderUri2 = "dummy://foo:bar@test_provider2";
- mockClient.setKeyProviderUri(null);
FsServerDefaults serverDefaultsWithDummyKeyProvider =
getTestServerDefaults(dummyKeyProviderUri2);
// Namenode returning dummyKeyProvider2 in serverDefaults.
@@ -1765,8 +1762,6 @@ public class TestEncryptionZones {
// Unset the provider path in conf
clusterConf.unset(
CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH);
- // Nullify the cached value for key provider uri on client
- cluster.getFileSystem().getClient().setKeyProviderUri(null);
// Even after unsetting the local conf, the client key provider should be
// the same as namenode's provider.
Assert.assertEquals("Key Provider for client and namenode are different",
@@ -1777,8 +1772,6 @@ public class TestEncryptionZones {
clusterConf.set(
CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
"dummy://foo:bar@test_provider1");
- // Nullify the cached value for key provider uri on client
- cluster.getFileSystem().getClient().setKeyProviderUri(null);
// Even after pointing the conf to some dummy provider, the client key
// provider should be the same as namenode's provider.
Assert.assertEquals("Key Provider for client and namenode are different",
@@ -1813,8 +1806,6 @@ public class TestEncryptionZones {
// Creating a fake serverdefaults so that we can simulate namenode not
// being upgraded.
DFSClient spyClient = Mockito.spy(cluster.getFileSystem().getClient());
- // Clear the cache value of keyProviderUri on client side.
- spyClient.setKeyProviderUri(null);
Mockito.doReturn(spyServerDefaults).when(spyClient).getServerDefaults();
// Since FsServerDefaults#keyProviderUri is null, the client
[13/50] [abbrv] hadoop git commit: YARN-5331. Extend
RLESparseResourceAllocation with period for supporting recurring reservations
in YARN ReservationSystem. (Sangeetha Abdu Jyothi via Subru).
Posted by ae...@apache.org.
YARN-5331. Extend RLESparseResourceAllocation with period for supporting recurring reservations in YARN ReservationSystem. (Sangeetha Abdu Jyothi via Subru).
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6bf42e48
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6bf42e48
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6bf42e48
Branch: refs/heads/HDFS-7240
Commit: 6bf42e48ef658bf6dd86ebd706562ce7cc06216a
Parents: e514fc4
Author: Subru Krishnan <su...@apache.org>
Authored: Mon May 1 18:48:36 2017 -0700
Committer: Subru Krishnan <su...@apache.org>
Committed: Mon May 1 18:48:36 2017 -0700
----------------------------------------------------------------------
.../PeriodicRLESparseResourceAllocation.java | 167 +++++++++++++++++++
.../RLESparseResourceAllocation.java | 51 ++++++
.../reservation/ReservationSystemTestUtil.java | 14 ++
...TestPeriodicRLESparseResourceAllocation.java | 142 ++++++++++++++++
.../TestRLESparseResourceAllocation.java | 56 ++++++-
5 files changed, 429 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6bf42e48/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/PeriodicRLESparseResourceAllocation.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/PeriodicRLESparseResourceAllocation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/PeriodicRLESparseResourceAllocation.java
new file mode 100644
index 0000000..8e3be8b3
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/PeriodicRLESparseResourceAllocation.java
@@ -0,0 +1,167 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.reservation;
+
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.util.resource.Resources;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This data structure stores a periodic RLESparseResourceAllocation.
+ * Default period is 1 day (86400000ms).
+ */
+public class PeriodicRLESparseResourceAllocation extends
+ RLESparseResourceAllocation {
+
+ // Log
+ private static final Logger LOG = LoggerFactory
+ .getLogger(PeriodicRLESparseResourceAllocation.class);
+
+ private long timePeriod;
+
+ /**
+ * Constructor.
+ *
+ * @param rleVector {@link RLESparseResourceAllocation} with the run-length
+ *          encoded data.
+ * @param timePeriod Time period in milliseconds.
+ */
+ public PeriodicRLESparseResourceAllocation(
+ RLESparseResourceAllocation rleVector, Long timePeriod) {
+ super(rleVector.getCumulative(), rleVector.getResourceCalculator());
+ this.timePeriod = timePeriod;
+ }
+
+ /**
+ * Constructor. Default time period set to 1 day.
+ *
+ * @param rleVector {@link RLESparseResourceAllocation} with the run-length
+ *          encoded data.
+ */
+ public PeriodicRLESparseResourceAllocation(
+ RLESparseResourceAllocation rleVector) {
+ this(rleVector, 86400000L);
+ }
+
+ /**
+ * Get capacity at time based on periodic repetition.
+ *
+ * @param tick UTC time for which the allocated {@link Resource} is queried.
+ * @return {@link Resource} allocated at specified time
+ */
+ public Resource getCapacityAtTime(long tick) {
+ long convertedTime = (tick % timePeriod);
+ return super.getCapacityAtTime(convertedTime);
+ }
+
+ /**
+ * Add resource for the specified interval. This function will be used by
+ * {@link InMemoryPlan} while placing reservations between 0 and timePeriod.
+ * The interval may include 0, and the end time must not exceed
+ * timePeriod.
+ *
+ * @param interval {@link ReservationInterval} to which the specified
+ * resource is to be added.
+ * @param resource {@link Resource} to be added to the interval specified.
+ * @return true if addition is successful, false otherwise
+ */
+ public boolean addInterval(ReservationInterval interval,
+ Resource resource) {
+ long startTime = interval.getStartTime();
+ long endTime = interval.getEndTime();
+ if (startTime >= 0 && endTime > startTime && endTime <= timePeriod) {
+ return super.addInterval(interval, resource);
+ } else {
+ LOG.info("Cannot set capacity beyond end time: " + timePeriod);
+ return false;
+ }
+ }
+
+ /**
+ * Removes a resource for the specified interval.
+ *
+ * @param interval the {@link ReservationInterval} for which the resource is
+ * to be removed.
+ * @param resource the {@link Resource} to be removed.
+ * @return true if removal is successful, false otherwise
+ */
+ public boolean removeInterval(
+ ReservationInterval interval, Resource resource) {
+ long startTime = interval.getStartTime();
+ long endTime = interval.getEndTime();
+ // If the resource to be subtracted exceeds the minimum resource in
+ // the range, abort the removal to avoid negative capacity.
+ if (!Resources.fitsIn(
+ resource, super.getMinimumCapacityInInterval(interval))) {
+ LOG.info("Request to remove more resources than what is available");
+ return false;
+ }
+ if (startTime >= 0 && endTime > startTime && endTime <= timePeriod) {
+ return super.removeInterval(interval, resource);
+ } else {
+ LOG.info("Interval extends beyond the end time " + timePeriod);
+ return false;
+ }
+ }
+
+ /**
+ * Get maximum capacity at periodic offsets from the specified time.
+ *
+ * @param tick UTC time base from which offsets are specified for finding
+ * the maximum capacity.
+ * @param period periodic offset at which capacities are evaluated.
+ * @return the maximum {@link Resource} across the specified time instants.
+ */
+ public Resource getMaximumPeriodicCapacity(long tick, long period) {
+ Resource maxResource;
+ if (period < timePeriod) {
+ maxResource =
+ super.getMaximumPeriodicCapacity(tick % timePeriod, period);
+ } else {
+ // if period is greater than the length of PeriodicRLESparseAllocation,
+ // only a single value exists in this interval.
+ maxResource = super.getCapacityAtTime(tick % timePeriod);
+ }
+ return maxResource;
+ }
+
+ /**
+ * Get time period of PeriodicRLESparseResourceAllocation.
+ *
+ * @return timePeriod time period represented in ms.
+ */
+ public long getTimePeriod() {
+ return this.timePeriod;
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder ret = new StringBuilder();
+ ret.append("Period: ").append(timePeriod).append("\n")
+ .append(super.toString());
+ if (super.isEmpty()) {
+ ret.append(" no allocations\n");
+ }
+ return ret.toString();
+ }
+
+}
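A short usage sketch, assembled from the test utility and the assertions added later in this commit, showing how a query tick wraps around the period:

    // Allocations over one 20ms period: 10 on [0,5), 7 on [5,10),
    // 5 on [10,15), 2 on [15,19), 0 from 19 on.
    RLESparseResourceAllocation rle =
        ReservationSystemTestUtil.generateRLESparseResourceAllocation(
            new int[] {10, 7, 5, 2, 0}, new long[] {0L, 5L, 10L, 15L, 19L});
    PeriodicRLESparseResourceAllocation periodic =
        new PeriodicRLESparseResourceAllocation(rle, 20L);
    // Tick 27 reduces to 27 % 20 = 7, which falls in the [5, 10) segment:
    periodic.getCapacityAtTime(27L);   // <memory:7, vCores:7>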
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6bf42e48/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/RLESparseResourceAllocation.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/RLESparseResourceAllocation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/RLESparseResourceAllocation.java
index c18a93e..658387b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/RLESparseResourceAllocation.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/RLESparseResourceAllocation.java
@@ -132,6 +132,7 @@ public class RLESparseResourceAllocation {
* Returns the capacity, i.e. total resources allocated at the specified point
* of time.
*
+ * @param tick timestamp at which the resource needs to be known
* @return the resources allocated at the specified time
*/
public Resource getCapacityAtTime(long tick) {
@@ -309,6 +310,10 @@ public class RLESparseResourceAllocation {
}
}
+ public ResourceCalculator getResourceCalculator() {
+ return resourceCalculator;
+ }
+
/**
* Merges the range start to end of two {@code RLESparseResourceAllocation}
* using a given {@code RLEOperator}.
@@ -533,4 +538,50 @@ public class RLESparseResourceAllocation {
add, subtract, min, max, subtractTestNonNegative
}
+ /**
+ * Get the maximum capacity across specified time instances. The search-space
+ * is specified using the starting value, tick, and the periodic interval for
+ * search. The maximum resource allocation across tick, tick + period,
+ * tick + 2 * period, ..., tick + n * period is returned.
+ *
+ * @param tick the starting time instance
+ * @param period interval at which capacity is evaluated
+ * @return maximum resource allocation
+ */
+ public Resource getMaximumPeriodicCapacity(long tick, long period) {
+ Resource maxCapacity = ZERO_RESOURCE;
+ if (!cumulativeCapacity.isEmpty()) {
+ Long lastKey = cumulativeCapacity.lastKey();
+ for (long t = tick; t <= lastKey; t = t + period) {
+ maxCapacity = Resources.componentwiseMax(maxCapacity,
+ cumulativeCapacity.floorEntry(t).getValue());
+ }
+ }
+ return maxCapacity;
+ }
+
+ /**
+ * Get the minimum capacity in the specified time range.
+ *
+ * @param interval the {@link ReservationInterval} to be searched
+ * @return minimum resource allocation
+ */
+ public Resource getMinimumCapacityInInterval(ReservationInterval interval) {
+ Resource minCapacity = Resource.newInstance(
+ Integer.MAX_VALUE, Integer.MAX_VALUE);
+ long start = interval.getStartTime();
+ long end = interval.getEndTime();
+ NavigableMap<Long, Resource> capacityRange =
+ this.getRangeOverlapping(start, end).getCumulative();
+ if (!capacityRange.isEmpty()) {
+ for (Map.Entry<Long, Resource> entry : capacityRange.entrySet()) {
+ if (entry.getValue() != null) {
+ minCapacity = Resources.componentwiseMin(minCapacity,
+ entry.getValue());
+ }
+ }
+ }
+ return minCapacity;
+ }
+
}
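To make the sampling in getMaximumPeriodicCapacity concrete, a worked example drawn directly from the test data added below:

    RLESparseResourceAllocation v =
        ReservationSystemTestUtil.generateRLESparseResourceAllocation(
            new int[] {2, 5, 7, 10, 3, 4, 6, 8},
            new long[] {0L, 1L, 2L, 3L, 4L, 5L, 6L, 7L});
    // With tick = 0 and period = 2 the loop samples t = 0, 2, 4, 6,
    // reading values 2, 7, 3, 6; the componentwise max is 7.
    v.getMaximumPeriodicCapacity(0, 2);   // <memory:7, vCores:7>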
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6bf42e48/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationSystemTestUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationSystemTestUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationSystemTestUtil.java
index 1ff6a1a..e99842e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationSystemTestUtil.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationSystemTestUtil.java
@@ -56,6 +56,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairSchedule
import org.apache.hadoop.yarn.server.resourcemanager.security.ClientToAMTokenSecretManagerInRM;
import org.apache.hadoop.yarn.server.resourcemanager.security.NMTokenSecretManagerInRM;
import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager;
+import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
import org.junit.Assert;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
@@ -414,6 +415,19 @@ public class ReservationSystemTestUtil {
return req;
}
+ public static RLESparseResourceAllocation
+ generateRLESparseResourceAllocation(int[] alloc, long[] timeSteps) {
+ TreeMap<Long, Resource> allocationsMap = new TreeMap<>();
+ for (int i = 0; i < alloc.length; i++) {
+ allocationsMap.put(timeSteps[i],
+ Resource.newInstance(alloc[i], alloc[i]));
+ }
+ RLESparseResourceAllocation rleVector =
+ new RLESparseResourceAllocation(allocationsMap,
+ new DefaultResourceCalculator());
+ return rleVector;
+ }
+
public static Resource calculateClusterResource(int numContainers) {
return Resource.newInstance(numContainers * 1024, numContainers);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6bf42e48/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestPeriodicRLESparseResourceAllocation.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestPeriodicRLESparseResourceAllocation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestPeriodicRLESparseResourceAllocation.java
new file mode 100644
index 0000000..554eb58
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestPeriodicRLESparseResourceAllocation.java
@@ -0,0 +1,142 @@
+/******************************************************************************
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *****************************************************************************/
+
+package org.apache.hadoop.yarn.server.resourcemanager.reservation;
+
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.junit.Assert;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Testing the class PeriodicRLESparseResourceAllocation.
+ */
+public class TestPeriodicRLESparseResourceAllocation {
+
+ private static final Logger LOG = LoggerFactory
+ .getLogger(TestPeriodicRLESparseResourceAllocation.class);
+
+ @Test
+ public void testPeriodicCapacity() {
+ int[] alloc = {10, 7, 5, 2, 0};
+ long[] timeSteps = {0L, 5L, 10L, 15L, 19L};
+ RLESparseResourceAllocation rleSparseVector =
+ ReservationSystemTestUtil.generateRLESparseResourceAllocation(
+ alloc, timeSteps);
+ PeriodicRLESparseResourceAllocation periodicVector =
+ new PeriodicRLESparseResourceAllocation(rleSparseVector, 20L);
+ LOG.info(periodicVector.toString());
+ Assert.assertEquals(Resource.newInstance(5, 5),
+ periodicVector.getCapacityAtTime(10L));
+ Assert.assertEquals(Resource.newInstance(10, 10),
+ periodicVector.getCapacityAtTime(20L));
+ Assert.assertEquals(Resource.newInstance(7, 7),
+ periodicVector.getCapacityAtTime(27L));
+ Assert.assertEquals(Resource.newInstance(5, 5),
+ periodicVector.getCapacityAtTime(50L));
+ }
+
+ @Test
+ public void testMaxPeriodicCapacity() {
+ int[] alloc = {2, 5, 7, 10, 3, 4, 6, 8};
+ long[] timeSteps = {0L, 1L, 2L, 3L, 4L, 5L, 6L, 7L};
+ RLESparseResourceAllocation rleSparseVector =
+ ReservationSystemTestUtil.generateRLESparseResourceAllocation(
+ alloc, timeSteps);
+ PeriodicRLESparseResourceAllocation periodicVector =
+ new PeriodicRLESparseResourceAllocation(rleSparseVector, 8L);
+ LOG.info(periodicVector.toString());
+ Assert.assertEquals(
+ periodicVector.getMaximumPeriodicCapacity(0, 1),
+ Resource.newInstance(10, 10));
+ Assert.assertEquals(
+ periodicVector.getMaximumPeriodicCapacity(8, 2),
+ Resource.newInstance(7, 7));
+ Assert.assertEquals(
+ periodicVector.getMaximumPeriodicCapacity(16, 3),
+ Resource.newInstance(10, 10));
+ Assert.assertEquals(
+ periodicVector.getMaximumPeriodicCapacity(17, 4),
+ Resource.newInstance(5, 5));
+ Assert.assertEquals(
+ periodicVector.getMaximumPeriodicCapacity(32, 5),
+ Resource.newInstance(4, 4));
+ }
+
+ @Test
+ public void testSetCapacityInInterval() {
+ int[] alloc = {2, 5, 0};
+ long[] timeSteps = {1L, 2L, 3L};
+ RLESparseResourceAllocation rleSparseVector =
+ ReservationSystemTestUtil.generateRLESparseResourceAllocation(
+ alloc, timeSteps);
+ PeriodicRLESparseResourceAllocation periodicVector =
+ new PeriodicRLESparseResourceAllocation(rleSparseVector, 10L);
+ ReservationInterval interval = new ReservationInterval(5L, 10L);
+ periodicVector.addInterval(
+ interval, Resource.newInstance(8, 8));
+ Assert.assertEquals(Resource.newInstance(8, 8),
+ periodicVector.getCapacityAtTime(5L));
+ Assert.assertEquals(Resource.newInstance(8, 8),
+ periodicVector.getCapacityAtTime(9L));
+ Assert.assertEquals(Resource.newInstance(0, 0),
+ periodicVector.getCapacityAtTime(10L));
+ Assert.assertEquals(Resource.newInstance(0, 0),
+ periodicVector.getCapacityAtTime(0L));
+ Assert.assertFalse(periodicVector.addInterval(
+ new ReservationInterval(7L, 12L), Resource.newInstance(8, 8)));
+ }
+
+ @Test
+ public void testRemoveInterval() {
+ int[] alloc = {2, 5, 3, 4, 0};
+ long[] timeSteps = {1L, 3L, 5L, 7L, 9L};
+ RLESparseResourceAllocation rleSparseVector =
+ ReservationSystemTestUtil.generateRLESparseResourceAllocation(
+ alloc, timeSteps);
+ PeriodicRLESparseResourceAllocation periodicVector =
+ new PeriodicRLESparseResourceAllocation(rleSparseVector, 10L);
+ ReservationInterval interval = new ReservationInterval(3L, 7L);
+ Assert.assertTrue(periodicVector.removeInterval(
+ interval, Resource.newInstance(3, 3)));
+ Assert.assertEquals(Resource.newInstance(2, 2),
+ periodicVector.getCapacityAtTime(1L));
+ Assert.assertEquals(Resource.newInstance(2, 2),
+ periodicVector.getCapacityAtTime(2L));
+ Assert.assertEquals(Resource.newInstance(2, 2),
+ periodicVector.getCapacityAtTime(3L));
+ Assert.assertEquals(Resource.newInstance(2, 2),
+ periodicVector.getCapacityAtTime(4L));
+ Assert.assertEquals(Resource.newInstance(0, 0),
+ periodicVector.getCapacityAtTime(5L));
+ Assert.assertEquals(Resource.newInstance(0, 0),
+ periodicVector.getCapacityAtTime(6L));
+ Assert.assertEquals(Resource.newInstance(4, 4),
+ periodicVector.getCapacityAtTime(7L));
+
+ // invalid interval
+ Assert.assertFalse(periodicVector.removeInterval(
+ new ReservationInterval(7L, 12L), Resource.newInstance(1, 1)));
+
+ // invalid capacity
+ Assert.assertFalse(periodicVector.removeInterval(
+ new ReservationInterval(2L, 4L), Resource.newInstance(8, 8)));
+
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6bf42e48/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestRLESparseResourceAllocation.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestRLESparseResourceAllocation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestRLESparseResourceAllocation.java
index f8d2a4a..bfe46e1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestRLESparseResourceAllocation.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestRLESparseResourceAllocation.java
@@ -524,7 +524,61 @@ public class TestRLESparseResourceAllocation {
}
}
- private void setupArrays(TreeMap<Long, Resource> a, TreeMap<Long, Resource> b) {
+ @Test
+ public void testMaxPeriodicCapacity() {
+ long[] timeSteps = {0L, 1L, 2L, 3L, 4L, 5L, 6L, 7L};
+ int[] alloc = {2, 5, 7, 10, 3, 4, 6, 8};
+ RLESparseResourceAllocation rleSparseVector =
+ ReservationSystemTestUtil.generateRLESparseResourceAllocation(
+ alloc, timeSteps);
+ LOG.info(rleSparseVector.toString());
+ Assert.assertEquals(
+ rleSparseVector.getMaximumPeriodicCapacity(0, 1),
+ Resource.newInstance(10, 10));
+ Assert.assertEquals(
+ rleSparseVector.getMaximumPeriodicCapacity(0, 2),
+ Resource.newInstance(7, 7));
+ Assert.assertEquals(
+ rleSparseVector.getMaximumPeriodicCapacity(0, 3),
+ Resource.newInstance(10, 10));
+ Assert.assertEquals(
+ rleSparseVector.getMaximumPeriodicCapacity(0, 4),
+ Resource.newInstance(3, 3));
+ Assert.assertEquals(
+ rleSparseVector.getMaximumPeriodicCapacity(0, 5),
+ Resource.newInstance(4, 4));
+ Assert.assertEquals(
+ rleSparseVector.getMaximumPeriodicCapacity(0, 5),
+ Resource.newInstance(4, 4));
+ Assert.assertEquals(
+ rleSparseVector.getMaximumPeriodicCapacity(7, 5),
+ Resource.newInstance(8, 8));
+ Assert.assertEquals(
+ rleSparseVector.getMaximumPeriodicCapacity(10, 3),
+ Resource.newInstance(0, 0));
+ }
+
+ @Test
+ public void testGetMinimumCapacityInInterval() {
+ long[] timeSteps = {0L, 1L, 2L, 3L, 4L, 5L, 6L, 7L};
+ int[] alloc = {2, 5, 7, 10, 3, 4, 0, 8};
+ RLESparseResourceAllocation rleSparseVector =
+ ReservationSystemTestUtil.generateRLESparseResourceAllocation(
+ alloc, timeSteps);
+ LOG.info(rleSparseVector.toString());
+ Assert.assertEquals(
+ rleSparseVector.getMinimumCapacityInInterval(
+ new ReservationInterval(1L, 3L)), Resource.newInstance(5, 5));
+ Assert.assertEquals(
+ rleSparseVector.getMinimumCapacityInInterval(
+ new ReservationInterval(2L, 5L)), Resource.newInstance(3, 3));
+ Assert.assertEquals(
+ rleSparseVector.getMinimumCapacityInInterval(
+ new ReservationInterval(1L, 7L)), Resource.newInstance(0, 0));
+ }
+
+ private void setupArrays(
+ TreeMap<Long, Resource> a, TreeMap<Long, Resource> b) {
a.put(10L, Resource.newInstance(5, 5));
a.put(20L, Resource.newInstance(10, 10));
a.put(30L, Resource.newInstance(15, 15));
[26/50] [abbrv] hadoop git commit: HDFS-11488. JN log segment syncing
should support HA upgrade. Contributed by Hanisha Koneru.
Posted by ae...@apache.org.
HDFS-11488. JN log segment syncing should support HA upgrade. Contributed by Hanisha Koneru.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/08fb82d6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/08fb82d6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/08fb82d6
Branch: refs/heads/HDFS-7240
Commit: 08fb82d6d1217695c1cbee14608b172fa76a5569
Parents: fd5cb2c
Author: Arpit Agarwal <ar...@apache.org>
Authored: Wed May 3 15:46:08 2017 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Wed May 3 15:46:08 2017 -0700
----------------------------------------------------------------------
.../hadoop-common/src/site/markdown/Metrics.md | 4 ++--
.../org/apache/hadoop/hdfs/DFSConfigKeys.java | 8 ++++----
.../server/blockmanagement/DatanodeManager.java | 7 ++++---
.../apache/hadoop/hdfs/server/common/Util.java | 12 ++++++------
.../hadoop/hdfs/server/datanode/DNConf.java | 7 ++++---
.../hdfs/server/datanode/FileIoProvider.java | 4 ++--
.../server/datanode/ProfilingFileIoEvents.java | 19 ++++++++++---------
.../src/main/resources/hdfs-default.xml | 11 +++++++++++
.../blockmanagement/TestSlowDiskTracker.java | 4 ++--
.../hdfs/server/datanode/TestDataNodeMXBean.java | 4 ++--
.../datanode/TestDataNodeVolumeMetrics.java | 4 ++--
.../namenode/TestNameNodeStatusMXBean.java | 5 +++--
.../hadoop/tools/TestHdfsConfigFields.java | 2 --
13 files changed, 52 insertions(+), 39 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/08fb82d6/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
index a8bdbeb..336ad85 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
@@ -334,8 +334,8 @@ FsVolume
Per-volume metrics contain Datanode Volume IO related statistics. Per-volume
metrics are off by default. They can be enabled by setting `dfs.datanode
-.fileio.profiling.sampling.fraction` to a fraction between 0.0 and 1.0.
-Setting this value to 0.0 would mean profiling is not enabled. But enabling
+.fileio.profiling.sampling.percentage` to an integer value between 1 and 100.
+Setting this value to 0 disables profiling. Note that enabling
per-volume metrics may have a performance impact. Each metrics record
contains tags such as Hostname as additional information along with metrics.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/08fb82d6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 3fa383b..0ca344c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -731,10 +731,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final boolean
DFS_DATANODE_ENABLE_FILEIO_FAULT_INJECTION_DEFAULT = false;
public static final String
- DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY =
- "dfs.datanode.fileio.profiling.sampling.fraction";
- public static final double
- DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_DEFAULT = 0.0;
+ DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_KEY =
+ "dfs.datanode.fileio.profiling.sampling.percentage";
+ public static final int
+ DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_DEFAULT = 0;
//Keys with no defaults
public static final String DFS_DATANODE_PLUGINS_KEY = "dfs.datanode.plugins";
http://git-wip-us.apache.org/repos/asf/hadoop/blob/08fb82d6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index c7bdca9..a61aa78 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -215,9 +215,10 @@ public class DatanodeManager {
this.dataNodePeerStatsEnabled = conf.getBoolean(
DFSConfigKeys.DFS_DATANODE_PEER_STATS_ENABLED_KEY,
DFSConfigKeys.DFS_DATANODE_PEER_STATS_ENABLED_DEFAULT);
- this.dataNodeDiskStatsEnabled = Util.isDiskStatsEnabled(conf.getDouble(
- DFSConfigKeys.DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY,
- DFSConfigKeys.DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_DEFAULT));
+ this.dataNodeDiskStatsEnabled = Util.isDiskStatsEnabled(conf.getInt(
+ DFSConfigKeys.DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_KEY,
+ DFSConfigKeys.
+ DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_DEFAULT));
final Timer timer = new Timer();
this.slowPeerTracker = dataNodePeerStatsEnabled ?
http://git-wip-us.apache.org/repos/asf/hadoop/blob/08fb82d6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java
index fdb09df..e9ceeb0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java
@@ -389,17 +389,17 @@ public final class Util {
return addrsList;
}
- public static boolean isDiskStatsEnabled(double fileIOSamplingFraction) {
+ public static boolean isDiskStatsEnabled(int fileIOSamplingPercentage) {
final boolean isEnabled;
- if (fileIOSamplingFraction < 0.000001) {
+ if (fileIOSamplingPercentage <= 0) {
LOG.info(DFSConfigKeys
- .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY + " set to "
- + fileIOSamplingFraction + ". Disabling file IO profiling");
+ .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_KEY + " set to "
+ + fileIOSamplingPercentage + ". Disabling file IO profiling");
isEnabled = false;
} else {
LOG.info(DFSConfigKeys
- .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY + " set to "
- + fileIOSamplingFraction + ". Enabling file IO profiling");
+ .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_KEY + " set to "
+ + fileIOSamplingPercentage + ". Enabling file IO profiling");
isEnabled = true;
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/08fb82d6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
index 21ffccc..8e5b597 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
@@ -175,9 +175,10 @@ public class DNConf {
this.peerStatsEnabled = getConf().getBoolean(
DFSConfigKeys.DFS_DATANODE_PEER_STATS_ENABLED_KEY,
DFSConfigKeys.DFS_DATANODE_PEER_STATS_ENABLED_DEFAULT);
- this.diskStatsEnabled = Util.isDiskStatsEnabled(getConf().getDouble(
- DFSConfigKeys.DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY,
- DFSConfigKeys.DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_DEFAULT));
+ this.diskStatsEnabled = Util.isDiskStatsEnabled(getConf().getInt(
+ DFSConfigKeys.DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_KEY,
+ DFSConfigKeys.
+ DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_DEFAULT));
this.outliersReportIntervalMs = getConf().getTimeDuration(
DFS_DATANODE_OUTLIERS_REPORT_INTERVAL_KEY,
DFS_DATANODE_OUTLIERS_REPORT_INTERVAL_DEFAULT,
http://git-wip-us.apache.org/repos/asf/hadoop/blob/08fb82d6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FileIoProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FileIoProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FileIoProvider.java
index 5508e0b..694eadd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FileIoProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FileIoProvider.java
@@ -62,8 +62,8 @@ import static org.apache.hadoop.hdfs.server.datanode.FileIoProvider.OPERATION.*;
*
* Behavior can be injected into these events by enabling the
* profiling and/or fault injection event hooks through
- * {@link DFSConfigKeys#DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY} and
- * {@link DFSConfigKeys#DFS_DATANODE_ENABLE_FILEIO_FAULT_INJECTION_KEY}.
+ * {@link DFSConfigKeys#DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_KEY}
+ * and {@link DFSConfigKeys#DFS_DATANODE_ENABLE_FILEIO_FAULT_INJECTION_KEY}.
* These event hooks are disabled by default.
*
* Most functions accept an optional {@link FsVolumeSpi} parameter for
http://git-wip-us.apache.org/repos/asf/hadoop/blob/08fb82d6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProfilingFileIoEvents.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProfilingFileIoEvents.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProfilingFileIoEvents.java
index 35118b2..83ee5f6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProfilingFileIoEvents.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProfilingFileIoEvents.java
@@ -44,18 +44,19 @@ class ProfilingFileIoEvents {
public ProfilingFileIoEvents(@Nullable Configuration conf) {
if (conf != null) {
- double fileIOSamplingFraction = conf.getDouble(
- DFSConfigKeys.DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY,
+ int fileIOSamplingPercentage = conf.getInt(
+ DFSConfigKeys.DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_KEY,
DFSConfigKeys
- .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_DEFAULT);
- isEnabled = Util.isDiskStatsEnabled(fileIOSamplingFraction);
- if (fileIOSamplingFraction > 1) {
+ .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_DEFAULT);
+ isEnabled = Util.isDiskStatsEnabled(fileIOSamplingPercentage);
+ if (fileIOSamplingPercentage > 100) {
LOG.warn(DFSConfigKeys
- .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY +
- " value cannot be more than 1. Setting value to 1");
- fileIOSamplingFraction = 1;
+ .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_KEY +
+ " value cannot be more than 100. Setting value to 100");
+ fileIOSamplingPercentage = 100;
}
- sampleRangeMax = (int) (fileIOSamplingFraction * Integer.MAX_VALUE);
+ sampleRangeMax = (int) ((double) fileIOSamplingPercentage / 100 *
+ Integer.MAX_VALUE);
} else {
isEnabled = false;
sampleRangeMax = 0;
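The scaling in this hunk maps a percentage in [0, 100] onto a threshold in [0, Integer.MAX_VALUE]; elsewhere in the class (outside this hunk) a uniformly drawn random int is compared against sampleRangeMax to decide whether a given I/O event gets profiled. A self-contained sketch of that idea, with the comparison step paraphrased rather than quoted from the patch:

    // p percent of non-negative ints fall below p/100 * Integer.MAX_VALUE,
    // so comparing a uniform draw against the threshold samples ~p% of events.
    static boolean shouldSample(int percentage) {
      int clamped = Math.min(Math.max(percentage, 0), 100);
      int sampleRangeMax = (int) ((double) clamped / 100 * Integer.MAX_VALUE);
      return java.util.concurrent.ThreadLocalRandom.current()
          .nextInt(Integer.MAX_VALUE) < sampleRangeMax;
    }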
http://git-wip-us.apache.org/repos/asf/hadoop/blob/08fb82d6/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 7fcea01..0f33b70 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -2022,6 +2022,17 @@
</property>
<property>
+ <name>dfs.datanode.fileio.profiling.sampling.percentage</name>
+ <value>0</value>
+ <description>
+ This setting controls the percentage of file I/O events which will be
+ profiled for DataNode disk statistics. The default value of 0 disables
+ disk statistics. Set to an integer value between 1 and 100 to enable disk
+ statistics.
+ </description>
+</property>
+
+<property>
<name>hadoop.user.group.metrics.percentiles.intervals</name>
<value></value>
<description>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/08fb82d6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSlowDiskTracker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSlowDiskTracker.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSlowDiskTracker.java
index 16dfab2..172400d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSlowDiskTracker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSlowDiskTracker.java
@@ -28,7 +28,7 @@ import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.collect.ImmutableMap;
import org.apache.hadoop.conf.Configuration;
import static org.apache.hadoop.hdfs.DFSConfigKeys
- .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY;
+ .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys
.DFS_DATANODE_OUTLIERS_REPORT_INTERVAL_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY;
@@ -80,7 +80,7 @@ public class TestSlowDiskTracker {
static {
conf = new HdfsConfiguration();
conf.setLong(DFS_HEARTBEAT_INTERVAL_KEY, 1L);
- conf.setDouble(DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY, 1.0);
+ conf.setInt(DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_KEY, 100);
conf.setTimeDuration(DFS_DATANODE_OUTLIERS_REPORT_INTERVAL_KEY,
OUTLIERS_REPORT_INTERVAL, TimeUnit.MILLISECONDS);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/08fb82d6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
index b80976a..faead18 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
@@ -218,8 +218,8 @@ public class TestDataNodeMXBean {
@Test
public void testDataNodeMXBeanSlowDisksEnabled() throws Exception {
Configuration conf = new Configuration();
- conf.setDouble(DFSConfigKeys
- .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY, 1.0);
+ conf.setInt(DFSConfigKeys
+ .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_KEY, 100);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
try {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/08fb82d6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeMetrics.java
index 03e1dee..0f41d23 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeMetrics.java
@@ -121,8 +121,8 @@ public class TestDataNodeVolumeMetrics {
private MiniDFSCluster setupClusterForVolumeMetrics() throws IOException {
Configuration conf = new HdfsConfiguration();
- conf.setDouble(DFSConfigKeys
- .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY, 1.0);
+ conf.setInt(DFSConfigKeys
+ .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_KEY, 100);
SimulatedFSDataset.setFactory(conf);
return new MiniDFSCluster.Builder(conf)
.numDataNodes(NUM_DATANODES)
http://git-wip-us.apache.org/repos/asf/hadoop/blob/08fb82d6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeStatusMXBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeStatusMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeStatusMXBean.java
index 8fe734e..f9bfc37 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeStatusMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeStatusMXBean.java
@@ -105,8 +105,9 @@ public class TestNameNodeStatusMXBean {
@Test
public void testNameNodeMXBeanSlowDisksEnabled() throws Exception {
Configuration conf = new Configuration();
- conf.setDouble(
- DFSConfigKeys.DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY, 1.0);
+ conf.setInt(
+ DFSConfigKeys.DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_KEY,
+ 100);
conf.setTimeDuration(
DFSConfigKeys.DFS_DATANODE_OUTLIERS_REPORT_INTERVAL_KEY,
1000, TimeUnit.MILLISECONDS);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/08fb82d6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
index 1fdf713..f23b266 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
@@ -105,8 +105,6 @@ public class TestHdfsConfigFields extends TestConfigurationFieldsBase {
.add(DFSConfigKeys.DFS_NAMENODE_STARTUP_KEY);
configurationPropsToSkipCompare.add(DFSConfigKeys
.DFS_DATANODE_ENABLE_FILEIO_FAULT_INJECTION_KEY);
- configurationPropsToSkipCompare.add(DFSConfigKeys
- .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY);
// Allocate
xmlPropsToSkipCompare = new HashSet<String>();
[11/50] [abbrv] hadoop git commit: HDFS-11593. Change
SimpleHttpProxyHandler#exceptionCaught log level from info to debug.
Contributed by Xiaobing Zhou.
Posted by ae...@apache.org.
HDFS-11593. Change SimpleHttpProxyHandler#exceptionCaught log level from info to debug. Contributed by Xiaobing Zhou.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1058b408
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1058b408
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1058b408
Branch: refs/heads/HDFS-7240
Commit: 1058b4084392fbc18522ba126313d4a77c09d797
Parents: a3a615e
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Mon May 1 16:12:51 2017 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Mon May 1 16:12:51 2017 -0700
----------------------------------------------------------------------
.../hadoop/hdfs/server/datanode/web/SimpleHttpProxyHandler.java | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1058b408/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/SimpleHttpProxyHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/SimpleHttpProxyHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/SimpleHttpProxyHandler.java
index ffa7681..9d659f1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/SimpleHttpProxyHandler.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/SimpleHttpProxyHandler.java
@@ -144,7 +144,9 @@ class SimpleHttpProxyHandler extends SimpleChannelInboundHandler<HttpRequest> {
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
- LOG.info("Proxy for " + uri + " failed. cause: ", cause);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Proxy for " + uri + " failed. cause: ", cause);
+ }
if (proxiedChannel != null) {
proxiedChannel.close();
proxiedChannel = null;
[29/50] [abbrv] hadoop git commit: HDFS-11643. Add shouldReplicate
option to create builder. Contributed by SammiChen.
Posted by ae...@apache.org.
HDFS-11643. Add shouldReplicate option to create builder. Contributed by SammiChen.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c2a52ef9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c2a52ef9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c2a52ef9
Branch: refs/heads/HDFS-7240
Commit: c2a52ef9c29459ff9ef3e23b29e14912bfdb1405
Parents: 81092b1
Author: Andrew Wang <wa...@apache.org>
Authored: Thu May 4 11:39:14 2017 -0700
Committer: Andrew Wang <wa...@apache.org>
Committed: Thu May 4 11:39:14 2017 -0700
----------------------------------------------------------------------
.../java/org/apache/hadoop/fs/CreateFlag.java | 8 ++-
.../hadoop/hdfs/DistributedFileSystem.java | 59 ++++++++++++++++----
.../hadoop/hdfs/protocol/ClientProtocol.java | 10 +++-
.../hadoop/hdfs/protocolPB/PBHelperClient.java | 7 +++
.../src/main/proto/ClientNamenodeProtocol.proto | 1 +
.../hdfs/server/balancer/NameNodeConnector.java | 13 ++++-
.../hdfs/server/namenode/FSDirWriteFileOp.java | 31 +++++-----
.../hdfs/server/namenode/FSNamesystem.java | 13 ++++-
.../hadoop/hdfs/TestErasureCodingPolicies.java | 58 +++++++++++++++++++
.../hdfs/server/balancer/TestBalancer.java | 2 +
10 files changed, 170 insertions(+), 32 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c2a52ef9/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java
index d480fc9..383d65a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java
@@ -110,7 +110,13 @@ public enum CreateFlag {
* 'local' means the same host as the client is being run on.
*/
@InterfaceAudience.LimitedPrivate({"HBase"})
- NO_LOCAL_WRITE((short) 0x40);
+ NO_LOCAL_WRITE((short) 0x40),
+
+ /**
+ * Enforce the file to be a replicated file, no matter what its parent
+ * directory's replication or erasure coding policy is.
+ */
+ SHOULD_REPLICATE((short) 0x80);
private final short mode;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c2a52ef9/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 429f4c2..9e89bc5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -100,6 +100,7 @@ import org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
+import org.apache.commons.lang.StringUtils;
import javax.annotation.Nonnull;
@@ -462,17 +463,20 @@ public class DistributedFileSystem extends FileSystem {
/**
* Same as
* {@link #create(Path, FsPermission, EnumSet<CreateFlag>, int, short, long,
- * Progressable, ChecksumOpt)} with the addition of favoredNodes that is a
- * hint to where the namenode should place the file blocks.
- * The favored nodes hint is not persisted in HDFS. Hence it may be honored
- * at the creation time only. And with favored nodes, blocks will be pinned
- * on the datanodes to prevent balancing move the block. HDFS could move the
- * blocks during replication, to move the blocks from favored nodes. A value
- * of null means no favored nodes for this create.
- * Another addition is ecPolicyName. A non-null ecPolicyName specifies an
+ * Progressable, ChecksumOpt)} with a few additions. The first addition is
+ * favoredNodes, a hint to where the namenode should place the file
+ * blocks. The favored nodes hint is not persisted in HDFS, so it may be
+ * honored at creation time only. With favored nodes, blocks will be
+ * pinned on the datanodes to prevent the balancer from moving them,
+ * although HDFS may still move blocks off the favored nodes during
+ * replication. A value of null means no favored nodes for this create.
+ * The second addition is ecPolicyName. A non-null ecPolicyName specifies an
* explicit erasure coding policy for this file, overriding the inherited
- * policy. A null ecPolicyName means the file will inherit its EC policy from
- * an ancestor (the default).
+ * policy. A null ecPolicyName means the file will inherit its EC policy or
+ * replication policy from its ancestor (the default).
+ * ecPolicyName and SHOULD_REPLICATE CreateFlag are mutually exclusive. It's
+ * invalid to set both SHOULD_REPLICATE and a non-null ecPolicyName.
+ *
*/
private HdfsDataOutputStream create(final Path f,
final FsPermission permission, final EnumSet<CreateFlag> flag,
@@ -2669,6 +2673,7 @@ public class DistributedFileSystem extends FileSystem {
private final DistributedFileSystem dfs;
private InetSocketAddress[] favoredNodes = null;
private String ecPolicyName = null;
+ private boolean shouldReplicate = false;
public HdfsDataOutputStreamBuilder(DistributedFileSystem dfs, Path path) {
super(dfs, path);
@@ -2690,6 +2695,14 @@ public class DistributedFileSystem extends FileSystem {
return ecPolicyName;
}
+ /**
+ * Force the file to be a striped file with erasure coding policy
+ * 'policyName', regardless of its parent directory's replication or
+ * erasure coding policy. Don't call both this method and replicate()
+ * on the same builder, since the two settings are mutually exclusive.
+ */
public HdfsDataOutputStreamBuilder setEcPolicyName(
@Nonnull final String policyName) {
Preconditions.checkNotNull(policyName);
@@ -2697,9 +2710,33 @@ public class DistributedFileSystem extends FileSystem {
return this;
}
+ public boolean shouldReplicate() {
+ return shouldReplicate;
+ }
+
+ /**
+ * Force the file to be a replicated file, regardless of its parent
+ * directory's replication or erasure coding policy. Don't call both
+ * this method and setEcPolicyName() on the same builder, since the two
+ * settings are mutually exclusive.
+ */
+ public HdfsDataOutputStreamBuilder replicate() {
+ shouldReplicate = true;
+ return this;
+ }
+
@Override
public HdfsDataOutputStream build() throws IOException {
- return dfs.create(getPath(), getPermission(), getFlags(),
+ Preconditions.checkState(
+ !(shouldReplicate() && (!StringUtils.isEmpty(getEcPolicyName()))),
+ "shouldReplicate and ecPolicyName are " +
+ "exclusive parameters. Set both is not allowed!");
+
+ EnumSet<CreateFlag> createFlags = getFlags();
+ if (shouldReplicate()) {
+ createFlags.add(CreateFlag.SHOULD_REPLICATE);
+ }
+ return dfs.create(getPath(), getPermission(), createFlags,
getBufferSize(), getReplication(), getBlockSize(),
getProgress(), getChecksumOpt(), getFavoredNodes(),
getEcPolicyName());
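
Taken together, the builder changes above amount to the following client-side usage. This is a minimal sketch, assuming an HDFS cluster with an erasure-coded directory; the path is hypothetical, standard org.apache.hadoop imports are assumed, and only methods that appear in this diff (newFSDataOutputStreamBuilder, replicate, setFlags, build) are used:

    // Sketch: force a replicated file inside an EC directory.
    Configuration conf = new Configuration();
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
    Path file = new Path("/striped/replicated-file");  // hypothetical path
    FSDataOutputStream out = dfs.newFSDataOutputStreamBuilder(file)
        .replicate()                              // sets SHOULD_REPLICATE
        .setFlags(EnumSet.of(CreateFlag.CREATE))  // create a new file
        .build();
    out.close();
    // build() would throw if setEcPolicyName() had also been called.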
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c2a52ef9/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index 117b9dd..b178ddc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -154,8 +154,10 @@ public interface ClientProtocol {
* @param src path of the file being created.
* @param masked masked permission.
* @param clientName name of the current client.
- * @param flag indicates whether the file should be
- * overwritten if it already exists or create if it does not exist or append.
+ * @param flag indicates whether the file should be overwritten if it already
+ * exists, created if it does not exist, or appended to, and whether
+ * the file should be forced to be a replicated file regardless of
+ * its ancestor's replication or erasure coding policy.
* @param createParent create missing parent directory if true
* @param replication block replication factor.
* @param blockSize maximum block size.
@@ -163,7 +165,9 @@ public interface ClientProtocol {
* @param ecPolicyName the name of erasure coding policy. A null value means
* this file will inherit its parent directory's policy,
* either traditional replication or erasure coding
- * policy.
+ * policy. ecPolicyName and the SHOULD_REPLICATE CreateFlag
+ * are mutually exclusive. It's invalid to set both the
+ * SHOULD_REPLICATE flag and a non-null ecPolicyName.
*
* @return the status of the created file, it could be null if the server
* doesn't support returning the file status
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c2a52ef9/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
index 6ca3541..2b8f102 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
@@ -1753,6 +1753,9 @@ public class PBHelperClient {
if (flag.contains(CreateFlag.NEW_BLOCK)) {
value |= CreateFlagProto.NEW_BLOCK.getNumber();
}
+ if (flag.contains(CreateFlag.SHOULD_REPLICATE)) {
+ value |= CreateFlagProto.SHOULD_REPLICATE.getNumber();
+ }
return value;
}
@@ -1966,6 +1969,10 @@ public class PBHelperClient {
== CreateFlagProto.NEW_BLOCK_VALUE) {
result.add(CreateFlag.NEW_BLOCK);
}
+ if ((flag & CreateFlagProto.SHOULD_REPLICATE.getNumber())
+ == CreateFlagProto.SHOULD_REPLICATE.getNumber()) {
+ result.add(CreateFlag.SHOULD_REPLICATE);
+ }
return new EnumSetWritable<>(result, CreateFlag.class);
}
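
On the wire, the create flags travel as a single int bitmask, and the two hunks above are the encode and decode halves of that round trip. A minimal sketch of the SHOULD_REPLICATE bit's journey (the 0x80 value comes from the proto definition below; variable names are illustrative):

    // Encode: EnumSet -> bitmask (client side).
    EnumSet<CreateFlag> flag =
        EnumSet.of(CreateFlag.CREATE, CreateFlag.SHOULD_REPLICATE);
    int value = 0;
    if (flag.contains(CreateFlag.SHOULD_REPLICATE)) {
      value |= 0x80;  // CreateFlagProto.SHOULD_REPLICATE
    }
    // Decode: bitmask -> EnumSet (server side).
    EnumSet<CreateFlag> result = EnumSet.noneOf(CreateFlag.class);
    if ((value & 0x80) == 0x80) {
      result.add(CreateFlag.SHOULD_REPLICATE);
    }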
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c2a52ef9/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
index b8bd6bf..eee3c4d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
@@ -68,6 +68,7 @@ enum CreateFlagProto {
APPEND = 0x04; // Append to a file
LAZY_PERSIST = 0x10; // File with reduced durability guarantees.
NEW_BLOCK = 0x20; // Write data to a new block when appending
+ SHOULD_REPLICATE = 0x80; // Force the file to be a replicated file
}
message CreateRequestProto {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c2a52ef9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
index e62dd08..88e40ee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
@@ -25,15 +25,18 @@ import java.net.URI;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
+import java.util.EnumSet;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
+import com.google.common.base.Preconditions;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsServerDefaults;
@@ -241,7 +244,15 @@ public class NameNodeConnector implements Closeable {
IOUtils.closeStream(fs.append(idPath));
fs.delete(idPath, true);
}
- final FSDataOutputStream fsout = fs.create(idPath, false);
+
+ final FSDataOutputStream fsout = fs.newFSDataOutputStreamBuilder(idPath)
+ .replicate()
+ .setFlags(EnumSet.of(CreateFlag.CREATE))
+ .build();
+
+ Preconditions.checkState(!fs.getFileStatus(idPath).isErasureCoded(),
+ "Id File should be a replicate file");
+
// mark balancer idPath to be deleted during filesystem closure
fs.deleteOnExit(idPath);
if (write2IdFile) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c2a52ef9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
index 7bf2916..a62cddd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
@@ -352,7 +352,7 @@ class FSDirWriteFileOp {
EnumSet<CreateFlag> flag, boolean createParent,
short replication, long blockSize,
FileEncryptionInfo feInfo, INode.BlocksMapUpdateInfo toRemoveBlocks,
- String ecPolicyName, boolean logRetryEntry)
+ boolean shouldReplicate, String ecPolicyName, boolean logRetryEntry)
throws IOException {
assert fsn.hasWriteLock();
boolean overwrite = flag.contains(CreateFlag.OVERWRITE);
@@ -386,7 +386,8 @@ class FSDirWriteFileOp {
FSDirMkdirOp.createAncestorDirectories(fsd, iip, permissions);
if (parent != null) {
iip = addFile(fsd, parent, iip.getLastLocalName(), permissions,
- replication, blockSize, holder, clientMachine, ecPolicyName);
+ replication, blockSize, holder, clientMachine, shouldReplicate,
+ ecPolicyName);
newNode = iip != null ? iip.getLastINode().asFile() : null;
}
if (newNode == null) {
@@ -522,8 +523,8 @@ class FSDirWriteFileOp {
private static INodesInPath addFile(
FSDirectory fsd, INodesInPath existing, byte[] localName,
PermissionStatus permissions, short replication, long preferredBlockSize,
- String clientName, String clientMachine, String ecPolicyName)
- throws IOException {
+ String clientName, String clientMachine, boolean shouldReplicate,
+ String ecPolicyName) throws IOException {
Preconditions.checkNotNull(existing);
long modTime = now();
@@ -531,16 +532,18 @@ class FSDirWriteFileOp {
fsd.writeLock();
try {
boolean isStriped = false;
- ErasureCodingPolicy ecPolicy;
- if (!StringUtils.isEmpty(ecPolicyName)) {
- ecPolicy = FSDirErasureCodingOp.getErasureCodingPolicyByName(
- fsd.getFSNamesystem(), ecPolicyName);
- } else {
- ecPolicy = FSDirErasureCodingOp.unprotectedGetErasureCodingPolicy(
- fsd.getFSNamesystem(), existing);
- }
- if (ecPolicy != null) {
- isStriped = true;
+ ErasureCodingPolicy ecPolicy = null;
+ if (!shouldReplicate) {
+ if (!StringUtils.isEmpty(ecPolicyName)) {
+ ecPolicy = FSDirErasureCodingOp.getErasureCodingPolicyByName(
+ fsd.getFSNamesystem(), ecPolicyName);
+ } else {
+ ecPolicy = FSDirErasureCodingOp.unprotectedGetErasureCodingPolicy(
+ fsd.getFSNamesystem(), existing);
+ }
+ if (ecPolicy != null) {
+ isStriped = true;
+ }
}
final BlockType blockType = isStriped ?
BlockType.STRIPED : BlockType.CONTIGUOUS;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c2a52ef9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 103437a..afcc717 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -2225,6 +2225,13 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
throw new InvalidPathException(src);
}
+ boolean shouldReplicate = flag.contains(CreateFlag.SHOULD_REPLICATE);
+ if (shouldReplicate &&
+ (!org.apache.commons.lang.StringUtils.isEmpty(ecPolicyName))) {
+ throw new HadoopIllegalArgumentException("SHOULD_REPLICATE flag and " +
+ "ecPolicyName are exclusive parameters. Set both is not allowed!");
+ }
+
FSPermissionChecker pc = getPermissionChecker();
INodesInPath iip = null;
boolean skipSync = true; // until we do something that might create edits
@@ -2240,7 +2247,9 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
iip = FSDirWriteFileOp.resolvePathForStartFile(
dir, pc, src, flag, createParent);
- if (!FSDirErasureCodingOp.hasErasureCodingPolicy(this, iip)) {
+ if (shouldReplicate ||
+ (org.apache.commons.lang.StringUtils.isEmpty(ecPolicyName) &&
+ !FSDirErasureCodingOp.hasErasureCodingPolicy(this, iip))) {
blockManager.verifyReplication(src, replication, clientMachine);
}
@@ -2272,7 +2281,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
try {
stat = FSDirWriteFileOp.startFile(this, iip, permissions, holder,
clientMachine, flag, createParent, replication, blockSize, feInfo,
- toRemoveBlocks, ecPolicyName, logRetryCache);
+ toRemoveBlocks, shouldReplicate, ecPolicyName, logRetryCache);
} catch (IOException e) {
skipSync = e instanceof StandbyException;
throw e;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c2a52ef9/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
index 1aee929..a14b08c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
@@ -46,6 +47,7 @@ import java.io.FileNotFoundException;
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import java.util.Collection;
+import java.util.EnumSet;
import java.util.List;
import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
@@ -564,4 +566,60 @@ public class TestErasureCodingPolicies {
assertEquals(ecPolicyOnDir, fs.getErasureCodingPolicy(dirPath));
fs.delete(dirPath, true);
}
+
+ /**
+ * Force the file to be a replicated file regardless of its parent's EC policy.
+ */
+ @Test
+ public void testEnforceAsReplicatedFile() throws Exception {
+ final Path dirPath = new Path("/striped");
+ final Path filePath = new Path(dirPath, "file");
+
+ fs.mkdirs(dirPath);
+ fs.setErasureCodingPolicy(dirPath, EC_POLICY.getName());
+
+ final String ecPolicyName = "RS-10-4-64k";
+ fs.newFSDataOutputStreamBuilder(filePath).build().close();
+ assertEquals(EC_POLICY, fs.getErasureCodingPolicy(filePath));
+ fs.delete(filePath, true);
+
+ fs.newFSDataOutputStreamBuilder(filePath)
+ .setEcPolicyName(ecPolicyName)
+ .build()
+ .close();
+ assertEquals(ecPolicyName, fs.getErasureCodingPolicy(filePath).getName());
+ fs.delete(filePath, true);
+
+ try {
+ fs.newFSDataOutputStreamBuilder(filePath)
+ .setEcPolicyName(ecPolicyName)
+ .replicate()
+ .build().close();
+ Assert.fail("shouldReplicate and ecPolicyName are exclusive " +
+ "parameters. Set both is not allowed.");
+ }catch (Exception e){
+ GenericTestUtils.assertExceptionContains("shouldReplicate and " +
+ "ecPolicyName are exclusive parameters. Set both is not allowed!", e);
+ }
+
+ try {
+ final DFSClient dfsClient = fs.getClient();
+ dfsClient.create(filePath.toString(), null,
+ EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE,
+ CreateFlag.SHOULD_REPLICATE), false, (short) 1, 1024, null, 1024,
+ null, null, ecPolicyName);
+ Assert.fail("SHOULD_REPLICATE flag and ecPolicyName are exclusive " +
+ "parameters. Set both is not allowed.");
+ }catch (Exception e){
+ GenericTestUtils.assertExceptionContains("SHOULD_REPLICATE flag and " +
+ "ecPolicyName are exclusive parameters. Set both is not allowed!", e);
+ }
+
+ fs.newFSDataOutputStreamBuilder(filePath)
+ .replicate()
+ .build()
+ .close();
+ assertNull(fs.getErasureCodingPolicy(filePath));
+ fs.delete(dirPath, true);
+ }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c2a52ef9/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
index e177da3..167997e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
@@ -1946,7 +1946,9 @@ public class TestBalancer {
public void testBalancerWithStripedFile() throws Exception {
Configuration conf = new Configuration();
initConfWithStripe(conf);
+ NameNodeConnector.setWrite2IdFile(true);
doTestBalancerWithStripedFile(conf);
+ NameNodeConnector.setWrite2IdFile(false);
}
private void doTestBalancerWithStripedFile(Configuration conf) throws Exception {
[27/50] [abbrv] hadoop git commit: Revert "HDFS-11488. JN log segment
syncing should support HA upgrade. Contributed by Hanisha Koneru."
Posted by ae...@apache.org.
Revert "HDFS-11488. JN log segment syncing should support HA upgrade. Contributed by Hanisha Koneru."
This reverts commit 08fb82d6d1217695c1cbee14608b172fa76a5569.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/30cd2651
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/30cd2651
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/30cd2651
Branch: refs/heads/HDFS-7240
Commit: 30cd265134ba60ddf9e105ddfd395830b770a692
Parents: 08fb82d
Author: Arpit Agarwal <ar...@apache.org>
Authored: Wed May 3 16:28:47 2017 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Wed May 3 16:28:47 2017 -0700
----------------------------------------------------------------------
.../hadoop-common/src/site/markdown/Metrics.md | 4 ++--
.../org/apache/hadoop/hdfs/DFSConfigKeys.java | 8 ++++----
.../server/blockmanagement/DatanodeManager.java | 7 +++----
.../apache/hadoop/hdfs/server/common/Util.java | 12 ++++++------
.../hadoop/hdfs/server/datanode/DNConf.java | 7 +++----
.../hdfs/server/datanode/FileIoProvider.java | 4 ++--
.../server/datanode/ProfilingFileIoEvents.java | 19 +++++++++----------
.../src/main/resources/hdfs-default.xml | 11 -----------
.../blockmanagement/TestSlowDiskTracker.java | 4 ++--
.../hdfs/server/datanode/TestDataNodeMXBean.java | 4 ++--
.../datanode/TestDataNodeVolumeMetrics.java | 4 ++--
.../namenode/TestNameNodeStatusMXBean.java | 5 ++---
.../hadoop/tools/TestHdfsConfigFields.java | 2 ++
13 files changed, 39 insertions(+), 52 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/30cd2651/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
index 336ad85..a8bdbeb 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
@@ -334,8 +334,8 @@ FsVolume
Per-volume metrics contain Datanode Volume IO related statistics. Per-volume
metrics are off by default. They can be enabled by setting `dfs.datanode
-.fileio.profiling.percentage.fraction` to an integer value between 1 and 100.
-Setting this value to 0 would mean profiling is not enabled. But enabling
+.fileio.profiling.sampling.fraction` to a fraction between 0.0 and 1.0.
+Setting this value to 0.0 disables profiling. Note that enabling
per-volume metrics may have a performance impact. Each metrics record
contains tags such as Hostname as additional information along with metrics.
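
As a hedged illustration of the renamed setting, the tests later in this commit enable profiling programmatically; an equivalent snippet (the 0.25 sampling fraction is an arbitrary example value) would be:

    Configuration conf = new HdfsConfiguration();
    // Any fraction in (0.0, 1.0] enables per-volume profiling; 0.0 disables it.
    conf.setDouble(
        DFSConfigKeys.DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY, 0.25);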
http://git-wip-us.apache.org/repos/asf/hadoop/blob/30cd2651/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 0ca344c..3fa383b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -731,10 +731,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final boolean
DFS_DATANODE_ENABLE_FILEIO_FAULT_INJECTION_DEFAULT = false;
public static final String
- DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_KEY =
- "dfs.datanode.fileio.profiling.sampling.percentage";
- public static final int
- DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_DEFAULT = 0;
+ DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY =
+ "dfs.datanode.fileio.profiling.sampling.fraction";
+ public static final double
+ DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_DEFAULT = 0.0;
//Keys with no defaults
public static final String DFS_DATANODE_PLUGINS_KEY = "dfs.datanode.plugins";
http://git-wip-us.apache.org/repos/asf/hadoop/blob/30cd2651/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index a61aa78..c7bdca9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -215,10 +215,9 @@ public class DatanodeManager {
this.dataNodePeerStatsEnabled = conf.getBoolean(
DFSConfigKeys.DFS_DATANODE_PEER_STATS_ENABLED_KEY,
DFSConfigKeys.DFS_DATANODE_PEER_STATS_ENABLED_DEFAULT);
- this.dataNodeDiskStatsEnabled = Util.isDiskStatsEnabled(conf.getInt(
- DFSConfigKeys.DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_KEY,
- DFSConfigKeys.
- DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_DEFAULT));
+ this.dataNodeDiskStatsEnabled = Util.isDiskStatsEnabled(conf.getDouble(
+ DFSConfigKeys.DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY,
+ DFSConfigKeys.DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_DEFAULT));
final Timer timer = new Timer();
this.slowPeerTracker = dataNodePeerStatsEnabled ?
http://git-wip-us.apache.org/repos/asf/hadoop/blob/30cd2651/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java
index e9ceeb0..fdb09df 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java
@@ -389,17 +389,17 @@ public final class Util {
return addrsList;
}
- public static boolean isDiskStatsEnabled(int fileIOSamplingPercentage) {
+ public static boolean isDiskStatsEnabled(double fileIOSamplingFraction) {
final boolean isEnabled;
- if (fileIOSamplingPercentage <= 0) {
+ if (fileIOSamplingFraction < 0.000001) {
LOG.info(DFSConfigKeys
- .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_KEY + " set to "
- + fileIOSamplingPercentage + ". Disabling file IO profiling");
+ .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY + " set to "
+ + fileIOSamplingFraction + ". Disabling file IO profiling");
isEnabled = false;
} else {
LOG.info(DFSConfigKeys
- .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_KEY + " set to "
- + fileIOSamplingPercentage + ". Enabling file IO profiling");
+ .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY + " set to "
+ + fileIOSamplingFraction + ". Enabling file IO profiling");
isEnabled = true;
}
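
The comparison against 0.000001 stands in for an exact floating-point equality test with zero, so any fraction below that epsilon disables profiling. The implied behavior, as a sketch:

    Util.isDiskStatsEnabled(0.0);   // false - profiling disabled
    Util.isDiskStatsEnabled(0.5);   // true  - profiling enabled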
http://git-wip-us.apache.org/repos/asf/hadoop/blob/30cd2651/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
index 8e5b597..21ffccc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
@@ -175,10 +175,9 @@ public class DNConf {
this.peerStatsEnabled = getConf().getBoolean(
DFSConfigKeys.DFS_DATANODE_PEER_STATS_ENABLED_KEY,
DFSConfigKeys.DFS_DATANODE_PEER_STATS_ENABLED_DEFAULT);
- this.diskStatsEnabled = Util.isDiskStatsEnabled(getConf().getInt(
- DFSConfigKeys.DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_KEY,
- DFSConfigKeys.
- DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_DEFAULT));
+ this.diskStatsEnabled = Util.isDiskStatsEnabled(getConf().getDouble(
+ DFSConfigKeys.DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY,
+ DFSConfigKeys.DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_DEFAULT));
this.outliersReportIntervalMs = getConf().getTimeDuration(
DFS_DATANODE_OUTLIERS_REPORT_INTERVAL_KEY,
DFS_DATANODE_OUTLIERS_REPORT_INTERVAL_DEFAULT,
http://git-wip-us.apache.org/repos/asf/hadoop/blob/30cd2651/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FileIoProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FileIoProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FileIoProvider.java
index 694eadd..5508e0b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FileIoProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FileIoProvider.java
@@ -62,8 +62,8 @@ import static org.apache.hadoop.hdfs.server.datanode.FileIoProvider.OPERATION.*;
*
* Behavior can be injected into these events by enabling the
* profiling and/or fault injection event hooks through
- * {@link DFSConfigKeys#DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_KEY}
- * and {@link DFSConfigKeys#DFS_DATANODE_ENABLE_FILEIO_FAULT_INJECTION_KEY}.
+ * {@link DFSConfigKeys#DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY} and
+ * {@link DFSConfigKeys#DFS_DATANODE_ENABLE_FILEIO_FAULT_INJECTION_KEY}.
* These event hooks are disabled by default.
*
* Most functions accept an optional {@link FsVolumeSpi} parameter for
http://git-wip-us.apache.org/repos/asf/hadoop/blob/30cd2651/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProfilingFileIoEvents.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProfilingFileIoEvents.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProfilingFileIoEvents.java
index 83ee5f6..35118b2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProfilingFileIoEvents.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProfilingFileIoEvents.java
@@ -44,19 +44,18 @@ class ProfilingFileIoEvents {
public ProfilingFileIoEvents(@Nullable Configuration conf) {
if (conf != null) {
- int fileIOSamplingPercentage = conf.getInt(
- DFSConfigKeys.DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_KEY,
+ double fileIOSamplingFraction = conf.getDouble(
+ DFSConfigKeys.DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY,
DFSConfigKeys
- .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_DEFAULT);
- isEnabled = Util.isDiskStatsEnabled(fileIOSamplingPercentage);
- if (fileIOSamplingPercentage > 100) {
+ .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_DEFAULT);
+ isEnabled = Util.isDiskStatsEnabled(fileIOSamplingFraction);
+ if (fileIOSamplingFraction > 1) {
LOG.warn(DFSConfigKeys
- .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_KEY +
- " value cannot be more than 100. Setting value to 100");
- fileIOSamplingPercentage = 100;
+ .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY +
+ " value cannot be more than 1. Setting value to 1");
+ fileIOSamplingFraction = 1;
}
- sampleRangeMax = (int) ((double) fileIOSamplingPercentage / 100 *
- Integer.MAX_VALUE);
+ sampleRangeMax = (int) (fileIOSamplingFraction * Integer.MAX_VALUE);
} else {
isEnabled = false;
sampleRangeMax = 0;
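
Scaling the fraction by Integer.MAX_VALUE turns each sampling decision into a single integer comparison. A minimal sketch of the idea (the random-number call here is an assumption for illustration, not code from this hunk):

    // Sample an event iff a uniform random int in [0, Integer.MAX_VALUE)
    // falls below sampleRangeMax == fraction * Integer.MAX_VALUE.
    boolean sampled =
        ThreadLocalRandom.current().nextInt(Integer.MAX_VALUE) < sampleRangeMax;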
http://git-wip-us.apache.org/repos/asf/hadoop/blob/30cd2651/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 0f33b70..7fcea01 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -2022,17 +2022,6 @@
</property>
<property>
- <name>dfs.datanode.fileio.profiling.sampling.percentage</name>
- <value>0</value>
- <description>
- This setting controls the percentage of file I/O events which will be
- profiled for DataNode disk statistics. The default value of 0 disables
- disk statistics. Set to an integer value between 1 and 100 to enable disk
- statistics.
- </description>
-</property>
-
-<property>
<name>hadoop.user.group.metrics.percentiles.intervals</name>
<value></value>
<description>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/30cd2651/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSlowDiskTracker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSlowDiskTracker.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSlowDiskTracker.java
index 172400d..16dfab2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSlowDiskTracker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSlowDiskTracker.java
@@ -28,7 +28,7 @@ import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.collect.ImmutableMap;
import org.apache.hadoop.conf.Configuration;
import static org.apache.hadoop.hdfs.DFSConfigKeys
- .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_KEY;
+ .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys
.DFS_DATANODE_OUTLIERS_REPORT_INTERVAL_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY;
@@ -80,7 +80,7 @@ public class TestSlowDiskTracker {
static {
conf = new HdfsConfiguration();
conf.setLong(DFS_HEARTBEAT_INTERVAL_KEY, 1L);
- conf.setInt(DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_KEY, 100);
+ conf.setDouble(DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY, 1.0);
conf.setTimeDuration(DFS_DATANODE_OUTLIERS_REPORT_INTERVAL_KEY,
OUTLIERS_REPORT_INTERVAL, TimeUnit.MILLISECONDS);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/30cd2651/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
index faead18..b80976a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
@@ -218,8 +218,8 @@ public class TestDataNodeMXBean {
@Test
public void testDataNodeMXBeanSlowDisksEnabled() throws Exception {
Configuration conf = new Configuration();
- conf.setInt(DFSConfigKeys
- .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_KEY, 100);
+ conf.setDouble(DFSConfigKeys
+ .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY, 1.0);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
try {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/30cd2651/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeMetrics.java
index 0f41d23..03e1dee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeMetrics.java
@@ -121,8 +121,8 @@ public class TestDataNodeVolumeMetrics {
private MiniDFSCluster setupClusterForVolumeMetrics() throws IOException {
Configuration conf = new HdfsConfiguration();
- conf.setInt(DFSConfigKeys
- .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_KEY, 100);
+ conf.setDouble(DFSConfigKeys
+ .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY, 1.0);
SimulatedFSDataset.setFactory(conf);
return new MiniDFSCluster.Builder(conf)
.numDataNodes(NUM_DATANODES)
http://git-wip-us.apache.org/repos/asf/hadoop/blob/30cd2651/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeStatusMXBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeStatusMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeStatusMXBean.java
index f9bfc37..8fe734e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeStatusMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeStatusMXBean.java
@@ -105,9 +105,8 @@ public class TestNameNodeStatusMXBean {
@Test
public void testNameNodeMXBeanSlowDisksEnabled() throws Exception {
Configuration conf = new Configuration();
- conf.setInt(
- DFSConfigKeys.DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_KEY,
- 100);
+ conf.setDouble(
+ DFSConfigKeys.DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY, 1.0);
conf.setTimeDuration(
DFSConfigKeys.DFS_DATANODE_OUTLIERS_REPORT_INTERVAL_KEY,
1000, TimeUnit.MILLISECONDS);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/30cd2651/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
index f23b266..1fdf713 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
@@ -105,6 +105,8 @@ public class TestHdfsConfigFields extends TestConfigurationFieldsBase {
.add(DFSConfigKeys.DFS_NAMENODE_STARTUP_KEY);
configurationPropsToSkipCompare.add(DFSConfigKeys
.DFS_DATANODE_ENABLE_FILEIO_FAULT_INJECTION_KEY);
+ configurationPropsToSkipCompare.add(DFSConfigKeys
+ .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY);
// Allocate
xmlPropsToSkipCompare = new HashSet<String>();
[34/50] [abbrv] hadoop git commit: YARN-6522. Make SLS JSON input
file format simple and scalable (yufeigu via rkanter)
Posted by ae...@apache.org.
YARN-6522. Make SLS JSON input file format simple and scalable (yufeigu via rkanter)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3082552b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3082552b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3082552b
Branch: refs/heads/HDFS-7240
Commit: 3082552b3b991df846caf572b58e44308ddf8eeb
Parents: 07761af
Author: Robert Kanter <rk...@apache.org>
Authored: Thu May 4 17:21:46 2017 -0700
Committer: Robert Kanter <rk...@apache.org>
Committed: Thu May 4 17:21:46 2017 -0700
----------------------------------------------------------------------
.../org/apache/hadoop/yarn/sls/SLSRunner.java | 102 ++++++++++++++-----
.../hadoop/yarn/sls/appmaster/AMSimulator.java | 42 ++++----
.../sls/synthetic/SynthTraceJobProducer.java | 2 +-
.../apache/hadoop/yarn/sls/utils/SLSUtils.java | 49 ++++++---
.../src/site/markdown/SchedulerLoadSimulator.md | 28 +++--
.../hadoop/yarn/sls/utils/TestSLSUtils.java | 30 ++++++
6 files changed, 182 insertions(+), 71 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3082552b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
index 9d35d1b..ddd35ef 100644
--- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
+++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
@@ -119,6 +119,9 @@ public class SLSRunner extends Configured implements Tool {
// logger
public final static Logger LOG = Logger.getLogger(SLSRunner.class);
+ private final static int DEFAULT_MAPPER_PRIORITY = 20;
+ private final static int DEFAULT_REDUCER_PRIORITY = 10;
+
/**
* The type of trace in input.
*/
@@ -247,8 +250,8 @@ public class SLSRunner extends Configured implements Tool {
break;
case SYNTH:
stjp = new SynthTraceJobProducer(getConf(), new Path(inputTraces[0]));
- nodeSet.addAll(SLSUtils.generateNodesFromSynth(stjp.getNumNodes(),
- stjp.getNodesPerRack()));
+ nodeSet.addAll(SLSUtils.generateNodes(stjp.getNumNodes(),
+ stjp.getNumNodes()/stjp.getNodesPerRack()));
break;
default:
throw new YarnException("Input configuration not recognized, "
@@ -259,6 +262,10 @@ public class SLSRunner extends Configured implements Tool {
nodeSet.addAll(SLSUtils.parseNodesFromNodeFile(nodeFile));
}
+ if (nodeSet.size() == 0) {
+ throw new YarnException("No node! Please configure nodes.");
+ }
+
// create NM simulators
Random random = new Random();
Set<String> rackSet = new HashSet<String>();
@@ -348,7 +355,11 @@ public class SLSRunner extends Configured implements Tool {
private void createAMForJob(Map jsonJob) throws YarnException {
long jobStartTime = Long.parseLong(jsonJob.get("job.start.ms").toString());
- long jobFinishTime = Long.parseLong(jsonJob.get("job.end.ms").toString());
+
+ long jobFinishTime = 0;
+ if (jsonJob.containsKey("job.end.ms")) {
+ jobFinishTime = Long.parseLong(jsonJob.get("job.end.ms").toString());
+ }
String user = (String) jsonJob.get("job.user");
if (user == null) {
@@ -358,25 +369,49 @@ public class SLSRunner extends Configured implements Tool {
String queue = jsonJob.get("job.queue.name").toString();
increaseQueueAppNum(queue);
- String oldAppId = jsonJob.get("job.id").toString();
+ String oldAppId = (String)jsonJob.get("job.id");
+ if (oldAppId == null) {
+ oldAppId = Integer.toString(AM_ID);
+ }
- // tasks
+ String amType = (String)jsonJob.get("am.type");
+ if (amType == null) {
+ amType = SLSUtils.DEFAULT_JOB_TYPE;
+ }
+
+ runNewAM(amType, user, queue, oldAppId, jobStartTime, jobFinishTime,
+ getTaskContainers(jsonJob), null);
+ }
+
+ private List<ContainerSimulator> getTaskContainers(Map jsonJob)
+ throws YarnException {
+ List<ContainerSimulator> containers = new ArrayList<>();
List tasks = (List) jsonJob.get("job.tasks");
if (tasks == null || tasks.size() == 0) {
throw new YarnException("No task for the job!");
}
- List<ContainerSimulator> containerList = new ArrayList<>();
for (Object o : tasks) {
Map jsonTask = (Map) o;
- String hostname = jsonTask.get("container.host").toString();
- long taskStart = Long.parseLong(jsonTask.get("container.start.ms")
- .toString());
- long taskFinish = Long.parseLong(jsonTask.get("container.end.ms")
- .toString());
- long lifeTime = taskFinish - taskStart;
-
- // Set memory and vcores from job trace file
+
+ String hostname = (String) jsonTask.get("container.host");
+
+ long duration = 0;
+ if (jsonTask.containsKey("duration.ms")) {
+ duration = Integer.parseInt(jsonTask.get("duration.ms").toString());
+ } else if (jsonTask.containsKey("container.start.ms") &&
+ jsonTask.containsKey("container.end.ms")) {
+ long taskStart = Long.parseLong(jsonTask.get("container.start.ms")
+ .toString());
+ long taskFinish = Long.parseLong(jsonTask.get("container.end.ms")
+ .toString());
+ duration = taskFinish - taskStart;
+ }
+ if (duration <= 0) {
+ throw new YarnException("Duration of a task shouldn't be less or equal"
+ + " to 0!");
+ }
+
Resource res = getDefaultContainerResource();
if (jsonTask.containsKey("container.memory")) {
int containerMemory =
@@ -390,17 +425,30 @@ public class SLSRunner extends Configured implements Tool {
res.setVirtualCores(containerVCores);
}
- int priority = Integer.parseInt(jsonTask.get("container.priority")
- .toString());
- String type = jsonTask.get("container.type").toString();
- containerList.add(
- new ContainerSimulator(res, lifeTime, hostname, priority, type));
+ int priority = DEFAULT_MAPPER_PRIORITY;
+ if (jsonTask.containsKey("container.priority")) {
+ priority = Integer.parseInt(jsonTask.get("container.priority")
+ .toString());
+ }
+
+ String type = "map";
+ if (jsonTask.containsKey("container.type")) {
+ type = jsonTask.get("container.type").toString();
+ }
+
+ int count = 1;
+ if (jsonTask.containsKey("count")) {
+ count = Integer.parseInt(jsonTask.get("count").toString());
+ }
+ count = Math.max(count, 1);
+
+ for (int i = 0; i < count; i++) {
+ containers.add(
+ new ContainerSimulator(res, duration, hostname, priority, type));
+ }
}
- // create a new AM
- String amType = jsonJob.get("am.type").toString();
- runNewAM(amType, user, queue, oldAppId, jobStartTime, jobFinishTime,
- containerList, null);
+ return containers;
}
/**
@@ -463,7 +511,7 @@ public class SLSRunner extends Configured implements Tool {
taskAttempt.getStartTime();
containerList.add(
new ContainerSimulator(getDefaultContainerResource(),
- containerLifeTime, hostname, 10, "map"));
+ containerLifeTime, hostname, DEFAULT_MAPPER_PRIORITY, "map"));
}
// reducer
@@ -479,7 +527,7 @@ public class SLSRunner extends Configured implements Tool {
taskAttempt.getStartTime();
containerList.add(
new ContainerSimulator(getDefaultContainerResource(),
- containerLifeTime, hostname, 20, "reduce"));
+ containerLifeTime, hostname, DEFAULT_REDUCER_PRIORITY, "reduce"));
}
// Only supports the default job type currently
@@ -559,7 +607,7 @@ public class SLSRunner extends Configured implements Tool {
Resource.newInstance((int) tai.getTaskInfo().getTaskMemory(),
(int) tai.getTaskInfo().getTaskVCores());
containerList.add(new ContainerSimulator(containerResource,
- containerLifeTime, hostname, 10, "map"));
+ containerLifeTime, hostname, DEFAULT_MAPPER_PRIORITY, "map"));
maxMapRes = Resources.componentwiseMax(maxMapRes, containerResource);
maxMapDur =
containerLifeTime > maxMapDur ? containerLifeTime : maxMapDur;
@@ -579,7 +627,7 @@ public class SLSRunner extends Configured implements Tool {
Resource.newInstance((int) tai.getTaskInfo().getTaskMemory(),
(int) tai.getTaskInfo().getTaskVCores());
containerList.add(new ContainerSimulator(containerResource,
- containerLifeTime, hostname, 20, "reduce"));
+ containerLifeTime, hostname, DEFAULT_REDUCER_PRIORITY, "reduce"));
maxRedRes = Resources.componentwiseMax(maxRedRes, containerResource);
maxRedDur =
containerLifeTime > maxRedDur ? containerLifeTime : maxRedDur;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3082552b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/AMSimulator.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/AMSimulator.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/AMSimulator.java
index 45a3c07..70c5579 100644
--- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/AMSimulator.java
+++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/AMSimulator.java
@@ -400,26 +400,28 @@ public abstract class AMSimulator extends TaskRunner.Task {
Map<String, ResourceRequest> nodeLocalRequestMap = new HashMap<String, ResourceRequest>();
ResourceRequest anyRequest = null;
for (ContainerSimulator cs : csList) {
- String rackHostNames[] = SLSUtils.getRackHostName(cs.getHostname());
- // check rack local
- String rackname = "/" + rackHostNames[0];
- if (rackLocalRequestMap.containsKey(rackname)) {
- rackLocalRequestMap.get(rackname).setNumContainers(
- rackLocalRequestMap.get(rackname).getNumContainers() + 1);
- } else {
- ResourceRequest request = createResourceRequest(
- cs.getResource(), rackname, priority, 1);
- rackLocalRequestMap.put(rackname, request);
- }
- // check node local
- String hostname = rackHostNames[1];
- if (nodeLocalRequestMap.containsKey(hostname)) {
- nodeLocalRequestMap.get(hostname).setNumContainers(
- nodeLocalRequestMap.get(hostname).getNumContainers() + 1);
- } else {
- ResourceRequest request = createResourceRequest(
- cs.getResource(), hostname, priority, 1);
- nodeLocalRequestMap.put(hostname, request);
+ if (cs.getHostname() != null) {
+ String[] rackHostNames = SLSUtils.getRackHostName(cs.getHostname());
+ // check rack local
+ String rackname = "/" + rackHostNames[0];
+ if (rackLocalRequestMap.containsKey(rackname)) {
+ rackLocalRequestMap.get(rackname).setNumContainers(
+ rackLocalRequestMap.get(rackname).getNumContainers() + 1);
+ } else {
+ ResourceRequest request =
+ createResourceRequest(cs.getResource(), rackname, priority, 1);
+ rackLocalRequestMap.put(rackname, request);
+ }
+ // check node local
+ String hostname = rackHostNames[1];
+ if (nodeLocalRequestMap.containsKey(hostname)) {
+ nodeLocalRequestMap.get(hostname).setNumContainers(
+ nodeLocalRequestMap.get(hostname).getNumContainers() + 1);
+ } else {
+ ResourceRequest request =
+ createResourceRequest(cs.getResource(), hostname, priority, 1);
+ nodeLocalRequestMap.put(hostname, request);
+ }
}
// any
if (anyRequest == null) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3082552b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/synthetic/SynthTraceJobProducer.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/synthetic/SynthTraceJobProducer.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/synthetic/SynthTraceJobProducer.java
index 3d2ec94..c89e4e2 100644
--- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/synthetic/SynthTraceJobProducer.java
+++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/synthetic/SynthTraceJobProducer.java
@@ -131,7 +131,7 @@ public class SynthTraceJobProducer implements JobStoryProducer {
}
public int getNodesPerRack() {
- return trace.nodes_per_rack;
+ return trace.nodes_per_rack < 1 ? 1 : trace.nodes_per_rack;
}
public int getNumNodes() {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3082552b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/utils/SLSUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/utils/SLSUtils.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/utils/SLSUtils.java
index e27b36f..dbc2dab 100644
--- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/utils/SLSUtils.java
+++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/utils/SLSUtils.java
@@ -101,7 +101,7 @@ public class SLSUtils {
*/
public static Set<String> parseNodesFromSLSTrace(String jobTrace)
throws IOException {
- Set<String> nodeSet = new HashSet<String>();
+ Set<String> nodeSet = new HashSet<>();
JsonFactory jsonF = new JsonFactory();
ObjectMapper mapper = new ObjectMapper();
Reader input =
@@ -109,13 +109,7 @@ public class SLSUtils {
try {
Iterator<Map> i = mapper.readValues(jsonF.createParser(input), Map.class);
while (i.hasNext()) {
- Map jsonE = i.next();
- List tasks = (List) jsonE.get("job.tasks");
- for (Object o : tasks) {
- Map jsonTask = (Map) o;
- String hostname = jsonTask.get("container.host").toString();
- nodeSet.add(hostname);
- }
+ addNodes(nodeSet, i.next());
}
} finally {
input.close();
@@ -123,6 +117,29 @@ public class SLSUtils {
return nodeSet;
}
+ private static void addNodes(Set<String> nodeSet, Map jsonEntry) {
+ if (jsonEntry.containsKey("num.nodes")) {
+ int numNodes = Integer.parseInt(jsonEntry.get("num.nodes").toString());
+ int numRacks = 1;
+ if (jsonEntry.containsKey("num.racks")) {
+ numRacks = Integer.parseInt(
+ jsonEntry.get("num.racks").toString());
+ }
+ nodeSet.addAll(generateNodes(numNodes, numRacks));
+ }
+
+ if (jsonEntry.containsKey("job.tasks")) {
+ List tasks = (List) jsonEntry.get("job.tasks");
+ for (Object o : tasks) {
+ Map jsonTask = (Map) o;
+ String hostname = (String) jsonTask.get("container.host");
+ if (hostname != null) {
+ nodeSet.add(hostname);
+ }
+ }
+ }
+ }
+
/**
* parse the input node file, return each host name
*/
@@ -150,11 +167,19 @@ public class SLSUtils {
return nodeSet;
}
- public static Set<? extends String> generateNodesFromSynth(
- int numNodes, int nodesPerRack) {
- Set<String> nodeSet = new HashSet<String>();
+ public static Set<? extends String> generateNodes(int numNodes,
+ int numRacks){
+ Set<String> nodeSet = new HashSet<>();
+ if (numRacks < 1) {
+ numRacks = 1;
+ }
+
+ if (numRacks > numNodes) {
+ numRacks = numNodes;
+ }
+
for (int i = 0; i < numNodes; i++) {
- nodeSet.add("/rack" + i % nodesPerRack + "/node" + i);
+ nodeSet.add("/rack" + i % numRacks + "/node" + i);
}
return nodeSet;
}
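
generateNodes distributes nodes round-robin across racks via the i % numRacks index, after clamping numRacks into [1, numNodes]. For example:

    // Produces: /rack0/node0, /rack1/node1, /rack0/node2, /rack1/node3
    Set<? extends String> nodes = SLSUtils.generateNodes(4, 2);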
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3082552b/hadoop-tools/hadoop-sls/src/site/markdown/SchedulerLoadSimulator.md
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/site/markdown/SchedulerLoadSimulator.md b/hadoop-tools/hadoop-sls/src/site/markdown/SchedulerLoadSimulator.md
index f0e3b8c..6e00e9a 100644
--- a/hadoop-tools/hadoop-sls/src/site/markdown/SchedulerLoadSimulator.md
+++ b/hadoop-tools/hadoop-sls/src/site/markdown/SchedulerLoadSimulator.md
@@ -328,18 +328,24 @@ Appendix
Here we provide an example format of the sls json file, which contains 2 jobs. The first job has 3 map tasks and the second one has 2 map tasks.
{
- "am.type" : "mapreduce",
- "job.start.ms" : 0,
- "job.end.ms" : 95375,
- "job.queue.name" : "sls_queue_1",
- "job.id" : "job_1",
- "job.user" : "default",
+ "num.nodes": 3, // total number of nodes in the cluster
+ "num.racks": 1 // total number of racks in the cluster, it divides num.nodes into the racks evenly, optional, the default value is 1
+ }
+ {
+ "am.type" : "mapreduce", // type of AM, optional, the default value is "mapreduce"
+ "job.start.ms" : 0, // job start time
+ "job.end.ms" : 95375, // job finish time, optional, the default value is 0
+ "job.queue.name" : "sls_queue_1", // the queue job will be submitted to
+ "job.id" : "job_1", // the job id used to track the job, optional, the default value is an zero-based integer increasing with number of jobs
+ "job.user" : "default", // user, optional, the default value is "default"
"job.tasks" : [ {
- "container.host" : "/default-rack/node1",
- "container.start.ms" : 6664,
- "container.end.ms" : 23707,
- "container.priority" : 20,
- "container.type" : "map"
+ "count": 1, // number of tasks, optional, the default value is 1
+ "container.host" : "/default-rack/node1", // host the container asks for
+ "container.start.ms" : 6664, // container start time, optional
+ "container.end.ms" : 23707, // container finish time, optional
+ "duration.ms": 50000, // duration of the container, optional if start and end time is specified
+ "container.priority" : 20, // priority of the container, optional, the default value is 20
+ "container.type" : "map" // type of the container, could be "map" or "reduce", optional, the default value is "map"
}, {
"container.host" : "/default-rack/node3",
"container.start.ms" : 6665,
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3082552b/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/utils/TestSLSUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/utils/TestSLSUtils.java b/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/utils/TestSLSUtils.java
index f4eda67..30964a1 100644
--- a/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/utils/TestSLSUtils.java
+++ b/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/utils/TestSLSUtils.java
@@ -21,6 +21,9 @@ package org.apache.hadoop.yarn.sls.utils;
import org.junit.Assert;
import org.junit.Test;
+import java.util.HashSet;
+import java.util.Set;
+
public class TestSLSUtils {
@Test
@@ -36,4 +39,31 @@ public class TestSLSUtils {
Assert.assertEquals(rackHostname[1], "node1");
}
+ @Test
+ public void testGenerateNodes() {
+ Set<? extends String> nodes = SLSUtils.generateNodes(3, 3);
+ Assert.assertEquals("Number of nodes is wrong.", 3, nodes.size());
+ Assert.assertEquals("Number of racks is wrong.", 3, getNumRack(nodes));
+
+ nodes = SLSUtils.generateNodes(3, 1);
+ Assert.assertEquals("Number of nodes is wrong.", 3, nodes.size());
+ Assert.assertEquals("Number of racks is wrong.", 1, getNumRack(nodes));
+
+ nodes = SLSUtils.generateNodes(3, 4);
+ Assert.assertEquals("Number of nodes is wrong.", 3, nodes.size());
+ Assert.assertEquals("Number of racks is wrong.", 3, getNumRack(nodes));
+
+ nodes = SLSUtils.generateNodes(3, 0);
+ Assert.assertEquals("Number of nodes is wrong.", 3, nodes.size());
+ Assert.assertEquals("Number of racks is wrong.", 1, getNumRack(nodes));
+ }
+
+ private int getNumRack(Set<? extends String> nodes) {
+ Set<String> racks = new HashSet<>();
+ for (String node : nodes) {
+ String[] rackHostname = SLSUtils.getRackHostName(node);
+ racks.add(rackHostname[0]);
+ }
+ return racks.size();
+ }
}
[30/50] [abbrv] hadoop git commit: HDFS-11687. Add new public
encryption APIs required by Hive. (lei)
Posted by ae...@apache.org.
HDFS-11687. Add new public encryption APIs required by Hive. (lei)
Change-Id: I4a23a00de63ad18022312ceb1f306a87d032d07c
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/25f5d9ad
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/25f5d9ad
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/25f5d9ad
Branch: refs/heads/HDFS-7240
Commit: 25f5d9ad5ee5ead349d259a99b49541a70b1604d
Parents: c2a52ef
Author: Lei Xu <le...@apache.org>
Authored: Thu May 4 12:06:50 2017 -0700
Committer: Lei Xu <le...@apache.org>
Committed: Thu May 4 12:06:50 2017 -0700
----------------------------------------------------------------------
.../java/org/apache/hadoop/hdfs/DFSClient.java | 18 ++---------
.../hadoop/hdfs/DistributedFileSystem.java | 9 ++++--
.../apache/hadoop/hdfs/client/HdfsAdmin.java | 12 +++++++
.../apache/hadoop/hdfs/TestEncryptionZones.java | 1 +
.../org/apache/hadoop/hdfs/TestHdfsAdmin.java | 33 ++++++++++++++++++++
5 files changed, 55 insertions(+), 18 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/25f5d9ad/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 187d2e9..3f1f3ea 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -2951,24 +2951,10 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
/**
* Probe for encryption enabled on this filesystem.
- * Note (see HDFS-11689):
- * Not to throw exception in this method since it would break hive.
- * Hive accesses this method and assumes no exception would be thrown.
- * Hive should not access DFSClient since it is InterfaceAudience.Private.
- * Deprecated annotation is added to trigger build warning at hive side.
- * Request has been made to Hive to remove access to DFSClient.
* @return true if encryption is enabled
*/
- @Deprecated
- public boolean isHDFSEncryptionEnabled() {
- boolean result = false;
- try {
- result = (getKeyProviderUri() != null);
- } catch (IOException ioe) {
- DFSClient.LOG.warn("Exception while checking whether encryption zone "
- + "is supported, assumes it is not supported", ioe);
- }
- return result;
+ boolean isHDFSEncryptionEnabled() throws IOException {
+ return getKeyProviderUri() != null;
}
/**
http://git-wip-us.apache.org/repos/asf/hadoop/blob/25f5d9ad/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 9e89bc5..7b2f2fe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -2587,8 +2587,13 @@ public class DistributedFileSystem extends FileSystem {
*/
@Override
public Path getTrashRoot(Path path) {
- if ((path == null) || !dfs.isHDFSEncryptionEnabled()) {
- return super.getTrashRoot(path);
+ try {
+ if ((path == null) || !dfs.isHDFSEncryptionEnabled()) {
+ return super.getTrashRoot(path);
+ }
+ } catch (IOException ioe) {
+ DFSClient.LOG.warn("Exception while checking whether encryption zone is "
+ + "supported", ioe);
}
String parentSrc = path.isRoot()?
http://git-wip-us.apache.org/repos/asf/hadoop/blob/25f5d9ad/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
index 456f280..71f6a35 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.fs.BlockStoragePolicySpi;
import org.apache.hadoop.fs.CacheFlag;
import org.apache.hadoop.fs.FileAlreadyExistsException;
@@ -268,6 +269,17 @@ public class HdfsAdmin {
}
/**
+ * Get KeyProvider if present.
+ *
+ * @return the key provider if encryption is enabled on HDFS.
+ * Otherwise, it returns null.
+ * @throws IOException on RPC exception to the NN.
+ */
+ public KeyProvider getKeyProvider() throws IOException {
+ return dfs.getClient().getKeyProvider();
+ }
+
+ /**
* Create an encryption zone rooted at an empty existing directory, using the
* specified encryption key. An encryption zone has an associated encryption
* key used when reading and writing files within the zone.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/25f5d9ad/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
index 1f51732..093d516 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
@@ -349,6 +349,7 @@ public class TestEncryptionZones {
@Test
public void testBasicOperations() throws Exception {
+ assertNotNull("key provider is not present", dfsAdmin.getKeyProvider());
int numZones = 0;
/* Number of EZs should be 0 if no EZ is created */
assertEquals("Unexpected number of encryption zones!", numZones,
http://git-wip-us.apache.org/repos/asf/hadoop/blob/25f5d9ad/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java
index 717d79e..fe20c68 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
+import java.io.File;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
@@ -27,8 +28,11 @@ import java.util.HashSet;
import java.util.Set;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.key.JavaKeyStoreProvider;
import org.apache.hadoop.fs.BlockStoragePolicySpi;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileSystemTestHelper;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
@@ -172,4 +176,33 @@ public class TestHdfsAdmin {
Assert.assertTrue(
Sets.difference(policyNamesSet2, policyNamesSet1).isEmpty());
}
+
+ private static String getKeyProviderURI() {
+ FileSystemTestHelper helper = new FileSystemTestHelper();
+ // Set up java key store
+ String testRoot = helper.getTestRootDir();
+ File testRootDir = new File(testRoot).getAbsoluteFile();
+ return JavaKeyStoreProvider.SCHEME_NAME + "://file" +
+ new Path(testRootDir.toString(), "test.jks").toUri();
+ }
+
+ @Test
+ public void testGetKeyProvider() throws IOException {
+ HdfsAdmin hdfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
+ Assert.assertNull("should return null for an non-encrypted cluster",
+ hdfsAdmin.getKeyProvider());
+
+ shutDownCluster();
+
+ Configuration conf = new Configuration();
+ conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
+ getKeyProviderURI());
+
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
+ cluster.waitActive();
+ hdfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
+
+ Assert.assertNotNull("should not return null for an encrypted cluster",
+ hdfsAdmin.getKeyProvider());
+ }
}
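For downstream users, a rough sketch of how a client such as Hive might probe for encryption through the new public HdfsAdmin API instead of the now-package-private DFSClient method. The HdfsAdmin constructor and getKeyProvider() come from this commit; the wrapper class and method names here are illustrative.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.crypto.key.KeyProvider;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.client.HdfsAdmin;

    public class EncryptionProbe {
      /** Returns true if HDFS transparent encryption is configured. */
      public static boolean isEncryptionEnabled(Configuration conf)
          throws IOException {
        HdfsAdmin admin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
        KeyProvider provider = admin.getKeyProvider();
        // null means no key provider, i.e. no encryption zone support
        return provider != null;
      }
    }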
[07/50] [abbrv] hadoop git commit: HDFS-11714. Newly added NN storage
directory won't get initialized and cause space exhaustion. Contributed by
Kihwal Lee.
Posted by ae...@apache.org.
HDFS-11714. Newly added NN storage directory won't get initialized and cause space exhaustion. Contributed by Kihwal Lee.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4cfc8664
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4cfc8664
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4cfc8664
Branch: refs/heads/HDFS-7240
Commit: 4cfc8664362ed04b01872e854715a36dad9408a6
Parents: 343948c
Author: Kihwal Lee <ki...@apache.org>
Authored: Mon May 1 17:29:25 2017 -0500
Committer: Kihwal Lee <ki...@apache.org>
Committed: Mon May 1 17:29:25 2017 -0500
----------------------------------------------------------------------
.../hadoop/hdfs/server/namenode/FSImage.java | 52 +++++++++++++++++++-
.../namenode/ha/TestStandbyCheckpoints.java | 40 +++++++++++++++
2 files changed, 91 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4cfc8664/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
index 76500a3..e758108 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
@@ -98,6 +98,16 @@ public class FSImage implements Closeable {
protected NNStorageRetentionManager archivalManager;
+ /**
+ * The collection of newly added storage directories. These are partially
+ * formatted then later fully populated along with a VERSION file.
+ * For HA, the second part is done when the next checkpoint is saved.
+ * This set will be cleared once a VERSION file is created.
+ * For non-HA, a new fsimage will be locally generated along with a new
+ * VERSION file. This set is not used for non-HA mode.
+ */
+ private Set<StorageDirectory> newDirs = null;
+
/* Used to make sure there are no concurrent checkpoints for a given txid
* The checkpoint here could be one of the following operations.
* a. checkpoint when NN is in standby.
@@ -261,9 +271,26 @@ public class FSImage implements Closeable {
throw new IOException(StorageState.NON_EXISTENT +
" state cannot be here");
case NOT_FORMATTED:
+ // Create a dir structure, but not the VERSION file. The presence of
+ // VERSION is checked in the inspector's needToSave() method and
+ // saveNamespace is triggered if it is absent. This will bring
+ // the storage state up to date along with a new VERSION file.
+ // If HA is enabled, NNs start up as standby so saveNamespace is not
+ // triggered.
LOG.info("Storage directory " + sd.getRoot() + " is not formatted.");
LOG.info("Formatting ...");
sd.clearDirectory(); // create empty current dir
+ // For non-HA, no further action is needed here, as saveNamespace will
+ // take care of the rest.
+ if (!target.isHaEnabled()) {
+ continue;
+ }
+ // If HA is enabled, save the dirs to create a version file later when
+ // a checkpoint image is saved.
+ if (newDirs == null) {
+ newDirs = new HashSet<StorageDirectory>();
+ }
+ newDirs.add(sd);
break;
default:
break;
@@ -289,7 +316,27 @@ public class FSImage implements Closeable {
return loadFSImage(target, startOpt, recovery);
}
-
+
+ /**
+ * Create a VERSION file in the newly added storage directories.
+ */
+ private void initNewDirs() {
+ if (newDirs == null) {
+ return;
+ }
+ for (StorageDirectory sd : newDirs) {
+ try {
+ storage.writeProperties(sd);
+ LOG.info("Wrote VERSION in the new storage, " + sd.getCurrentDir());
+ } catch (IOException e) {
+ // Failed to create a VERSION file. Report the error.
+ storage.reportErrorOnFile(sd.getVersionFile());
+ }
+ }
+ newDirs.clear();
+ newDirs = null;
+ }
+
/**
* For each storage directory, performs recovery of incomplete transitions
* (eg. upgrade, rollback, checkpoint) and inserts the directory's storage
@@ -1350,6 +1397,9 @@ public class FSImage implements Closeable {
if (txid > storage.getMostRecentCheckpointTxId()) {
storage.setMostRecentCheckpointInfo(txid, Time.now());
}
+
+ // Create a version file in any new storage directory.
+ initNewDirs();
}
@Override
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4cfc8664/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
index 7c0ed7b..ada62ba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.apache.hadoop.hdfs.server.common.Util;
import org.apache.hadoop.hdfs.server.namenode.*;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
import org.apache.hadoop.hdfs.util.Canceler;
@@ -41,6 +42,7 @@ import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.ipc.StandbyException;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
+import org.apache.hadoop.test.PathUtils;
import org.apache.hadoop.util.ThreadUtil;
import org.junit.After;
import org.junit.Before;
@@ -175,6 +177,44 @@ public class TestStandbyCheckpoints {
purgeLogsOlderThan(Mockito.anyLong());
}
+ @Test
+ public void testNewDirInitAfterCheckpointing() throws Exception {
+ File hdfsDir = new File(PathUtils.getTestDir(TestStandbyCheckpoints.class),
+ "testNewDirInitAfterCheckpointing");
+ File nameDir = new File(hdfsDir, "name1");
+ assert nameDir.mkdirs();
+
+ // Restart nn0 with an additional name dir.
+ String existingDir = cluster.getConfiguration(0).
+ get(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
+ cluster.getConfiguration(0).set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
+ existingDir + "," + Util.fileAsURI(nameDir).toString());
+ cluster.restartNameNode(0);
+ nns[0] = cluster.getNameNode(0);
+ cluster.transitionToActive(0);
+
+ // "current" is created, but current/VERSION isn't.
+ File currDir = new File(nameDir, "current");
+ File versionFile = new File(currDir, "VERSION");
+ assert currDir.exists();
+ assert !versionFile.exists();
+
+ // Trigger checkpointing and upload.
+ doEdits(0, 10);
+ HATestUtil.waitForStandbyToCatchUp(nns[0], nns[1]);
+
+ // The version file will be created if a checkpoint is uploaded.
+ // Wait up to 10 seconds for that to happen.
+ for (int i = 0; i < 20; i++) {
+ if (versionFile.exists()) {
+ break;
+ }
+ Thread.sleep(500);
+ }
+ // VERSION must have been created.
+ assert versionFile.exists();
+ }
+
/**
* Test for the case when both of the NNs in the cluster are
* in the standby state, and thus are both creating checkpoints
[03/50] [abbrv] hadoop git commit: YARN-6520. Fix warnings from
Spotbugs in hadoop-yarn-client. Contributed by Weiwei Yang.
Posted by ae...@apache.org.
YARN-6520. Fix warnings from Spotbugs in hadoop-yarn-client. Contributed by Weiwei Yang.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/64f68cb0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/64f68cb0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/64f68cb0
Branch: refs/heads/HDFS-7240
Commit: 64f68cb0b8c0b93d37699893b812b37c4b05c939
Parents: 0f1af31
Author: Naganarasimha <na...@apache.org>
Authored: Mon May 1 18:38:22 2017 +0530
Committer: Naganarasimha <na...@apache.org>
Committed: Mon May 1 18:38:22 2017 +0530
----------------------------------------------------------------------
.../yarn/client/api/impl/YarnClientImpl.java | 10 +++-
.../apache/hadoop/yarn/client/cli/LogsCLI.java | 58 ++++++++------------
2 files changed, 31 insertions(+), 37 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/64f68cb0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
index 8865b52..83210bd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
@@ -367,9 +367,13 @@ public class YarnClientImpl extends YarnClient {
if (timelineClient == null) {
synchronized (this) {
if (timelineClient == null) {
- timelineClient = createTimelineClient();
- timelineClient.init(getConfig());
- timelineClient.start();
+ TimelineClient tlClient = createTimelineClient();
+ tlClient.init(getConfig());
+ tlClient.start();
+ // Assign the timeline client variable only once it is
+ // fully initialized, so that other threads never see a
+ // partially initialized object.
+ this.timelineClient = tlClient;
}
}
}
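For context, the fix follows the safe-publication variant of double-checked locking: build and initialize in a local variable, then publish. A generic sketch of the idiom is below; it assumes the guarded field is declared volatile, which the Java Memory Model requires for this pattern to be correct (the diff does not show the field declaration, so that is an assumption here).

    class ExpensiveClient {
      void init() { /* expensive one-time setup */ }
    }

    public class LazyHolder {
      // volatile forbids reorderings that could publish the reference
      // before the object's initialization writes become visible.
      private volatile ExpensiveClient client;

      public ExpensiveClient getClient() {
        if (client == null) {             // fast path: no lock once set
          synchronized (this) {
            if (client == null) {         // re-check under the lock
              ExpensiveClient c = new ExpensiveClient();
              c.init();                   // finish initialization locally
              client = c;                 // publish the fully built object
            }
          }
        }
        return client;
      }
    }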
http://git-wip-us.apache.org/repos/asf/hadoop/blob/64f68cb0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
index 4125a81..5528412 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
@@ -963,48 +963,38 @@ public class LogsCLI extends Configured implements Tool {
request.setNodeId(nodeId);
request.setContainerState(report.getContainerState());
} catch (IOException | YarnException ex) {
- if (isAppFinished) {
- return printContainerLogsForFinishedApplicationWithoutNodeId(
- request, logCliHelper, useRegex);
+ nodeHttpAddress = getNodeHttpAddressFromRMWebString(request);
+ if (nodeHttpAddress != null && !nodeHttpAddress.isEmpty()) {
+ request.setNodeHttpAddress(nodeHttpAddress);
} else {
- nodeHttpAddress = getNodeHttpAddressFromRMWebString(request);
- if (nodeHttpAddress != null && !nodeHttpAddress.isEmpty()) {
- request.setNodeHttpAddress(nodeHttpAddress);
+ // handles the case where partial logs have already been uploaded to HDFS
+ int result = -1;
+ if (nodeAddress != null && !nodeAddress.isEmpty()) {
+ result = printAggregatedContainerLogs(request,
+ logCliHelper, useRegex);
} else {
- // for the case, we have already uploaded partial logs in HDFS
- int result = -1;
- if (nodeAddress != null && !nodeAddress.isEmpty()) {
- result = printAggregatedContainerLogs(
- request, logCliHelper, useRegex);
- } else {
- result = printAggregatedContainerLogsWithoutNodeId(
- request, logCliHelper, useRegex);
- }
- if (result == -1) {
- System.err.println("Unable to get logs for this container:"
- + containerIdStr + " for the application:" + appIdStr
- + " with the appOwner: " + appOwner);
- System.err.println("The application: " + appIdStr
- + " is still running, and we can not get Container report "
- + "for the container: " + containerIdStr +". Please try later "
- + "or after the application finishes.");
- }
- return result;
+ result = printAggregatedContainerLogsWithoutNodeId(request,
+ logCliHelper,
+ useRegex);
}
+ if (result == -1) {
+ System.err.println(
+ "Unable to get logs for this container:"
+ + containerIdStr + " for the application:"
+ + appIdStr + " with the appOwner: " + appOwner);
+ System.err.println("The application: " + appIdStr
+ + " is still running, and we can not get Container report "
+ + "for the container: " + containerIdStr + ". Please try later "
+ + "or after the application finishes.");
+ }
+ return result;
}
}
// If the application is not in the final state,
// we will provide the NodeHttpAddress and get the container logs
// by calling NodeManager webservice.
- if (!isAppFinished) {
- resultCode = printContainerLogsFromRunningApplication(getConf(), request,
- logCliHelper, useRegex);
- } else {
- // If the application is in the final state, we will directly
- // get the container logs from HDFS.
- resultCode = printContainerLogsForFinishedApplication(
- request, logCliHelper, useRegex);
- }
+ resultCode = printContainerLogsFromRunningApplication(getConf(), request,
+ logCliHelper, useRegex);
return resultCode;
}
[06/50] [abbrv] hadoop git commit: HDFS-11724. libhdfs compilation is
broken on OS X. Contributed by John Zhuge.
Posted by ae...@apache.org.
HDFS-11724. libhdfs compilation is broken on OS X. Contributed by John Zhuge.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/343948ca
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/343948ca
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/343948ca
Branch: refs/heads/HDFS-7240
Commit: 343948ca795d20b0f77aa086f14e9a79d90a435b
Parents: 07b98e7
Author: John Zhuge <jz...@apache.org>
Authored: Sat Apr 29 11:04:50 2017 -0700
Committer: John Zhuge <jz...@apache.org>
Committed: Mon May 1 13:37:55 2017 -0700
----------------------------------------------------------------------
.../src/main/native/libhdfs/os/posix/thread_local_storage.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/343948ca/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/posix/thread_local_storage.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/posix/thread_local_storage.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/posix/thread_local_storage.c
index 9faa594..e6b59d6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/posix/thread_local_storage.c
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/posix/thread_local_storage.c
@@ -19,7 +19,7 @@
#include "os/thread_local_storage.h"
#include <jni.h>
-#include <malloc.h>
+#include <stdlib.h>
#include <pthread.h>
#include <stdio.h>
[49/50] [abbrv] hadoop git commit: HDFS-11644. Support for querying
outputstream capabilities. Contributed by Manoj Govindassamy.
Posted by ae...@apache.org.
HDFS-11644. Support for querying outputstream capabilities. Contributed by Manoj Govindassamy.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/54fd0e44
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/54fd0e44
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/54fd0e44
Branch: refs/heads/HDFS-7240
Commit: 54fd0e44b76c4b982dcfb47932b6159851f14136
Parents: 749e5c0
Author: Andrew Wang <wa...@apache.org>
Authored: Mon May 8 21:59:49 2017 -0700
Committer: Andrew Wang <wa...@apache.org>
Committed: Mon May 8 21:59:49 2017 -0700
----------------------------------------------------------------------
.../apache/hadoop/fs/FSDataOutputStream.java | 10 ++-
.../apache/hadoop/fs/StreamCapabilities.java | 67 ++++++++++++++++++++
.../org/apache/hadoop/hdfs/DFSOutputStream.java | 15 ++++-
.../hadoop/hdfs/DFSStripedOutputStream.java | 11 +++-
.../apache/hadoop/hdfs/TestDFSOutputStream.java | 25 ++++++++
.../hadoop/hdfs/TestDFSStripedOutputStream.java | 24 ++++---
.../fs/azure/SyncableDataOutputStream.java | 12 +++-
7 files changed, 150 insertions(+), 14 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/54fd0e44/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStream.java
index 3f97ea8..1d95cd3 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStream.java
@@ -30,7 +30,7 @@ import org.apache.hadoop.classification.InterfaceStability;
@InterfaceAudience.Public
@InterfaceStability.Stable
public class FSDataOutputStream extends DataOutputStream
- implements Syncable, CanSetDropBehind {
+ implements Syncable, CanSetDropBehind, StreamCapabilities {
private final OutputStream wrappedStream;
private static class PositionCache extends FilterOutputStream {
@@ -111,6 +111,14 @@ public class FSDataOutputStream extends DataOutputStream
return wrappedStream;
}
+ @Override
+ public boolean hasCapability(String capability) {
+ if (wrappedStream instanceof StreamCapabilities) {
+ return ((StreamCapabilities) wrappedStream).hasCapability(capability);
+ }
+ return false;
+ }
+
@Override // Syncable
public void hflush() throws IOException {
if (wrappedStream instanceof Syncable) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/54fd0e44/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StreamCapabilities.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StreamCapabilities.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StreamCapabilities.java
new file mode 100644
index 0000000..65aa679
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StreamCapabilities.java
@@ -0,0 +1,67 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Interface to query streams for supported capabilities.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public interface StreamCapabilities {
+ /**
+ * Capabilities that a stream can support and be queried for.
+ */
+ enum StreamCapability {
+ /**
+ * Stream hflush capability to flush out the data in client's buffer.
+ * Streams with this capability implement {@link Syncable} and support
+ * {@link Syncable#hflush()}.
+ */
+ HFLUSH("hflush"),
+
+ /**
+ * Stream hsync capability to flush out the data in client's buffer and
+ * the disk device. Streams with this capability implement {@link Syncable}
+ * and support {@link Syncable#hsync()}.
+ */
+ HSYNC("hsync");
+
+ private final String capability;
+
+ StreamCapability(String value) {
+ this.capability = value;
+ }
+
+ public final String getValue() {
+ return capability;
+ }
+ }
+
+ /**
+ * Query the stream for a specific capability.
+ *
+ * @param capability string to query the stream support for.
+ * @return True if the stream supports capability.
+ */
+ boolean hasCapability(String capability);
+}
+
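A short sketch of how callers can use the new interface to guard optional operations instead of hitting unsupported ones at runtime; hasCapability() and the StreamCapability enum come from this commit, while the wrapper class, path, and setup here are illustrative.

    import java.io.IOException;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.StreamCapabilities.StreamCapability;

    public class CapabilityAwareWriter {
      public static void writeDurably(FileSystem fs, Path p, byte[] data)
          throws IOException {
        try (FSDataOutputStream out = fs.create(p)) {
          out.write(data);
          // Query before calling: striped (erasure-coded) streams, for
          // example, report false for both hflush and hsync.
          if (out.hasCapability(StreamCapability.HSYNC.getValue())) {
            out.hsync();   // flush to the disk device
          } else if (out.hasCapability(StreamCapability.HFLUSH.getValue())) {
            out.hflush();  // flush out of the client's buffer
          }
        }
      }
    }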
http://git-wip-us.apache.org/repos/asf/hadoop/blob/54fd0e44/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index ceaefd8..83f1425 100755
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -17,6 +17,9 @@
*/
package org.apache.hadoop.hdfs;
+import static org.apache.hadoop.fs.StreamCapabilities.StreamCapability.HFLUSH;
+import static org.apache.hadoop.fs.StreamCapabilities.StreamCapability.HSYNC;
+
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InterruptedIOException;
@@ -34,6 +37,7 @@ import org.apache.hadoop.fs.FSOutputSummer;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileEncryptionInfo;
import org.apache.hadoop.fs.ParentNotDirectoryException;
+import org.apache.hadoop.fs.StreamCapabilities;
import org.apache.hadoop.fs.Syncable;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
@@ -90,7 +94,7 @@ import com.google.common.base.Preconditions;
****************************************************************/
@InterfaceAudience.Private
public class DFSOutputStream extends FSOutputSummer
- implements Syncable, CanSetDropBehind {
+ implements Syncable, CanSetDropBehind, StreamCapabilities {
static final Logger LOG = LoggerFactory.getLogger(DFSOutputStream.class);
/**
* Number of times to retry creating a file when there are transient
@@ -546,6 +550,15 @@ public class DFSOutputStream extends FSOutputSummer
}
}
+ @Override
+ public boolean hasCapability(String capability) {
+ if (capability.equalsIgnoreCase(HSYNC.getValue()) ||
+ capability.equalsIgnoreCase((HFLUSH.getValue()))) {
+ return true;
+ }
+ return false;
+ }
+
/**
* Flushes out to all replicas of the block. The data is in the buffers
* of the DNs but not necessarily in the DN's OS buffers.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/54fd0e44/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
index 0fdae8c..2aa9e98 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
@@ -46,6 +46,7 @@ import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.CreateFlag;
+import org.apache.hadoop.fs.StreamCapabilities;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
@@ -77,8 +78,8 @@ import org.apache.htrace.core.TraceScope;
* Each stripe contains a sequence of cells.
*/
@InterfaceAudience.Private
-public class DFSStripedOutputStream extends DFSOutputStream {
-
+public class DFSStripedOutputStream extends DFSOutputStream
+ implements StreamCapabilities {
private static final ByteBufferPool BUFFER_POOL = new ElasticByteBufferPool();
static class MultipleBlockingQueue<T> {
@@ -810,6 +811,12 @@ public class DFSStripedOutputStream extends DFSOutputStream {
}
@Override
+ public boolean hasCapability(String capability) {
+ // StreamCapabilities like hsync / hflush are not supported yet.
+ return false;
+ }
+
+ @Override
public void hflush() {
// not supported yet
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/54fd0e44/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSOutputStream.java
index 52e3bb4..f281a3b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSOutputStream.java
@@ -17,9 +17,11 @@
*/
package org.apache.hadoop.hdfs;
+import java.io.ByteArrayInputStream;
import java.io.DataOutputStream;
import java.io.File;
import java.io.IOException;
+import java.io.InputStream;
import java.lang.reflect.Field;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
@@ -32,8 +34,10 @@ import java.util.Random;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsTracer;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.StreamCapabilities.StreamCapability;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DataStreamer.LastExceptionInStreamer;
import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
@@ -48,6 +52,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
+import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.PathUtils;
import org.apache.htrace.core.SpanId;
@@ -55,6 +60,8 @@ import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
+
+import static org.junit.Assert.assertTrue;
import static org.mockito.Matchers.anyBoolean;
import static org.mockito.Matchers.anyLong;
import org.mockito.Mockito;
@@ -346,6 +353,24 @@ public class TestDFSOutputStream {
verify(spyClient, times(1)).endFileLease(anyLong());
}
+ @Test
+ public void testStreamFlush() throws Exception {
+ FileSystem fs = cluster.getFileSystem();
+ FSDataOutputStream os = fs.create(new Path("/normal-file"));
+ // Verify output stream supports hsync() and hflush().
+ assertTrue("DFSOutputStream should support hflush()!",
+ os.hasCapability(StreamCapability.HFLUSH.getValue()));
+ assertTrue("DFSOutputStream should support hsync()!",
+ os.hasCapability(StreamCapability.HSYNC.getValue()));
+ byte[] bytes = new byte[1024];
+ InputStream is = new ByteArrayInputStream(bytes);
+ IOUtils.copyBytes(is, os, bytes.length);
+ os.hflush();
+ IOUtils.copyBytes(is, os, bytes.length);
+ os.hsync();
+ os.close();
+ }
+
@AfterClass
public static void tearDown() {
if (cluster != null) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/54fd0e44/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
index 70309c9..c0cfea2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.hdfs;
-import static org.apache.hadoop.fs.contract.ContractTestUtils.fail;
+import static org.junit.Assert.assertFalse;
import java.io.ByteArrayInputStream;
import java.io.IOException;
@@ -29,6 +29,7 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.StreamCapabilities.StreamCapability;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.io.IOUtils;
@@ -195,14 +196,19 @@ public class TestDFSStripedOutputStream {
public void testStreamFlush() throws Exception {
final byte[] bytes = StripedFileTestUtil.generateBytes(blockSize *
dataBlocks * 3 + cellSize * dataBlocks + cellSize + 123);
- try (FSDataOutputStream os = fs.create(new Path("/ec-file-1"))) {
- InputStream is = new ByteArrayInputStream(bytes);
- IOUtils.copyBytes(is, os, bytes.length);
- os.hflush();
- os.hsync();
- } catch (Exception e) {
- fail("hflush()/hsync() on striped file output stream failed!", e);
- }
+ FSDataOutputStream os = fs.create(new Path("/ec-file-1"));
+ assertFalse("DFSStripedOutputStream should not have hflush() " +
+ "capability yet!", os.hasCapability(
+ StreamCapability.HFLUSH.getValue()));
+ assertFalse("DFSStripedOutputStream should not have hsync() " +
+ "capability yet!", os.hasCapability(
+ StreamCapability.HSYNC.getValue()));
+ InputStream is = new ByteArrayInputStream(bytes);
+ IOUtils.copyBytes(is, os, bytes.length);
+ os.hflush();
+ IOUtils.copyBytes(is, os, bytes.length);
+ os.hsync();
+ os.close();
}
private void testOneFile(String src, int writeBytes) throws Exception {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/54fd0e44/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SyncableDataOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SyncableDataOutputStream.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SyncableDataOutputStream.java
index 9bec7a5..a52fdb7 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SyncableDataOutputStream.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SyncableDataOutputStream.java
@@ -22,6 +22,7 @@ import java.io.DataOutputStream;
import java.io.IOException;
import java.io.OutputStream;
+import org.apache.hadoop.fs.StreamCapabilities;
import org.apache.hadoop.fs.Syncable;
/**
@@ -30,13 +31,22 @@ import org.apache.hadoop.fs.Syncable;
* wrapped stream passed in to the constructor. This is required
* for HBase when wrapping a PageBlobOutputStream used as a write-ahead log.
*/
-public class SyncableDataOutputStream extends DataOutputStream implements Syncable {
+public class SyncableDataOutputStream extends DataOutputStream
+ implements Syncable, StreamCapabilities {
public SyncableDataOutputStream(OutputStream out) {
super(out);
}
@Override
+ public boolean hasCapability(String capability) {
+ if (out instanceof StreamCapabilities) {
+ return ((StreamCapabilities) out).hasCapability(capability);
+ }
+ return false;
+ }
+
+ @Override
public void hflush() throws IOException {
if (out instanceof Syncable) {
((Syncable) out).hflush();
[09/50] [abbrv] hadoop git commit: YARN-4359. Update LowCost agents
logic to take advantage of YARN-4358. (Jonathan Yaniv and Ishai Menache via
Subru).
Posted by ae...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3a615ee/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/TestAlignedPlanner.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/TestAlignedPlanner.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/TestAlignedPlanner.java
index 2645366..7207d71 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/TestAlignedPlanner.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/TestAlignedPlanner.java
@@ -18,13 +18,17 @@
package org.apache.hadoop.yarn.server.resourcemanager.reservation.planning;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Mockito.mock;
import java.util.ArrayList;
import java.util.Arrays;
+import java.util.HashMap;
import java.util.List;
+import java.util.Map;
import java.util.Random;
import org.apache.hadoop.yarn.api.records.ReservationDefinition;
@@ -37,29 +41,32 @@ import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.reservation.CapacityOverTimePolicy;
import org.apache.hadoop.yarn.server.resourcemanager.reservation.InMemoryPlan;
import org.apache.hadoop.yarn.server.resourcemanager.reservation.InMemoryReservationAllocation;
+import org.apache.hadoop.yarn.server.resourcemanager.reservation.RLESparseResourceAllocation;
import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationAllocation;
+import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationInterval;
import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSchedulerConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSystemTestUtil;
import org.apache.hadoop.yarn.server.resourcemanager.reservation.exceptions.PlanningException;
-import org.apache.hadoop.yarn.server.resourcemanager.reservation.planning.AlignedPlannerWithGreedy;
-import org.apache.hadoop.yarn.server.resourcemanager.reservation.planning.ReservationAgent;
+import org.apache.hadoop.yarn.server.resourcemanager.reservation.planning.StageAllocatorLowCostAligned.DurationInterval;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
import org.apache.hadoop.yarn.util.resource.Resources;
+import org.eclipse.jetty.util.log.Log;
import org.junit.Before;
import org.junit.Test;
-import org.eclipse.jetty.util.log.Log;
public class TestAlignedPlanner {
- ReservationAgent agent;
- InMemoryPlan plan;
- Resource minAlloc = Resource.newInstance(1024, 1);
- ResourceCalculator res = new DefaultResourceCalculator();
- Resource maxAlloc = Resource.newInstance(1024 * 8, 8);
- Random rand = new Random();
- long step;
+ private ReservationAgent agentRight;
+ private ReservationAgent agentLeft;
+ private InMemoryPlan plan;
+ private final Resource minAlloc = Resource.newInstance(1024, 1);
+ private final ResourceCalculator res = new DefaultResourceCalculator();
+ private final Resource maxAlloc = Resource.newInstance(1024 * 8, 8);
+ private final Random rand = new Random();
+ private Resource clusterCapacity;
+ private long step;
@Test
public void testSingleReservationAccept() throws PlanningException {
@@ -82,7 +89,7 @@ public class TestAlignedPlanner {
// Add reservation
ReservationId reservationID =
ReservationSystemTestUtil.getNewReservationId();
- agent.createReservation(reservationID, "u1", plan, rr1);
+ agentRight.createReservation(reservationID, "u1", plan, rr1);
// CHECK: allocation was accepted
assertTrue("Agent-based allocation failed", reservationID != null);
@@ -107,7 +114,7 @@ public class TestAlignedPlanner {
// Create reservation
ReservationDefinition rr1 =
createReservationDefinition(
- 10L, // Job arrival time
+ 10 * step, // Job arrival time
15 * step, // Job deadline
new ReservationRequest[] {
ReservationRequest.newInstance(
@@ -126,7 +133,7 @@ public class TestAlignedPlanner {
try {
ReservationId reservationID =
ReservationSystemTestUtil.getNewReservationId();
- agent.createReservation(reservationID, "u1", plan, rr1);
+ agentRight.createReservation(reservationID, "u1", plan, rr1);
fail();
} catch (PlanningException e) {
// Expected failure
@@ -166,7 +173,7 @@ public class TestAlignedPlanner {
try {
ReservationId reservationID =
ReservationSystemTestUtil.getNewReservationId();
- agent.createReservation(reservationID, "u1", plan, rr1);
+ agentRight.createReservation(reservationID, "u1", plan, rr1);
fail();
} catch (PlanningException e) {
// Expected failure
@@ -206,7 +213,7 @@ public class TestAlignedPlanner {
try {
ReservationId reservationID =
ReservationSystemTestUtil.getNewReservationId();
- agent.createReservation(reservationID, "u1", plan, rr1);
+ agentRight.createReservation(reservationID, "u1", plan, rr1);
fail();
} catch (PlanningException e) {
// Expected failure
@@ -246,7 +253,7 @@ public class TestAlignedPlanner {
try {
ReservationId reservationID =
ReservationSystemTestUtil.getNewReservationId();
- agent.createReservation(reservationID, "u1", plan, rr1);
+ agentRight.createReservation(reservationID, "u1", plan, rr1);
fail();
} catch (PlanningException e) {
// Expected failure
@@ -285,7 +292,7 @@ public class TestAlignedPlanner {
// Add reservation
ReservationId reservationID =
ReservationSystemTestUtil.getNewReservationId();
- agent.createReservation(reservationID, "u1", plan, rr1);
+ agentRight.createReservation(reservationID, "u1", plan, rr1);
// CHECK: allocation was accepted
assertTrue("Agent-based allocation failed", reservationID != null);
@@ -328,7 +335,7 @@ public class TestAlignedPlanner {
// Add reservation
ReservationId reservationID =
ReservationSystemTestUtil.getNewReservationId();
- agent.createReservation(reservationID, "u1", plan, rr1);
+ agentRight.createReservation(reservationID, "u1", plan, rr1);
// CHECK: allocation was accepted
assertTrue("Agent-based allocation failed", reservationID != null);
@@ -374,7 +381,7 @@ public class TestAlignedPlanner {
try {
ReservationId reservationID =
ReservationSystemTestUtil.getNewReservationId();
- agent.createReservation(reservationID, "u1", plan, rr1);
+ agentRight.createReservation(reservationID, "u1", plan, rr1);
fail();
} catch (PlanningException e) {
// Expected failure
@@ -420,10 +427,10 @@ public class TestAlignedPlanner {
ReservationSystemTestUtil.getNewReservationId();
// Add block, add flex, remove block, update flex
- agent.createReservation(blockReservationID, "uBlock", plan, rrBlock);
- agent.createReservation(flexReservationID, "uFlex", plan, rrFlex);
- agent.deleteReservation(blockReservationID, "uBlock", plan);
- agent.updateReservation(flexReservationID, "uFlex", plan, rrFlex);
+ agentRight.createReservation(blockReservationID, "uBlock", plan, rrBlock);
+ agentRight.createReservation(flexReservationID, "uFlex", plan, rrFlex);
+ agentRight.deleteReservation(blockReservationID, "uBlock", plan);
+ agentRight.updateReservation(flexReservationID, "uFlex", plan, rrFlex);
// CHECK: allocation was accepted
assertTrue("Agent-based allocation failed", flexReservationID != null);
@@ -458,7 +465,7 @@ public class TestAlignedPlanner {
try {
ReservationId reservationID =
ReservationSystemTestUtil.getNewReservationId();
- agent.createReservation(reservationID, "u1", plan, rr1);
+ agentRight.createReservation(reservationID, "u1", plan, rr1);
fail();
} catch (PlanningException e) {
// Expected failure
@@ -490,7 +497,7 @@ public class TestAlignedPlanner {
// Add reservation
ReservationId reservationID =
ReservationSystemTestUtil.getNewReservationId();
- agent.createReservation(reservationID, "u1", plan, rr1);
+ agentRight.createReservation(reservationID, "u1", plan, rr1);
// CHECK: allocation was accepted
assertTrue("Agent-based allocation failed", reservationID != null);
@@ -557,9 +564,9 @@ public class TestAlignedPlanner {
ReservationSystemTestUtil.getNewReservationId();
// Add all
- agent.createReservation(reservationID1, "u1", plan, rr7Mem1Core);
- agent.createReservation(reservationID2, "u2", plan, rr6Mem6Cores);
- agent.createReservation(reservationID3, "u3", plan, rr);
+ agentRight.createReservation(reservationID1, "u1", plan, rr7Mem1Core);
+ agentRight.createReservation(reservationID2, "u2", plan, rr6Mem6Cores);
+ agentRight.createReservation(reservationID3, "u3", plan, rr);
// Get reservation
ReservationAllocation alloc3 = plan.getReservationById(reservationID3);
@@ -684,8 +691,8 @@ public class TestAlignedPlanner {
for (ReservationDefinition rr : list) {
ReservationId reservationID =
ReservationSystemTestUtil.getNewReservationId();
- agent.createReservation(reservationID, "u" + Integer.toString(i), plan,
- rr);
+ agentRight.createReservation(reservationID, "u" + Integer.toString(i),
+ plan, rr);
++i;
}
@@ -695,6 +702,328 @@ public class TestAlignedPlanner {
}
+ @Test
+ public void testSingleReservationAcceptAllocateLeft()
+ throws PlanningException {
+
+ // Create reservation
+ ReservationDefinition rr1 =
+ createReservationDefinition(
+ 10 * step, // Job arrival time
+ 35 * step, // Job deadline
+ new ReservationRequest[] {
+ ReservationRequest.newInstance(
+ Resource.newInstance(1024, 1), // Capability
+ 20, // Num containers
+ 20, // Concurrency
+ 10 * step), // Duration
+ ReservationRequest.newInstance(
+ Resource.newInstance(1024, 1), // Capability
+ 20, // Num containers
+ 20, // Concurrency
+ 10 * step) }, // Duration
+ ReservationRequestInterpreter.R_ORDER, "u1");
+
+ // Add reservation
+ ReservationId reservationID =
+ ReservationSystemTestUtil.getNewReservationId();
+ agentLeft.createReservation(reservationID, "u1", plan, rr1);
+
+ // CHECK: allocation was accepted
+ assertTrue("Agent-based allocation failed", reservationID != null);
+ assertTrue("Agent-based allocation failed", plan.getAllReservations()
+ .size() == 1);
+
+ // Get reservation
+ ReservationAllocation alloc1 = plan.getReservationById(reservationID);
+
+ // Verify allocation
+ assertTrue(alloc1.toString(),
+ check(alloc1, 10 * step, 30 * step, 20, 1024, 1));
+
+ }
+
+ @Test
+ public void testLeftSucceedsRightFails() throws PlanningException {
+
+ // Prepare basic plan
+ int numJobsInScenario = initializeScenario2();
+
+ // Create reservation
+ ReservationDefinition rr1 =
+ createReservationDefinition(
+ 7 * step, // Job arrival time
+ 16 * step, // Job deadline
+ new ReservationRequest[] {
+ ReservationRequest.newInstance(Resource.newInstance(1024, 1),
+ 20, // Num containers
+ 20, // Concurrency
+ 2 * step), // Duration
+ ReservationRequest.newInstance(Resource.newInstance(1024, 1),
+ 20, // Num containers
+ 20, // Concurrency
+ 2 * step) }, // Duration
+ ReservationRequestInterpreter.R_ORDER, "u1");
+
+ ReservationDefinition rr2 =
+ createReservationDefinition(
+ 14 * step, // Job arrival time
+ 16 * step, // Job deadline
+ new ReservationRequest[] {
+ ReservationRequest.newInstance(
+ Resource.newInstance(1024, 1), // Capability
+ 100, // Num containers
+ 100, // Concurrency
+ 2 * step) }, // Duration
+ ReservationRequestInterpreter.R_ORDER, "u2");
+
+ // Add 1st reservation
+ ReservationId reservationID1 =
+ ReservationSystemTestUtil.getNewReservationId();
+ agentLeft.createReservation(reservationID1, "u1", plan, rr1);
+
+ // CHECK: allocation was accepted
+ assertTrue("Agent-based allocation failed", reservationID1 != null);
+ assertTrue("Agent-based allocation failed", plan.getAllReservations()
+ .size() == numJobsInScenario + 1);
+
+ // Get reservation
+ ReservationAllocation alloc1 = plan.getReservationById(reservationID1);
+
+ // Verify allocation
+ assertTrue(alloc1.toString(),
+ check(alloc1, 7 * step, 11 * step, 20, 1024, 1));
+
+ // Add second reservation
+ ReservationId reservationID2 =
+ ReservationSystemTestUtil.getNewReservationId();
+ agentLeft.createReservation(reservationID2, "u2", plan, rr2);
+
+ // CHECK: allocation was accepted
+ assertTrue("Agent-based allocation failed", reservationID2 != null);
+ assertTrue("Agent-based allocation failed", plan.getAllReservations()
+ .size() == numJobsInScenario + 2);
+
+ // Get reservation
+ ReservationAllocation alloc2 = plan.getReservationById(reservationID2);
+
+ // Verify allocation
+ assertTrue(alloc2.toString(),
+ check(alloc2, 14 * step, 16 * step, 100, 1024, 1));
+
+ agentLeft.deleteReservation(reservationID1, "u1", plan);
+ agentLeft.deleteReservation(reservationID2, "u2", plan);
+
+ // Now try to allocate the same jobs with agentRight. The second
+ // job should fail
+ // Add 1st reservation
+ ReservationId reservationID3 =
+ ReservationSystemTestUtil.getNewReservationId();
+ agentRight.createReservation(reservationID3, "u1", plan, rr1);
+
+ // CHECK: allocation was accepted
+ assertTrue("Agent-based allocation failed", reservationID3 != null);
+ assertTrue("Agent-based allocation failed", plan.getAllReservations()
+ .size() == numJobsInScenario + 1);
+
+ // Add 2nd reservation
+ try {
+ ReservationId reservationID4 =
+ ReservationSystemTestUtil.getNewReservationId();
+ agentRight.createReservation(reservationID4, "u2", plan, rr2);
+ fail();
+ } catch (PlanningException e) {
+ // Expected failure
+ }
+
+ }
+
+ @Test
+ public void testValidateOrderNoGap() {
+
+ //
+ // Initialize allocations
+ //
+
+ RLESparseResourceAllocation allocation =
+ new RLESparseResourceAllocation(res);
+ allocation.addInterval(new ReservationInterval(10 * step, 13 * step),
+ Resource.newInstance(1024, 1));
+
+ // curAlloc
+ Map<ReservationInterval, Resource> curAlloc =
+ new HashMap<ReservationInterval, Resource>();
+
+ //
+ // Check cases
+ //
+
+ // 1. allocateLeft = false, succeed when there is no gap
+ curAlloc.clear();
+ curAlloc.put(new ReservationInterval(9 * step, 10 * step),
+ Resource.newInstance(1024, 1));
+ assertTrue("validateOrderNoFap() should have suceeded",
+ IterativePlanner.validateOrderNoGap(allocation, curAlloc, false));
+
+ // 2. allocateLeft = false, fail when curAlloc has a gap
+ curAlloc.put(new ReservationInterval(7 * step, 8 * step),
+ Resource.newInstance(1024, 1));
+ assertFalse("validateOrderNoGap() failed to identify a gap in curAlloc",
+ IterativePlanner.validateOrderNoGap(allocation, curAlloc, false));
+
+ // 3. allocateLeft = false, fail when there is a gap between curAlloc and
+ // allocations
+ curAlloc.clear();
+ curAlloc.put(new ReservationInterval(8 * step, 9 * step),
+ Resource.newInstance(1024, 1));
+ assertFalse("validateOrderNoGap() failed to identify a gap between "
+ + "allocations and curAlloc",
+ IterativePlanner.validateOrderNoGap(allocation, curAlloc, false));
+
+ // 4. allocateLeft = true, succeed when there is no gap
+ curAlloc.clear();
+ curAlloc.put(new ReservationInterval(13 * step, 14 * step),
+ Resource.newInstance(1024, 1));
+ assertTrue("validateOrderNoFap() should have suceeded",
+ IterativePlanner.validateOrderNoGap(allocation, curAlloc, true));
+
+ // 5. allocateLeft = true, fail when there is a gap between curAlloc and
+ // allocations
+ curAlloc.put(new ReservationInterval(15 * step, 16 * step),
+ Resource.newInstance(1024, 1));
+ assertFalse("validateOrderNoGap() failed to identify a gap in curAlloc",
+ IterativePlanner.validateOrderNoGap(allocation, curAlloc, true));
+
+ // 6. allocateLeft = true, fail when curAlloc has a gap
+ curAlloc.clear();
+ curAlloc.put(new ReservationInterval(14 * step, 15 * step),
+ Resource.newInstance(1024, 1));
+ assertFalse("validateOrderNoGap() failed to identify a gap between "
+ + "allocations and curAlloc",
+ IterativePlanner.validateOrderNoGap(allocation, curAlloc, true));
+
+ }
+
+ @Test
+ public void testGetDurationInterval() throws PlanningException {
+
+ DurationInterval durationInterval = null;
+
+ // Create netRLERes:
+ // - 4GB & 4VC between [10,20) and [30,40)
+ // - 8GB & 8VC between [20,30)
+ RLESparseResourceAllocation netRLERes =
+ new RLESparseResourceAllocation(res);
+ netRLERes.addInterval(
+ new ReservationInterval(10 * step, 40 * step),
+ Resource.newInstance(4096, 4)
+ );
+ netRLERes.addInterval(
+ new ReservationInterval(20 * step, 30 * step),
+ Resource.newInstance(4096, 4)
+ );
+
+ // Create planLoads:
+ // - 5GB & 5VC between [20,30)
+ RLESparseResourceAllocation planLoads =
+ new RLESparseResourceAllocation(res);
+ planLoads.addInterval(
+ new ReservationInterval(20 * step, 30 * step),
+ Resource.newInstance(5120, 5)
+ );
+
+ // Create planModifications:
+ // - 1GB & 1VC between [25,35)
+ RLESparseResourceAllocation planModifications =
+ new RLESparseResourceAllocation(res);
+ planModifications.addInterval(
+ new ReservationInterval(25 * step, 35 * step),
+ Resource.newInstance(1024, 1)
+ );
+
+ // Set requested resources
+ Resource requestedResources = Resource.newInstance(1024, 1);
+
+
+ // 1.
+ // currLoad: should start at 20*step, end at 30*step with a null value
+ // (in getTotalCost(), after the for loop we will have loadPrev == null)
+ // netAvailableResources: should start exactly at startTime (10*step),
+ // end exactly at endTime (30*step) with a null value
+ durationInterval =
+ StageAllocatorLowCostAligned.getDurationInterval(10*step, 30*step,
+ planLoads, planModifications, clusterCapacity, netRLERes, res, step,
+ requestedResources);
+ assertEquals(durationInterval.numCanFit(), 4);
+ assertEquals(durationInterval.getTotalCost(), 0.55, 0.00001);
+
+ // 2.
+ // currLoad: should start at 20*step, end at 31*step with a null value
+ // (in getTotalCost, after the for loop we will have loadPrev == null)
+ // netAvailableResources: should start exactly at startTime (10*step),
+ // end exactly at endTime (31*step) with a null value
+ durationInterval =
+ StageAllocatorLowCostAligned.getDurationInterval(10*step, 31*step,
+ planLoads, planModifications, clusterCapacity, netRLERes, res, step,
+ requestedResources);
+ System.out.println(durationInterval);
+ assertEquals(durationInterval.numCanFit(), 3);
+ assertEquals(durationInterval.getTotalCost(), 0.56, 0.00001);
+
+ // 3.
+ // currLoad: should start at 20*step, end at 30*step with a null value
+ // (in getTotalCost, after the for loop we will have loadPrev == null)
+ // netAvailableResources: should start exactly startTime (15*step),
+ // end exactly at endTime (30*step) with a null value
+ durationInterval =
+ StageAllocatorLowCostAligned.getDurationInterval(15*step, 30*step,
+ planLoads, planModifications, clusterCapacity, netRLERes, res, step,
+ requestedResources);
+ assertEquals(durationInterval.numCanFit(), 4);
+ assertEquals(durationInterval.getTotalCost(), 0.55, 0.00001);
+
+ // 4.
+ // currLoad: should start at 20*step, end at 31*step with a null value
+ // (in getTotalCost, after the for loop we will have loadPrev == null)
+ // netAvailableResources: should start exactly at startTime (15*step),
+ // end exactly at endTime (31*step) with a value other than null
+ durationInterval =
+ StageAllocatorLowCostAligned.getDurationInterval(15*step, 31*step,
+ planLoads, planModifications, clusterCapacity, netRLERes, res, step,
+ requestedResources);
+ System.out.println(durationInterval);
+ assertEquals(durationInterval.numCanFit(), 3);
+ assertEquals(durationInterval.getTotalCost(), 0.56, 0.00001);
+
+ // 5.
+ // currLoad: should only contain one entry at startTime
+ // (22*step), therefore loadPrev != null and we should enter the if
+ // condition after the for loop in getTotalCost
+ // netAvailableResources: should only contain one entry at startTime
+ // (22*step)
+ durationInterval =
+ StageAllocatorLowCostAligned.getDurationInterval(22*step, 23*step,
+ planLoads, planModifications, clusterCapacity, netRLERes, res, step,
+ requestedResources);
+ System.out.println(durationInterval);
+ assertEquals(durationInterval.numCanFit(), 8);
+ assertEquals(durationInterval.getTotalCost(), 0.05, 0.00001);
+
+ // 6.
+ // currLoad: should start at 39*step, end at 41*step with a null value
+ // (in getTotalCost, after the for loop we will have loadPrev == null)
+ // netAvailableResources: should start exactly at startTime (39*step),
+ // end exactly at endTime (41*step) with a null value
+ durationInterval =
+ StageAllocatorLowCostAligned.getDurationInterval(39*step, 41*step,
+ planLoads, planModifications, clusterCapacity, netRLERes, res, step,
+ requestedResources);
+ System.out.println(durationInterval);
+ assertEquals(durationInterval.numCanFit(), 0);
+ assertEquals(durationInterval.getTotalCost(), 0, 0.00001);
+
+ }
+
@Before
public void setup() throws Exception {
@@ -709,16 +1038,15 @@ public class TestAlignedPlanner {
int capacityCores = 100;
step = 60000L;
- Resource clusterCapacity = Resource.newInstance(capacityMem, capacityCores);
+ clusterCapacity = Resource.newInstance(capacityMem, capacityCores);
String reservationQ =
ReservationSystemTestUtil.getFullReservationQueueName();
float instConstraint = 100;
float avgConstraint = 100;
- ReservationSchedulerConfiguration conf =
- ReservationSystemTestUtil.createConf(reservationQ, timeWindow,
- instConstraint, avgConstraint);
+ ReservationSchedulerConfiguration conf = ReservationSystemTestUtil
+ .createConf(reservationQ, timeWindow, instConstraint, avgConstraint);
CapacityOverTimePolicy policy = new CapacityOverTimePolicy();
policy.init(reservationQ, conf);
@@ -728,14 +1056,19 @@ public class TestAlignedPlanner {
conf.setInt(AlignedPlannerWithGreedy.SMOOTHNESS_FACTOR,
AlignedPlannerWithGreedy.DEFAULT_SMOOTHNESS_FACTOR);
+ conf.setBoolean(ReservationAgent.FAVOR_EARLY_ALLOCATION, false);
+
// Set planning agent
- agent = new AlignedPlannerWithGreedy();
- agent.init(conf);
+ agentRight = new AlignedPlannerWithGreedy();
+ agentRight.init(conf);
+
+ conf.setBoolean(ReservationAgent.FAVOR_EARLY_ALLOCATION, true);
+ agentLeft = new AlignedPlannerWithGreedy();
+ agentLeft.init(conf);
// Create Plan
- plan =
- new InMemoryPlan(queueMetrics, policy, agent, clusterCapacity, step,
- res, minAlloc, maxAlloc, "dedicated", null, true, context);
+ plan = new InMemoryPlan(queueMetrics, policy, agentRight, clusterCapacity,
+ step, res, minAlloc, maxAlloc, "dedicated", null, true, context);
}
private int initializeScenario1() throws PlanningException {
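The two agents wired up in setup() above differ only in the new
ReservationAgent.FAVOR_EARLY_ALLOCATION flag: agentLeft packs each stage as
early as possible inside the [arrival, deadline) window, agentRight as late
as possible. As a rough illustration of that placement rule only, here is a
minimal, self-contained Java sketch; it is not the YARN IterativePlanner,
and every name in it is hypothetical:

import java.util.Arrays;

/**
 * Illustrative sketch only: place a fixed-duration job at the earliest
 * ("left") or latest ("right") feasible start inside [arrival, deadline).
 */
public class PlacementSketch {

  /** Returns a feasible start step, or -1 if the job does not fit. */
  static int place(int[] freeCapacity, int arrival, int deadline,
      int duration, int demand, boolean allocateLeft) {
    int first = arrival;
    int last = deadline - duration;            // latest possible start
    if (allocateLeft) {
      for (int s = first; s <= last; s++) {
        if (fits(freeCapacity, s, duration, demand)) {
          return s;                            // earliest feasible start
        }
      }
    } else {
      for (int s = last; s >= first; s--) {
        if (fits(freeCapacity, s, duration, demand)) {
          return s;                            // latest feasible start
        }
      }
    }
    return -1;
  }

  private static boolean fits(int[] free, int start, int duration,
      int demand) {
    for (int t = start; t < start + duration; t++) {
      if (free[t] < demand) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    int[] free = new int[20];
    Arrays.fill(free, 100);
    // Same job, two placement policies: demand 20 for 3 steps in [5, 16).
    System.out.println(place(free, 5, 16, 3, 20, true));   // 5  (left)
    System.out.println(place(free, 5, 16, 3, 20, false));  // 13 (right)
  }
}

Under this reading, testLeftSucceedsRightFails above behaves as its name
suggests: packing rr1 early leaves the tail of the window free for rr2's
full-capacity [14, 16) request, while packing rr1 late collides with it.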
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3a615ee/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/TestGreedyReservationAgent.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/TestGreedyReservationAgent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/TestGreedyReservationAgent.java
index 6d1cfa8..46bfa80 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/TestGreedyReservationAgent.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/TestGreedyReservationAgent.java
@@ -6,9 +6,9 @@
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
- *
+ *
* http://www.apache.org/licenses/LICENSE-2.0
- *
+ *
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -55,12 +55,12 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.Capacity
import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
import org.apache.hadoop.yarn.util.resource.Resources;
+import org.eclipse.jetty.util.log.Log;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
-import org.eclipse.jetty.util.log.Log;
@RunWith(Parameterized.class)
public class TestGreedyReservationAgent {
@@ -108,7 +108,7 @@ public class TestGreedyReservationAgent {
policy.init(reservationQ, conf);
// setting conf to
- conf.setBoolean(GreedyReservationAgent.GREEDY_FAVOR_EARLY_ALLOCATION,
+ conf.setBoolean(GreedyReservationAgent.FAVOR_EARLY_ALLOCATION,
allocateLeft);
agent = new GreedyReservationAgent();
agent.init(conf);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3a615ee/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/TestSimpleCapacityReplanner.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/TestSimpleCapacityReplanner.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/TestSimpleCapacityReplanner.java
index e01608c..c4f94c2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/TestSimpleCapacityReplanner.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/TestSimpleCapacityReplanner.java
@@ -6,9 +6,9 @@
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
- *
+ *
* http://www.apache.org/licenses/LICENSE-2.0
- *
+ *
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
[21/50] [abbrv] hadoop git commit: HADOOP-14352. Make some
HttpServer2 SSL properties optional (jzhuge via rkanter)
Posted by ae...@apache.org.
HADOOP-14352. Make some HttpServer2 SSL properties optional (jzhuge via rkanter)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8b82317f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8b82317f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8b82317f
Branch: refs/heads/HDFS-7240
Commit: 8b82317fab0cb3023da333d4d557e226712a9c92
Parents: cedaf4c
Author: Robert Kanter <rk...@apache.org>
Authored: Tue May 2 17:51:28 2017 -0700
Committer: Robert Kanter <rk...@apache.org>
Committed: Tue May 2 17:51:28 2017 -0700
----------------------------------------------------------------------
.../org/apache/hadoop/http/HttpServer2.java | 45 ++++++++++++--------
1 file changed, 27 insertions(+), 18 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b82317f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
index cbabb33..0891e8e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
@@ -348,18 +348,17 @@ public final class HttpServer2 implements FilterContainer {
/**
* A wrapper of {@link Configuration#getPassword(String)}. It returns
- * <code>String</code> instead of <code>char[]</code> and throws
- * {@link IOException} when the password not found.
+ * <code>String</code> instead of <code>char[]</code>.
*
* @param conf the configuration
* @param name the property name
- * @return the password string
+ * @return the password string or null
*/
- private static String getPassword(Configuration conf, String name)
+ private static String getPasswordString(Configuration conf, String name)
throws IOException {
char[] passchars = conf.getPassword(name);
if (passchars == null) {
- throw new IOException("Password " + name + " not found");
+ return null;
}
return new String(passchars);
}
@@ -371,20 +370,30 @@ public final class HttpServer2 implements FilterContainer {
if (sslConf == null) {
return;
}
- needsClientAuth(sslConf.getBoolean(
+ needsClientAuth = sslConf.getBoolean(
SSLFactory.SSL_SERVER_NEED_CLIENT_AUTH,
- SSLFactory.SSL_SERVER_NEED_CLIENT_AUTH_DEFAULT));
- keyStore(sslConf.get(SSLFactory.SSL_SERVER_KEYSTORE_LOCATION),
- getPassword(sslConf, SSLFactory.SSL_SERVER_KEYSTORE_PASSWORD),
- sslConf.get(SSLFactory.SSL_SERVER_KEYSTORE_TYPE,
- SSLFactory.SSL_SERVER_KEYSTORE_TYPE_DEFAULT));
- keyPassword(getPassword(sslConf,
- SSLFactory.SSL_SERVER_KEYSTORE_KEYPASSWORD));
- trustStore(sslConf.get(SSLFactory.SSL_SERVER_TRUSTSTORE_LOCATION),
- getPassword(sslConf, SSLFactory.SSL_SERVER_TRUSTSTORE_PASSWORD),
- sslConf.get(SSLFactory.SSL_SERVER_TRUSTSTORE_TYPE,
- SSLFactory.SSL_SERVER_TRUSTSTORE_TYPE_DEFAULT));
- excludeCiphers(sslConf.get(SSLFactory.SSL_SERVER_EXCLUDE_CIPHER_LIST));
+ SSLFactory.SSL_SERVER_NEED_CLIENT_AUTH_DEFAULT);
+ keyStore = sslConf.getTrimmed(SSLFactory.SSL_SERVER_KEYSTORE_LOCATION);
+ if (keyStore == null || keyStore.isEmpty()) {
+ throw new IOException(String.format("Property %s not specified",
+ SSLFactory.SSL_SERVER_KEYSTORE_LOCATION));
+ }
+ keyStorePassword = getPasswordString(sslConf,
+ SSLFactory.SSL_SERVER_KEYSTORE_PASSWORD);
+ if (keyStorePassword == null) {
+ throw new IOException(String.format("Property %s not specified",
+ SSLFactory.SSL_SERVER_KEYSTORE_PASSWORD));
+ }
+ keyStoreType = sslConf.get(SSLFactory.SSL_SERVER_KEYSTORE_TYPE,
+ SSLFactory.SSL_SERVER_KEYSTORE_TYPE_DEFAULT);
+ keyPassword = getPasswordString(sslConf,
+ SSLFactory.SSL_SERVER_KEYSTORE_KEYPASSWORD);
+ trustStore = sslConf.get(SSLFactory.SSL_SERVER_TRUSTSTORE_LOCATION);
+ trustStorePassword = getPasswordString(sslConf,
+ SSLFactory.SSL_SERVER_TRUSTSTORE_PASSWORD);
+ trustStoreType = sslConf.get(SSLFactory.SSL_SERVER_TRUSTSTORE_TYPE,
+ SSLFactory.SSL_SERVER_TRUSTSTORE_TYPE_DEFAULT);
+ excludeCiphers = sslConf.get(SSLFactory.SSL_SERVER_EXCLUDE_CIPHER_LIST);
}
public HttpServer2 build() throws IOException {
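With this change only the keystore location and keystore password remain
mandatory (the build fails with an IOException naming the missing
property); the key password, truststore settings and excluded ciphers are
now optional. Below is a minimal sketch of that optional-vs-required
pattern. It reuses only Configuration.getPassword() from the diff above;
the property keys and the rest of the code are illustrative, not the
HttpServer2 builder itself:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;

public class SslConfigSketch {

  /** Like the new helper: null when absent, instead of throwing. */
  static String getPasswordString(Configuration conf, String name)
      throws IOException {
    char[] pass = conf.getPassword(name);
    return pass == null ? null : new String(pass);
  }

  public static void main(String[] args) throws IOException {
    Configuration sslConf = new Configuration(false);
    sslConf.set("ssl.server.keystore.password", "secret");

    // Required property: fail fast with a descriptive message.
    String keyStorePassword =
        getPasswordString(sslConf, "ssl.server.keystore.password");
    if (keyStorePassword == null) {
      throw new IOException(
          "Property ssl.server.keystore.password not specified");
    }

    // Optional property: null simply means the feature is unused.
    String trustStorePassword =
        getPasswordString(sslConf, "ssl.server.truststore.password");
    System.out.println("truststore password configured: "
        + (trustStorePassword != null));
  }
}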
[23/50] [abbrv] hadoop git commit: HADOOP-14372. TestSymlinkLocalFS
timeouts are too low. Contributed by Eric Badger.
Posted by ae...@apache.org.
HADOOP-14372. TestSymlinkLocalFS timeouts are too low. Contributed by Eric Badger.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d4631e46
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d4631e46
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d4631e46
Branch: refs/heads/HDFS-7240
Commit: d4631e466bc85ce605061673a31f451353da9713
Parents: d9014bd
Author: Kihwal Lee <ki...@apache.org>
Authored: Wed May 3 09:47:17 2017 -0500
Committer: Kihwal Lee <ki...@apache.org>
Committed: Wed May 3 09:47:17 2017 -0500
----------------------------------------------------------------------
.../src/test/java/org/apache/hadoop/fs/TestSymlinkLocalFS.java | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4631e46/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestSymlinkLocalFS.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestSymlinkLocalFS.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestSymlinkLocalFS.java
index 0a51b65..45d63b1 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestSymlinkLocalFS.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestSymlinkLocalFS.java
@@ -105,7 +105,7 @@ abstract public class TestSymlinkLocalFS extends SymlinkBaseTest {
super.testStatDanglingLink();
}
- @Test(timeout=1000)
+ @Test(timeout=10000)
/** lstat a non-existent file using a partially qualified path */
public void testDanglingLinkFilePartQual() throws IOException {
Path filePartQual = new Path(getScheme()+":///doesNotExist");
@@ -123,7 +123,7 @@ abstract public class TestSymlinkLocalFS extends SymlinkBaseTest {
}
}
- @Test(timeout=1000)
+ @Test(timeout=10000)
/** Stat and lstat a dangling link */
public void testDanglingLink() throws IOException {
assumeNotWindows();
@@ -169,7 +169,7 @@ abstract public class TestSymlinkLocalFS extends SymlinkBaseTest {
wrapper.getFileStatus(link);
}
- @Test(timeout=1000)
+ @Test(timeout=10000)
/**
* Test getLinkTarget with a partially qualified target.
* NB: Hadoop does not support fully qualified URIs for the
[48/50] [abbrv] hadoop git commit: YARN-6281. Cleanup when AMRMProxy
fails to initialize a new interceptor chain. (Botong Huang via Subru)
Posted by ae...@apache.org.
YARN-6281. Cleanup when AMRMProxy fails to initialize a new interceptor chain. (Botong Huang via Subru)
(cherry picked from commit 57a9afbd45b7ef8e6021cc58f96bd0074bf1389d)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/749e5c09
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/749e5c09
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/749e5c09
Branch: refs/heads/HDFS-7240
Commit: 749e5c09b9990590c282ea944e24735b795351fc
Parents: cd9ff27
Author: Subru Krishnan <su...@apache.org>
Authored: Fri Mar 10 18:13:29 2017 -0800
Committer: Subru Krishnan <su...@apache.org>
Committed: Mon May 8 16:55:47 2017 -0700
----------------------------------------------------------------------
.../nodemanager/amrmproxy/AMRMProxyService.java | 25 +++++++++++------
.../amrmproxy/BaseAMRMProxyTest.java | 21 +++++++++-----
.../amrmproxy/TestAMRMProxyService.java | 29 ++++++++++++++++++++
3 files changed, 59 insertions(+), 16 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/749e5c09/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
index 2696bca..aeb3be8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
@@ -319,11 +319,16 @@ public class AMRMProxyService extends AbstractService implements
+ " ApplicationId:" + applicationAttemptId + " for the user: "
+ user);
- RequestInterceptor interceptorChain =
- this.createRequestInterceptorChain();
- interceptorChain.init(createApplicationMasterContext(
- applicationAttemptId, user, amrmToken, localToken));
- chainWrapper.init(interceptorChain, applicationAttemptId);
+ try {
+ RequestInterceptor interceptorChain =
+ this.createRequestInterceptorChain();
+ interceptorChain.init(createApplicationMasterContext(this.nmContext,
+ applicationAttemptId, user, amrmToken, localToken));
+ chainWrapper.init(interceptorChain, applicationAttemptId);
+ } catch (Exception e) {
+ this.applPipelineMap.remove(applicationAttemptId.getApplicationId());
+ throw e;
+ }
}
/**
@@ -339,8 +344,10 @@ public class AMRMProxyService extends AbstractService implements
this.applPipelineMap.remove(applicationId);
if (pipeline == null) {
- LOG.info("Request to stop an application that does not exist. Id:"
- + applicationId);
+ LOG.info(
+ "No interceptor pipeline for application {},"
+ + " likely because its AM is not run in this node.",
+ applicationId);
} else {
// Remove the appAttempt in AMRMTokenSecretManager
this.secretManager
@@ -413,11 +420,11 @@ public class AMRMProxyService extends AbstractService implements
}
private AMRMProxyApplicationContext createApplicationMasterContext(
- ApplicationAttemptId applicationAttemptId, String user,
+ Context context, ApplicationAttemptId applicationAttemptId, String user,
Token<AMRMTokenIdentifier> amrmToken,
Token<AMRMTokenIdentifier> localToken) {
AMRMProxyApplicationContextImpl appContext =
- new AMRMProxyApplicationContextImpl(this.nmContext, getConfig(),
+ new AMRMProxyApplicationContextImpl(context, getConfig(),
applicationAttemptId, user, amrmToken, localToken);
return appContext;
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/749e5c09/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/BaseAMRMProxyTest.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/BaseAMRMProxyTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/BaseAMRMProxyTest.java
index 7f96947..6f5009e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/BaseAMRMProxyTest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/BaseAMRMProxyTest.java
@@ -121,9 +121,9 @@ public abstract class BaseAMRMProxyTest {
+ MockRequestInterceptor.class.getName());
this.dispatcher = new AsyncDispatcher();
- this.dispatcher.init(conf);
+ this.dispatcher.init(this.conf);
this.dispatcher.start();
- this.amrmProxyService = createAndStartAMRMProxyService();
+ createAndStartAMRMProxyService(this.conf);
}
@After
@@ -137,12 +137,19 @@ public abstract class BaseAMRMProxyTest {
return threadpool;
}
- protected MockAMRMProxyService createAndStartAMRMProxyService() {
- MockAMRMProxyService svc =
+ protected Configuration getConf() {
+ return this.conf;
+ }
+
+ protected void createAndStartAMRMProxyService(Configuration config) {
+ // Stop the existing instance first if not null
+ if (this.amrmProxyService != null) {
+ this.amrmProxyService.stop();
+ }
+ this.amrmProxyService =
new MockAMRMProxyService(new NullContext(), dispatcher);
- svc.init(conf);
- svc.start();
- return svc;
+ this.amrmProxyService.init(config);
+ this.amrmProxyService.start();
}
/**
http://git-wip-us.apache.org/repos/asf/hadoop/blob/749e5c09/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestAMRMProxyService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestAMRMProxyService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestAMRMProxyService.java
index 837278c..fa17f26 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestAMRMProxyService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestAMRMProxyService.java
@@ -21,9 +21,11 @@ package org.apache.hadoop.yarn.server.nodemanager.amrmproxy;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
+import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse;
@@ -34,6 +36,7 @@ import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.server.nodemanager.amrmproxy.AMRMProxyService.RequestInterceptorChainWrapper;
import org.apache.hadoop.yarn.util.Records;
@@ -95,6 +98,32 @@ public class TestAMRMProxyService extends BaseAMRMProxyTest {
}
/**
+ * Tests the case when interceptor pipeline initialization fails.
+ */
+ @Test
+ public void testInterceptorInitFailure() {
+ Configuration conf = this.getConf();
+ // Override with a bad interceptor configuration
+ conf.set(YarnConfiguration.AMRM_PROXY_INTERCEPTOR_CLASS_PIPELINE,
+ "class.that.does.not.exist");
+
+ // Reinitialize instance with the new config
+ createAndStartAMRMProxyService(conf);
+ int testAppId = 1;
+ try {
+ registerApplicationMaster(testAppId);
+ Assert.fail("Should not reach here. Expecting an exception thrown");
+ } catch (Exception e) {
+ Map<ApplicationId, RequestInterceptorChainWrapper> pipelines =
+ getAMRMProxyService().getPipelines();
+ ApplicationId id = getApplicationId(testAppId);
+ Assert.assertTrue(
+ "The interceptor pipeline should be removed if initializtion fails",
+ pipelines.get(id) == null);
+ }
+ }
+
+ /**
* Tests the registration of multiple application master serially one at a
* time.
*
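The fix follows a register-then-rollback pattern: the chain wrapper is
already published in applPipelineMap by the time the interceptor chain is
initialized, so a failed init must unpublish it before rethrowing; that is
exactly what the new testInterceptorInitFailure asserts. A self-contained
sketch of the pattern (illustrative names only, not the AMRMProxyService
API):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class RollbackRegistrySketch {

  interface Pipeline {
    void init(String config) throws Exception;
  }

  private final Map<String, Pipeline> pipelines = new ConcurrentHashMap<>();

  /** Publish first, then initialize; roll back if init fails. */
  void register(String appId, Pipeline pipeline, String config)
      throws Exception {
    pipelines.put(appId, pipeline);
    try {
      pipeline.init(config);
    } catch (Exception e) {
      // Remove the published entry so later lookups never see a
      // half-initialized pipeline, then surface the original failure.
      pipelines.remove(appId);
      throw e;
    }
  }

  boolean isRegistered(String appId) {
    return pipelines.containsKey(appId);
  }

  public static void main(String[] args) {
    RollbackRegistrySketch registry = new RollbackRegistrySketch();
    try {
      registry.register("app_1", cfg -> {
        throw new IllegalStateException("class.that.does.not.exist");
      }, "bad-config");
    } catch (Exception expected) {
      // As in testInterceptorInitFailure: the entry must be gone.
      System.out.println(registry.isRegistered("app_1"));  // false
    }
  }
}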
[14/50] [abbrv] hadoop git commit: HDFS-11710.
hadoop-hdfs-native-client build fails in trunk in Windows after HDFS-11529
(Contributed by Sailesh Mukil)
Posted by ae...@apache.org.
HDFS-11710. hadoop-hdfs-native-client build fails in trunk in Windows after HDFS-11529 (Contributed by Sailesh Mukil)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b0f54ea0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b0f54ea0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b0f54ea0
Branch: refs/heads/HDFS-7240
Commit: b0f54ea035f406909f45c66b5403580919d63a4a
Parents: 6bf42e4
Author: Vinayakumar B <vi...@apache.org>
Authored: Tue May 2 10:32:38 2017 +0530
Committer: Vinayakumar B <vi...@apache.org>
Committed: Tue May 2 10:32:38 2017 +0530
----------------------------------------------------------------------
.../src/main/native/libhdfs/os/windows/thread_local_storage.c | 7 +++++++
1 file changed, 7 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0f54ea0/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/windows/thread_local_storage.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/windows/thread_local_storage.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/windows/thread_local_storage.c
index bf0979a..28d014d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/windows/thread_local_storage.c
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/windows/thread_local_storage.c
@@ -61,6 +61,13 @@ static void detachCurrentThreadFromJvm()
free(state);
}
+void hdfsThreadDestructor(void *v)
+{
+ // Ignore 'v' since it will contain the state and we will obtain it in the below
+ // call anyway.
+ detachCurrentThreadFromJvm();
+}
+
/**
* Unlike pthreads, the Windows API does not seem to provide a convenient way to
* hook a callback onto thread shutdown. However, the Windows portable
[39/50] [abbrv] hadoop git commit: HADOOP-14390. Correct spelling of
'succeed' and variants. Contributed by Dongtao Zhang
Posted by ae...@apache.org.
HADOOP-14390. Correct spelling of 'succeed' and variants. Contributed by Dongtao Zhang
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e4f34ecb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e4f34ecb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e4f34ecb
Branch: refs/heads/HDFS-7240
Commit: e4f34ecb049a252fb1084c4c7f404d710b221969
Parents: a3954cc
Author: Chris Douglas <cd...@apache.org>
Authored: Fri May 5 12:10:50 2017 -0700
Committer: Chris Douglas <cd...@apache.org>
Committed: Fri May 5 12:10:50 2017 -0700
----------------------------------------------------------------------
.../org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java | 6 +++---
.../test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java | 2 +-
.../org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java | 4 ++--
.../src/test/java/org/apache/hadoop/hdfs/NNBenchWithoutMR.java | 6 +++---
.../reservation/planning/TestAlignedPlanner.java | 4 ++--
5 files changed, 11 insertions(+), 11 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4f34ecb/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
index 61295b4..68a7560 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
@@ -251,7 +251,7 @@ abstract public class ViewFileSystemBaseTest {
fsTarget.isFile(new Path(targetTestRoot,"user/foo")));
// Delete the created file
- Assert.assertTrue("Delete should suceed",
+ Assert.assertTrue("Delete should succeed",
fsView.delete(new Path("/user/foo"), false));
Assert.assertFalse("File should not exist after delete",
fsView.exists(new Path("/user/foo")));
@@ -266,7 +266,7 @@ abstract public class ViewFileSystemBaseTest {
fsTarget.isFile(new Path(targetTestRoot,"dir2/foo")));
// Delete the created file
- Assert.assertTrue("Delete should suceed",
+ Assert.assertTrue("Delete should succeed",
fsView.delete(new Path("/internalDir/linkToDir2/foo"), false));
Assert.assertFalse("File should not exist after delete",
fsView.exists(new Path("/internalDir/linkToDir2/foo")));
@@ -370,7 +370,7 @@ abstract public class ViewFileSystemBaseTest {
public void testRenameAcrossMounts1() throws IOException {
fileSystemTestHelper.createFile(fsView, "/user/foo");
fsView.rename(new Path("/user/foo"), new Path("/user2/fooBarBar"));
- /* - code if we had wanted this to suceed
+ /* - code if we had wanted this to succeed
Assert.assertFalse(fSys.exists(new Path("/user/foo")));
Assert.assertFalse(fSysLocal.exists(new Path(targetTestRoot,"user/foo")));
Assert.assertTrue(fSys.isFile(FileSystemTestHelper.getTestRootPath(fSys,"/user2/fooBarBar")));
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4f34ecb/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java
index 50237d1..fdc6389 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java
@@ -232,7 +232,7 @@ abstract public class ViewFsBaseTest {
isFile(fcTarget, new Path(targetTestRoot,"dir2/foo")));
// Delete the created file
- Assert.assertTrue("Delete should suceed",
+ Assert.assertTrue("Delete should succeed",
fcView.delete(new Path("/internalDir/linkToDir2/foo"),false));
Assert.assertFalse("File should not exist after deletion",
exists(fcView, new Path("/internalDir/linkToDir2/foo")));
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4f34ecb/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
index e643d21..8b48225 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
@@ -509,8 +509,8 @@ public class TestCheckpoint {
Mockito.reset(faultInjector);
secondary.shutdown(); // secondary namenode crash!
- // start new instance of secondary and verify that
- // a new rollEditLog suceedes inspite of the fact that
+ // start new instance of secondary and verify that
+ // a new rollEditLog succeeds inspite of the fact that
// edits.new already exists.
//
secondary = startSecondaryNameNode(conf);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4f34ecb/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/hdfs/NNBenchWithoutMR.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/hdfs/NNBenchWithoutMR.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/hdfs/NNBenchWithoutMR.java
index 8cd6f36..9b63010 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/hdfs/NNBenchWithoutMR.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/hdfs/NNBenchWithoutMR.java
@@ -93,7 +93,7 @@ public class NNBenchWithoutMR {
/**
* Create and write to a given number of files. Repeat each remote
- * operation until is suceeds (does not throw an exception).
+ * operation until is succeeds (does not throw an exception).
*
* @return the number of exceptions caught
*/
@@ -178,7 +178,7 @@ public class NNBenchWithoutMR {
/**
* Rename a given number of files. Repeat each remote
- * operation until is suceeds (does not throw an exception).
+ * operation until is succeeds (does not throw an exception).
*
* @return the number of exceptions caught
*/
@@ -208,7 +208,7 @@ public class NNBenchWithoutMR {
/**
* Delete a given number of files. Repeat each remote
- * operation until is suceeds (does not throw an exception).
+ * operation until is succeeds (does not throw an exception).
*
* @return the number of exceptions caught
*/
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4f34ecb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/TestAlignedPlanner.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/TestAlignedPlanner.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/TestAlignedPlanner.java
index fd187fc..25ec9c9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/TestAlignedPlanner.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/TestAlignedPlanner.java
@@ -866,7 +866,7 @@ public class TestAlignedPlanner {
curAlloc.clear();
curAlloc.put(new ReservationInterval(9 * step, 10 * step),
Resource.newInstance(1024, 1));
- assertTrue("validateOrderNoFap() should have suceeded",
+ assertTrue("validateOrderNoFap() should have succeeded",
IterativePlanner.validateOrderNoGap(allocation, curAlloc, false));
// 2. allocateLeft = false, fail when curAlloc has a gap
@@ -888,7 +888,7 @@ public class TestAlignedPlanner {
curAlloc.clear();
curAlloc.put(new ReservationInterval(13 * step, 14 * step),
Resource.newInstance(1024, 1));
- assertTrue("validateOrderNoFap() should have suceeded",
+ assertTrue("validateOrderNoFap() should have succeeded",
IterativePlanner.validateOrderNoGap(allocation, curAlloc, true));
// 5. allocateLeft = true, fail when there is a gap between curAlloc and
[44/50] [abbrv] hadoop git commit: YARN-3839. Quit throwing
NMNotYetReadyException. Contributed by Manikandan R
Posted by ae...@apache.org.
YARN-3839. Quit throwing NMNotYetReadyException. Contributed by Manikandan R
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/424887ec
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/424887ec
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/424887ec
Branch: refs/heads/HDFS-7240
Commit: 424887ecb7d11a72837f2757ed3ff9e0fe8c5b5d
Parents: cef2815
Author: Jason Lowe <jl...@yahoo-inc.com>
Authored: Mon May 8 17:14:37 2017 -0500
Committer: Jason Lowe <jl...@yahoo-inc.com>
Committed: Mon May 8 17:14:37 2017 -0500
----------------------------------------------------------------------
.../yarn/api/ContainerManagementProtocol.java | 8 +-
.../apache/hadoop/yarn/client/ServerProxy.java | 5 +
.../yarn/server/nodemanager/NodeManager.java | 2 -
.../nodemanager/NodeStatusUpdaterImpl.java | 2 -
.../containermanager/ContainerManager.java | 2 -
.../containermanager/ContainerManagerImpl.java | 28 ----
.../nodemanager/DummyContainerManager.java | 5 -
.../nodemanager/TestNodeManagerResync.java | 164 -------------------
.../BaseContainerManagerTest.java | 13 +-
.../containermanager/TestContainerManager.java | 5 -
.../TestContainerManagerRecovery.java | 11 --
.../containermanager/TestNMProxy.java | 30 +---
.../TestContainerSchedulerQueuing.java | 5 -
.../server/TestContainerManagerSecurity.java | 14 +-
14 files changed, 21 insertions(+), 273 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/424887ec/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ContainerManagementProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ContainerManagementProtocol.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ContainerManagementProtocol.java
index 9077d3b..10708a0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ContainerManagementProtocol.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ContainerManagementProtocol.java
@@ -24,10 +24,10 @@ import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Stable;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.protocolrecords.CommitResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.IncreaseContainersResourceRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.IncreaseContainersResourceResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.IncreaseContainersResourceRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.IncreaseContainersResourceResponse;
import org.apache.hadoop.yarn.api.protocolrecords.ReInitializeContainerRequest;
import org.apache.hadoop.yarn.api.protocolrecords.ReInitializeContainerResponse;
import org.apache.hadoop.yarn.api.protocolrecords.ResourceLocalizationRequest;
@@ -45,7 +45,6 @@ import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
-import org.apache.hadoop.yarn.exceptions.NMNotYetReadyException;
import org.apache.hadoop.yarn.exceptions.YarnException;
/**
@@ -101,9 +100,6 @@ public interface ContainerManagementProtocol {
* a allServicesMetaData map.
* @throws YarnException
* @throws IOException
- * @throws NMNotYetReadyException
- * This exception is thrown when NM starts from scratch but has not
- * yet connected with RM.
*/
@Public
@Stable
http://git-wip-us.apache.org/repos/asf/hadoop/blob/424887ec/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/ServerProxy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/ServerProxy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/ServerProxy.java
index e42435f..6188d6a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/ServerProxy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/ServerProxy.java
@@ -80,6 +80,11 @@ public class ServerProxy {
exceptionToPolicyMap.put(ConnectTimeoutException.class, retryPolicy);
exceptionToPolicyMap.put(RetriableException.class, retryPolicy);
exceptionToPolicyMap.put(SocketException.class, retryPolicy);
+
+ /*
+ * Still keeping this to cover case like newer client talking
+ * to an older version of server
+ */
exceptionToPolicyMap.put(NMNotYetReadyException.class, retryPolicy);
return RetryPolicies.retryByException(RetryPolicies.TRY_ONCE_THEN_FAIL,
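The retained mapping means a newer client talking to an older NodeManager
that still throws NMNotYetReadyException will keep retrying, while
exceptions absent from the map fall through to TRY_ONCE_THEN_FAIL. A
simplified, self-contained sketch of retry-by-exception-type; this is not
Hadoop's RetryPolicies implementation, and every name in it is
illustrative:

import java.net.SocketException;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.Callable;

public class RetryByExceptionSketch {

  /** Exceptions mapped to true are retried; everything else fails fast. */
  static <T> T callWithRetries(Callable<T> call,
      Map<Class<? extends Exception>, Boolean> policy, int maxRetries)
      throws Exception {
    for (int attempt = 0; ; attempt++) {
      try {
        return call.call();
      } catch (Exception e) {
        boolean retriable = Boolean.TRUE.equals(policy.get(e.getClass()));
        if (!retriable || attempt >= maxRetries) {
          throw e;  // TRY_ONCE_THEN_FAIL semantics for unmapped types
        }
      }
    }
  }

  public static void main(String[] args) throws Exception {
    Map<Class<? extends Exception>, Boolean> policy = new HashMap<>();
    policy.put(SocketException.class, true);  // transient: retry

    final int[] failures = {2};
    String result = callWithRetries(() -> {
      if (failures[0]-- > 0) {
        throw new SocketException("server not up yet");
      }
      return "registered";
    }, policy, 5);
    System.out.println(result);  // "registered" after two retries
  }
}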
http://git-wip-us.apache.org/repos/asf/hadoop/blob/424887ec/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
index 55e7b09..3c0e498 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
@@ -460,8 +460,6 @@ public class NodeManager extends CompositeService
@Override
public void run() {
try {
- LOG.info("Notifying ContainerManager to block new container-requests");
- containerManager.setBlockNewContainerRequests(true);
if (!rmWorkPreservingRestartEnabled) {
LOG.info("Cleaning up running containers on resync");
containerManager.cleanupContainersOnNMResync();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/424887ec/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
index 4914dc7..dd5b279 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
@@ -428,8 +428,6 @@ public class NodeStatusUpdaterImpl extends AbstractService implements
.verifyRMRegistrationResponseForNodeLabels(regNMResponse));
LOG.info(successfullRegistrationMsg);
- LOG.info("Notifying ContainerManager to unblock new container-requests");
- this.context.getContainerManager().setBlockNewContainerRequests(false);
}
private List<ApplicationId> createKeepAliveApplicationList() {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/424887ec/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManager.java
index 066d987..2aeb245 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManager.java
@@ -42,8 +42,6 @@ public interface ContainerManager extends ServiceStateChangeListener,
void updateQueuingLimit(ContainerQueuingLimit queuingLimit);
- void setBlockNewContainerRequests(boolean blockNewContainerRequests);
-
ContainerScheduler getContainerScheduler();
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/424887ec/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
index d82c728..f65f1ac 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
@@ -166,7 +166,6 @@ import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
-import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
@@ -204,7 +203,6 @@ public class ContainerManagerImpl extends CompositeService implements
protected final AsyncDispatcher dispatcher;
private final DeletionService deletionService;
- private AtomicBoolean blockNewContainerRequests = new AtomicBoolean(false);
private boolean serviceStopped = false;
private final ReadLock readLock;
private final WriteLock writeLock;
@@ -550,10 +548,6 @@ public class ContainerManagerImpl extends CompositeService implements
refreshServiceAcls(conf, new NMPolicyProvider());
}
- LOG.info("Blocking new container-requests as container manager rpc" +
- " server is still starting.");
- this.setBlockNewContainerRequests(true);
-
String bindHost = conf.get(YarnConfiguration.NM_BIND_HOST);
String nmAddress = conf.getTrimmed(YarnConfiguration.NM_ADDRESS);
String hostOverride = null;
@@ -617,7 +611,6 @@ public class ContainerManagerImpl extends CompositeService implements
@Override
public void serviceStop() throws Exception {
- setBlockNewContainerRequests(true);
this.writeLock.lock();
try {
serviceStopped = true;
@@ -852,11 +845,6 @@ public class ContainerManagerImpl extends CompositeService implements
@Override
public StartContainersResponse startContainers(
StartContainersRequest requests) throws YarnException, IOException {
- if (blockNewContainerRequests.get()) {
- throw new NMNotYetReadyException(
- "Rejecting new containers as NodeManager has not"
- + " yet connected with ResourceManager");
- }
UserGroupInformation remoteUgi = getRemoteUgi();
NMTokenIdentifier nmTokenIdentifier = selectNMTokenIdentifier(remoteUgi);
authorizeUser(remoteUgi, nmTokenIdentifier);
@@ -1113,11 +1101,6 @@ public class ContainerManagerImpl extends CompositeService implements
public IncreaseContainersResourceResponse increaseContainersResource(
IncreaseContainersResourceRequest requests)
throws YarnException, IOException {
- if (blockNewContainerRequests.get()) {
- throw new NMNotYetReadyException(
- "Rejecting container resource increase as NodeManager has not"
- + " yet connected with ResourceManager");
- }
UserGroupInformation remoteUgi = getRemoteUgi();
NMTokenIdentifier nmTokenIdentifier = selectNMTokenIdentifier(remoteUgi);
authorizeUser(remoteUgi, nmTokenIdentifier);
@@ -1560,17 +1543,6 @@ public class ContainerManagerImpl extends CompositeService implements
}
@Override
- public void setBlockNewContainerRequests(boolean blockNewContainerRequests) {
- this.blockNewContainerRequests.set(blockNewContainerRequests);
- }
-
- @Private
- @VisibleForTesting
- public boolean getBlockNewContainerRequestsStatus() {
- return this.blockNewContainerRequests.get();
- }
-
- @Override
public void stateChanged(Service service) {
// TODO Auto-generated method stub
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/424887ec/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/DummyContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/DummyContainerManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/DummyContainerManager.java
index feb6dd6..e520a31 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/DummyContainerManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/DummyContainerManager.java
@@ -191,11 +191,6 @@ public class DummyContainerManager extends ContainerManagerImpl {
}
@Override
- public void setBlockNewContainerRequests(boolean blockNewContainerRequests) {
- // do nothing
- }
-
- @Override
protected void authorizeStartAndResourceIncreaseRequest(
NMTokenIdentifier nmTokenIdentifier,
ContainerTokenIdentifier containerTokenIdentifier,
http://git-wip-us.apache.org/repos/asf/hadoop/blob/424887ec/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
index 5ab5c37..c5c74d1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
@@ -65,7 +65,6 @@ import org.apache.hadoop.yarn.api.records.Token;
import org.apache.hadoop.yarn.api.records.URL;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.Dispatcher;
-import org.apache.hadoop.yarn.exceptions.NMNotYetReadyException;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.factories.RecordFactory;
@@ -87,7 +86,6 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Cont
import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics;
import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
import org.apache.hadoop.yarn.server.utils.YarnServerBuilderUtils;
-import org.apache.hadoop.yarn.util.ConverterUtils;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
@@ -188,34 +186,6 @@ public class TestNodeManagerResync {
}
}
- // This test tests new container requests are blocked when NM starts from
- // scratch until it register with RM AND while NM is resyncing with RM
- @SuppressWarnings("unchecked")
- @Test(timeout=60000)
- public void testBlockNewContainerRequestsOnStartAndResync()
- throws IOException, InterruptedException, YarnException {
- NodeManager nm = new TestNodeManager2();
- int port = ServerSocketUtil.getPort(49154, 10);
- YarnConfiguration conf = createNMConfig(port);
- conf.setBoolean(YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_ENABLED, false);
- nm.init(conf);
- nm.start();
-
- // Start the container in running state
- ContainerId cId = TestNodeManagerShutdown.createContainerId();
- TestNodeManagerShutdown.startContainer(nm, cId, localFS, tmpDir,
- processStartFile, port);
-
- nm.getNMDispatcher().getEventHandler()
- .handle(new NodeManagerEvent(NodeManagerEventType.RESYNC));
- try {
- syncBarrier.await();
- } catch (BrokenBarrierException e) {
- }
- Assert.assertFalse(assertionFailedInThread.get());
- nm.stop();
- }
-
@SuppressWarnings("unchecked")
@Test(timeout=10000)
public void testNMshutdownWhenResyncThrowException() throws IOException,
@@ -493,135 +463,6 @@ public class TestNodeManagerResync {
}
}
- class TestNodeManager2 extends NodeManager {
-
- Thread launchContainersThread = null;
- @Override
- protected NodeStatusUpdater createNodeStatusUpdater(Context context,
- Dispatcher dispatcher, NodeHealthCheckerService healthChecker) {
- return new TestNodeStatusUpdaterImpl2(context, dispatcher,
- healthChecker, metrics);
- }
-
- @Override
- protected ContainerManagerImpl createContainerManager(Context context,
- ContainerExecutor exec, DeletionService del,
- NodeStatusUpdater nodeStatusUpdater, ApplicationACLsManager aclsManager,
- LocalDirsHandlerService dirsHandler) {
- return new ContainerManagerImpl(context, exec, del, nodeStatusUpdater,
- metrics, dirsHandler){
- @Override
- public void setBlockNewContainerRequests(
- boolean blockNewContainerRequests) {
- if (blockNewContainerRequests) {
- // start test thread right after blockNewContainerRequests is set
- // true
- super.setBlockNewContainerRequests(blockNewContainerRequests);
- launchContainersThread = new RejectedContainersLauncherThread();
- launchContainersThread.start();
- } else {
- // join the test thread right before blockNewContainerRequests is
- // reset
- try {
- // stop the test thread
- ((RejectedContainersLauncherThread) launchContainersThread)
- .setStopThreadFlag(true);
- launchContainersThread.join();
- ((RejectedContainersLauncherThread) launchContainersThread)
- .setStopThreadFlag(false);
- super.setBlockNewContainerRequests(blockNewContainerRequests);
- } catch (InterruptedException e) {
- e.printStackTrace();
- }
- }
- }
- };
- }
-
- class TestNodeStatusUpdaterImpl2 extends MockNodeStatusUpdater {
-
- public TestNodeStatusUpdaterImpl2(Context context, Dispatcher dispatcher,
- NodeHealthCheckerService healthChecker, NodeManagerMetrics metrics) {
- super(context, dispatcher, healthChecker, metrics);
- }
-
- @Override
- protected void rebootNodeStatusUpdaterAndRegisterWithRM() {
- ConcurrentMap<ContainerId, org.apache.hadoop.yarn.server.nodemanager
- .containermanager.container.Container> containers =
- getNMContext().getContainers();
-
- try {
- // ensure that containers are empty before restart nodeStatusUpdater
- if (!containers.isEmpty()) {
- for (Container container: containers.values()) {
- Assert.assertEquals(ContainerState.COMPLETE,
- container.cloneAndGetContainerStatus().getState());
- }
- }
- super.rebootNodeStatusUpdaterAndRegisterWithRM();
- // After this point new containers are free to be launched, except
- // containers from previous RM
- // Wait here so as to sync with the main test thread.
- syncBarrier.await();
- } catch (InterruptedException e) {
- } catch (BrokenBarrierException e) {
- } catch (AssertionError ae) {
- ae.printStackTrace();
- assertionFailedInThread.set(true);
- }
- }
- }
-
- class RejectedContainersLauncherThread extends Thread {
-
- boolean isStopped = false;
- public void setStopThreadFlag(boolean isStopped) {
- this.isStopped = isStopped;
- }
-
- @Override
- public void run() {
- int numContainers = 0;
- int numContainersRejected = 0;
- ContainerLaunchContext containerLaunchContext =
- recordFactory.newRecordInstance(ContainerLaunchContext.class);
- try {
- while (!isStopped && numContainers < 10) {
- StartContainerRequest scRequest =
- StartContainerRequest.newInstance(containerLaunchContext,
- null);
- List<StartContainerRequest> list = new ArrayList<StartContainerRequest>();
- list.add(scRequest);
- StartContainersRequest allRequests =
- StartContainersRequest.newInstance(list);
- System.out.println("no. of containers to be launched: "
- + numContainers);
- numContainers++;
- try {
- getContainerManager().startContainers(allRequests);
- } catch (YarnException e) {
- numContainersRejected++;
- Assert.assertTrue(e.getMessage().contains(
- "Rejecting new containers as NodeManager has not" +
- " yet connected with ResourceManager"));
- Assert.assertEquals(NMNotYetReadyException.class.getName(), e
- .getClass().getName());
- } catch (IOException e) {
- e.printStackTrace();
- assertionFailedInThread.set(true);
- }
- }
- // no. of containers to be launched should equal to no. of
- // containers rejected
- Assert.assertEquals(numContainers, numContainersRejected);
- } catch (AssertionError ae) {
- assertionFailedInThread.set(true);
- }
- }
- }
- }
-
class TestNodeManager3 extends NodeManager {
private int registrationCount = 0;
@@ -681,11 +522,6 @@ public class TestNodeManagerResync {
LocalDirsHandlerService dirsHandler) {
return new ContainerManagerImpl(context, exec, del, nodeStatusUpdater,
metrics, dirsHandler){
- @Override
- public void
- setBlockNewContainerRequests(boolean blockNewContainerRequests) {
- // do nothing
- }
@Override
protected void authorizeGetAndStopContainerRequest(
http://git-wip-us.apache.org/repos/asf/hadoop/blob/424887ec/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
index ad0a831..2991c0c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
@@ -18,6 +18,8 @@
package org.apache.hadoop.yarn.server.nodemanager.containermanager;
+import static org.mockito.Mockito.spy;
+
import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
@@ -28,8 +30,6 @@ import java.util.HashSet;
import java.util.List;
import java.util.Map;
-import org.apache.hadoop.yarn.server.nodemanager.executor.DeletionAsUserContext;
-import org.junit.Assert;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
@@ -75,6 +75,7 @@ import org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationState;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
+import org.apache.hadoop.yarn.server.nodemanager.executor.DeletionAsUserContext;
import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics;
import org.apache.hadoop.yarn.server.nodemanager.recovery.NMNullStateStoreService;
import org.apache.hadoop.yarn.server.nodemanager.security.NMContainerTokenSecretManager;
@@ -82,10 +83,9 @@ import org.apache.hadoop.yarn.server.nodemanager.security.NMTokenSecretManagerIn
import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
import org.apache.hadoop.yarn.server.utils.BuilderUtils;
import org.junit.After;
+import org.junit.Assert;
import org.junit.Before;
-import static org.mockito.Mockito.spy;
-
public abstract class BaseContainerManagerTest {
protected static RecordFactory recordFactory = RecordFactoryProvider
@@ -214,11 +214,6 @@ public abstract class BaseContainerManagerTest {
return new ContainerManagerImpl(context, exec, delSrvc, nodeStatusUpdater,
metrics, dirsHandler) {
- @Override
- public void
- setBlockNewContainerRequests(boolean blockNewContainerRequests) {
- // do nothing
- }
@Override
protected void authorizeGetAndStopContainerRequest(ContainerId containerId,
http://git-wip-us.apache.org/repos/asf/hadoop/blob/424887ec/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java
index 6fead7e..60df7cb8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java
@@ -127,11 +127,6 @@ public class TestContainerManager extends BaseContainerManagerTest {
createContainerManager(DeletionService delSrvc) {
return new ContainerManagerImpl(context, exec, delSrvc, nodeStatusUpdater,
metrics, dirsHandler) {
- @Override
- public void
- setBlockNewContainerRequests(boolean blockNewContainerRequests) {
- // do nothing
- }
@Override
protected UserGroupInformation getRemoteUgi() throws YarnException {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/424887ec/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java
index ef60c68..633bb6d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java
@@ -545,11 +545,6 @@ public class TestContainerManagerRecovery extends BaseContainerManagerTest {
return new ContainerManagerImpl(context, exec, delSrvc,
mock(NodeStatusUpdater.class), metrics, dirsHandler) {
@Override
- public void
- setBlockNewContainerRequests(boolean blockNewContainerRequests) {
- // do nothing
- }
- @Override
protected void authorizeGetAndStopContainerRequest(
ContainerId containerId, Container container,
boolean stopRequest, NMTokenIdentifier identifier)
@@ -757,12 +752,6 @@ public class TestContainerManagerRecovery extends BaseContainerManagerTest {
}
@Override
- public void setBlockNewContainerRequests(
- boolean blockNewContainerRequests) {
- // do nothing
- }
-
- @Override
public NMTimelinePublisher
createNMTimelinePublisher(Context context) {
return null;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/424887ec/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestNMProxy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestNMProxy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestNMProxy.java
index 46b32de..e6a7a02 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestNMProxy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestNMProxy.java
@@ -56,12 +56,10 @@ public class TestNMProxy extends BaseContainerManagerTest {
}
int retryCount = 0;
- boolean shouldThrowNMNotYetReadyException = false;
@Before
public void setUp() throws Exception {
containerManager.start();
- containerManager.setBlockNewContainerRequests(false);
}
@Override
@@ -75,21 +73,13 @@ public class TestNMProxy extends BaseContainerManagerTest {
StartContainersRequest requests) throws YarnException, IOException {
if (retryCount < 5) {
retryCount++;
- if (shouldThrowNMNotYetReadyException) {
- // This causes super to throw an NMNotYetReadyException
- containerManager.setBlockNewContainerRequests(true);
+ if (isRetryPolicyRetryForEver()) {
+ // Throw non-network exception
+ throw new IOException(
+ new UnreliableInterface.UnreliableException());
} else {
- if (isRetryPolicyRetryForEver()) {
- // Throw non network exception
- throw new IOException(
- new UnreliableInterface.UnreliableException());
- } else {
- throw new java.net.ConnectException("start container exception");
- }
+ throw new java.net.ConnectException("start container exception");
}
- } else {
- // This stops super from throwing an NMNotYetReadyException
- containerManager.setBlockNewContainerRequests(false);
}
return super.startContainers(requests);
}
@@ -131,26 +121,17 @@ public class TestNMProxy extends BaseContainerManagerTest {
ContainerManagementProtocol proxy = getNMProxy(conf);
- retryCount = 0;
- shouldThrowNMNotYetReadyException = false;
proxy.startContainers(allRequests);
Assert.assertEquals(5, retryCount);
retryCount = 0;
- shouldThrowNMNotYetReadyException = false;
proxy.stopContainers(Records.newRecord(StopContainersRequest.class));
Assert.assertEquals(5, retryCount);
retryCount = 0;
- shouldThrowNMNotYetReadyException = false;
proxy.getContainerStatuses(Records
.newRecord(GetContainerStatusesRequest.class));
Assert.assertEquals(5, retryCount);
-
- retryCount = 0;
- shouldThrowNMNotYetReadyException = true;
- proxy.startContainers(allRequests);
- Assert.assertEquals(5, retryCount);
}
@Test(timeout = 20000, expected = IOException.class)
@@ -162,7 +143,6 @@ public class TestNMProxy extends BaseContainerManagerTest {
ContainerManagementProtocol proxy = getNMProxy(conf);
- shouldThrowNMNotYetReadyException = false;
retryCount = 0;
proxy.startContainers(allRequests);
}
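
What remains in TestNMProxy after the removal is a pure retry-policy test: the overridden startContainers fails the first five calls, so a proxy with a working retry policy succeeds on the sixth attempt and the test can assert retryCount == 5. A reduced sketch of that idiom (hypothetical names, not the actual test class):

import java.io.IOException;
import java.net.ConnectException;

class FlakyService {
  int retryCount = 0;

  // Fails the first five invocations with a connection error that a
  // retry proxy is expected to absorb; succeeds afterwards.
  String startContainers(String request) throws IOException {
    if (retryCount < 5) {
      retryCount++;
      throw new ConnectException("start container exception");
    }
    return "started";
  }
}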
http://git-wip-us.apache.org/repos/asf/hadoop/blob/424887ec/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java
index a98a341..8264f2e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java
@@ -77,11 +77,6 @@ public class TestContainerSchedulerQueuing extends BaseContainerManagerTest {
DeletionService delSrvc) {
return new ContainerManagerImpl(context, exec, delSrvc,
nodeStatusUpdater, metrics, dirsHandler) {
- @Override
- public void
- setBlockNewContainerRequests(boolean blockNewContainerRequests) {
- // do nothing
- }
@Override
protected UserGroupInformation getRemoteUgi() throws YarnException {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/424887ec/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
index 98cb365..9626b35 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
@@ -66,7 +66,6 @@ import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
import org.apache.hadoop.yarn.security.NMTokenIdentifier;
import org.apache.hadoop.yarn.server.nodemanager.Context;
import org.apache.hadoop.yarn.server.nodemanager.NodeManager;
-import org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl;
import org.apache.hadoop.yarn.server.nodemanager.security.NMTokenSecretManagerInNM;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.MockRMApp;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
@@ -178,7 +177,7 @@ public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
NodeManager nm = yarnCluster.getNodeManager(0);
- waitForNMToReceiveNMTokenKey(nmTokenSecretManagerNM, nm);
+ waitForNMToReceiveNMTokenKey(nmTokenSecretManagerNM);
// Both id should be equal.
Assert.assertEquals(nmTokenSecretManagerNM.getCurrentKey().getKeyId(),
@@ -412,13 +411,10 @@ public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
}
protected void waitForNMToReceiveNMTokenKey(
- NMTokenSecretManagerInNM nmTokenSecretManagerNM, NodeManager nm)
+ NMTokenSecretManagerInNM nmTokenSecretManagerNM)
throws InterruptedException {
int attempt = 60;
- ContainerManagerImpl cm =
- ((ContainerManagerImpl) nm.getNMContext().getContainerManager());
- while ((cm.getBlockNewContainerRequestsStatus() || nmTokenSecretManagerNM
- .getNodeId() == null) && attempt-- > 0) {
+ while (nmTokenSecretManagerNM.getNodeId() == null && attempt-- > 0) {
Thread.sleep(2000);
}
}
@@ -627,7 +623,7 @@ public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
nm.getNMContext().getNMTokenSecretManager();
String user = "test";
- waitForNMToReceiveNMTokenKey(nmTokenSecretManagerInNM, nm);
+ waitForNMToReceiveNMTokenKey(nmTokenSecretManagerInNM);
NodeId nodeId = nm.getNMContext().getNodeId();
@@ -722,7 +718,7 @@ public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
nm.getNMContext().getNMTokenSecretManager();
String user = "test";
- waitForNMToReceiveNMTokenKey(nmTokenSecretManagerInNM, nm);
+ waitForNMToReceiveNMTokenKey(nmTokenSecretManagerInNM);
NodeId nodeId = nm.getNMContext().getNodeId();
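
The simplified helper above now polls only for the NM token key (up to 60 attempts, 2 seconds apart). The same wait could be expressed with Hadoop's GenericTestUtils.waitFor; a sketch of the equivalent loop, assuming the Guava Supplier-based signature of this era of the codebase and modulo the extra TimeoutException it can throw:

import com.google.common.base.Supplier;
import org.apache.hadoop.test.GenericTestUtils;

// check every 2s, time out after 120s (60 attempts x 2000 ms)
GenericTestUtils.waitFor(new Supplier<Boolean>() {
  @Override
  public Boolean get() {
    return nmTokenSecretManagerNM.getNodeId() != null;
  }
}, 2000, 120000);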
[19/50] [abbrv] hadoop git commit: YARN-6481. Yarn top shows negative
container number in FS (Contributed by Tao Jie via Daniel Templeton)
Posted by ae...@apache.org.
YARN-6481. Yarn top shows negative container number in FS
(Contributed by Tao Jie via Daniel Templeton)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9f0aea0e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9f0aea0e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9f0aea0e
Branch: refs/heads/HDFS-7240
Commit: 9f0aea0ee2c680afd26ef9da6ac662be00d8e24f
Parents: b062b32
Author: Daniel Templeton <te...@apache.org>
Authored: Tue May 2 13:04:40 2017 -0700
Committer: Daniel Templeton <te...@apache.org>
Committed: Tue May 2 13:06:47 2017 -0700
----------------------------------------------------------------------
.../resourcemanager/scheduler/fair/FSQueue.java | 3 +++
.../scheduler/fair/TestFairScheduler.java | 15 +++++++++++++++
2 files changed, 18 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f0aea0e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
index acf4d5c..e131140 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
@@ -253,6 +253,9 @@ public abstract class FSQueue implements Queue, Schedulable {
stats.setAllocatedVCores(getMetrics().getAllocatedVirtualCores());
stats.setPendingVCores(getMetrics().getPendingVirtualCores());
stats.setReservedVCores(getMetrics().getReservedVirtualCores());
+ stats.setAllocatedContainers(getMetrics().getAllocatedContainers());
+ stats.setPendingContainers(getMetrics().getPendingContainers());
+ stats.setReservedContainers(getMetrics().getReservedContainers());
return stats;
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f0aea0e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
index 9bf79f6..2233287 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
@@ -874,6 +874,11 @@ public class TestFairScheduler extends FairSchedulerTestBase {
QueueInfo queueInfo = scheduler.getQueueInfo("queueA", false, false);
Assert.assertEquals(0.25f, queueInfo.getCapacity(), 0.0f);
Assert.assertEquals(0.0f, queueInfo.getCurrentCapacity(), 0.0f);
+ // test queueMetrics
+ Assert.assertEquals(0, queueInfo.getQueueStatistics()
+ .getAllocatedContainers());
+ Assert.assertEquals(0, queueInfo.getQueueStatistics()
+ .getAllocatedMemoryMB());
queueInfo = scheduler.getQueueInfo("queueB", false, false);
Assert.assertEquals(0.75f, queueInfo.getCapacity(), 0.0f);
Assert.assertEquals(0.0f, queueInfo.getCurrentCapacity(), 0.0f);
@@ -889,9 +894,19 @@ public class TestFairScheduler extends FairSchedulerTestBase {
queueInfo = scheduler.getQueueInfo("queueA", false, false);
Assert.assertEquals(0.25f, queueInfo.getCapacity(), 0.0f);
Assert.assertEquals(0.5f, queueInfo.getCurrentCapacity(), 0.0f);
+ // test queueMetrics
+ Assert.assertEquals(1, queueInfo.getQueueStatistics()
+ .getAllocatedContainers());
+ Assert.assertEquals(1024, queueInfo.getQueueStatistics()
+ .getAllocatedMemoryMB());
queueInfo = scheduler.getQueueInfo("queueB", false, false);
Assert.assertEquals(0.75f, queueInfo.getCapacity(), 0.0f);
Assert.assertEquals(1.0f, queueInfo.getCurrentCapacity(), 0.0f);
+ // test queueMetrics
+ Assert.assertEquals(1, queueInfo.getQueueStatistics()
+ .getAllocatedContainers());
+ Assert.assertEquals(6144, queueInfo.getQueueStatistics()
+ .getAllocatedMemoryMB());
}
@Test
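
The fair scheduler previously filled only the memory and vcore fields of QueueStatistics, so consumers such as yarn top presumably read default container counts and could derive negative numbers; the three new setters close that gap. A hedged usage fragment, assuming the getters matching the new setters, with queueInfo obtained from the scheduler as in the test above:

QueueStatistics stats = queueInfo.getQueueStatistics();
long allocated = stats.getAllocatedContainers();
long pending = stats.getPendingContainers();
long reserved = stats.getReservedContainers();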
[17/50] [abbrv] hadoop git commit: HADOOP-14369. NetworkTopology
calls expensive toString() when logging. Contributed by Inigo Goiri.
Posted by ae...@apache.org.
HADOOP-14369. NetworkTopology calls expensive toString() when logging. Contributed by Inigo Goiri.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dcc292d7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dcc292d7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dcc292d7
Branch: refs/heads/HDFS-7240
Commit: dcc292d7ded200a4976f6d348952ecba10f01db2
Parents: dc77255
Author: Andrew Wang <wa...@apache.org>
Authored: Tue May 2 10:51:20 2017 -0700
Committer: Andrew Wang <wa...@apache.org>
Committed: Tue May 2 10:51:20 2017 -0700
----------------------------------------------------------------------
.../java/org/apache/hadoop/net/NetworkTopology.java | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dcc292d7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
index f8cecdd..1018d58 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
@@ -130,8 +130,8 @@ public class NetworkTopology {
"Not allow to add an inner node: "+NodeBase.getPath(node));
}
if ((depthOfAllLeaves != -1) && (depthOfAllLeaves != newDepth)) {
- LOG.error("Error: can't add leaf node " + NodeBase.getPath(node) +
- " at depth " + newDepth + " to topology:\n" + this.toString());
+ LOG.error("Error: can't add leaf node {} at depth {} to topology:{}\n",
+ NodeBase.getPath(node), newDepth, this);
throw new InvalidTopologyException("Failed to add " + NodeBase.getPath(node) +
": You cannot have a rack and a non-rack node at the same " +
"level of the network topology.");
@@ -153,7 +153,7 @@ public class NetworkTopology {
}
}
}
- LOG.debug("NetworkTopology became:\n{}", this.toString());
+ LOG.debug("NetworkTopology became:\n{}", this);
} finally {
netlock.writeLock().unlock();
}
@@ -226,7 +226,7 @@ public class NetworkTopology {
numOfRacks--;
}
}
- LOG.debug("NetworkTopology became:\n{}", this.toString());
+ LOG.debug("NetworkTopology became:\n{}", this);
} finally {
netlock.writeLock().unlock();
}
@@ -525,7 +525,7 @@ public class NetworkTopology {
}
if (numOfDatanodes == 0) {
LOG.debug("Failed to find datanode (scope=\"{}\" excludedScope=\"{}\").",
- String.valueOf(scope), String.valueOf(excludedScope));
+ scope, excludedScope);
return null;
}
Node ret = null;
@@ -538,7 +538,7 @@ public class NetworkTopology {
}
LOG.debug("Choosing random from {} available nodes on node {},"
+ " scope={}, excludedScope={}, excludeNodes={}", availableNodes,
- innerNode.toString(), scope, excludedScope, excludedNodes);
+ innerNode, scope, excludedScope, excludedNodes);
if (availableNodes > 0) {
do {
int leaveIndex = r.nextInt(numOfDatanodes);
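
The fix leans on SLF4J's parameterized logging: a {} placeholder defers the argument's toString() until the logger has checked that the level is enabled, whereas string concatenation pays the formatting cost unconditionally. A minimal self-contained illustration (not Hadoop code):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LazyLoggingExample {
  private static final Logger LOG =
      LoggerFactory.getLogger(LazyLoggingExample.class);

  void demo(Object expensiveToPrint) {
    // Anti-pattern: the concatenation (and toString()) runs even when
    // DEBUG is disabled.
    LOG.debug("State:\n" + expensiveToPrint);

    // Pattern applied above: toString() is invoked only if DEBUG is on.
    LOG.debug("State:\n{}", expensiveToPrint);
  }
}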
[16/50] [abbrv] hadoop git commit: HADOOP-14367. Remove unused
setting from pom.xml. Contributed by Chen Liang.
Posted by ae...@apache.org.
HADOOP-14367. Remove unused setting from pom.xml. Contributed by Chen Liang.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dc77255a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dc77255a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dc77255a
Branch: refs/heads/HDFS-7240
Commit: dc77255a7ba82b746ec4bec8ef0fcaafbc6b6f00
Parents: 20cde55
Author: Akira Ajisaka <aa...@apache.org>
Authored: Tue May 2 23:52:34 2017 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Tue May 2 23:52:34 2017 +0900
----------------------------------------------------------------------
hadoop-project/pom.xml | 1 -
1 file changed, 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc77255a/hadoop-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 8c855cc..b282065 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -90,7 +90,6 @@
<findbugs.version>3.0.0</findbugs.version>
<spotbugs.version>3.1.0-RC1</spotbugs.version>
- <tomcat.version>6.0.48</tomcat.version>
<guice.version>4.0</guice.version>
<joda-time.version>2.9.4</joda-time.version>
[10/50] [abbrv] hadoop git commit: YARN-4359. Update LowCost agents
logic to take advantage of YARN-4358. (Jonathan Yaniv and Ishai Menache via
Subru).
Posted by ae...@apache.org.
YARN-4359. Update LowCost agents logic to take advantage of YARN-4358. (Jonathan Yaniv and Ishai Menache via Subru).
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a3a615ee
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a3a615ee
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a3a615ee
Branch: refs/heads/HDFS-7240
Commit: a3a615eeab8c14ccdc548311097e62a916963dc5
Parents: 14b5c93
Author: Subru Krishnan <su...@apache.org>
Authored: Mon May 1 16:01:07 2017 -0700
Committer: Subru Krishnan <su...@apache.org>
Committed: Mon May 1 16:01:07 2017 -0700
----------------------------------------------------------------------
.../reservation/InMemoryPlan.java | 11 +
.../resourcemanager/reservation/PlanView.java | 9 +
.../planning/AlignedPlannerWithGreedy.java | 15 +-
.../planning/GreedyReservationAgent.java | 13 +-
.../reservation/planning/IterativePlanner.java | 196 ++++-----
.../reservation/planning/ReservationAgent.java | 23 +-
.../planning/SimpleCapacityReplanner.java | 8 +-
.../reservation/planning/StageAllocator.java | 10 +-
.../planning/StageAllocatorGreedy.java | 4 +-
.../planning/StageAllocatorGreedyRLE.java | 4 +-
.../planning/StageAllocatorLowCostAligned.java | 279 +++++++++----
.../planning/StageEarliestStart.java | 46 ---
.../planning/StageEarliestStartByDemand.java | 106 -----
.../StageEarliestStartByJobArrival.java | 39 --
.../planning/StageExecutionInterval.java | 47 +++
.../StageExecutionIntervalByDemand.java | 144 +++++++
.../StageExecutionIntervalUnconstrained.java | 73 ++++
.../planning/TestAlignedPlanner.java | 411 +++++++++++++++++--
.../planning/TestGreedyReservationAgent.java | 8 +-
.../planning/TestSimpleCapacityReplanner.java | 4 +-
20 files changed, 987 insertions(+), 463 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3a615ee/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/InMemoryPlan.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/InMemoryPlan.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/InMemoryPlan.java
index 3afcd47..783fd09 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/InMemoryPlan.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/InMemoryPlan.java
@@ -687,4 +687,15 @@ public class InMemoryPlan implements Plan {
readLock.unlock();
}
}
+
+ @Override
+ public RLESparseResourceAllocation getCumulativeLoadOverTime(
+ long start, long end) {
+ readLock.lock();
+ try {
+ return rleSparseVector.getRangeOverlapping(start, end);
+ } finally {
+ readLock.unlock();
+ }
+ }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3a615ee/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/PlanView.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/PlanView.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/PlanView.java
index 699f461..2767993 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/PlanView.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/PlanView.java
@@ -174,4 +174,13 @@ public interface PlanView extends PlanContext {
public RLESparseResourceAllocation getConsumptionForUserOverTime(String user,
long start, long end);
+ /**
+ * Get the cumulative load over a time interval.
+ *
+ * @param start Start of the time interval.
+ * @param end End of the time interval.
+ * @return RLE sparse allocation.
+ */
+ RLESparseResourceAllocation getCumulativeLoadOverTime(long start, long end);
+
}
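
InMemoryPlan serves this new PlanView method by returning the overlapping range of its RLE-encoded cumulative load under the plan's read lock, which is what lets IterativePlanner drop its per-timestep load map further down in this commit. A hypothetical caller fragment, with plan, jobArrival and jobDeadline assumed in scope as in IterativePlanner.initialize():

RLESparseResourceAllocation load =
    plan.getCumulativeLoadOverTime(jobArrival, jobDeadline);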
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3a615ee/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/AlignedPlannerWithGreedy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/AlignedPlannerWithGreedy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/AlignedPlannerWithGreedy.java
index 00c2333..3853f41 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/AlignedPlannerWithGreedy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/AlignedPlannerWithGreedy.java
@@ -39,6 +39,8 @@ public class AlignedPlannerWithGreedy implements ReservationAgent {
public static final int DEFAULT_SMOOTHNESS_FACTOR = 10;
public static final String SMOOTHNESS_FACTOR =
"yarn.resourcemanager.reservation-system.smoothness-factor";
+ private boolean allocateLeft = false;
+
// Log
private static final Logger LOG = LoggerFactory
@@ -49,26 +51,31 @@ public class AlignedPlannerWithGreedy implements ReservationAgent {
// Constructor
public AlignedPlannerWithGreedy() {
+
}
@Override
public void init(Configuration conf) {
int smoothnessFactor =
conf.getInt(SMOOTHNESS_FACTOR, DEFAULT_SMOOTHNESS_FACTOR);
+ allocateLeft = conf.getBoolean(FAVOR_EARLY_ALLOCATION,
+ DEFAULT_GREEDY_FAVOR_EARLY_ALLOCATION);
// List of algorithms
List<ReservationAgent> listAlg = new LinkedList<ReservationAgent>();
// LowCostAligned planning algorithm
ReservationAgent algAligned =
- new IterativePlanner(new StageEarliestStartByDemand(),
- new StageAllocatorLowCostAligned(smoothnessFactor), false);
+ new IterativePlanner(new StageExecutionIntervalByDemand(),
+ new StageAllocatorLowCostAligned(smoothnessFactor, allocateLeft),
+ allocateLeft);
+
listAlg.add(algAligned);
// Greedy planning algorithm
ReservationAgent algGreedy =
- new IterativePlanner(new StageEarliestStartByJobArrival(),
- new StageAllocatorGreedy(), false);
+ new IterativePlanner(new StageExecutionIntervalUnconstrained(),
+ new StageAllocatorGreedyRLE(allocateLeft), allocateLeft);
listAlg.add(algGreedy);
// Set planner:
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3a615ee/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/GreedyReservationAgent.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/GreedyReservationAgent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/GreedyReservationAgent.java
index 1559b97..637a17b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/GreedyReservationAgent.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/GreedyReservationAgent.java
@@ -47,9 +47,6 @@ public class GreedyReservationAgent implements ReservationAgent {
// Greedy planner
private ReservationAgent planner;
- public final static String GREEDY_FAVOR_EARLY_ALLOCATION =
- "yarn.resourcemanager.reservation-system.favor-early-allocation";
- public final static boolean DEFAULT_GREEDY_FAVOR_EARLY_ALLOCATION = true;
private boolean allocateLeft;
public GreedyReservationAgent() {
@@ -57,20 +54,20 @@ public class GreedyReservationAgent implements ReservationAgent {
@Override
public void init(Configuration conf) {
- allocateLeft = conf.getBoolean(GREEDY_FAVOR_EARLY_ALLOCATION,
+ allocateLeft = conf.getBoolean(FAVOR_EARLY_ALLOCATION,
DEFAULT_GREEDY_FAVOR_EARLY_ALLOCATION);
if (allocateLeft) {
LOG.info("Initializing the GreedyReservationAgent to favor \"early\""
+ " (left) allocations (controlled by parameter: "
- + GREEDY_FAVOR_EARLY_ALLOCATION + ")");
+ + FAVOR_EARLY_ALLOCATION + ")");
} else {
LOG.info("Initializing the GreedyReservationAgent to favor \"late\""
+ " (right) allocations (controlled by parameter: "
- + GREEDY_FAVOR_EARLY_ALLOCATION + ")");
+ + FAVOR_EARLY_ALLOCATION + ")");
}
planner =
- new IterativePlanner(new StageEarliestStartByJobArrival(),
+ new IterativePlanner(new StageExecutionIntervalUnconstrained(),
new StageAllocatorGreedyRLE(allocateLeft), allocateLeft);
}
@@ -123,4 +120,4 @@ public class GreedyReservationAgent implements ReservationAgent {
}
-}
\ No newline at end of file
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3a615ee/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/IterativePlanner.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/IterativePlanner.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/IterativePlanner.java
index 24d237a..83f272e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/IterativePlanner.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/IterativePlanner.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.yarn.server.resourcemanager.reservation.planning;
-import java.util.HashMap;
import java.util.HashSet;
import java.util.ListIterator;
import java.util.Map;
@@ -32,26 +31,24 @@ import org.apache.hadoop.yarn.api.records.ReservationRequestInterpreter;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.resourcemanager.reservation.Plan;
import org.apache.hadoop.yarn.server.resourcemanager.reservation.RLESparseResourceAllocation;
+import org.apache.hadoop.yarn.server.resourcemanager.reservation.RLESparseResourceAllocation.RLEOperator;
import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationAllocation;
import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationInterval;
-import org.apache.hadoop.yarn.server.resourcemanager.reservation.RLESparseResourceAllocation.RLEOperator;
import org.apache.hadoop.yarn.server.resourcemanager.reservation.exceptions.ContractValidationException;
import org.apache.hadoop.yarn.server.resourcemanager.reservation.exceptions.PlanningException;
import org.apache.hadoop.yarn.util.resource.Resources;
/**
* A planning algorithm consisting of two main phases. The algorithm iterates
- * over the job stages in descending order. For each stage, the algorithm: 1.
- * Determines an interval [stageArrivalTime, stageDeadline) in which the stage
- * is allocated. 2. Computes an allocation for the stage inside the interval.
- *
- * For ANY and ALL jobs, phase 1 sets the allocation window of each stage to be
- * [jobArrival, jobDeadline]. For ORDER and ORDER_NO_GAP jobs, the deadline of
- * each stage is set as succcessorStartTime - the starting time of its
- * succeeding stage (or jobDeadline if it is the last stage).
- *
- * The phases are set using the two functions: 1. setAlgEarliestStartTime 2.
- * setAlgComputeStageAllocation
+ * over the job stages in ascending/descending order, depending on the flag
+ * allocateLeft. For each stage, the algorithm: 1. Determines an interval
+ * [stageArrival, stageDeadline) in which the stage is allocated. 2. Computes an
+ * allocation for the stage inside the interval. For ANY and ALL jobs, phase 1
+ * sets the allocation window of each stage to be [jobArrival, jobDeadline]. For
+ * ORDER and ORDER_NO_GAP jobs, the deadline of each stage is set as
+ * successorStartTime - the starting time of its succeeding stage (or
+ * jobDeadline if it is the last stage). The phases are set using the two
+ * functions: 1. setAlgStageExecutionInterval 2. setAlgStageAllocator
*/
public class IterativePlanner extends PlanningAlgorithm {
@@ -60,7 +57,7 @@ public class IterativePlanner extends PlanningAlgorithm {
private RLESparseResourceAllocation planModifications;
// Data extracted from plan
- private Map<Long, Resource> planLoads;
+ private RLESparseResourceAllocation planLoads;
private Resource capacity;
private long step;
@@ -70,16 +67,16 @@ public class IterativePlanner extends PlanningAlgorithm {
private long jobDeadline;
// Phase algorithms
- private StageEarliestStart algStageEarliestStart = null;
+ private StageExecutionInterval algStageExecutionInterval = null;
private StageAllocator algStageAllocator = null;
private final boolean allocateLeft;
// Constructor
- public IterativePlanner(StageEarliestStart algEarliestStartTime,
+ public IterativePlanner(StageExecutionInterval algStageExecutionInterval,
StageAllocator algStageAllocator, boolean allocateLeft) {
this.allocateLeft = allocateLeft;
- setAlgStageEarliestStart(algEarliestStartTime);
+ setAlgStageExecutionInterval(algStageExecutionInterval);
setAlgStageAllocator(algStageAllocator);
}
@@ -101,12 +98,6 @@ public class IterativePlanner extends PlanningAlgorithm {
// Current stage
ReservationRequest currentReservationStage;
- // Stage deadlines
- long stageDeadline = stepRoundDown(reservation.getDeadline(), step);
- long successorStartingTime = -1;
- long predecessorEndTime = stepRoundDown(reservation.getArrival(), step);
- long stageArrivalTime = -1;
-
// Iterate the stages in reverse order
while (stageProvider.hasNext()) {
@@ -116,27 +107,17 @@ public class IterativePlanner extends PlanningAlgorithm {
// Validate that the ReservationRequest respects basic constraints
validateInputStage(plan, currentReservationStage);
- // Compute an adjusted earliestStart for this resource
- // (we need this to provision some space for the ORDER contracts)
+ // Set the stageArrival and stageDeadline
+ ReservationInterval stageInterval =
+ setStageExecutionInterval(plan, reservation, currentReservationStage,
+ allocations);
+ Long stageArrival = stageInterval.getStartTime();
+ Long stageDeadline = stageInterval.getEndTime();
- if (allocateLeft) {
- stageArrivalTime = predecessorEndTime;
- } else {
- stageArrivalTime = reservation.getArrival();
- if (jobType == ReservationRequestInterpreter.R_ORDER
- || jobType == ReservationRequestInterpreter.R_ORDER_NO_GAP) {
- stageArrivalTime =
- computeEarliestStartingTime(plan, reservation,
- stageProvider.getCurrentIndex(), currentReservationStage,
- stageDeadline);
- }
- stageArrivalTime = stepRoundUp(stageArrivalTime, step);
- stageArrivalTime = Math.max(stageArrivalTime, reservation.getArrival());
- }
- // Compute the allocation of a single stage
+ // Compute stage allocation
Map<ReservationInterval, Resource> curAlloc =
- computeStageAllocation(plan, currentReservationStage,
- stageArrivalTime, stageDeadline, user, reservationId);
+ computeStageAllocation(plan, currentReservationStage, stageArrival,
+ stageDeadline, user, reservationId);
// If we did not find an allocation, return NULL
// (unless it's an ANY job, then we simply continue).
@@ -152,9 +133,13 @@ public class IterativePlanner extends PlanningAlgorithm {
}
- // Get the start & end time of the current allocation
- Long stageStartTime = findEarliestTime(curAlloc);
- Long stageEndTime = findLatestTime(curAlloc);
+ // Validate ORDER_NO_GAP
+ if (jobType == ReservationRequestInterpreter.R_ORDER_NO_GAP) {
+ if (!validateOrderNoGap(allocations, curAlloc, allocateLeft)) {
+ throw new PlanningException(
+ "The allocation found does not respect ORDER_NO_GAP");
+ }
+ }
// If we did find an allocation for the stage, add it
for (Entry<ReservationInterval, Resource> entry : curAlloc.entrySet()) {
@@ -165,33 +150,6 @@ public class IterativePlanner extends PlanningAlgorithm {
if (jobType == ReservationRequestInterpreter.R_ANY) {
break;
}
-
- // If ORDER job, set the stageDeadline of the next stage to be processed
- if (jobType == ReservationRequestInterpreter.R_ORDER
- || jobType == ReservationRequestInterpreter.R_ORDER_NO_GAP) {
-
- // CHECK ORDER_NO_GAP
- // Verify that there is no gap, in case the job is ORDER_NO_GAP
- // note that the test is different left-to-right and right-to-left
- if (jobType == ReservationRequestInterpreter.R_ORDER_NO_GAP
- && successorStartingTime != -1
- && ((allocateLeft && predecessorEndTime < stageStartTime) ||
- (!allocateLeft && (stageEndTime < successorStartingTime))
- )
- || (!isNonPreemptiveAllocation(curAlloc))) {
- throw new PlanningException(
- "The allocation found does not respect ORDER_NO_GAP");
- }
-
- if (allocateLeft) {
- // Store the stageStartTime and set the new stageDeadline
- predecessorEndTime = stageEndTime;
- } else {
- // Store the stageStartTime and set the new stageDeadline
- successorStartingTime = stageStartTime;
- stageDeadline = stageStartTime;
- }
- }
}
// If the allocation is empty, return an error
@@ -200,7 +158,39 @@ public class IterativePlanner extends PlanningAlgorithm {
}
return allocations;
+ }
+ protected static boolean validateOrderNoGap(
+ RLESparseResourceAllocation allocations,
+ Map<ReservationInterval, Resource> curAlloc, boolean allocateLeft) {
+
+ // Left to right
+ if (allocateLeft) {
+ Long stageStartTime = findEarliestTime(curAlloc);
+ Long allocationEndTime = allocations.getLatestNonNullTime();
+
+ // Check that there is no gap between stages
+ if ((allocationEndTime != -1) && (allocationEndTime < stageStartTime)) {
+ return false;
+ }
+ // Right to left
+ } else {
+ Long stageEndTime = findLatestTime(curAlloc);
+ Long allocationStartTime = allocations.getEarliestStartTime();
+
+ // Check that there is no gap between stages
+ if ((allocationStartTime != -1) && (stageEndTime < allocationStartTime)) {
+ return false;
+ }
+ }
+
+ // Check that the stage allocation does not violate ORDER_NO_GAP
+ if (!isNonPreemptiveAllocation(curAlloc)) {
+ return false;
+ }
+
+ // The allocation is legal
+ return true;
}
protected void initialize(Plan plan, ReservationId reservationId,
@@ -223,35 +213,15 @@ public class IterativePlanner extends PlanningAlgorithm {
// planLoads are not used by other StageAllocators... and don't deal
// well with huge reservation ranges
- if (this.algStageAllocator instanceof StageAllocatorLowCostAligned) {
- planLoads = getAllLoadsInInterval(plan, jobArrival, jobDeadline);
- ReservationAllocation oldRes = plan.getReservationById(reservationId);
- if (oldRes != null) {
- planModifications =
- RLESparseResourceAllocation.merge(plan.getResourceCalculator(),
- plan.getTotalCapacity(), planModifications,
- oldRes.getResourcesOverTime(), RLEOperator.subtract,
- jobArrival, jobDeadline);
- }
+ planLoads = plan.getCumulativeLoadOverTime(jobArrival, jobDeadline);
+ ReservationAllocation oldRes = plan.getReservationById(reservationId);
+ if (oldRes != null) {
+ planLoads =
+ RLESparseResourceAllocation.merge(plan.getResourceCalculator(),
+ plan.getTotalCapacity(), planLoads,
+ oldRes.getResourcesOverTime(), RLEOperator.subtract, jobArrival,
+ jobDeadline);
}
-
- }
-
- private Map<Long, Resource> getAllLoadsInInterval(Plan plan, long startTime,
- long endTime) {
-
- // Create map
- Map<Long, Resource> loads = new HashMap<Long, Resource>();
-
- // Calculate the load for every time slot between [start,end)
- for (long t = startTime; t < endTime; t += step) {
- Resource load = plan.getTotalCommittedResources(t);
- loads.put(t, load);
- }
-
- // Return map
- return loads;
-
}
private void validateInputStage(Plan plan, ReservationRequest rr)
@@ -286,7 +256,7 @@ public class IterativePlanner extends PlanningAlgorithm {
}
- private boolean isNonPreemptiveAllocation(
+ private static boolean isNonPreemptiveAllocation(
Map<ReservationInterval, Resource> curAlloc) {
// Checks whether a stage allocation is non preemptive or not.
@@ -329,14 +299,13 @@ public class IterativePlanner extends PlanningAlgorithm {
}
- // Call algEarliestStartTime()
- protected long computeEarliestStartingTime(Plan plan,
- ReservationDefinition reservation, int index,
- ReservationRequest currentReservationStage, long stageDeadline) {
-
- return algStageEarliestStart.setEarliestStartTime(plan, reservation, index,
- currentReservationStage, stageDeadline);
-
+ // Call setStageExecutionInterval()
+ protected ReservationInterval setStageExecutionInterval(Plan plan,
+ ReservationDefinition reservation,
+ ReservationRequest currentReservationStage,
+ RLESparseResourceAllocation allocations) {
+ return algStageExecutionInterval.computeExecutionInterval(plan,
+ reservation, currentReservationStage, allocateLeft, allocations);
}
// Call algStageAllocator
@@ -350,10 +319,11 @@ public class IterativePlanner extends PlanningAlgorithm {
}
- // Set the algorithm: algStageEarliestStart
- public IterativePlanner setAlgStageEarliestStart(StageEarliestStart alg) {
+ // Set the algorithm: algStageExecutionInterval
+ public IterativePlanner setAlgStageExecutionInterval(
+ StageExecutionInterval alg) {
- this.algStageEarliestStart = alg;
+ this.algStageExecutionInterval = alg;
return this; // To allow concatenation of setAlg() functions
}
@@ -375,7 +345,7 @@ public class IterativePlanner extends PlanningAlgorithm {
private final boolean allocateLeft;
- private ListIterator<ReservationRequest> li;
+ private final ListIterator<ReservationRequest> li;
public StageProvider(boolean allocateLeft,
ReservationDefinition reservation) {
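To make the ORDER_NO_GAP check above concrete, here is a minimal standalone sketch of the right-to-left branch, using plain long timestamps in place of the RLESparseResourceAllocation calls; the class and method names are illustrative, not part of the patch.

public final class GapCheckSketch {

  // Mirrors the right-to-left branch above: a stage is illegal under
  // ORDER_NO_GAP when it ends before the earliest start of the allocations
  // already placed, leaving time uncovered between stages. The -1 sentinel
  // mirrors getEarliestStartTime() when nothing is allocated yet.
  static boolean noGapRightToLeft(long stageEndTime, long allocationStartTime) {
    if (allocationStartTime != -1 && stageEndTime < allocationStartTime) {
      return false; // a gap would separate this stage from later stages
    }
    return true;
  }

  public static void main(String[] args) {
    System.out.println(noGapRightToLeft(100, 100)); // true: stages touch
    System.out.println(noGapRightToLeft(90, 100));  // false: 10-unit gap
    System.out.println(noGapRightToLeft(120, -1));  // true: first stage placed
  }
}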
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3a615ee/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/ReservationAgent.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/ReservationAgent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/ReservationAgent.java
index 52e7055..3c448b3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/ReservationAgent.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/ReservationAgent.java
@@ -29,14 +29,25 @@ import org.apache.hadoop.yarn.server.resourcemanager.reservation.exceptions.Plan
public interface ReservationAgent {
/**
+ * Constant defining the preferential treatment of time for equally valid
+ * allocations.
+ */
+ final static String FAVOR_EARLY_ALLOCATION =
+ "yarn.resourcemanager.reservation-system.favor-early-allocation";
+ /**
+ * By default favor early allocations.
+ */
+ final static boolean DEFAULT_GREEDY_FAVOR_EARLY_ALLOCATION = true;
+
+ /**
* Create a reservation for the user that abides by the specified contract
- *
+ *
* @param reservationId the identifier of the reservation to be created.
* @param user the user who wants to create the reservation
* @param plan the Plan to which the reservation must be fitted
* @param contract encapsulates the resources the user requires for his
* session
- *
+ *
* @return whether the create operation was successful or not
* @throws PlanningException if the session cannot be fitted into the plan
*/
@@ -45,13 +56,13 @@ public interface ReservationAgent {
/**
* Update a reservation for the user that abides by the specified contract
- *
+ *
* @param reservationId the identifier of the reservation to be updated
* @param user the user who wants to create the session
* @param plan the Plan to which the reservation must be fitted
* @param contract encapsulates the resources the user requires for his
* reservation
- *
+ *
* @return whether the update operation was successful or not
* @throws PlanningException if the reservation cannot be fitted into the plan
*/
@@ -60,11 +71,11 @@ public interface ReservationAgent {
/**
* Delete an user reservation
- *
+ *
* @param reservationId the identifier of the reservation to be deleted
* @param user the user who wants to create the reservation
* @param plan the Plan to which the session must be fitted
- *
+ *
* @return whether the delete operation was successful or not
* @throws PlanningException if the reservation cannot be fitted into the plan
*/
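The two constants introduced above imply that agent implementations look up the allocation-direction flag from the YARN configuration. A minimal sketch of what that lookup might look like (the wiring inside the concrete agents is not part of this hunk, so treat the surrounding class as an assumption):

import org.apache.hadoop.conf.Configuration;

public class AgentConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Key and default mirror FAVOR_EARLY_ALLOCATION and
    // DEFAULT_GREEDY_FAVOR_EARLY_ALLOCATION declared in the interface above.
    boolean allocateLeft = conf.getBoolean(
        "yarn.resourcemanager.reservation-system.favor-early-allocation",
        true);
    System.out.println("allocateLeft = " + allocateLeft);
  }
}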
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3a615ee/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/SimpleCapacityReplanner.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/SimpleCapacityReplanner.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/SimpleCapacityReplanner.java
index 7507783..7bfc730 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/SimpleCapacityReplanner.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/SimpleCapacityReplanner.java
@@ -42,7 +42,7 @@ import com.google.common.annotations.VisibleForTesting;
* This (re)planner scans a period of time from now to a maximum time window (or
* the end of the last session, whichever comes first), checking that the
* overall capacity is not violated.
- *
+ *
* It greedily removes sessions in reversed order of acceptance (latest accepted
* is the first removed).
*/
@@ -90,8 +90,8 @@ public class SimpleCapacityReplanner implements Planner {
// loop on all moment in time from now to the end of the check Zone
// or the end of the planned sessions whichever comes first
- for (long t = now;
- (t < plan.getLastEndTime() && t < (now + lengthOfCheckZone));
+ for (long t = now;
+ (t < plan.getLastEndTime() && t < (now + lengthOfCheckZone));
t += plan.getStep()) {
Resource excessCap =
Resources.subtract(plan.getTotalCommittedResources(t), totCap);
@@ -102,7 +102,7 @@ public class SimpleCapacityReplanner implements Planner {
new TreeSet<ReservationAllocation>(plan.getReservationsAtTime(t));
for (Iterator<ReservationAllocation> resIter =
curReservations.iterator(); resIter.hasNext()
- && Resources.greaterThan(resCalc, totCap, excessCap,
+ && Resources.greaterThan(resCalc, totCap, excessCap,
ZERO_RESOURCE);) {
ReservationAllocation reservation = resIter.next();
plan.deleteReservation(reservation.getReservationId());
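A simplified sketch of the eviction policy exercised by the loop above: while the committed load at a time step exceeds capacity, the latest-accepted reservation is removed first. Scalar loads stand in for Resource objects and the numbers are illustrative.

import java.util.ArrayDeque;
import java.util.Deque;

public final class ReplannerSketch {
  public static void main(String[] args) {
    double totCap = 100.0;
    // Reservations in acceptance order; each entry is {id, flat load}.
    Deque<double[]> accepted = new ArrayDeque<>();
    accepted.push(new double[] {1, 60});
    accepted.push(new double[] {2, 70}); // latest accepted sits on top
    double committed = 130.0; // over capacity at this time step
    while (committed > totCap && !accepted.isEmpty()) {
      double[] r = accepted.pop(); // latest accepted is removed first
      committed -= r[1];
      System.out.println("evicted reservation " + (long) r[0]
          + ", committed now " + committed);
    }
  }
}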
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3a615ee/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageAllocator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageAllocator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageAllocator.java
index b95f8d4..ec6d9c0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageAllocator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageAllocator.java
@@ -41,19 +41,21 @@ public interface StageAllocator {
* @param planModifications the allocations performed by the planning
* algorithm which are not yet reflected by plan
* @param rr the stage
- * @param stageEarliestStart the arrival time (earliest starting time) set for
+ * @param stageArrival the arrival time (earliest starting time) set for
* the stage by the two phase planning algorithm
* @param stageDeadline the deadline of the stage set by the two phase
* planning algorithm
+ * @param user name of the user
+ * @param oldId identifier of the old reservation
*
* @return The computed allocation (or null if the stage could not be
* allocated)
* @throws PlanningException
*/
Map<ReservationInterval, Resource> computeStageAllocation(Plan plan,
- Map<Long, Resource> planLoads,
+ RLESparseResourceAllocation planLoads,
RLESparseResourceAllocation planModifications, ReservationRequest rr,
- long stageEarliestStart, long stageDeadline, String user,
+ long stageArrival, long stageDeadline, String user,
ReservationId oldId) throws PlanningException;
-}
\ No newline at end of file
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3a615ee/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageAllocatorGreedy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageAllocatorGreedy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageAllocatorGreedy.java
index c836970..da04336 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageAllocatorGreedy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageAllocatorGreedy.java
@@ -26,8 +26,8 @@ import org.apache.hadoop.yarn.api.records.ReservationRequest;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.resourcemanager.reservation.Plan;
import org.apache.hadoop.yarn.server.resourcemanager.reservation.RLESparseResourceAllocation;
-import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationInterval;
import org.apache.hadoop.yarn.server.resourcemanager.reservation.RLESparseResourceAllocation.RLEOperator;
+import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationInterval;
import org.apache.hadoop.yarn.server.resourcemanager.reservation.exceptions.PlanningException;
import org.apache.hadoop.yarn.util.resource.Resources;
@@ -41,7 +41,7 @@ public class StageAllocatorGreedy implements StageAllocator {
@Override
public Map<ReservationInterval, Resource> computeStageAllocation(Plan plan,
- Map<Long, Resource> planLoads,
+ RLESparseResourceAllocation planLoads,
RLESparseResourceAllocation planModifications, ReservationRequest rr,
long stageEarliestStart, long stageDeadline, String user,
ReservationId oldId) throws PlanningException {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3a615ee/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageAllocatorGreedyRLE.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageAllocatorGreedyRLE.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageAllocatorGreedyRLE.java
index 5e748fc..ec83e02 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageAllocatorGreedyRLE.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageAllocatorGreedyRLE.java
@@ -29,8 +29,8 @@ import org.apache.hadoop.yarn.api.records.ReservationRequest;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.resourcemanager.reservation.Plan;
import org.apache.hadoop.yarn.server.resourcemanager.reservation.RLESparseResourceAllocation;
-import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationInterval;
import org.apache.hadoop.yarn.server.resourcemanager.reservation.RLESparseResourceAllocation.RLEOperator;
+import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationInterval;
import org.apache.hadoop.yarn.server.resourcemanager.reservation.exceptions.PlanningException;
import org.apache.hadoop.yarn.util.resource.Resources;
@@ -52,7 +52,7 @@ public class StageAllocatorGreedyRLE implements StageAllocator {
@Override
public Map<ReservationInterval, Resource> computeStageAllocation(Plan plan,
- Map<Long, Resource> planLoads,
+ RLESparseResourceAllocation planLoads,
RLESparseResourceAllocation planModifications, ReservationRequest rr,
long stageEarliestStart, long stageDeadline, String user,
ReservationId oldId) throws PlanningException {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3a615ee/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageAllocatorLowCostAligned.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageAllocatorLowCostAligned.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageAllocatorLowCostAligned.java
index b9fd8e1..e45f58c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageAllocatorLowCostAligned.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageAllocatorLowCostAligned.java
@@ -18,8 +18,12 @@
package org.apache.hadoop.yarn.server.resourcemanager.reservation.planning;
+import java.util.ArrayList;
import java.util.Comparator;
+import java.util.List;
import java.util.Map;
+import java.util.Map.Entry;
+import java.util.NavigableMap;
import java.util.TreeSet;
import org.apache.hadoop.yarn.api.records.ReservationId;
@@ -27,46 +31,55 @@ import org.apache.hadoop.yarn.api.records.ReservationRequest;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.resourcemanager.reservation.Plan;
import org.apache.hadoop.yarn.server.resourcemanager.reservation.RLESparseResourceAllocation;
+import org.apache.hadoop.yarn.server.resourcemanager.reservation.RLESparseResourceAllocation.RLEOperator;
import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationInterval;
+import org.apache.hadoop.yarn.server.resourcemanager.reservation.exceptions.PlanningException;
import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
import org.apache.hadoop.yarn.util.resource.Resources;
/**
* A stage allocator that iteratively allocates containers in the
* {@link DurationInterval} with lowest overall cost. The algorithm only
- * considers intervals of the form: [stageDeadline - (n+1)*duration,
- * stageDeadline - n*duration) for an integer n. This guarantees that the
- * allocations are aligned (as opposed to overlapping duration intervals).
- *
- * The smoothnessFactor parameter controls the number of containers that are
- * simultaneously allocated in each iteration of the algorithm.
+ * considers non-overlapping intervals of length 'duration'. This guarantees
+ * that the allocations are aligned. If 'allocateLeft == true', the intervals
+ * considered by the algorithm are aligned to stageArrival; otherwise, they are
+ * aligned to stageDeadline. The smoothnessFactor parameter controls the number
+ * of containers that are simultaneously allocated in each iteration of the
+ * algorithm.
*/
public class StageAllocatorLowCostAligned implements StageAllocator {
+ private final boolean allocateLeft;
// Smoothness factor
private int smoothnessFactor = 10;
// Constructor
- public StageAllocatorLowCostAligned() {
+ public StageAllocatorLowCostAligned(boolean allocateLeft) {
+ this.allocateLeft = allocateLeft;
}
// Constructor
- public StageAllocatorLowCostAligned(int smoothnessFactor) {
+ public StageAllocatorLowCostAligned(int smoothnessFactor,
+ boolean allocateLeft) {
+ this.allocateLeft = allocateLeft;
this.smoothnessFactor = smoothnessFactor;
}
- // computeJobAllocation()
@Override
- public Map<ReservationInterval, Resource> computeStageAllocation(
- Plan plan, Map<Long, Resource> planLoads,
+ public Map<ReservationInterval, Resource> computeStageAllocation(Plan plan,
+ RLESparseResourceAllocation planLoads,
RLESparseResourceAllocation planModifications, ReservationRequest rr,
- long stageEarliestStart, long stageDeadline, String user,
- ReservationId oldId) {
+ long stageArrival, long stageDeadline, String user, ReservationId oldId)
+ throws PlanningException {
// Initialize
ResourceCalculator resCalc = plan.getResourceCalculator();
Resource capacity = plan.getTotalCapacity();
+
+ RLESparseResourceAllocation netRLERes = plan
+ .getAvailableResourceOverTime(user, oldId, stageArrival, stageDeadline);
+
long step = plan.getStep();
// Create allocationRequests
@@ -76,16 +89,15 @@ public class StageAllocatorLowCostAligned implements StageAllocator {
// Initialize parameters
long duration = stepRoundUp(rr.getDuration(), step);
int windowSizeInDurations =
- (int) ((stageDeadline - stageEarliestStart) / duration);
+ (int) ((stageDeadline - stageArrival) / duration);
int totalGangs = rr.getNumContainers() / rr.getConcurrency();
int numContainersPerGang = rr.getConcurrency();
Resource gang =
Resources.multiply(rr.getCapability(), numContainersPerGang);
// Set maxGangsPerUnit
- int maxGangsPerUnit =
- (int) Math.max(
- Math.floor(((double) totalGangs) / windowSizeInDurations), 1);
+ int maxGangsPerUnit = (int) Math
+ .max(Math.floor(((double) totalGangs) / windowSizeInDurations), 1);
maxGangsPerUnit = Math.max(maxGangsPerUnit / smoothnessFactor, 1);
// If window size is too small, return null
@@ -93,6 +105,8 @@ public class StageAllocatorLowCostAligned implements StageAllocator {
return null;
}
+ final int preferLeft = allocateLeft ? 1 : -1;
+
// Initialize tree sorted by costs
TreeSet<DurationInterval> durationIntervalsSortedByCost =
new TreeSet<DurationInterval>(new Comparator<DurationInterval>() {
@@ -104,23 +118,26 @@ public class StageAllocatorLowCostAligned implements StageAllocator {
return cmp;
}
- return (-1) * Long.compare(val1.getEndTime(), val2.getEndTime());
+ return preferLeft
+ * Long.compare(val1.getEndTime(), val2.getEndTime());
}
});
+ List<Long> intervalEndTimes =
+ computeIntervalEndTimes(stageArrival, stageDeadline, duration);
+
// Add durationIntervals that end at (endTime - n*duration) for some n.
- for (long intervalEnd = stageDeadline; intervalEnd >= stageEarliestStart
- + duration; intervalEnd -= duration) {
+ for (long intervalEnd : intervalEndTimes) {
long intervalStart = intervalEnd - duration;
// Get duration interval [intervalStart,intervalEnd)
DurationInterval durationInterval =
getDurationInterval(intervalStart, intervalEnd, planLoads,
- planModifications, capacity, resCalc, step);
+ planModifications, capacity, netRLERes, resCalc, step, gang);
// If the interval can fit a gang, add it to the tree
- if (durationInterval.canAllocate(gang, capacity, resCalc)) {
+ if (durationInterval.canAllocate()) {
durationIntervalsSortedByCost.add(durationInterval);
}
}
@@ -139,8 +156,7 @@ public class StageAllocatorLowCostAligned implements StageAllocator {
durationIntervalsSortedByCost.first();
int numGangsToAllocate = Math.min(maxGangsPerUnit, remainingGangs);
numGangsToAllocate =
- Math.min(numGangsToAllocate,
- bestDurationInterval.numCanFit(gang, capacity, resCalc));
+ Math.min(numGangsToAllocate, bestDurationInterval.numCanFit());
// Add it
remainingGangs -= numGangsToAllocate;
@@ -148,9 +164,8 @@ public class StageAllocatorLowCostAligned implements StageAllocator {
new ReservationInterval(bestDurationInterval.getStartTime(),
bestDurationInterval.getEndTime());
- Resource reservationRes =
- Resources.multiply(rr.getCapability(), rr.getConcurrency()
- * numGangsToAllocate);
+ Resource reservationRes = Resources.multiply(rr.getCapability(),
+ rr.getConcurrency() * numGangsToAllocate);
planModifications.addInterval(reservationInt, reservationRes);
allocationRequests.addInterval(reservationInt, reservationRes);
@@ -162,10 +177,10 @@ public class StageAllocatorLowCostAligned implements StageAllocator {
DurationInterval updatedDurationInterval =
getDurationInterval(bestDurationInterval.getStartTime(),
bestDurationInterval.getStartTime() + duration, planLoads,
- planModifications, capacity, resCalc, step);
+ planModifications, capacity, netRLERes, resCalc, step, gang);
// Add to tree, if possible
- if (updatedDurationInterval.canAllocate(gang, capacity, resCalc)) {
+ if (updatedDurationInterval.canAllocate()) {
durationIntervalsSortedByCost.add(updatedDurationInterval);
}
@@ -180,10 +195,12 @@ public class StageAllocatorLowCostAligned implements StageAllocator {
return allocations;
} else {
- // If we are here is because we did not manage to satisfy this request.
- // We remove unwanted side-effect from planModifications (needed for ANY).
- for (Map.Entry<ReservationInterval, Resource> tempAllocation
- : allocations.entrySet()) {
+ // If we are here, it is because we did not manage to satisfy this
+ // request. We remove the unwanted side-effects from planModifications
+ // (needed for ANY).
+ for (Map.Entry<ReservationInterval, Resource> tempAllocation : allocations
+ .entrySet()) {
planModifications.removeInterval(tempAllocation.getKey(),
tempAllocation.getValue());
@@ -196,37 +213,144 @@ public class StageAllocatorLowCostAligned implements StageAllocator {
}
- protected DurationInterval getDurationInterval(long startTime, long endTime,
- Map<Long, Resource> planLoads,
+ private List<Long> computeIntervalEndTimes(long stageEarliestStart,
+ long stageDeadline, long duration) {
+
+ List<Long> intervalEndTimes = new ArrayList<Long>();
+ if (!allocateLeft) {
+ for (long intervalEnd = stageDeadline; intervalEnd >= stageEarliestStart
+ + duration; intervalEnd -= duration) {
+ intervalEndTimes.add(intervalEnd);
+ }
+ } else {
+ for (long intervalStart =
+ stageEarliestStart; intervalStart <= stageDeadline
+ - duration; intervalStart += duration) {
+ intervalEndTimes.add(intervalStart + duration);
+ }
+ }
+
+ return intervalEndTimes;
+ }
+
+ protected static DurationInterval getDurationInterval(long startTime,
+ long endTime, RLESparseResourceAllocation planLoads,
RLESparseResourceAllocation planModifications, Resource capacity,
- ResourceCalculator resCalc, long step) {
+ RLESparseResourceAllocation netRLERes, ResourceCalculator resCalc,
+ long step, Resource requestedResources) throws PlanningException {
- // Initialize the dominant loads structure
- Resource dominantResources = Resource.newInstance(0, 0);
+ // Get the total cost associated with the duration interval
+ double totalCost = getDurationIntervalTotalCost(startTime, endTime,
+ planLoads, planModifications, capacity, resCalc, step);
- // Calculate totalCost and maxLoad
- double totalCost = 0.0;
- for (long t = startTime; t < endTime; t += step) {
+ // Calculate how many gangs can fit, i.e., how many times can 'capacity'
+ // be allocated within the duration interval [startTime, endTime)
+ int gangsCanFit = getDurationIntervalGangsCanFit(startTime, endTime,
+ planModifications, capacity, netRLERes, resCalc, requestedResources);
+
+ // Return the desired durationInterval
+ return new DurationInterval(startTime, endTime, totalCost, gangsCanFit);
+
+ }
+
+ protected static double getDurationIntervalTotalCost(long startTime,
+ long endTime, RLESparseResourceAllocation planLoads,
+ RLESparseResourceAllocation planModifications, Resource capacity,
+ ResourceCalculator resCalc, long step) throws PlanningException {
+
+ // Compute the current resource load within the interval [startTime,endTime)
+ // by adding planLoads (existing load) and planModifications (load that
+ // corresponds to the current job).
+ RLESparseResourceAllocation currentLoad =
+ RLESparseResourceAllocation.merge(resCalc, capacity, planLoads,
+ planModifications, RLEOperator.add, startTime, endTime);
+
+ // Convert load from RLESparseResourceAllocation to a Map representation
+ NavigableMap<Long, Resource> mapCurrentLoad = currentLoad.getCumulative();
- // Get the load
- Resource load = getLoadAtTime(t, planLoads, planModifications);
+ // Initialize auxiliary variables
+ double totalCost = 0.0;
+ Long tPrev = -1L;
+ Resource loadPrev = Resources.none();
+ double cost = 0.0;
+
+ // Iterate over time points. For each point 't', accumulate the total cost
+ // that corresponds to the interval [tPrev, t). The cost associated within
+ // this interval is fixed for each of the time steps, therefore the cost of
+ // a single step is multiplied by (t - tPrev) / step.
+ for (Entry<Long, Resource> e : mapCurrentLoad.entrySet()) {
+ Long t = e.getKey();
+ Resource load = e.getValue();
+ if (tPrev != -1L) {
+ tPrev = Math.max(tPrev, startTime);
+ cost = calcCostOfLoad(loadPrev, capacity, resCalc);
+ totalCost = totalCost + cost * (t - tPrev) / step;
+ }
- // Increase the total cost
- totalCost += calcCostOfLoad(load, capacity, resCalc);
+ tPrev = t;
+ loadPrev = load;
+ }
- // Update the dominant resources
- dominantResources = Resources.componentwiseMax(dominantResources, load);
+ // Add the cost associated with the last interval (the for loop does not
+ // calculate it).
+ if (loadPrev != null) {
+ // This takes care of the corner case of a single entry
+ tPrev = Math.max(tPrev, startTime);
+ cost = calcCostOfLoad(loadPrev, capacity, resCalc);
+ totalCost = totalCost + cost * (endTime - tPrev) / step;
}
- // Return the corresponding durationInterval
- return new DurationInterval(startTime, endTime, totalCost,
- dominantResources);
+ // Return the overall cost
+ return totalCost;
+ }
+
+ protected static int getDurationIntervalGangsCanFit(long startTime,
+ long endTime, RLESparseResourceAllocation planModifications,
+ Resource capacity, RLESparseResourceAllocation netRLERes,
+ ResourceCalculator resCalc, Resource requestedResources)
+ throws PlanningException {
+
+ // Initialize auxiliary variables
+ int gangsCanFit = Integer.MAX_VALUE;
+ int curGangsCanFit;
+
+ // Calculate the total amount of available resources between startTime
+ // and endTime, by subtracting planModifications from netRLERes
+ RLESparseResourceAllocation netAvailableResources =
+ RLESparseResourceAllocation.merge(resCalc, capacity, netRLERes,
+ planModifications, RLEOperator.subtractTestNonNegative, startTime,
+ endTime);
+
+ // Convert result to a map
+ NavigableMap<Long, Resource> mapAvailableCapacity =
+ netAvailableResources.getCumulative();
+
+ // Iterate over the map representation.
+ // At each point, calculate how many times does 'requestedResources' fit.
+ // The result is the minimum over all time points.
+ for (Entry<Long, Resource> e : mapAvailableCapacity.entrySet()) {
+ Long t = e.getKey();
+ Resource curAvailable = e.getValue();
+ if (t >= endTime) {
+ break;
+ }
+ if (curAvailable == null) {
+ gangsCanFit = 0;
+ } else {
+ curGangsCanFit = (int) Math.floor(Resources.divide(resCalc, capacity,
+ curAvailable, requestedResources));
+ if (curGangsCanFit < gangsCanFit) {
+ gangsCanFit = curGangsCanFit;
+ }
+ }
+ }
+ return gangsCanFit;
}
protected double calcCostOfInterval(long startTime, long endTime,
- Map<Long, Resource> planLoads,
+ RLESparseResourceAllocation planLoads,
RLESparseResourceAllocation planModifications, Resource capacity,
ResourceCalculator resCalc, long step) {
@@ -242,7 +366,8 @@ public class StageAllocatorLowCostAligned implements StageAllocator {
}
- protected double calcCostOfTimeSlot(long t, Map<Long, Resource> planLoads,
+ protected double calcCostOfTimeSlot(long t,
+ RLESparseResourceAllocation planLoads,
RLESparseResourceAllocation planModifications, Resource capacity,
ResourceCalculator resCalc) {
@@ -254,17 +379,17 @@ public class StageAllocatorLowCostAligned implements StageAllocator {
}
- protected Resource getLoadAtTime(long t, Map<Long, Resource> planLoads,
+ protected Resource getLoadAtTime(long t,
+ RLESparseResourceAllocation planLoads,
RLESparseResourceAllocation planModifications) {
- Resource planLoad = planLoads.get(t);
- planLoad = (planLoad == null) ? Resource.newInstance(0, 0) : planLoad;
+ Resource planLoad = planLoads.getCapacityAtTime(t);
return Resources.add(planLoad, planModifications.getCapacityAtTime(t));
}
- protected double calcCostOfLoad(Resource load, Resource capacity,
+ protected static double calcCostOfLoad(Resource load, Resource capacity,
ResourceCalculator resCalc) {
return resCalc.ratio(load, capacity);
@@ -289,42 +414,30 @@ public class StageAllocatorLowCostAligned implements StageAllocator {
private long startTime;
private long endTime;
private double cost;
- private Resource maxLoad;
+ private final int gangsCanFit;
// Constructor
public DurationInterval(long startTime, long endTime, double cost,
- Resource maxLoad) {
+ int gangsCanfit) {
this.startTime = startTime;
this.endTime = endTime;
this.cost = cost;
- this.maxLoad = maxLoad;
+ this.gangsCanFit = gangsCanfit;
}
// canAllocate() - boolean function, returns whether requestedResources
// can be allocated during the durationInterval without
// violating capacity constraints
- public boolean canAllocate(Resource requestedResources, Resource capacity,
- ResourceCalculator resCalc) {
-
- Resource updatedMaxLoad = Resources.add(maxLoad, requestedResources);
- return (resCalc.compare(capacity, updatedMaxLoad, capacity) <= 0);
-
+ public boolean canAllocate() {
+ return (gangsCanFit > 0);
}
// numCanFit() - returns the maximal number of requestedResources can be
// allocated during the durationInterval without violating
// capacity constraints
- public int numCanFit(Resource requestedResources, Resource capacity,
- ResourceCalculator resCalc) {
-
- // Represents the largest resource demand that can be satisfied throughout
- // the entire DurationInterval (i.e., during [startTime,endTime))
- Resource availableResources = Resources.subtract(capacity, maxLoad);
-
- // Maximal number of requestedResources that fit inside the interval
- return (int) Math.floor(Resources.divide(resCalc, capacity,
- availableResources, requestedResources));
+ public int numCanFit() {
+ return gangsCanFit;
}
public long getStartTime() {
@@ -343,14 +456,6 @@ public class StageAllocatorLowCostAligned implements StageAllocator {
this.endTime = value;
}
- public Resource getMaxLoad() {
- return this.maxLoad;
- }
-
- public void setMaxLoad(Resource value) {
- this.maxLoad = value;
- }
-
public double getTotalCost() {
return this.cost;
}
@@ -359,11 +464,17 @@ public class StageAllocatorLowCostAligned implements StageAllocator {
this.cost = value;
}
+ @Override
public String toString() {
+
StringBuilder sb = new StringBuilder();
+
sb.append(" start: " + startTime).append(" end: " + endTime)
- .append(" cost: " + cost).append(" maxLoad: " + maxLoad);
+ .append(" cost: " + cost).append(" gangsCanFit: " + gangsCanFit);
+
return sb.toString();
+
}
+
}
}
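To illustrate the alignment described in the updated class comment, here is a standalone re-expression of computeIntervalEndTimes() from the patch: candidate intervals of length duration are anchored at stageArrival when allocateLeft is true and at stageDeadline otherwise, so they never overlap. The values in main() are made up.

import java.util.ArrayList;
import java.util.List;

public final class AlignedIntervalsSketch {
  static List<Long> intervalEndTimes(long arrival, long deadline,
      long duration, boolean allocateLeft) {
    List<Long> ends = new ArrayList<Long>();
    if (!allocateLeft) {
      // Aligned to the deadline, walking right to left.
      for (long end = deadline; end >= arrival + duration; end -= duration) {
        ends.add(end);
      }
    } else {
      // Aligned to the arrival, walking left to right.
      for (long start = arrival; start <= deadline - duration;
          start += duration) {
        ends.add(start + duration);
      }
    }
    return ends;
  }

  public static void main(String[] args) {
    System.out.println(intervalEndTimes(0, 100, 30, true));  // [30, 60, 90]
    System.out.println(intervalEndTimes(0, 100, 30, false)); // [100, 70, 40]
  }
}

The tree comparator above then visits these candidates cheapest-first, breaking ties toward earlier intervals when allocateLeft is true and toward later ones otherwise.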
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3a615ee/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageEarliestStart.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageEarliestStart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageEarliestStart.java
deleted file mode 100644
index 547616a..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageEarliestStart.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.resourcemanager.reservation.planning;
-
-import org.apache.hadoop.yarn.api.records.ReservationDefinition;
-import org.apache.hadoop.yarn.api.records.ReservationRequest;
-import org.apache.hadoop.yarn.server.resourcemanager.reservation.Plan;
-
-/**
- * Interface for setting the earliest start time of a stage in IterativePlanner.
- */
-public interface StageEarliestStart {
-
- /**
- * Computes the earliest allowed starting time for a given stage.
- *
- * @param plan the Plan to which the reservation must be fitted
- * @param reservation the job contract
- * @param index the index of the stage in the job contract
- * @param currentReservationStage the stage
- * @param stageDeadline the deadline of the stage set by the two phase
- * planning algorithm
- *
- * @return the earliest allowed starting time for the stage.
- */
- long setEarliestStartTime(Plan plan, ReservationDefinition reservation,
- int index, ReservationRequest currentReservationStage,
- long stageDeadline);
-
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3a615ee/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageEarliestStartByDemand.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageEarliestStartByDemand.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageEarliestStartByDemand.java
deleted file mode 100644
index 43d6584..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageEarliestStartByDemand.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.resourcemanager.reservation.planning;
-
-import java.util.ListIterator;
-
-import org.apache.hadoop.yarn.api.records.ReservationDefinition;
-import org.apache.hadoop.yarn.api.records.ReservationRequest;
-import org.apache.hadoop.yarn.server.resourcemanager.reservation.Plan;
-
-/**
- * Sets the earliest start time of a stage proportional to the job weight. The
- * interval [jobArrival, stageDeadline) is divided as follows. First, each stage
- * is guaranteed at least its requested duration. Then, the stage receives a
- * fraction of the remaining time. The fraction is calculated as the ratio
- * between the weight (total requested resources) of the stage and the total
- * weight of all proceeding stages.
- */
-
-public class StageEarliestStartByDemand implements StageEarliestStart {
-
- private long step;
-
- @Override
- public long setEarliestStartTime(Plan plan,
- ReservationDefinition reservation, int index, ReservationRequest current,
- long stageDeadline) {
-
- step = plan.getStep();
-
- // If this is the first stage, don't bother with the computation.
- if (index < 1) {
- return reservation.getArrival();
- }
-
- // Get iterator
- ListIterator<ReservationRequest> li =
- reservation.getReservationRequests().getReservationResources()
- .listIterator(index);
- ReservationRequest rr;
-
- // Calculate the total weight & total duration
- double totalWeight = calcWeight(current);
- long totalDuration = getRoundedDuration(current, plan);
-
- while (li.hasPrevious()) {
- rr = li.previous();
- totalWeight += calcWeight(rr);
- totalDuration += getRoundedDuration(rr, plan);
- }
-
- // Compute the weight of the current stage as compared to remaining ones
- double ratio = calcWeight(current) / totalWeight;
-
- // Estimate an early start time, such that:
- // 1. Every stage is guaranteed to receive at least its duration
- // 2. The remainder of the window is divided between stages
- // proportionally to its workload (total memory consumption)
- long window = stageDeadline - reservation.getArrival();
- long windowRemainder = window - totalDuration;
- long earlyStart =
- (long) (stageDeadline - getRoundedDuration(current, plan)
- - (windowRemainder * ratio));
-
- // Realign if necessary (since we did some arithmetic)
- earlyStart = stepRoundUp(earlyStart, step);
-
- // Return
- return earlyStart;
-
- }
-
- // Weight = total memory consumption of stage
- protected double calcWeight(ReservationRequest stage) {
- return (stage.getDuration() * stage.getCapability().getMemorySize())
- * (stage.getNumContainers());
- }
-
- protected long getRoundedDuration(ReservationRequest stage, Plan plan) {
- return stepRoundUp(stage.getDuration(), step);
- }
-
- protected static long stepRoundDown(long t, long step) {
- return (t / step) * step;
- }
-
- protected static long stepRoundUp(long t, long step) {
- return ((t + step - 1) / step) * step;
- }
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3a615ee/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageEarliestStartByJobArrival.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageEarliestStartByJobArrival.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageEarliestStartByJobArrival.java
deleted file mode 100644
index 8347816..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageEarliestStartByJobArrival.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.resourcemanager.reservation.planning;
-
-import org.apache.hadoop.yarn.api.records.ReservationDefinition;
-import org.apache.hadoop.yarn.api.records.ReservationRequest;
-import org.apache.hadoop.yarn.server.resourcemanager.reservation.Plan;
-
-/**
- * Sets the earliest start time of a stage as the job arrival time.
- */
-public class StageEarliestStartByJobArrival implements StageEarliestStart {
-
- @Override
- public long setEarliestStartTime(Plan plan,
- ReservationDefinition reservation, int index, ReservationRequest current,
- long stageDeadline) {
-
- return reservation.getArrival();
-
- }
-
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3a615ee/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageExecutionInterval.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageExecutionInterval.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageExecutionInterval.java
new file mode 100644
index 0000000..8f7f5f7
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageExecutionInterval.java
@@ -0,0 +1,47 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.reservation.planning;
+
+import org.apache.hadoop.yarn.api.records.ReservationDefinition;
+import org.apache.hadoop.yarn.api.records.ReservationRequest;
+import org.apache.hadoop.yarn.server.resourcemanager.reservation.Plan;
+import org.apache.hadoop.yarn.server.resourcemanager.reservation.RLESparseResourceAllocation;
+import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationInterval;
+
+/**
+ * An auxiliary class used to compute the time interval in which the stage can
+ * be allocated resources by {@link IterativePlanner}.
+ */
+public interface StageExecutionInterval {
+ /**
+ * Computes the time interval in which a given stage may be allocated
+ * resources.
+ *
+ * @param plan the Plan to which the reservation must be fitted
+ * @param reservation the job contract
+ * @param currentReservationStage the stage
+ * @param allocateLeft is the job allocated from left to right
+ * @param allocations Existing resource assignments for the job
+ * @return the time interval in which the stage can get resources.
+ */
+ ReservationInterval computeExecutionInterval(Plan plan,
+ ReservationDefinition reservation,
+ ReservationRequest currentReservationStage, boolean allocateLeft,
+ RLESparseResourceAllocation allocations);
+
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3a615ee/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageExecutionIntervalByDemand.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageExecutionIntervalByDemand.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageExecutionIntervalByDemand.java
new file mode 100644
index 0000000..95f1d4b
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageExecutionIntervalByDemand.java
@@ -0,0 +1,144 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.reservation.planning;
+
+import org.apache.hadoop.yarn.api.records.ReservationDefinition;
+import org.apache.hadoop.yarn.api.records.ReservationRequest;
+import org.apache.hadoop.yarn.api.records.ReservationRequestInterpreter;
+import org.apache.hadoop.yarn.server.resourcemanager.reservation.Plan;
+import org.apache.hadoop.yarn.server.resourcemanager.reservation.RLESparseResourceAllocation;
+import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationInterval;
+import org.apache.hadoop.yarn.server.resourcemanager.reservation.planning.IterativePlanner.StageProvider;
+
+/**
+ * An implementation of {@link StageExecutionInterval}, which sets the execution
+ * interval of the stage. For ANY and ALL jobs, the interval is
+ * [jobArrival,jobDeadline]. For ORDER jobs, the maximal possible time
+ * interval is divided as follows: First, each stage is guaranteed at least its
+ * requested duration. Then, the stage receives a fraction of the remaining
+ * time. The fraction is calculated as the ratio between the weight (total
+ * requested resources) of the stage and the total weight of all remaining
+ * stages.
+ */
+
+public class StageExecutionIntervalByDemand implements StageExecutionInterval {
+
+ private long step;
+
+ @Override
+ public ReservationInterval computeExecutionInterval(Plan plan,
+ ReservationDefinition reservation,
+ ReservationRequest currentReservationStage, boolean allocateLeft,
+ RLESparseResourceAllocation allocations) {
+
+ // Use StageExecutionIntervalUnconstrained to get the maximal interval
+ ReservationInterval maxInterval =
+ (new StageExecutionIntervalUnconstrained()).computeExecutionInterval(
+ plan, reservation, currentReservationStage, allocateLeft,
+ allocations);
+
+ ReservationRequestInterpreter jobType =
+ reservation.getReservationRequests().getInterpreter();
+
+ // For unconstrained jobs, such as ALL & ANY, we can use the unconstrained
+ // version
+ if ((jobType != ReservationRequestInterpreter.R_ORDER)
+ && (jobType != ReservationRequestInterpreter.R_ORDER_NO_GAP)) {
+ return maxInterval;
+ }
+
+ // For ORDER and ORDER_NO_GAP, take a sub-interval of maxInterval
+ step = plan.getStep();
+
+ double totalWeight = 0.0;
+ long totalDuration = 0;
+
+ // Iterate over the stages that haven't been allocated.
+ // For allocateLeft == true, we iterate in reverse order, starting from
+ // the last stage, until we reach the current stage.
+ // For allocateLeft == false, we do the opposite.
+ StageProvider stageProvider = new StageProvider(!allocateLeft, reservation);
+
+ while (stageProvider.hasNext()) {
+ ReservationRequest rr = stageProvider.next();
+ totalWeight += calcWeight(rr);
+ totalDuration += getRoundedDuration(rr, step);
+
+ // Stop once we reach current
+ if (rr == currentReservationStage) {
+ break;
+ }
+ }
+
+ // Compute the weight of the current stage as compared to remaining ones
+ double ratio = calcWeight(currentReservationStage) / totalWeight;
+
+ // Estimate an early start time, such that:
+ // 1. Every stage is guaranteed to receive at least its duration
+ // 2. The remainder of the window is divided between stages
+ // proportionally to its workload (total memory consumption)
+ long maxIntervalArrival = maxInterval.getStartTime();
+ long maxIntervalDeadline = maxInterval.getEndTime();
+ long window = maxIntervalDeadline - maxIntervalArrival;
+ long windowRemainder = window - totalDuration;
+
+ if (allocateLeft) {
+ long latestEnd =
+ (long) (maxIntervalArrival
+ + getRoundedDuration(currentReservationStage, step)
+ + (windowRemainder * ratio));
+
+ // Realign if necessary (since we did some arithmetic)
+ latestEnd = stepRoundDown(latestEnd, step);
+
+ // Return new interval
+ return new ReservationInterval(maxIntervalArrival, latestEnd);
+ } else {
+ long earlyStart =
+ (long) (maxIntervalDeadline
+ - getRoundedDuration(currentReservationStage, step)
+ - (windowRemainder * ratio));
+
+ // Realign if necessary (since we did some arithmetic)
+ earlyStart = stepRoundUp(earlyStart, step);
+
+ // Return new interval
+ return new ReservationInterval(earlyStart, maxIntervalDeadline);
+ }
+ }
+
+ // Weight = total memory consumption of stage
+ protected double calcWeight(ReservationRequest stage) {
+ return (stage.getDuration() * stage.getCapability().getMemorySize())
+ * (stage.getNumContainers());
+ }
+
+ protected long getRoundedDuration(ReservationRequest stage, Long s) {
+ return stepRoundUp(stage.getDuration(), s);
+ }
+
+ protected static long stepRoundDown(long t, long s) {
+ return (t / s) * s;
+ }
+
+ protected static long stepRoundUp(long t, long s) {
+ return ((t + s - 1) / s) * s;
+ }
+}
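A worked numeric example of the right-to-left branch above, reusing the stepRoundUp() helper; every quantity (window, durations, ratio, step) is illustrative.

public final class ProportionalSplitSketch {
  static long stepRoundUp(long t, long s) {
    return ((t + s - 1) / s) * s;
  }

  public static void main(String[] args) {
    long step = 10;
    long maxIntervalArrival = 0;
    long maxIntervalDeadline = 1000;
    long totalDuration = 400; // sum of rounded durations of remaining stages
    long ownDuration = 100;   // rounded duration of the current stage
    double ratio = 0.5;       // weight of current stage / total weight

    long window = maxIntervalDeadline - maxIntervalArrival; // 1000
    long windowRemainder = window - totalDuration;          // 600
    // The stage keeps its own duration plus its weighted share of the slack:
    long earlyStart = (long) (maxIntervalDeadline - ownDuration
        - windowRemainder * ratio);             // 1000 - 100 - 300 = 600
    earlyStart = stepRoundUp(earlyStart, step); // already step-aligned
    System.out.println("stage interval = [" + earlyStart + ", "
        + maxIntervalDeadline + ")");           // [600, 1000)
  }
}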
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3a615ee/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageExecutionIntervalUnconstrained.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageExecutionIntervalUnconstrained.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageExecutionIntervalUnconstrained.java
new file mode 100644
index 0000000..cccd9d8
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageExecutionIntervalUnconstrained.java
@@ -0,0 +1,73 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.reservation.planning;
+
+import org.apache.hadoop.yarn.api.records.ReservationDefinition;
+import org.apache.hadoop.yarn.api.records.ReservationRequest;
+import org.apache.hadoop.yarn.api.records.ReservationRequestInterpreter;
+import org.apache.hadoop.yarn.server.resourcemanager.reservation.Plan;
+import org.apache.hadoop.yarn.server.resourcemanager.reservation.RLESparseResourceAllocation;
+import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationInterval;
+
+/**
+ * An implementation of {@link StageExecutionInterval} which gives each stage
+ * the maximal possible time interval, given the job constraints. Specifically,
+ * for ANY and ALL jobs, the interval would be [jobArrival, jobDeadline). For
+ * ORDER jobs, the stage cannot start before its predecessors (if allocateLeft
+ * == true) or cannot end before its successors (if allocateLeft == false)
+ */
+public class StageExecutionIntervalUnconstrained implements
+ StageExecutionInterval {
+
+ @Override
+ public ReservationInterval computeExecutionInterval(Plan plan,
+ ReservationDefinition reservation,
+ ReservationRequest currentReservationStage, boolean allocateLeft,
+ RLESparseResourceAllocation allocations) {
+
+ Long stageArrival = reservation.getArrival();
+ Long stageDeadline = reservation.getDeadline();
+
+ ReservationRequestInterpreter jobType =
+ reservation.getReservationRequests().getInterpreter();
+
+ // Left to right
+ if (allocateLeft) {
+ // If ORDER job, change the stage arrival time
+ if ((jobType == ReservationRequestInterpreter.R_ORDER)
+ || (jobType == ReservationRequestInterpreter.R_ORDER_NO_GAP)) {
+ Long allocationEndTime = allocations.getLatestNonNullTime();
+ if (allocationEndTime != -1) {
+ stageArrival = allocationEndTime;
+ }
+ }
+ // Right to left
+ } else {
+ // If ORDER job, change the stage deadline
+ if ((jobType == ReservationRequestInterpreter.R_ORDER)
+ || (jobType == ReservationRequestInterpreter.R_ORDER_NO_GAP)) {
+ Long allocationStartTime = allocations.getEarliestStartTime();
+ if (allocationStartTime != -1) {
+ stageDeadline = allocationStartTime;
+ }
+ }
+ }
+ return new ReservationInterval(stageArrival, stageDeadline);
+ }
+}
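The same decision logic, distilled into a standalone sketch with plain longs in place of the Plan and allocation types; the -1 sentinel mirrors getEarliestStartTime()/getLatestNonNullTime() when nothing has been allocated yet, and the values in main() are illustrative.

public final class UnconstrainedIntervalSketch {
  // Returns {start, end} of the stage execution interval for an ORDER job.
  static long[] interval(long jobArrival, long jobDeadline,
      boolean allocateLeft, long allocEarliestStart, long allocLatestEnd) {
    long start = jobArrival;
    long end = jobDeadline;
    if (allocateLeft && allocLatestEnd != -1) {
      start = allocLatestEnd;    // cannot start before the predecessors end
    } else if (!allocateLeft && allocEarliestStart != -1) {
      end = allocEarliestStart;  // cannot end after the successors start
    }
    return new long[] {start, end};
  }

  public static void main(String[] args) {
    // Job window [0, 100); earlier stages already occupy [0, 40).
    long[] iv = interval(0, 100, true, 0, 40);
    System.out.println("[" + iv[0] + ", " + iv[1] + ")"); // [40, 100)
  }
}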
[41/50] [abbrv] hadoop git commit: HADOOP-14298.
TestHadoopArchiveLogsRunner fails. Contributed by Akira Ajisaka.
Posted by ae...@apache.org.
HADOOP-14298. TestHadoopArchiveLogsRunner fails. Contributed by Akira Ajisaka.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8065129d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8065129d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8065129d
Branch: refs/heads/HDFS-7240
Commit: 8065129d87e17f7d4424b1b1619c2e4acabee199
Parents: d6eed5a
Author: Andrew Wang <wa...@apache.org>
Authored: Sun May 7 13:59:15 2017 -0700
Committer: Andrew Wang <wa...@apache.org>
Committed: Sun May 7 13:59:15 2017 -0700
----------------------------------------------------------------------
hadoop-tools/hadoop-archive-logs/pom.xml | 5 +++++
1 file changed, 5 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8065129d/hadoop-tools/hadoop-archive-logs/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-archive-logs/pom.xml b/hadoop-tools/hadoop-archive-logs/pom.xml
index 8fdad6ea..b45ad98 100644
--- a/hadoop-tools/hadoop-archive-logs/pom.xml
+++ b/hadoop-tools/hadoop-archive-logs/pom.xml
@@ -62,6 +62,11 @@
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-hdfs-client</artifactId>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-server-tests</artifactId>
<type>test-jar</type>
<scope>test</scope>
[02/50] [abbrv] hadoop git commit: YARN-6518. Fix warnings from
Spotbugs in hadoop-yarn-server-timelineservice. Contributed by Weiwei Yang.
Posted by ae...@apache.org.
YARN-6518. Fix warnings from Spotbugs in hadoop-yarn-server-timelineservice. Contributed by Weiwei Yang.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0f1af317
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0f1af317
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0f1af317
Branch: refs/heads/HDFS-7240
Commit: 0f1af3178eb2a38aaaa1e1a27edd409cda19a198
Parents: 4b5bd73
Author: Naganarasimha <na...@apache.org>
Authored: Mon May 1 16:49:30 2017 +0530
Committer: Naganarasimha <na...@apache.org>
Committed: Mon May 1 16:49:30 2017 +0530
----------------------------------------------------------------------
.../storage/FileSystemTimelineReaderImpl.java | 129 ++++++++++---------
1 file changed, 67 insertions(+), 62 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f1af317/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineReaderImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineReaderImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineReaderImpl.java
index cfd5bd4..967702b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineReaderImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineReaderImpl.java
@@ -278,69 +278,74 @@ public class FileSystemTimelineReaderImpl extends AbstractService
}
}
);
- for (File entityFile : dir.listFiles()) {
- if (!entityFile.getName().contains(TIMELINE_SERVICE_STORAGE_EXTENSION)) {
- continue;
- }
- try (BufferedReader reader =
- new BufferedReader(
- new InputStreamReader(
- new FileInputStream(
- entityFile), Charset.forName("UTF-8")))) {
- TimelineEntity entity = readEntityFromFile(reader);
- if (!entity.getType().equals(entityType)) {
- continue;
- }
- if (!isTimeInRange(entity.getCreatedTime(),
- filters.getCreatedTimeBegin(), filters.getCreatedTimeEnd())) {
- continue;
- }
- if (filters.getRelatesTo() != null &&
- !filters.getRelatesTo().getFilterList().isEmpty() &&
- !TimelineStorageUtils.matchRelatesTo(entity,
- filters.getRelatesTo())) {
- continue;
- }
- if (filters.getIsRelatedTo() != null &&
- !filters.getIsRelatedTo().getFilterList().isEmpty() &&
- !TimelineStorageUtils.matchIsRelatedTo(entity,
- filters.getIsRelatedTo())) {
- continue;
- }
- if (filters.getInfoFilters() != null &&
- !filters.getInfoFilters().getFilterList().isEmpty() &&
- !TimelineStorageUtils.matchInfoFilters(entity,
- filters.getInfoFilters())) {
- continue;
- }
- if (filters.getConfigFilters() != null &&
- !filters.getConfigFilters().getFilterList().isEmpty() &&
- !TimelineStorageUtils.matchConfigFilters(entity,
- filters.getConfigFilters())) {
- continue;
- }
- if (filters.getMetricFilters() != null &&
- !filters.getMetricFilters().getFilterList().isEmpty() &&
- !TimelineStorageUtils.matchMetricFilters(entity,
- filters.getMetricFilters())) {
- continue;
- }
- if (filters.getEventFilters() != null &&
- !filters.getEventFilters().getFilterList().isEmpty() &&
- !TimelineStorageUtils.matchEventFilters(entity,
- filters.getEventFilters())) {
- continue;
- }
- TimelineEntity entityToBeReturned = createEntityToBeReturned(
- entity, dataToRetrieve.getFieldsToRetrieve());
- Set<TimelineEntity> entitiesCreatedAtSameTime =
- sortedEntities.get(entityToBeReturned.getCreatedTime());
- if (entitiesCreatedAtSameTime == null) {
- entitiesCreatedAtSameTime = new HashSet<TimelineEntity>();
+ if (dir != null) {
+ File[] files = dir.listFiles();
+ if (files != null) {
+ for (File entityFile : files) {
+ if (!entityFile.getName()
+ .contains(TIMELINE_SERVICE_STORAGE_EXTENSION)) {
+ continue;
+ }
+ try (BufferedReader reader = new BufferedReader(
+ new InputStreamReader(new FileInputStream(entityFile),
+ Charset.forName("UTF-8")))) {
+ TimelineEntity entity = readEntityFromFile(reader);
+ if (!entity.getType().equals(entityType)) {
+ continue;
+ }
+ if (!isTimeInRange(entity.getCreatedTime(),
+ filters.getCreatedTimeBegin(),
+ filters.getCreatedTimeEnd())) {
+ continue;
+ }
+ if (filters.getRelatesTo() != null &&
+ !filters.getRelatesTo().getFilterList().isEmpty() &&
+ !TimelineStorageUtils.matchRelatesTo(entity,
+ filters.getRelatesTo())) {
+ continue;
+ }
+ if (filters.getIsRelatedTo() != null &&
+ !filters.getIsRelatedTo().getFilterList().isEmpty() &&
+ !TimelineStorageUtils.matchIsRelatedTo(entity,
+ filters.getIsRelatedTo())) {
+ continue;
+ }
+ if (filters.getInfoFilters() != null &&
+ !filters.getInfoFilters().getFilterList().isEmpty() &&
+ !TimelineStorageUtils.matchInfoFilters(entity,
+ filters.getInfoFilters())) {
+ continue;
+ }
+ if (filters.getConfigFilters() != null &&
+ !filters.getConfigFilters().getFilterList().isEmpty() &&
+ !TimelineStorageUtils.matchConfigFilters(entity,
+ filters.getConfigFilters())) {
+ continue;
+ }
+ if (filters.getMetricFilters() != null &&
+ !filters.getMetricFilters().getFilterList().isEmpty() &&
+ !TimelineStorageUtils.matchMetricFilters(entity,
+ filters.getMetricFilters())) {
+ continue;
+ }
+ if (filters.getEventFilters() != null &&
+ !filters.getEventFilters().getFilterList().isEmpty() &&
+ !TimelineStorageUtils.matchEventFilters(entity,
+ filters.getEventFilters())) {
+ continue;
+ }
+ TimelineEntity entityToBeReturned = createEntityToBeReturned(
+ entity, dataToRetrieve.getFieldsToRetrieve());
+ Set<TimelineEntity> entitiesCreatedAtSameTime =
+ sortedEntities.get(entityToBeReturned.getCreatedTime());
+ if (entitiesCreatedAtSameTime == null) {
+ entitiesCreatedAtSameTime = new HashSet<TimelineEntity>();
+ }
+ entitiesCreatedAtSameTime.add(entityToBeReturned);
+ sortedEntities.put(entityToBeReturned.getCreatedTime(),
+ entitiesCreatedAtSameTime);
+ }
}
- entitiesCreatedAtSameTime.add(entityToBeReturned);
- sortedEntities.put(
- entityToBeReturned.getCreatedTime(), entitiesCreatedAtSameTime);
}
}
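The essence of the refactoring above is guarding java.io.File#listFiles(), which returns null (rather than an empty array) when the path is not a directory or an I/O error occurs. A minimal sketch of the same pattern in isolation (hypothetical helper, not in the patch):

    import java.io.File;

    // Null-safe directory listing: treat a null result as "no files".
    static File[] safeListFiles(File dir) {
      File[] files = (dir == null) ? null : dir.listFiles();
      return (files == null) ? new File[0] : files;
    }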
[25/50] [abbrv] hadoop git commit: YARN-6374. Improve test coverage
and add utility classes for common Docker operations. Contributed by Shane
Kumpf
Posted by ae...@apache.org.
YARN-6374. Improve test coverage and add utility classes for common Docker operations. Contributed by Shane Kumpf
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fd5cb2c9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fd5cb2c9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fd5cb2c9
Branch: refs/heads/HDFS-7240
Commit: fd5cb2c9468070abdea3305974ecfc3aa4b0be12
Parents: 83dded5
Author: Sidharta S <si...@apache.org>
Authored: Wed May 3 14:49:30 2017 -0700
Committer: Sidharta S <si...@apache.org>
Committed: Wed May 3 14:49:30 2017 -0700
----------------------------------------------------------------------
.../runtime/docker/DockerCommandExecutor.java | 191 ++++++++++++++++
.../linux/runtime/docker/package-info.java | 26 +++
.../MockPrivilegedOperationCaptor.java | 68 ++++++
.../runtime/TestDockerContainerRuntime.java | 13 +-
.../docker/TestDockerCommandExecutor.java | 218 +++++++++++++++++++
.../runtime/docker/TestDockerLoadCommand.java | 48 ++++
.../runtime/docker/TestDockerRunCommand.java | 63 ++++++
7 files changed, 624 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd5cb2c9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerCommandExecutor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerCommandExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerCommandExecutor.java
new file mode 100644
index 0000000..9026d22
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerCommandExecutor.java
@@ -0,0 +1,191 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationException;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationExecutor;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException;
+
+import java.util.Map;
+
+/**
+ * Utility class for executing common docker operations.
+ */
+public final class DockerCommandExecutor {
+ private static final Log LOG = LogFactory.getLog(DockerCommandExecutor.class);
+
+ /**
+ * Potential states that the docker status can return.
+ */
+ public enum DockerContainerStatus {
+ CREATED("created"),
+ RUNNING("running"),
+ STOPPED("stopped"),
+ RESTARTING("restarting"),
+ REMOVING("removing"),
+ DEAD("dead"),
+ EXITED("exited"),
+ NONEXISTENT("nonexistent"),
+ UNKNOWN("unknown");
+
+ private final String name;
+
+ DockerContainerStatus(String name) {
+ this.name = name;
+ }
+
+ public String getName() {
+ return name;
+ }
+ }
+
+ private DockerCommandExecutor() {
+ }
+
+ /**
+ * Execute a docker command and return the output.
+ *
+ * @param dockerCommand the docker command to run.
+ * @param containerId the id of the container.
+ * @param env environment for the container.
+ * @param conf the hadoop configuration.
+ * @param privilegedOperationExecutor the privileged operations executor.
+ * @param disableFailureLogging disable logging for known rc failures.
+ * @return the output of the operation.
+ * @throws ContainerExecutionException if the operation fails.
+ */
+ public static String executeDockerCommand(DockerCommand dockerCommand,
+ String containerId, Map<String, String> env, Configuration conf,
+ PrivilegedOperationExecutor privilegedOperationExecutor,
+ boolean disableFailureLogging)
+ throws ContainerExecutionException {
+ DockerClient dockerClient = new DockerClient(conf);
+ String commandFile =
+ dockerClient.writeCommandToTempFile(dockerCommand, containerId);
+ PrivilegedOperation dockerOp = new PrivilegedOperation(
+ PrivilegedOperation.OperationType.RUN_DOCKER_CMD);
+ dockerOp.appendArgs(commandFile);
+ if (disableFailureLogging) {
+ dockerOp.disableFailureLogging();
+ }
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Running docker command: "
+ + dockerCommand.getCommandWithArguments());
+ }
+ try {
+ String result = privilegedOperationExecutor
+ .executePrivilegedOperation(null, dockerOp, null,
+ env, true, false);
+ if (result != null && !result.isEmpty()) {
+ result = result.trim();
+ }
+ return result;
+ } catch (PrivilegedOperationException e) {
+ throw new ContainerExecutionException("Docker operation failed",
+ e.getExitCode(), e.getOutput(), e.getErrorOutput());
+ }
+ }
+
+ /**
+ * Get the status of the docker container. This runs a docker inspect to
+ * get the status. If the container no longer exists, docker inspect throws
+ * an exception and the nonexistent status is returned.
+ *
+ * @param containerId the id of the container.
+ * @param conf the hadoop configuration.
+ * @param privilegedOperationExecutor the privileged operations executor.
+ * @return a {@link DockerContainerStatus} representing the current status.
+ */
+ public static DockerContainerStatus getContainerStatus(String containerId,
+ Configuration conf,
+ PrivilegedOperationExecutor privilegedOperationExecutor) {
+ try {
+ DockerContainerStatus dockerContainerStatus;
+ String currentContainerStatus =
+ executeStatusCommand(containerId, conf, privilegedOperationExecutor);
+ if (currentContainerStatus == null) {
+ dockerContainerStatus = DockerContainerStatus.UNKNOWN;
+ } else if (currentContainerStatus
+ .equals(DockerContainerStatus.CREATED.getName())) {
+ dockerContainerStatus = DockerContainerStatus.CREATED;
+ } else if (currentContainerStatus
+ .equals(DockerContainerStatus.RUNNING.getName())) {
+ dockerContainerStatus = DockerContainerStatus.RUNNING;
+ } else if (currentContainerStatus
+ .equals(DockerContainerStatus.STOPPED.getName())) {
+ dockerContainerStatus = DockerContainerStatus.STOPPED;
+ } else if (currentContainerStatus
+ .equals(DockerContainerStatus.RESTARTING.getName())) {
+ dockerContainerStatus = DockerContainerStatus.RESTARTING;
+ } else if (currentContainerStatus
+ .equals(DockerContainerStatus.REMOVING.getName())) {
+ dockerContainerStatus = DockerContainerStatus.REMOVING;
+ } else if (currentContainerStatus
+ .equals(DockerContainerStatus.DEAD.getName())) {
+ dockerContainerStatus = DockerContainerStatus.DEAD;
+ } else if (currentContainerStatus
+ .equals(DockerContainerStatus.EXITED.getName())) {
+ dockerContainerStatus = DockerContainerStatus.EXITED;
+ } else if (currentContainerStatus
+ .equals(DockerContainerStatus.NONEXISTENT.getName())) {
+ dockerContainerStatus = DockerContainerStatus.NONEXISTENT;
+ } else {
+ dockerContainerStatus = DockerContainerStatus.UNKNOWN;
+ }
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Container Status: " + dockerContainerStatus.getName()
+ + " ContainerId: " + containerId);
+ }
+ return dockerContainerStatus;
+ } catch (ContainerExecutionException e) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Container Status: "
+ + DockerContainerStatus.NONEXISTENT.getName()
+ + " ContainerId: " + containerId);
+ }
+ return DockerContainerStatus.NONEXISTENT;
+ }
+ }
+
+ /**
+ * Execute the docker inspect command to retrieve the docker container's
+ * status.
+ *
+ * @param containerId the id of the container.
+ * @param conf the hadoop configuration.
+ * @param privilegedOperationExecutor the privileged operations executor.
+ * @return the current container status.
+ * @throws ContainerExecutionException if the docker operation fails to run.
+ */
+ private static String executeStatusCommand(String containerId,
+ Configuration conf,
+ PrivilegedOperationExecutor privilegedOperationExecutor)
+ throws ContainerExecutionException {
+ DockerInspectCommand dockerInspectCommand =
+ new DockerInspectCommand(containerId).getContainerStatus();
+ try {
+ return DockerCommandExecutor.executeDockerCommand(dockerInspectCommand,
+ containerId, null, conf, privilegedOperationExecutor, false);
+ } catch (ContainerExecutionException e) {
+ throw new ContainerExecutionException(e);
+ }
+ }
+}
\ No newline at end of file
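The long if/else chain in getContainerStatus() above maps the string printed by docker inspect onto the DockerContainerStatus enum. A behavior-equivalent, more compact sketch (hypothetical helper, not in the patch) scans the enum's own names:

    // Resolve a docker status string to the enum, defaulting to UNKNOWN.
    static DockerContainerStatus fromName(String name) {
      if (name != null) {
        for (DockerContainerStatus s : DockerContainerStatus.values()) {
          if (s.getName().equals(name)) {
            return s;
          }
        }
      }
      return DockerContainerStatus.UNKNOWN;
    }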
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd5cb2c9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/package-info.java
new file mode 100644
index 0000000..189167c
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/package-info.java
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * Package containing classes related to Docker commands and common operations
+ * used within the {@link DockerLinuxContainerRuntime}.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd5cb2c9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/MockPrivilegedOperationCaptor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/MockPrivilegedOperationCaptor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/MockPrivilegedOperationCaptor.java
new file mode 100644
index 0000000..187da6b
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/MockPrivilegedOperationCaptor.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged;
+
+import org.mockito.ArgumentCaptor;
+import org.mockito.Mockito;
+
+import java.io.File;
+import java.util.List;
+import java.util.Map;
+
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyList;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+
+/**
+ * Captures operations from mock {@link PrivilegedOperation} instances.
+ */
+public final class MockPrivilegedOperationCaptor {
+
+ private MockPrivilegedOperationCaptor() {}
+
+ /**
+ * Capture the operation that should be performed by the
+ * PrivilegedOperationExecutor.
+ *
+ * @param mockExecutor mock PrivilegedOperationExecutor.
+ * @param invocationCount number of invocations expected.
+ * @return a list of operations that were invoked.
+ * @throws PrivilegedOperationException when the operation fails to execute.
+ */
+ @SuppressWarnings("unchecked")
+ public static List<PrivilegedOperation> capturePrivilegedOperations(
+ PrivilegedOperationExecutor mockExecutor, int invocationCount,
+ boolean grabOutput) throws PrivilegedOperationException {
+ ArgumentCaptor<PrivilegedOperation> opCaptor =
+ ArgumentCaptor.forClass(PrivilegedOperation.class);
+
+ //one or more invocations expected
+ //due to type erasure + mocking, this verification requires a suppress
+ // warning annotation on the entire method
+ verify(mockExecutor, times(invocationCount))
+ .executePrivilegedOperation(anyList(), opCaptor.capture(),
+ any(File.class), any(Map.class), eq(grabOutput), eq(false));
+
+ //verification completed. we need to isolate specific invocations.
+ // hence, reset mock here
+ Mockito.reset(mockExecutor);
+
+ return opCaptor.getAllValues();
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd5cb2c9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
index 3253394..ee1f25c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
@@ -816,7 +816,7 @@ public class TestDockerContainerRuntime {
.setExecutionAttribute(USER, user)
.setExecutionAttribute(PID, signalPid)
.setExecutionAttribute(SIGNAL, ContainerExecutor.Signal.NULL);
- runtime.initialize(getConfigurationWithMockContainerExecutor());
+ runtime.initialize(enableMockContainerExecutor(conf));
runtime.signalContainer(builder.build());
PrivilegedOperation op = capturePrivilegedOperation();
@@ -870,7 +870,7 @@ public class TestDockerContainerRuntime {
.setExecutionAttribute(USER, user)
.setExecutionAttribute(PID, signalPid)
.setExecutionAttribute(SIGNAL, signal);
- runtime.initialize(getConfigurationWithMockContainerExecutor());
+ runtime.initialize(enableMockContainerExecutor(conf));
runtime.signalContainer(builder.build());
PrivilegedOperation op = capturePrivilegedOperation();
@@ -881,7 +881,14 @@ public class TestDockerContainerRuntime {
Charset.forName("UTF-8"));
}
- private Configuration getConfigurationWithMockContainerExecutor() {
+ /**
+ * Return a configuration object with the mock container executor binary
+ * preconfigured.
+ *
+ * @param conf The hadoop configuration.
+ * @return The hadoop configuration.
+ */
+ public static Configuration enableMockContainerExecutor(Configuration conf) {
File f = new File("./src/test/resources/mock-container-executor");
if(!FileUtil.canExecute(f)) {
FileUtil.setExecutable(f, true);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd5cb2c9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerCommandExecutor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerCommandExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerCommandExecutor.java
new file mode 100644
index 0000000..60fce40
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerCommandExecutor.java
@@ -0,0 +1,218 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.MockPrivilegedOperationCaptor;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationExecutor;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.CGroupsHandler;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.DockerLinuxContainerRuntime;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.TestDockerContainerRuntime;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntimeContext;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.nio.charset.Charset;
+import java.nio.file.Files;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+
+import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.CONTAINER_ID_STR;
+import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker.DockerCommandExecutor.DockerContainerStatus;
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+/**
+ * Test common docker commands.
+ */
+public class TestDockerCommandExecutor {
+
+ private static final String MOCK_CONTAINER_ID = "container_id";
+ private static final String MOCK_LOCAL_IMAGE_NAME = "local_image_name";
+ private static final String MOCK_IMAGE_NAME = "image_name";
+
+ private PrivilegedOperationExecutor mockExecutor;
+ private CGroupsHandler mockCGroupsHandler;
+ private Configuration configuration;
+ private ContainerRuntimeContext.Builder builder;
+ private DockerLinuxContainerRuntime runtime;
+ private Container container;
+ private ContainerId cId;
+ private ContainerLaunchContext context;
+ private HashMap<String, String> env;
+
+ @Before
+ public void setUp() throws Exception {
+ mockExecutor = mock(PrivilegedOperationExecutor.class);
+ mockCGroupsHandler = mock(CGroupsHandler.class);
+ configuration = new Configuration();
+ runtime = new DockerLinuxContainerRuntime(mockExecutor, mockCGroupsHandler);
+ container = mock(Container.class);
+ cId = mock(ContainerId.class);
+ context = mock(ContainerLaunchContext.class);
+ env = new HashMap<>();
+ builder = new ContainerRuntimeContext.Builder(container);
+
+ when(container.getContainerId()).thenReturn(cId);
+ when(cId.toString()).thenReturn(MOCK_CONTAINER_ID);
+ when(container.getLaunchContext()).thenReturn(context);
+ when(context.getEnvironment()).thenReturn(env);
+
+ builder.setExecutionAttribute(CONTAINER_ID_STR, MOCK_CONTAINER_ID);
+ runtime.initialize(
+ TestDockerContainerRuntime.enableMockContainerExecutor(configuration));
+ }
+
+ @Test
+ public void testExecuteDockerCommand() throws Exception {
+ DockerStopCommand dockerStopCommand =
+ new DockerStopCommand(MOCK_CONTAINER_ID);
+ DockerCommandExecutor
+ .executeDockerCommand(dockerStopCommand, cId.toString(), env,
+ configuration, mockExecutor, false);
+ List<PrivilegedOperation> ops = MockPrivilegedOperationCaptor
+ .capturePrivilegedOperations(mockExecutor, 1, true);
+ assertEquals(1, ops.size());
+ assertEquals(PrivilegedOperation.OperationType.RUN_DOCKER_CMD.name(),
+ ops.get(0).getOperationType().name());
+ }
+
+ @Test
+ public void testExecuteDockerRm() throws Exception {
+ DockerRmCommand dockerCommand = new DockerRmCommand(MOCK_CONTAINER_ID);
+ DockerCommandExecutor
+ .executeDockerCommand(dockerCommand, MOCK_CONTAINER_ID, env,
+ configuration, mockExecutor, false);
+ List<PrivilegedOperation> ops = MockPrivilegedOperationCaptor
+ .capturePrivilegedOperations(mockExecutor, 1, true);
+ List<String> dockerCommands = getValidatedDockerCommands(ops);
+ assertEquals(1, ops.size());
+ assertEquals(PrivilegedOperation.OperationType.RUN_DOCKER_CMD.name(),
+ ops.get(0).getOperationType().name());
+ assertEquals(1, dockerCommands.size());
+ assertEquals("rm " + MOCK_CONTAINER_ID, dockerCommands.get(0));
+ }
+
+ @Test
+ public void testExecuteDockerStop() throws Exception {
+ DockerStopCommand dockerCommand = new DockerStopCommand(MOCK_CONTAINER_ID);
+ DockerCommandExecutor
+ .executeDockerCommand(dockerCommand, MOCK_CONTAINER_ID, env,
+ configuration, mockExecutor, false);
+ List<PrivilegedOperation> ops = MockPrivilegedOperationCaptor
+ .capturePrivilegedOperations(mockExecutor, 1, true);
+ List<String> dockerCommands = getValidatedDockerCommands(ops);
+ assertEquals(1, ops.size());
+ assertEquals(PrivilegedOperation.OperationType.RUN_DOCKER_CMD.name(),
+ ops.get(0).getOperationType().name());
+ assertEquals(1, dockerCommands.size());
+ assertEquals("stop " + MOCK_CONTAINER_ID, dockerCommands.get(0));
+ }
+
+ @Test
+ public void testExecuteDockerInspectStatus() throws Exception {
+ DockerInspectCommand dockerCommand =
+ new DockerInspectCommand(MOCK_CONTAINER_ID).getContainerStatus();
+ DockerCommandExecutor
+ .executeDockerCommand(dockerCommand, MOCK_CONTAINER_ID, env,
+ configuration, mockExecutor, false);
+ List<PrivilegedOperation> ops = MockPrivilegedOperationCaptor
+ .capturePrivilegedOperations(mockExecutor, 1, true);
+ List<String> dockerCommands = getValidatedDockerCommands(ops);
+ assertEquals(1, ops.size());
+ assertEquals(PrivilegedOperation.OperationType.RUN_DOCKER_CMD.name(),
+ ops.get(0).getOperationType().name());
+ assertEquals(1, dockerCommands.size());
+ assertEquals("inspect --format='{{.State.Status}}' " + MOCK_CONTAINER_ID,
+ dockerCommands.get(0));
+ }
+
+ @Test
+ public void testExecuteDockerPull() throws Exception {
+ DockerPullCommand dockerCommand =
+ new DockerPullCommand(MOCK_IMAGE_NAME);
+ DockerCommandExecutor
+ .executeDockerCommand(dockerCommand, MOCK_CONTAINER_ID, env,
+ configuration, mockExecutor, false);
+ List<PrivilegedOperation> ops = MockPrivilegedOperationCaptor
+ .capturePrivilegedOperations(mockExecutor, 1, true);
+ List<String> dockerCommands = getValidatedDockerCommands(ops);
+ assertEquals(1, ops.size());
+ assertEquals(PrivilegedOperation.OperationType.RUN_DOCKER_CMD.name(),
+ ops.get(0).getOperationType().name());
+ assertEquals(1, dockerCommands.size());
+ assertEquals("pull " + MOCK_IMAGE_NAME, dockerCommands.get(0));
+ }
+
+ @Test
+ public void testExecuteDockerLoad() throws Exception {
+ DockerLoadCommand dockerCommand =
+ new DockerLoadCommand(MOCK_LOCAL_IMAGE_NAME);
+ DockerCommandExecutor
+ .executeDockerCommand(dockerCommand, MOCK_CONTAINER_ID, env,
+ configuration, mockExecutor, false);
+ List<PrivilegedOperation> ops = MockPrivilegedOperationCaptor
+ .capturePrivilegedOperations(mockExecutor, 1, true);
+ List<String> dockerCommands = getValidatedDockerCommands(ops);
+ assertEquals(1, ops.size());
+ assertEquals(PrivilegedOperation.OperationType.RUN_DOCKER_CMD.name(),
+ ops.get(0).getOperationType().name());
+ assertEquals(1, dockerCommands.size());
+ assertEquals("load --i=" + MOCK_LOCAL_IMAGE_NAME, dockerCommands.get(0));
+ }
+
+ @Test
+ public void testGetContainerStatus() throws Exception {
+ for (DockerContainerStatus status : DockerContainerStatus.values()) {
+ when(mockExecutor.executePrivilegedOperation(eq(null),
+ any(PrivilegedOperation.class), eq(null), any(), eq(true), eq(false)))
+ .thenReturn(status.getName());
+ assertEquals(status, DockerCommandExecutor
+ .getContainerStatus(MOCK_CONTAINER_ID, configuration, mockExecutor));
+ }
+ }
+
+ private List<String> getValidatedDockerCommands(
+ List<PrivilegedOperation> ops) throws IOException {
+ try {
+ List<String> dockerCommands = new ArrayList<>();
+ for (PrivilegedOperation op : ops) {
+ Assert.assertEquals(op.getOperationType(),
+ PrivilegedOperation.OperationType.RUN_DOCKER_CMD);
+ String dockerCommandFile = op.getArguments().get(0);
+ List<String> dockerCommandFileContents = Files
+ .readAllLines(Paths.get(dockerCommandFile),
+ Charset.forName("UTF-8"));
+ dockerCommands.addAll(dockerCommandFileContents);
+ }
+ return dockerCommands;
+ } catch (IOException e) {
+ throw new IOException("Unable to read the docker command file.", e);
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd5cb2c9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerLoadCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerLoadCommand.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerLoadCommand.java
new file mode 100644
index 0000000..85fa0f8
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerLoadCommand.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker;
+
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * Tests the docker load command and its command
+ * line arguments.
+ */
+public class TestDockerLoadCommand {
+ private DockerLoadCommand dockerLoadCommand;
+
+ private static final String LOCAL_IMAGE_NAME = "foo";
+
+ @Before
+ public void setup() {
+ dockerLoadCommand = new DockerLoadCommand(LOCAL_IMAGE_NAME);
+ }
+
+ @Test
+ public void testGetCommandOption() {
+ assertEquals("load", dockerLoadCommand.getCommandOption());
+ }
+
+ @Test
+ public void testGetCommandWithArguments() {
+ assertEquals("load --i=foo",
+ dockerLoadCommand.getCommandWithArguments());
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd5cb2c9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerRunCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerRunCommand.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerRunCommand.java
new file mode 100644
index 0000000..85bccd2
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerRunCommand.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker;
+
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * Tests the docker run command and its command
+ * line arguments.
+ */
+
+public class TestDockerRunCommand {
+ private DockerRunCommand dockerRunCommand;
+
+ private static final String CONTAINER_NAME = "foo";
+ private static final String USER_ID = "user_id";
+ private static final String IMAGE_NAME = "image_name";
+
+ @Before
+ public void setUp() throws Exception {
+ dockerRunCommand = new DockerRunCommand(CONTAINER_NAME, USER_ID,
+ IMAGE_NAME);
+ }
+
+ @Test
+ public void testGetCommandOption() {
+ assertEquals("run", dockerRunCommand.getCommandOption());
+ }
+
+ @Test
+ public void testCommandArguments() {
+ String sourceDevice = "source";
+ String destDevice = "dest";
+ dockerRunCommand.addDevice(sourceDevice, destDevice);
+ List<String> commands = new ArrayList<>();
+ commands.add("launch_command");
+ dockerRunCommand.setOverrideCommandWithArgs(commands);
+ dockerRunCommand.removeContainerOnExit();
+ assertEquals("run --name=foo --user=user_id --device=source:dest --rm "
+ + "image_name launch_command",
+ dockerRunCommand.getCommandWithArguments());
+ }
+}
\ No newline at end of file
[38/50] [abbrv] hadoop git commit: HDFS-9807. Add an optional
StorageID to writes. Contributed by Ewan Higgs
Posted by ae...@apache.org.
HDFS-9807. Add an optional StorageID to writes. Contributed by Ewan Higgs
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a3954cca
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a3954cca
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a3954cca
Branch: refs/heads/HDFS-7240
Commit: a3954ccab148bddc290cb96528e63ff19799bcc9
Parents: 4e6bbd0
Author: Chris Douglas <cd...@apache.org>
Authored: Fri May 5 12:01:26 2017 -0700
Committer: Chris Douglas <cd...@apache.org>
Committed: Fri May 5 12:01:26 2017 -0700
----------------------------------------------------------------------
.../org/apache/hadoop/hdfs/DataStreamer.java | 35 +-
.../apache/hadoop/hdfs/StripedDataStreamer.java | 10 +-
.../datatransfer/DataTransferProtocol.java | 19 +-
.../hdfs/protocol/datatransfer/Sender.java | 29 +-
.../hadoop/hdfs/protocolPB/PBHelperClient.java | 13 +
.../token/block/BlockTokenIdentifier.java | 36 +-
.../src/main/proto/datatransfer.proto | 4 +
.../src/main/proto/hdfs.proto | 1 +
.../hdfs/protocol/datatransfer/Receiver.java | 20 +-
.../block/BlockPoolTokenSecretManager.java | 22 +-
.../token/block/BlockTokenSecretManager.java | 55 ++--
.../hadoop/hdfs/server/balancer/Dispatcher.java | 5 +-
.../hadoop/hdfs/server/balancer/KeyManager.java | 4 +-
.../server/blockmanagement/BlockManager.java | 6 +-
.../hdfs/server/datanode/BPOfferService.java | 3 +-
.../hdfs/server/datanode/BlockReceiver.java | 12 +-
.../hadoop/hdfs/server/datanode/DataNode.java | 42 ++-
.../hdfs/server/datanode/DataXceiver.java | 68 ++--
.../erasurecode/ErasureCodingWorker.java | 3 +-
.../erasurecode/StripedBlockReader.java | 2 +-
.../erasurecode/StripedBlockWriter.java | 10 +-
.../erasurecode/StripedReconstructionInfo.java | 16 +-
.../datanode/erasurecode/StripedWriter.java | 5 +-
.../AvailableSpaceVolumeChoosingPolicy.java | 20 +-
.../server/datanode/fsdataset/FsDatasetSpi.java | 6 +-
.../RoundRobinVolumeChoosingPolicy.java | 2 +-
.../fsdataset/VolumeChoosingPolicy.java | 5 +-
.../datanode/fsdataset/impl/FsDatasetImpl.java | 21 +-
.../datanode/fsdataset/impl/FsVolumeList.java | 19 +-
.../org/apache/hadoop/hdfs/DFSTestUtil.java | 3 +-
.../hadoop/hdfs/TestBlockStoragePolicy.java | 75 ++++-
.../hadoop/hdfs/TestDataTransferProtocol.java | 3 +-
.../hdfs/TestWriteBlockGetsBlockLengthHint.java | 6 +-
.../security/token/block/TestBlockToken.java | 98 +++---
.../server/datanode/BlockReportTestBase.java | 2 +-
.../server/datanode/SimulatedFSDataset.java | 19 +-
.../hdfs/server/datanode/TestBlockRecovery.java | 6 +-
.../server/datanode/TestBlockReplacement.java | 2 +-
.../TestDataXceiverLazyPersistHint.java | 4 +-
.../hdfs/server/datanode/TestDiskError.java | 5 +-
.../server/datanode/TestSimulatedFSDataset.java | 4 +-
.../extdataset/ExternalDatasetImpl.java | 10 +-
.../TestAvailableSpaceVolumeChoosingPolicy.java | 76 +++--
.../TestRoundRobinVolumeChoosingPolicy.java | 29 +-
.../fsdataset/impl/TestFsDatasetImpl.java | 4 +-
.../fsdataset/impl/TestFsVolumeList.java | 2 +-
.../fsdataset/impl/TestWriteToReplica.java | 29 +-
.../namenode/TestNamenodeStorageDirectives.java | 330 +++++++++++++++++++
48 files changed, 903 insertions(+), 297 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3954cca/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
index 0268537..49c17b9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
@@ -174,10 +174,12 @@ class DataStreamer extends Daemon {
void sendTransferBlock(final DatanodeInfo[] targets,
final StorageType[] targetStorageTypes,
+ final String[] targetStorageIDs,
final Token<BlockTokenIdentifier> blockToken) throws IOException {
//send the TRANSFER_BLOCK request
new Sender(out).transferBlock(block.getCurrentBlock(), blockToken,
- dfsClient.clientName, targets, targetStorageTypes);
+ dfsClient.clientName, targets, targetStorageTypes,
+ targetStorageIDs);
out.flush();
//ack
BlockOpResponseProto transferResponse = BlockOpResponseProto
@@ -1367,9 +1369,11 @@ class DataStreamer extends Daemon {
final DatanodeInfo src = original[tried % original.length];
final DatanodeInfo[] targets = {nodes[d]};
final StorageType[] targetStorageTypes = {storageTypes[d]};
+ final String[] targetStorageIDs = {storageIDs[d]};
try {
- transfer(src, targets, targetStorageTypes, lb.getBlockToken());
+ transfer(src, targets, targetStorageTypes, targetStorageIDs,
+ lb.getBlockToken());
} catch (IOException ioe) {
DFSClient.LOG.warn("Error transferring data from " + src + " to " +
nodes[d] + ": " + ioe.getMessage());
@@ -1400,6 +1404,7 @@ class DataStreamer extends Daemon {
private void transfer(final DatanodeInfo src, final DatanodeInfo[] targets,
final StorageType[] targetStorageTypes,
+ final String[] targetStorageIDs,
final Token<BlockTokenIdentifier> blockToken)
throws IOException {
//transfer replica to the new datanode
@@ -1412,7 +1417,8 @@ class DataStreamer extends Daemon {
streams = new StreamerStreams(src, writeTimeout, readTimeout,
blockToken);
- streams.sendTransferBlock(targets, targetStorageTypes, blockToken);
+ streams.sendTransferBlock(targets, targetStorageTypes,
+ targetStorageIDs, blockToken);
return;
} catch (InvalidEncryptionKeyException e) {
policy.recordFailure(e);
@@ -1440,11 +1446,12 @@ class DataStreamer extends Daemon {
streamerClosed = true;
return;
}
- setupPipelineInternal(nodes, storageTypes);
+ setupPipelineInternal(nodes, storageTypes, storageIDs);
}
protected void setupPipelineInternal(DatanodeInfo[] datanodes,
- StorageType[] nodeStorageTypes) throws IOException {
+ StorageType[] nodeStorageTypes, String[] nodeStorageIDs)
+ throws IOException {
boolean success = false;
long newGS = 0L;
while (!success && !streamerClosed && dfsClient.clientRunning) {
@@ -1465,7 +1472,8 @@ class DataStreamer extends Daemon {
accessToken = lb.getBlockToken();
// set up the pipeline again with the remaining nodes
- success = createBlockOutputStream(nodes, storageTypes, newGS, isRecovery);
+ success = createBlockOutputStream(nodes, storageTypes, storageIDs, newGS,
+ isRecovery);
failPacket4Testing();
@@ -1601,7 +1609,8 @@ class DataStreamer extends Daemon {
protected LocatedBlock nextBlockOutputStream() throws IOException {
LocatedBlock lb;
DatanodeInfo[] nodes;
- StorageType[] storageTypes;
+ StorageType[] nextStorageTypes;
+ String[] nextStorageIDs;
int count = dfsClient.getConf().getNumBlockWriteRetry();
boolean success;
final ExtendedBlock oldBlock = block.getCurrentBlock();
@@ -1617,10 +1626,12 @@ class DataStreamer extends Daemon {
bytesSent = 0;
accessToken = lb.getBlockToken();
nodes = lb.getLocations();
- storageTypes = lb.getStorageTypes();
+ nextStorageTypes = lb.getStorageTypes();
+ nextStorageIDs = lb.getStorageIDs();
// Connect to first DataNode in the list.
- success = createBlockOutputStream(nodes, storageTypes, 0L, false);
+ success = createBlockOutputStream(nodes, nextStorageTypes, nextStorageIDs,
+ 0L, false);
if (!success) {
LOG.warn("Abandoning " + block);
@@ -1643,7 +1654,8 @@ class DataStreamer extends Daemon {
// Returns true if success, otherwise return failure.
//
boolean createBlockOutputStream(DatanodeInfo[] nodes,
- StorageType[] nodeStorageTypes, long newGS, boolean recoveryFlag) {
+ StorageType[] nodeStorageTypes, String[] nodeStorageIDs,
+ long newGS, boolean recoveryFlag) {
if (nodes.length == 0) {
LOG.info("nodes are empty for write pipeline of " + block);
return false;
@@ -1696,7 +1708,8 @@ class DataStreamer extends Daemon {
dfsClient.clientName, nodes, nodeStorageTypes, null, bcs,
nodes.length, block.getNumBytes(), bytesSent, newGS,
checksum4WriteBlock, cachingStrategy.get(), isLazyPersistFile,
- (targetPinnings != null && targetPinnings[0]), targetPinnings);
+ (targetPinnings != null && targetPinnings[0]), targetPinnings,
+ nodeStorageIDs[0], nodeStorageIDs);
// receive ack for connect
BlockOpResponseProto resp = BlockOpResponseProto.parseFrom(
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3954cca/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/StripedDataStreamer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/StripedDataStreamer.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/StripedDataStreamer.java
index b457edb..d920f18 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/StripedDataStreamer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/StripedDataStreamer.java
@@ -100,9 +100,11 @@ public class StripedDataStreamer extends DataStreamer {
DatanodeInfo[] nodes = lb.getLocations();
StorageType[] storageTypes = lb.getStorageTypes();
+ String[] storageIDs = lb.getStorageIDs();
// Connect to the DataNode. If fail the internal error state will be set.
- success = createBlockOutputStream(nodes, storageTypes, 0L, false);
+ success = createBlockOutputStream(nodes, storageTypes, storageIDs, 0L,
+ false);
if (!success) {
block.setCurrentBlock(null);
@@ -121,7 +123,8 @@ public class StripedDataStreamer extends DataStreamer {
@Override
protected void setupPipelineInternal(DatanodeInfo[] nodes,
- StorageType[] nodeStorageTypes) throws IOException {
+ StorageType[] nodeStorageTypes, String[] nodeStorageIDs)
+ throws IOException {
boolean success = false;
while (!success && !streamerClosed() && dfsClient.clientRunning) {
if (!handleRestartingDatanode()) {
@@ -141,7 +144,8 @@ public class StripedDataStreamer extends DataStreamer {
// set up the pipeline again with the remaining nodes. when a striped
// data streamer comes here, it must be in external error state.
assert getErrorState().hasExternalError();
- success = createBlockOutputStream(nodes, nodeStorageTypes, newGS, true);
+ success = createBlockOutputStream(nodes, nodeStorageTypes,
+ nodeStorageIDs, newGS, true);
failPacket4Testing();
getErrorState().checkRestartingNodeDeadline(nodes);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3954cca/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtocol.java
index 6c5883c..fe20c37 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtocol.java
@@ -101,6 +101,11 @@ public interface DataTransferProtocol {
* written to disk lazily
* @param pinning whether to pin the block, so Balancer won't move it.
* @param targetPinnings whether to pin the block on target datanode
+ * @param storageID optional StorageIDs designating where to write the
+ * block. An empty String or null indicates that this
+ * has not been provided.
+ * @param targetStorageIDs target StorageIDs corresponding to the target
+ * datanodes.
*/
void writeBlock(final ExtendedBlock blk,
final StorageType storageType,
@@ -118,7 +123,9 @@ public interface DataTransferProtocol {
final CachingStrategy cachingStrategy,
final boolean allowLazyPersist,
final boolean pinning,
- final boolean[] targetPinnings) throws IOException;
+ final boolean[] targetPinnings,
+ final String storageID,
+ final String[] targetStorageIDs) throws IOException;
/**
* Transfer a block to another datanode.
* The block stage must be
@@ -129,12 +136,15 @@ public interface DataTransferProtocol {
* @param blockToken security token for accessing the block.
* @param clientName client's name.
* @param targets target datanodes.
+ * @param targetStorageIDs StorageID designating where to write the
+ * block.
*/
void transferBlock(final ExtendedBlock blk,
final Token<BlockTokenIdentifier> blockToken,
final String clientName,
final DatanodeInfo[] targets,
- final StorageType[] targetStorageTypes) throws IOException;
+ final StorageType[] targetStorageTypes,
+ final String[] targetStorageIDs) throws IOException;
/**
* Request short circuit access file descriptors from a DataNode.
@@ -179,12 +189,15 @@ public interface DataTransferProtocol {
* @param blockToken security token for accessing the block.
* @param delHint the hint for deleting the block in the original datanode.
* @param source the source datanode for receiving the block.
+ * @param storageId an optional storage ID to designate where the block is
+ * replaced to.
*/
void replaceBlock(final ExtendedBlock blk,
final StorageType storageType,
final Token<BlockTokenIdentifier> blockToken,
final String delHint,
- final DatanodeInfo source) throws IOException;
+ final DatanodeInfo source,
+ final String storageId) throws IOException;
/**
* Copy a block.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3954cca/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java
index e133975..8a8d20d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java
@@ -22,6 +22,7 @@ import static org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;
+import java.util.Arrays;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@@ -132,7 +133,9 @@ public class Sender implements DataTransferProtocol {
final CachingStrategy cachingStrategy,
final boolean allowLazyPersist,
final boolean pinning,
- final boolean[] targetPinnings) throws IOException {
+ final boolean[] targetPinnings,
+ final String storageId,
+ final String[] targetStorageIds) throws IOException {
ClientOperationHeaderProto header = DataTransferProtoUtil.buildClientHeader(
blk, clientName, blockToken);
@@ -154,11 +157,14 @@ public class Sender implements DataTransferProtocol {
.setCachingStrategy(getCachingStrategy(cachingStrategy))
.setAllowLazyPersist(allowLazyPersist)
.setPinning(pinning)
- .addAllTargetPinnings(PBHelperClient.convert(targetPinnings, 1));
-
+ .addAllTargetPinnings(PBHelperClient.convert(targetPinnings, 1))
+ .addAllTargetStorageIds(PBHelperClient.convert(targetStorageIds, 1));
if (source != null) {
proto.setSource(PBHelperClient.convertDatanodeInfo(source));
}
+ if (storageId != null) {
+ proto.setStorageId(storageId);
+ }
send(out, Op.WRITE_BLOCK, proto.build());
}
@@ -168,7 +174,8 @@ public class Sender implements DataTransferProtocol {
final Token<BlockTokenIdentifier> blockToken,
final String clientName,
final DatanodeInfo[] targets,
- final StorageType[] targetStorageTypes) throws IOException {
+ final StorageType[] targetStorageTypes,
+ final String[] targetStorageIds) throws IOException {
OpTransferBlockProto proto = OpTransferBlockProto.newBuilder()
.setHeader(DataTransferProtoUtil.buildClientHeader(
@@ -176,6 +183,7 @@ public class Sender implements DataTransferProtocol {
.addAllTargets(PBHelperClient.convert(targets))
.addAllTargetStorageTypes(
PBHelperClient.convertStorageTypes(targetStorageTypes))
+ .addAllTargetStorageIds(Arrays.asList(targetStorageIds))
.build();
send(out, Op.TRANSFER_BLOCK, proto);
@@ -233,15 +241,18 @@ public class Sender implements DataTransferProtocol {
final StorageType storageType,
final Token<BlockTokenIdentifier> blockToken,
final String delHint,
- final DatanodeInfo source) throws IOException {
- OpReplaceBlockProto proto = OpReplaceBlockProto.newBuilder()
+ final DatanodeInfo source,
+ final String storageId) throws IOException {
+ OpReplaceBlockProto.Builder proto = OpReplaceBlockProto.newBuilder()
.setHeader(DataTransferProtoUtil.buildBaseHeader(blk, blockToken))
.setStorageType(PBHelperClient.convertStorageType(storageType))
.setDelHint(delHint)
- .setSource(PBHelperClient.convertDatanodeInfo(source))
- .build();
+ .setSource(PBHelperClient.convertDatanodeInfo(source));
+ if (storageId != null) {
+ proto.setStorageId(storageId);
+ }
- send(out, Op.REPLACE_BLOCK, proto);
+ send(out, Op.REPLACE_BLOCK, proto.build());
}
@Override
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3954cca/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
index 2b8f102..614f653 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
@@ -345,6 +345,16 @@ public class PBHelperClient {
return pinnings;
}
+ public static List<String> convert(String[] targetIds, int idx) {
+ List<String> ids = new ArrayList<>();
+ if (targetIds != null) {
+ for (; idx < targetIds.length; ++idx) {
+ ids.add(targetIds[idx]);
+ }
+ }
+ return ids;
+ }
+
public static ExtendedBlock convert(ExtendedBlockProto eb) {
if (eb == null) return null;
return new ExtendedBlock( eb.getPoolId(), eb.getBlockId(), eb.getNumBytes(),
@@ -640,6 +650,9 @@ public class PBHelperClient {
for (StorageType storageType : blockTokenSecret.getStorageTypes()) {
builder.addStorageTypes(convertStorageType(storageType));
}
+ for (String storageId : blockTokenSecret.getStorageIds()) {
+ builder.addStorageIds(storageId);
+ }
return builder.build();
}
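The new convert(String[], int) overload mirrors the existing targetPinnings helper: it serializes the array from the given offset, so index 0, which already travels in the scalar field, is not sent twice. A standalone copy of the same logic with a usage check (hypothetical demo class, illustrative DS-* IDs):

  import java.util.ArrayList;
  import java.util.List;

  public class ConvertOffsetDemo {
    // Same logic as the convert(String[], int) added above: copy entries
    // starting at idx and tolerate a null input array.
    static List<String> convert(String[] targetIds, int idx) {
      List<String> ids = new ArrayList<>();
      if (targetIds != null) {
        for (; idx < targetIds.length; ++idx) {
          ids.add(targetIds[idx]);
        }
      }
      return ids;
    }

    public static void main(String[] args) {
      String[] pipeline = {"DS-1", "DS-2", "DS-3"}; // illustrative
      System.out.println(convert(pipeline, 1));     // [DS-2, DS-3]
      System.out.println(convert(null, 1));         // []
    }
  }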
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3954cca/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java
index 228a7b6..5950752 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java
@@ -53,16 +53,19 @@ public class BlockTokenIdentifier extends TokenIdentifier {
private long blockId;
private final EnumSet<AccessMode> modes;
private StorageType[] storageTypes;
+ private String[] storageIds;
private boolean useProto;
private byte [] cache;
public BlockTokenIdentifier() {
- this(null, null, 0, EnumSet.noneOf(AccessMode.class), null, false);
+ this(null, null, 0, EnumSet.noneOf(AccessMode.class), null, null,
+ false);
}
public BlockTokenIdentifier(String userId, String bpid, long blockId,
- EnumSet<AccessMode> modes, StorageType[] storageTypes, boolean useProto) {
+ EnumSet<AccessMode> modes, StorageType[] storageTypes,
+ String[] storageIds, boolean useProto) {
this.cache = null;
this.userId = userId;
this.blockPoolId = bpid;
@@ -70,6 +73,8 @@ public class BlockTokenIdentifier extends TokenIdentifier {
this.modes = modes == null ? EnumSet.noneOf(AccessMode.class) : modes;
this.storageTypes = Optional.ofNullable(storageTypes)
.orElse(StorageType.EMPTY_ARRAY);
+ this.storageIds = Optional.ofNullable(storageIds)
+ .orElse(new String[0]);
this.useProto = useProto;
}
@@ -125,6 +130,10 @@ public class BlockTokenIdentifier extends TokenIdentifier {
return storageTypes;
}
+ public String[] getStorageIds() {
+ return storageIds;
+ }
+
@Override
public String toString() {
return "block_token_identifier (expiryDate=" + this.getExpiryDate()
@@ -132,7 +141,8 @@ public class BlockTokenIdentifier extends TokenIdentifier {
+ ", blockPoolId=" + this.getBlockPoolId()
+ ", blockId=" + this.getBlockId() + ", access modes="
+ this.getAccessModes() + ", storageTypes= "
- + Arrays.toString(this.getStorageTypes()) + ")";
+ + Arrays.toString(this.getStorageTypes()) + ", storageIds= "
+ + Arrays.toString(this.getStorageIds()) + ")";
}
static boolean isEqual(Object a, Object b) {
@@ -151,7 +161,8 @@ public class BlockTokenIdentifier extends TokenIdentifier {
&& isEqual(this.blockPoolId, that.blockPoolId)
&& this.blockId == that.blockId
&& isEqual(this.modes, that.modes)
- && Arrays.equals(this.storageTypes, that.storageTypes);
+ && Arrays.equals(this.storageTypes, that.storageTypes)
+ && Arrays.equals(this.storageIds, that.storageIds);
}
return false;
}
@@ -161,7 +172,8 @@ public class BlockTokenIdentifier extends TokenIdentifier {
return (int) expiryDate ^ keyId ^ (int) blockId ^ modes.hashCode()
^ (userId == null ? 0 : userId.hashCode())
^ (blockPoolId == null ? 0 : blockPoolId.hashCode())
- ^ (storageTypes == null ? 0 : Arrays.hashCode(storageTypes));
+ ^ (storageTypes == null ? 0 : Arrays.hashCode(storageTypes))
+ ^ (storageIds == null ? 0 : Arrays.hashCode(storageIds));
}
/**
@@ -220,6 +232,14 @@ public class BlockTokenIdentifier extends TokenIdentifier {
readStorageTypes[i] = WritableUtils.readEnum(in, StorageType.class);
}
storageTypes = readStorageTypes;
+
+ length = WritableUtils.readVInt(in);
+ String[] readStorageIds = new String[length];
+ for (int i = 0; i < length; i++) {
+ readStorageIds[i] = WritableUtils.readString(in);
+ }
+ storageIds = readStorageIds;
+
useProto = false;
}
@@ -248,6 +268,8 @@ public class BlockTokenIdentifier extends TokenIdentifier {
storageTypes = blockTokenSecretProto.getStorageTypesList().stream()
.map(PBHelperClient::convertStorageType)
.toArray(StorageType[]::new);
+ storageIds = blockTokenSecretProto.getStorageIdsList().stream()
+ .toArray(String[]::new);
useProto = true;
}
@@ -275,6 +297,10 @@ public class BlockTokenIdentifier extends TokenIdentifier {
for (StorageType type: storageTypes){
WritableUtils.writeEnum(out, type);
}
+ WritableUtils.writeVInt(out, storageIds.length);
+ for (String id: storageIds) {
+ WritableUtils.writeString(out, id);
+ }
}
@VisibleForTesting
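On the legacy (non-protobuf) path, the storage IDs are appended to the writable layout as a vInt count followed by one string per ID, symmetric with the readFields change above. A self-contained analogue of that round trip, using writeInt/writeUTF in place of Hadoop's WritableUtils vInt and string encodings (the bytes differ, but the length-prefix-plus-strings structure is the same; the class name is hypothetical):

  import java.io.*;
  import java.util.Arrays;

  public class StorageIdRoundTrip {
    public static void main(String[] args) throws IOException {
      String[] storageIds = {"DS-1", "DS-2"}; // illustrative
      ByteArrayOutputStream buf = new ByteArrayOutputStream();
      try (DataOutputStream out = new DataOutputStream(buf)) {
        out.writeInt(storageIds.length);      // count, then each ID
        for (String id : storageIds) {
          out.writeUTF(id);
        }
      }
      try (DataInputStream in = new DataInputStream(
          new ByteArrayInputStream(buf.toByteArray()))) {
        String[] read = new String[in.readInt()];
        for (int i = 0; i < read.length; i++) {
          read[i] = in.readUTF();
        }
        System.out.println(Arrays.toString(read)); // [DS-1, DS-2]
      }
    }
  }

Note that the field is written unconditionally on this path, so both ends of a legacy exchange must agree on the extended layout.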
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3954cca/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/datatransfer.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/datatransfer.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/datatransfer.proto
index 889361a..2356201 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/datatransfer.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/datatransfer.proto
@@ -125,12 +125,15 @@ message OpWriteBlockProto {
//whether to pin the block, so Balancer won't move it.
optional bool pinning = 14 [default = false];
repeated bool targetPinnings = 15;
+ optional string storageId = 16;
+ repeated string targetStorageIds = 17;
}
message OpTransferBlockProto {
required ClientOperationHeaderProto header = 1;
repeated DatanodeInfoProto targets = 2;
repeated StorageTypeProto targetStorageTypes = 3;
+ repeated string targetStorageIds = 4;
}
message OpReplaceBlockProto {
@@ -138,6 +141,7 @@ message OpReplaceBlockProto {
required string delHint = 2;
required DatanodeInfoProto source = 3;
optional StorageTypeProto storageType = 4 [default = DISK];
+ optional string storageId = 5;
}
message OpCopyBlockProto {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3954cca/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
index 3e27427..08ed3c8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
@@ -570,4 +570,5 @@ message BlockTokenSecretProto {
optional uint64 blockId = 5;
repeated AccessModeProto modes = 6;
repeated StorageTypeProto storageTypes = 7;
+ repeated string storageIds = 8;
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3954cca/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java
index 08ab967..bab2e8d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java
@@ -25,7 +25,9 @@ import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.StripedBlockInfo;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto;
@@ -185,7 +187,9 @@ public abstract class Receiver implements DataTransferProtocol {
CachingStrategy.newDefaultStrategy()),
(proto.hasAllowLazyPersist() ? proto.getAllowLazyPersist() : false),
(proto.hasPinning() ? proto.getPinning(): false),
- (PBHelperClient.convertBooleanList(proto.getTargetPinningsList())));
+ (PBHelperClient.convertBooleanList(proto.getTargetPinningsList())),
+ proto.getStorageId(),
+ proto.getTargetStorageIdsList().toArray(new String[0]));
} finally {
if (traceScope != null) traceScope.close();
}
@@ -199,11 +203,18 @@ public abstract class Receiver implements DataTransferProtocol {
TraceScope traceScope = continueTraceSpan(proto.getHeader(),
proto.getClass().getSimpleName());
try {
- transferBlock(PBHelperClient.convert(proto.getHeader().getBaseHeader().getBlock()),
+ final ExtendedBlock block =
+ PBHelperClient.convert(proto.getHeader().getBaseHeader().getBlock());
+ final StorageType[] targetStorageTypes =
+ PBHelperClient.convertStorageTypes(proto.getTargetStorageTypesList(),
+ targets.length);
+ transferBlock(block,
PBHelperClient.convert(proto.getHeader().getBaseHeader().getToken()),
proto.getHeader().getClientName(),
targets,
- PBHelperClient.convertStorageTypes(proto.getTargetStorageTypesList(), targets.length));
+ targetStorageTypes,
+ proto.getTargetStorageIdsList().toArray(new String[0]));
} finally {
if (traceScope != null) traceScope.close();
}
@@ -264,7 +275,8 @@ public abstract class Receiver implements DataTransferProtocol {
PBHelperClient.convertStorageType(proto.getStorageType()),
PBHelperClient.convert(proto.getHeader().getToken()),
proto.getDelHint(),
- PBHelperClient.convert(proto.getSource()));
+ PBHelperClient.convert(proto.getSource()),
+ proto.getStorageId());
} finally {
if (traceScope != null) traceScope.close();
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3954cca/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockPoolTokenSecretManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockPoolTokenSecretManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockPoolTokenSecretManager.java
index 29fb73f..8400b4f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockPoolTokenSecretManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockPoolTokenSecretManager.java
@@ -84,25 +84,27 @@ public class BlockPoolTokenSecretManager extends
/**
* See {@link BlockTokenSecretManager#checkAccess(BlockTokenIdentifier,
* String, ExtendedBlock, BlockTokenIdentifier.AccessMode,
- * StorageType[])}
+ * StorageType[], String[])}
*/
public void checkAccess(BlockTokenIdentifier id, String userId,
ExtendedBlock block, AccessMode mode,
- StorageType[] storageTypes) throws InvalidToken {
+ StorageType[] storageTypes, String[] storageIds)
+ throws InvalidToken {
get(block.getBlockPoolId()).checkAccess(id, userId, block, mode,
- storageTypes);
+ storageTypes, storageIds);
}
/**
* See {@link BlockTokenSecretManager#checkAccess(Token, String,
* ExtendedBlock, BlockTokenIdentifier.AccessMode,
- * StorageType[])}.
+ * StorageType[], String[])}.
*/
public void checkAccess(Token<BlockTokenIdentifier> token,
String userId, ExtendedBlock block, AccessMode mode,
- StorageType[] storageTypes) throws InvalidToken {
+ StorageType[] storageTypes, String[] storageIds)
+ throws InvalidToken {
get(block.getBlockPoolId()).checkAccess(token, userId, block, mode,
- storageTypes);
+ storageTypes, storageIds);
}
/**
@@ -115,11 +117,13 @@ public class BlockPoolTokenSecretManager extends
/**
* See {@link BlockTokenSecretManager#generateToken(ExtendedBlock, EnumSet,
- * StorageType[])}
+ * StorageType[], String[])}.
*/
public Token<BlockTokenIdentifier> generateToken(ExtendedBlock b,
- EnumSet<AccessMode> of, StorageType[] storageTypes) throws IOException {
- return get(b.getBlockPoolId()).generateToken(b, of, storageTypes);
+ EnumSet<AccessMode> of, StorageType[] storageTypes, String[] storageIds)
+ throws IOException {
+ return get(b.getBlockPoolId()).generateToken(b, of, storageTypes,
+ storageIds);
}
@VisibleForTesting
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3954cca/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java
index f3bec83..6b54490 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java
@@ -247,18 +247,19 @@ public class BlockTokenSecretManager extends
/** Generate an block token for current user */
public Token<BlockTokenIdentifier> generateToken(ExtendedBlock block,
EnumSet<BlockTokenIdentifier.AccessMode> modes,
- StorageType[] storageTypes) throws IOException {
+ StorageType[] storageTypes, String[] storageIds) throws IOException {
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
String userID = (ugi == null ? null : ugi.getShortUserName());
- return generateToken(userID, block, modes, storageTypes);
+ return generateToken(userID, block, modes, storageTypes, storageIds);
}
/** Generate a block token for a specified user */
public Token<BlockTokenIdentifier> generateToken(String userId,
ExtendedBlock block, EnumSet<BlockTokenIdentifier.AccessMode> modes,
- StorageType[] storageTypes) throws IOException {
+ StorageType[] storageTypes, String[] storageIds) throws IOException {
BlockTokenIdentifier id = new BlockTokenIdentifier(userId, block
- .getBlockPoolId(), block.getBlockId(), modes, storageTypes, useProto);
+ .getBlockPoolId(), block.getBlockId(), modes, storageTypes,
+ storageIds, useProto);
return new Token<BlockTokenIdentifier>(id, this);
}
@@ -272,10 +273,13 @@ public class BlockTokenSecretManager extends
*/
public void checkAccess(BlockTokenIdentifier id, String userId,
ExtendedBlock block, BlockTokenIdentifier.AccessMode mode,
- StorageType[] storageTypes) throws InvalidToken {
+ StorageType[] storageTypes, String[] storageIds) throws InvalidToken {
checkAccess(id, userId, block, mode);
if (storageTypes != null && storageTypes.length > 0) {
- checkAccess(id.getStorageTypes(), storageTypes);
+ checkAccess(id.getStorageTypes(), storageTypes, "StorageTypes");
+ }
+ if (storageIds != null && storageIds.length > 0) {
+ checkAccess(id.getStorageIds(), storageIds, "StorageIDs");
}
}
@@ -309,30 +313,31 @@ public class BlockTokenSecretManager extends
}
/**
- * Check if the requested StorageTypes match the StorageTypes in the
- * BlockTokenIdentifier.
- * Empty candidateStorageTypes specifiers mean 'all is permitted'. They
- * would otherwise be nonsensical.
+ * Check if the requested values can be satisfied with the values in the
+ * BlockToken. This is intended for use with StorageTypes and StorageIDs.
+ *
+ * The current node can only verify that one of the storage [Types|IDs] is
+ * available locally; the rest belong to other nodes in the pipeline.
*/
- public static void checkAccess(StorageType[] candidateStorageTypes,
- StorageType[] storageTypesRequested) throws InvalidToken {
- if (storageTypesRequested.length == 0) {
- throw new InvalidToken("The request has no StorageTypes. "
+ public static <T> void checkAccess(T[] candidates, T[] requested, String msg)
+ throws InvalidToken {
+ if (requested.length == 0) {
+ throw new InvalidToken("The request has no " + msg + ". "
+ "This is probably a configuration error.");
}
- if (candidateStorageTypes.length == 0) {
+ if (candidates.length == 0) {
return;
}
- List<StorageType> unseenCandidates = new ArrayList<StorageType>();
- unseenCandidates.addAll(Arrays.asList(candidateStorageTypes));
- for (StorageType storageType : storageTypesRequested) {
- final int index = unseenCandidates.indexOf(storageType);
+ List<T> unseenCandidates = new ArrayList<>();
+ unseenCandidates.addAll(Arrays.asList(candidates));
+ for (T req : requested) {
+ final int index = unseenCandidates.indexOf(req);
if (index == -1) {
- throw new InvalidToken("Block token with StorageTypes "
- + Arrays.toString(candidateStorageTypes)
- + " not valid for access with StorageTypes "
- + Arrays.toString(storageTypesRequested));
+ throw new InvalidToken("Block token with " + msg + " "
+ + Arrays.toString(candidates)
+ + " not valid for access with " + msg + " "
+ + Arrays.toString(requested));
}
Collections.swap(unseenCandidates, index, unseenCandidates.size()-1);
unseenCandidates.remove(unseenCandidates.size()-1);
@@ -342,7 +347,7 @@ public class BlockTokenSecretManager extends
/** Check if access should be allowed. userID is not checked if null */
public void checkAccess(Token<BlockTokenIdentifier> token, String userId,
ExtendedBlock block, BlockTokenIdentifier.AccessMode mode,
- StorageType[] storageTypes) throws InvalidToken {
+ StorageType[] storageTypes, String[] storageIds) throws InvalidToken {
BlockTokenIdentifier id = new BlockTokenIdentifier();
try {
id.readFields(new DataInputStream(new ByteArrayInputStream(token
@@ -352,7 +357,7 @@ public class BlockTokenSecretManager extends
"Unable to de-serialize block token identifier for user=" + userId
+ ", block=" + block + ", access mode=" + mode);
}
- checkAccess(id, userId, block, mode, storageTypes);
+ checkAccess(id, userId, block, mode, storageTypes, storageIds);
if (!Arrays.equals(retrievePassword(id), token.getPassword())) {
throw new InvalidToken("Block token with " + id.toString()
+ " doesn't have the correct token password");
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3954cca/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
index 91dc907..f855e45 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
@@ -357,7 +357,7 @@ public class Dispatcher {
reportedBlock.getBlock());
final KeyManager km = nnc.getKeyManager();
Token<BlockTokenIdentifier> accessToken = km.getAccessToken(eb,
- new StorageType[]{target.storageType});
+ new StorageType[]{target.storageType}, new String[0]);
IOStreamPair saslStreams = saslClient.socketSend(sock, unbufOut,
unbufIn, km, accessToken, target.getDatanodeInfo());
unbufOut = saslStreams.out;
@@ -411,7 +411,8 @@ public class Dispatcher {
private void sendRequest(DataOutputStream out, ExtendedBlock eb,
Token<BlockTokenIdentifier> accessToken) throws IOException {
new Sender(out).replaceBlock(eb, target.storageType, accessToken,
- source.getDatanodeInfo().getDatanodeUuid(), proxySource.datanode);
+ source.getDatanodeInfo().getDatanodeUuid(), proxySource.datanode,
+ null);
}
/** Check whether to continue waiting for response */
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3954cca/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/KeyManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/KeyManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/KeyManager.java
index 06bf07f..faf95b7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/KeyManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/KeyManager.java
@@ -95,7 +95,7 @@ public class KeyManager implements Closeable, DataEncryptionKeyFactory {
/** Get an access token for a block. */
public Token<BlockTokenIdentifier> getAccessToken(ExtendedBlock eb,
- StorageType[] storageTypes) throws IOException {
+ StorageType[] storageTypes, String[] storageIds) throws IOException {
if (!isBlockTokenEnabled) {
return BlockTokenSecretManager.DUMMY_TOKEN;
} else {
@@ -105,7 +105,7 @@ public class KeyManager implements Closeable, DataEncryptionKeyFactory {
}
return blockTokenSecretManager.generateToken(null, eb,
EnumSet.of(BlockTokenIdentifier.AccessMode.REPLACE,
- BlockTokenIdentifier.AccessMode.COPY), storageTypes);
+ BlockTokenIdentifier.AccessMode.COPY), storageTypes, storageIds);
}
}
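The balancer paths above deliberately pass new String[0] here (and null to replaceBlock): under the checkAccess rules, a token minted without storage IDs carries an empty candidate list and is therefore unrestricted, and the call sites skip the comparison entirely when a request supplies no IDs. A compressed model of those two leniency rules (simplified to set containment instead of the multiset loop shown earlier; class name hypothetical):

  import java.util.Arrays;
  import java.util.HashSet;

  public class EmptyMeansAnyDemo {
    static boolean permits(String[] tokenIds, String[] requestedIds) {
      if (requestedIds == null || requestedIds.length == 0) {
        return true; // call sites skip the check when nothing is requested
      }
      if (tokenIds == null || tokenIds.length == 0) {
        return true; // a token minted without storage IDs is unrestricted
      }
      return new HashSet<>(Arrays.asList(tokenIds))
          .containsAll(Arrays.asList(requestedIds));
    }

    public static void main(String[] args) {
      System.out.println(permits(new String[0], new String[]{"DS-1"}));  // true
      System.out.println(permits(new String[]{"DS-1"},
          new String[]{"DS-2"}));                                        // false
    }
  }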
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3954cca/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index e63930a..8f58e25 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1283,13 +1283,15 @@ public class BlockManager implements BlockStatsMXBean {
internalBlock.setBlockId(b.getBlock().getBlockId() + indices[i]);
blockTokens[i] = blockTokenSecretManager.generateToken(
NameNode.getRemoteUser().getShortUserName(),
- internalBlock, EnumSet.of(mode), b.getStorageTypes());
+ internalBlock, EnumSet.of(mode), b.getStorageTypes(),
+ b.getStorageIDs());
}
sb.setBlockTokens(blockTokens);
} else {
b.setBlockToken(blockTokenSecretManager.generateToken(
NameNode.getRemoteUser().getShortUserName(),
- b.getBlock(), EnumSet.of(mode), b.getStorageTypes()));
+ b.getBlock(), EnumSet.of(mode), b.getStorageTypes(),
+ b.getStorageIDs()));
}
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3954cca/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
index e0daca7..042169a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
@@ -679,7 +679,8 @@ class BPOfferService {
case DatanodeProtocol.DNA_TRANSFER:
// Send a copy of a block to another datanode
dn.transferBlocks(bcmd.getBlockPoolId(), bcmd.getBlocks(),
- bcmd.getTargets(), bcmd.getTargetStorageTypes());
+ bcmd.getTargets(), bcmd.getTargetStorageTypes(),
+ bcmd.getTargetStorageIDs());
break;
case DatanodeProtocol.DNA_INVALIDATE:
//
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3954cca/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
index 00109e0..2ab4067 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -151,7 +151,8 @@ class BlockReceiver implements Closeable {
final DataNode datanode, DataChecksum requestedChecksum,
CachingStrategy cachingStrategy,
final boolean allowLazyPersist,
- final boolean pinning) throws IOException {
+ final boolean pinning,
+ final String storageId) throws IOException {
try{
this.block = block;
this.in = in;
@@ -197,6 +198,7 @@ class BlockReceiver implements Closeable {
+ "\n allowLazyPersist=" + allowLazyPersist + ", pinning=" + pinning
+ ", isClient=" + isClient + ", isDatanode=" + isDatanode
+ ", responseInterval=" + responseInterval
+ + ", storageID=" + (storageId != null ? storageId : "null")
);
}
@@ -204,11 +206,13 @@ class BlockReceiver implements Closeable {
// Open local disk out
//
if (isDatanode) { //replication or move
- replicaHandler = datanode.data.createTemporary(storageType, block);
+ replicaHandler =
+ datanode.data.createTemporary(storageType, storageId, block);
} else {
switch (stage) {
case PIPELINE_SETUP_CREATE:
- replicaHandler = datanode.data.createRbw(storageType, block, allowLazyPersist);
+ replicaHandler = datanode.data.createRbw(storageType, storageId,
+ block, allowLazyPersist);
datanode.notifyNamenodeReceivingBlock(
block, replicaHandler.getReplica().getStorageUuid());
break;
@@ -233,7 +237,7 @@ class BlockReceiver implements Closeable {
case TRANSFER_FINALIZED:
// this is a transfer destination
replicaHandler =
- datanode.data.createTemporary(storageType, block);
+ datanode.data.createTemporary(storageType, storageId, block);
break;
default: throw new IOException("Unsupported stage " + stage +
" while receiving block " + block + " from " + inAddr);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3954cca/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 66ef89a..2305e0b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -1943,7 +1943,7 @@ public class DataNode extends ReconfigurableBase
LOG.debug("Got: " + id.toString());
}
blockPoolTokenSecretManager.checkAccess(id, null, block, accessMode,
- null);
+ null, null);
}
}
@@ -2224,7 +2224,8 @@ public class DataNode extends ReconfigurableBase
@VisibleForTesting
void transferBlock(ExtendedBlock block, DatanodeInfo[] xferTargets,
- StorageType[] xferTargetStorageTypes) throws IOException {
+ StorageType[] xferTargetStorageTypes, String[] xferTargetStorageIDs)
+ throws IOException {
BPOfferService bpos = getBPOSForBlock(block);
DatanodeRegistration bpReg = getDNRegistrationForBP(block.getBlockPoolId());
@@ -2281,17 +2282,19 @@ public class DataNode extends ReconfigurableBase
LOG.info(bpReg + " Starting thread to transfer " +
block + " to " + xfersBuilder);
- new Daemon(new DataTransfer(xferTargets, xferTargetStorageTypes, block,
+ new Daemon(new DataTransfer(xferTargets, xferTargetStorageTypes,
+ xferTargetStorageIDs, block,
BlockConstructionStage.PIPELINE_SETUP_CREATE, "")).start();
}
}
void transferBlocks(String poolId, Block blocks[],
- DatanodeInfo xferTargets[][], StorageType[][] xferTargetStorageTypes) {
+ DatanodeInfo[][] xferTargets, StorageType[][] xferTargetStorageTypes,
+ String[][] xferTargetStorageIDs) {
for (int i = 0; i < blocks.length; i++) {
try {
transferBlock(new ExtendedBlock(poolId, blocks[i]), xferTargets[i],
- xferTargetStorageTypes[i]);
+ xferTargetStorageTypes[i], xferTargetStorageIDs[i]);
} catch (IOException ie) {
LOG.warn("Failed to transfer block " + blocks[i], ie);
}
@@ -2395,6 +2398,7 @@ public class DataNode extends ReconfigurableBase
private class DataTransfer implements Runnable {
final DatanodeInfo[] targets;
final StorageType[] targetStorageTypes;
+ final private String[] targetStorageIds;
final ExtendedBlock b;
final BlockConstructionStage stage;
final private DatanodeRegistration bpReg;
@@ -2406,8 +2410,8 @@ public class DataNode extends ReconfigurableBase
* entire target list, the block, and the data.
*/
DataTransfer(DatanodeInfo targets[], StorageType[] targetStorageTypes,
- ExtendedBlock b, BlockConstructionStage stage,
- final String clientname) {
+ String[] targetStorageIds, ExtendedBlock b,
+ BlockConstructionStage stage, final String clientname) {
if (DataTransferProtocol.LOG.isDebugEnabled()) {
DataTransferProtocol.LOG.debug(getClass().getSimpleName() + ": "
+ b + " (numBytes=" + b.getNumBytes() + ")"
@@ -2415,10 +2419,13 @@ public class DataNode extends ReconfigurableBase
+ ", clientname=" + clientname
+ ", targets=" + Arrays.asList(targets)
+ ", target storage types=" + (targetStorageTypes == null ? "[]" :
- Arrays.asList(targetStorageTypes)));
+ Arrays.asList(targetStorageTypes))
+ + ", target storage IDs=" + (targetStorageIds == null ? "[]" :
+ Arrays.asList(targetStorageIds)));
}
this.targets = targets;
this.targetStorageTypes = targetStorageTypes;
+ this.targetStorageIds = targetStorageIds;
this.b = b;
this.stage = stage;
BPOfferService bpos = blockPoolManager.get(b.getBlockPoolId());
@@ -2456,7 +2463,7 @@ public class DataNode extends ReconfigurableBase
//
Token<BlockTokenIdentifier> accessToken = getBlockAccessToken(b,
EnumSet.of(BlockTokenIdentifier.AccessMode.WRITE),
- targetStorageTypes);
+ targetStorageTypes, targetStorageIds);
long writeTimeout = dnConf.socketWriteTimeout +
HdfsConstants.WRITE_TIMEOUT_EXTENSION * (targets.length-1);
@@ -2477,10 +2484,13 @@ public class DataNode extends ReconfigurableBase
DatanodeInfo srcNode = new DatanodeInfoBuilder().setNodeID(bpReg)
.build();
+ String storageId = targetStorageIds.length > 0 ?
+ targetStorageIds[0] : null;
new Sender(out).writeBlock(b, targetStorageTypes[0], accessToken,
clientname, targets, targetStorageTypes, srcNode,
stage, 0, 0, 0, 0, blockSender.getChecksum(), cachingStrategy,
- false, false, null);
+ false, false, null, storageId,
+ targetStorageIds);
// send data & checksum
blockSender.sendBlock(out, unbufOut, null);
@@ -2540,12 +2550,12 @@ public class DataNode extends ReconfigurableBase
*/
public Token<BlockTokenIdentifier> getBlockAccessToken(ExtendedBlock b,
EnumSet<AccessMode> mode,
- StorageType[] storageTypes) throws IOException {
+ StorageType[] storageTypes, String[] storageIds) throws IOException {
Token<BlockTokenIdentifier> accessToken =
BlockTokenSecretManager.DUMMY_TOKEN;
if (isBlockTokenEnabled) {
accessToken = blockPoolTokenSecretManager.generateToken(b, mode,
- storageTypes);
+ storageTypes, storageIds);
}
return accessToken;
}
@@ -2918,7 +2928,7 @@ public class DataNode extends ReconfigurableBase
LOG.debug("Got: " + id.toString());
}
blockPoolTokenSecretManager.checkAccess(id, null, block,
- BlockTokenIdentifier.AccessMode.READ, null);
+ BlockTokenIdentifier.AccessMode.READ, null, null);
}
}
}
@@ -2934,7 +2944,8 @@ public class DataNode extends ReconfigurableBase
*/
void transferReplicaForPipelineRecovery(final ExtendedBlock b,
final DatanodeInfo[] targets, final StorageType[] targetStorageTypes,
- final String client) throws IOException {
+ final String[] targetStorageIds, final String client)
+ throws IOException {
final long storedGS;
final long visible;
final BlockConstructionStage stage;
@@ -2967,7 +2978,8 @@ public class DataNode extends ReconfigurableBase
b.setNumBytes(visible);
if (targets.length > 0) {
- new DataTransfer(targets, targetStorageTypes, b, stage, client).run();
+ new DataTransfer(targets, targetStorageTypes, targetStorageIds, b, stage,
+ client).run();
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3954cca/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
index cc13799..d42e330 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
@@ -354,7 +354,8 @@ class DataXceiver extends Receiver implements Runnable {
updateCurrentThreadName("Passing file descriptors for block " + blk);
DataOutputStream out = getBufferedOutputStream();
checkAccess(out, true, blk, token,
- Op.REQUEST_SHORT_CIRCUIT_FDS, BlockTokenIdentifier.AccessMode.READ);
+ Op.REQUEST_SHORT_CIRCUIT_FDS, BlockTokenIdentifier.AccessMode.READ,
+ null, null);
BlockOpResponseProto.Builder bld = BlockOpResponseProto.newBuilder();
FileInputStream fis[] = null;
SlotId registeredSlotId = null;
@@ -662,7 +663,7 @@ class DataXceiver extends Receiver implements Runnable {
final Token<BlockTokenIdentifier> blockToken,
final String clientname,
final DatanodeInfo[] targets,
- final StorageType[] targetStorageTypes,
+ final StorageType[] targetStorageTypes,
final DatanodeInfo srcDataNode,
final BlockConstructionStage stage,
final int pipelineSize,
@@ -673,7 +674,9 @@ class DataXceiver extends Receiver implements Runnable {
CachingStrategy cachingStrategy,
boolean allowLazyPersist,
final boolean pinning,
- final boolean[] targetPinnings) throws IOException {
+ final boolean[] targetPinnings,
+ final String storageId,
+ final String[] targetStorageIds) throws IOException {
previousOpClientName = clientname;
updateCurrentThreadName("Receiving block " + block);
final boolean isDatanode = clientname.length() == 0;
@@ -692,8 +695,15 @@ class DataXceiver extends Receiver implements Runnable {
if (targetStorageTypes.length > 0) {
System.arraycopy(targetStorageTypes, 0, storageTypes, 1, nst);
}
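+ // Prepend the local storage ID to the downstream targets' IDs so the
+ // token check covers the whole remaining pipeline.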
+ int nsi = targetStorageIds.length;
+ String[] storageIds = new String[nsi + 1];
+ storageIds[0] = storageId;
+ if (targetStorageIds.length > 0) {
+ System.arraycopy(targetStorageIds, 0, storageIds, 1, nsi);
+ }
checkAccess(replyOut, isClient, block, blockToken, Op.WRITE_BLOCK,
- BlockTokenIdentifier.AccessMode.WRITE, storageTypes);
+ BlockTokenIdentifier.AccessMode.WRITE,
+ storageTypes, storageIds);
// check single target for transfer-RBW/Finalized
if (isTransfer && targets.length > 0) {
@@ -743,7 +753,7 @@ class DataXceiver extends Receiver implements Runnable {
peer.getLocalAddressString(),
stage, latestGenerationStamp, minBytesRcvd, maxBytesRcvd,
clientname, srcDataNode, datanode, requestedChecksum,
- cachingStrategy, allowLazyPersist, pinning));
+ cachingStrategy, allowLazyPersist, pinning, storageId));
replica = blockReceiver.getReplica();
} else {
replica = datanode.data.recoverClose(
@@ -796,16 +806,18 @@ class DataXceiver extends Receiver implements Runnable {
if (targetPinnings != null && targetPinnings.length > 0) {
new Sender(mirrorOut).writeBlock(originalBlock, targetStorageTypes[0],
- blockToken, clientname, targets, targetStorageTypes, srcDataNode,
- stage, pipelineSize, minBytesRcvd, maxBytesRcvd,
- latestGenerationStamp, requestedChecksum, cachingStrategy,
- allowLazyPersist, targetPinnings[0], targetPinnings);
+ blockToken, clientname, targets, targetStorageTypes,
+ srcDataNode, stage, pipelineSize, minBytesRcvd, maxBytesRcvd,
+ latestGenerationStamp, requestedChecksum, cachingStrategy,
+ allowLazyPersist, targetPinnings[0], targetPinnings,
+ targetStorageIds.length > 0 ? targetStorageIds[0] : null,
+ targetStorageIds);
} else {
new Sender(mirrorOut).writeBlock(originalBlock, targetStorageTypes[0],
- blockToken, clientname, targets, targetStorageTypes, srcDataNode,
- stage, pipelineSize, minBytesRcvd, maxBytesRcvd,
- latestGenerationStamp, requestedChecksum, cachingStrategy,
- allowLazyPersist, false, targetPinnings);
+ blockToken, clientname, targets, targetStorageTypes,
+ srcDataNode, stage, pipelineSize, minBytesRcvd, maxBytesRcvd,
+ latestGenerationStamp, requestedChecksum, cachingStrategy,
+ allowLazyPersist, false, targetPinnings,
+ targetStorageIds.length > 0 ? targetStorageIds[0] : null,
+ targetStorageIds);
}
mirrorOut.flush();
@@ -929,17 +941,19 @@ class DataXceiver extends Receiver implements Runnable {
final Token<BlockTokenIdentifier> blockToken,
final String clientName,
final DatanodeInfo[] targets,
- final StorageType[] targetStorageTypes) throws IOException {
+ final StorageType[] targetStorageTypes,
+ final String[] targetStorageIds) throws IOException {
previousOpClientName = clientName;
updateCurrentThreadName(Op.TRANSFER_BLOCK + " " + blk);
final DataOutputStream out = new DataOutputStream(
getOutputStream());
checkAccess(out, true, blk, blockToken, Op.TRANSFER_BLOCK,
- BlockTokenIdentifier.AccessMode.COPY, targetStorageTypes);
+ BlockTokenIdentifier.AccessMode.COPY, targetStorageTypes,
+ targetStorageIds);
try {
datanode.transferReplicaForPipelineRecovery(blk, targets,
- targetStorageTypes, clientName);
+ targetStorageTypes, targetStorageIds, clientName);
writeResponse(Status.SUCCESS, null, out);
} catch (IOException ioe) {
LOG.info("transferBlock " + blk + " received exception " + ioe);
@@ -1105,12 +1119,14 @@ class DataXceiver extends Receiver implements Runnable {
final StorageType storageType,
final Token<BlockTokenIdentifier> blockToken,
final String delHint,
- final DatanodeInfo proxySource) throws IOException {
+ final DatanodeInfo proxySource,
+ final String storageId) throws IOException {
updateCurrentThreadName("Replacing block " + block + " from " + delHint);
DataOutputStream replyOut = new DataOutputStream(getOutputStream());
checkAccess(replyOut, true, block, blockToken,
Op.REPLACE_BLOCK, BlockTokenIdentifier.AccessMode.REPLACE,
- new StorageType[]{ storageType });
+ new StorageType[]{storageType},
+ new String[]{storageId});
if (!dataXceiverServer.balanceThrottler.acquire()) { // not able to start
String msg = "Not able to receive block " + block.getBlockId() +
@@ -1131,7 +1147,7 @@ class DataXceiver extends Receiver implements Runnable {
// Move the block to different storage in the same datanode
if (proxySource.equals(datanode.getDatanodeId())) {
ReplicaInfo oldReplica = datanode.data.moveBlockAcrossStorage(block,
- storageType);
+ storageType, storageId);
if (oldReplica != null) {
LOG.info("Moved " + block + " from StorageType "
+ oldReplica.getVolume().getStorageType() + " to " + storageType);
@@ -1188,7 +1204,7 @@ class DataXceiver extends Receiver implements Runnable {
proxyReply, proxySock.getRemoteSocketAddress().toString(),
proxySock.getLocalSocketAddress().toString(),
null, 0, 0, 0, "", null, datanode, remoteChecksum,
- CachingStrategy.newDropBehind(), false, false));
+ CachingStrategy.newDropBehind(), false, false, storageId));
// receive a block
blockReceiver.receiveBlock(null, null, replyOut, null,
@@ -1258,11 +1274,12 @@ class DataXceiver extends Receiver implements Runnable {
final DataNode dn, DataChecksum requestedChecksum,
CachingStrategy cachingStrategy,
final boolean allowLazyPersist,
- final boolean pinning) throws IOException {
+ final boolean pinning,
+ final String storageId) throws IOException {
return new BlockReceiver(block, storageType, in,
inAddr, myAddr, stage, newGs, minBytesRcvd, maxBytesRcvd,
clientname, srcDataNode, dn, requestedChecksum,
- cachingStrategy, allowLazyPersist, pinning);
+ cachingStrategy, allowLazyPersist, pinning, storageId);
}
/**
@@ -1365,7 +1382,7 @@ class DataXceiver extends Receiver implements Runnable {
private void checkAccess(OutputStream out, final boolean reply,
ExtendedBlock blk, Token<BlockTokenIdentifier> t, Op op,
BlockTokenIdentifier.AccessMode mode) throws IOException {
- checkAccess(out, reply, blk, t, op, mode, null);
+ checkAccess(out, reply, blk, t, op, mode, null, null);
}
private void checkAccess(OutputStream out, final boolean reply,
@@ -1373,7 +1390,8 @@ class DataXceiver extends Receiver implements Runnable {
final Token<BlockTokenIdentifier> t,
final Op op,
final BlockTokenIdentifier.AccessMode mode,
- final StorageType[] storageTypes) throws IOException {
+ final StorageType[] storageTypes,
+ final String[] storageIds) throws IOException {
checkAndWaitForBP(blk);
if (datanode.isBlockTokenEnabled) {
if (LOG.isDebugEnabled()) {
@@ -1382,7 +1400,7 @@ class DataXceiver extends Receiver implements Runnable {
}
try {
datanode.blockPoolTokenSecretManager.checkAccess(t, null, blk, mode,
- storageTypes);
+ storageTypes, storageIds);
} catch(InvalidToken e) {
try {
if (reply) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3954cca/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
index 1492e5d..e076dda 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
@@ -111,7 +111,8 @@ public final class ErasureCodingWorker {
new StripedReconstructionInfo(
reconInfo.getExtendedBlock(), reconInfo.getErasureCodingPolicy(),
reconInfo.getLiveBlockIndices(), reconInfo.getSourceDnInfos(),
- reconInfo.getTargetDnInfos(), reconInfo.getTargetStorageTypes());
+ reconInfo.getTargetDnInfos(), reconInfo.getTargetStorageTypes(),
+ reconInfo.getTargetStorageIDs());
final StripedBlockReconstructor task =
new StripedBlockReconstructor(this, stripedReconInfo);
if (task.hasValidTargets()) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3954cca/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReader.java
index b3884c2..39ef67e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReader.java
@@ -110,7 +110,7 @@ class StripedBlockReader {
stripedReader.getSocketAddress4Transfer(source);
Token<BlockTokenIdentifier> blockToken = datanode.getBlockAccessToken(
block, EnumSet.of(BlockTokenIdentifier.AccessMode.READ),
- StorageType.EMPTY_ARRAY);
+ StorageType.EMPTY_ARRAY, new String[0]);
/*
* This can be further improved if the replica is local, then we can
* read directly from DN and need to check the replica is FINALIZED
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3954cca/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockWriter.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockWriter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockWriter.java
index a6989d4..24c1d61 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockWriter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockWriter.java
@@ -61,6 +61,7 @@ class StripedBlockWriter {
private final ExtendedBlock block;
private final DatanodeInfo target;
private final StorageType storageType;
+ private final String storageId;
private Socket targetSocket;
private DataOutputStream targetOutputStream;
@@ -72,8 +73,8 @@ class StripedBlockWriter {
StripedBlockWriter(StripedWriter stripedWriter, DataNode datanode,
Configuration conf, ExtendedBlock block,
- DatanodeInfo target, StorageType storageType)
- throws IOException {
+ DatanodeInfo target, StorageType storageType,
+ String storageId) throws IOException {
this.stripedWriter = stripedWriter;
this.datanode = datanode;
this.conf = conf;
@@ -81,6 +82,7 @@ class StripedBlockWriter {
this.block = block;
this.target = target;
this.storageType = storageType;
+ this.storageId = storageId;
this.targetBuffer = stripedWriter.allocateWriteBuffer();
@@ -117,7 +119,7 @@ class StripedBlockWriter {
Token<BlockTokenIdentifier> blockToken =
datanode.getBlockAccessToken(block,
EnumSet.of(BlockTokenIdentifier.AccessMode.WRITE),
- new StorageType[]{storageType});
+ new StorageType[]{storageType}, new String[]{storageId});
long writeTimeout = datanode.getDnConf().getSocketWriteTimeout();
OutputStream unbufOut = NetUtils.getOutputStream(socket, writeTimeout);
@@ -141,7 +143,7 @@ class StripedBlockWriter {
new StorageType[]{storageType}, source,
BlockConstructionStage.PIPELINE_SETUP_CREATE, 0, 0, 0, 0,
stripedWriter.getChecksum(), stripedWriter.getCachingStrategy(),
- false, false, null);
+ false, false, null, storageId, new String[]{storageId});
targetSocket = socket;
targetOutputStream = out;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3954cca/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructionInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructionInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructionInfo.java
index a5c328b..a619c34 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructionInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructionInfo.java
@@ -40,24 +40,27 @@ public class StripedReconstructionInfo {
private final byte[] targetIndices;
private final DatanodeInfo[] targets;
private final StorageType[] targetStorageTypes;
+ private final String[] targetStorageIds;
public StripedReconstructionInfo(ExtendedBlock blockGroup,
ErasureCodingPolicy ecPolicy, byte[] liveIndices, DatanodeInfo[] sources,
byte[] targetIndices) {
- this(blockGroup, ecPolicy, liveIndices, sources, targetIndices, null, null);
+ this(blockGroup, ecPolicy, liveIndices, sources, targetIndices, null,
+ null, null);
}
StripedReconstructionInfo(ExtendedBlock blockGroup,
ErasureCodingPolicy ecPolicy, byte[] liveIndices, DatanodeInfo[] sources,
- DatanodeInfo[] targets, StorageType[] targetStorageTypes) {
+ DatanodeInfo[] targets, StorageType[] targetStorageTypes,
+ String[] targetStorageIds) {
this(blockGroup, ecPolicy, liveIndices, sources, null, targets,
- targetStorageTypes);
+ targetStorageTypes, targetStorageIds);
}
private StripedReconstructionInfo(ExtendedBlock blockGroup,
ErasureCodingPolicy ecPolicy, byte[] liveIndices, DatanodeInfo[] sources,
byte[] targetIndices, DatanodeInfo[] targets,
- StorageType[] targetStorageTypes) {
+ StorageType[] targetStorageTypes, String[] targetStorageIds) {
this.blockGroup = blockGroup;
this.ecPolicy = ecPolicy;
@@ -66,6 +69,7 @@ public class StripedReconstructionInfo {
this.targetIndices = targetIndices;
this.targets = targets;
this.targetStorageTypes = targetStorageTypes;
+ this.targetStorageIds = targetStorageIds;
}
ExtendedBlock getBlockGroup() {
@@ -95,5 +99,9 @@ public class StripedReconstructionInfo {
StorageType[] getTargetStorageTypes() {
return targetStorageTypes;
}
+
+ String[] getTargetStorageIds() {
+ return targetStorageIds;
+ }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3954cca/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedWriter.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedWriter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedWriter.java
index 225a7ed..762506c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedWriter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedWriter.java
@@ -55,6 +55,7 @@ class StripedWriter {
private final short[] targetIndices;
private boolean hasValidTargets;
private final StorageType[] targetStorageTypes;
+ private final String[] targetStorageIds;
private StripedBlockWriter[] writers;
@@ -77,6 +78,8 @@ class StripedWriter {
assert targets != null;
this.targetStorageTypes = stripedReconInfo.getTargetStorageTypes();
assert targetStorageTypes != null;
+ this.targetStorageIds = stripedReconInfo.getTargetStorageIds();
+ assert targetStorageIds != null;
writers = new StripedBlockWriter[targets.length];
@@ -192,7 +195,7 @@ class StripedWriter {
private StripedBlockWriter createWriter(short index) throws IOException {
return new StripedBlockWriter(this, datanode, conf,
reconstructor.getBlock(targetIndices[index]), targets[index],
- targetStorageTypes[index]);
+ targetStorageTypes[index], targetStorageIds[index]);
}
ByteBuffer allocateWriteBuffer() {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3954cca/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/AvailableSpaceVolumeChoosingPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/AvailableSpaceVolumeChoosingPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/AvailableSpaceVolumeChoosingPolicy.java
index 39d9547..efe222f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/AvailableSpaceVolumeChoosingPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/AvailableSpaceVolumeChoosingPolicy.java
@@ -113,8 +113,8 @@ public class AvailableSpaceVolumeChoosingPolicy<V extends FsVolumeSpi>
new RoundRobinVolumeChoosingPolicy<V>();
@Override
- public V chooseVolume(List<V> volumes,
- long replicaSize) throws IOException {
+ public V chooseVolume(List<V> volumes, long replicaSize, String storageId)
+ throws IOException {
if (volumes.size() < 1) {
throw new DiskOutOfSpaceException("No more available volumes");
}
@@ -125,19 +125,20 @@ public class AvailableSpaceVolumeChoosingPolicy<V extends FsVolumeSpi>
storageType.ordinal() : StorageType.DEFAULT.ordinal();
synchronized (syncLocks[index]) {
- return doChooseVolume(volumes, replicaSize);
+ return doChooseVolume(volumes, replicaSize, storageId);
}
}
- private V doChooseVolume(final List<V> volumes,
- long replicaSize) throws IOException {
+ private V doChooseVolume(final List<V> volumes, long replicaSize,
+ String storageId) throws IOException {
AvailableSpaceVolumeList volumesWithSpaces =
new AvailableSpaceVolumeList(volumes);
if (volumesWithSpaces.areAllVolumesWithinFreeSpaceThreshold()) {
// If they're actually not too far out of whack, fall back on pure round
// robin.
- V volume = roundRobinPolicyBalanced.chooseVolume(volumes, replicaSize);
+ V volume = roundRobinPolicyBalanced.chooseVolume(volumes, replicaSize,
+ storageId);
if (LOG.isDebugEnabled()) {
LOG.debug("All volumes are within the configured free space balance " +
"threshold. Selecting " + volume + " for write of block size " +
@@ -165,7 +166,7 @@ public class AvailableSpaceVolumeChoosingPolicy<V extends FsVolumeSpi>
if (mostAvailableAmongLowVolumes < replicaSize ||
random.nextFloat() < scaledPreferencePercent) {
volume = roundRobinPolicyHighAvailable.chooseVolume(
- highAvailableVolumes, replicaSize);
+ highAvailableVolumes, replicaSize, storageId);
if (LOG.isDebugEnabled()) {
LOG.debug("Volumes are imbalanced. Selecting " + volume +
" from high available space volumes for write of block size "
@@ -173,7 +174,7 @@ public class AvailableSpaceVolumeChoosingPolicy<V extends FsVolumeSpi>
}
} else {
volume = roundRobinPolicyLowAvailable.chooseVolume(
- lowAvailableVolumes, replicaSize);
+ lowAvailableVolumes, replicaSize, storageId);
if (LOG.isDebugEnabled()) {
LOG.debug("Volumes are imbalanced. Selecting " + volume +
" from low available space volumes for write of block size "
@@ -266,7 +267,8 @@ public class AvailableSpaceVolumeChoosingPolicy<V extends FsVolumeSpi>
/**
* Used so that we only check the available space on a given volume once, at
- * the beginning of {@link AvailableSpaceVolumeChoosingPolicy#chooseVolume(List, long)}.
+ * the beginning of
+ * {@link AvailableSpaceVolumeChoosingPolicy#chooseVolume}.
*/
private class AvailableSpaceVolumePair {
private final V volume;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3954cca/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
index 9e979f7..d7e29cf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
@@ -318,7 +318,7 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
* @return the meta info of the replica which is being written to
* @throws IOException if an error occurs
*/
- ReplicaHandler createTemporary(StorageType storageType,
+ ReplicaHandler createTemporary(StorageType storageType, String storageId,
ExtendedBlock b) throws IOException;
/**
@@ -328,7 +328,7 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
* @return the meta info of the replica which is being written to
* @throws IOException if an error occurs
*/
- ReplicaHandler createRbw(StorageType storageType,
+ ReplicaHandler createRbw(StorageType storageType, String storageId,
ExtendedBlock b, boolean allowLazyPersist) throws IOException;
/**
@@ -623,7 +623,7 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
* Move block from one storage to another storage
*/
ReplicaInfo moveBlockAcrossStorage(final ExtendedBlock block,
- StorageType targetStorageType) throws IOException;
+ StorageType targetStorageType, String storageId) throws IOException;
/**
* Set a block to be pinned on this datanode so that it cannot be moved
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3954cca/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/RoundRobinVolumeChoosingPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/RoundRobinVolumeChoosingPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/RoundRobinVolumeChoosingPolicy.java
index 9474b92..b9bcf1f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/RoundRobinVolumeChoosingPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/RoundRobinVolumeChoosingPolicy.java
@@ -50,7 +50,7 @@ public class RoundRobinVolumeChoosingPolicy<V extends FsVolumeSpi>
}
@Override
- public V chooseVolume(final List<V> volumes, long blockSize)
+ public V chooseVolume(final List<V> volumes, long blockSize, String storageId)
throws IOException {
if (volumes.size() < 1) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3954cca/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/VolumeChoosingPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/VolumeChoosingPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/VolumeChoosingPolicy.java
index 62b1e75..8cbc058 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/VolumeChoosingPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/VolumeChoosingPolicy.java
@@ -36,8 +36,11 @@ public interface VolumeChoosingPolicy<V extends FsVolumeSpi> {
*
* @param volumes - a list of available volumes.
* @param replicaSize - the size of the replica for which a volume is sought.
+ * @param storageId - the storage id of the volume nominated by the namenode.
+ * This can usually be ignored by the VolumeChoosingPolicy.
* @return the chosen volume.
* @throws IOException when disks are unavailable or are full.
*/
- public V chooseVolume(List<V> volumes, long replicaSize) throws IOException;
+ V chooseVolume(List<V> volumes, long replicaSize, String storageId)
+ throws IOException;
}
\ No newline at end of file
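For a sense of the extended contract, here is a minimal sketch of a policy that honors the nominated storage id when the volume can hold the replica and otherwise defers to round-robin. This is not part of the commit; StorageIdPreferringPolicy is a hypothetical name, and the sketch assumes the existing FsVolumeSpi accessors getStorageID() and getAvailable().
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.RoundRobinVolumeChoosingPolicy;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.VolumeChoosingPolicy;
public class StorageIdPreferringPolicy<V extends FsVolumeSpi>
    implements VolumeChoosingPolicy<V> {
  private final RoundRobinVolumeChoosingPolicy<V> fallback =
      new RoundRobinVolumeChoosingPolicy<V>();
  @Override
  public V chooseVolume(List<V> volumes, long replicaSize, String storageId)
      throws IOException {
    if (storageId != null) {
      for (V v : volumes) {
        // Honor the namenode's nomination when the volume fits the replica;
        // otherwise fall through to plain round-robin below.
        if (storageId.equals(v.getStorageID())
            && v.getAvailable() >= replicaSize) {
          return v;
        }
      }
    }
    return fallback.chooseVolume(volumes, replicaSize, storageId);
  }
}
As the new javadoc notes, most policies can simply ignore the parameter, which is what the built-in round-robin and available-space policies do.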
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3954cca/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 169e0e6..9a5002a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -927,7 +927,8 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
*/
@Override
public ReplicaInfo moveBlockAcrossStorage(ExtendedBlock block,
- StorageType targetStorageType) throws IOException {
+ StorageType targetStorageType, String targetStorageId)
+ throws IOException {
ReplicaInfo replicaInfo = getReplicaInfo(block);
if (replicaInfo.getState() != ReplicaState.FINALIZED) {
throw new ReplicaNotFoundException(
@@ -952,7 +953,8 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
FsVolumeReference volumeRef = null;
try (AutoCloseableLock lock = datasetLock.acquire()) {
- volumeRef = volumes.getNextVolume(targetStorageType, block.getNumBytes());
+ volumeRef = volumes.getNextVolume(targetStorageType, targetStorageId,
+ block.getNumBytes());
}
try {
moveBlock(block, replicaInfo, volumeRef);
@@ -1298,11 +1300,11 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
}
}
}
-
+
@Override // FsDatasetSpi
public ReplicaHandler createRbw(
- StorageType storageType, ExtendedBlock b, boolean allowLazyPersist)
- throws IOException {
+ StorageType storageType, String storageId, ExtendedBlock b,
+ boolean allowLazyPersist) throws IOException {
try (AutoCloseableLock lock = datasetLock.acquire()) {
ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(),
b.getBlockId());
@@ -1335,7 +1337,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
}
if (ref == null) {
- ref = volumes.getNextVolume(storageType, b.getNumBytes());
+ ref = volumes.getNextVolume(storageType, storageId, b.getNumBytes());
}
FsVolumeImpl v = (FsVolumeImpl) ref.getVolume();
@@ -1503,7 +1505,8 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
@Override // FsDatasetSpi
public ReplicaHandler createTemporary(
- StorageType storageType, ExtendedBlock b) throws IOException {
+ StorageType storageType, String storageId, ExtendedBlock b)
+ throws IOException {
long startTimeMs = Time.monotonicNow();
long writerStopTimeoutMs = datanode.getDnConf().getXceiverStopTimeout();
ReplicaInfo lastFoundReplicaInfo = null;
@@ -1516,7 +1519,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
invalidate(b.getBlockPoolId(), new Block[] { lastFoundReplicaInfo });
}
FsVolumeReference ref =
- volumes.getNextVolume(storageType, b.getNumBytes());
+ volumes.getNextVolume(storageType, storageId, b.getNumBytes());
FsVolumeImpl v = (FsVolumeImpl) ref.getVolume();
ReplicaInPipeline newReplicaInfo;
try {
@@ -2899,7 +2902,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
replicaInfo.getVolume().isTransientStorage()) {
// Pick a target volume to persist the block.
targetReference = volumes.getNextVolume(
- StorageType.DEFAULT, replicaInfo.getNumBytes());
+ StorageType.DEFAULT, null, replicaInfo.getNumBytes());
targetVolume = (FsVolumeImpl) targetReference.getVolume();
ramDiskReplicaTracker.recordStartLazyPersist(
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org
[08/50] [abbrv] hadoop git commit: HADOOP-14366. Maven upgrade broke
start-build-env.sh. Contributed by Akira Ajisaka.
Posted by ae...@apache.org.
HADOOP-14366. Maven upgrade broke start-build-env.sh. Contributed by
Akira Ajisaka.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/14b5c93f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/14b5c93f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/14b5c93f
Branch: refs/heads/HDFS-7240
Commit: 14b5c93f3cba4a04369989f93f14ea99409aa1d8
Parents: 4cfc866
Author: Sangjin Lee <sj...@apache.org>
Authored: Mon May 1 15:54:01 2017 -0700
Committer: Sangjin Lee <sj...@apache.org>
Committed: Mon May 1 15:54:01 2017 -0700
----------------------------------------------------------------------
dev-support/docker/Dockerfile | 1 +
1 file changed, 1 insertion(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/14b5c93f/dev-support/docker/Dockerfile
----------------------------------------------------------------------
diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile
index f939b1d..1775323 100644
--- a/dev-support/docker/Dockerfile
+++ b/dev-support/docker/Dockerfile
@@ -93,6 +93,7 @@ RUN mkdir -p /opt/maven && \
-o /opt/maven.tar.gz && \
tar xzf /opt/maven.tar.gz --strip-components 1 -C /opt/maven
ENV MAVEN_HOME /opt/maven
+ENV PATH "$PATH:/opt/maven/bin"
######
# Install findbugs
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org
[05/50] [abbrv] hadoop git commit: HDFS-11609. Some blocks can be
permanently lost if nodes are decommissioned while dead. Contributed by
Kihwal Lee.
Posted by ae...@apache.org.
HDFS-11609. Some blocks can be permanently lost if nodes are decommissioned while dead. Contributed by Kihwal Lee.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/07b98e78
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/07b98e78
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/07b98e78
Branch: refs/heads/HDFS-7240
Commit: 07b98e7830c2214340cb7f434df674057e89df94
Parents: 30fc580
Author: Kihwal Lee <ki...@apache.org>
Authored: Mon May 1 14:19:02 2017 -0500
Committer: Kihwal Lee <ki...@apache.org>
Committed: Mon May 1 14:19:02 2017 -0500
----------------------------------------------------------------------
.../server/blockmanagement/BlockManager.java | 30 ++++-
.../blockmanagement/LowRedundancyBlocks.java | 6 +-
.../namenode/TestDecommissioningStatus.java | 113 ++++++++++++++++++-
3 files changed, 139 insertions(+), 10 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/07b98e78/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 7309846..e63930a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -2031,7 +2031,8 @@ public class BlockManager implements BlockStatsMXBean {
*
* We prefer nodes that are in DECOMMISSION_INPROGRESS state to other nodes
* since the former do not have write traffic and hence are less busy.
- * We do not use already decommissioned nodes as a source.
+ * We do not use already decommissioned nodes as a source, unless there is
+ * no other choice.
* Otherwise we randomly choose nodes among those that did not reach their
* replication limits. However, if the recovery work is of the highest
* priority and all nodes have reached their replication limits, we will
@@ -2067,6 +2068,7 @@ public class BlockManager implements BlockStatsMXBean {
List<DatanodeDescriptor> srcNodes = new ArrayList<>();
liveBlockIndices.clear();
final boolean isStriped = block.isStriped();
+ DatanodeDescriptor decommissionedSrc = null;
BitSet bitSet = isStriped ?
new BitSet(((BlockInfoStriped) block).getTotalBlockNum()) : null;
@@ -2085,13 +2087,24 @@ public class BlockManager implements BlockStatsMXBean {
continue;
}
- // never use already decommissioned nodes, maintenance node not
- // suitable for read or unknown state replicas.
- if (state == null || state == StoredReplicaState.DECOMMISSIONED
+ // Never use a maintenance node that is not suitable for read,
+ // or a replica in an unknown state.
+ if (state == null
|| state == StoredReplicaState.MAINTENANCE_NOT_FOR_READ) {
continue;
}
+ // Save the live decommissioned replica in case we need it. Such replicas
+ // are normally not used for replication, but if nothing else is
+ // available, one can be selected as a source.
+ if (state == StoredReplicaState.DECOMMISSIONED) {
+ if (decommissionedSrc == null ||
+ ThreadLocalRandom.current().nextBoolean()) {
+ decommissionedSrc = node;
+ }
+ continue;
+ }
+
if (priority != LowRedundancyBlocks.QUEUE_HIGHEST_PRIORITY
&& (!node.isDecommissionInProgress() && !node.isEnteringMaintenance())
&& node.getNumberOfBlocksToBeReplicated() >= maxReplicationStreams) {
@@ -2123,6 +2136,13 @@ public class BlockManager implements BlockStatsMXBean {
srcNodes.set(0, node);
}
}
+
+ // Pick a live decommissioned replica, if nothing else is available.
+ if (!isStriped && nodesContainingLiveReplicas.isEmpty() &&
+ srcNodes.isEmpty() && decommissionedSrc != null) {
+ srcNodes.add(decommissionedSrc);
+ }
+
return srcNodes.toArray(new DatanodeDescriptor[srcNodes.size()]);
}
@@ -3036,7 +3056,7 @@ public class BlockManager implements BlockStatsMXBean {
int curReplicaDelta;
if (result == AddBlockResult.ADDED) {
- curReplicaDelta = 1;
+ curReplicaDelta = (node.isDecommissioned()) ? 0 : 1;
if (logEveryBlock) {
blockLog.debug("BLOCK* addStoredBlock: {} is added to {} (size={})",
node, storedBlock, storedBlock.getNumBytes());
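The hunk above remembers at most one live DECOMMISSIONED replica as a last-resort source, replacing the remembered candidate on a coin flip so repeated scans do not always settle on the same node. A standalone sketch of that trick (illustrative only; a deliberate simplification rather than uniform reservoir sampling):
import java.util.List;
import java.util.concurrent.ThreadLocalRandom;
final class FallbackSourcePicker {
  /** Returns one element chosen by repeated coin flips, or null if empty. */
  static <T> T pick(List<T> decommissionedReplicas) {
    T candidate = null;
    for (T node : decommissionedReplicas) {
      // Replace the remembered candidate with probability 1/2: cheap, and
      // good enough to spread reconstruction reads across such nodes.
      if (candidate == null || ThreadLocalRandom.current().nextBoolean()) {
        candidate = node;
      }
    }
    return candidate;
  }
}
The fallback is only consulted when no live or decommissioning replica exists, so the bias toward later candidates is harmless here.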
http://git-wip-us.apache.org/repos/asf/hadoop/blob/07b98e78/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java
index 3a26f4a..1a38480 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java
@@ -346,9 +346,9 @@ class LowRedundancyBlocks implements Iterable<BlockInfo> {
" curPri " + curPri +
" oldPri " + oldPri);
}
- if(oldPri != curPri) {
- remove(block, oldPri);
- }
+ // oldPri is mostly correct, but not always. If not found with oldPri,
+ // other levels will be searched until the block is found & removed.
+ remove(block, oldPri);
if(priorityQueues.get(curPri).add(block)) {
NameNode.blockStateChangeLog.debug(
"BLOCK* NameSystem.LowRedundancyBlock.update: {} has only {} "
http://git-wip-us.apache.org/repos/asf/hadoop/blob/07b98e78/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
index 8bdaa74..3cf025c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
@@ -46,6 +46,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
@@ -72,6 +73,7 @@ public class TestDecommissioningStatus {
private static FileSystem fileSys;
private static HostsFileWriter hostsFileWriter;
private static Configuration conf;
+ private Logger LOG;
final ArrayList<String> decommissionedNodes = new ArrayList<String>(numDatanodes);
@@ -89,8 +91,7 @@ public class TestDecommissioningStatus {
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
conf.setInt(
DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY, 4);
- conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY,
- 1000);
+ conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 1);
conf.setLong(DFSConfigKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY, 1);
@@ -100,6 +101,7 @@ public class TestDecommissioningStatus {
cluster.getNamesystem().getBlockManager().getDatanodeManager()
.setHeartbeatExpireInterval(3000);
Logger.getLogger(DecommissionManager.class).setLevel(Level.DEBUG);
+ LOG = Logger.getLogger(TestDecommissioningStatus.class);
}
@After
@@ -366,4 +368,111 @@ public class TestDecommissioningStatus {
hostsFileWriter.initExcludeHost("");
dm.refreshNodes(conf);
}
+
+ @Test(timeout=120000)
+ public void testDecommissionLosingData() throws Exception {
+ ArrayList<String> nodes = new ArrayList<String>(2);
+ FSNamesystem fsn = cluster.getNamesystem();
+ BlockManager bm = fsn.getBlockManager();
+ DatanodeManager dm = bm.getDatanodeManager();
+ Path file1 = new Path("decommissionLosingData.dat");
+ DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize,
+ (short)2, seed);
+ Thread.sleep(1000);
+
+ // Shutdown dn1
+ LOG.info("Shutdown dn1");
+ DatanodeID dnID = cluster.getDataNodes().get(1).getDatanodeId();
+ String dnName = dnID.getXferAddr();
+ DatanodeDescriptor dnDescriptor1 = dm.getDatanode(dnID);
+ nodes.add(dnName);
+ DataNodeProperties stoppedDN1 = cluster.stopDataNode(1);
+ DFSTestUtil.waitForDatanodeState(cluster, dnID.getDatanodeUuid(),
+ false, 30000);
+
+ // Shutdown dn0
+ LOG.info("Shutdown dn0");
+ dnID = cluster.getDataNodes().get(0).getDatanodeId();
+ dnName = dnID.getXferAddr();
+ DatanodeDescriptor dnDescriptor0 = dm.getDatanode(dnID);
+ nodes.add(dnName);
+ DataNodeProperties stoppedDN0 = cluster.stopDataNode(0);
+ DFSTestUtil.waitForDatanodeState(cluster, dnID.getDatanodeUuid(),
+ false, 30000);
+
+ // Decommission the nodes.
+ LOG.info("Decommissioning nodes");
+ hostsFileWriter.initExcludeHosts(nodes);
+ dm.refreshNodes(conf);
+ BlockManagerTestUtil.recheckDecommissionState(dm);
+ assertTrue(dnDescriptor0.isDecommissioned());
+ assertTrue(dnDescriptor1.isDecommissioned());
+
+ // All nodes are dead and decommed. Blocks should be missing.
+ long missingBlocks = bm.getMissingBlocksCount();
+ long underreplicated = bm.getUnderReplicatedBlocksCount();
+ assertTrue(missingBlocks > 0);
+ assertTrue(underreplicated > 0);
+
+ // Bring back dn0
+ LOG.info("Bring back dn0");
+ cluster.restartDataNode(stoppedDN0, true);
+ do {
+ dnID = cluster.getDataNodes().get(0).getDatanodeId();
+ } while (dnID == null);
+ dnDescriptor0 = dm.getDatanode(dnID);
+ // Wait until it sends a block report.
+ while (dnDescriptor0.numBlocks() == 0) {
+ Thread.sleep(100);
+ }
+
+ // Bring back dn1
+ LOG.info("Bring back dn1");
+ cluster.restartDataNode(stoppedDN1, true);
+ do {
+ dnID = cluster.getDataNodes().get(1).getDatanodeId();
+ } while (dnID == null);
+ dnDescriptor1 = dm.getDatanode(dnID);
+ // Wait until it sends a block report.
+ while (dnDescriptor1.numBlocks() == 0) {
+ Thread.sleep(100);
+ }
+
+ // Blocks should still be under-replicated
+ Thread.sleep(2000); // Let replication monitor run
+ assertEquals(underreplicated, bm.getUnderReplicatedBlocksCount());
+
+ // Start up two more nodes.
+ LOG.info("Starting two more nodes");
+ cluster.startDataNodes(conf, 2, true, null, null);
+ cluster.waitActive();
+ // Replication should fix it.
+ int count = 0;
+ while((bm.getUnderReplicatedBlocksCount() > 0 ||
+ bm.getPendingReconstructionBlocksCount() > 0) &&
+ count++ < 10) {
+ Thread.sleep(1000);
+ }
+
+ assertEquals(0, bm.getUnderReplicatedBlocksCount());
+ assertEquals(0, bm.getPendingReconstructionBlocksCount());
+ assertEquals(0, bm.getMissingBlocksCount());
+
+ // Shutdown the extra nodes.
+ dnID = cluster.getDataNodes().get(3).getDatanodeId();
+ cluster.stopDataNode(3);
+ DFSTestUtil.waitForDatanodeState(cluster, dnID.getDatanodeUuid(),
+ false, 30000);
+
+ dnID = cluster.getDataNodes().get(2).getDatanodeId();
+ cluster.stopDataNode(2);
+ DFSTestUtil.waitForDatanodeState(cluster, dnID.getDatanodeUuid(),
+ false, 30000);
+
+ // Call refreshNodes on FSNamesystem with empty exclude file to remove the
+ // datanode from decommissioning list and make it available again.
+ hostsFileWriter.initExcludeHost("");
+ dm.refreshNodes(conf);
+ fileSys.delete(file1, false);
+ }
}
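The hand-rolled sleep loops above could also be expressed as bounded waits; a hedged sketch using GenericTestUtils.waitFor (not part of the commit), assuming bm is the test's BlockManager:
import java.util.concurrent.TimeoutException;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.test.GenericTestUtils;
final class ReplicationWaits {
  static void waitForFullReplication(final BlockManager bm)
      throws TimeoutException, InterruptedException {
    // Poll every second and give up after 60 seconds, instead of
    // counting iterations by hand as the loop above does.
    GenericTestUtils.waitFor(
        () -> bm.getUnderReplicatedBlocksCount() == 0
            && bm.getPendingReconstructionBlocksCount() == 0,
        1000, 60000);
  }
}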
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org
[40/50] [abbrv] hadoop git commit: YARN-6565. Fix memory leak and
finish app trigger in AMRMProxy. (Botong Huang via Subru).
Posted by ae...@apache.org.
YARN-6565. Fix memory leak and finish app trigger in AMRMProxy. (Botong Huang via Subru).
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d6eed5ac
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d6eed5ac
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d6eed5ac
Branch: refs/heads/HDFS-7240
Commit: d6eed5acca222a281401c1d524824f0460ea7826
Parents: e4f34ec
Author: Subru Krishnan <su...@apache.org>
Authored: Fri May 5 16:27:49 2017 -0700
Committer: Subru Krishnan <su...@apache.org>
Committed: Fri May 5 16:27:49 2017 -0700
----------------------------------------------------------------------
.../nodemanager/amrmproxy/AMRMProxyService.java | 6 +-
.../TestAMRMProxyTokenSecretManager.java | 81 ++++++++++++++++++++
2 files changed, 86 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6eed5ac/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
index 5e91a20..9f2d9a1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
@@ -320,6 +320,10 @@ public class AMRMProxyService extends AbstractService implements
LOG.info("Request to stop an application that does not exist. Id:"
+ applicationId);
} else {
+ // Remove the appAttempt in AMRMTokenSecretManager
+ this.secretManager
+ .applicationMasterFinished(pipeline.getApplicationAttemptId());
+
LOG.info("Stopping the request processing pipeline for application: "
+ applicationId);
try {
@@ -548,7 +552,7 @@ public class AMRMProxyService extends AbstractService implements
event.getApplicationID());
if (app != null) {
switch (event.getType()) {
- case FINISH_APPLICATION:
+ case APPLICATION_RESOURCES_CLEANEDUP:
LOG.info("Application stop event received for stopping AppId:"
+ event.getApplicationID().toString());
AMRMProxyService.this.stopApplication(event.getApplicationID());
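The first hunk above is the leak fix: the per-attempt entry now leaves the AMRMProxyTokenSecretManager when the application's pipeline is stopped. As a rough sketch of the pattern being closed (illustrative only; the class and names below are hypothetical, not the actual manager):
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
final class PerAttemptSecretStore<K, V> {
  private final Map<K, V> secrets = new ConcurrentHashMap<>();
  void register(K attemptId, V secret) {
    secrets.put(attemptId, secret);
  }
  // Without this call on the application-stop path, entries accumulate
  // for the lifetime of the NodeManager; that is the leak the commit fixes.
  void applicationMasterFinished(K attemptId) {
    secrets.remove(attemptId);
  }
}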
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6eed5ac/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestAMRMProxyTokenSecretManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestAMRMProxyTokenSecretManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestAMRMProxyTokenSecretManager.java
new file mode 100644
index 0000000..927563e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestAMRMProxyTokenSecretManager.java
@@ -0,0 +1,81 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.nodemanager.amrmproxy;
+
+import java.io.ByteArrayInputStream;
+import java.io.DataInputStream;
+import java.io.IOException;
+
+import org.apache.hadoop.security.token.SecretManager.InvalidToken;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Unit test for AMRMProxyTokenSecretManager.
+ */
+public class TestAMRMProxyTokenSecretManager {
+
+ private YarnConfiguration conf;
+ private AMRMProxyTokenSecretManager secretManager;
+
+ @Before
+ public void setup() {
+ conf = new YarnConfiguration();
+ secretManager = new AMRMProxyTokenSecretManager(conf);
+ secretManager.start();
+ }
+
+ @After
+ public void breakdown() {
+ if (secretManager != null) {
+ secretManager.stop();
+ }
+ }
+
+ @Test
+ public void testNormalCase() throws IOException {
+ ApplicationId appId = ApplicationId.newInstance(1, 1);
+ ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
+
+ Token<AMRMTokenIdentifier> localToken =
+ secretManager.createAndGetAMRMToken(attemptId);
+
+ AMRMTokenIdentifier identifier = secretManager.createIdentifier();
+ identifier.readFields(new DataInputStream(
+ new ByteArrayInputStream(localToken.getIdentifier())));
+
+ secretManager.retrievePassword(identifier);
+
+ secretManager.applicationMasterFinished(attemptId);
+
+ try {
+ secretManager.retrievePassword(identifier);
+ Assert.fail("Expect InvalidToken exception");
+ } catch (InvalidToken e) {
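+ // Expected: the token must be rejected once the attempt has finished.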
+ }
+ }
+
+}
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org
[36/50] [abbrv] hadoop git commit: HADOOP-14382 Remove usages of
MoreObjects.toStringHelper. Contributed by Andrew Wang
Posted by ae...@apache.org.
HADOOP-14382 Remove usages of MoreObjects.toStringHelper.
Contributed by Andrew Wang
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4e6bbd04
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4e6bbd04
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4e6bbd04
Branch: refs/heads/HDFS-7240
Commit: 4e6bbd049dead7008942bda09dfd54542c407f48
Parents: 97c2e57
Author: Steve Loughran <st...@apache.org>
Authored: Fri May 5 13:03:09 2017 +0100
Committer: Steve Loughran <st...@apache.org>
Committed: Fri May 5 13:03:09 2017 +0100
----------------------------------------------------------------------
.../apache/hadoop/metrics2/AbstractMetric.java | 16 +++++++++-------
.../org/apache/hadoop/metrics2/MetricsTag.java | 13 +++++++------
.../metrics2/impl/AbstractMetricsRecord.java | 16 ++++++++--------
.../org/apache/hadoop/metrics2/impl/MsInfo.java | 9 +++++----
.../hadoop/metrics2/lib/MetricsInfoImpl.java | 11 +++++++----
.../hadoop/metrics2/lib/MetricsRegistry.java | 20 +++++++++++---------
.../hadoop/metrics2/source/JvmMetricsInfo.java | 9 +++++----
.../hadoop/metrics2/util/MetricsCache.java | 18 +++++++++---------
.../hadoop/metrics2/impl/TestKafkaMetrics.java | 8 +++++---
9 files changed, 66 insertions(+), 54 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e6bbd04/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/AbstractMetric.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/AbstractMetric.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/AbstractMetric.java
index 0605156..e2574f6 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/AbstractMetric.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/AbstractMetric.java
@@ -18,13 +18,14 @@
package org.apache.hadoop.metrics2;
-import com.google.common.base.MoreObjects;
import com.google.common.base.Objects;
-import static com.google.common.base.Preconditions.*;
-
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
+import java.util.StringJoiner;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+
/**
* The immutable metric
*/
@@ -84,10 +85,11 @@ public abstract class AbstractMetric implements MetricsInfo {
return Objects.hashCode(info, value());
}
- @Override public String toString() {
- return MoreObjects.toStringHelper(this)
- .add("info", info)
- .add("value", value())
+ @Override
+ public String toString() {
+ return new StringJoiner(", ", this.getClass().getSimpleName() + "{", "}")
+ .add("info=" + info)
+ .add("value=" + value())
.toString();
}
}
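The same mechanical substitution repeats across the remaining files: StringJoiner preserves the output shape of the Guava helper, so for a class Foo both forms print Foo{name=value, ...}. A minimal standalone comparison (illustrative only):
import java.util.StringJoiner;
public final class ToStringDemo {
  public static void main(String[] args) {
    // Same shape MoreObjects.toStringHelper(this) used to produce,
    // without the Guava dependency.
    String s = new StringJoiner(", ", "ToStringDemo{", "}")
        .add("info=" + "cpuUsage")
        .add("value=" + 42)
        .toString();
    System.out.println(s);  // ToStringDemo{info=cpuUsage, value=42}
  }
}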
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e6bbd04/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsTag.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsTag.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsTag.java
index 68b0737..db8a5d9 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsTag.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsTag.java
@@ -18,13 +18,14 @@
package org.apache.hadoop.metrics2;
-import com.google.common.base.MoreObjects;
import com.google.common.base.Objects;
-import static com.google.common.base.Preconditions.*;
-
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
+import java.util.StringJoiner;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+
/**
* Immutable tag for metrics (for grouping on host/queue/username etc.)
*/
@@ -81,9 +82,9 @@ public class MetricsTag implements MetricsInfo {
}
@Override public String toString() {
- return MoreObjects.toStringHelper(this)
- .add("info", info)
- .add("value", value())
+ return new StringJoiner(", ", this.getClass().getSimpleName() + "{", "}")
+ .add("info=" + info)
+ .add("value=" + value())
.toString();
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e6bbd04/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/AbstractMetricsRecord.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/AbstractMetricsRecord.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/AbstractMetricsRecord.java
index fec29c2..a4632c6 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/AbstractMetricsRecord.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/AbstractMetricsRecord.java
@@ -18,12 +18,12 @@
package org.apache.hadoop.metrics2.impl;
-import com.google.common.base.MoreObjects;
import com.google.common.base.Objects;
import com.google.common.collect.Iterables;
-
import org.apache.hadoop.metrics2.MetricsRecord;
+import java.util.StringJoiner;
+
abstract class AbstractMetricsRecord implements MetricsRecord {
@Override public boolean equals(Object obj) {
@@ -44,12 +44,12 @@ abstract class AbstractMetricsRecord implements MetricsRecord {
}
@Override public String toString() {
- return MoreObjects.toStringHelper(this)
- .add("timestamp", timestamp())
- .add("name", name())
- .add("description", description())
- .add("tags", tags())
- .add("metrics", Iterables.toString(metrics()))
+ return new StringJoiner(", ", this.getClass().getSimpleName() + "{", "}")
+ .add("timestamp=" + timestamp())
+ .add("name=" + name())
+ .add("description=" + description())
+ .add("tags=" + tags())
+ .add("metrics=" + Iterables.toString(metrics()))
.toString();
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e6bbd04/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MsInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MsInfo.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MsInfo.java
index 5de7edc..0bf5c78 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MsInfo.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MsInfo.java
@@ -18,11 +18,11 @@
package org.apache.hadoop.metrics2.impl;
-import com.google.common.base.MoreObjects;
-
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.metrics2.MetricsInfo;
+import java.util.StringJoiner;
+
/**
* Metrics system related metrics info instances
*/
@@ -48,8 +48,9 @@ public enum MsInfo implements MetricsInfo {
}
@Override public String toString() {
- return MoreObjects.toStringHelper(this)
- .add("name", name()).add("description", desc)
+ return new StringJoiner(", ", this.getClass().getSimpleName() + "{", "}")
+ .add("name=" + name())
+ .add("description=" + desc)
.toString();
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e6bbd04/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MetricsInfoImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MetricsInfoImpl.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MetricsInfoImpl.java
index 054f211..e3adc82 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MetricsInfoImpl.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MetricsInfoImpl.java
@@ -18,11 +18,13 @@
package org.apache.hadoop.metrics2.lib;
-import com.google.common.base.MoreObjects;
import com.google.common.base.Objects;
-import static com.google.common.base.Preconditions.*;
import org.apache.hadoop.metrics2.MetricsInfo;
+import java.util.StringJoiner;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+
/**
* Making implementing metric info a little easier
*/
@@ -56,8 +58,9 @@ class MetricsInfoImpl implements MetricsInfo {
}
@Override public String toString() {
- return MoreObjects.toStringHelper(this)
- .add("name", name).add("description", description)
+ return new StringJoiner(", ", this.getClass().getSimpleName() + "{", "}")
+ .add("name=" + name)
+ .add("description=" + description)
.toString();
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e6bbd04/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MetricsRegistry.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MetricsRegistry.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MetricsRegistry.java
index 7070869..9727954 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MetricsRegistry.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MetricsRegistry.java
@@ -18,20 +18,19 @@
package org.apache.hadoop.metrics2.lib;
-import java.util.Collection;
-import java.util.Map;
-
import com.google.common.collect.Maps;
-import com.google.common.base.MoreObjects;
-
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.metrics2.MetricsInfo;
import org.apache.hadoop.metrics2.MetricsException;
+import org.apache.hadoop.metrics2.MetricsInfo;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.MetricsTag;
import org.apache.hadoop.metrics2.impl.MsInfo;
+import java.util.Collection;
+import java.util.Map;
+import java.util.StringJoiner;
+
/**
* An optional metrics registry class for creating and maintaining a
* collection of MetricsMutables, making writing metrics source easier.
@@ -440,9 +439,12 @@ public class MetricsRegistry {
}
}
- @Override public String toString() {
- return MoreObjects.toStringHelper(this)
- .add("info", metricsInfo).add("tags", tags()).add("metrics", metrics())
+ @Override
+ public String toString() {
+ return new StringJoiner(", ", this.getClass().getSimpleName() + "{", "}")
+ .add("info=" + metricsInfo.toString())
+ .add("tags=" + tags())
+ .add("metrics=" + metrics())
.toString();
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e6bbd04/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetricsInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetricsInfo.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetricsInfo.java
index 59a79fd..8da6785 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetricsInfo.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetricsInfo.java
@@ -21,7 +21,7 @@ package org.apache.hadoop.metrics2.source;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.metrics2.MetricsInfo;
-import com.google.common.base.MoreObjects;
+import java.util.StringJoiner;
/**
* JVM and logging related metrics info instances
@@ -60,8 +60,9 @@ public enum JvmMetricsInfo implements MetricsInfo {
@Override public String description() { return desc; }
@Override public String toString() {
- return MoreObjects.toStringHelper(this)
- .add("name", name()).add("description", desc)
- .toString();
+ return new StringJoiner(", ", this.getClass().getSimpleName() + "{", "}")
+ .add("name=" + name())
+ .add("description=" + desc)
+ .toString();
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e6bbd04/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/MetricsCache.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/MetricsCache.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/MetricsCache.java
index 753e307..cfd126c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/MetricsCache.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/MetricsCache.java
@@ -18,11 +18,7 @@
package org.apache.hadoop.metrics2.util;
-import java.util.Collection;
-import java.util.LinkedHashMap;
-import java.util.Map;
-import java.util.Set;
-
+import com.google.common.collect.Maps;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
@@ -31,8 +27,11 @@ import org.apache.hadoop.metrics2.AbstractMetric;
import org.apache.hadoop.metrics2.MetricsRecord;
import org.apache.hadoop.metrics2.MetricsTag;
-import com.google.common.base.MoreObjects;
-import com.google.common.collect.Maps;
+import java.util.Collection;
+import java.util.LinkedHashMap;
+import java.util.Map;
+import java.util.Set;
+import java.util.StringJoiner;
/**
* A metrics cache for sinks that don't support sparse updates.
@@ -127,8 +126,9 @@ public class MetricsCache {
}
@Override public String toString() {
- return MoreObjects.toStringHelper(this)
- .add("tags", tags).add("metrics", metrics)
+ return new StringJoiner(", ", this.getClass().getSimpleName() + "{", "}")
+ .add("tags=" + tags)
+ .add("metrics=" + metrics)
.toString();
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e6bbd04/hadoop-tools/hadoop-kafka/src/test/java/org/apache/hadoop/metrics2/impl/TestKafkaMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-kafka/src/test/java/org/apache/hadoop/metrics2/impl/TestKafkaMetrics.java b/hadoop-tools/hadoop-kafka/src/test/java/org/apache/hadoop/metrics2/impl/TestKafkaMetrics.java
index bee6aaa..cd40ac8 100644
--- a/hadoop-tools/hadoop-kafka/src/test/java/org/apache/hadoop/metrics2/impl/TestKafkaMetrics.java
+++ b/hadoop-tools/hadoop-kafka/src/test/java/org/apache/hadoop/metrics2/impl/TestKafkaMetrics.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.metrics2.impl;
-import com.google.common.base.MoreObjects;
import com.google.common.collect.Lists;
import org.apache.commons.configuration2.SubsetConfiguration;
import org.apache.hadoop.metrics2.AbstractMetric;
@@ -40,6 +39,7 @@ import org.slf4j.LoggerFactory;
import java.net.InetAddress;
import java.text.SimpleDateFormat;
import java.util.Date;
+import java.util.StringJoiner;
import java.util.concurrent.Future;
import static org.junit.Assert.assertEquals;
@@ -74,8 +74,10 @@ public class TestKafkaMetrics {
@Override
public String toString() {
- return MoreObjects.toStringHelper(this).add("name", name())
- .add("description", desc).toString();
+ return new StringJoiner(", ", this.getClass().getSimpleName() + "{", "}")
+ .add("name=" + name())
+ .add("description=" + desc)
+ .toString();
}
}
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org
[35/50] [abbrv] hadoop git commit: HDFS-11530. Use HDFS specific
network topology to choose datanode in BlockPlacementPolicyDefault.
Contributed by Yiqun Lin and Chen Liang.
Posted by ae...@apache.org.
HDFS-11530. Use HDFS specific network topology to choose datanode in BlockPlacementPolicyDefault. Contributed by Yiqun Lin and Chen Liang.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/97c2e576
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/97c2e576
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/97c2e576
Branch: refs/heads/HDFS-7240
Commit: 97c2e576c91c2316c2b52bfc948bae9bff8ca49f
Parents: 3082552
Author: Yiqun Lin <yq...@apache.org>
Authored: Fri May 5 11:54:50 2017 +0800
Committer: Yiqun Lin <yq...@apache.org>
Committed: Fri May 5 11:54:50 2017 +0800
----------------------------------------------------------------------
.../org/apache/hadoop/hdfs/DFSConfigKeys.java | 4 +
.../hadoop/hdfs/net/DFSNetworkTopology.java | 24 +++-
.../hadoop/hdfs/net/DFSTopologyNodeImpl.java | 137 +++++++++++++++++++
.../BlockPlacementPolicyDefault.java | 36 ++++-
.../blockmanagement/DatanodeDescriptor.java | 36 ++++-
.../server/blockmanagement/DatanodeManager.java | 19 ++-
.../src/main/resources/hdfs-default.xml | 8 ++
.../TestDefaultBlockPlacementPolicy.java | 46 +++++++
8 files changed, 302 insertions(+), 8 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/97c2e576/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 0ca344c..b95c7e6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -1085,6 +1085,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
"httpfs.buffer.size";
public static final int HTTP_BUFFER_SIZE_DEFAULT = 4096;
+ public static final String DFS_USE_DFS_NETWORK_TOPOLOGY_KEY =
+ "dfs.use.dfs.network.topology";
+ public static final boolean DFS_USE_DFS_NETWORK_TOPOLOGY_DEFAULT = false;
+
// dfs.client.retry confs are moved to HdfsClientConfigKeys.Retry
@Deprecated
public static final String DFS_CLIENT_RETRY_POLICY_ENABLED_KEY
http://git-wip-us.apache.org/repos/asf/hadoop/blob/97c2e576/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSNetworkTopology.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSNetworkTopology.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSNetworkTopology.java
index 259e275..e74cdec 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSNetworkTopology.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSNetworkTopology.java
@@ -19,8 +19,10 @@ package org.apache.hadoop.hdfs.net;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
+
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;
@@ -204,10 +206,24 @@ public class DFSNetworkTopology extends NetworkTopology {
}
if (excludedNodes != null) {
for (Node excludedNode : excludedNodes) {
- // all excluded nodes should be DatanodeDescriptor
- Preconditions.checkArgument(excludedNode instanceof DatanodeDescriptor);
- availableCount -= ((DatanodeDescriptor) excludedNode)
- .hasStorageType(type) ? 1 : 0;
+ if (excludedNode instanceof DatanodeDescriptor) {
+ availableCount -= ((DatanodeDescriptor) excludedNode)
+ .hasStorageType(type) ? 1 : 0;
+ } else if (excludedNode instanceof DFSTopologyNodeImpl) {
+ availableCount -= ((DFSTopologyNodeImpl) excludedNode)
+ .getSubtreeStorageCount(type);
+ } else if (excludedNode instanceof DatanodeInfo) {
+ // find out the corresponding DatanodeDescriptor object, because
+ // we need to get its storage type info.
+ // This could be an expensive operation, but fortunately the excluded
+ // nodes set is supposed to be very small.
+ String nodeLocation = excludedNode.getNetworkLocation()
+ + "/" + excludedNode.getName();
+ DatanodeDescriptor dn = (DatanodeDescriptor)getNode(nodeLocation);
+ availableCount -= dn.hasStorageType(type) ? 1 : 0;
+ } else {
+ LOG.error("Unexpected node type: {}.", excludedNode.getClass());
+ }
}
}
if (availableCount <= 0) {
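The availableCount bookkeeping lets the caller bail out before attempting a random choice once every candidate with the wanted storage type is excluded. A tiny sketch of that accounting idea (illustrative only; names here are hypothetical):
import java.util.Collection;
import java.util.function.Predicate;
final class AvailabilityCounter {
  // Returns how many candidates remain after subtracting excluded nodes
  // that actually carry the wanted storage type; <= 0 means choosing
  // randomly is futile and the caller can return early.
  static <N> int remaining(int totalWithType, Collection<N> excluded,
      Predicate<N> hasType) {
    int count = totalWithType;
    for (N node : excluded) {
      if (hasType.test(node)) {
        count--;
      }
    }
    return count;
  }
}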
http://git-wip-us.apache.org/repos/asf/hadoop/blob/97c2e576/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSTopologyNodeImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSTopologyNodeImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSTopologyNodeImpl.java
index 6d80db5..002f4fc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSTopologyNodeImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSTopologyNodeImpl.java
@@ -18,11 +18,14 @@
package org.apache.hadoop.hdfs.net;
import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.net.InnerNode;
import org.apache.hadoop.net.InnerNodeImpl;
import org.apache.hadoop.net.Node;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import java.util.EnumMap;
import java.util.EnumSet;
@@ -36,6 +39,9 @@ import java.util.HashMap;
*/
public class DFSTopologyNodeImpl extends InnerNodeImpl {
+ public static final Logger LOG =
+ LoggerFactory.getLogger(DFSTopologyNodeImpl.class);
+
static final InnerNodeImpl.Factory FACTORY
= new DFSTopologyNodeImpl.Factory();
@@ -127,8 +133,68 @@ public class DFSTopologyNodeImpl extends InnerNodeImpl {
}
}
+ /**
+ * Called when add() is called to add a node that already exist.
+ *
+ * In normal execution, nodes are added only once and this should not happen.
+ * However if node restarts, we may run into the case where the same node
+ * tries to add itself again with potentially different storage type info.
+ * In this case this method will update the meta data according to the new
+ * storage info.
+ *
+ * Note that it is important to also update all the ancestors if we have
+ * updated the local node storage info.
+ *
+ * @param dnDescriptor the node that is added another time, with potentially
+ * different storage types.
+ */
+ private void updateExistingDatanode(DatanodeDescriptor dnDescriptor) {
+ if (childrenStorageInfo.containsKey(dnDescriptor.getName())) {
+ // all existing node should have an entry in childrenStorageInfo
+ boolean same = dnDescriptor.getStorageTypes().size()
+ == childrenStorageInfo.get(dnDescriptor.getName()).keySet().size();
+ for (StorageType type :
+ childrenStorageInfo.get(dnDescriptor.getName()).keySet()) {
+ same = same && dnDescriptor.hasStorageType(type);
+ }
+ if (same) {
+ // if the storage type hasn't been changed, do nothing.
+ return;
+ }
+ // not same means we need to update the storage info.
+ DFSTopologyNodeImpl parent = (DFSTopologyNodeImpl)getParent();
+ for (StorageType type :
+ childrenStorageInfo.get(dnDescriptor.getName()).keySet()) {
+ if (!dnDescriptor.hasStorageType(type)) {
+ // remove this type, because the new storage info does not have it.
+ // also need to decrement the count for all the ancestors.
+ // since this is the parent of n, where n is a datanode,
+ // the map must have 1 as the value of all keys
+ childrenStorageInfo.get(dnDescriptor.getName()).remove(type);
+ decStorageTypeCount(type);
+ if (parent != null) {
+ parent.childRemoveStorage(getName(), type);
+ }
+ }
+ }
+ for (StorageType type : dnDescriptor.getStorageTypes()) {
+ if (!childrenStorageInfo.get(dnDescriptor.getName())
+ .containsKey(type)) {
+ // there is a new type in new storage info, add this locally,
+ // as well as all ancestors.
+ childrenStorageInfo.get(dnDescriptor.getName()).put(type, 1);
+ incStorageTypeCount(type);
+ if (parent != null) {
+ parent.childAddStorage(getName(), type);
+ }
+ }
+ }
+ }
+ }
+
@Override
public boolean add(Node n) {
+ LOG.debug("adding node {}", n.getName());
if (!isAncestor(n)) {
throw new IllegalArgumentException(n.getName()
+ ", which is located at " + n.getNetworkLocation()
@@ -149,6 +215,7 @@ public class DFSTopologyNodeImpl extends InnerNodeImpl {
for(int i=0; i<children.size(); i++) {
if (children.get(i).getName().equals(n.getName())) {
children.set(i, n);
+ updateExistingDatanode(dnDescriptor);
return false;
}
}
@@ -227,6 +294,7 @@ public class DFSTopologyNodeImpl extends InnerNodeImpl {
@Override
public boolean remove(Node n) {
+ LOG.debug("removing node {}", n.getName());
if (!isAncestor(n)) {
throw new IllegalArgumentException(n.getName()
+ ", which is located at " + n.getNetworkLocation()
@@ -299,4 +367,73 @@ public class DFSTopologyNodeImpl extends InnerNodeImpl {
return isRemoved;
}
}
+
+ /**
+ * Called by a child node of the current node to increment a storage count.
+ *
+ * A lock is needed as different datanodes may call recursively to modify
+ * the same parent.
+ * TODO : this may not happen at all, depending on how heartbeat is processed
+ * @param childName the name of the child that tries to add the storage type
+ * @param type the type being incremented.
+ */
+ public synchronized void childAddStorage(
+ String childName, StorageType type) {
+ LOG.debug("child add storage: {}:{}", childName, type);
+ // childrenStorageInfo should definitely contain this node already
+ // because updateStorage is called after node added
+ Preconditions.checkArgument(childrenStorageInfo.containsKey(childName));
+ EnumMap<StorageType, Integer> typeCount =
+ childrenStorageInfo.get(childName);
+ if (typeCount.containsKey(type)) {
+ typeCount.put(type, typeCount.get(type) + 1);
+ } else {
+ // Please be aware that the counts are always "number of datanodes in
+ // this subtree" rather than "number of storages in this subtree".
+ // So if the caller is a datanode, it should always be this branch rather
+ // than the +1 branch above. This depends on the caller in
+ // DatanodeDescriptor to make sure it calls this only when a *new*
+ // storage type is added (it should not call this when an already
+ // existing storage is added). There is no such restriction for
+ // inner nodes.
+ typeCount.put(type, 1);
+ }
+ if (storageTypeCounts.containsKey(type)) {
+ storageTypeCounts.put(type, storageTypeCounts.get(type) + 1);
+ } else {
+ storageTypeCounts.put(type, 1);
+ }
+ if (getParent() != null) {
+ ((DFSTopologyNodeImpl)getParent()).childAddStorage(getName(), type);
+ }
+ }
+
+ /**
+ * Called by a child node of the current node to decrement a storage count.
+ *
+ * @param childName the name of the child removing a storage type.
+ * @param type the type being removed.
+ */
+ public synchronized void childRemoveStorage(
+ String childName, StorageType type) {
+ LOG.debug("child remove storage: {}:{}", childName, type);
+ Preconditions.checkArgument(childrenStorageInfo.containsKey(childName));
+ EnumMap<StorageType, Integer> typeCount =
+ childrenStorageInfo.get(childName);
+ Preconditions.checkArgument(typeCount.containsKey(type));
+ if (typeCount.get(type) > 1) {
+ typeCount.put(type, typeCount.get(type) - 1);
+ } else {
+ typeCount.remove(type);
+ }
+ Preconditions.checkArgument(storageTypeCounts.containsKey(type));
+ if (storageTypeCounts.get(type) > 1) {
+ storageTypeCounts.put(type, storageTypeCounts.get(type) - 1);
+ } else {
+ storageTypeCounts.remove(type);
+ }
+ if (getParent() != null) {
+ ((DFSTopologyNodeImpl)getParent()).childRemoveStorage(getName(), type);
+ }
+ }
}
\ No newline at end of file
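
For readers following the counting logic above: the two maps track "number of
datanodes in the subtree that have a given storage type". Below is a minimal,
self-contained sketch of that bookkeeping, not the actual DFSTopologyNodeImpl;
the enum is a stand-in for org.apache.hadoop.fs.StorageType, and merge()
replaces the explicit containsKey/put branches in the patch.

    import java.util.EnumMap;
    import java.util.HashMap;
    import java.util.Map;

    public class StorageCountSketch {
      enum StorageType { DISK, SSD, ARCHIVE }

      // child name -> (storage type -> datanode count in that child's subtree)
      private final Map<String, EnumMap<StorageType, Integer>> childrenStorageInfo =
          new HashMap<>();
      // aggregate counts over all children of this inner node
      private final EnumMap<StorageType, Integer> storageTypeCounts =
          new EnumMap<>(StorageType.class);

      void childAddStorage(String childName, StorageType type) {
        childrenStorageInfo
            .computeIfAbsent(childName, k -> new EnumMap<>(StorageType.class))
            .merge(type, 1, Integer::sum);
        storageTypeCounts.merge(type, 1, Integer::sum);
      }

      void childRemoveStorage(String childName, StorageType type) {
        EnumMap<StorageType, Integer> typeCount = childrenStorageInfo.get(childName);
        if (typeCount.merge(type, -1, Integer::sum) == 0) {
          typeCount.remove(type);  // drop the key once the count reaches zero
        }
        if (storageTypeCounts.merge(type, -1, Integer::sum) == 0) {
          storageTypeCounts.remove(type);
        }
      }

      public static void main(String[] args) {
        StorageCountSketch rack = new StorageCountSketch();
        rack.childAddStorage("dn1", StorageType.DISK);
        rack.childAddStorage("dn2", StorageType.DISK);
        rack.childRemoveStorage("dn1", StorageType.DISK);
        // prints {DISK=1}: counts are datanodes per subtree, not storages
        System.out.println(rack.storageTypeCounts);
      }
    }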
http://git-wip-us.apache.org/repos/asf/hadoop/blob/97c2e576/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index 7676334..a245f0c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.hdfs.AddBlockFlag;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.net.DFSNetworkTopology;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;
@@ -713,7 +714,22 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
boolean badTarget = false;
DatanodeStorageInfo firstChosen = null;
while (numOfReplicas > 0) {
- DatanodeDescriptor chosenNode = chooseDataNode(scope, excludedNodes);
+ // the storage type that the chosen node is known to have
+ StorageType includeType = null;
+ DatanodeDescriptor chosenNode = null;
+ if (clusterMap instanceof DFSNetworkTopology) {
+ for (StorageType type : storageTypes.keySet()) {
+ chosenNode = chooseDataNode(scope, excludedNodes, type);
+
+ if (chosenNode != null) {
+ includeType = type;
+ break;
+ }
+ }
+ } else {
+ chosenNode = chooseDataNode(scope, excludedNodes);
+ }
+
if (chosenNode == null) {
break;
}
@@ -729,6 +745,13 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
for (Iterator<Map.Entry<StorageType, Integer>> iter = storageTypes
.entrySet().iterator(); iter.hasNext();) {
Map.Entry<StorageType, Integer> entry = iter.next();
+
+ // If there is a storage type the node is already known to contain,
+ // there is no need to loop through the other storage types.
+ if (includeType != null && entry.getKey() != includeType) {
+ continue;
+ }
+
storage = chooseStorage4Block(
chosenNode, blocksize, results, entry.getKey());
if (storage != null) {
@@ -782,6 +805,17 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
}
/**
+ * Choose a datanode from the given <i>scope</i> with the specified
+ * storage type.
+ * @return the chosen node, if any.
+ */
+ protected DatanodeDescriptor chooseDataNode(final String scope,
+ final Collection<Node> excludedNodes, StorageType type) {
+ return (DatanodeDescriptor) ((DFSNetworkTopology) clusterMap)
+ .chooseRandomWithStorageTypeTwoTrial(scope, excludedNodes, type);
+ }
+
+ /**
* Choose a good storage of given storage type from datanode, and add it to
* the result list.
*
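
A toy, self-contained rendering of the selection pattern introduced above (the
node map and helper below are hypothetical stand-ins, not the real
chooseDataNode): try each requested storage type until a node is found,
remember the matching type in includeType, and later skip every other type
when picking a storage on that node.

    import java.util.EnumSet;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Map;
    import java.util.Set;

    public class TypeAwareChooserSketch {
      enum StorageType { DISK, SSD }

      // hypothetical cluster: node name -> storage types it has
      static final Map<String, EnumSet<StorageType>> NODES = Map.of(
          "dn1", EnumSet.of(StorageType.DISK),
          "dn2", EnumSet.of(StorageType.SSD));

      // stand-in for chooseDataNode(scope, excludedNodes, type)
      static String chooseDataNode(Set<String> excluded, StorageType type) {
        return NODES.entrySet().stream()
            .filter(e -> !excluded.contains(e.getKey()))
            .filter(e -> e.getValue().contains(type))
            .map(Map.Entry::getKey)
            .findFirst().orElse(null);
      }

      public static void main(String[] args) {
        // requested types in priority order, as in storageTypes.keySet()
        List<StorageType> requested = List.of(StorageType.SSD, StorageType.DISK);
        StorageType includeType = null;
        String chosenNode = null;
        for (StorageType type : requested) {
          chosenNode = chooseDataNode(new HashSet<>(), type);
          if (chosenNode != null) {
            includeType = type;  // only this type is considered on the node
            break;
          }
        }
        System.out.println(chosenNode + " via " + includeType); // dn2 via SSD
      }
    }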
http://git-wip-us.apache.org/repos/asf/hadoop/blob/97c2e576/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
index d0583b3..4b87fd4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
@@ -35,6 +35,7 @@ import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.net.DFSTopologyNodeImpl;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -494,7 +495,16 @@ public class DatanodeDescriptor extends DatanodeInfo {
// blocks.
for (final DatanodeStorageInfo storageInfo : excessStorages.values()) {
if (storageInfo.numBlocks() == 0) {
- storageMap.remove(storageInfo.getStorageID());
+ DatanodeStorageInfo info =
+ storageMap.remove(storageInfo.getStorageID());
+ if (!hasStorageType(info.getStorageType())) {
+ // we removed a storage and, as a result, there is no storage of this
+ // type left on the node; inform the parent about this.
+ if (getParent() instanceof DFSTopologyNodeImpl) {
+ ((DFSTopologyNodeImpl) getParent()).childRemoveStorage(getName(),
+ info.getStorageType());
+ }
+ }
LOG.info("Removed storage {} from DataNode {}", storageInfo, this);
} else {
// This can occur until all block reports are received.
@@ -911,9 +921,20 @@ public class DatanodeDescriptor extends DatanodeInfo {
DatanodeStorageInfo updateStorage(DatanodeStorage s) {
synchronized (storageMap) {
DatanodeStorageInfo storage = storageMap.get(s.getStorageID());
+ DFSTopologyNodeImpl parent = null;
+ if (getParent() instanceof DFSTopologyNodeImpl) {
+ parent = (DFSTopologyNodeImpl) getParent();
+ }
+
if (storage == null) {
LOG.info("Adding new storage ID {} for DN {}", s.getStorageID(),
getXferAddr());
+ StorageType type = s.getStorageType();
+ if (!hasStorageType(type) && parent != null) {
+ // we are about to add a type this node currently does not have,
+ // inform the parent that a new type is added to this datanode
+ parent.childAddStorage(getName(), s.getStorageType());
+ }
storage = new DatanodeStorageInfo(this, s);
storageMap.put(s.getStorageID(), storage);
} else if (storage.getState() != s.getState() ||
@@ -921,8 +942,21 @@ public class DatanodeDescriptor extends DatanodeInfo {
// For backwards compatibility, make sure that the type and
// state are updated. Some reports from older datanodes do
// not include these fields so we may have assumed defaults.
+ StorageType oldType = storage.getStorageType();
+ StorageType newType = s.getStorageType();
+ if (oldType != newType && !hasStorageType(newType) && parent != null) {
+ // we are about to add a type this node currently does not have;
+ // inform the parent that a new type is added to this datanode.
+ // if old == new, nothing has changed, so don't bother
+ parent.childAddStorage(getName(), newType);
+ }
storage.updateFromStorage(s);
storageMap.put(storage.getStorageID(), storage);
+ if (oldType != newType && !hasStorageType(oldType) && parent != null) {
+ // there is no storage of the old type left on this datanode; inform
+ // the parent about this change.
+ parent.childRemoveStorage(getName(), oldType);
+ }
}
return storage;
}
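
One subtlety in updateStorage above is ordering: on a type change, the parent
is told about the new type before updateFromStorage runs, and about the old
type only afterwards, and in both cases only if the datanode as a whole gains
or loses that type. A hedged sketch of just that decision (otherTypes is a
hypothetical stand-in for what hasStorageType sees, excluding the storage
being updated):

    import java.util.EnumSet;

    public class UpdateStorageOrderingSketch {
      enum StorageType { DISK, SSD }

      // hypothetical: types the datanode has on its *other* storages
      static EnumSet<StorageType> otherTypes = EnumSet.of(StorageType.DISK);

      static void onTypeChange(StorageType oldType, StorageType newType) {
        if (oldType != newType && !otherTypes.contains(newType)) {
          System.out.println("notify parent: childAddStorage(" + newType + ")");
        }
        // storage.updateFromStorage(s) happens at this point in the real code
        if (oldType != newType && !otherTypes.contains(oldType)) {
          System.out.println("notify parent: childRemoveStorage(" + oldType + ")");
        }
      }

      public static void main(String[] args) {
        // a storage flips from SSD to DISK while another DISK storage exists:
        // no add (DISK already counted), but SSD disappears from the node
        onTypeChange(StorageType.SSD, StorageType.DISK);
      }
    }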
http://git-wip-us.apache.org/repos/asf/hadoop/blob/97c2e576/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index a61aa78..7dcc9fd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.net.DFSNetworkTopology;
import org.apache.hadoop.hdfs.protocol.*;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
@@ -186,6 +187,11 @@ public class DatanodeManager {
*/
private final boolean dataNodeDiskStatsEnabled;
+ /**
+ * Whether to use DFSNetworkTopology to choose nodes for placing replicas.
+ */
+ private final boolean useDfsNetworkTopology;
+
@Nullable
private final SlowPeerTracker slowPeerTracker;
@Nullable
@@ -205,8 +211,17 @@ public class DatanodeManager {
final Configuration conf) throws IOException {
this.namesystem = namesystem;
this.blockManager = blockManager;
-
- networktopology = NetworkTopology.getInstance(conf);
+
+ // TODO: Enable DFSNetworkTopology by default after more stress
+ // testing and validation.
+ this.useDfsNetworkTopology = conf.getBoolean(
+ DFSConfigKeys.DFS_USE_DFS_NETWORK_TOPOLOGY_KEY,
+ DFSConfigKeys.DFS_USE_DFS_NETWORK_TOPOLOGY_DEFAULT);
+ if (useDfsNetworkTopology) {
+ networktopology = DFSNetworkTopology.getInstance(conf);
+ } else {
+ networktopology = NetworkTopology.getInstance(conf);
+ }
this.heartbeatManager = new HeartbeatManager(namesystem, blockManager, conf);
this.decomManager = new DecommissionManager(namesystem, blockManager,
http://git-wip-us.apache.org/repos/asf/hadoop/blob/97c2e576/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 0f33b70..f0f2220 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -4505,4 +4505,12 @@
</description>
</property>
+ <property>
+ <name>dfs.use.dfs.network.topology</name>
+ <value>false</value>
+ <description>
+ When enabled, DFSNetworkTopology is used to choose nodes for placing replicas.
+ </description>
+ </property>
+
</configuration>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/97c2e576/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDefaultBlockPlacementPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDefaultBlockPlacementPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDefaultBlockPlacementPolicy.java
index 0931ff4..eab1199 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDefaultBlockPlacementPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDefaultBlockPlacementPolicy.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.net.DFSNetworkTopology;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -98,6 +99,51 @@ public class TestDefaultBlockPlacementPolicy {
}
/**
+ * Verify local node selection when using DFSNetworkTopology.
+ */
+ @Test
+ public void testPlacementWithDFSNetworkTopology() throws Exception {
+ Configuration conf = new HdfsConfiguration();
+ final String[] racks = {"/RACK0", "/RACK0", "/RACK2", "/RACK3", "/RACK2"};
+ final String[] hosts = {"/host0", "/host1", "/host2", "/host3", "/host4"};
+
+ // enables DFSNetworkTopology
+ conf.setBoolean(DFSConfigKeys.DFS_USE_DFS_NETWORK_TOPOLOGY_KEY, true);
+ conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
+ conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY,
+ DEFAULT_BLOCK_SIZE / 2);
+
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(5).racks(racks)
+ .hosts(hosts).build();
+ cluster.waitActive();
+ nameNodeRpc = cluster.getNameNodeRpc();
+ namesystem = cluster.getNamesystem();
+
+ DatanodeManager dm = namesystem.getBlockManager().getDatanodeManager();
+ assertTrue(dm.getNetworkTopology() instanceof DFSNetworkTopology);
+
+ String clientMachine = "/host3";
+ String clientRack = "/RACK3";
+ String src = "/test";
+ // Create the file with client machine
+ HdfsFileStatus fileStatus = namesystem.startFile(src, perm, clientMachine,
+ clientMachine, EnumSet.of(CreateFlag.CREATE), true, REPLICATION_FACTOR,
+ DEFAULT_BLOCK_SIZE, null, null, false);
+ LocatedBlock locatedBlock = nameNodeRpc.addBlock(src, clientMachine, null,
+ null, fileStatus.getFileId(), null, null);
+
+ assertEquals("Block should be allocated sufficient locations",
+ REPLICATION_FACTOR, locatedBlock.getLocations().length);
+ assertEquals("First datanode should be rack local", clientRack,
+ locatedBlock.getLocations()[0].getNetworkLocation());
+ nameNodeRpc.abandonBlock(locatedBlock.getBlock(), fileStatus.getFileId(),
+ src, clientMachine);
+ }
+
+ /**
* Verify decommissioned nodes should not be selected.
*/
@Test
[24/50] [abbrv] hadoop git commit: HDFS-11739. Fix regression in
tests caused by YARN-679. Contributed by Steve Loughran
Posted by ae...@apache.org.
HDFS-11739. Fix regression in tests caused by YARN-679. Contributed by Steve Loughran
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/83dded55
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/83dded55
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/83dded55
Branch: refs/heads/HDFS-7240
Commit: 83dded556dc179fcff078451fb80533065e116f0
Parents: d4631e4
Author: Mingliang Liu <li...@apache.org>
Authored: Wed May 3 11:22:44 2017 -0700
Committer: Mingliang Liu <li...@apache.org>
Committed: Wed May 3 11:22:44 2017 -0700
----------------------------------------------------------------------
.../src/main/java/org/apache/hadoop/util/ExitUtil.java | 8 ++++++--
.../hdfs/server/namenode/TestMetadataVersionOutput.java | 4 +++-
.../org/apache/hadoop/hdfs/server/namenode/TestStartup.java | 5 +++--
.../apache/hadoop/mapred/gridmix/TestGridmixSubmission.java | 3 ++-
4 files changed, 14 insertions(+), 6 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/83dded55/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ExitUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ExitUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ExitUtil.java
index 5642a23..dbe6663 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ExitUtil.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ExitUtil.java
@@ -38,6 +38,10 @@ public final class ExitUtil {
private static volatile boolean systemHaltDisabled = false;
private static volatile ExitException firstExitException;
private static volatile HaltException firstHaltException;
+ /** Message raised from an exit exception if none were provided: {@value}. */
+ public static final String EXIT_EXCEPTION_MESSAGE = "ExitException";
+ /** Message raised from a halt exception if none were provided: {@value}. */
+ public static final String HALT_EXCEPTION_MESSAGE = "HaltException";
private ExitUtil() {
}
@@ -285,7 +289,7 @@ public final class ExitUtil {
* @throws ExitException if {@link System#exit(int)} is disabled.
*/
public static void terminate(int status) throws ExitException {
- terminate(status, "");
+ terminate(status, EXIT_EXCEPTION_MESSAGE);
}
/**
@@ -306,7 +310,7 @@ public final class ExitUtil {
* @throws HaltException if {@link Runtime#halt(int)} is disabled.
*/
public static void halt(int status) throws HaltException {
- halt(status, "");
+ halt(status, HALT_EXCEPTION_MESSAGE);
}
/**
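
The regression here was that terminate(status) and halt(status) raised
exceptions with an empty message, so tests matching on the message text broke;
the constants give tests a stable string to assert on. A minimal usage sketch
(ExitUtil.disableSystemExit() is the standard way to turn exits into catchable
ExitExceptions):

    import org.apache.hadoop.util.ExitUtil;
    import org.apache.hadoop.util.ExitUtil.ExitException;

    public class ExitMessageSketch {
      public static void main(String[] args) {
        ExitUtil.disableSystemExit();  // exits become ExitExceptions
        try {
          ExitUtil.terminate(1);       // caller supplies no message
        } catch (ExitException e) {
          // with this patch the message is the constant, not the empty string
          System.out.println(
              e.getMessage().contains(ExitUtil.EXIT_EXCEPTION_MESSAGE)); // true
        }
      }
    }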
http://git-wip-us.apache.org/repos/asf/hadoop/blob/83dded55/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetadataVersionOutput.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetadataVersionOutput.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetadataVersionOutput.java
index dc8f70a..e1663e2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetadataVersionOutput.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetadataVersionOutput.java
@@ -24,6 +24,8 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
+import org.apache.hadoop.util.ExitUtil;
+
import org.junit.After;
import org.junit.Test;
@@ -78,7 +80,7 @@ public class TestMetadataVersionOutput {
try {
NameNode.createNameNode(new String[] { "-metadataVersion" }, conf);
} catch (Exception e) {
- assertExceptionContains("ExitException", e);
+ assertExceptionContains(ExitUtil.EXIT_EXCEPTION_MESSAGE, e);
}
/* Check if meta data version is printed correctly. */
final String verNumStr = HdfsServerConstants.NAMENODE_LAYOUT_VERSION + "";
http://git-wip-us.apache.org/repos/asf/hadoop/blob/83dded55/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
index 8c2acf6..29a6064 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
@@ -425,8 +425,9 @@ public class TestStartup {
SecondaryNameNode.main(argv);
fail("Failed to handle runtime exceptions during SNN startup!");
} catch (ExitException ee) {
- GenericTestUtils.assertExceptionContains("ExitException", ee);
- assertTrue("Didn't termiated properly ", ExitUtil.terminateCalled());
+ GenericTestUtils.assertExceptionContains(
+ ExitUtil.EXIT_EXCEPTION_MESSAGE, ee);
+ assertTrue("Didn't terminate properly ", ExitUtil.terminateCalled());
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/83dded55/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestGridmixSubmission.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestGridmixSubmission.java b/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestGridmixSubmission.java
index f1800c1..d4bfddc 100644
--- a/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestGridmixSubmission.java
+++ b/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestGridmixSubmission.java
@@ -39,6 +39,7 @@ import java.io.IOException;
import java.io.PrintStream;
import java.util.zip.GZIPInputStream;
+import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
import static org.junit.Assert.*;
public class TestGridmixSubmission extends CommonJobTest {
@@ -185,7 +186,7 @@ public class TestGridmixSubmission extends CommonJobTest {
DebugGridmix.main(argv);
} catch (ExitUtil.ExitException e) {
- assertEquals("ExitException", e.getMessage());
+ assertExceptionContains(ExitUtil.EXIT_EXCEPTION_MESSAGE, e);
ExitUtil.resetFirstExitException();
} finally {
System.setErr(oldOut);
[04/50] [abbrv] hadoop git commit: YARN-6519. Fix warnings from
Spotbugs in hadoop-yarn-server-resourcemanager. Contributed by Weiwei Yang.
Posted by ae...@apache.org.
YARN-6519. Fix warnings from Spotbugs in hadoop-yarn-server-resourcemanager. Contributed by Weiwei Yang.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/30fc5801
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/30fc5801
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/30fc5801
Branch: refs/heads/HDFS-7240
Commit: 30fc5801966feb7f9bdd7d79db75acc595102913
Parents: 64f68cb
Author: Naganarasimha <na...@apache.org>
Authored: Mon May 1 20:15:27 2017 +0530
Committer: Naganarasimha <na...@apache.org>
Committed: Mon May 1 20:15:27 2017 +0530
----------------------------------------------------------------------
.../ApplicationMasterService.java | 2 +-
.../ProportionalCapacityPreemptionPolicy.java | 17 ++++++----------
.../rmapp/attempt/RMAppAttemptImpl.java | 6 ++++--
.../rmapp/attempt/RMAppAttemptMetrics.java | 2 +-
.../scheduler/AbstractYarnScheduler.java | 2 +-
.../resourcemanager/scheduler/NodeType.java | 12 +++++++++--
.../resourcemanager/scheduler/QueueMetrics.java | 21 ++++++++++++++------
.../scheduler/capacity/CSQueueMetrics.java | 4 ++--
.../capacity/CapacitySchedulerQueueManager.java | 6 ++++--
.../scheduler/fair/FSQueueMetrics.java | 4 ++--
.../scheduler/fair/FSSchedulerNode.java | 8 ++++----
11 files changed, 50 insertions(+), 34 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/30fc5801/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
index 70a46a1..55b8fbb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
@@ -393,7 +393,7 @@ public class ApplicationMasterService extends AbstractService implements
return hasApplicationMasterRegistered;
}
- protected final static List<Container> EMPTY_CONTAINER_LIST =
+ private final static List<Container> EMPTY_CONTAINER_LIST =
new ArrayList<Container>();
protected static final Allocation EMPTY_ALLOCATION = new Allocation(
EMPTY_CONTAINER_LIST, Resources.createResource(0), null, null, null);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/30fc5801/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
index 3bf6994..dc6f1c2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
@@ -52,7 +52,6 @@ import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
-import java.util.Iterator;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
@@ -306,16 +305,12 @@ public class ProportionalCapacityPreemptionPolicy
private void cleanupStaledPreemptionCandidates(long currentTime) {
// Keep the preemptionCandidates list clean
- for (Iterator<RMContainer> i = preemptionCandidates.keySet().iterator();
- i.hasNext(); ) {
- RMContainer id = i.next();
- // garbage collect containers that are irrelevant for preemption
- // And avoid preempt selected containers for *this execution*
- // or within 1 ms
- if (preemptionCandidates.get(id) + 2 * maxWaitTime < currentTime) {
- i.remove();
- }
- }
+ // garbage collect containers that are irrelevant for preemption
+ // and avoid preempting containers selected for *this execution*
+ // or within 1 ms
+ preemptionCandidates.entrySet()
+ .removeIf(candidate ->
+ candidate.getValue() + 2 * maxWaitTime < currentTime);
}
private Set<String> getLeafQueueNames(TempQueuePerPartition q) {
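
The Spotbugs pattern commonly flagged here (WMI_WRONG_MAP_ITERATOR) is
iterating a map's keySet and then calling get() per key; iterating the
entrySet, or using removeIf as above, avoids the extra lookups and removes
entries safely in place. The same pattern in isolation:

    import java.util.HashMap;
    import java.util.Map;

    public class RemoveIfSketch {
      public static void main(String[] args) {
        Map<String, Long> lastSeen = new HashMap<>();
        lastSeen.put("container_1", 100L);
        lastSeen.put("container_2", 900L);
        final long cutoff = 500L;
        // single pass over entries, no per-key get(), safe in-place removal
        lastSeen.entrySet().removeIf(e -> e.getValue() < cutoff);
        System.out.println(lastSeen); // {container_2=900}
      }
    }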
http://git-wip-us.apache.org/repos/asf/hadoop/blob/30fc5801/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
index 19503e5..d66a97d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
@@ -1000,9 +1000,11 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable {
// if am crashed and not received this response, we should resend
// this msg again after am restart
if (!this.finishedContainersSentToAM.isEmpty()) {
- for (NodeId nodeId : this.finishedContainersSentToAM.keySet()) {
+ for (Map.Entry<NodeId, List<ContainerStatus>> finishedContainer
+ : this.finishedContainersSentToAM.entrySet()) {
List<ContainerStatus> containerStatuses =
- this.finishedContainersSentToAM.get(nodeId);
+ finishedContainer.getValue();
+ NodeId nodeId = finishedContainer.getKey();
this.justFinishedContainers.putIfAbsent(nodeId, new ArrayList<>());
this.justFinishedContainers.get(nodeId).addAll(containerStatuses);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/30fc5801/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptMetrics.java
index a642e45..e089050 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptMetrics.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptMetrics.java
@@ -152,7 +152,7 @@ public class RMAppAttemptMetrics {
public void incNumAllocatedContainers(NodeType containerType,
NodeType requestType) {
- localityStatistics[containerType.index][requestType.index]++;
+ localityStatistics[containerType.getIndex()][requestType.getIndex()]++;
totalAllocatedContainers++;
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/30fc5801/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
index b954bdf..c00b7be 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
@@ -132,7 +132,7 @@ public abstract class AbstractYarnScheduler
protected int nmExpireInterval;
protected long nmHeartbeatInterval;
- protected final static List<Container> EMPTY_CONTAINER_LIST =
+ private final static List<Container> EMPTY_CONTAINER_LIST =
new ArrayList<Container>();
protected static final Allocation EMPTY_ALLOCATION = new Allocation(
EMPTY_CONTAINER_LIST, Resources.createResource(0), null, null, null);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/30fc5801/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/NodeType.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/NodeType.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/NodeType.java
index 2b193bb..7bd15f0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/NodeType.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/NodeType.java
@@ -23,9 +23,17 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
*/
public enum NodeType {
NODE_LOCAL(0), RACK_LOCAL(1), OFF_SWITCH(2);
- public int index;
- private NodeType(int index) {
+ private final int index;
+
+ NodeType(int index) {
this.index = index;
}
+
+ /**
+ * @return the index of the node type
+ */
+ public int getIndex() {
+ return index;
+ }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/30fc5801/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
index 007d2b3..9a57876 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
@@ -142,20 +142,29 @@ public class QueueMetrics implements MetricsSource {
*/
@Private
public synchronized static void clearQueueMetrics() {
- queueMetrics.clear();
+ QUEUE_METRICS.clear();
}
-
+
/**
* Simple metrics cache to help prevent re-registrations.
*/
- protected final static Map<String, QueueMetrics> queueMetrics =
+ private static final Map<String, QueueMetrics> QUEUE_METRICS =
new HashMap<String, QueueMetrics>();
-
+
+ /**
+ * Returns the metrics cache to help prevent re-registrations.
+ *
+ * @return A string to {@link QueueMetrics} map.
+ */
+ protected static Map<String, QueueMetrics> getQueueMetrics() {
+ return QUEUE_METRICS;
+ }
+
public synchronized
static QueueMetrics forQueue(MetricsSystem ms, String queueName,
Queue parent, boolean enableUserMetrics,
Configuration conf) {
- QueueMetrics metrics = queueMetrics.get(queueName);
+ QueueMetrics metrics = QUEUE_METRICS.get(queueName);
if (metrics == null) {
metrics =
new QueueMetrics(ms, queueName, parent, enableUserMetrics, conf).
@@ -168,7 +177,7 @@ public class QueueMetrics implements MetricsSource {
sourceName(queueName).toString(),
"Metrics for queue: " + queueName, metrics);
}
- queueMetrics.put(queueName, metrics);
+ QUEUE_METRICS.put(queueName, metrics);
}
return metrics;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/30fc5801/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueMetrics.java
index a601b7b..c4d1934 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueMetrics.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueMetrics.java
@@ -115,7 +115,7 @@ public class CSQueueMetrics extends QueueMetrics {
public synchronized static CSQueueMetrics forQueue(String queueName,
Queue parent, boolean enableUserMetrics, Configuration conf) {
MetricsSystem ms = DefaultMetricsSystem.instance();
- QueueMetrics metrics = queueMetrics.get(queueName);
+ QueueMetrics metrics = QueueMetrics.getQueueMetrics().get(queueName);
if (metrics == null) {
metrics =
new CSQueueMetrics(ms, queueName, parent, enableUserMetrics, conf)
@@ -127,7 +127,7 @@ public class CSQueueMetrics extends QueueMetrics {
ms.register(sourceName(queueName).toString(), "Metrics for queue: "
+ queueName, metrics);
}
- queueMetrics.put(queueName, metrics);
+ QueueMetrics.getQueueMetrics().put(queueName, metrics);
}
return (CSQueueMetrics) metrics;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/30fc5801/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java
index be6243d..e33fbb3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java
@@ -69,9 +69,11 @@ public class CapacitySchedulerQueueManager implements SchedulerQueueManager<
new Comparator<CSQueue>() {
@Override
public int compare(CSQueue q1, CSQueue q2) {
- if (q1.getUsedCapacity() < q2.getUsedCapacity()) {
+ int result = Float.compare(q1.getUsedCapacity(),
+ q2.getUsedCapacity());
+ if (result < 0) {
return -1;
- } else if (q1.getUsedCapacity() > q2.getUsedCapacity()) {
+ } else if (result > 0) {
return 1;
}
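
The original comparator used < and > directly on floats; those operators treat
NaN as unordered (every comparison is false), which can silently violate the
comparator contract, and they cannot distinguish -0.0f from 0.0f. Float.compare
imposes a total order. A quick demonstration:

    public class FloatCompareSketch {
      public static void main(String[] args) {
        float a = Float.NaN, b = 1.0f;
        System.out.println(a < b);                      // false
        System.out.println(a > b);                      // false: NaN is unordered
        System.out.println(Float.compare(a, b));        // 1: NaN sorts last
        System.out.println(Float.compare(-0.0f, 0.0f)); // -1: -0.0 sorts first
      }
    }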
http://git-wip-us.apache.org/repos/asf/hadoop/blob/30fc5801/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueueMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueueMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueueMetrics.java
index 22306a0..4fe3973 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueueMetrics.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueueMetrics.java
@@ -206,7 +206,7 @@ public class FSQueueMetrics extends QueueMetrics {
public synchronized
static FSQueueMetrics forQueue(MetricsSystem ms, String queueName,
Queue parent, boolean enableUserMetrics, Configuration conf) {
- QueueMetrics metrics = queueMetrics.get(queueName);
+ QueueMetrics metrics = QueueMetrics.getQueueMetrics().get(queueName);
if (metrics == null) {
metrics = new FSQueueMetrics(ms, queueName, parent, enableUserMetrics, conf)
.tag(QUEUE_INFO, queueName);
@@ -217,7 +217,7 @@ public class FSQueueMetrics extends QueueMetrics {
sourceName(queueName).toString(),
"Metrics for queue: " + queueName, metrics);
}
- queueMetrics.put(queueName, metrics);
+ QueueMetrics.getQueueMetrics().put(queueName, metrics);
}
return (FSQueueMetrics)metrics;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/30fc5801/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSSchedulerNode.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSSchedulerNode.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSSchedulerNode.java
index 663e3c8..6575e0c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSSchedulerNode.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSSchedulerNode.java
@@ -155,10 +155,10 @@ public class FSSchedulerNode extends SchedulerNode {
* Remove apps that have their preemption requests fulfilled.
*/
private synchronized void cleanupPreemptionList() {
- Iterator<FSAppAttempt> iterator =
- resourcesPreemptedForApp.keySet().iterator();
- while (iterator.hasNext()) {
- FSAppAttempt app = iterator.next();
+ Iterator<Map.Entry<FSAppAttempt, Resource>> iterator =
+ resourcesPreemptedForApp.entrySet().iterator();
+ while(iterator.hasNext()) {
+ FSAppAttempt app = iterator.next().getKey();
if (app.isStopped() || !app.isStarved()) {
// App does not need more resources
Resources.subtractFrom(totalResourcesPreempted,
[47/50] [abbrv] hadoop git commit: YARN-6234. Support multiple
attempts on the node when AMRMProxy is enabled. (Giovanni Matteo Fumarola via
Subru).
Posted by ae...@apache.org.
YARN-6234. Support multiple attempts on the node when AMRMProxy is enabled. (Giovanni Matteo Fumarola via Subru).
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cd9ff27f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cd9ff27f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cd9ff27f
Branch: refs/heads/HDFS-7240
Commit: cd9ff27ffc9369820d0c39200a11bf00e6a767c8
Parents: 1769b12
Author: Subru Krishnan <su...@apache.org>
Authored: Mon May 8 16:41:30 2017 -0700
Committer: Subru Krishnan <su...@apache.org>
Committed: Mon May 8 16:41:30 2017 -0700
----------------------------------------------------------------------
.../nodemanager/amrmproxy/AMRMProxyService.java | 32 ++++++++++++++---
.../amrmproxy/TestAMRMProxyService.java | 36 ++++++++++++++++++++
2 files changed, 63 insertions(+), 5 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd9ff27f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
index 9f2d9a1..2696bca 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
@@ -270,18 +270,40 @@ public class AMRMProxyService extends AbstractService implements
* @param user
* @param amrmToken
*/
- protected void initializePipeline(
- ApplicationAttemptId applicationAttemptId, String user,
- Token<AMRMTokenIdentifier> amrmToken,
+ protected void initializePipeline(ApplicationAttemptId applicationAttemptId,
+ String user, Token<AMRMTokenIdentifier> amrmToken,
Token<AMRMTokenIdentifier> localToken) {
RequestInterceptorChainWrapper chainWrapper = null;
synchronized (applPipelineMap) {
- if (applPipelineMap.containsKey(applicationAttemptId.getApplicationId())) {
+ if (applPipelineMap
+ .containsKey(applicationAttemptId.getApplicationId())) {
LOG.warn("Request to start an already existing appId was received. "
+ " This can happen if an application failed and a new attempt "
+ "was created on this machine. ApplicationId: "
+ applicationAttemptId.toString());
- return;
+
+ RequestInterceptorChainWrapper chainWrapperBackup =
+ this.applPipelineMap.get(applicationAttemptId.getApplicationId());
+ if (chainWrapperBackup != null
+ && chainWrapperBackup.getApplicationAttemptId() != null
+ && !chainWrapperBackup.getApplicationAttemptId()
+ .equals(applicationAttemptId)) {
+ // Remove the existing pipeline
+ LOG.info("Remove the previous pipeline for ApplicationId: "
+ + applicationAttemptId.toString());
+ RequestInterceptorChainWrapper pipeline =
+ applPipelineMap.remove(applicationAttemptId.getApplicationId());
+ try {
+ pipeline.getRootInterceptor().shutdown();
+ } catch (Throwable ex) {
+ LOG.warn(
+ "Failed to shutdown the request processing pipeline for app:"
+ + applicationAttemptId.getApplicationId(),
+ ex);
+ }
+ } else {
+ return;
+ }
}
chainWrapper = new RequestInterceptorChainWrapper();
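
A stripped-down model of the new control flow (plain strings stand in for
ApplicationId/ApplicationAttemptId and the interceptor chain): a second
attempt with the same application id now shuts down and replaces the existing
chain instead of being silently ignored.

    import java.util.HashMap;
    import java.util.Map;

    public class PipelineReplaceSketch {
      // appId -> attemptId currently served (stand-in for applPipelineMap)
      static final Map<String, String> pipelines = new HashMap<>();

      static void initializePipeline(String appId, String attemptId) {
        String existing = pipelines.get(appId);
        if (existing != null) {
          if (existing.equals(attemptId)) {
            return;  // same attempt re-registering: keep the current chain
          }
          // a new attempt on this node: shut down the old chain, then replace
          System.out.println("shutdown chain of " + appId + "/" + existing);
          pipelines.remove(appId);
        }
        pipelines.put(appId, attemptId);
      }

      public static void main(String[] args) {
        initializePipeline("app_1", "attempt_1");
        initializePipeline("app_1", "attempt_2");  // replaces attempt_1's chain
        System.out.println(pipelines);             // {app_1=attempt_2}
      }
    }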
http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd9ff27f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestAMRMProxyService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestAMRMProxyService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestAMRMProxyService.java
index 7fffddf..837278c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestAMRMProxyService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestAMRMProxyService.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.yarn.server.nodemanager.amrmproxy;
+import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
@@ -27,10 +28,14 @@ import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse;
import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.server.nodemanager.amrmproxy.AMRMProxyService.RequestInterceptorChainWrapper;
import org.apache.hadoop.yarn.util.Records;
import org.junit.Assert;
import org.junit.Test;
@@ -380,6 +385,37 @@ public class TestAMRMProxyService extends BaseAMRMProxyTest {
}
}
+ @Test
+ public void testMultipleAttemptsSameNode()
+ throws YarnException, IOException, Exception {
+
+ String user = "hadoop";
+ ApplicationId appId = ApplicationId.newInstance(1, 1);
+ ApplicationAttemptId applicationAttemptId;
+
+ // First Attempt
+
+ RegisterApplicationMasterResponse response1 =
+ registerApplicationMaster(appId.getId());
+ Assert.assertNotNull(response1);
+
+ AllocateResponse allocateResponse = allocate(appId.getId());
+ Assert.assertNotNull(allocateResponse);
+
+ // Second Attempt
+
+ applicationAttemptId = ApplicationAttemptId.newInstance(appId, 2);
+ getAMRMProxyService().initializePipeline(applicationAttemptId, user, null,
+ null);
+
+ RequestInterceptorChainWrapper chain2 =
+ getAMRMProxyService().getPipelines().get(appId);
+ Assert.assertEquals(applicationAttemptId, chain2.getApplicationAttemptId());
+
+ allocateResponse = allocate(appId.getId());
+ Assert.assertNotNull(allocateResponse);
+ }
+
private List<Container> getContainersAndAssert(int appId,
int numberOfResourceRequests) throws Exception {
AllocateRequest allocateRequest =
[33/50] [abbrv] hadoop git commit: HDFS-11448. JN log segment syncing
should support HA upgrade. Contributed by Hanisha Koneru.
Posted by ae...@apache.org.
HDFS-11448. JN log segment syncing should support HA upgrade. Contributed by Hanisha Koneru.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/07761af3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/07761af3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/07761af3
Branch: refs/heads/HDFS-7240
Commit: 07761af357ef4da791df2972d7d3f049d6011c8d
Parents: 54e2b9e
Author: Arpit Agarwal <ar...@apache.org>
Authored: Thu May 4 15:57:44 2017 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Thu May 4 15:57:44 2017 -0700
----------------------------------------------------------------------
.../hadoop/hdfs/qjournal/server/JNStorage.java | 25 ++++-
.../hadoop/hdfs/qjournal/server/Journal.java | 21 +++-
.../hdfs/qjournal/server/JournalNodeSyncer.java | 104 ++++++++++++-------
.../hdfs/qjournal/TestJournalNodeSync.java | 1 +
4 files changed, 105 insertions(+), 46 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/07761af3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java
index 8f40f6b..7226cae 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java
@@ -58,6 +58,8 @@ class JNStorage extends Storage {
private static final List<Pattern> PAXOS_DIR_PURGE_REGEXES =
ImmutableList.of(Pattern.compile("(\\d+)"));
+ private static final String STORAGE_EDITS_SYNC = "edits.sync";
+
/**
* @param conf Configuration object
* @param logDir the path to the directory in which data will be stored
@@ -120,12 +122,29 @@ class JNStorage extends Storage {
return new File(sd.getCurrentDir(), name);
}
- File getTemporaryEditsFile(long startTxId, long endTxId, long timestamp) {
- return NNStorage.getTemporaryEditsFile(sd, startTxId, endTxId, timestamp);
+ File getCurrentDir() {
+ return sd.getCurrentDir();
+ }
+
+ /**
+ * Directory {@code edits.sync} temporarily holds the log segments
+ * downloaded through {@link JournalNodeSyncer} before they are moved to
+ * the {@code current} directory.
+ *
+ * @return the directory path
+ */
+ File getEditsSyncDir() {
+ return new File(sd.getRoot(), STORAGE_EDITS_SYNC);
+ }
+
+ File getTemporaryEditsFile(long startTxId, long endTxId) {
+ return new File(getEditsSyncDir(), String.format("%s_%019d-%019d",
+ NNStorage.NameNodeFile.EDITS.getName(), startTxId, endTxId));
}
File getFinalizedEditsFile(long startTxId, long endTxId) {
- return NNStorage.getFinalizedEditsFile(sd, startTxId, endTxId);
+ return new File(sd.getCurrentDir(), String.format("%s_%019d-%019d",
+ NNStorage.NameNodeFile.EDITS.getName(), startTxId, endTxId));
}
/**
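
For concreteness, the %019d format used above zero-pads transaction ids to 19
digits (enough for any long), so that edits file names sort lexicographically
in txid order; "edits" below stands in for
NNStorage.NameNodeFile.EDITS.getName():

    public class EditsFileNameSketch {
      public static void main(String[] args) {
        long startTxId = 1L, endTxId = 100L;
        String name = String.format("%s_%019d-%019d", "edits", startTxId, endTxId);
        // edits_0000000000000000001-0000000000000000100
        System.out.println(name);
      }
    }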
http://git-wip-us.apache.org/repos/asf/hadoop/blob/07761af3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
index ca21373..0041d5e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
@@ -24,6 +24,8 @@ import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStreamWriter;
import java.net.URL;
+import java.nio.file.Files;
+import java.nio.file.StandardCopyOption;
import java.security.PrivilegedExceptionAction;
import java.util.Iterator;
import java.util.List;
@@ -1092,19 +1094,28 @@ public class Journal implements Closeable {
committedTxnId.set(startTxId - 1);
}
- synchronized boolean renameTmpSegment(File tmpFile, File finalFile,
+ synchronized boolean moveTmpSegmentToCurrent(File tmpFile, File finalFile,
long endTxId) throws IOException {
final boolean success;
if (endTxId <= committedTxnId.get()) {
- success = tmpFile.renameTo(finalFile);
- if (!success) {
- LOG.warn("Unable to rename edits file from " + tmpFile + " to " +
+ if (!finalFile.getParentFile().exists()) {
+ LOG.error(finalFile.getParentFile() + " doesn't exist. Aborting tmp " +
+ "segment move to current directory");
+ return false;
+ }
+ Files.move(tmpFile.toPath(), finalFile.toPath(),
+ StandardCopyOption.ATOMIC_MOVE);
+ if (finalFile.exists() && FileUtil.canRead(finalFile)) {
+ success = true;
+ } else {
+ success = false;
+ LOG.warn("Unable to move edits file from " + tmpFile + " to " +
finalFile);
}
} else {
success = false;
LOG.error("The endTxId of the temporary file is not less than the " +
- "last committed transaction id. Aborting renaming to final file" +
+ "last committed transaction id. Aborting move to final file" +
finalFile);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/07761af3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeSyncer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeSyncer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeSyncer.java
index f195c00..788c5de 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeSyncer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeSyncer.java
@@ -41,7 +41,6 @@ import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
import org.apache.hadoop.hdfs.util.DataTransferThrottler;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.util.Daemon;
-import org.apache.hadoop.util.Time;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -98,6 +97,11 @@ public class JournalNodeSyncer {
void stopSync() {
shouldSync = false;
+ // Delete the edits.sync directory
+ File editsSyncDir = journal.getStorage().getEditsSyncDir();
+ if (editsSyncDir.exists()) {
+ FileUtil.fullyDelete(editsSyncDir);
+ }
if (syncJournalDaemon != null) {
syncJournalDaemon.interrupt();
}
@@ -112,6 +116,15 @@ public class JournalNodeSyncer {
}
}
+ private boolean createEditsSyncDir() {
+ File editsSyncDir = journal.getStorage().getEditsSyncDir();
+ if (editsSyncDir.exists()) {
+ LOG.info(editsSyncDir + " directory already exists.");
+ return true;
+ }
+ return editsSyncDir.mkdir();
+ }
+
private boolean getOtherJournalNodeProxies() {
List<InetSocketAddress> otherJournalNodes = getOtherJournalNodeAddrs();
if (otherJournalNodes == null || otherJournalNodes.isEmpty()) {
@@ -135,35 +148,51 @@ public class JournalNodeSyncer {
}
private void startSyncJournalsDaemon() {
- syncJournalDaemon = new Daemon(new Runnable() {
- @Override
- public void run() {
- while(shouldSync) {
- try {
- if (!journal.isFormatted()) {
- LOG.warn("Journal not formatted. Cannot sync.");
+ syncJournalDaemon = new Daemon(() -> {
+ // Wait for journal to be formatted to create edits.sync directory
+ while(!journal.isFormatted()) {
+ try {
+ Thread.sleep(journalSyncInterval);
+ } catch (InterruptedException e) {
+ LOG.error("JournalNodeSyncer daemon received Runtime exception.", e);
+ Thread.currentThread().interrupt();
+ return;
+ }
+ }
+ if (!createEditsSyncDir()) {
+ LOG.error("Failed to create directory for downloading log " +
+ "segments: %s. Stopping Journal Node Sync.",
+ journal.getStorage().getEditsSyncDir());
+ return;
+ }
+ while(shouldSync) {
+ try {
+ if (!journal.isFormatted()) {
+ LOG.warn("Journal cannot sync. Not formatted.");
+ } else {
+ syncJournals();
+ }
+ Thread.sleep(journalSyncInterval);
+ } catch (Throwable t) {
+ if (!shouldSync) {
+ if (t instanceof InterruptedException) {
+ LOG.info("Stopping JournalNode Sync.");
+ Thread.currentThread().interrupt();
+ return;
} else {
- syncJournals();
+ LOG.warn("JournalNodeSyncer received an exception while " +
+ "shutting down.", t);
}
- Thread.sleep(journalSyncInterval);
- } catch (Throwable t) {
- if (!shouldSync) {
- if (t instanceof InterruptedException) {
- LOG.info("Stopping JournalNode Sync.");
- } else {
- LOG.warn("JournalNodeSyncer received an exception while " +
- "shutting down.", t);
- }
- break;
- } else {
- if (t instanceof InterruptedException) {
- LOG.warn("JournalNodeSyncer interrupted", t);
- break;
- }
+ break;
+ } else {
+ if (t instanceof InterruptedException) {
+ LOG.warn("JournalNodeSyncer interrupted", t);
+ Thread.currentThread().interrupt();
+ return;
}
- LOG.error(
- "JournalNodeSyncer daemon received Runtime exception. ", t);
}
+ LOG.error(
+ "JournalNodeSyncer daemon received Runtime exception. ", t);
}
}
});
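The rewritten daemon above follows the standard interrupt-aware loop shape: do one pass of work, sleep, and on InterruptedException restore the thread's interrupt status and return instead of swallowing the interrupt. A stripped-down sketch of that shape (names are illustrative, not the actual Hadoop API):

    class SyncLoopSketch {
      private volatile boolean shouldSync = true; // flipped by a stop method
      private final long intervalMs = 1000L;

      Runnable loop() {
        return () -> {
          while (shouldSync) {
            try {
              doOneSyncPass();                    // stand-in for syncJournals()
              Thread.sleep(intervalMs);
            } catch (InterruptedException e) {
              Thread.currentThread().interrupt(); // preserve interrupt status
              return;                             // exit the daemon cleanly
            }
          }
        };
      }

      private void doOneSyncPass() { /* hypothetical work */ }
    }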
@@ -335,8 +364,8 @@ public class JournalNodeSyncer {
/**
* Transfer an edit log from one journal node to another for sync-up.
*/
- private boolean downloadMissingLogSegment(URL url, RemoteEditLog log) throws
- IOException {
+ private boolean downloadMissingLogSegment(URL url, RemoteEditLog log)
+ throws IOException {
LOG.info("Downloading missing Edit Log from " + url + " to " + jnStorage
.getRoot());
@@ -350,9 +379,10 @@ public class JournalNodeSyncer {
return true;
}
- final long milliTime = Time.monotonicNow();
- File tmpEditsFile = jnStorage.getTemporaryEditsFile(log.getStartTxId(), log
- .getEndTxId(), milliTime);
+ // Download the log segment to current.tmp directory first.
+ File tmpEditsFile = jnStorage.getTemporaryEditsFile(
+ log.getStartTxId(), log.getEndTxId());
+
try {
Util.doGetUrl(url, ImmutableList.of(tmpEditsFile), jnStorage, false,
logSegmentTransferTimeout, throttler);
@@ -367,14 +397,12 @@ public class JournalNodeSyncer {
LOG.info("Downloaded file " + tmpEditsFile.getName() + " of size " +
tmpEditsFile.length() + " bytes.");
- LOG.debug("Renaming " + tmpEditsFile.getName() + " to "
- + finalEditsFile.getName());
- boolean renameSuccess = journal.renameTmpSegment(tmpEditsFile,
+ final boolean moveSuccess = journal.moveTmpSegmentToCurrent(tmpEditsFile,
finalEditsFile, log.getEndTxId());
- if (!renameSuccess) {
- //If rename is not successful, delete the tmpFile
- LOG.debug("Renaming unsuccessful. Deleting temporary file: "
- + tmpEditsFile);
+ if (!moveSuccess) {
+ // If move is not successful, delete the tmpFile
+ LOG.debug("Move to current directory unsuccessful. Deleting temporary " +
+ "file: " + tmpEditsFile);
if (!tmpEditsFile.delete()) {
LOG.warn("Deleting " + tmpEditsFile + " has failed");
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/07761af3/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestJournalNodeSync.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestJournalNodeSync.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestJournalNodeSync.java
index 5375b02..8415a6f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestJournalNodeSync.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestJournalNodeSync.java
@@ -57,6 +57,7 @@ public class TestJournalNodeSync {
@Before
public void setUpMiniCluster() throws IOException {
final Configuration conf = new HdfsConfiguration();
+ conf.setBoolean(DFSConfigKeys.DFS_JOURNALNODE_ENABLE_SYNC_KEY, true);
conf.setLong(DFSConfigKeys.DFS_JOURNALNODE_SYNC_INTERVAL_KEY, 1000L);
qjmhaCluster = new MiniQJMHACluster.Builder(conf).setNumNameNodes(2)
.build();
[32/50] [abbrv] hadoop git commit: YARN-6375 App level aggregation should not consider metric values reported in the previous aggregation cycle (Varun Saxena via Vrushali C)
Posted by ae...@apache.org.
YARN-6375 App level aggregation should not consider metric values reported in the previous aggregation cycle (Varun Saxena via Vrushali C)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/54e2b9e8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/54e2b9e8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/54e2b9e8
Branch: refs/heads/HDFS-7240
Commit: 54e2b9e876fd91712c14ffbc4c49cd946f305aeb
Parents: 61858a5
Author: Vrushali Channapattan <vr...@apache.org>
Authored: Thu May 4 15:25:56 2017 -0700
Committer: Vrushali Channapattan <vr...@apache.org>
Committed: Thu May 4 15:25:56 2017 -0700
----------------------------------------------------------------------
.../collector/TimelineCollector.java | 23 +++--
.../collector/TestTimelineCollector.java | 95 +++++++++++++++++++-
2 files changed, 108 insertions(+), 10 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/54e2b9e8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollector.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollector.java
index c94c505..5416b26 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollector.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollector.java
@@ -310,13 +310,15 @@ public abstract class TimelineCollector extends CompositeService {
// Update aggregateTable
Map<String, TimelineMetric> aggrRow = aggregateTable.get(m);
if (aggrRow == null) {
- Map<String, TimelineMetric> tempRow = new ConcurrentHashMap<>();
+ Map<String, TimelineMetric> tempRow = new HashMap<>();
aggrRow = aggregateTable.putIfAbsent(m, tempRow);
if (aggrRow == null) {
aggrRow = tempRow;
}
}
- aggrRow.put(entityId, m);
+ synchronized (aggrRow) {
+ aggrRow.put(entityId, m);
+ }
}
}
@@ -335,14 +337,17 @@ public abstract class TimelineCollector extends CompositeService {
}
aggrMetric.setRealtimeAggregationOp(TimelineMetricOperation.NOP);
Map<Object, Object> status = new HashMap<>();
- for (TimelineMetric m : aggrRow.values()) {
- TimelineMetric.aggregateTo(m, aggrMetric, status);
- // getRealtimeAggregationOp returns an enum so we can directly
- // compare with "!=".
- if (m.getRealtimeAggregationOp()
- != aggrMetric.getRealtimeAggregationOp()) {
- aggrMetric.setRealtimeAggregationOp(m.getRealtimeAggregationOp());
+ synchronized (aggrRow) {
+ for (TimelineMetric m : aggrRow.values()) {
+ TimelineMetric.aggregateTo(m, aggrMetric, status);
+ // getRealtimeAggregationOp returns an enum so we can directly
+ // compare with "!=".
+ if (m.getRealtimeAggregationOp()
+ != aggrMetric.getRealtimeAggregationOp()) {
+ aggrMetric.setRealtimeAggregationOp(m.getRealtimeAggregationOp());
+ }
}
+ aggrRow.clear();
}
Set<TimelineMetric> metrics = e.getMetrics();
metrics.remove(aggrMetric);
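The inner row can safely drop from ConcurrentHashMap to plain HashMap because every access to it is now guarded by synchronized (aggrRow); the same lock also makes the aggregate-then-clear sequence atomic with respect to concurrent puts, which is what stops values from one aggregation cycle leaking into the next. A generic sketch of the pattern (illustrative types, not the Timeline Service classes):

    import java.util.HashMap;
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    class RowTableSketch<K, RK, V> {
      private final ConcurrentHashMap<K, Map<RK, V>> table =
          new ConcurrentHashMap<>();

      void put(K key, RK rowKey, V value) {
        Map<RK, V> row = table.get(key);
        if (row == null) {
          Map<RK, V> fresh = new HashMap<>();
          row = table.putIfAbsent(key, fresh); // lost the race? use the winner's
          if (row == null) {
            row = fresh;
          }
        }
        synchronized (row) {  // all row access goes through this lock
          row.put(rowKey, value);
        }
      }

      Map<RK, V> drain(K key) {
        Map<RK, V> row = table.get(key);
        if (row == null) {
          return new HashMap<>();
        }
        synchronized (row) {  // snapshot-and-clear is atomic, as in the diff
          Map<RK, V> copy = new HashMap<>(row);
          row.clear();
          return copy;
        }
      }
    }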
http://git-wip-us.apache.org/repos/asf/hadoop/blob/54e2b9e8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/collector/TestTimelineCollector.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/collector/TestTimelineCollector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/collector/TestTimelineCollector.java
index a55f227..0f17553 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/collector/TestTimelineCollector.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/collector/TestTimelineCollector.java
@@ -18,19 +18,27 @@
package org.apache.hadoop.yarn.server.timelineservice.collector;
+import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetricOperation;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities;
import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType;
import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
+import org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollector.AggregationStatusTable;
import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineWriter;
import org.junit.Test;
+import com.google.common.collect.Sets;
+
import java.io.IOException;
import java.util.HashSet;
+import java.util.Map;
import java.util.Set;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyLong;
@@ -187,4 +195,89 @@ public class TestTimelineCollector {
return context;
}
}
-}
+
+ private static TimelineEntity createEntity(String id, String type) {
+ TimelineEntity entity = new TimelineEntity();
+ entity.setId(id);
+ entity.setType(type);
+ return entity;
+ }
+
+ private static TimelineMetric createDummyMetric(long ts, Long value) {
+ TimelineMetric metric = new TimelineMetric();
+ metric.setId("dummy_metric");
+ metric.addValue(ts, value);
+ metric.setRealtimeAggregationOp(TimelineMetricOperation.SUM);
+ return metric;
+ }
+
+ @Test
+ public void testClearPreviousEntitiesOnAggregation() throws Exception {
+ final long ts = System.currentTimeMillis();
+ TimelineCollector collector = new TimelineCollector("") {
+ @Override
+ public TimelineCollectorContext getTimelineEntityContext() {
+ return new TimelineCollectorContext("cluster", "user", "flow", "1",
+ 1L, ApplicationId.newInstance(ts, 1).toString());
+ }
+ };
+ collector.init(new Configuration());
+ collector.setWriter(mock(TimelineWriter.class));
+
+ // Put 5 entities with different metric values.
+ TimelineEntities entities = new TimelineEntities();
+ for (int i = 1; i <=5; i++) {
+ TimelineEntity entity = createEntity("e" + i, "type");
+ entity.addMetric(createDummyMetric(ts + i, Long.valueOf(i * 50)));
+ entities.addEntity(entity);
+ }
+ collector.putEntities(entities, UserGroupInformation.getCurrentUser());
+
+ TimelineCollectorContext currContext = collector.getTimelineEntityContext();
+ // Aggregate the entities.
+ Map<String, AggregationStatusTable> aggregationGroups
+ = collector.getAggregationGroups();
+ assertEquals(Sets.newHashSet("type"), aggregationGroups.keySet());
+ TimelineEntity aggregatedEntity = TimelineCollector.
+ aggregateWithoutGroupId(aggregationGroups, currContext.getAppId(),
+ TimelineEntityType.YARN_APPLICATION.toString());
+ TimelineMetric aggregatedMetric =
+ aggregatedEntity.getMetrics().iterator().next();
+ assertEquals(750L, aggregatedMetric.getValues().values().iterator().next());
+ assertEquals(TimelineMetricOperation.SUM,
+ aggregatedMetric.getRealtimeAggregationOp());
+
+ // Aggregate entities.
+ aggregatedEntity = TimelineCollector.
+ aggregateWithoutGroupId(aggregationGroups, currContext.getAppId(),
+ TimelineEntityType.YARN_APPLICATION.toString());
+ aggregatedMetric = aggregatedEntity.getMetrics().iterator().next();
+ // No values aggregated as no metrics put for an entity between this
+ // aggregation and the previous one.
+ assertTrue(aggregatedMetric.getValues().isEmpty());
+ assertEquals(TimelineMetricOperation.NOP,
+ aggregatedMetric.getRealtimeAggregationOp());
+
+ // Put 3 entities.
+ entities = new TimelineEntities();
+ for (int i = 1; i <=3; i++) {
+ TimelineEntity entity = createEntity("e" + i, "type");
+ entity.addMetric(createDummyMetric(System.currentTimeMillis() + i, 50L));
+ entities.addEntity(entity);
+ }
+ aggregationGroups = collector.getAggregationGroups();
+ collector.putEntities(entities, UserGroupInformation.getCurrentUser());
+
+ // Aggregate entities.
+ aggregatedEntity = TimelineCollector.
+ aggregateWithoutGroupId(aggregationGroups, currContext.getAppId(),
+ TimelineEntityType.YARN_APPLICATION.toString());
+ // Last 3 entities picked up for aggregation.
+ aggregatedMetric = aggregatedEntity.getMetrics().iterator().next();
+ assertEquals(150L, aggregatedMetric.getValues().values().iterator().next());
+ assertEquals(TimelineMetricOperation.SUM,
+ aggregatedMetric.getRealtimeAggregationOp());
+
+ collector.close();
+ }
+}
\ No newline at end of file
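To see where the asserted totals come from: the first batch writes metric values i * 50 for i = 1..5, so SUM aggregation over the five entities gives 50 + 100 + 150 + 200 + 250 = 750; after the aggregation table is cleared, the second batch of three 50s gives 150. A quick check:

    long firstBatch = 0, secondBatch = 0;
    for (int i = 1; i <= 5; i++) firstBatch += i * 50L;  // 750
    for (int i = 1; i <= 3; i++) secondBatch += 50L;     // 150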
[42/50] [abbrv] hadoop git commit: HDFS-9342. Erasure coding: client should update and commit block based on acknowledged size. Contributed by SammiChen.
Posted by ae...@apache.org.
HDFS-9342. Erasure coding: client should update and commit block based on acknowledged size. Contributed by SammiChen.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a9a3d219
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a9a3d219
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a9a3d219
Branch: refs/heads/HDFS-7240
Commit: a9a3d219fed2dd9d7bb84c228f6b8d97eadbe1f6
Parents: 8065129
Author: Andrew Wang <wa...@apache.org>
Authored: Sun May 7 14:45:26 2017 -0700
Committer: Andrew Wang <wa...@apache.org>
Committed: Sun May 7 14:45:26 2017 -0700
----------------------------------------------------------------------
.../hadoop/hdfs/DFSStripedOutputStream.java | 28 ++++++++++++++++++++
1 file changed, 28 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9a3d219/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
index 3dd07f7..0fdae8c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
@@ -772,9 +772,37 @@ public class DFSStripedOutputStream extends DFSOutputStream {
newStorageIDs[i] = "";
}
}
+
+ // should update the block group length based on the acked length
+ final long sentBytes = currentBlockGroup.getNumBytes();
+ final long ackedBytes = getNumAckedStripes() * cellSize * numDataBlocks;
+ Preconditions.checkState(ackedBytes <= sentBytes);
+ currentBlockGroup.setNumBytes(ackedBytes);
+ newBG.setNumBytes(ackedBytes);
dfsClient.namenode.updatePipeline(dfsClient.clientName, currentBlockGroup,
newBG, newNodes, newStorageIDs);
currentBlockGroup = newBG;
+ currentBlockGroup.setNumBytes(sentBytes);
+ }
+
+ /**
+ * Get the number of acked stripes. An acked stripe means at least data block
+ * number size cells of the stripe were acked.
+ */
+ private long getNumAckedStripes() {
+ int minStripeNum = Integer.MAX_VALUE;
+ for (int i = 0; i < numAllBlocks; i++) {
+ final StripedDataStreamer streamer = getStripedDataStreamer(i);
+ if (streamer.isHealthy()) {
+ int curStripeNum = 0;
+ if (streamer.getBlock() != null) {
+ curStripeNum = (int) (streamer.getBlock().getNumBytes() / cellSize);
+ }
+ minStripeNum = Math.min(curStripeNum, minStripeNum);
+ }
+ }
+ assert minStripeNum != Integer.MAX_VALUE;
+ return minStripeNum;
}
private int stripeDataSize() {
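The committed length is computed in whole stripes: each healthy streamer's acked byte count is truncated to full cells, the minimum over streamers is the number of fully acknowledged stripes, and one stripe contributes cellSize bytes on each of the data blocks. A worked sketch of the arithmetic, simplified to the data streamers and using assumed figures (an RS(6,3)-style layout with 64 KiB cells):

    class AckedStripesSketch {
      public static void main(String[] args) {
        // Hypothetical figures: 6 data blocks, 64 KiB cells.
        final long cellSize = 64 * 1024;
        final int numDataBlocks = 6;

        // Acked bytes per healthy streamer; one streamer lags by a cell.
        long[] ackedPerStreamer = {3 * cellSize, 3 * cellSize, 2 * cellSize,
                                   3 * cellSize, 3 * cellSize, 3 * cellSize};

        long minStripes = Long.MAX_VALUE;
        for (long acked : ackedPerStreamer) {
          minStripes = Math.min(minStripes, acked / cellSize); // full stripes only
        }
        // 2 full stripes acked -> ackedBytes = 2 * 65536 * 6 = 786432.
        long ackedBytes = minStripes * cellSize * numDataBlocks;
        System.out.println(ackedBytes); // 786432
      }
    }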
[22/50] [abbrv] hadoop git commit: HDFS-11717. Add unit test for HDFS-11709 StandbyCheckpointer should handle non-existing legacyOivImageDir gracefully. Contributed by Erik Krogen.
Posted by ae...@apache.org.
HDFS-11717. Add unit test for HDFS-11709 StandbyCheckpointer should handle non-existing legacyOivImageDir gracefully. Contributed by Erik Krogen.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d9014bda
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d9014bda
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d9014bda
Branch: refs/heads/HDFS-7240
Commit: d9014bda93760f223789d2ec9f5e35f40de157d4
Parents: 8b82317
Author: Erik Krogen <ek...@linkedin.com>
Authored: Tue May 2 17:56:19 2017 -0700
Committer: Konstantin V Shvachko <sh...@apache.org>
Committed: Tue May 2 18:34:11 2017 -0700
----------------------------------------------------------------------
.../server/namenode/ha/StandbyCheckpointer.java | 2 +-
.../namenode/ha/TestStandbyCheckpoints.java | 20 ++++++++++++++++++++
2 files changed, 21 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9014bda/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java
index 2196caa..753447b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java
@@ -200,7 +200,7 @@ public class StandbyCheckpointer {
try {
img.saveLegacyOIVImage(namesystem, outputDir, canceler);
} catch (IOException ioe) {
- LOG.error("Exception encountered while saving legacy OIV image; "
+ LOG.warn("Exception encountered while saving legacy OIV image; "
+ "continuing with other checkpointing steps", ioe);
}
}
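The one-word change above downgrades a tolerated failure from error to warn: a problem writing the legacy OIV image should not abort the checkpoint, since the remaining steps can still succeed. The log-and-continue shape, as a sketch with illustrative names (the logging facade here is an assumption, not the class's actual logger):

    import java.io.IOException;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class CheckpointSketch {
      private static final Logger LOG =
          LoggerFactory.getLogger(CheckpointSketch.class);

      void checkpoint(String legacyOivDir) {
        try {
          saveLegacyOivImage(legacyOivDir); // stand-in for img.saveLegacyOIVImage
        } catch (IOException ioe) {
          // Non-fatal: warn and let the remaining checkpoint steps run.
          LOG.warn("Failed to save legacy OIV image; continuing checkpoint", ioe);
        }
        // ...remaining checkpoint steps...
      }

      private void saveLegacyOivImage(String dir) throws IOException {
        /* hypothetical */
      }
    }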
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9014bda/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
index ada62ba..2af373f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
@@ -27,6 +27,7 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
@@ -545,6 +546,25 @@ public class TestStandbyCheckpoints {
HATestUtil.waitForCheckpoint(cluster, 0, ImmutableList.of(23));
}
+ /**
+ * Test that checkpointing is still successful even if an issue
+ * was encountered while writing the legacy OIV image.
+ */
+ @Test(timeout=300000)
+ public void testCheckpointSucceedsWithLegacyOIVException() throws Exception {
+ // Delete the OIV image dir to cause an IOException while saving
+ FileUtil.fullyDelete(tmpOivImgDir);
+
+ doEdits(0, 10);
+ HATestUtil.waitForStandbyToCatchUp(nns[0], nns[1]);
+ // Once the standby catches up, it should notice that it needs to
+ // do a checkpoint and save one to its local directories.
+ HATestUtil.waitForCheckpoint(cluster, 1, ImmutableList.of(12));
+
+ // It should also upload it back to the active.
+ HATestUtil.waitForCheckpoint(cluster, 0, ImmutableList.of(12));
+ }
+
private void doEdits(int start, int stop) throws IOException {
for (int i = start; i < stop; i++) {
Path p = new Path("/test" + i);