You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by xy...@apache.org on 2018/07/02 20:32:46 UTC
[29/45] hadoop git commit: HADOOP-15548: Randomize local dirs.
Contributed by Jim Brennan.
HADOOP-15548: Randomize local dirs. Contributed by Jim Brennan.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d36f6b9e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d36f6b9e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d36f6b9e
Branch: refs/heads/HDDS-4
Commit: d36f6b9e93e4c30d24d0e837cb00bd24ffa8f274
Parents: 1004701
Author: Eric E Payne <er...@oath.com>
Authored: Fri Jun 29 18:18:32 2018 +0000
Committer: Eric E Payne <er...@oath.com>
Committed: Fri Jun 29 18:18:32 2018 +0000
----------------------------------------------------------------------
.../org/apache/hadoop/fs/LocalDirAllocator.java | 7 ++-
.../apache/hadoop/fs/TestLocalDirAllocator.java | 59 ++++++++++++++++++++
2 files changed, 65 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d36f6b9e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java
index c1e9d21..1c216f4 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java
@@ -418,7 +418,12 @@ public class LocalDirAllocator {
}
}
} else {
- int dirNum = ctx.getAndIncrDirNumLastAccessed();
+ // Start linear search with random increment if possible
+ int randomInc = 1;
+ if (numDirs > 2) {
+ randomInc += dirIndexRandomizer.nextInt(numDirs - 1);
+ }
+ int dirNum = ctx.getAndIncrDirNumLastAccessed(randomInc);
while (numDirsSearched < numDirs) {
long capacity = ctx.dirDF[dirNum].getAvailable();
if (capacity > size) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d36f6b9e/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalDirAllocator.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalDirAllocator.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalDirAllocator.java
index 825efe0..acda898 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalDirAllocator.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalDirAllocator.java
@@ -265,6 +265,65 @@ public class TestLocalDirAllocator {
}
}
+ /**
+ * Five buffer dirs, on read-write disk.
+ *
+ * Try to create a whole bunch of files.
+ * Verify that each successive creation uses a different disk
+ * than the previous one (for sized requests).
+ *
+ * Would ideally check statistical properties of the distribution, but
+ * we don't have the nerve to risk false positives here.
+ *
+ * @throws Exception
+ */
+ @Test (timeout = 30000)
+ public void testCreateManyFilesRandom() throws Exception {
+ assumeNotWindows();
+ final int numDirs = 5;
+ final int numTries = 100;
+ String[] dirs = new String[numDirs];
+ for (int d = 0; d < numDirs; ++d) {
+ dirs[d] = buildBufferDir(ROOT, d);
+ }
+ boolean nextDirNotSelectedAtLeastOnce = false;
+ try {
+ conf.set(CONTEXT, dirs[0] + "," + dirs[1] + "," + dirs[2] + ","
+ + dirs[3] + "," + dirs[4]);
+ Path[] paths = new Path[numDirs];
+ for (int d = 0; d < numDirs; ++d) {
+ paths[d] = new Path(dirs[d]);
+ assertTrue(localFs.mkdirs(paths[d]));
+ }
+
+ int inDir = 0;
+ int prevDir = -1;
+ int[] counts = new int[numDirs];
+ for (int i = 0; i < numTries; ++i) {
+ File result = createTempFile(SMALL_FILE_SIZE);
+ for (int d = 0; d < numDirs; ++d) {
+ if (result.getPath().startsWith(paths[d].toUri().getPath())) {
+ inDir = d;
+ break;
+ }
+ }
+ // Verify we always select a different dir
+ assertNotEquals(prevDir, inDir);
+ // Verify we are not always selecting the next dir - that was the old
+ // algorithm.
+ if ((prevDir != -1) && (inDir != ((prevDir + 1) % numDirs))) {
+ nextDirNotSelectedAtLeastOnce = true;
+ }
+ prevDir = inDir;
+ counts[inDir]++;
+ // Don't ignore the delete result: a leaked temp file would skew
+ // later iterations' dir selection.
+ assertTrue(result.delete());
+ }
+ } finally {
+ rmBufferDirs();
+ }
+ assertTrue(nextDirNotSelectedAtLeastOnce);
+ }
+
/** Two buffer dirs. The first dir does not exist & is on a read-only disk;
* The second dir exists & is RW
* getLocalPathForWrite with checkAccess set to false should create a parent
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org