Posted to common-commits@hadoop.apache.org by vi...@apache.org on 2017/05/11 21:42:43 UTC
[47/50] [abbrv] hadoop git commit: HDFS-11663. [READ] Fix NullPointerException in ProvidedBlocksBuilder
HDFS-11663. [READ] Fix NullPointerException in ProvidedBlocksBuilder
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a38e3a36
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a38e3a36
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a38e3a36
Branch: refs/heads/HDFS-9806
Commit: a38e3a36d0f3f3dc7e58ccaa70b04016a3cbdd22
Parents: 1eb223d
Author: Virajith Jalaparti <vi...@apache.org>
Authored: Thu May 4 13:06:53 2017 -0700
Committer: Virajith Jalaparti <vi...@apache.org>
Committed: Thu May 11 14:41:16 2017 -0700
----------------------------------------------------------------------
.../blockmanagement/ProvidedStorageMap.java | 40 ++++++-----
.../TestNameNodeProvidedImplementation.java | 70 +++++++++++++++-----
2 files changed, 77 insertions(+), 33 deletions(-)
----------------------------------------------------------------------
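For readers skimming the patch: the diff guards two paths that could previously dereference null. First, a null client DatanodeDescriptor reaching ProvidedDescriptor.choose(); second, build() unconditionally resolving the shared "pending" placeholder even when no block had a PROVIDED location. A minimal standalone sketch of the null-safe choose pattern the patch adopts (hypothetical names, plain Java, not the HDFS classes):

    import java.util.HashMap;
    import java.util.Map;

    public class NullSafeChooseSketch {
      private final Map<String, String> dns = new HashMap<>();

      // Mirrors the patched choose(): tolerate a null client instead of
      // calling client.getDatanodeUuid() unconditionally.
      String choose(String clientUuid) {
        String dn = clientUuid != null ? dns.get(clientUuid) : null;
        if (dn == null) {
          dn = chooseRandom();
        }
        return dn;
      }

      // Stand-in for the real random fallback; returns any known entry.
      private String chooseRandom() {
        return dns.isEmpty() ? null : dns.values().iterator().next();
      }

      public static void main(String[] args) {
        NullSafeChooseSketch s = new NullSafeChooseSketch();
        // The pre-patch equivalent would throw NullPointerException here.
        System.out.println(s.choose(null));
      }
    }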
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a38e3a36/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java
index d222344..518b7e9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java
@@ -134,11 +134,13 @@ public class ProvidedStorageMap {
   class ProvidedBlocksBuilder extends LocatedBlockBuilder {
 
     private ShadowDatanodeInfoWithStorage pending;
+    private boolean hasProvidedLocations;
 
     ProvidedBlocksBuilder(int maxBlocks) {
       super(maxBlocks);
       pending = new ShadowDatanodeInfoWithStorage(
           providedDescriptor, storageId);
+      hasProvidedLocations = false;
     }
 
     @Override
@@ -154,6 +156,7 @@ public class ProvidedStorageMap {
         types[i] = storages[i].getStorageType();
         if (StorageType.PROVIDED.equals(storages[i].getStorageType())) {
           locs[i] = pending;
+          hasProvidedLocations = true;
         } else {
           locs[i] = new DatanodeInfoWithStorage(
               storages[i].getDatanodeDescriptor(), sids[i], types[i]);
@@ -165,25 +168,28 @@ public class ProvidedStorageMap {
 
     @Override
     LocatedBlocks build(DatanodeDescriptor client) {
       // TODO: to support multiple provided storages, need to pass/maintain map
-      // set all fields of pending DatanodeInfo
-      List<String> excludedUUids = new ArrayList<String>();
-      for (LocatedBlock b: blocks) {
-        DatanodeInfo[] infos = b.getLocations();
-        StorageType[] types = b.getStorageTypes();
-
-        for (int i = 0; i < types.length; i++) {
-          if (!StorageType.PROVIDED.equals(types[i])) {
-            excludedUUids.add(infos[i].getDatanodeUuid());
+      if (hasProvidedLocations) {
+        // set all fields of pending DatanodeInfo
+        List<String> excludedUUids = new ArrayList<String>();
+        for (LocatedBlock b : blocks) {
+          DatanodeInfo[] infos = b.getLocations();
+          StorageType[] types = b.getStorageTypes();
+
+          for (int i = 0; i < types.length; i++) {
+            if (!StorageType.PROVIDED.equals(types[i])) {
+              excludedUUids.add(infos[i].getDatanodeUuid());
+            }
           }
         }
-      }
-      DatanodeDescriptor dn = providedDescriptor.choose(client, excludedUUids);
-      if (dn == null) {
-        dn = providedDescriptor.choose(client);
+        DatanodeDescriptor dn =
+            providedDescriptor.choose(client, excludedUUids);
+        if (dn == null) {
+          dn = providedDescriptor.choose(client);
+        }
+        pending.replaceInternal(dn);
       }
-      pending.replaceInternal(dn);
 
       return new LocatedBlocks(
           flen, isUC, blocks, last, lastComplete, feInfo, ecPolicy);
     }
@@ -278,7 +284,8 @@ public class ProvidedStorageMap {
 
     DatanodeDescriptor choose(DatanodeDescriptor client) {
       // exact match for now
-      DatanodeDescriptor dn = dns.get(client.getDatanodeUuid());
+      DatanodeDescriptor dn = client != null ?
+          dns.get(client.getDatanodeUuid()) : null;
       if (null == dn) {
         dn = chooseRandom();
       }
@@ -288,7 +295,8 @@ public class ProvidedStorageMap {
     DatanodeDescriptor choose(DatanodeDescriptor client,
         List<String> excludedUUids) {
       // exact match for now
-      DatanodeDescriptor dn = dns.get(client.getDatanodeUuid());
+      DatanodeDescriptor dn = client != null ?
+          dns.get(client.getDatanodeUuid()) : null;
       if (null == dn || excludedUUids.contains(client.getDatanodeUuid())) {
         dn = null;
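The build() change above is the other half of the fix: pre-patch, build() always tried to resolve the pending placeholder, even for files with no PROVIDED replicas, in which case every datanode UUID landed in the excluded list and the fallback path could end up working with a null choice or a null client. A standalone sketch of the guard pattern (hypothetical names, not the HDFS classes):

    import java.util.ArrayList;
    import java.util.List;

    public class PendingPlaceholderSketch {
      static final String PENDING = "PENDING";

      // Resolve the shared placeholder only when it was actually handed
      // out, mirroring the hasProvidedLocations guard in build().
      static List<String> build(List<String> locs, List<String> candidates) {
        if (locs.contains(PENDING)) {
          String chosen = candidates.isEmpty() ? null : candidates.get(0);
          if (chosen != null) {
            locs.replaceAll(l -> PENDING.equals(l) ? chosen : l);
          }
        }
        return locs;
      }

      public static void main(String[] args) {
        List<String> locs = new ArrayList<>(List.of("dn1", "dn2"));
        // No pending entries: resolution is skipped entirely, as post-patch.
        System.out.println(build(locs, new ArrayList<>()));
      }
    }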
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a38e3a36/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java b/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
index 3b75806..5062439 100644
--- a/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
+++ b/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockFormatProvider;
@@ -69,6 +70,10 @@ public class TestNameNodeProvidedImplementation {
   final Path BLOCKFILE = new Path(NNDIRPATH, "blocks.csv");
   final String SINGLEUSER = "usr1";
   final String SINGLEGROUP = "grp1";
+  private final int numFiles = 10;
+  private final String filePrefix = "file";
+  private final String fileSuffix = ".dat";
+  private final int baseFileLen = 1024;
 
   Configuration conf;
   MiniDFSCluster cluster;
@@ -114,15 +119,16 @@ public class TestNameNodeProvidedImplementation {
     }
 
     // create 10 random files under BASE
-    for (int i=0; i < 10; i++) {
-      File newFile = new File(new Path(NAMEPATH, "file" + i).toUri());
+    for (int i=0; i < numFiles; i++) {
+      File newFile = new File(
+          new Path(NAMEPATH, filePrefix + i + fileSuffix).toUri());
       if(!newFile.exists()) {
         try {
           LOG.info("Creating " + newFile.toString());
           newFile.createNewFile();
           Writer writer = new OutputStreamWriter(
               new FileOutputStream(newFile.getAbsolutePath()), "utf-8");
-          for(int j=0; j < 10*i; j++) {
+          for(int j=0; j < baseFileLen*i; j++) {
             writer.write("0");
           }
           writer.flush();
@@ -161,29 +167,30 @@ public class TestNameNodeProvidedImplementation {
 
   void startCluster(Path nspath, int numDatanodes,
       StorageType[] storageTypes,
-      StorageType[][] storageTypesPerDatanode)
+      StorageType[][] storageTypesPerDatanode,
+      boolean doFormat)
       throws IOException {
     conf.set(DFS_NAMENODE_NAME_DIR_KEY, nspath.toString());
 
     if (storageTypesPerDatanode != null) {
       cluster = new MiniDFSCluster.Builder(conf)
-          .format(false)
-          .manageNameDfsDirs(false)
+          .format(doFormat)
+          .manageNameDfsDirs(doFormat)
           .numDataNodes(numDatanodes)
           .storageTypes(storageTypesPerDatanode)
           .build();
     } else if (storageTypes != null) {
       cluster = new MiniDFSCluster.Builder(conf)
-          .format(false)
-          .manageNameDfsDirs(false)
+          .format(doFormat)
+          .manageNameDfsDirs(doFormat)
           .numDataNodes(numDatanodes)
           .storagesPerDatanode(storageTypes.length)
           .storageTypes(storageTypes)
           .build();
     } else {
       cluster = new MiniDFSCluster.Builder(conf)
-          .format(false)
-          .manageNameDfsDirs(false)
+          .format(doFormat)
+          .manageNameDfsDirs(doFormat)
           .numDataNodes(numDatanodes)
           .build();
     }
@@ -195,7 +202,8 @@ public class TestNameNodeProvidedImplementation {
     final long seed = r.nextLong();
     LOG.info("NAMEPATH: " + NAMEPATH);
     createImage(new RandomTreeWalk(seed), NNDIRPATH, FixedBlockResolver.class);
-    startCluster(NNDIRPATH, 0, new StorageType[] {StorageType.PROVIDED}, null);
+    startCluster(NNDIRPATH, 0, new StorageType[] {StorageType.PROVIDED},
+        null, false);
 
     FileSystem fs = cluster.getFileSystem();
     for (TreePath e : new RandomTreeWalk(seed)) {
@@ -220,7 +228,8 @@ public class TestNameNodeProvidedImplementation {
         SingleUGIResolver.class, UGIResolver.class);
     createImage(new FSTreeWalk(NAMEPATH, conf), NNDIRPATH,
         FixedBlockResolver.class);
-    startCluster(NNDIRPATH, 1, new StorageType[] {StorageType.PROVIDED}, null);
+    startCluster(NNDIRPATH, 1, new StorageType[] {StorageType.PROVIDED},
+        null, false);
   }
 
   @Test(timeout=500000)
@@ -232,10 +241,10 @@ public class TestNameNodeProvidedImplementation {
     // make the last Datanode with only DISK
     startCluster(NNDIRPATH, 3, null,
         new StorageType[][] {
-            {StorageType.PROVIDED},
-            {StorageType.PROVIDED},
-            {StorageType.DISK}}
-        );
+                {StorageType.PROVIDED},
+                {StorageType.PROVIDED},
+                {StorageType.DISK}},
+        false);
 
     // wait for the replication to finish
     Thread.sleep(50000);
@@ -290,7 +299,8 @@ public class TestNameNodeProvidedImplementation {
         FsUGIResolver.class, UGIResolver.class);
     createImage(new FSTreeWalk(NAMEPATH, conf), NNDIRPATH,
         FixedBlockResolver.class);
-    startCluster(NNDIRPATH, 3, new StorageType[] {StorageType.PROVIDED}, null);
+    startCluster(NNDIRPATH, 3, new StorageType[] {StorageType.PROVIDED},
+        null, false);
     FileSystem fs = cluster.getFileSystem();
     Thread.sleep(2000);
     int count = 0;
@@ -342,4 +352,30 @@ public class TestNameNodeProvidedImplementation {
       }
     }
   }
+
+  private BlockLocation[] createFile(Path path, short replication,
+      long fileLen, long blockLen) throws IOException {
+    FileSystem fs = cluster.getFileSystem();
+    //create a sample file that is not provided
+    DFSTestUtil.createFile(fs, path, false, (int) blockLen,
+        fileLen, blockLen, replication, 0, true);
+    return fs.getFileBlockLocations(path, 0, fileLen);
+  }
+
+  @Test
+  public void testClusterWithEmptyImage() throws IOException {
+    // start a cluster with 2 datanodes without any provided storage
+    startCluster(NNDIRPATH, 2, null,
+        new StorageType[][] {
+            {StorageType.DISK},
+            {StorageType.DISK}},
+        true);
+    assertTrue(cluster.isClusterUp());
+    assertTrue(cluster.isDataNodeUp());
+
+    BlockLocation[] locations = createFile(new Path("/testFile1.dat"),
+        (short) 2, 1024*1024, 1024*1024);
+    assertEquals(1, locations.length);
+    assertEquals(2, locations[0].getHosts().length);
+  }
 }
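To run just the new test locally, standard Maven Surefire test selection from the hadoop-tools/hadoop-fs2img module should work (module path taken from the diff headers above; the exact invocation may vary with the build setup):

    mvn test -Dtest=TestNameNodeProvidedImplementation#testClusterWithEmptyImage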