Posted to common-commits@hadoop.apache.org by cd...@apache.org on 2017/12/16 02:10:52 UTC
[25/46] hadoop git commit: HDFS-12776. [READ] Increasing replication for PROVIDED files should create local replicas
HDFS-12776. [READ] Increasing replication for PROVIDED files should create local replicas
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/90d1b47a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/90d1b47a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/90d1b47a
Branch: refs/heads/trunk
Commit: 90d1b47a2a400e07e2b6b812c4bbd9c4f2877786
Parents: 87dc026
Author: Virajith Jalaparti <vi...@apache.org>
Authored: Thu Nov 9 13:03:41 2017 -0800
Committer: Chris Douglas <cd...@apache.org>
Committed: Fri Dec 15 17:51:39 2017 -0800
----------------------------------------------------------------------
.../hdfs/server/blockmanagement/BlockInfo.java | 7 ++--
.../datanode/fsdataset/impl/FsDatasetImpl.java | 25 +++++++++++---
.../TestNameNodeProvidedImplementation.java | 36 +++++++++++---------
3 files changed, 45 insertions(+), 23 deletions(-)
----------------------------------------------------------------------
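At a high level, the patch makes a raised replication factor on a PROVIDED-backed file materialize as new local (e.g. DISK) replicas, instead of being treated as already satisfied by the PROVIDED replica. A client-side sketch of the scenario this enables (the path and target replication below are illustrative, not taken from the patch):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Illustrative only; the path and target replication are hypothetical.
    public class RaiseReplicationExample {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path file = new Path("/provided/data/part-00000"); // hypothetical
        // With this patch, the NameNode satisfies the higher target by
        // scheduling replicas on local (e.g. DISK) storage instead of
        // counting only the PROVIDED replica.
        fs.setReplication(file, (short) 4);
      }
    }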
http://git-wip-us.apache.org/repos/asf/hadoop/blob/90d1b47a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
index eb09b7b..8f59df6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
@@ -187,20 +187,23 @@ public abstract class BlockInfo extends Block
*/
DatanodeStorageInfo findStorageInfo(DatanodeDescriptor dn) {
int len = getCapacity();
+ DatanodeStorageInfo providedStorageInfo = null;
for(int idx = 0; idx < len; idx++) {
DatanodeStorageInfo cur = getStorageInfo(idx);
if(cur != null) {
if (cur.getStorageType() == StorageType.PROVIDED) {
//if block resides on provided storage, only match the storage ids
if (dn.getStorageInfo(cur.getStorageID()) != null) {
- return cur;
+ // do not return here as we have to check the other
+ // DatanodeStorageInfos for this block which could be local
+ providedStorageInfo = cur;
}
} else if (cur.getDatanodeDescriptor() == dn) {
return cur;
}
}
}
- return null;
+ return providedStorageInfo;
}
/**
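Before this change, findStorageInfo returned as soon as the PROVIDED storage matched, so a local replica of the same block on that datanode could never be found. The patch remembers the PROVIDED match and keeps scanning, preferring a local match if one exists. A minimal, self-contained sketch of that lookup order (simplified, hypothetical types, not the actual BlockInfo API):

    import java.util.Arrays;
    import java.util.List;

    // Simplified, hypothetical model of the new lookup order in
    // BlockInfo#findStorageInfo; not the actual HDFS types.
    public class StorageLookupSketch {
      enum Type { DISK, PROVIDED }

      static final class Storage {
        final Type type;
        final String datanodeId;
        Storage(Type type, String datanodeId) {
          this.type = type;
          this.datanodeId = datanodeId;
        }
      }

      // A local match on the target datanode returns immediately; a
      // PROVIDED match is only remembered as a fallback until the scan
      // completes.
      static Storage findStorage(List<Storage> storages, String dnId) {
        Storage provided = null;
        for (Storage cur : storages) {
          if (cur == null) {
            continue;
          }
          if (cur.type == Type.PROVIDED) {
            // Do not return yet: a later entry may be a local replica
            // on the same datanode (the bug fixed by this patch).
            provided = cur;
          } else if (cur.datanodeId.equals(dnId)) {
            return cur;
          }
        }
        return provided; // PROVIDED fallback, or null when nothing matched
      }

      public static void main(String[] args) {
        List<Storage> storages = Arrays.asList(
            new Storage(Type.PROVIDED, "dn1"),
            new Storage(Type.DISK, "dn1"));
        // The DISK replica on dn1 wins even though PROVIDED appears first.
        System.out.println(findStorage(storages, "dn1").type); // DISK
      }
    }

Keeping the PROVIDED hit as a fallback preserves the old behavior when no local replica exists.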
http://git-wip-us.apache.org/repos/asf/hadoop/blob/90d1b47a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index db8d60c..fd06a56 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -1512,6 +1512,13 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
}
}
+ private boolean isReplicaProvided(ReplicaInfo replicaInfo) {
+ if (replicaInfo == null) {
+ return false;
+ }
+ return replicaInfo.getVolume().getStorageType() == StorageType.PROVIDED;
+ }
+
@Override // FsDatasetSpi
public ReplicaHandler createTemporary(StorageType storageType,
String storageId, ExtendedBlock b, boolean isTransfer)
@@ -1530,12 +1537,14 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
isInPipeline = currentReplicaInfo.getState() == ReplicaState.TEMPORARY
|| currentReplicaInfo.getState() == ReplicaState.RBW;
/*
- * If the current block is old, reject.
+ * If the current block is not PROVIDED and old, reject.
* else If transfer request, then accept it.
* else if state is not RBW/Temporary, then reject
+ * If current block is PROVIDED, ignore the replica.
*/
- if ((currentReplicaInfo.getGenerationStamp() >= b.getGenerationStamp())
- || (!isTransfer && !isInPipeline)) {
+ if (((currentReplicaInfo.getGenerationStamp() >= b
+ .getGenerationStamp()) || (!isTransfer && !isInPipeline))
+ && !isReplicaProvided(currentReplicaInfo)) {
throw new ReplicaAlreadyExistsException("Block " + b
+ " already exists in state " + currentReplicaInfo.getState()
+ " and thus cannot be created.");
@@ -1555,11 +1564,17 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
+ " after " + writerStopMs + " miniseconds.");
}
+ // if lastFoundReplicaInfo is PROVIDED and FINALIZED,
+ // stopWriter isn't required.
+ if (isReplicaProvided(lastFoundReplicaInfo) &&
+ lastFoundReplicaInfo.getState() == ReplicaState.FINALIZED) {
+ continue;
+ }
// Stop the previous writer
((ReplicaInPipeline)lastFoundReplicaInfo).stopWriter(writerStopTimeoutMs);
} while (true);
-
- if (lastFoundReplicaInfo != null) {
+ if (lastFoundReplicaInfo != null
+ && !isReplicaProvided(lastFoundReplicaInfo)) {
// Old blockfile should be deleted synchronously as it might collide
// with the new block if allocated in same volume.
// Do the deletion outside of lock as its DISK IO.
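The FsDatasetImpl changes make an existing PROVIDED replica non-blocking in createTemporary: it does not trigger ReplicaAlreadyExistsException, a FINALIZED PROVIDED replica needs no writer stopped, and it is not deleted before the new local replica is written. A small, hypothetical model of the patched rejection gate (simplified names and state, not the actual code path):

    // Hypothetical, self-contained model of the gate createTemporary now
    // applies; names and state are simplified, not the actual FsDatasetImpl.
    public class CreateTemporaryGateSketch {
      enum State { TEMPORARY, RBW, FINALIZED }

      static boolean rejectsNewReplica(boolean onProvidedStorage, State state,
          long existingGenStamp, long requestedGenStamp, boolean isTransfer) {
        boolean inPipeline = state == State.TEMPORARY || state == State.RBW;
        boolean stale = existingGenStamp >= requestedGenStamp;
        // Mirrors the patched condition: a PROVIDED replica never rejects
        // the request; otherwise stale blocks and non-pipeline replicas
        // are rejected unless this is a transfer.
        return (stale || (!isTransfer && !inPipeline)) && !onProvidedStorage;
      }

      public static void main(String[] args) {
        // A FINALIZED replica on PROVIDED storage no longer blocks
        // creation of a local replica for the same block...
        System.out.println(
            rejectsNewReplica(true, State.FINALIZED, 5, 5, false));  // false
        // ...while the same replica on DISK still does.
        System.out.println(
            rejectsNewReplica(false, State.FINALIZED, 5, 5, false)); // true
      }
    }

Under the old condition the first case would also have been rejected, which is why re-replication onto local storage previously failed.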
http://git-wip-us.apache.org/repos/asf/hadoop/blob/90d1b47a/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java b/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
index f0303b5..1f6aebb 100644
--- a/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
+++ b/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
@@ -401,33 +401,37 @@ public class TestNameNodeProvidedImplementation {
public void testSetReplicationForProvidedFiles() throws Exception {
createImage(new FSTreeWalk(NAMEPATH, conf), NNDIRPATH,
FixedBlockResolver.class);
- startCluster(NNDIRPATH, 2, null,
- new StorageType[][]{
- {StorageType.PROVIDED},
- {StorageType.DISK}},
+ // 10 Datanodes with both DISK and PROVIDED storage
+ startCluster(NNDIRPATH, 10,
+ new StorageType[]{
+ StorageType.PROVIDED, StorageType.DISK},
+ null,
false);
String filename = "/" + filePrefix + (numFiles - 1) + fileSuffix;
Path file = new Path(filename);
FileSystem fs = cluster.getFileSystem();
- //set the replication to 2, and test that the file has
- //the required replication.
- fs.setReplication(file, (short) 2);
+ // set the replication to 4, and test that the file has
+ // the required replication.
+ short newReplication = 4;
+ LOG.info("Setting replication of file {} to {}", filename, newReplication);
+ fs.setReplication(file, newReplication);
DFSTestUtil.waitForReplication((DistributedFileSystem) fs,
- file, (short) 2, 10000);
+ file, newReplication, 10000);
DFSClient client = new DFSClient(new InetSocketAddress("localhost",
cluster.getNameNodePort()), cluster.getConfiguration(0));
- getAndCheckBlockLocations(client, filename, 2);
+ getAndCheckBlockLocations(client, filename, newReplication);
- //set the replication back to 1
- fs.setReplication(file, (short) 1);
+ // set the replication back to 1
+ newReplication = 1;
+ LOG.info("Setting replication of file {} back to {}",
+ filename, newReplication);
+ fs.setReplication(file, newReplication);
DFSTestUtil.waitForReplication((DistributedFileSystem) fs,
- file, (short) 1, 10000);
- //the only replica left should be the PROVIDED datanode
- DatanodeInfo[] infos = getAndCheckBlockLocations(client, filename, 1);
- assertEquals(cluster.getDataNodes().get(0).getDatanodeUuid(),
- infos[0].getDatanodeUuid());
+ file, newReplication, 10000);
+ // the only replica left should be the PROVIDED datanode
+ getAndCheckBlockLocations(client, filename, newReplication);
}
@Test