Posted to common-commits@hadoop.apache.org by aw...@apache.org on 2015/07/07 21:08:23 UTC
[1/9] hadoop git commit: HDFS-8652. Track BlockInfo instead of Block in CorruptReplicasMap. Contributed by Jing Zhao.
Repository: hadoop
Updated Branches:
refs/heads/HADOOP-12111 60343661c -> 0d7a70857
HDFS-8652. Track BlockInfo instead of Block in CorruptReplicasMap. Contributed by Jing Zhao.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d62b63d2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d62b63d2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d62b63d2
Branch: refs/heads/HADOOP-12111
Commit: d62b63d297bff12d93de560dd50ddd48743b851d
Parents: 47a69ec
Author: Jing Zhao <ji...@apache.org>
Authored: Mon Jul 6 15:54:07 2015 -0700
Committer: Jing Zhao <ji...@apache.org>
Committed: Mon Jul 6 15:54:25 2015 -0700
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 +
.../hdfs/server/blockmanagement/BlockInfo.java | 7 +-
.../blockmanagement/BlockInfoContiguous.java | 9 +-
.../BlockInfoUnderConstruction.java | 22 ++-
.../BlockInfoUnderConstructionContiguous.java | 13 +-
.../server/blockmanagement/BlockManager.java | 143 +++++++++----------
.../hdfs/server/blockmanagement/BlocksMap.java | 4 +-
.../ContiguousBlockStorageOp.java | 7 +-
.../blockmanagement/CorruptReplicasMap.java | 62 ++++----
.../hdfs/server/namenode/FSDirWriteFileOp.java | 6 +-
.../hdfs/server/namenode/FSEditLogLoader.java | 2 +-
.../hdfs/server/namenode/NamenodeFsck.java | 12 +-
.../org/apache/hadoop/hdfs/DFSTestUtil.java | 6 +-
.../blockmanagement/BlockManagerTestUtil.java | 7 +-
.../server/blockmanagement/TestBlockInfo.java | 10 +-
.../blockmanagement/TestBlockManager.java | 10 +-
.../blockmanagement/TestCorruptReplicaInfo.java | 15 +-
17 files changed, 169 insertions(+), 168 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d62b63d2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 9edc2af..d264f74 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -701,6 +701,8 @@ Release 2.8.0 - UNRELEASED
HDFS-8709. Clarify automatic sync in FSEditLog#logEdit. (wang)
+ HDFS-8652. Track BlockInfo instead of Block in CorruptReplicasMap. (jing9)
+
OPTIMIZATIONS
HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d62b63d2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
index 5ad992b..4df2f0e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
@@ -179,7 +179,7 @@ public abstract class BlockInfo extends Block
* information indicating the index of the block in the
* corresponding block group.
*/
- abstract boolean addStorage(DatanodeStorageInfo storage, Block reportedBlock);
+ abstract void addStorage(DatanodeStorageInfo storage, Block reportedBlock);
/**
* Remove {@link DatanodeStorageInfo} location for a block
@@ -193,6 +193,11 @@ public abstract class BlockInfo extends Block
abstract void replaceBlock(BlockInfo newBlock);
/**
+ * @return true if there is no storage storing the block
+ */
+ abstract boolean hasEmptyStorage();
+
+ /**
* Find specified DatanodeStorageInfo.
* @return DatanodeStorageInfo or null if not found.
*/
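(Editor's sketch, not part of the commit: the two hunks above change BlockInfo's storage contract -- addStorage() stops reporting success through a boolean, and emptiness becomes a first-class query. The minimal standalone Java below illustrates the resulting abstract shape; Storage is a stand-in for Hadoop's DatanodeStorageInfo, and the names are illustrative only.)

abstract class BlockInfoSketch {
  interface Storage {}

  // addStorage no longer signals success via a boolean; contiguous blocks
  // always succeed, so the return value carried no information.
  abstract void addStorage(Storage storage);

  // New query: true when no storage holds the block. Replaces the raw
  // "getDatanode(0) == null" probe that BlocksMap used before this change.
  abstract boolean hasEmptyStorage();
}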
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d62b63d2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
index de64ad8..561faca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
@@ -45,8 +45,8 @@ public class BlockInfoContiguous extends BlockInfo {
}
@Override
- boolean addStorage(DatanodeStorageInfo storage, Block reportedBlock) {
- return ContiguousBlockStorageOp.addStorage(this, storage);
+ void addStorage(DatanodeStorageInfo storage, Block reportedBlock) {
+ ContiguousBlockStorageOp.addStorage(this, storage);
}
@Override
@@ -73,4 +73,9 @@ public class BlockInfoContiguous extends BlockInfo {
ucBlock.setBlockCollection(getBlockCollection());
return ucBlock;
}
+
+ @Override
+ boolean hasEmptyStorage() {
+ return ContiguousBlockStorageOp.hasEmptyStorage(this);
+ }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d62b63d2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
index 9cd3987..7924709 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.hdfs.server.blockmanagement;
import java.io.IOException;
-import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
@@ -274,18 +273,17 @@ public abstract class BlockInfoUnderConstruction extends BlockInfo {
"No blocks found, lease removed.");
}
boolean allLiveReplicasTriedAsPrimary = true;
- for (int i = 0; i < replicas.size(); i++) {
+ for (ReplicaUnderConstruction replica : replicas) {
// Check if all replicas have been tried or not.
- if (replicas.get(i).isAlive()) {
- allLiveReplicasTriedAsPrimary =
- (allLiveReplicasTriedAsPrimary &&
- replicas.get(i).getChosenAsPrimary());
+ if (replica.isAlive()) {
+ allLiveReplicasTriedAsPrimary = allLiveReplicasTriedAsPrimary
+ && replica.getChosenAsPrimary();
}
}
if (allLiveReplicasTriedAsPrimary) {
// Just set all the replicas to be chosen whether they are alive or not.
- for (int i = 0; i < replicas.size(); i++) {
- replicas.get(i).setChosenAsPrimary(false);
+ for (ReplicaUnderConstruction replica : replicas) {
+ replica.setChosenAsPrimary(false);
}
}
long mostRecentLastUpdate = 0;
@@ -345,10 +343,6 @@ public abstract class BlockInfoUnderConstruction extends BlockInfo {
* Convert an under construction block to a complete block.
*
* @return a complete block.
- * @throws IOException
- * if the state of the block (the generation stamp and the length)
- * has not been committed by the client or it does not have at
- * least a minimal number of replicas reported from data-nodes.
*/
public abstract BlockInfo convertToCompleteBlock();
@@ -386,8 +380,8 @@ public abstract class BlockInfoUnderConstruction extends BlockInfo {
}
private void appendUCParts(StringBuilder sb) {
- sb.append("{UCState=").append(blockUCState)
- .append(", truncateBlock=" + truncateBlock)
+ sb.append("{UCState=").append(blockUCState).append(", truncateBlock=")
+ .append(truncateBlock)
.append(", primaryNodeIndex=").append(primaryNodeIndex)
.append(", replicas=[");
if (replicas != null) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d62b63d2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstructionContiguous.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstructionContiguous.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstructionContiguous.java
index d3cb337..963f247 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstructionContiguous.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstructionContiguous.java
@@ -55,10 +55,6 @@ public class BlockInfoUnderConstructionContiguous extends
* Convert an under construction block to a complete block.
*
* @return BlockInfo - a complete block.
- * @throws IOException if the state of the block
- * (the generation stamp and the length) has not been committed by
- * the client or it does not have at least a minimal number of replicas
- * reported from data-nodes.
*/
@Override
public BlockInfoContiguous convertToCompleteBlock() {
@@ -69,8 +65,8 @@ public class BlockInfoUnderConstructionContiguous extends
}
@Override
- boolean addStorage(DatanodeStorageInfo storage, Block reportedBlock) {
- return ContiguousBlockStorageOp.addStorage(this, storage);
+ void addStorage(DatanodeStorageInfo storage, Block reportedBlock) {
+ ContiguousBlockStorageOp.addStorage(this, storage);
}
@Override
@@ -89,6 +85,11 @@ public class BlockInfoUnderConstructionContiguous extends
}
@Override
+ boolean hasEmptyStorage() {
+ return ContiguousBlockStorageOp.hasEmptyStorage(this);
+ }
+
+ @Override
public void setExpectedLocations(DatanodeStorageInfo[] targets) {
int numLocations = targets == null ? 0 : targets.length;
this.replicas = new ArrayList<>(numLocations);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d62b63d2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 0b60a97..6ae3ee2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -17,7 +17,6 @@
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
import static org.apache.hadoop.util.ExitUtil.terminate;
import java.io.IOException;
@@ -197,7 +196,7 @@ public class BlockManager implements BlockStatsMXBean {
* notified of all block deletions that might have been pending
* when the failover happened.
*/
- private final Set<Block> postponedMisreplicatedBlocks = Sets.newHashSet();
+ private final Set<BlockInfo> postponedMisreplicatedBlocks = Sets.newHashSet();
/**
* Maps a StorageID to the set of blocks that are "extra" for this
@@ -338,8 +337,7 @@ public class BlockManager implements BlockStatsMXBean {
DFSConfigKeys.DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_KEY,
DFSConfigKeys.DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_DEFAULT);
this.shouldCheckForEnoughRacks =
- conf.get(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY) == null
- ? false : true;
+ conf.get(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY) != null;
this.blocksInvalidateWorkPct = DFSUtil.getInvalidateWorkPctPerIteration(conf);
this.blocksReplWorkMultiplier = DFSUtil.getReplWorkMultiplier(conf);
@@ -465,8 +463,7 @@ public class BlockManager implements BlockStatsMXBean {
/** Should the access keys be updated? */
boolean shouldUpdateBlockKey(final long updateTime) throws IOException {
- return isBlockTokenEnabled()? blockTokenSecretManager.updateKeys(updateTime)
- : false;
+ return isBlockTokenEnabled() && blockTokenSecretManager.updateKeys(updateTime);
}
public void activate(Configuration conf) {
@@ -519,14 +516,14 @@ public class BlockManager implements BlockStatsMXBean {
synchronized (neededReplications) {
out.println("Metasave: Blocks waiting for replication: " +
neededReplications.size());
- for (Block block : neededReplications) {
+ for (BlockInfo block : neededReplications) {
dumpBlockMeta(block, out);
}
}
// Dump any postponed over-replicated blocks
out.println("Mis-replicated blocks that have been postponed:");
- for (Block block : postponedMisreplicatedBlocks) {
+ for (BlockInfo block : postponedMisreplicatedBlocks) {
dumpBlockMeta(block, out);
}
@@ -544,11 +541,9 @@ public class BlockManager implements BlockStatsMXBean {
* Dump the metadata for the given block in a human-readable
* form.
*/
- private void dumpBlockMeta(Block block, PrintWriter out) {
- List<DatanodeDescriptor> containingNodes =
- new ArrayList<DatanodeDescriptor>();
- List<DatanodeStorageInfo> containingLiveReplicasNodes =
- new ArrayList<>();
+ private void dumpBlockMeta(BlockInfo block, PrintWriter out) {
+ List<DatanodeDescriptor> containingNodes = new ArrayList<>();
+ List<DatanodeStorageInfo> containingLiveReplicasNodes = new ArrayList<>();
NumberReplicas numReplicas = new NumberReplicas();
// source node returned is not used
@@ -556,17 +551,16 @@ public class BlockManager implements BlockStatsMXBean {
containingLiveReplicasNodes, numReplicas,
UnderReplicatedBlocks.LEVEL);
- // containingLiveReplicasNodes can include READ_ONLY_SHARED replicas which are
- // not included in the numReplicas.liveReplicas() count
+ // containingLiveReplicasNodes can include READ_ONLY_SHARED replicas which
+ // are not included in the numReplicas.liveReplicas() count
assert containingLiveReplicasNodes.size() >= numReplicas.liveReplicas();
int usableReplicas = numReplicas.liveReplicas() +
numReplicas.decommissionedAndDecommissioning();
-
- if (block instanceof BlockInfo) {
- BlockCollection bc = ((BlockInfo) block).getBlockCollection();
- String fileName = (bc == null) ? "[orphaned]" : bc.getName();
- out.print(fileName + ": ");
- }
+
+ BlockCollection bc = block.getBlockCollection();
+ String fileName = (bc == null) ? "[orphaned]" : bc.getName();
+ out.print(fileName + ": ");
+
// l: == live:, d: == decommissioned c: == corrupt e: == excess
out.print(block + ((usableReplicas > 0)? "" : " MISSING") +
" (replicas:" +
@@ -575,8 +569,8 @@ public class BlockManager implements BlockStatsMXBean {
" c: " + numReplicas.corruptReplicas() +
" e: " + numReplicas.excessReplicas() + ") ");
- Collection<DatanodeDescriptor> corruptNodes =
- corruptReplicas.getNodes(block);
+ Collection<DatanodeDescriptor> corruptNodes =
+ corruptReplicas.getNodes(block);
for (DatanodeStorageInfo storage : getStorages(block)) {
final DatanodeDescriptor node = storage.getDatanodeDescriptor();
@@ -813,7 +807,8 @@ public class BlockManager implements BlockStatsMXBean {
final long offset, final long length, final int nrBlocksToReturn,
final AccessMode mode) throws IOException {
int curBlk;
- long curPos = 0, blkSize = 0;
+ long curPos = 0;
+ long blkSize;
int nrBlocks = (blocks[0].getNumBytes() == 0) ? 0 : blocks.length;
for (curBlk = 0; curBlk < nrBlocks; curBlk++) {
blkSize = blocks[curBlk].getNumBytes();
@@ -1204,10 +1199,11 @@ public class BlockManager implements BlockStatsMXBean {
}
/**
- *
- * @param b
+ * Mark a replica (of a contiguous block) or an internal block (of a striped
+ * block group) as corrupt.
+ * @param b Indicating the reported bad block and the corresponding BlockInfo
+ * stored in blocksMap.
* @param storageInfo storage that contains the block, if known. null otherwise.
- * @throws IOException
*/
private void markBlockAsCorrupt(BlockToMarkCorrupt b,
DatanodeStorageInfo storageInfo,
@@ -1228,7 +1224,7 @@ public class BlockManager implements BlockStatsMXBean {
}
// Add this replica to corruptReplicas Map
- corruptReplicas.addToCorruptReplicasMap(b.corrupted, node, b.reason,
+ corruptReplicas.addToCorruptReplicasMap(b.stored, node, b.reason,
b.reasonCode);
NumberReplicas numberOfReplicas = countNodes(b.stored);
@@ -1250,7 +1246,7 @@ public class BlockManager implements BlockStatsMXBean {
if (hasEnoughLiveReplicas || hasMoreCorruptReplicas
|| corruptedDuringWrite) {
// the block is over-replicated so invalidate the replicas immediately
- invalidateBlock(b, node);
+ invalidateBlock(b, node, numberOfReplicas);
} else if (namesystem.isPopulatingReplQueues()) {
// add the block to neededReplication
updateNeededReplications(b.stored, -1, 0);
@@ -1258,12 +1254,15 @@ public class BlockManager implements BlockStatsMXBean {
}
/**
- * Invalidates the given block on the given datanode.
- * @return true if the block was successfully invalidated and no longer
- * present in the BlocksMap
+ * Invalidates the given block on the given datanode. Note that before this
+ * call we have already checked the current live replicas of the block and
+ * make sure it's safe to invalidate the replica.
+ *
+ * @return true if the replica was successfully invalidated and no longer
+ * associated with the DataNode.
*/
- private boolean invalidateBlock(BlockToMarkCorrupt b, DatanodeInfo dn
- ) throws IOException {
+ private boolean invalidateBlock(BlockToMarkCorrupt b, DatanodeInfo dn,
+ NumberReplicas nr) throws IOException {
blockLog.info("BLOCK* invalidateBlock: {} on {}", b, dn);
DatanodeDescriptor node = getDatanodeManager().getDatanode(dn);
if (node == null) {
@@ -1272,35 +1271,30 @@ public class BlockManager implements BlockStatsMXBean {
}
// Check how many copies we have of the block
- NumberReplicas nr = countNodes(b.stored);
if (nr.replicasOnStaleNodes() > 0) {
blockLog.info("BLOCK* invalidateBlocks: postponing " +
"invalidation of {} on {} because {} replica(s) are located on " +
"nodes with potentially out-of-date block reports", b, dn,
nr.replicasOnStaleNodes());
- postponeBlock(b.corrupted);
+ postponeBlock(b.stored);
return false;
- } else if (nr.liveReplicas() >= 1) {
- // If we have at least one copy on a live node, then we can delete it.
+ } else {
+ // we already checked the number of replicas in the caller of this
+ // function and we know there is at least one copy on a live node, so we
+ // can delete it.
addToInvalidates(b.corrupted, dn);
removeStoredBlock(b.stored, node);
blockLog.debug("BLOCK* invalidateBlocks: {} on {} listed for deletion.",
b, dn);
return true;
- } else {
- blockLog.info("BLOCK* invalidateBlocks: {} on {} is the only copy and" +
- " was not deleted", b, dn);
- return false;
}
}
-
public void setPostponeBlocksFromFuture(boolean postpone) {
this.shouldPostponeBlocksFromFuture = postpone;
}
-
- private void postponeBlock(Block blk) {
+ private void postponeBlock(BlockInfo blk) {
if (postponedMisreplicatedBlocks.add(blk)) {
postponedMisreplicatedBlocksCount.incrementAndGet();
}
@@ -1374,7 +1368,7 @@ public class BlockManager implements BlockStatsMXBean {
int requiredReplication, numEffectiveReplicas;
List<DatanodeDescriptor> containingNodes;
DatanodeDescriptor srcNode;
- BlockCollection bc = null;
+ BlockCollection bc;
int additionalReplRequired;
int scheduledWork = 0;
@@ -1535,9 +1529,9 @@ public class BlockManager implements BlockStatsMXBean {
DatanodeStorageInfo[] targets = rw.targets;
if (targets != null && targets.length != 0) {
StringBuilder targetList = new StringBuilder("datanode(s)");
- for (int k = 0; k < targets.length; k++) {
+ for (DatanodeStorageInfo target : targets) {
targetList.append(' ');
- targetList.append(targets[k].getDatanodeDescriptor());
+ targetList.append(target.getDatanodeDescriptor());
}
blockLog.info("BLOCK* ask {} to replicate {} to {}", rw.srcNode,
rw.block, targetList);
@@ -1614,8 +1608,8 @@ public class BlockManager implements BlockStatsMXBean {
List<DatanodeDescriptor> datanodeDescriptors = null;
if (nodes != null) {
datanodeDescriptors = new ArrayList<>(nodes.size());
- for (int i = 0; i < nodes.size(); i++) {
- DatanodeDescriptor node = datanodeManager.getDatanodeDescriptor(nodes.get(i));
+ for (String nodeStr : nodes) {
+ DatanodeDescriptor node = datanodeManager.getDatanodeDescriptor(nodeStr);
if (node != null) {
datanodeDescriptors.add(node);
}
@@ -1654,7 +1648,7 @@ public class BlockManager implements BlockStatsMXBean {
* the given block
*/
@VisibleForTesting
- DatanodeDescriptor chooseSourceDatanode(Block block,
+ DatanodeDescriptor chooseSourceDatanode(BlockInfo block,
List<DatanodeDescriptor> containingNodes,
List<DatanodeStorageInfo> nodesContainingLiveReplicas,
NumberReplicas numReplicas,
@@ -1734,16 +1728,16 @@ public class BlockManager implements BlockStatsMXBean {
if (timedOutItems != null) {
namesystem.writeLock();
try {
- for (int i = 0; i < timedOutItems.length; i++) {
+ for (BlockInfo timedOutItem : timedOutItems) {
/*
* Use the blockinfo from the blocksmap to be certain we're working
* with the most up-to-date block information (e.g. genstamp).
*/
- BlockInfo bi = getStoredBlock(timedOutItems[i]);
+ BlockInfo bi = getStoredBlock(timedOutItem);
if (bi == null) {
continue;
}
- NumberReplicas num = countNodes(timedOutItems[i]);
+ NumberReplicas num = countNodes(timedOutItem);
if (isNeededReplication(bi, getReplication(bi), num.liveReplicas())) {
neededReplications.add(bi, num.liveReplicas(),
num.decommissionedAndDecommissioning(), getReplication(bi));
@@ -1760,7 +1754,7 @@ public class BlockManager implements BlockStatsMXBean {
public long requestBlockReportLeaseId(DatanodeRegistration nodeReg) {
assert namesystem.hasReadLock();
- DatanodeDescriptor node = null;
+ DatanodeDescriptor node;
try {
node = datanodeManager.getDatanode(nodeReg);
} catch (UnregisteredNodeException e) {
@@ -2022,7 +2016,7 @@ public class BlockManager implements BlockStatsMXBean {
startIndex += (base+1);
}
}
- Iterator<Block> it = postponedMisreplicatedBlocks.iterator();
+ Iterator<BlockInfo> it = postponedMisreplicatedBlocks.iterator();
for (int tmp = 0; tmp < startIndex; tmp++) {
it.next();
}
@@ -2117,7 +2111,7 @@ public class BlockManager implements BlockStatsMXBean {
long oldGenerationStamp, long oldNumBytes,
DatanodeStorageInfo[] newStorages) throws IOException {
assert namesystem.hasWriteLock();
- BlockToMarkCorrupt b = null;
+ BlockToMarkCorrupt b;
if (block.getGenerationStamp() != oldGenerationStamp) {
b = new BlockToMarkCorrupt(oldBlock, block, oldGenerationStamp,
"genstamp does not match " + oldGenerationStamp
@@ -2719,7 +2713,7 @@ public class BlockManager implements BlockStatsMXBean {
" but corrupt replicas map has " + corruptReplicasCount);
}
if ((corruptReplicasCount > 0) && (numLiveReplicas >= fileReplication)) {
- invalidateCorruptReplicas(storedBlock, reportedBlock);
+ invalidateCorruptReplicas(storedBlock, reportedBlock, num);
}
return storedBlock;
}
@@ -2752,18 +2746,20 @@ public class BlockManager implements BlockStatsMXBean {
*
* @param blk Block whose corrupt replicas need to be invalidated
*/
- private void invalidateCorruptReplicas(BlockInfo blk, Block reported) {
+ private void invalidateCorruptReplicas(BlockInfo blk, Block reported,
+ NumberReplicas numberReplicas) {
Collection<DatanodeDescriptor> nodes = corruptReplicas.getNodes(blk);
boolean removedFromBlocksMap = true;
if (nodes == null)
return;
// make a copy of the array of nodes in order to avoid
// ConcurrentModificationException, when the block is removed from the node
- DatanodeDescriptor[] nodesCopy = nodes.toArray(new DatanodeDescriptor[0]);
+ DatanodeDescriptor[] nodesCopy = nodes.toArray(
+ new DatanodeDescriptor[nodes.size()]);
for (DatanodeDescriptor node : nodesCopy) {
try {
if (!invalidateBlock(new BlockToMarkCorrupt(reported, blk, null,
- Reason.ANY), node)) {
+ Reason.ANY), node, numberReplicas)) {
removedFromBlocksMap = false;
}
} catch (IOException e) {
@@ -2813,7 +2809,6 @@ public class BlockManager implements BlockStatsMXBean {
replicationQueuesInitializer.join();
} catch (final InterruptedException e) {
LOG.warn("Interrupted while waiting for replicationQueueInitializer. Returning..");
- return;
} finally {
replicationQueuesInitializer = null;
}
@@ -3175,8 +3170,7 @@ public class BlockManager implements BlockStatsMXBean {
CachedBlock cblock = namesystem.getCacheManager().getCachedBlocks()
.get(new CachedBlock(storedBlock.getBlockId(), (short) 0, false));
if (cblock != null) {
- boolean removed = false;
- removed |= node.getPendingCached().remove(cblock);
+ boolean removed = node.getPendingCached().remove(cblock);
removed |= node.getCached().remove(cblock);
removed |= node.getPendingUncached().remove(cblock);
if (removed) {
@@ -3392,7 +3386,7 @@ public class BlockManager implements BlockStatsMXBean {
int excess = 0;
int stale = 0;
Collection<DatanodeDescriptor> nodesCorrupt = corruptReplicas.getNodes(b);
- for(DatanodeStorageInfo storage : blocksMap.getStorages(b, State.NORMAL)) {
+ for (DatanodeStorageInfo storage : blocksMap.getStorages(b, State.NORMAL)) {
final DatanodeDescriptor node = storage.getDatanodeDescriptor();
if ((nodesCorrupt != null) && (nodesCorrupt.contains(node))) {
corrupt++;
@@ -3413,7 +3407,8 @@ public class BlockManager implements BlockStatsMXBean {
stale++;
}
}
- return new NumberReplicas(live, decommissioned, decommissioning, corrupt, excess, stale);
+ return new NumberReplicas(live, decommissioned, decommissioning, corrupt,
+ excess, stale);
}
/**
@@ -3596,8 +3591,6 @@ public class BlockManager implements BlockStatsMXBean {
String src, BlockInfo[] blocks) {
for (BlockInfo b: blocks) {
if (!b.isComplete()) {
- final BlockInfoUnderConstruction uc =
- (BlockInfoUnderConstruction)b;
final int numNodes = b.numNodes();
final int min = getMinStorageNum(b);
final BlockUCState state = b.getBlockUCState();
@@ -3723,11 +3716,7 @@ public class BlockManager implements BlockStatsMXBean {
return blocksMap.getBlockCollection(b);
}
- public int numCorruptReplicas(Block block) {
- return corruptReplicas.numCorruptReplicas(block);
- }
-
- public void removeBlockFromMap(Block block) {
+ public void removeBlockFromMap(BlockInfo block) {
removeFromExcessReplicateMap(block);
blocksMap.removeBlock(block);
// If block is removed from blocksMap remove it from corruptReplicasMap
@@ -3737,7 +3726,7 @@ public class BlockManager implements BlockStatsMXBean {
/**
* If a block is removed from blocksMap, remove it from excessReplicateMap.
*/
- private void removeFromExcessReplicateMap(Block block) {
+ private void removeFromExcessReplicateMap(BlockInfo block) {
for (DatanodeStorageInfo info : getStorages(block)) {
String uuid = info.getDatanodeDescriptor().getDatanodeUuid();
LightWeightLinkedSet<BlockInfo> excessReplicas =
@@ -3768,14 +3757,14 @@ public class BlockManager implements BlockStatsMXBean {
/**
* Get the replicas which are corrupt for a given block.
*/
- public Collection<DatanodeDescriptor> getCorruptReplicas(Block block) {
+ public Collection<DatanodeDescriptor> getCorruptReplicas(BlockInfo block) {
return corruptReplicas.getNodes(block);
}
/**
* Get reason for certain corrupted replicas for a given block and a given dn.
*/
- public String getCorruptReason(Block block, DatanodeDescriptor node) {
+ public String getCorruptReason(BlockInfo block, DatanodeDescriptor node) {
return corruptReplicas.getCorruptReason(block, node);
}
@@ -3869,7 +3858,7 @@ public class BlockManager implements BlockStatsMXBean {
datanodeManager.clearPendingQueues();
postponedMisreplicatedBlocks.clear();
postponedMisreplicatedBlocksCount.set(0);
- };
+ }
public static LocatedBlock newLocatedBlock(
ExtendedBlock b, DatanodeStorageInfo[] storages,
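(Editor's sketch, not part of the commit: the invalidateBlock hunks above move the replica count out of invalidateBlock() and into its callers, which already computed countNodes(b.stored) to decide whether invalidation is safe. The standalone Java below shows the count-once-then-thread-through pattern under assumed, simplified names -- it is not the real BlockManager API.)

class InvalidatePattern {
  static final class NumberReplicas {
    final int live;
    final int stale;
    NumberReplicas(int live, int stale) { this.live = live; this.stale = stale; }
  }

  boolean invalidateBlock(long blockId, String datanode, NumberReplicas nr) {
    if (nr.stale > 0) {
      // postpone: some replicas sit on nodes with possibly out-of-date reports
      return false;
    }
    // the caller has already verified at least one live copy exists elsewhere,
    // so the corrupt replica can be deleted without re-counting here
    System.out.println("invalidating block " + blockId + " on " + datanode);
    return true;
  }
}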
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d62b63d2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
index 0dbf485..85cea5a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
@@ -117,7 +117,7 @@ class BlocksMap {
* remove it from all data-node lists it belongs to;
* and remove all data-node locations associated with the block.
*/
- void removeBlock(Block block) {
+ void removeBlock(BlockInfo block) {
BlockInfo blockInfo = blocks.remove(block);
if (blockInfo == null)
return;
@@ -190,7 +190,7 @@ class BlocksMap {
// remove block from the data-node list and the node from the block info
boolean removed = node.removeBlock(info);
- if (info.getDatanode(0) == null // no datanodes left
+ if (info.hasEmptyStorage() // no datanodes left
&& info.isDeleted()) { // does not belong to a file
blocks.remove(b); // remove block from the map
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d62b63d2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ContiguousBlockStorageOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ContiguousBlockStorageOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ContiguousBlockStorageOp.java
index 092f65e..70251e1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ContiguousBlockStorageOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ContiguousBlockStorageOp.java
@@ -45,13 +45,12 @@ class ContiguousBlockStorageOp {
return last;
}
- static boolean addStorage(BlockInfo b, DatanodeStorageInfo storage) {
+ static void addStorage(BlockInfo b, DatanodeStorageInfo storage) {
// find the last null node
int lastNode = ensureCapacity(b, 1);
b.setStorageInfo(lastNode, storage);
b.setNext(lastNode, null);
b.setPrevious(lastNode, null);
- return true;
}
static boolean removeStorage(BlockInfo b,
@@ -103,4 +102,8 @@ class ContiguousBlockStorageOp {
"newBlock already exists.");
}
}
+
+ static boolean hasEmptyStorage(BlockInfo b) {
+ return b.getStorageInfo(0) == null;
+ }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d62b63d2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
index fc2e234..9a0023d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
@@ -17,8 +17,8 @@
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
+import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.ipc.Server;
@@ -46,8 +46,12 @@ public class CorruptReplicasMap{
CORRUPTION_REPORTED // client or datanode reported the corruption
}
- private final SortedMap<Block, Map<DatanodeDescriptor, Reason>> corruptReplicasMap =
- new TreeMap<Block, Map<DatanodeDescriptor, Reason>>();
+ /**
+ * Used to track corrupted replicas (for contiguous block) or internal blocks
+ * (for striped block) and the corresponding DataNodes. For a striped block,
+ * the key here is the striped block group object stored in the blocksMap.
+ */
+ private final SortedMap<BlockInfo, Map<DatanodeDescriptor, Reason>> corruptReplicasMap = new TreeMap<>();
/**
* Mark the block belonging to datanode as corrupt.
@@ -57,21 +61,21 @@ public class CorruptReplicasMap{
* @param reason a textual reason (for logging purposes)
* @param reasonCode the enum representation of the reason
*/
- void addToCorruptReplicasMap(Block blk, DatanodeDescriptor dn,
+ void addToCorruptReplicasMap(BlockInfo blk, DatanodeDescriptor dn,
String reason, Reason reasonCode) {
Map <DatanodeDescriptor, Reason> nodes = corruptReplicasMap.get(blk);
if (nodes == null) {
- nodes = new HashMap<DatanodeDescriptor, Reason>();
+ nodes = new HashMap<>();
corruptReplicasMap.put(blk, nodes);
}
-
+
String reasonText;
if (reason != null) {
reasonText = " because " + reason;
} else {
reasonText = "";
}
-
+
if (!nodes.keySet().contains(dn)) {
NameNode.blockStateChangeLog.info(
"BLOCK NameSystem.addToCorruptReplicasMap: {} added as corrupt on "
@@ -92,7 +96,7 @@ public class CorruptReplicasMap{
*
* @param blk Block to be removed
*/
- void removeFromCorruptReplicasMap(Block blk) {
+ void removeFromCorruptReplicasMap(BlockInfo blk) {
if (corruptReplicasMap != null) {
corruptReplicasMap.remove(blk);
}
@@ -105,12 +109,13 @@ public class CorruptReplicasMap{
* @return true if the removal is successful;
false if the replica is not in the map
*/
- boolean removeFromCorruptReplicasMap(Block blk, DatanodeDescriptor datanode) {
+ boolean removeFromCorruptReplicasMap(BlockInfo blk,
+ DatanodeDescriptor datanode) {
return removeFromCorruptReplicasMap(blk, datanode, Reason.ANY);
}
- boolean removeFromCorruptReplicasMap(Block blk, DatanodeDescriptor datanode,
- Reason reason) {
+ boolean removeFromCorruptReplicasMap(BlockInfo blk,
+ DatanodeDescriptor datanode, Reason reason) {
Map <DatanodeDescriptor, Reason> datanodes = corruptReplicasMap.get(blk);
if (datanodes==null)
return false;
@@ -139,11 +144,9 @@ public class CorruptReplicasMap{
* @param blk Block for which nodes are requested
* @return collection of nodes. Null if does not exists
*/
- Collection<DatanodeDescriptor> getNodes(Block blk) {
- Map <DatanodeDescriptor, Reason> nodes = corruptReplicasMap.get(blk);
- if (nodes == null)
- return null;
- return nodes.keySet();
+ Collection<DatanodeDescriptor> getNodes(BlockInfo blk) {
+ Map<DatanodeDescriptor, Reason> nodes = corruptReplicasMap.get(blk);
+ return nodes != null ? nodes.keySet() : null;
}
/**
@@ -153,12 +156,12 @@ public class CorruptReplicasMap{
* @param node DatanodeDescriptor which holds the replica
* @return true if replica is corrupt, false if does not exists in this map
*/
- boolean isReplicaCorrupt(Block blk, DatanodeDescriptor node) {
+ boolean isReplicaCorrupt(BlockInfo blk, DatanodeDescriptor node) {
Collection<DatanodeDescriptor> nodes = getNodes(blk);
return ((nodes != null) && (nodes.contains(node)));
}
- int numCorruptReplicas(Block blk) {
+ int numCorruptReplicas(BlockInfo blk) {
Collection<DatanodeDescriptor> nodes = getNodes(blk);
return (nodes == null) ? 0 : nodes.size();
}
@@ -168,9 +171,9 @@ public class CorruptReplicasMap{
}
/**
- * Return a range of corrupt replica block ids. Up to numExpectedBlocks
+ * Return a range of corrupt replica block ids. Up to numExpectedBlocks
* blocks starting at the next block after startingBlockId are returned
- * (fewer if numExpectedBlocks blocks are unavailable). If startingBlockId
+ * (fewer if numExpectedBlocks blocks are unavailable). If startingBlockId
* is null, up to numExpectedBlocks blocks are returned from the beginning.
* If startingBlockId cannot be found, null is returned.
*
@@ -181,44 +184,39 @@ public class CorruptReplicasMap{
* @return Up to numExpectedBlocks blocks from startingBlockId if it exists
*
*/
+ @VisibleForTesting
long[] getCorruptReplicaBlockIds(int numExpectedBlocks,
Long startingBlockId) {
if (numExpectedBlocks < 0 || numExpectedBlocks > 100) {
return null;
}
-
- Iterator<Block> blockIt = corruptReplicasMap.keySet().iterator();
-
+ Iterator<BlockInfo> blockIt = corruptReplicasMap.keySet().iterator();
// if the starting block id was specified, iterate over keys until
// we find the matching block. If we find a matching block, break
- // to leave the iterator on the next block after the specified block.
+ // to leave the iterator on the next block after the specified block.
if (startingBlockId != null) {
boolean isBlockFound = false;
while (blockIt.hasNext()) {
- Block b = blockIt.next();
+ BlockInfo b = blockIt.next();
if (b.getBlockId() == startingBlockId) {
isBlockFound = true;
- break;
+ break;
}
}
-
if (!isBlockFound) {
return null;
}
}
- ArrayList<Long> corruptReplicaBlockIds = new ArrayList<Long>();
-
+ ArrayList<Long> corruptReplicaBlockIds = new ArrayList<>();
// append up to numExpectedBlocks blockIds to our list
for(int i=0; i<numExpectedBlocks && blockIt.hasNext(); i++) {
corruptReplicaBlockIds.add(blockIt.next().getBlockId());
}
-
long[] ret = new long[corruptReplicaBlockIds.size()];
for(int i=0; i<ret.length; i++) {
ret[i] = corruptReplicaBlockIds.get(i);
}
-
return ret;
}
@@ -229,7 +227,7 @@ public class CorruptReplicasMap{
* @param node datanode that contains this corrupted replica
* @return reason
*/
- String getCorruptReason(Block block, DatanodeDescriptor node) {
+ String getCorruptReason(BlockInfo block, DatanodeDescriptor node) {
Reason reason = null;
if(corruptReplicasMap.containsKey(block)) {
if (corruptReplicasMap.get(block).containsKey(node)) {
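(Editor's sketch, not part of the commit: the core of HDFS-8652 is visible above -- corruptReplicasMap is now keyed by the BlockInfo object stored in the blocksMap rather than by a plain Block, so a single entry can represent a contiguous replica or, per the new javadoc, a striped block group. A minimal standalone Java illustration of that keying, with deliberately simplified types and String datanodes:)

import java.util.*;

class CorruptReplicasSketch {
  static class BlockInfo implements Comparable<BlockInfo> {
    final long blockId;
    BlockInfo(long id) { this.blockId = id; }
    public int compareTo(BlockInfo o) { return Long.compare(blockId, o.blockId); }
  }

  // keyed by the stored BlockInfo, mirroring the new map declaration above
  private final SortedMap<BlockInfo, Map<String, String>> corrupt = new TreeMap<>();

  void addToCorruptReplicasMap(BlockInfo stored, String datanode, String reason) {
    corrupt.computeIfAbsent(stored, k -> new HashMap<>()).put(datanode, reason);
  }

  int numCorruptReplicas(BlockInfo stored) {
    Map<String, String> nodes = corrupt.get(stored);
    return nodes == null ? 0 : nodes.size();
  }
}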
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d62b63d2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
index 4830d5d..eebeac0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
@@ -71,7 +71,7 @@ class FSDirWriteFileOp {
private FSDirWriteFileOp() {}
static boolean unprotectedRemoveBlock(
FSDirectory fsd, String path, INodesInPath iip, INodeFile fileNode,
- Block block) throws IOException {
+ BlockInfo block) throws IOException {
// modify file-> block and blocksMap
// fileNode should be under construction
BlockInfoUnderConstruction uc = fileNode.removeLastBlock(block);
@@ -136,7 +136,9 @@ class FSDirWriteFileOp {
fsd.writeLock();
try {
// Remove the block from the pending creates list
- if (!unprotectedRemoveBlock(fsd, src, iip, file, localBlock)) {
+ BlockInfo storedBlock = fsd.getBlockManager().getStoredBlock(localBlock);
+ if (storedBlock != null &&
+ !unprotectedRemoveBlock(fsd, src, iip, file, storedBlock)) {
return;
}
} finally {
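(Editor's sketch, not part of the commit: since unprotectedRemoveBlock now takes a BlockInfo, the hunk above first resolves the client-reported Block to the namenode's stored BlockInfo via getStoredBlock() and skips the removal when no stored entry exists. The standalone Java below shows that resolve-then-remove pattern under assumed names:)

import java.util.HashMap;
import java.util.Map;

class ResolveThenRemove {
  final Map<Long, Object> blocksMap = new HashMap<>(); // blockId -> stored BlockInfo

  boolean removeReported(long reportedBlockId) {
    Object stored = blocksMap.get(reportedBlockId); // cf. getStoredBlock(localBlock)
    if (stored == null) {
      return false; // nothing stored under this id; skip quietly, not an error
    }
    return blocksMap.remove(reportedBlockId) != null;
  }
}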
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d62b63d2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index 63ef985..96d6982 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -1035,7 +1035,7 @@ public class FSEditLogLoader {
throw new IOException("Trying to remove more than one block from file "
+ path);
}
- Block oldBlock = oldBlocks[oldBlocks.length - 1];
+ BlockInfo oldBlock = oldBlocks[oldBlocks.length - 1];
boolean removed = FSDirWriteFileOp.unprotectedRemoveBlock(
fsDir, path, iip, file, oldBlock);
if (!removed && !(op instanceof UpdateBlocksOp)) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d62b63d2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index ab179b4..2a8231a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -267,10 +267,8 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
out.println("No. of corrupted Replica: " +
numberReplicas.corruptReplicas());
//record datanodes that have corrupted block replica
- Collection<DatanodeDescriptor> corruptionRecord = null;
- if (bm.getCorruptReplicas(block) != null) {
- corruptionRecord = bm.getCorruptReplicas(block);
- }
+ Collection<DatanodeDescriptor> corruptionRecord =
+ bm.getCorruptReplicas(blockInfo);
//report block replicas status on datanodes
for(int idx = (blockInfo.numNodes()-1); idx >= 0; idx--) {
@@ -279,7 +277,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
dn.getNetworkLocation() + " ");
if (corruptionRecord != null && corruptionRecord.contains(dn)) {
out.print(CORRUPT_STATUS+"\t ReasonCode: "+
- bm.getCorruptReason(block,dn));
+ bm.getCorruptReason(blockInfo, dn));
} else if (dn.isDecommissioned() ){
out.print(DECOMMISSIONED_STATUS);
} else if (dn.isDecommissionInProgress()) {
@@ -650,7 +648,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
LightWeightLinkedSet<BlockInfo> blocksExcess =
bm.excessReplicateMap.get(dnDesc.getDatanodeUuid());
Collection<DatanodeDescriptor> corruptReplicas =
- bm.getCorruptReplicas(block.getLocalBlock());
+ bm.getCorruptReplicas(storedBlock);
sb.append("(");
if (dnDesc.isDecommissioned()) {
sb.append("DECOMMISSIONED)");
@@ -658,7 +656,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
sb.append("DECOMMISSIONING)");
} else if (corruptReplicas != null && corruptReplicas.contains(dnDesc)) {
sb.append("CORRUPT)");
- } else if (blocksExcess != null && blocksExcess.contains(block.getLocalBlock())) {
+ } else if (blocksExcess != null && blocksExcess.contains(storedBlock)) {
sb.append("EXCESS)");
} else if (dnDesc.isStale(this.staleInterval)) {
sb.append("STALE_NODE)");
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d62b63d2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index 89ee674..af1e023 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -560,7 +560,8 @@ public class DFSTestUtil {
throws TimeoutException, InterruptedException {
int count = 0;
final int ATTEMPTS = 50;
- int repls = ns.getBlockManager().numCorruptReplicas(b.getLocalBlock());
+ int repls = BlockManagerTestUtil.numCorruptReplicas(ns.getBlockManager(),
+ b.getLocalBlock());
while (repls != corruptRepls && count < ATTEMPTS) {
try {
IOUtils.copyBytes(fs.open(file), new IOUtils.NullOutputStream(),
@@ -572,7 +573,8 @@ public class DFSTestUtil {
count++;
// check more often so corrupt block reports are not easily missed
for (int i = 0; i < 10; i++) {
- repls = ns.getBlockManager().numCorruptReplicas(b.getLocalBlock());
+ repls = BlockManagerTestUtil.numCorruptReplicas(ns.getBlockManager(),
+ b.getLocalBlock());
Thread.sleep(100);
if (repls == corruptRepls) {
break;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d62b63d2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
index 148135b..a899891 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
@@ -24,6 +24,7 @@ import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.ExecutionException;
+import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.protocol.Block;
@@ -87,7 +88,7 @@ public class BlockManagerTestUtil {
final Block b) {
final Set<String> rackSet = new HashSet<String>(0);
final Collection<DatanodeDescriptor> corruptNodes =
- getCorruptReplicas(blockManager).getNodes(b);
+ getCorruptReplicas(blockManager).getNodes(blockManager.getStoredBlock(b));
for(DatanodeStorageInfo storage : blockManager.blocksMap.getStorages(b)) {
final DatanodeDescriptor cur = storage.getDatanodeDescriptor();
if (!cur.isDecommissionInProgress() && !cur.isDecommissioned()) {
@@ -306,4 +307,8 @@ public class BlockManagerTestUtil {
throws ExecutionException, InterruptedException {
dm.getDecomManager().runMonitor();
}
+
+ public static int numCorruptReplicas(BlockManager bm, Block block) {
+ return bm.corruptReplicas.numCorruptReplicas(bm.getStoredBlock(block));
+ }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d62b63d2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java
index bae4f1d..c23f3d0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java
@@ -63,9 +63,7 @@ public class TestBlockInfo {
final DatanodeStorageInfo storage = DFSTestUtil.createDatanodeStorageInfo("storageID", "127.0.0.1");
- boolean added = blockInfo.addStorage(storage, blockInfo);
-
- Assert.assertTrue(added);
+ blockInfo.addStorage(storage, blockInfo);
Assert.assertEquals(storage, blockInfo.getStorageInfo(0));
}
@@ -73,7 +71,7 @@ public class TestBlockInfo {
public void testCopyConstructor() {
BlockInfo old = new BlockInfoContiguous((short) 3);
try {
- BlockInfo copy = new BlockInfoContiguous((BlockInfoContiguous)old);
+ BlockInfo copy = new BlockInfoContiguous(old);
assertEquals(old.getBlockCollection(), copy.getBlockCollection());
assertEquals(old.getCapacity(), copy.getCapacity());
} catch (Exception e) {
@@ -110,8 +108,8 @@ public class TestBlockInfo {
final int MAX_BLOCKS = 10;
DatanodeStorageInfo dd = DFSTestUtil.createDatanodeStorageInfo("s1", "1.1.1.1");
- ArrayList<Block> blockList = new ArrayList<Block>(MAX_BLOCKS);
- ArrayList<BlockInfo> blockInfoList = new ArrayList<BlockInfo>();
+ ArrayList<Block> blockList = new ArrayList<>(MAX_BLOCKS);
+ ArrayList<BlockInfo> blockInfoList = new ArrayList<>();
int headIndex;
int curIndex;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d62b63d2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index 9e31670..f6cc747 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -509,7 +509,7 @@ public class TestBlockManager {
+ " even if all available source nodes have reached their replication"
+ " limits below the hard limit.",
bm.chooseSourceDatanode(
- aBlock,
+ bm.getStoredBlock(aBlock),
cntNodes,
liveNodes,
new NumberReplicas(),
@@ -519,7 +519,7 @@ public class TestBlockManager {
+ " replication since all available source nodes have reached"
+ " their replication limits.",
bm.chooseSourceDatanode(
- aBlock,
+ bm.getStoredBlock(aBlock),
cntNodes,
liveNodes,
new NumberReplicas(),
@@ -532,7 +532,7 @@ public class TestBlockManager {
assertNull("Does not choose a source node for a highest-priority"
+ " replication when all available nodes exceed the hard limit.",
bm.chooseSourceDatanode(
- aBlock,
+ bm.getStoredBlock(aBlock),
cntNodes,
liveNodes,
new NumberReplicas(),
@@ -558,7 +558,7 @@ public class TestBlockManager {
+ " if all available source nodes have reached their replication"
+ " limits below the hard limit.",
bm.chooseSourceDatanode(
- aBlock,
+ bm.getStoredBlock(aBlock),
cntNodes,
liveNodes,
new NumberReplicas(),
@@ -572,7 +572,7 @@ public class TestBlockManager {
assertNull("Does not choose a source decommissioning node for a normal"
+ " replication when all available nodes exceed the hard limit.",
bm.chooseSourceDatanode(
- aBlock,
+ bm.getStoredBlock(aBlock),
cntNodes,
liveNodes,
new NumberReplicas(),
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d62b63d2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java
index 21fb54e..1a49bee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java
@@ -48,20 +48,19 @@ public class TestCorruptReplicaInfo {
private static final Log LOG =
LogFactory.getLog(TestCorruptReplicaInfo.class);
- private final Map<Long, Block> block_map =
- new HashMap<Long, Block>();
+ private final Map<Long, BlockInfo> block_map = new HashMap<>();
// Allow easy block creation by block id
// Return existing block if one with same block id already exists
- private Block getBlock(Long block_id) {
+ private BlockInfo getBlock(Long block_id) {
if (!block_map.containsKey(block_id)) {
- block_map.put(block_id, new Block(block_id,0,0));
+ block_map.put(block_id,
+ new BlockInfoContiguous(new Block(block_id, 0, 0), (short) 1));
}
-
return block_map.get(block_id);
}
- private Block getBlock(int block_id) {
+ private BlockInfo getBlock(int block_id) {
return getBlock((long)block_id);
}
@@ -82,7 +81,7 @@ public class TestCorruptReplicaInfo {
// create a list of block_ids. A list is used to allow easy validation of the
// output of getCorruptReplicaBlockIds
int NUM_BLOCK_IDS = 140;
- List<Long> block_ids = new LinkedList<Long>();
+ List<Long> block_ids = new LinkedList<>();
for (int i=0;i<NUM_BLOCK_IDS;i++) {
block_ids.add((long)i);
}
@@ -130,7 +129,7 @@ public class TestCorruptReplicaInfo {
}
private static void addToCorruptReplicasMap(CorruptReplicasMap crm,
- Block blk, DatanodeDescriptor dn) {
+ BlockInfo blk, DatanodeDescriptor dn) {
crm.addToCorruptReplicasMap(blk, dn, "TEST", Reason.NONE);
}
}
[6/9] hadoop git commit: YARN-2194. Fix bug causing CGroups functionality to fail on RHEL7. Contributed by Wei Yan.
Posted by aw...@apache.org.
YARN-2194. Fix bug causing CGroups functionality to fail on RHEL7. Contributed by Wei Yan.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c40bdb56
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c40bdb56
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c40bdb56
Branch: refs/heads/HADOOP-12111
Commit: c40bdb56a79fe1499c2284d493edc84620c0c078
Parents: 99c8c58
Author: Varun Vasudev <vv...@apache.org>
Authored: Tue Jul 7 16:59:29 2015 +0530
Committer: Varun Vasudev <vv...@apache.org>
Committed: Tue Jul 7 16:59:29 2015 +0530
----------------------------------------------------------------------
.../main/java/org/apache/hadoop/util/StringUtils.java | 8 ++++++++
.../java/org/apache/hadoop/util/TestStringUtils.java | 4 ++++
hadoop-yarn-project/CHANGES.txt | 3 +++
.../server/nodemanager/LinuxContainerExecutor.java | 12 ++++++++----
.../linux/privileged/PrivilegedOperation.java | 1 +
.../linux/privileged/PrivilegedOperationExecutor.java | 2 +-
.../nodemanager/util/CgroupsLCEResourcesHandler.java | 6 ++++--
.../native/container-executor/impl/configuration.c | 8 ++++----
.../container-executor/test/test-container-executor.c | 4 ++--
.../TestLinuxContainerExecutorWithMocks.java | 13 +++++++++----
10 files changed, 44 insertions(+), 17 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c40bdb56/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
index fc4b0ab..73f9c4f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
@@ -872,6 +872,10 @@ public class StringUtils {
return sb.toString();
}
+ public static String join(char separator, Iterable<?> strings) {
+ return join(separator + "", strings);
+ }
+
/**
* Concatenates strings, using a separator.
*
@@ -894,6 +898,10 @@ public class StringUtils {
return sb.toString();
}
+ public static String join(char separator, String[] strings) {
+ return join(separator + "", strings);
+ }
+
/**
* Convert SOME_STUFF to SomeStuff
*
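(Editor's note: the two overloads added above delegate to the existing String-separator joins via (separator + ""). A small usage sketch, grounded in the signatures shown in this diff:)

import java.util.Arrays;
import org.apache.hadoop.util.StringUtils;

public class JoinDemo {
  public static void main(String[] args) {
    // char-separator overload for arrays
    System.out.println(StringUtils.join(',', new String[] {"a", "b", "c"})); // a,b,c
    // char-separator overload for any Iterable
    System.out.println(StringUtils.join('%', Arrays.asList("/dir1", "/dir2"))); // /dir1%/dir2
  }
}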
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c40bdb56/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
index 85ab8c4..e3e613c 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
@@ -278,8 +278,12 @@ public class TestStringUtils extends UnitTestcaseTimeLimit {
s.add("c");
assertEquals("", StringUtils.join(":", s.subList(0, 0)));
assertEquals("a", StringUtils.join(":", s.subList(0, 1)));
+ assertEquals("", StringUtils.join(':', s.subList(0, 0)));
+ assertEquals("a", StringUtils.join(':', s.subList(0, 1)));
assertEquals("a:b", StringUtils.join(":", s.subList(0, 2)));
assertEquals("a:b:c", StringUtils.join(":", s.subList(0, 3)));
+ assertEquals("a:b", StringUtils.join(':', s.subList(0, 2)));
+ assertEquals("a:b:c", StringUtils.join(':', s.subList(0, 3)));
}
@Test (timeout = 30000)
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c40bdb56/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 8f63187..2d1d6a2 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -595,6 +595,9 @@ Release 2.8.0 - UNRELEASED
YARN-3837. javadocs of TimelineAuthenticationFilterInitializer give wrong
prefix for auth options. (Bibin A Chundatt via devaraj)
+ YARN-2194. Fix bug causing CGroups functionality to fail on RHEL7.
+ (Wei Yan via vvasudev)
+
Release 2.7.2 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c40bdb56/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
index dbe257d..b936969 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
@@ -241,8 +241,10 @@ public class LinuxContainerExecutor extends ContainerExecutor {
Integer.toString(Commands.INITIALIZE_CONTAINER.getValue()),
appId,
nmPrivateContainerTokensPath.toUri().getPath().toString(),
- StringUtils.join(",", localDirs),
- StringUtils.join(",", logDirs)));
+ StringUtils.join(PrivilegedOperation.LINUX_FILE_PATH_SEPARATOR,
+ localDirs),
+ StringUtils.join(PrivilegedOperation.LINUX_FILE_PATH_SEPARATOR,
+ logDirs)));
File jvm = // use same jvm as parent
new File(new File(System.getProperty("java.home"), "bin"), "java");
@@ -363,8 +365,10 @@ public class LinuxContainerExecutor extends ContainerExecutor {
nmPrivateContainerScriptPath.toUri().getPath().toString(),
nmPrivateTokensPath.toUri().getPath().toString(),
pidFilePath.toString(),
- StringUtils.join(",", localDirs),
- StringUtils.join(",", logDirs),
+ StringUtils.join(PrivilegedOperation.LINUX_FILE_PATH_SEPARATOR,
+ localDirs),
+ StringUtils.join(PrivilegedOperation.LINUX_FILE_PATH_SEPARATOR,
+ logDirs),
resourcesOptions));
if (tcCommandFile != null) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c40bdb56/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperation.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperation.java
index 74556a8..f220cbd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperation.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperation.java
@@ -36,6 +36,7 @@ import java.util.List;
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class PrivilegedOperation {
+ public final static char LINUX_FILE_PATH_SEPARATOR = '%';
public enum OperationType {
CHECK_SETUP("--checksetup"),
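'%' replaces ',' as the list delimiter because on RHEL7 the cpu and cpuacct cgroup controllers are co-mounted at a path that itself contains a comma (e.g. /sys/fs/cgroup/cpu,cpuacct/...), which made comma-separated task-file lists ambiguous; the matching get_values()/extract_values() change in the native container-executor (below) keeps both sides of the protocol on the same delimiter. A minimal sketch of the failure mode, using a hypothetical RHEL7-style path:

    public class SeparatorDemo {
      // Mirrors PrivilegedOperation.LINUX_FILE_PATH_SEPARATOR.
      static final char SEP = '%';

      public static void main(String[] args) {
        // On RHEL7 the co-mounted controllers put the old delimiter
        // inside the path itself.
        String tasksFile =
            "/sys/fs/cgroup/cpu,cpuacct/hadoop-yarn/container_1/tasks";

        // Splitting on ',' breaks the single path in two -- the YARN-2194 bug.
        System.out.println(tasksFile.split(",").length);                 // 2

        // Splitting on '%' leaves the path intact.
        System.out.println(tasksFile.split(String.valueOf(SEP)).length); // 1
      }
    }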
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c40bdb56/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperationExecutor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperationExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperationExecutor.java
index 1c4a51c..6fe0f5c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperationExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperationExecutor.java
@@ -234,7 +234,7 @@ public class PrivilegedOperationExecutor {
if (noneArgsOnly == false) {
//We have already appended at least one tasks file.
- finalOpArg.append(",");
+ finalOpArg.append(PrivilegedOperation.LINUX_FILE_PATH_SEPARATOR);
finalOpArg.append(tasksFile);
} else {
finalOpArg.append(tasksFile);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c40bdb56/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java
index b38e559..6994fc3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java
@@ -49,6 +49,7 @@ import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation;
import org.apache.hadoop.yarn.util.Clock;
import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin;
import org.apache.hadoop.yarn.util.SystemClock;
@@ -409,10 +410,11 @@ public class CgroupsLCEResourcesHandler implements LCEResourcesHandler {
if (isCpuWeightEnabled()) {
sb.append(pathForCgroup(CONTROLLER_CPU, containerName) + "/tasks");
- sb.append(",");
+ sb.append(PrivilegedOperation.LINUX_FILE_PATH_SEPARATOR);
}
- if (sb.charAt(sb.length() - 1) == ',') {
+ if (sb.charAt(sb.length() - 1) ==
+ PrivilegedOperation.LINUX_FILE_PATH_SEPARATOR) {
sb.deleteCharAt(sb.length() - 1);
}
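The handler appends one separator per tasks file and then trims the trailing one; a standalone sketch of that construction, with a hypothetical cgroup path:

    public class TasksArgDemo {
      public static void main(String[] args) {
        // Hypothetical per-controller tasks files for one container.
        String[] tasksFiles = {
            "/sys/fs/cgroup/cpu,cpuacct/hadoop-yarn/container_1/tasks"
        };

        StringBuilder sb = new StringBuilder("cgroups=");
        for (String f : tasksFiles) {
          sb.append(f).append('%');         // one trailing separator per entry
        }
        if (sb.charAt(sb.length() - 1) == '%') {
          sb.deleteCharAt(sb.length() - 1); // drop the final separator
        }
        System.out.println(sb);
        // cgroups=/sys/fs/cgroup/cpu,cpuacct/hadoop-yarn/container_1/tasks
      }
    }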
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c40bdb56/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.c
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.c
index 15b53ae..51adc97 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.c
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.c
@@ -283,7 +283,7 @@ char * get_value(const char* key) {
/**
* Function to return an array of values for a key.
- * Value delimiter is assumed to be a comma.
+ * Value delimiter is assumed to be a '%'.
*/
char ** get_values(const char * key) {
char *value = get_value(key);
@@ -291,7 +291,7 @@ char ** get_values(const char * key) {
}
/**
- * Extracts array of values from the comma separated list of values.
+ * Extracts array of values from the '%' separated list of values.
*/
char ** extract_values(char *value) {
char ** toPass = NULL;
@@ -303,14 +303,14 @@ char ** extract_values(char *value) {
//first allocate any array of 10
if(value != NULL) {
toPass = (char **) malloc(sizeof(char *) * toPassSize);
- tempTok = strtok_r((char *)value, ",", &tempstr);
+ tempTok = strtok_r((char *)value, "%", &tempstr);
while (tempTok != NULL) {
toPass[size++] = tempTok;
if(size == toPassSize) {
toPassSize += MAX_SIZE;
toPass = (char **) realloc(toPass,(sizeof(char *) * toPassSize));
}
- tempTok = strtok_r(NULL, ",", &tempstr);
+ tempTok = strtok_r(NULL, "%", &tempstr);
}
}
if (toPass != NULL) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c40bdb56/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
index be6cc49..13604f0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
@@ -30,8 +30,8 @@
#define TEST_ROOT "/tmp/test-container-executor"
#define DONT_TOUCH_FILE "dont-touch-me"
-#define NM_LOCAL_DIRS TEST_ROOT "/local-1," TEST_ROOT "/local-2," \
- TEST_ROOT "/local-3," TEST_ROOT "/local-4," TEST_ROOT "/local-5"
+#define NM_LOCAL_DIRS TEST_ROOT "/local-1%" TEST_ROOT "/local-2%" \
+ TEST_ROOT "/local-3%" TEST_ROOT "/local-4%" TEST_ROOT "/local-5"
#define NM_LOG_DIRS TEST_ROOT "/logs/userlogs"
#define ARRAY_SIZE 1000
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c40bdb56/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java
index d48ce13..82b7fd9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java
@@ -49,6 +49,7 @@ import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerDiagnosticsUpdateEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation;
import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerSignalContext;
import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerStartContext;
import org.apache.hadoop.yarn.server.nodemanager.executor.DeletionAsUserContext;
@@ -148,8 +149,10 @@ public class TestLinuxContainerExecutorWithMocks {
assertEquals(Arrays.asList(YarnConfiguration.DEFAULT_NM_NONSECURE_MODE_LOCAL_USER,
appSubmitter, cmd, appId, containerId,
workDir.toString(), "/bin/echo", "/dev/null", pidFile.toString(),
- StringUtils.join(",", dirsHandler.getLocalDirs()),
- StringUtils.join(",", dirsHandler.getLogDirs()), "cgroups=none"),
+ StringUtils.join(PrivilegedOperation.LINUX_FILE_PATH_SEPARATOR,
+ dirsHandler.getLocalDirs()),
+ StringUtils.join(PrivilegedOperation.LINUX_FILE_PATH_SEPARATOR,
+ dirsHandler.getLogDirs()), "cgroups=none"),
readMockParams());
}
@@ -312,8 +315,10 @@ public class TestLinuxContainerExecutorWithMocks {
assertEquals(Arrays.asList(YarnConfiguration.DEFAULT_NM_NONSECURE_MODE_LOCAL_USER,
appSubmitter, cmd, appId, containerId,
workDir.toString(), "/bin/echo", "/dev/null", pidFile.toString(),
- StringUtils.join(",", dirsHandler.getLocalDirs()),
- StringUtils.join(",", dirsHandler.getLogDirs()),
+ StringUtils.join(PrivilegedOperation.LINUX_FILE_PATH_SEPARATOR,
+ dirsHandler.getLocalDirs()),
+ StringUtils.join(PrivilegedOperation.LINUX_FILE_PATH_SEPARATOR,
+ dirsHandler.getLogDirs()),
"cgroups=none"), readMockParams());
}
[8/9] hadoop git commit: Merge branch 'trunk' into HADOOP-12111
Posted by aw...@apache.org.
Merge branch 'trunk' into HADOOP-12111
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b0589aa8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b0589aa8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b0589aa8
Branch: refs/heads/HADOOP-12111
Commit: b0589aa8f8a40f6084ca0643d96ae3e9ed93ba78
Parents: 6034366 e0febce
Author: Allen Wittenauer <aw...@apache.org>
Authored: Tue Jul 7 08:42:00 2015 -0700
Committer: Allen Wittenauer <aw...@apache.org>
Committed: Tue Jul 7 08:42:00 2015 -0700
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 12 +-
.../org/apache/hadoop/conf/Configuration.java | 8 +-
.../org/apache/hadoop/util/StringUtils.java | 8 ++
.../org/apache/hadoop/net/unix/DomainSocket.c | 9 +-
.../apache/hadoop/conf/TestConfiguration.java | 15 ++
.../org/apache/hadoop/util/TestStringUtils.java | 4 +
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 4 +-
.../hdfs/server/blockmanagement/BlockInfo.java | 7 +-
.../blockmanagement/BlockInfoContiguous.java | 9 +-
.../BlockInfoUnderConstruction.java | 22 ++-
.../BlockInfoUnderConstructionContiguous.java | 13 +-
.../server/blockmanagement/BlockManager.java | 143 +++++++++----------
.../hdfs/server/blockmanagement/BlocksMap.java | 4 +-
.../ContiguousBlockStorageOp.java | 7 +-
.../blockmanagement/CorruptReplicasMap.java | 62 ++++----
.../hdfs/server/namenode/FSDirWriteFileOp.java | 6 +-
.../hdfs/server/namenode/FSEditLogLoader.java | 2 +-
.../hdfs/server/namenode/NamenodeFsck.java | 12 +-
.../org/apache/hadoop/hdfs/DFSTestUtil.java | 6 +-
.../blockmanagement/BlockManagerTestUtil.java | 7 +-
.../server/blockmanagement/TestBlockInfo.java | 10 +-
.../blockmanagement/TestBlockManager.java | 10 +-
.../blockmanagement/TestCorruptReplicaInfo.java | 15 +-
hadoop-mapreduce-project/CHANGES.txt | 2 +-
hadoop-yarn-project/CHANGES.txt | 8 +-
...TimelineAuthenticationFilterInitializer.java | 5 +-
.../nodemanager/LinuxContainerExecutor.java | 12 +-
.../linux/privileged/PrivilegedOperation.java | 1 +
.../privileged/PrivilegedOperationExecutor.java | 2 +-
.../util/CgroupsLCEResourcesHandler.java | 6 +-
.../container-executor/impl/configuration.c | 8 +-
.../test/test-container-executor.c | 4 +-
.../TestLinuxContainerExecutorWithMocks.java | 13 +-
33 files changed, 257 insertions(+), 199 deletions(-)
----------------------------------------------------------------------
[3/9] hadoop git commit: HADOOP-11974. Fix FIONREAD #include on Solaris (Alan Burlison via Colin P. McCabe)
Posted by aw...@apache.org.
HADOOP-11974. Fix FIONREAD #include on Solaris (Alan Burlison via Colin P. McCabe)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/81f36443
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/81f36443
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/81f36443
Branch: refs/heads/HADOOP-12111
Commit: 81f364437608b21e85fc393f63546bf8b425ac71
Parents: bf89ddb
Author: Colin Patrick Mccabe <cm...@cloudera.com>
Authored: Mon Jul 6 12:56:34 2015 -0700
Committer: Colin Patrick Mccabe <cm...@cloudera.com>
Committed: Mon Jul 6 17:17:59 2015 -0700
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
.../native/src/org/apache/hadoop/net/unix/DomainSocket.c | 9 ++++++++-
2 files changed, 11 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/81f36443/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index fab78d4..faf5a5c 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -678,6 +678,9 @@ Release 2.8.0 - UNRELEASED
HADOOP-12045. Enable LocalFileSystem#setTimes to change atime.
(Kazuho Fujii via cnauroth)
+ HADOOP-11974. Fix FIONREAD #include on Solaris (Alan Burlison via Colin P.
+ McCabe)
+
OPTIMIZATIONS
HADOOP-11785. Reduce the number of listStatus operation in distcp
http://git-wip-us.apache.org/repos/asf/hadoop/blob/81f36443/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/net/unix/DomainSocket.c
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/net/unix/DomainSocket.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/net/unix/DomainSocket.c
index a3f27ee..e658d8f 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/net/unix/DomainSocket.c
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/net/unix/DomainSocket.c
@@ -31,7 +31,14 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
-#include <sys/ioctl.h> /* for FIONREAD */
+
+/* For FIONREAD */
+#if defined(__sun)
+#include <sys/filio.h>
+#else
+#include <sys/ioctl.h>
+#endif
+
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/types.h>
[4/9] hadoop git commit: YARN-3837. javadocs of TimelineAuthenticationFilterInitializer give wrong prefix for auth options. Contributed by Bibin A Chundatt.
Posted by aw...@apache.org.
YARN-3837. javadocs of TimelineAuthenticationFilterInitializer give wrong prefix for auth options. Contributed by Bibin A Chundatt.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/af63427c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/af63427c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/af63427c
Branch: refs/heads/HADOOP-12111
Commit: af63427c6d7d2fc251eafb1f152b7a90c5bd07e5
Parents: 81f3644
Author: Devaraj K <de...@apache.org>
Authored: Tue Jul 7 12:06:30 2015 +0530
Committer: Devaraj K <de...@apache.org>
Committed: Tue Jul 7 12:06:30 2015 +0530
----------------------------------------------------------------------
hadoop-yarn-project/CHANGES.txt | 3 +++
.../security/TimelineAuthenticationFilterInitializer.java | 5 ++---
2 files changed, 5 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/af63427c/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index f35be75..8f63187 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -592,6 +592,9 @@ Release 2.8.0 - UNRELEASED
YARN-3882. AggregatedLogFormat should close aclScanner and ownerScanner
after create them. (zhihai xu via xgong)
+ YARN-3837. javadocs of TimelineAuthenticationFilterInitializer give wrong
+ prefix for auth options. (Bibin A Chundatt via devaraj)
+
Release 2.7.2 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/af63427c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilterInitializer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilterInitializer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilterInitializer.java
index a3c136c..4e7c29a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilterInitializer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilterInitializer.java
@@ -62,9 +62,8 @@ public class TimelineAuthenticationFilterInitializer extends FilterInitializer {
* Initializes {@link TimelineAuthenticationFilter}
* <p>
* Propagates to {@link TimelineAuthenticationFilter} configuration all YARN
- * configuration properties prefixed with
- * {@code yarn.timeline-service.authentication.}
- *
+ * configuration properties prefixed with {@value #PREFIX}
+ *
* @param container
* The filter container
* @param conf
[5/9] hadoop git commit: HADOOP-12117. Potential NPE from Configuration#loadProperty with allowNullValueProperties set. (Contributed by zhihai xu)
Posted by aw...@apache.org.
HADOOP-12117. Potential NPE from Configuration#loadProperty with allowNullValueProperties set. (Contributed by zhihai xu)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/99c8c583
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/99c8c583
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/99c8c583
Branch: refs/heads/HADOOP-12111
Commit: 99c8c5839b65666e6099116e4d7024e0eb4682b9
Parents: af63427
Author: Vinayakumar B <vi...@apache.org>
Authored: Tue Jul 7 16:11:27 2015 +0530
Committer: Vinayakumar B <vi...@apache.org>
Committed: Tue Jul 7 16:11:27 2015 +0530
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
.../java/org/apache/hadoop/conf/Configuration.java | 8 ++++----
.../org/apache/hadoop/conf/TestConfiguration.java | 15 +++++++++++++++
3 files changed, 22 insertions(+), 4 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/99c8c583/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index faf5a5c..5d11db9 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -933,6 +933,9 @@ Release 2.8.0 - UNRELEASED
HADOOP-12186. ActiveStandbyElector shouldn't call monitorLockNodeAsync
multiple times (zhihai xu via vinayakumarb)
+ HADOOP-12117. Potential NPE from Configuration#loadProperty with
+ allowNullValueProperties set. (zhihai xu via vinayakumarb)
+
Release 2.7.2 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/99c8c583/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index 54e07c6..0b45429 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -2735,14 +2735,14 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
to.put(entry.getKey(), entry.getValue());
}
}
-
+
private void loadProperty(Properties properties, String name, String attr,
String value, boolean finalParameter, String[] source) {
if (value != null || allowNullValueProperties) {
+ if (value == null) {
+ value = DEFAULT_STRING_CHECK;
+ }
if (!finalParameters.contains(attr)) {
- if (value==null && allowNullValueProperties) {
- value = DEFAULT_STRING_CHECK;
- }
properties.setProperty(attr, value);
if(source != null) {
updatingResource.put(attr, source);
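The pre-fix code substituted DEFAULT_STRING_CHECK only on the non-final branch, so a final parameter arriving with a null value could still be dereferenced later in the method. A minimal sketch of the fixed control flow (branch structure paraphrased from Configuration; names hypothetical except DEFAULT_STRING_CHECK):

    import java.util.HashSet;
    import java.util.Properties;
    import java.util.Set;

    public class LoadPropertyDemo {
      // Placeholder; the real constant is defined in Configuration.
      static final String DEFAULT_STRING_CHECK = "<null-value-marker>";

      static void loadProperty(Properties props, Set<String> finalParams,
          String attr, String value, boolean allowNullValueProperties) {
        if (value != null || allowNullValueProperties) {
          if (value == null) {
            value = DEFAULT_STRING_CHECK; // hoisted: non-null from here on
          }
          if (!finalParams.contains(attr)) {
            props.setProperty(attr, value);
          } else if (!value.equals(props.getProperty(attr))) {
            // Pre-fix, a final attr with a null value reached an equals()
            // call like this one and threw NPE.
            System.err.println("Attempt to override final parameter: " + attr);
          }
        }
      }

      public static void main(String[] args) {
        Properties p = new Properties();
        Set<String> finals = new HashSet<String>();
        finals.add("attr");
        loadProperty(p, finals, "attr", null, true); // completes without NPE
      }
    }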
http://git-wip-us.apache.org/repos/asf/hadoop/blob/99c8c583/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
index ec6c964..a039741 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
@@ -42,6 +42,7 @@ import static java.util.concurrent.TimeUnit.*;
import junit.framework.TestCase;
import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.fail;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration.IntegerRanges;
@@ -49,6 +50,7 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.NetUtils;
import static org.apache.hadoop.util.PlatformName.IBM_JAVA;
+
import org.codehaus.jackson.map.ObjectMapper;
import org.mockito.Mockito;
@@ -1511,6 +1513,19 @@ public class TestConfiguration extends TestCase {
// it's expected behaviour.
}
+ public void testNullValueProperties() throws Exception {
+ Configuration conf = new Configuration();
+ conf.setAllowNullValueProperties(true);
+ out = new BufferedWriter(new FileWriter(CONFIG));
+ startConfig();
+ appendProperty("attr", "value", true);
+ appendProperty("attr", "", true);
+ endConfig();
+ Path fileResource = new Path(CONFIG);
+ conf.addResource(fileResource);
+ assertEquals("value", conf.get("attr"));
+ }
+
public static void main(String[] argv) throws Exception {
junit.textui.TestRunner.main(new String[]{
TestConfiguration.class.getName()
[9/9] hadoop git commit: HADOOP-12202. releasedocmaker drops missing component and assignee entries (aw)
Posted by aw...@apache.org.
HADOOP-12202. releasedocmaker drops missing component and assignee entries (aw)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0d7a7085
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0d7a7085
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0d7a7085
Branch: refs/heads/HADOOP-12111
Commit: 0d7a70857552a74b60de22773bea5ea47f6ad2a7
Parents: b0589aa
Author: Allen Wittenauer <aw...@apache.org>
Authored: Tue Jul 7 12:07:53 2015 -0700
Committer: Allen Wittenauer <aw...@apache.org>
Committed: Tue Jul 7 12:07:53 2015 -0700
----------------------------------------------------------------------
dev-support/releasedocmaker.py | 25 +++++++++++++------------
1 file changed, 13 insertions(+), 12 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d7a7085/dev-support/releasedocmaker.py
----------------------------------------------------------------------
diff --git a/dev-support/releasedocmaker.py b/dev-support/releasedocmaker.py
index 6e01260..e7d73fc 100755
--- a/dev-support/releasedocmaker.py
+++ b/dev-support/releasedocmaker.py
@@ -420,6 +420,8 @@ def main():
else:
title=options.title
+ haderrors=False
+
for v in versions:
vstr=str(v)
jlist = JiraIter(vstr,projects)
@@ -468,14 +470,6 @@ def main():
for jira in sorted(jlist):
if jira.getIncompatibleChange():
incompatlist.append(jira)
- if (len(jira.getReleaseNote())==0):
- warningCount+=1
-
- if jira.checkVersionString():
- warningCount+=1
-
- if jira.checkMissingComponent() or jira.checkMissingAssignee():
- errorCount+=1
elif jira.getType() == "Bug":
buglist.append(jira)
elif jira.getType() == "Improvement":
@@ -496,6 +490,7 @@ def main():
notableclean(jira.getSummary()))
if (jira.getIncompatibleChange()) and (len(jira.getReleaseNote())==0):
+ warningCount+=1
reloutputs.writeKeyRaw(jira.getProject(),"\n---\n\n")
reloutputs.writeKeyRaw(jira.getProject(), line)
line ='\n**WARNING: No release note provided for this incompatible change.**\n\n'
@@ -503,9 +498,11 @@ def main():
reloutputs.writeKeyRaw(jira.getProject(), line)
if jira.checkVersionString():
+ warningCount+=1
lintMessage += "\nWARNING: Version string problem for %s " % jira.getId()
if (jira.checkMissingComponent() or jira.checkMissingAssignee()):
+ errorCount+=1
errorMessage=[]
jira.checkMissingComponent() and errorMessage.append("component")
jira.checkMissingAssignee() and errorMessage.append("assignee")
@@ -520,11 +517,12 @@ def main():
if (options.lint is True):
print lintMessage
print "======================================="
- print "Error:%d, Warning:%d \n" % (errorCount, warningCount)
+ print "%s: Error:%d, Warning:%d \n" % (vstr, errorCount, warningCount)
- if (errorCount>0):
- cleanOutputDir(version)
- sys.exit(1)
+ if (errorCount>0):
+ haderrors=True
+ cleanOutputDir(vstr)
+ continue
reloutputs.writeAll("\n\n")
reloutputs.close()
@@ -571,5 +569,8 @@ def main():
if options.index:
buildindex(title,options.license)
+ if haderrors is True:
+ sys.exit(1)
+
if __name__ == "__main__":
main()
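Beyond restoring the dropped component/assignee counting (each problem is now counted once, at the point it is reported), the patch changes the error handling from exit-on-first-failing-version to record-and-continue, so every version's report is emitted before the nonzero exit. A small sketch of that deferred-exit pattern, in Java, with a hypothetical process() standing in for the per-version work:

    import java.util.Arrays;
    import java.util.List;

    public class DeferredExitDemo {
      // Hypothetical stand-in for per-version work; true means errors found.
      static boolean process(String version) {
        return version.endsWith("-bad");
      }

      public static void main(String[] args) {
        List<String> versions = Arrays.asList("2.7.1", "2.8.0-bad", "2.8.1");
        boolean hadErrors = false;
        for (String v : versions) {
          if (process(v)) {
            hadErrors = true; // remember the failure...
            continue;         // ...but still process the remaining versions
          }
        }
        if (hadErrors) {
          System.exit(1);     // fail once, after every version is reported
        }
      }
    }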
[2/9] hadoop git commit: Release process for 2.7.1: Set the release date for 2.7.1.
Posted by aw...@apache.org.
Release process for 2.7.1: Set the release date for 2.7.1.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bf89ddb9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bf89ddb9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bf89ddb9
Branch: refs/heads/HADOOP-12111
Commit: bf89ddb9b8ca27a34074b415f85599dd48b8fc50
Parents: d62b63d
Author: Vinod Kumar Vavilapalli (I am also known as @tshooter.) <vi...@apache.org>
Authored: Mon Jul 6 16:39:12 2015 -0700
Committer: Vinod Kumar Vavilapalli (I am also known as @tshooter.) <vi...@apache.org>
Committed: Mon Jul 6 16:39:59 2015 -0700
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 2 +-
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 +-
hadoop-mapreduce-project/CHANGES.txt | 2 +-
hadoop-yarn-project/CHANGES.txt | 2 +-
4 files changed, 4 insertions(+), 4 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/bf89ddb9/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index f2f9d5c..fab78d4 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -942,7 +942,7 @@ Release 2.7.2 - UNRELEASED
BUG FIXES
-Release 2.7.1 - UNRELEASED
+Release 2.7.1 - 2015-07-06
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/bf89ddb9/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index d264f74..e40ea3d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1024,7 +1024,7 @@ Release 2.7.2 - UNRELEASED
BUG FIXES
-Release 2.7.1 - UNRELEASED
+Release 2.7.1 - 2015-07-06
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/bf89ddb9/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 2458403..4c60174 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -545,7 +545,7 @@ Release 2.7.2 - UNRELEASED
MAPREDUCE-6425. ShuffleHandler passes wrong "base" parameter to getMapOutputInfo
if mapId is not in the cache. (zhihai xu via devaraj)
-Release 2.7.1 - UNRELEASED
+Release 2.7.1 - 2015-07-06
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/bf89ddb9/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 803d725..f35be75 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -610,7 +610,7 @@ Release 2.7.2 - UNRELEASED
YARN-3508. Prevent processing preemption events on the main RM dispatcher.
(Varun Saxena via wangda)
-Release 2.7.1 - UNRELEASED
+Release 2.7.1 - 2015-07-06
INCOMPATIBLE CHANGES
[7/9] hadoop git commit: HADOOP-12186. ActiveStandbyElector shouldn't call monitorLockNodeAsync multiple times (Contributed by zhihai xu). Moved CHANGES.txt entry to 2.7.2
Posted by aw...@apache.org.
HADOOP-12186. ActiveStandbyElector shouldn't call monitorLockNodeAsync multiple times (Contributed by zhihai xu)
Moved CHANGES.txt entry to 2.7.2
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e0febce0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e0febce0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e0febce0
Branch: refs/heads/HADOOP-12111
Commit: e0febce0e74ec69597376774f771da46834c42b1
Parents: c40bdb5
Author: Vinayakumar B <vi...@apache.org>
Authored: Tue Jul 7 18:13:35 2015 +0530
Committer: Vinayakumar B <vi...@apache.org>
Committed: Tue Jul 7 18:13:35 2015 +0530
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e0febce0/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 5d11db9..ee96eee 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -930,9 +930,6 @@ Release 2.8.0 - UNRELEASED
HADOOP-12164. Fix TestMove and TestFsShellReturnCode failed to get command
name using reflection. (Lei (Eddy) Xu)
- HADOOP-12186. ActiveStandbyElector shouldn't call monitorLockNodeAsync
- multiple times (zhihai xu via vinayakumarb)
-
HADOOP-12117. Potential NPE from Configuration#loadProperty with
allowNullValueProperties set. (zhihai xu via vinayakumarb)
@@ -948,6 +945,9 @@ Release 2.7.2 - UNRELEASED
BUG FIXES
+ HADOOP-12186. ActiveStandbyElector shouldn't call monitorLockNodeAsync
+ multiple times (zhihai xu via vinayakumarb)
+
Release 2.7.1 - 2015-07-06
INCOMPATIBLE CHANGES