Posted to common-commits@hadoop.apache.org by wa...@apache.org on 2015/06/29 21:13:02 UTC
hadoop git commit: HDFS-8653. Code cleanup for DatanodeManager, DatanodeDescriptor and DatanodeStorageInfo. Contributed by Zhe Zhang.
Repository: hadoop
Updated Branches:
refs/heads/trunk d3fed8e65 -> 2ffd84273
HDFS-8653. Code cleanup for DatanodeManager, DatanodeDescriptor and DatanodeStorageInfo. Contributed by Zhe Zhang.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2ffd8427
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2ffd8427
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2ffd8427
Branch: refs/heads/trunk
Commit: 2ffd84273ac490724fe7e7825664bb6d09ef0e99
Parents: d3fed8e
Author: Andrew Wang <wa...@apache.org>
Authored: Mon Jun 29 12:12:34 2015 -0700
Committer: Andrew Wang <wa...@apache.org>
Committed: Mon Jun 29 12:12:41 2015 -0700
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 ++
.../CacheReplicationMonitor.java | 4 +--
.../blockmanagement/DatanodeDescriptor.java | 33 ++++++++++----------
.../server/blockmanagement/DatanodeManager.java | 26 +++++++--------
.../blockmanagement/DatanodeStorageInfo.java | 13 ++++++--
5 files changed, 45 insertions(+), 34 deletions(-)
----------------------------------------------------------------------
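Editor's note: most of the hunks below apply Java 7's diamond operator, letting the compiler infer generic type arguments from the declared type instead of repeating them at the allocation site. A minimal standalone sketch of the before/after pattern (the class and field names here are illustrative, not taken from the patch):

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class DiamondSketch {
      // Pre-Java-7 style: the type arguments are repeated on the right-hand side.
      private final Map<String, List<Integer>> verbose =
          new HashMap<String, List<Integer>>();

      // Java 7+ diamond: the compiler infers <String, List<Integer>> from the
      // declared type, so the repetition can be dropped with no behavior change.
      private final Map<String, List<Integer>> concise = new HashMap<>();

      public static void main(String[] args) {
        List<String> names = new ArrayList<>();  // same inference for locals
        names.add("example");
        System.out.println(names);
      }
    }

Because the inferred type is identical to the explicit one, these hunks are pure syntax cleanup; the generated bytecode is unchanged.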
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ffd8427/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e55f340..0c56f2b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -682,6 +682,9 @@ Release 2.8.0 - UNRELEASED
HDFS-8623. Refactor NameNode handling of invalid, corrupt, and under-recovery
blocks. (Zhe Zhang via jing9)
+ HDFS-8653. Code cleanup for DatanodeManager, DatanodeDescriptor and
+ DatanodeStorageInfo. (Zhe Zhang via wang)
+
OPTIMIZATIONS
HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ffd8427/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java
index a0f3503..2f81ddf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java
@@ -452,8 +452,8 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
file.getFullPathName(), cachedTotal, neededTotal);
}
- private String findReasonForNotCaching(CachedBlock cblock,
- BlockInfo blockInfo) {
+ private String findReasonForNotCaching(CachedBlock cblock,
+ BlockInfo blockInfo) {
if (blockInfo == null) {
// Somehow, a cache report with the block arrived, but the block
// reports from the DataNode haven't (yet?) described such a block.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ffd8427/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
index dd7b301..99def6b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
@@ -64,7 +64,8 @@ public class DatanodeDescriptor extends DatanodeInfo {
// Stores status of decommissioning.
// If node is not decommissioning, do not use this object for anything.
- public final DecommissioningStatus decommissioningStatus = new DecommissioningStatus();
+ public final DecommissioningStatus decommissioningStatus =
+ new DecommissioningStatus();
private long curBlockReportId = 0;
@@ -115,7 +116,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
return null;
}
- List<E> results = new ArrayList<E>();
+ List<E> results = new ArrayList<>();
for(; !blockq.isEmpty() && numBlocks > 0; numBlocks--) {
results.add(blockq.poll());
}
@@ -135,7 +136,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
}
private final Map<String, DatanodeStorageInfo> storageMap =
- new HashMap<String, DatanodeStorageInfo>();
+ new HashMap<>();
/**
* A list of CachedBlock objects on this datanode.
@@ -217,12 +218,14 @@ public class DatanodeDescriptor extends DatanodeInfo {
private long bandwidth;
/** A queue of blocks to be replicated by this datanode */
- private final BlockQueue<BlockTargetPair> replicateBlocks = new BlockQueue<BlockTargetPair>();
+ private final BlockQueue<BlockTargetPair> replicateBlocks =
+ new BlockQueue<>();
/** A queue of blocks to be recovered by this datanode */
private final BlockQueue<BlockInfoUnderConstruction> recoverBlocks =
- new BlockQueue<BlockInfoUnderConstruction>();
+ new BlockQueue<>();
/** A set of blocks to be invalidated by this datanode */
- private final LightWeightHashSet<Block> invalidateBlocks = new LightWeightHashSet<Block>();
+ private final LightWeightHashSet<Block> invalidateBlocks =
+ new LightWeightHashSet<>();
/* Variables for maintaining number of blocks scheduled to be written to
* this storage. This count is approximate and might be slightly bigger
@@ -230,9 +233,9 @@ public class DatanodeDescriptor extends DatanodeInfo {
* while writing the block).
*/
private EnumCounters<StorageType> currApproxBlocksScheduled
- = new EnumCounters<StorageType>(StorageType.class);
+ = new EnumCounters<>(StorageType.class);
private EnumCounters<StorageType> prevApproxBlocksScheduled
- = new EnumCounters<StorageType>(StorageType.class);
+ = new EnumCounters<>(StorageType.class);
private long lastBlocksScheduledRollTime = 0;
private static final int BLOCKS_SCHEDULED_ROLL_INTERVAL = 600*1000; //10min
private int volumeFailures = 0;
@@ -276,6 +279,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
return storageMap.get(storageID);
}
}
+
DatanodeStorageInfo[] getStorageInfos() {
synchronized (storageMap) {
final Collection<DatanodeStorageInfo> storages = storageMap.values();
@@ -321,7 +325,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
Long.toHexString(curBlockReportId));
iter.remove();
if (zombies == null) {
- zombies = new LinkedList<DatanodeStorageInfo>();
+ zombies = new LinkedList<>();
}
zombies.add(storageInfo);
}
@@ -350,10 +354,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
*/
boolean removeBlock(String storageID, BlockInfo b) {
DatanodeStorageInfo s = getStorageInfo(storageID);
- if (s != null) {
- return s.removeBlock(b);
- }
- return false;
+ return s != null && s.removeBlock(b);
}
public void resetBlocks() {
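Editor's note: the removeBlock() hunk above collapses a null check plus conditional return into a single short-circuit expression; && evaluates its right operand only when the left is true, so the method is never invoked on null. A hedged sketch of the same pattern with a stand-in Storage type (not the real DatanodeStorageInfo API):

    public class ShortCircuitSketch {
      // Stand-in for DatanodeStorageInfo; only the shape matters here.
      static class Storage {
        boolean removeBlock(long blockId) { return true; }
      }

      // Before: explicit null check with an early return.
      static boolean removeBlockVerbose(Storage s, long blockId) {
        if (s != null) {
          return s.removeBlock(blockId);
        }
        return false;
      }

      // After: && short-circuits, so s.removeBlock() runs only when s != null.
      static boolean removeBlockConcise(Storage s, long blockId) {
        return s != null && s.removeBlock(blockId);
      }

      public static void main(String[] args) {
        System.out.println(removeBlockConcise(null, 1L));          // false
        System.out.println(removeBlockConcise(new Storage(), 1L)); // true
      }
    }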
@@ -449,7 +450,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
+ this.volumeFailures + " to " + volFailures);
synchronized (storageMap) {
failedStorageInfos =
- new HashSet<DatanodeStorageInfo>(storageMap.values());
+ new HashSet<>(storageMap.values());
}
}
@@ -505,7 +506,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
HashMap<String, DatanodeStorageInfo> excessStorages;
// Init excessStorages with all known storages.
- excessStorages = new HashMap<String, DatanodeStorageInfo>(storageMap);
+ excessStorages = new HashMap<>(storageMap);
// Remove storages that the DN reported in the heartbeat.
for (final StorageReport report : reports) {
@@ -542,7 +543,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
private final List<Iterator<BlockInfo>> iterators;
private BlockIterator(final DatanodeStorageInfo... storages) {
- List<Iterator<BlockInfo>> iterators = new ArrayList<Iterator<BlockInfo>>();
+ List<Iterator<BlockInfo>> iterators = new ArrayList<>();
for (DatanodeStorageInfo e : storages) {
iterators.add(e.getBlockIterator());
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ffd8427/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index 8143fb4..4266004 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -85,7 +85,7 @@ public class DatanodeManager {
* Mapping: StorageID -> DatanodeDescriptor
*/
private final Map<String, DatanodeDescriptor> datanodeMap
- = new HashMap<String, DatanodeDescriptor>();
+ = new HashMap<>();
/** Cluster network topology */
private final NetworkTopology networktopology;
@@ -162,7 +162,7 @@ public class DatanodeManager {
* Software version -> Number of datanodes with this version
*/
private HashMap<String, Integer> datanodesSoftwareVersions =
- new HashMap<String, Integer>(4, 0.75f);
+ new HashMap<>(4, 0.75f);
/**
* The minimum time between resending caching directives to Datanodes,
@@ -217,7 +217,7 @@ public class DatanodeManager {
// locations of those hosts in the include list and store the mapping
// in the cache; so future calls to resolve will be fast.
if (dnsToSwitchMapping instanceof CachedDNSToSwitchMapping) {
- final ArrayList<String> locations = new ArrayList<String>();
+ final ArrayList<String> locations = new ArrayList<>();
for (InetSocketAddress addr : hostFileManager.getIncludes()) {
locations.add(addr.getAddress().getHostAddress());
}
@@ -370,7 +370,7 @@ public class DatanodeManager {
// here we should get node but not datanode only .
Node client = getDatanodeByHost(targethost);
if (client == null) {
- List<String> hosts = new ArrayList<String> (1);
+ List<String> hosts = new ArrayList<> (1);
hosts.add(targethost);
List<String> resolvedHosts = dnsToSwitchMapping.resolve(hosts);
if (resolvedHosts != null && !resolvedHosts.isEmpty()) {
@@ -522,7 +522,7 @@ public class DatanodeManager {
void datanodeDump(final PrintWriter out) {
synchronized (datanodeMap) {
Map<String,DatanodeDescriptor> sortedDatanodeMap =
- new TreeMap<String,DatanodeDescriptor>(datanodeMap);
+ new TreeMap<>(datanodeMap);
out.println("Metasave: Number of datanodes: " + datanodeMap.size());
for (DatanodeDescriptor node : sortedDatanodeMap.values()) {
out.println(node.dumpDatanode());
@@ -660,7 +660,7 @@ public class DatanodeManager {
private void countSoftwareVersions() {
synchronized(datanodeMap) {
- HashMap<String, Integer> versionCount = new HashMap<String, Integer>();
+ HashMap<String, Integer> versionCount = new HashMap<>();
for(DatanodeDescriptor dn: datanodeMap.values()) {
// Check isAlive too because right after removeDatanode(),
// isDatanodeDead() is still true
@@ -677,7 +677,7 @@ public class DatanodeManager {
public HashMap<String, Integer> getDatanodesSoftwareVersions() {
synchronized(datanodeMap) {
- return new HashMap<String, Integer> (this.datanodesSoftwareVersions);
+ return new HashMap<> (this.datanodesSoftwareVersions);
}
}
@@ -710,7 +710,7 @@ public class DatanodeManager {
*/
private String resolveNetworkLocation (DatanodeID node)
throws UnresolvedTopologyException {
- List<String> names = new ArrayList<String>(1);
+ List<String> names = new ArrayList<>(1);
if (dnsToSwitchMapping instanceof CachedDNSToSwitchMapping) {
names.add(node.getIpAddr());
} else {
@@ -1000,7 +1000,7 @@ public class DatanodeManager {
// If the network location is invalid, clear the cached mappings
// so that we have a chance to re-add this DataNode with the
// correct network location later.
- List<String> invalidNodeNames = new ArrayList<String>(3);
+ List<String> invalidNodeNames = new ArrayList<>(3);
// clear cache for nodes in IP or Hostname
invalidNodeNames.add(nodeReg.getIpAddr());
invalidNodeNames.add(nodeReg.getHostName());
@@ -1275,7 +1275,7 @@ public class DatanodeManager {
final HostFileManager.HostSet excludedNodes = hostFileManager.getExcludes();
synchronized(datanodeMap) {
- nodes = new ArrayList<DatanodeDescriptor>(datanodeMap.size());
+ nodes = new ArrayList<>(datanodeMap.size());
for (DatanodeDescriptor dn : datanodeMap.values()) {
final boolean isDead = isDatanodeDead(dn);
final boolean isDecommissioning = dn.isDecommissionInProgress();
@@ -1351,7 +1351,7 @@ public class DatanodeManager {
VolumeFailureSummary volumeFailureSummary) throws IOException {
synchronized (heartbeatManager) {
synchronized (datanodeMap) {
- DatanodeDescriptor nodeinfo = null;
+ DatanodeDescriptor nodeinfo;
try {
nodeinfo = getDatanode(nodeReg);
} catch(UnregisteredNodeException e) {
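Editor's note: the hunk above drops a redundant "= null" initializer. Assuming nodeinfo is assigned on every path through the try/catch before it is read (the catch body is not shown in this hunk), Java's definite-assignment analysis accepts the bare declaration. A small sketch of the rule, using a placeholder lookup rather than the real getDatanode():

    public class DefiniteAssignmentSketch {
      static String lookup(String key) throws Exception {
        if (key == null) throw new Exception("no key");
        return "value-for-" + key;
      }

      public static void main(String[] args) {
        String result;  // no "= null" needed: both branches below assign it
        try {
          result = lookup("node-1");
        } catch (Exception e) {
          result = null;
        }
        // The compiler proves result is assigned on every path before this
        // read, so the initializer in the declaration was dead weight.
        System.out.println(result);
      }
    }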
@@ -1389,7 +1389,7 @@ public class DatanodeManager {
final DatanodeStorageInfo[] storages = b.getExpectedStorageLocations();
// Skip stale nodes during recovery - not heart beated for some time (30s by default).
final List<DatanodeStorageInfo> recoveryLocations =
- new ArrayList<DatanodeStorageInfo>(storages.length);
+ new ArrayList<>(storages.length);
for (int i = 0; i < storages.length; i++) {
if (!storages[i].getDatanodeDescriptor().isStale(staleInterval)) {
recoveryLocations.add(storages[i]);
@@ -1431,7 +1431,7 @@ public class DatanodeManager {
return new DatanodeCommand[] { brCommand };
}
- final List<DatanodeCommand> cmds = new ArrayList<DatanodeCommand>();
+ final List<DatanodeCommand> cmds = new ArrayList<>();
//check pending replication
List<BlockTargetPair> pendingList = nodeinfo.getReplicationCommand(
maxTransfers);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ffd8427/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
index 65b83e1..92841a6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
@@ -37,8 +37,9 @@ import org.apache.hadoop.hdfs.server.protocol.StorageReport;
public class DatanodeStorageInfo {
public static final DatanodeStorageInfo[] EMPTY_ARRAY = {};
- public static DatanodeInfo[] toDatanodeInfos(DatanodeStorageInfo[] storages) {
- return toDatanodeInfos(Arrays.asList(storages));
+ public static DatanodeInfo[] toDatanodeInfos(
+ DatanodeStorageInfo[] storages) {
+ return storages == null ? null: toDatanodeInfos(Arrays.asList(storages));
}
static DatanodeInfo[] toDatanodeInfos(List<DatanodeStorageInfo> storages) {
final DatanodeInfo[] datanodes = new DatanodeInfo[storages.size()];
@@ -58,6 +59,9 @@ public class DatanodeStorageInfo {
}
public static String[] toStorageIDs(DatanodeStorageInfo[] storages) {
+ if (storages == null) {
+ return null;
+ }
String[] storageIDs = new String[storages.length];
for(int i = 0; i < storageIDs.length; i++) {
storageIDs[i] = storages[i].getStorageID();
@@ -66,6 +70,9 @@ public class DatanodeStorageInfo {
}
public static StorageType[] toStorageTypes(DatanodeStorageInfo[] storages) {
+ if (storages == null) {
+ return null;
+ }
StorageType[] storageTypes = new StorageType[storages.length];
for(int i = 0; i < storageTypes.length; i++) {
storageTypes[i] = storages[i].getStorageType();
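Editor's note: the two hunks above, together with the toDatanodeInfos() change earlier in this file, make the array converters null-safe by propagating null instead of throwing NullPointerException on a null input array. A sketch of the pattern with a simplified element type (the real methods map DatanodeStorageInfo to storage IDs and storage types):

    public class NullSafeConverterSketch {
      static class Storage {
        private final String id;
        Storage(String id) { this.id = id; }
        String getStorageID() { return id; }
      }

      // Propagate null rather than dereferencing a null array.
      static String[] toStorageIDs(Storage[] storages) {
        if (storages == null) {
          return null;
        }
        String[] ids = new String[storages.length];
        for (int i = 0; i < ids.length; i++) {
          ids[i] = storages[i].getStorageID();
        }
        return ids;
      }

      public static void main(String[] args) {
        System.out.println(toStorageIDs(null));  // prints null, not an NPE
        System.out.println(toStorageIDs(new Storage[] { new Storage("s1") })[0]);
      }
    }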
@@ -380,6 +387,6 @@ public class DatanodeStorageInfo {
}
static enum AddBlockResult {
- ADDED, REPLACED, ALREADY_EXIST;
+ ADDED, REPLACED, ALREADY_EXIST
}
}
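Editor's note: the final hunk removes the semicolon after the last enum constant; it is only required when the enum body continues with fields, constructors, or methods. A minimal illustration (the constants and names here are placeholders, not the HDFS types):

    public class EnumSemicolonSketch {
      // No members after the constants, so no trailing semicolon is needed.
      enum AddResult { ADDED, REPLACED, ALREADY_EXIST }

      // With a field and method, the semicolon after the constants is mandatory.
      enum Sized {
        SMALL(1), LARGE(10);

        private final int weight;
        Sized(int weight) { this.weight = weight; }
        int weight() { return weight; }
      }

      public static void main(String[] args) {
        System.out.println(AddResult.ADDED + " " + Sized.LARGE.weight());
      }
    }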