You are viewing a plain text version of this content. The canonical (HTML) version of this message is available at its original mailing-list archive location.
Posted to hdfs-commits@hadoop.apache.org by wh...@apache.org on 2014/05/11 07:08:03 UTC
svn commit: r1593756 - in
/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs: CHANGES.txt
src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
Author: wheat9
Date: Sun May 11 05:08:03 2014
New Revision: 1593756
URL: http://svn.apache.org/r1593756
Log:
HDFS-6328. Merge r1593755 from trunk.
Modified:
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1593756&r1=1593755&r2=1593756&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Sun May 11 05:08:03 2014
@@ -92,6 +92,8 @@ Release 2.5.0 - UNRELEASED
HDFS-6294. Use INode IDs to avoid conflicts when a file open for write is
renamed. (cmccabe)
+ HDFS-6328. Clean up dead code in FSDirectory. (wheat9)
+
OPTIMIZATIONS
HDFS-6214. Webhdfs has poor throughput for files >2GB (daryn)
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=1593756&r1=1593755&r2=1593756&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Sun May 11 05:08:03 2014
@@ -275,7 +275,7 @@ public class FSDirectory implements Clos
while (!ready) {
try {
cond.await(5000, TimeUnit.MILLISECONDS);
- } catch (InterruptedException ie) {
+ } catch (InterruptedException ignored) {
}
}
} finally {
@@ -722,9 +722,8 @@ public class FSDirectory implements Clos
} else {
withCount.getReferredINode().setLocalName(dstChildName);
int dstSnapshotId = dstIIP.getLatestSnapshotId();
- final INodeReference.DstReference ref = new INodeReference.DstReference(
+ toDst = new INodeReference.DstReference(
dstParent.asDirectory(), withCount, dstSnapshotId);
- toDst = ref;
}
added = addLastINodeNoQuotaCheck(dstIIP, toDst);
@@ -763,14 +762,12 @@ public class FSDirectory implements Clos
} else if (!srcChildIsReference) { // src must be in snapshot
// the withCount node will no longer be used thus no need to update
// its reference number here
- final INode originalChild = withCount.getReferredINode();
- srcChild = originalChild;
+ srcChild = withCount.getReferredINode();
srcChild.setLocalName(srcChildName);
} else {
withCount.removeReference(oldSrcChild.asReference());
- final INodeReference originalRef = new INodeReference.DstReference(
+ srcChild = new INodeReference.DstReference(
srcParent, withCount, srcRefDstSnapshot);
- srcChild = originalRef;
withCount.getReferredINode().setLocalName(srcChildName);
}
@@ -813,7 +810,7 @@ public class FSDirectory implements Clos
}
}
}
- String error = null;
+ final String error;
final INodesInPath srcIIP = getINodesInPath4Write(src, false);
final INode srcInode = srcIIP.getLastINode();
// validate source
@@ -980,9 +977,8 @@ public class FSDirectory implements Clos
} else {
withCount.getReferredINode().setLocalName(dstChildName);
int dstSnapshotId = dstIIP.getLatestSnapshotId();
- final INodeReference.DstReference ref = new INodeReference.DstReference(
+ toDst = new INodeReference.DstReference(
dstIIP.getINode(-2).asDirectory(), withCount, dstSnapshotId);
- toDst = ref;
}
// add src as dst to complete rename
@@ -1043,14 +1039,12 @@ public class FSDirectory implements Clos
} else if (!srcChildIsReference) { // src must be in snapshot
// the withCount node will no longer be used thus no need to update
// its reference number here
- final INode originalChild = withCount.getReferredINode();
- srcChild = originalChild;
+ srcChild = withCount.getReferredINode();
srcChild.setLocalName(srcChildName);
} else {
withCount.removeReference(oldSrcChild.asReference());
- final INodeReference originalRef = new INodeReference.DstReference(
+ srcChild = new INodeReference.DstReference(
srcParent, withCount, srcRefDstSnapshot);
- srcChild = originalRef;
withCount.getReferredINode().setLocalName(srcChildName);
}
@@ -1162,20 +1156,6 @@ public class FSDirectory implements Clos
}
}
- boolean exists(String src) throws UnresolvedLinkException {
- src = normalizePath(src);
- readLock();
- try {
- INode inode = getNode(src, false);
- if (inode == null) {
- return false;
- }
- return !inode.isFile() || inode.asFile().getBlocks() != null;
- } finally {
- readUnlock();
- }
- }
-
void setPermission(String src, FsPermission permission)
throws FileNotFoundException, UnresolvedLinkException,
QuotaExceededException, SnapshotAccessControlException {
@@ -1590,7 +1570,7 @@ public class FSDirectory implements Clos
throws UnresolvedLinkException, IOException {
Preconditions.checkState(hasReadLock());
Preconditions.checkArgument(
- src.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR),
+ src.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR),
"%s does not end with %s", src, HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR);
final String dirPath = normalizePath(src.substring(0,
@@ -1654,7 +1634,7 @@ public class FSDirectory implements Clos
private INode getINode4DotSnapshot(String src) throws UnresolvedLinkException {
Preconditions.checkArgument(
- src.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR),
+ src.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR),
"%s does not end with %s", src, HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR);
final String dirPath = normalizePath(src.substring(0,
@@ -1669,21 +1649,6 @@ public class FSDirectory implements Clos
return null;
}
- /**
- * Get the blocks associated with the file.
- */
- Block[] getFileBlocks(String src) throws UnresolvedLinkException {
- waitForReady();
- readLock();
- try {
- final INode i = getNode(src, false);
- return i != null && i.isFile()? i.asFile().getBlocks(): null;
- } finally {
- readUnlock();
- }
- }
-
-
INodesInPath getExistingPathINodes(byte[][] components)
throws UnresolvedLinkException {
return INodesInPath.resolve(rootDir, components);
@@ -1708,7 +1673,7 @@ public class FSDirectory implements Clos
readUnlock();
}
}
-
+
/**
* Get {@link INode} associated with the file / directory.
*/
@@ -1745,12 +1710,8 @@ public class FSDirectory implements Clos
String srcs = normalizePath(src);
readLock();
try {
- if (srcs.startsWith("/") && !srcs.endsWith("/")
- && getINode4Write(srcs, false) == null) {
- return true;
- } else {
- return false;
- }
+ return srcs.startsWith("/") && !srcs.endsWith("/")
+ && getINode4Write(srcs, false) == null;
} finally {
readUnlock();
}
@@ -2003,7 +1964,7 @@ public class FSDirectory implements Clos
// create directories beginning from the first null index
for(; i < inodes.length; i++) {
- pathbuilder.append(Path.SEPARATOR + names[i]);
+ pathbuilder.append(Path.SEPARATOR).append(names[i]);
unprotectedMkdir(namesystem.allocateNewInodeId(), iip, i,
components[i], (i < lastInodeIndex) ? parentPermissions
: permissions, null, now);
@@ -2132,7 +2093,7 @@ public class FSDirectory implements Clos
return;
}
int i = 0;
- for(; src[i] == dst[i]; i++);
+ while(src[i] == dst[i]) { i++; }
// src[i - 1] is the last common ancestor.
final Quota.Counts delta = src[src.length - 1].computeQuotaUsage();
@@ -2293,7 +2254,7 @@ public class FSDirectory implements Clos
counts.get(Quota.NAMESPACE), counts.get(Quota.DISKSPACE), checkQuota);
boolean isRename = (child.getParent() != null);
final INodeDirectory parent = inodes[pos-1].asDirectory();
- boolean added = false;
+ boolean added;
try {
added = parent.addChild(child, true, iip.getLatestSnapshotId());
} catch (QuotaExceededException e) {
@@ -2658,7 +2619,7 @@ public class FSDirectory implements Clos
blocksize = fileNode.getPreferredBlockSize();
final boolean inSnapshot = snapshot != Snapshot.CURRENT_STATE_ID;
- final boolean isUc = inSnapshot ? false : fileNode.isUnderConstruction();
+ final boolean isUc = !inSnapshot && fileNode.isUnderConstruction();
final long fileSize = !inSnapshot && isUc ?
fileNode.computeFileSizeNotIncludingLastUcBlock() : size;
loc = getFSNamesystem().getBlockManager().createLocatedBlocks(
@@ -3012,7 +2973,7 @@ public class FSDirectory implements Clos
return src;
}
final String inodeId = DFSUtil.bytes2String(pathComponents[3]);
- long id = 0;
+ final long id;
try {
id = Long.parseLong(inodeId);
} catch (NumberFormatException e) {