Posted to hdfs-commits@hadoop.apache.org by su...@apache.org on 2013/01/03 22:24:03 UTC
svn commit: r1428601 [1/2] - in
/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project:
hadoop-hdfs-httpfs/ hadoop-hdfs/
hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/
hadoop-hdfs/src/main/docs/src/documentation/resources/images/ hado...
Author: suresh
Date: Thu Jan 3 21:23:58 2013
New Revision: 1428601
URL: http://svn.apache.org/viewvc?rev=1428601&view=rev
Log:
Merging trunk changes to branch-trunk-win
Added:
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeId.java
- copied unchanged from r1428155, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeId.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsDesign.apt.vm
- copied unchanged from r1428155, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsDesign.apt.vm
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/site/resources/images/
- copied from r1428155, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/site/resources/images/
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/site/resources/images/hdfs-logo.jpg
- copied unchanged from r1428155, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/site/resources/images/hdfs-logo.jpg
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/site/resources/images/hdfsarchitecture.gif
- copied unchanged from r1428155, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/site/resources/images/hdfsarchitecture.gif
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/site/resources/images/hdfsarchitecture.odg
- copied unchanged from r1428155, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/site/resources/images/hdfsarchitecture.odg
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/site/resources/images/hdfsarchitecture.png
- copied unchanged from r1428155, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/site/resources/images/hdfsarchitecture.png
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/site/resources/images/hdfsdatanodes.gif
- copied unchanged from r1428155, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/site/resources/images/hdfsdatanodes.gif
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/site/resources/images/hdfsdatanodes.odg
- copied unchanged from r1428155, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/site/resources/images/hdfsdatanodes.odg
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/site/resources/images/hdfsdatanodes.png
- copied unchanged from r1428155, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/site/resources/images/hdfsdatanodes.png
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/site/resources/images/hdfsproxy-forward.jpg
- copied unchanged from r1428155, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/site/resources/images/hdfsproxy-forward.jpg
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/site/resources/images/hdfsproxy-overview.jpg
- copied unchanged from r1428155, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/site/resources/images/hdfsproxy-overview.jpg
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/site/resources/images/hdfsproxy-server.jpg
- copied unchanged from r1428155, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/site/resources/images/hdfsproxy-server.jpg
Removed:
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/hdfs_design.xml
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/hdfs-logo.jpg
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/hdfsarchitecture.gif
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/hdfsarchitecture.odg
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/hdfsarchitecture.png
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/hdfsdatanodes.gif
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/hdfsdatanodes.odg
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/hdfsdatanodes.png
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/hdfsproxy-forward.jpg
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/hdfsproxy-overview.jpg
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/resources/images/hdfsproxy-server.jpg
Modified:
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/ (props changed)
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/ (props changed)
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/native/ (props changed)
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_connect.c
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_open.c
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_init.c
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_init.h
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/test/fuse_workload.c
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/ (props changed)
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/ (props changed)
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/ (props changed)
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/ (props changed)
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogsDuringFailover.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
Propchange: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1423068-1428155
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml?rev=1428601&r1=1428600&r2=1428601&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml Thu Jan 3 21:23:58 2013
@@ -34,7 +34,7 @@
<description>Apache Hadoop HttpFS</description>
<properties>
- <tomcat.version>6.0.32</tomcat.version>
+ <tomcat.version>6.0.36</tomcat.version>
<httpfs.source.repository>REPO NOT AVAIL</httpfs.source.repository>
<httpfs.source.revision>REVISION NOT AVAIL</httpfs.source.revision>
@@ -531,7 +531,7 @@
<mkdir dir="downloads"/>
<get
src="${tomcat.download.url}"
- dest="downloads/tomcat.tar.gz" verbose="true" skipexisting="true"/>
+ dest="downloads/apache-tomcat-${tomcat.version}.tar.gz" verbose="true" skipexisting="true"/>
<delete dir="${project.build.directory}/tomcat.exp"/>
<mkdir dir="${project.build.directory}/tomcat.exp"/>
@@ -541,7 +541,7 @@
from os.path import abspath
import tarfile
-src = abspath(r"${basedir}/downloads/tomcat.tar.gz")
+src = abspath(r"${basedir}/downloads/apache-tomcat-${tomcat.version}.tar.gz")
dest = abspath(r"${project.build.directory}/tomcat.exp")
with tarfile.open(src, "r:gz") as tar:
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1428601&r1=1428600&r2=1428601&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Thu Jan 3 21:23:58 2013
@@ -172,6 +172,8 @@ Trunk (Unreleased)
HDFS-4234. Use generic code for choosing datanode in Balancer. (szetszwo)
+ HDFS-4334. Add a unique id to INode. (Brandon Li via szetszwo)
+
OPTIMIZATIONS
BUG FIXES
@@ -286,6 +288,9 @@ Trunk (Unreleased)
HDFS-4275. MiniDFSCluster-based tests fail on Windows due to failure
to delete test namenode directory. (Chris Nauroth via suresh)
+ HDFS-4338. TestNameNodeMetrics#testCorruptBlock is flaky. (Andrew Wang via
+ atm)
+
Release 2.0.3-alpha - Unreleased
INCOMPATIBLE CHANGES
@@ -430,6 +435,8 @@ Release 2.0.3-alpha - Unreleased
HDFS-4130. BKJM: The reading for editlog at NN starting using bkjm is not efficient.
(Han Xiao via umamahesh)
+ HDFS-4326. bump up Tomcat version for HttpFS to 6.0.36. (tucu via acmurthy)
+
OPTIMIZATIONS
BUG FIXES
@@ -622,6 +629,17 @@ Release 2.0.3-alpha - Unreleased
HDFS-4315. DNs with multiple BPs can have BPOfferServices fail to start
due to unsynchronized map access. (atm)
+ HDFS-4140. fuse-dfs handles open(O_TRUNC) poorly. (Colin Patrick McCabe
+ via atm)
+
+ HDFS-4308. addBlock() should persist file blocks once.
+ (Plamen Jeliazkov via shv)
+
+ HDFS-4347. Avoid infinite waiting checkpoint to complete in TestBackupNode.
+ (Plamen Jeliazkov via shv)
+
+ HDFS-4349. Add test for reading files from BackupNode. (shv)
+
BREAKDOWN OF HDFS-3077 SUBTASKS
HDFS-3077. Quorum-based protocol for reading and writing edit logs.
Propchange: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1423068-1428155
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java?rev=1428601&r1=1428600&r2=1428601&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java Thu Jan 3 21:23:58 2013
@@ -21,6 +21,8 @@ import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.Arrays;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.server.common.Storage;
@@ -41,6 +43,7 @@ import org.apache.hadoop.security.UserGr
* int, int, byte[])
*/
class EditLogBackupOutputStream extends EditLogOutputStream {
+ private static Log LOG = LogFactory.getLog(EditLogFileOutputStream.class);
static int DEFAULT_BUFFER_SIZE = 256;
private final JournalProtocol backupNode; // RPC proxy to backup node
@@ -117,6 +120,11 @@ class EditLogBackupOutputStream extends
protected void flushAndSync(boolean durable) throws IOException {
assert out.getLength() == 0 : "Output buffer is not empty";
+ if (doubleBuf.isFlushed()) {
+ LOG.info("Nothing to flush");
+ return;
+ }
+
int numReadyTxns = doubleBuf.countReadyTxns();
long firstTxToFlush = doubleBuf.getFirstReadyTxId();
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=1428601&r1=1428600&r2=1428601&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Thu Jan 3 21:23:58 2013
@@ -76,8 +76,9 @@ import com.google.common.base.Preconditi
*************************************************/
public class FSDirectory implements Closeable {
private static INodeDirectoryWithQuota createRoot(FSNamesystem namesystem) {
- return new INodeDirectoryWithQuota(INodeDirectory.ROOT_NAME,
- namesystem.createFsOwnerPermissions(new FsPermission((short)0755)));
+ return new INodeDirectoryWithQuota(namesystem.allocateNewInodeId(),
+ INodeDirectory.ROOT_NAME,
+ namesystem.createFsOwnerPermissions(new FsPermission((short) 0755)));
}
INodeDirectoryWithQuota rootDir;
@@ -253,7 +254,9 @@ public class FSDirectory implements Clos
if (!mkdirs(parent.toString(), permissions, true, modTime)) {
return null;
}
+ long id = namesystem.allocateNewInodeId();
INodeFileUnderConstruction newNode = new INodeFileUnderConstruction(
+ id,
permissions,replication,
preferredBlockSize, modTime, clientName,
clientMachine, clientNode);
@@ -275,7 +278,8 @@ public class FSDirectory implements Clos
return newNode;
}
- INode unprotectedAddFile( String path,
+ INode unprotectedAddFile( long id,
+ String path,
PermissionStatus permissions,
short replication,
long modificationTime,
@@ -287,13 +291,11 @@ public class FSDirectory implements Clos
final INode newNode;
assert hasWriteLock();
if (underConstruction) {
- newNode = new INodeFileUnderConstruction(
- permissions, replication,
- preferredBlockSize, modificationTime, clientName,
- clientMachine, null);
+ newNode = new INodeFileUnderConstruction(id, permissions, replication,
+ preferredBlockSize, modificationTime, clientName, clientMachine, null);
} else {
- newNode = new INodeFile(permissions, BlockInfo.EMPTY_ARRAY, replication,
- modificationTime, atime, preferredBlockSize);
+ newNode = new INodeFile(id, permissions, BlockInfo.EMPTY_ARRAY,
+ replication, modificationTime, atime, preferredBlockSize);
}
try {
@@ -392,19 +394,16 @@ public class FSDirectory implements Clos
/**
* Remove a block from the file.
*/
- boolean removeBlock(String path, INodeFileUnderConstruction fileNode,
+ void removeBlock(String path, INodeFileUnderConstruction fileNode,
Block block) throws IOException {
waitForReady();
writeLock();
try {
unprotectedRemoveBlock(path, fileNode, block);
- // write modified block locations to log
- fsImage.getEditLog().logOpenFile(path, fileNode);
} finally {
writeUnlock();
}
- return true;
}
void unprotectedRemoveBlock(String path, INodeFileUnderConstruction fileNode,
@@ -1428,8 +1427,9 @@ public class FSDirectory implements Clos
// create directories beginning from the first null index
for(; i < inodes.length; i++) {
pathbuilder.append(Path.SEPARATOR + names[i]);
- unprotectedMkdir(inodesInPath, i, components[i],
- (i < lastInodeIndex) ? parentPermissions : permissions, now);
+ unprotectedMkdir(namesystem.allocateNewInodeId(), inodesInPath, i,
+ components[i], (i < lastInodeIndex) ? parentPermissions
+ : permissions, now);
if (inodes[i] == null) {
return false;
}
@@ -1451,7 +1451,7 @@ public class FSDirectory implements Clos
return true;
}
- INode unprotectedMkdir(String src, PermissionStatus permissions,
+ INode unprotectedMkdir(long inodeId, String src, PermissionStatus permissions,
long timestamp) throws QuotaExceededException,
UnresolvedLinkException {
assert hasWriteLock();
@@ -1460,7 +1460,8 @@ public class FSDirectory implements Clos
components.length, false);
INode[] inodes = inodesInPath.getINodes();
final int pos = inodes.length - 1;
- unprotectedMkdir(inodesInPath, pos, components[pos], permissions, timestamp);
+ unprotectedMkdir(inodeId, inodesInPath, pos, components[pos], permissions,
+ timestamp);
return inodes[pos];
}
@@ -1468,11 +1469,12 @@ public class FSDirectory implements Clos
* The parent path to the directory is at [0, pos-1].
* All ancestors exist. Newly created one stored at index pos.
*/
- private void unprotectedMkdir(INodesInPath inodesInPath, int pos,
- byte[] name, PermissionStatus permission,
- long timestamp) throws QuotaExceededException {
+ private void unprotectedMkdir(long inodeId, INodesInPath inodesInPath,
+ int pos, byte[] name, PermissionStatus permission, long timestamp)
+ throws QuotaExceededException {
assert hasWriteLock();
- final INodeDirectory dir = new INodeDirectory(name, permission, timestamp);
+ final INodeDirectory dir = new INodeDirectory(inodeId, name, permission,
+ timestamp);
if (addChild(inodesInPath, pos, dir, true)) {
inodesInPath.setINode(pos, dir);
}
@@ -2042,9 +2044,10 @@ public class FSDirectory implements Clos
}
final String userName = dirPerms.getUserName();
INodeSymlink newNode = null;
+ long id = namesystem.allocateNewInodeId();
writeLock();
try {
- newNode = unprotectedAddSymlink(path, target, modTime, modTime,
+ newNode = unprotectedAddSymlink(id, path, target, modTime, modTime,
new PermissionStatus(userName, null, FsPermission.getDefault()));
} finally {
writeUnlock();
@@ -2064,12 +2067,13 @@ public class FSDirectory implements Clos
/**
* Add the specified path into the namespace. Invoked from edit log processing.
*/
- INodeSymlink unprotectedAddSymlink(String path, String target, long mtime,
- long atime, PermissionStatus perm)
+ INodeSymlink unprotectedAddSymlink(long id, String path, String target,
+ long mtime, long atime, PermissionStatus perm)
throws UnresolvedLinkException, QuotaExceededException {
assert hasWriteLock();
- final INodeSymlink symlink = new INodeSymlink(target, mtime, atime, perm);
- return addINode(path, symlink)? symlink: null;
+ final INodeSymlink symlink = new INodeSymlink(id, target, mtime, atime,
+ perm);
+ return addINode(path, symlink) ? symlink : null;
}
/**
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java?rev=1428601&r1=1428600&r2=1428601&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java Thu Jan 3 21:23:58 2013
@@ -120,7 +120,8 @@ public class FSEditLogLoader {
long lastTxId = in.getLastTxId();
long numTxns = (lastTxId - expectedStartingTxId) + 1;
long lastLogTime = now();
-
+ long lastInodeId = fsNamesys.getLastInodeId();
+
if (LOG.isDebugEnabled()) {
LOG.debug("edit log length: " + in.length() + ", start txid: "
+ expectedStartingTxId + ", last txid: " + lastTxId);
@@ -170,7 +171,10 @@ public class FSEditLogLoader {
}
}
try {
- applyEditLogOp(op, fsDir, in.getVersion());
+ long inodeId = applyEditLogOp(op, fsDir, in.getVersion());
+ if (lastInodeId < inodeId) {
+ lastInodeId = inodeId;
+ }
} catch (Throwable e) {
LOG.error("Encountered exception on operation " + op, e);
MetaRecoveryContext.editLogLoaderPrompt("Failed to " +
@@ -205,6 +209,7 @@ public class FSEditLogLoader {
}
}
} finally {
+ fsNamesys.resetLastInodeId(lastInodeId);
if(closeOnExit) {
in.close();
}
@@ -223,9 +228,9 @@ public class FSEditLogLoader {
}
@SuppressWarnings("deprecation")
- private void applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
+ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
int logVersion) throws IOException {
-
+ long inodeId = INodeId.GRANDFATHER_INODE_ID;
if (LOG.isTraceEnabled()) {
LOG.trace("replaying edit log: " + op);
}
@@ -255,11 +260,11 @@ public class FSEditLogLoader {
assert addCloseOp.blocks.length == 0;
// add to the file tree
- newFile = (INodeFile)fsDir.unprotectedAddFile(
- addCloseOp.path, addCloseOp.permissions,
- replication, addCloseOp.mtime,
- addCloseOp.atime, addCloseOp.blockSize,
- true, addCloseOp.clientName, addCloseOp.clientMachine);
+ inodeId = fsNamesys.allocateNewInodeId();
+ newFile = (INodeFile) fsDir.unprotectedAddFile(inodeId,
+ addCloseOp.path, addCloseOp.permissions, replication,
+ addCloseOp.mtime, addCloseOp.atime, addCloseOp.blockSize, true,
+ addCloseOp.clientName, addCloseOp.clientMachine);
fsNamesys.leaseManager.addLease(addCloseOp.clientName, addCloseOp.path);
} else { // This is OP_ADD on an existing file
@@ -370,7 +375,8 @@ public class FSEditLogLoader {
}
case OP_MKDIR: {
MkdirOp mkdirOp = (MkdirOp)op;
- fsDir.unprotectedMkdir(mkdirOp.path, mkdirOp.permissions,
+ inodeId = fsNamesys.allocateNewInodeId();
+ fsDir.unprotectedMkdir(inodeId, mkdirOp.path, mkdirOp.permissions,
mkdirOp.timestamp);
break;
}
@@ -423,9 +429,10 @@ public class FSEditLogLoader {
}
case OP_SYMLINK: {
SymlinkOp symlinkOp = (SymlinkOp)op;
- fsDir.unprotectedAddSymlink(symlinkOp.path, symlinkOp.value,
- symlinkOp.mtime, symlinkOp.atime,
- symlinkOp.permissionStatus);
+ inodeId = fsNamesys.allocateNewInodeId();
+ fsDir.unprotectedAddSymlink(inodeId, symlinkOp.path,
+ symlinkOp.value, symlinkOp.mtime,
+ symlinkOp.atime, symlinkOp.permissionStatus);
break;
}
case OP_RENAME: {
@@ -485,6 +492,7 @@ public class FSEditLogLoader {
default:
throw new IOException("Invalid operation read " + op.opCode);
}
+ return inodeId;
}
private static String formatEditLogReplayError(EditLogInputStream in,
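
Taken together, the FSEditLogLoader hunks above implement a single pattern: remember the highest inode id seen while replaying ops, then hand that maximum back to the namesystem. A condensed sketch of that pattern (simplified names, not the committed code):

    long lastInodeId = fsNamesys.getLastInodeId();
    try {
      FSEditLogOp op;
      while ((op = in.readOp()) != null) {
        // Ops that create an inode (OP_ADD, OP_MKDIR, OP_SYMLINK) allocate a
        // fresh id and return it; all other ops return
        // INodeId.GRANDFATHER_INODE_ID, which never raises the maximum.
        long inodeId = applyEditLogOp(op, fsDir, in.getVersion());
        if (lastInodeId < inodeId) {
          lastInodeId = inodeId;
        }
      }
    } finally {
      // Leave the namesystem's generator at the highest id replayed, so
      // later allocations cannot collide with replayed inodes.
      fsNamesys.resetLastInodeId(lastInodeId);
    }
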
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java?rev=1428601&r1=1428600&r2=1428601&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java Thu Jan 3 21:23:58 2013
@@ -166,7 +166,8 @@ class FSImageFormat {
in = compression.unwrapInputStream(fin);
LOG.info("Loading image file " + curFile + " using " + compression);
-
+ // reset INodeId. TODO: remove this after inodeId is persisted in fsimage
+ namesystem.resetLastInodeIdWithoutChecking(INodeId.LAST_RESERVED_ID);
// load all inodes
LOG.info("Number of files = " + numFiles);
if (LayoutVersion.supports(Feature.FSIMAGE_NAME_OPTIMIZATION,
@@ -334,6 +335,8 @@ class FSImageFormat {
long blockSize = 0;
int imgVersion = getLayoutVersion();
+ long inodeId = namesystem.allocateNewInodeId();
+
short replication = in.readShort();
replication = namesystem.getBlockManager().adjustReplication(replication);
modificationTime = in.readLong();
@@ -371,7 +374,7 @@ class FSImageFormat {
PermissionStatus permissions = PermissionStatus.read(in);
- return INode.newINode(permissions, blocks, symlink, replication,
+ return INode.newINode(inodeId, permissions, blocks, symlink, replication,
modificationTime, atime, nsQuota, dsQuota, blockSize);
}
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java?rev=1428601&r1=1428600&r2=1428601&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java Thu Jan 3 21:23:58 2013
@@ -107,7 +107,9 @@ public class FSImageSerialization {
int numLocs = in.readInt();
assert numLocs == 0 : "Unexpected block locations";
- return new INodeFileUnderConstruction(name,
+ //TODO: get inodeId from fsimage after inodeId is persisted
+ return new INodeFileUnderConstruction(INodeId.GRANDFATHER_INODE_ID,
+ name,
blockReplication,
modificationTime,
preferredBlockSize,
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1428601&r1=1428600&r2=1428601&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Thu Jan 3 21:23:58 2013
@@ -375,6 +375,30 @@ public class FSNamesystem implements Nam
private final boolean haEnabled;
+ private INodeId inodeId;
+
+ /**
+ * Set the last allocated inode id when fsimage is loaded or editlog is
+ * applied.
+ * @throws IOException
+ */
+ public void resetLastInodeId(long newValue) throws IOException {
+ inodeId.resetLastInodeId(newValue);
+ }
+
+ /** Reset to any value without the monotonicity check (image loading, tests) */
+ void resetLastInodeIdWithoutChecking(long newValue) {
+ inodeId.resetLastInodeIdWithoutChecking(newValue);
+ }
+
+ public long getLastInodeId() {
+ return inodeId.getLastInodeId();
+ }
+
+ public long allocateNewInodeId() {
+ return inodeId.allocateNewInodeId();
+ }
+
/**
* Clear all loaded data
*/
@@ -383,6 +407,7 @@ public class FSNamesystem implements Nam
dtSecretManager.reset();
generationStamp.setStamp(GenerationStamp.FIRST_VALID_STAMP);
leaseManager.removeAllLeases();
+ inodeId.resetLastInodeIdWithoutChecking(INodeId.LAST_RESERVED_ID);
}
@VisibleForTesting
@@ -534,6 +559,8 @@ public class FSNamesystem implements Nam
this.standbyShouldCheckpoint = conf.getBoolean(
DFS_HA_STANDBY_CHECKPOINTS_KEY, DFS_HA_STANDBY_CHECKPOINTS_DEFAULT);
+ this.inodeId = new INodeId();
+
// For testing purposes, allow the DT secret manager to be started regardless
// of whether security is enabled.
alwaysUseDelegationTokensForTests = conf.getBoolean(
@@ -1931,6 +1958,7 @@ public class FSNamesystem implements Nam
String leaseHolder, String clientMachine, DatanodeDescriptor clientNode,
boolean writeToEditLog) throws IOException {
INodeFileUnderConstruction cons = new INodeFileUnderConstruction(
+ file.getId(),
file.getLocalNameBytes(),
file.getBlockReplication(),
file.getModificationTime(),
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java?rev=1428601&r1=1428600&r2=1428601&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java Thu Jan 3 21:23:58 2013
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
+import java.io.IOException;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.util.ArrayList;
@@ -102,6 +103,11 @@ abstract class INode implements Comparab
}
/**
+ * The inode id
+ */
+ final private long id;
+
+ /**
* The inode name is in java UTF8 encoding;
* The name in HdfsFileStatus should keep the same encoding as this.
* if this encoding is changed, implicitly getFileInfo and listStatus in
@@ -120,8 +126,9 @@ abstract class INode implements Comparab
protected long modificationTime = 0L;
protected long accessTime = 0L;
- private INode(byte[] name, long permission, INodeDirectory parent,
+ private INode(long id, byte[] name, long permission, INodeDirectory parent,
long modificationTime, long accessTime) {
+ this.id = id;
this.name = name;
this.permission = permission;
this.parent = parent;
@@ -129,26 +136,31 @@ abstract class INode implements Comparab
this.accessTime = accessTime;
}
- INode(byte[] name, PermissionStatus permissions, INodeDirectory parent,
- long modificationTime, long accessTime) {
- this(name, PermissionStatusFormat.toLong(permissions), parent,
+ INode(long id, byte[] name, PermissionStatus permissions,
+ INodeDirectory parent, long modificationTime, long accessTime) {
+ this(id, name, PermissionStatusFormat.toLong(permissions), parent,
modificationTime, accessTime);
}
-
- INode(PermissionStatus permissions, long mtime, long atime) {
- this(null, permissions, null, mtime, atime);
+
+ INode(long id, PermissionStatus permissions, long mtime, long atime) {
+ this(id, null, PermissionStatusFormat.toLong(permissions), null, mtime, atime);
}
-
- protected INode(String name, PermissionStatus permissions) {
- this(DFSUtil.string2Bytes(name), permissions, null, 0L, 0L);
+
+ protected INode(long id, String name, PermissionStatus permissions) {
+ this(id, DFSUtil.string2Bytes(name), permissions, null, 0L, 0L);
}
/** @param other Other node to be copied */
INode(INode other) {
- this(other.getLocalNameBytes(), other.permission, other.getParent(),
- other.getModificationTime(), other.getAccessTime());
+ this(other.getId(), other.getLocalNameBytes(), other.permission, other
+ .getParent(), other.getModificationTime(), other.getAccessTime());
}
+ /** Get inode id */
+ public long getId() {
+ return this.id;
+ }
+
/**
* Check whether this is the root inode.
*/
@@ -459,6 +471,7 @@ abstract class INode implements Comparab
/**
* Create an INode; the inode's name is not set yet
*
+ * @param id preassigned inode id
* @param permissions permissions
* @param blocks blocks if a file
* @param symlink symbolic link if a symbolic link
@@ -470,7 +483,8 @@ abstract class INode implements Comparab
* @param preferredBlockSize block size
* @return an inode
*/
- static INode newINode(PermissionStatus permissions,
+ static INode newINode(long id,
+ PermissionStatus permissions,
BlockInfo[] blocks,
String symlink,
short replication,
@@ -480,17 +494,17 @@ abstract class INode implements Comparab
long dsQuota,
long preferredBlockSize) {
if (symlink.length() != 0) { // check if symbolic link
- return new INodeSymlink(symlink, modificationTime, atime, permissions);
+ return new INodeSymlink(id, symlink, modificationTime, atime, permissions);
} else if (blocks == null) { //not sym link and blocks null? directory!
if (nsQuota >= 0 || dsQuota >= 0) {
return new INodeDirectoryWithQuota(
- permissions, modificationTime, nsQuota, dsQuota);
+ id, permissions, modificationTime, nsQuota, dsQuota);
}
// regular directory
- return new INodeDirectory(permissions, modificationTime);
+ return new INodeDirectory(id, permissions, modificationTime);
}
// file
- return new INodeFile(permissions, blocks, replication,
+ return new INodeFile(id, permissions, blocks, replication,
modificationTime, atime, preferredBlockSize);
}
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java?rev=1428601&r1=1428600&r2=1428601&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java Thu Jan 3 21:23:58 2013
@@ -53,17 +53,17 @@ class INodeDirectory extends INode {
private List<INode> children = null;
- INodeDirectory(String name, PermissionStatus permissions) {
- super(name, permissions);
+ INodeDirectory(long id, String name, PermissionStatus permissions) {
+ super(id, name, permissions);
}
- public INodeDirectory(PermissionStatus permissions, long mTime) {
- super(permissions, mTime, 0);
+ public INodeDirectory(long id, PermissionStatus permissions, long mTime) {
+ super(id, permissions, mTime, 0);
}
-
+
/** constructor */
- INodeDirectory(byte[] name, PermissionStatus permissions, long mtime) {
- super(name, permissions, null, mtime, 0L);
+ INodeDirectory(long id, byte[] name, PermissionStatus permissions, long mtime) {
+ super(id, name, permissions, null, mtime, 0L);
}
/** copy constructor
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java?rev=1428601&r1=1428600&r2=1428601&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java Thu Jan 3 21:23:58 2013
@@ -54,16 +54,16 @@ class INodeDirectoryWithQuota extends IN
}
/** constructor with no quota verification */
- INodeDirectoryWithQuota(PermissionStatus permissions, long modificationTime,
- long nsQuota, long dsQuota) {
- super(permissions, modificationTime);
+ INodeDirectoryWithQuota(long id, PermissionStatus permissions,
+ long modificationTime, long nsQuota, long dsQuota) {
+ super(id, permissions, modificationTime);
this.nsQuota = nsQuota;
this.dsQuota = dsQuota;
}
/** constructor with no quota verification */
- INodeDirectoryWithQuota(String name, PermissionStatus permissions) {
- super(name, permissions);
+ INodeDirectoryWithQuota(long id, String name, PermissionStatus permissions) {
+ super(id, name, permissions);
}
/** Get this directory's namespace quota
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java?rev=1428601&r1=1428600&r2=1428601&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java Thu Jan 3 21:23:58 2013
@@ -86,15 +86,15 @@ public class INodeFile extends INode imp
private BlockInfo[] blocks;
- INodeFile(PermissionStatus permissions, BlockInfo[] blklist,
- short replication, long modificationTime,
- long atime, long preferredBlockSize) {
- super(permissions, modificationTime, atime);
+ INodeFile(long id, PermissionStatus permissions, BlockInfo[] blklist,
+ short replication, long modificationTime, long atime,
+ long preferredBlockSize) {
+ super(id, permissions, modificationTime, atime);
header = HeaderFormat.combineReplication(header, replication);
header = HeaderFormat.combinePreferredBlockSize(header, preferredBlockSize);
this.blocks = blklist;
}
-
+
/** @return true unconditionally. */
@Override
public final boolean isFile() {
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java?rev=1428601&r1=1428600&r2=1428601&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java Thu Jan 3 21:23:58 2013
@@ -49,21 +49,23 @@ class INodeFileUnderConstruction extends
private final String clientMachine;
private final DatanodeDescriptor clientNode; // if client is a cluster node too.
- INodeFileUnderConstruction(PermissionStatus permissions,
+ INodeFileUnderConstruction(long id,
+ PermissionStatus permissions,
short replication,
long preferredBlockSize,
long modTime,
String clientName,
String clientMachine,
DatanodeDescriptor clientNode) {
- super(permissions.applyUMask(UMASK), BlockInfo.EMPTY_ARRAY, replication,
- modTime, modTime, preferredBlockSize);
+ super(id, permissions.applyUMask(UMASK), BlockInfo.EMPTY_ARRAY,
+ replication, modTime, modTime, preferredBlockSize);
this.clientName = clientName;
this.clientMachine = clientMachine;
this.clientNode = clientNode;
}
- INodeFileUnderConstruction(byte[] name,
+ INodeFileUnderConstruction(long id,
+ byte[] name,
short blockReplication,
long modificationTime,
long preferredBlockSize,
@@ -72,8 +74,8 @@ class INodeFileUnderConstruction extends
String clientName,
String clientMachine,
DatanodeDescriptor clientNode) {
- super(perm, blocks, blockReplication, modificationTime, modificationTime,
- preferredBlockSize);
+ super(id, perm, blocks, blockReplication, modificationTime,
+ modificationTime, preferredBlockSize);
setLocalName(name);
this.clientName = clientName;
this.clientMachine = clientMachine;
@@ -112,7 +114,8 @@ class INodeFileUnderConstruction extends
assert allBlocksComplete() : "Can't finalize inode " + this
+ " since it contains non-complete blocks! Blocks are "
+ Arrays.asList(getBlocks());
- INodeFile obj = new INodeFile(getPermissionStatus(),
+ INodeFile obj = new INodeFile(getId(),
+ getPermissionStatus(),
getBlocks(),
getBlockReplication(),
getModificationTime(),
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java?rev=1428601&r1=1428600&r2=1428601&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java Thu Jan 3 21:23:58 2013
@@ -28,9 +28,9 @@ import org.apache.hadoop.hdfs.DFSUtil;
public class INodeSymlink extends INode {
private final byte[] symlink; // The target URI
- INodeSymlink(String value, long mtime, long atime,
- PermissionStatus permissions) {
- super(permissions, mtime, atime);
+ INodeSymlink(long id, String value, long mtime, long atime,
+ PermissionStatus permissions) {
+ super(id, permissions, mtime, atime);
this.symlink = DFSUtil.string2Bytes(value);
}
Propchange: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:r1423068-1428155
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_connect.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_connect.c?rev=1428601&r1=1428600&r2=1428601&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_connect.c (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_connect.c Thu Jan 3 21:23:58 2013
@@ -131,7 +131,6 @@ static enum authConf discoverAuthConf(vo
int fuseConnectInit(const char *nnUri, int port)
{
- const char *timerPeriod;
int ret;
gTimerPeriod = FUSE_CONN_DEFAULT_TIMER_PERIOD;
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_open.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_open.c?rev=1428601&r1=1428600&r2=1428601&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_open.c (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_open.c Thu Jan 3 21:23:58 2013
@@ -24,12 +24,77 @@
#include <stdio.h>
#include <stdlib.h>
+static int get_hdfs_open_flags_from_info(hdfsFS fs, const char *path,
+ int flags, int *outflags, const hdfsFileInfo *info);
+
+/**
+ * Given a set of FUSE flags, determine the libhdfs flags we need.
+ *
+ * This is complicated by two things:
+ * 1. libhdfs doesn't support O_RDWR at all;
+ * 2. when given O_WRONLY, libhdfs will truncate the file unless O_APPEND is
+ * also given. In other words, there is an implicit O_TRUNC.
+ *
+ * Probably the next iteration of the libhdfs interface should not use the POSIX
+ * flags at all, since, as you can see, they don't really match up very closely
+ * to the POSIX meaning. However, for the time being, this is the API.
+ *
+ * @param fs The libhdfs object
+ * @param path The path we're opening
+ * @param flags The FUSE flags
+ *
+ * @return negative error code on failure; flags otherwise.
+ */
+static int64_t get_hdfs_open_flags(hdfsFS fs, const char *path, int flags)
+{
+ int hasContent;
+ int64_t ret;
+ hdfsFileInfo *info;
+
+ if ((flags & O_ACCMODE) == O_RDONLY) {
+ return O_RDONLY;
+ }
+ if (flags & O_TRUNC) {
+ /* If we're opening for write or read/write, O_TRUNC means we should blow
+ * away the existing file and create our own.
+ */
+ return O_WRONLY;
+ }
+ info = hdfsGetPathInfo(fs, path);
+ if (info) {
+ if (info->mSize == 0) {
+ // If the file has zero length, we shouldn't feel bad about blowing it
+ // away.
+ ret = O_WRONLY;
+ } else if ((flags & O_ACCMODE) == O_RDWR) {
+ // HACK: translate O_RDWR requests into O_RDONLY if the file already
+ // exists and has non-zero length.
+ ret = O_RDONLY;
+ } else { // O_WRONLY
+ // HACK: translate O_WRONLY requests into append if the file already
+ // exists.
+ ret = O_WRONLY | O_APPEND;
+ }
+ } else { // !info
+ if (flags & O_CREAT) {
+ ret = O_WRONLY;
+ } else {
+ ret = -ENOENT;
+ }
+ }
+ if (info) {
+ hdfsFreeFileInfo(info, 1);
+ }
+ return ret;
+}
+
int dfs_open(const char *path, struct fuse_file_info *fi)
{
hdfsFS fs = NULL;
dfs_context *dfs = (dfs_context*)fuse_get_context()->private_data;
dfs_fh *fh = NULL;
- int mutexInit = 0, ret;
+ int mutexInit = 0, ret, flags = 0;
+ int64_t flagRet;
TRACE1("open", path)
@@ -38,10 +103,6 @@ int dfs_open(const char *path, struct fu
assert('/' == *path);
assert(dfs);
- // 0x8000 is always passed in and hadoop doesn't like it, so killing it here
- // bugbug figure out what this flag is and report problem to Hadoop JIRA
- int flags = (fi->flags & 0x7FFF);
-
// retrieve dfs specific data
fh = (dfs_fh*)calloc(1, sizeof (dfs_fh));
if (!fh) {
@@ -57,22 +118,12 @@ int dfs_open(const char *path, struct fu
goto error;
}
fs = hdfsConnGetFs(fh->conn);
-
- if (flags & O_RDWR) {
- hdfsFileInfo *info = hdfsGetPathInfo(fs, path);
- if (info == NULL) {
- // File does not exist (maybe?); interpret it as a O_WRONLY
- // If the actual error was something else, we'll get it again when
- // we try to open the file.
- flags ^= O_RDWR;
- flags |= O_WRONLY;
- } else {
- // File exists; open this as read only.
- flags ^= O_RDWR;
- flags |= O_RDONLY;
- }
+ flagRet = get_hdfs_open_flags(fs, path, fi->flags);
+ if (flagRet < 0) {
+ ret = -flagRet;
+ goto error;
}
-
+ flags = flagRet;
if ((fh->hdfsFH = hdfsOpenFile(fs, path, flags, 0, 0, 0)) == NULL) {
ERROR("Could not open file %s (errno=%d)", path, errno);
if (errno == 0 || errno == EINTERNAL) {
@@ -91,7 +142,7 @@ int dfs_open(const char *path, struct fu
}
mutexInit = 1;
- if (fi->flags & O_WRONLY || fi->flags & O_CREAT) {
+ if ((flags & O_ACCMODE) == O_WRONLY) {
fh->buf = NULL;
} else {
assert(dfs->rdbuffer_size > 0);
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_init.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_init.c?rev=1428601&r1=1428600&r2=1428601&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_init.c (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_init.c Thu Jan 3 21:23:58 2013
@@ -98,7 +98,7 @@ static void dfsPrintOptions(FILE *fp, co
o->attribute_timeout, o->rdbuffer_size, o->direct_io);
}
-void *dfs_init(void)
+void *dfs_init(struct fuse_conn_info *conn)
{
int ret;
@@ -143,6 +143,45 @@ void *dfs_init(void)
exit(EXIT_FAILURE);
}
}
+
+#ifdef FUSE_CAP_ATOMIC_O_TRUNC
+ // If FUSE_CAP_ATOMIC_O_TRUNC is set, open("foo", O_CREAT | O_TRUNC) will
+ // result in dfs_open being called with O_TRUNC.
+ //
+ // If this capability is not present, fuse will try to use multiple
+ // operation to "simulate" open(O_TRUNC). This doesn't work very well with
+ // HDFS.
+ // Unfortunately, this capability is only implemented on Linux 2.6.29 or so.
+ // See HDFS-4140 for details.
+ if (conn->capable & FUSE_CAP_ATOMIC_O_TRUNC) {
+ conn->want |= FUSE_CAP_ATOMIC_O_TRUNC;
+ }
+#endif
+
+#ifdef FUSE_CAP_ASYNC_READ
+ // We're OK with doing reads at the same time as writes.
+ if (conn->capable & FUSE_CAP_ASYNC_READ) {
+ conn->want |= FUSE_CAP_ASYNC_READ;
+ }
+#endif
+
+#ifdef FUSE_CAP_BIG_WRITES
+ // Yes, we can write more than 4kb at a time. In fact, please do!
+ if (conn->capable & FUSE_CAP_BIG_WRITES) {
+ conn->want |= FUSE_CAP_BIG_WRITES;
+ }
+#endif
+
+#ifdef FUSE_CAP_DONT_MASK
+ if ((options.no_permissions) && (conn->capable & FUSE_CAP_DONT_MASK)) {
+ // If we're handling permissions ourselves, we don't want the kernel
+ // applying its own umask. HDFS already implements its own per-user
+ // umasks! Sadly, this only actually does something on kernels 2.6.31 and
+ // later.
+ conn->want |= FUSE_CAP_DONT_MASK;
+ }
+#endif
+
return (void*)dfs;
}
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_init.h
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_init.h?rev=1428601&r1=1428600&r2=1428601&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_init.h (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_init.h Thu Jan 3 21:23:58 2013
@@ -19,13 +19,15 @@
#ifndef __FUSE_INIT_H__
#define __FUSE_INIT_H__
+struct fuse_conn_info;
+
/**
* These are responsible for initializing connections to dfs and internal
* data structures and then freeing them.
* i.e., what happens on mount and unmount.
*
*/
-void *dfs_init();
+void *dfs_init(struct fuse_conn_info *conn);
void dfs_destroy (void *ptr);
#endif
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/test/fuse_workload.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/test/fuse_workload.c?rev=1428601&r1=1428600&r2=1428601&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/test/fuse_workload.c (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/test/fuse_workload.c Thu Jan 3 21:23:58 2013
@@ -16,6 +16,8 @@
* limitations under the License.
*/
+#define FUSE_USE_VERSION 26
+
#include "fuse-dfs/test/fuse_workload.h"
#include "libhdfs/expect.h"
#include "util/posix_util.h"
@@ -23,6 +25,7 @@
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
+#include <fuse.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
@@ -138,13 +141,89 @@ static int safeRead(int fd, void *buf, i
return amt;
}
+/* Bug: HDFS-2551.
+ * When a program writes a file, closes it, and immediately re-opens it,
+ * it might not appear to have the correct length. This is because FUSE
+ * invokes the release() callback asynchronously.
+ *
+ * To work around this, we keep retrying until the file length is what we
+ * expect.
+ */
+static int closeWorkaroundHdfs2551(int fd, const char *path, off_t expectedSize)
+{
+ int ret, try;
+ struct stat stBuf;
+
+ RETRY_ON_EINTR_GET_ERRNO(ret, close(fd));
+ EXPECT_ZERO(ret);
+ for (try = 0; try < MAX_TRIES; try++) {
+ EXPECT_ZERO(stat(path, &stBuf));
+ EXPECT_NONZERO(S_ISREG(stBuf.st_mode));
+ if (stBuf.st_size == expectedSize) {
+ return 0;
+ }
+ sleepNoSig(1);
+ }
+ fprintf(stderr, "FUSE_WORKLOAD: error: expected file %s to have length "
+ "%lld; instead, it had length %lld\n",
+ path, (long long)expectedSize, (long long)stBuf.st_size);
+ return -EIO;
+}
+
+#ifdef FUSE_CAP_ATOMIC_O_TRUNC
+
+/**
+ * Test that we can create a file, write some contents to it, close that file,
+ * and then successfully re-open with O_TRUNC.
+ */
+static int testOpenTrunc(const char *base)
+{
+ int fd, err;
+ char path[PATH_MAX];
+ const char * const SAMPLE1 = "this is the first file that we wrote.";
+ const char * const SAMPLE2 = "this is the second file that we wrote. "
+ "It's #2!";
+
+ snprintf(path, sizeof(path), "%s/trunc.txt", base);
+ fd = open(path, O_CREAT | O_TRUNC | O_WRONLY, 0644);
+ if (fd < 0) {
+ err = errno;
+ fprintf(stderr, "TEST_ERROR: testOpenTrunc(%s): first open "
+ "failed with error %d\n", path, err);
+ return -err;
+ }
+ EXPECT_ZERO(safeWrite(fd, SAMPLE1, strlen(SAMPLE1)));
+ EXPECT_ZERO(closeWorkaroundHdfs2551(fd, path, strlen(SAMPLE1)));
+ fd = open(path, O_CREAT | O_TRUNC | O_WRONLY, 0644);
+ if (fd < 0) {
+ err = errno;
+ fprintf(stderr, "TEST_ERROR: testOpenTrunc(%s): second open "
+ "failed with error %d\n", path, err);
+ return -err;
+ }
+ EXPECT_ZERO(safeWrite(fd, SAMPLE2, strlen(SAMPLE2)));
+ EXPECT_ZERO(closeWorkaroundHdfs2551(fd, path, strlen(SAMPLE2)));
+ return 0;
+}
+
+#else
+
+static int testOpenTrunc(const char *base)
+{
+ fprintf(stderr, "FUSE_WORKLOAD: We lack FUSE_CAP_ATOMIC_O_TRUNC support. "
+ "Not testing open(O_TRUNC).\n");
+ return 0;
+}
+
+#endif
+
int runFuseWorkloadImpl(const char *root, const char *pcomp,
struct fileCtx *ctx)
{
char base[PATH_MAX], tmp[PATH_MAX], *tmpBuf;
char src[PATH_MAX], dst[PATH_MAX];
struct stat stBuf;
- int ret, i, try;
+ int ret, i;
struct utimbuf tbuf;
struct statvfs stvBuf;
@@ -241,35 +320,10 @@ int runFuseWorkloadImpl(const char *root
EXPECT_ZERO(safeWrite(ctx[i].fd, ctx[i].str, ctx[i].strLen));
}
for (i = 0; i < NUM_FILE_CTX; i++) {
- RETRY_ON_EINTR_GET_ERRNO(ret, close(ctx[i].fd));
- EXPECT_ZERO(ret);
+ EXPECT_ZERO(closeWorkaroundHdfs2551(ctx[i].fd, ctx[i].path, ctx[i].strLen));
ctx[i].fd = -1;
}
for (i = 0; i < NUM_FILE_CTX; i++) {
- /* Bug: HDFS-2551.
- * When a program writes a file, closes it, and immediately re-opens it,
- * it might not appear to have the correct length. This is because FUSE
- * invokes the release() callback asynchronously.
- *
- * To work around this, we keep retrying until the file length is what we
- * expect.
- */
- for (try = 0; try < MAX_TRIES; try++) {
- EXPECT_ZERO(stat(ctx[i].path, &stBuf));
- EXPECT_NONZERO(S_ISREG(stBuf.st_mode));
- if (ctx[i].strLen == stBuf.st_size) {
- break;
- }
- sleepNoSig(1);
- }
- if (try == MAX_TRIES) {
- fprintf(stderr, "FUSE_WORKLOAD: error: expected file %s to have length "
- "%d; instead, it had length %lld\n",
- ctx[i].path, ctx[i].strLen, (long long)stBuf.st_size);
- return -EIO;
- }
- }
- for (i = 0; i < NUM_FILE_CTX; i++) {
ctx[i].fd = open(ctx[i].path, O_RDONLY);
if (ctx[i].fd < 0) {
fprintf(stderr, "FUSE_WORKLOAD: Failed to open file %s for reading!\n",
@@ -308,6 +362,7 @@ int runFuseWorkloadImpl(const char *root
for (i = 0; i < NUM_FILE_CTX; i++) {
free(ctx[i].path);
}
+ EXPECT_ZERO(testOpenTrunc(base));
EXPECT_ZERO(recursiveDelete(base));
return 0;
}
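The hunk above centralizes the HDFS-2551 workaround: after close(), poll the file's metadata until the expected length becomes visible, giving up after a bounded number of one-second waits. For readers who want the pattern outside the C test harness, here is a minimal Java sketch of the same retry-until-visible idea (illustrative only, not part of this patch; the class and method names are hypothetical):

    import java.io.File;
    import java.io.IOException;

    public final class CloseVisibilityWait {
      // Mirrors the C test's bounded retry loop; the bound is an assumption.
      private static final int MAX_TRIES = 30;

      /**
       * Poll until the file at 'path' reports the expected length, papering
       * over the asynchronous release() semantics described in HDFS-2551.
       */
      static void awaitLength(File path, long expectedSize)
          throws IOException, InterruptedException {
        for (int i = 0; i < MAX_TRIES; i++) {
          if (path.isFile() && path.length() == expectedSize) {
            return; // the new length is visible; we are done
          }
          Thread.sleep(1000L); // back off for one second, as the C test does
        }
        throw new IOException("expected " + path + " to have length "
            + expectedSize + "; it still reports " + path.length());
      }
    }

The design point carried over from the patch: the retry lives in one helper (closeWorkaroundHdfs2551 above) rather than being open-coded at every close site.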
Propchange: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1423068-1428155
Propchange: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1423068-1428155
Propchange: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1423068-1428155
Propchange: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1423068-1428155
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java?rev=1428601&r1=1428600&r2=1428601&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java Thu Jan 3 21:23:58 2013
@@ -62,7 +62,8 @@ public class CreateEditsLog {
PermissionStatus p = new PermissionStatus("joeDoe", "people",
new FsPermission((short)0777));
- INodeDirectory dirInode = new INodeDirectory(p, 0L);
+ INodeDirectory dirInode = new INodeDirectory(INodeId.GRANDFATHER_INODE_ID,
+ p, 0L);
editLog.logMkDir(BASE_PATH, dirInode);
long blockSize = 10;
BlockInfo[] blocks = new BlockInfo[blocksPerFile];
@@ -81,8 +82,9 @@ public class CreateEditsLog {
}
INodeFileUnderConstruction inode = new INodeFileUnderConstruction(
- null, replication, 0, blockSize, blocks, p, "", "", null);
- // Append path to filename with information about blockIDs
+ INodeId.GRANDFATHER_INODE_ID, null, replication, 0, blockSize,
+ blocks, p, "", "", null);
+ // Append path to filename with information about blockIDs
String path = "_" + iF + "_B" + blocks[0].getBlockId() +
"_to_B" + blocks[blocksPerFile-1].getBlockId() + "_";
String filePath = nameGenerator.getNextFileName("");
@@ -90,12 +92,12 @@ public class CreateEditsLog {
// Log the new sub directory in edits
if ((iF % nameGenerator.getFilesPerDirectory()) == 0) {
String currentDir = nameGenerator.getCurrentDir();
- dirInode = new INodeDirectory(p, 0L);
+ dirInode = new INodeDirectory(INodeId.GRANDFATHER_INODE_ID, p, 0L);
editLog.logMkDir(currentDir, dirInode);
}
- editLog.logOpenFile(filePath,
- new INodeFileUnderConstruction(
- p, replication, 0, blockSize, "", "", null));
+ editLog.logOpenFile(filePath, new INodeFileUnderConstruction(
+ INodeId.GRANDFATHER_INODE_ID, p, replication, 0, blockSize, "", "",
+ null));
editLog.logCloseFile(filePath, inode);
if (currentBlockId - bidAtSync >= 2000) { // sync every 2K blocks
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java?rev=1428601&r1=1428600&r2=1428601&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java Thu Jan 3 21:23:58 2013
@@ -208,7 +208,7 @@ public abstract class FSImageTestUtil {
* only a specified number of "mkdirs" operations.
*/
public static void createAbortedLogWithMkdirs(File editsLogDir, int numDirs,
- long firstTxId) throws IOException {
+ long firstTxId, long newInodeId) throws IOException {
FSEditLog editLog = FSImageTestUtil.createStandaloneEditLog(editsLogDir);
editLog.setNextTxId(firstTxId);
editLog.openForWrite();
@@ -217,7 +217,7 @@ public abstract class FSImageTestUtil {
FsPermission.createImmutable((short)0755));
for (int i = 1; i <= numDirs; i++) {
String dirName = "dir" + i;
- INodeDirectory dir = new INodeDirectory(dirName, perms);
+ INodeDirectory dir = new INodeDirectory(newInodeId + i - 1, dirName, perms);
editLog.logMkDir("/" + dirName, dir);
}
editLog.logSync();
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java?rev=1428601&r1=1428600&r2=1428601&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java Thu Jan 3 21:23:58 2013
@@ -340,8 +340,8 @@ public class TestBackupNode {
//
// Take a checkpoint
//
- backup = startBackupNode(conf, op, 1);
long txid = cluster.getNameNodeRpc().getTransactionID();
+ backup = startBackupNode(conf, op, 1);
waitCheckpointDone(cluster, txid);
for (int i = 0; i < 10; i++) {
@@ -417,11 +417,65 @@ public class TestBackupNode {
// verify that file2 exists
assertTrue(fileSys.exists(file2));
} catch(IOException e) {
- LOG.error("Error in TestBackupNode:", e);
+ LOG.error("Error in TestBackupNode: ", e);
assertTrue(e.getLocalizedMessage(), false);
} finally {
fileSys.close();
cluster.shutdown();
}
}
+
+ /**
+ * Verify that a file can be read from both the NameNode and the BackupNode.
+ */
+ @Test
+ public void testCanReadData() throws IOException {
+ Path file1 = new Path("/fileToRead.dat");
+ Configuration conf = new HdfsConfiguration();
+ conf.setBoolean(DFSConfigKeys.DFS_PERSIST_BLOCKS_KEY, true);
+ MiniDFSCluster cluster = null;
+ FileSystem fileSys = null;
+ BackupNode backup = null;
+ try {
+ // Start NameNode and BackupNode
+ cluster = new MiniDFSCluster.Builder(conf)
+ .numDataNodes(0).format(true).build();
+ fileSys = cluster.getFileSystem();
+ long txid = cluster.getNameNodeRpc().getTransactionID();
+ backup = startBackupNode(conf, StartupOption.BACKUP, 1);
+ waitCheckpointDone(cluster, txid);
+
+ // Setup dual NameNode configuration for DataNodes
+ String rpcAddrKeyPrefix =
+ DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + ".bnCluster";
+ String nnAddr = cluster.getNameNode().getNameNodeAddressHostPortString();
+ String bnAddr = backup.getNameNodeAddressHostPortString();
+ conf.set(DFSConfigKeys.DFS_NAMESERVICES, "bnCluster");
+ conf.set(DFSConfigKeys.DFS_NAMESERVICE_ID, "bnCluster");
+ conf.set(DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX + ".bnCluster",
+ "nnActive, nnBackup");
+ conf.set(rpcAddrKeyPrefix + ".nnActive", nnAddr);
+ conf.set(rpcAddrKeyPrefix + ".nnBackup", bnAddr);
+ cluster.startDataNodes(conf, 3, true, StartupOption.REGULAR, null);
+
+ DFSTestUtil.createFile(
+ fileSys, file1, fileSize, fileSize, blockSize, (short)3, seed);
+
+ // Read the same file from file systems pointing to NN and BN
+ FileSystem bnFS = FileSystem.get(
+ new Path("hdfs://" + bnAddr).toUri(), conf);
+ String nnData = DFSTestUtil.readFile(fileSys, file1);
+ String bnData = DFSTestUtil.readFile(bnFS, file1);
+ assertEquals("Data read from BackupNode and NameNode is not the same.",
+ nnData, bnData);
+ } catch(IOException e) {
+ LOG.error("Error in TestBackupNode: ", e);
+ assertTrue(e.getLocalizedMessage(), false);
+ } finally {
+ if(fileSys != null) fileSys.close();
+ if(backup != null) backup.stop();
+ if(cluster != null) cluster.shutdown();
+ }
+ }
}
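The interesting part of testCanReadData() is the configuration block: it declares a single logical nameservice with two namenode ids, so the DataNodes heartbeat and report blocks to both the active NameNode and the BackupNode. A minimal sketch of that wiring with hypothetical host:port values (the dfs.* key strings are the standard properties behind the DFSConfigKeys constants used above):

    import org.apache.hadoop.conf.Configuration;

    public final class DualNameNodeConf {
      /** Point DataNodes at both an active NameNode and a BackupNode. */
      static Configuration dualReportingConf(String nnAddr, String bnAddr) {
        Configuration conf = new Configuration();
        conf.set("dfs.nameservices", "bnCluster");   // one logical nameservice
        conf.set("dfs.nameservice.id", "bnCluster");
        conf.set("dfs.ha.namenodes.bnCluster", "nnActive, nnBackup");
        // e.g. nnAddr = "host1:8020", bnAddr = "host2:50100" (hypothetical)
        conf.set("dfs.namenode.rpc-address.bnCluster.nnActive", nnAddr);
        conf.set("dfs.namenode.rpc-address.bnCluster.nnBackup", bnAddr);
        return conf;
      }
    }

With that in place, the test can open a second FileSystem against hdfs://<bnAddr> and compare the bytes read through each node.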
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java?rev=1428601&r1=1428600&r2=1428601&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java Thu Jan 3 21:23:58 2013
@@ -152,7 +152,8 @@ public class TestEditLog {
for (int i = 0; i < numTransactions; i++) {
INodeFileUnderConstruction inode = new INodeFileUnderConstruction(
- p, replication, blockSize, 0, "", "", null);
+ namesystem.allocateNewInodeId(), p, replication, blockSize, 0, "",
+ "", null);
editLog.logOpenFile("/filename" + (startIndex + i), inode);
editLog.logCloseFile("/filename" + (startIndex + i), inode);
editLog.logSync();
@@ -317,6 +318,11 @@ public class TestEditLog {
// we should now be writing to edits_inprogress_3
fsimage.rollEditLog();
+ // Remember the current lastInodeId; we will reset it back later to test
+ // loading edit log segments. The transactions below allocate new inode
+ // ids when writing to the edit logs but do not create inodes in the namespace.
+ long originalLastInodeId = namesystem.getLastInodeId();
+
// Create threads and make them run transactions concurrently.
Thread threadId[] = new Thread[NUM_THREADS];
for (int i = 0; i < NUM_THREADS; i++) {
@@ -349,6 +355,7 @@ public class TestEditLog {
// If there were any corruptions, it is likely that the reading in
// of these transactions will throw an exception.
//
+ namesystem.resetLastInodeIdWithoutChecking(originalLastInodeId);
for (Iterator<StorageDirectory> it =
fsimage.getStorage().dirIterator(NameNodeDirType.EDITS); it.hasNext();) {
FSEditLogLoader loader = new FSEditLogLoader(namesystem, 0);
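The comment and reset above address a subtle coupling: each logged transaction consumes an inode id from the live counter without installing an inode, so replaying those segments against the advanced counter would collide with ids already handed out. A minimal sketch of the save-then-rewind pattern, assuming the two namesystem accessors are visible to the caller as they are to this in-package test (the Runnable stand-ins are hypothetical):

    import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;

    public final class InodeIdRewindSketch {
      /** Wrap edits that allocate ids but create no inodes, then replay them. */
      static void logAndReplay(FSNamesystem namesystem,
          Runnable logTransactions, Runnable replaySegments) {
        long originalLastInodeId = namesystem.getLastInodeId(); // counter before logging
        logTransactions.run();  // allocates ids, creates no inodes
        namesystem.resetLastInodeIdWithoutChecking(originalLastInodeId); // rewind
        replaySegments.run();   // the loader can now re-allocate the same ids
      }
    }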
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java?rev=1428601&r1=1428600&r2=1428601&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java Thu Jan 3 21:23:58 2013
@@ -73,7 +73,8 @@ public class TestFsLimits {
fileAsURI(new File(MiniDFSCluster.getBaseDirectory(),
"namenode")).toString());
- rootInode = new INodeDirectoryWithQuota(INodeDirectory.ROOT_NAME, perms);
+ rootInode = new INodeDirectoryWithQuota(getMockNamesystem()
+ .allocateNewInodeId(), INodeDirectory.ROOT_NAME, perms);
inodes = new INode[]{ rootInode, null };
fs = null;
fsIsReady = true;
@@ -152,7 +153,8 @@ public class TestFsLimits {
// have to create after the caller has had a chance to set conf values
if (fs == null) fs = new MockFSDirectory();
- INode child = new INodeDirectory(name, perms);
+ INode child = new INodeDirectory(getMockNamesystem().allocateNewInodeId(),
+ name, perms);
child.setLocalName(name);
Class<?> generated = null;
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java?rev=1428601&r1=1428600&r2=1428601&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java Thu Jan 3 21:23:58 2013
@@ -26,14 +26,18 @@ import java.io.FileNotFoundException;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathIsNotDirectoryException;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.junit.Test;
@@ -54,9 +58,9 @@ public class TestINodeFile {
public void testReplication () {
replication = 3;
preferredBlockSize = 128*1024*1024;
- INodeFile inf = new INodeFile(new PermissionStatus(userName, null,
- FsPermission.getDefault()), null, replication,
- 0L, 0L, preferredBlockSize);
+ INodeFile inf = new INodeFile(INodeId.GRANDFATHER_INODE_ID,
+ new PermissionStatus(userName, null, FsPermission.getDefault()), null,
+ replication, 0L, 0L, preferredBlockSize);
assertEquals("True has to be returned in this case", replication,
inf.getBlockReplication());
}
@@ -71,9 +75,9 @@ public class TestINodeFile {
throws IllegalArgumentException {
replication = -1;
preferredBlockSize = 128*1024*1024;
- new INodeFile(new PermissionStatus(userName, null,
- FsPermission.getDefault()), null, replication,
- 0L, 0L, preferredBlockSize);
+ new INodeFile(INodeId.GRANDFATHER_INODE_ID, new PermissionStatus(userName,
+ null, FsPermission.getDefault()), null, replication, 0L, 0L,
+ preferredBlockSize);
}
/**
@@ -84,20 +88,20 @@ public class TestINodeFile {
public void testPreferredBlockSize () {
replication = 3;
preferredBlockSize = 128*1024*1024;
- INodeFile inf = new INodeFile(new PermissionStatus(userName, null,
- FsPermission.getDefault()), null, replication,
- 0L, 0L, preferredBlockSize);
- assertEquals("True has to be returned in this case", preferredBlockSize,
- inf.getPreferredBlockSize());
- }
+ INodeFile inf = new INodeFile(INodeId.GRANDFATHER_INODE_ID,
+ new PermissionStatus(userName, null, FsPermission.getDefault()), null,
+ replication, 0L, 0L, preferredBlockSize);
+ assertEquals("True has to be returned in this case", preferredBlockSize,
+ inf.getPreferredBlockSize());
+ }
@Test
public void testPreferredBlockSizeUpperBound () {
replication = 3;
preferredBlockSize = BLKSIZE_MAXVALUE;
- INodeFile inf = new INodeFile(new PermissionStatus(userName, null,
- FsPermission.getDefault()), null, replication,
- 0L, 0L, preferredBlockSize);
+ INodeFile inf = new INodeFile(INodeId.GRANDFATHER_INODE_ID,
+ new PermissionStatus(userName, null, FsPermission.getDefault()), null,
+ replication, 0L, 0L, preferredBlockSize);
assertEquals("True has to be returned in this case", BLKSIZE_MAXVALUE,
inf.getPreferredBlockSize());
}
@@ -112,9 +116,9 @@ public class TestINodeFile {
throws IllegalArgumentException {
replication = 3;
preferredBlockSize = -1;
- new INodeFile(new PermissionStatus(userName, null,
- FsPermission.getDefault()), null, replication,
- 0L, 0L, preferredBlockSize);
+ new INodeFile(INodeId.GRANDFATHER_INODE_ID, new PermissionStatus(userName,
+ null, FsPermission.getDefault()), null, replication, 0L, 0L,
+ preferredBlockSize);
}
/**
@@ -127,10 +131,10 @@ public class TestINodeFile {
throws IllegalArgumentException {
replication = 3;
preferredBlockSize = BLKSIZE_MAXVALUE+1;
- new INodeFile(new PermissionStatus(userName, null,
- FsPermission.getDefault()), null, replication,
- 0L, 0L, preferredBlockSize);
- }
+ new INodeFile(INodeId.GRANDFATHER_INODE_ID, new PermissionStatus(userName,
+ null, FsPermission.getDefault()), null, replication, 0L, 0L,
+ preferredBlockSize);
+ }
@Test
public void testGetFullPathName() {
@@ -139,12 +143,14 @@ public class TestINodeFile {
replication = 3;
preferredBlockSize = 128*1024*1024;
- INodeFile inf = new INodeFile(perms, null, replication,
- 0L, 0L, preferredBlockSize);
+ INodeFile inf = new INodeFile(INodeId.GRANDFATHER_INODE_ID, perms, null,
+ replication, 0L, 0L, preferredBlockSize);
inf.setLocalName("f");
- INodeDirectory root = new INodeDirectory(INodeDirectory.ROOT_NAME, perms);
- INodeDirectory dir = new INodeDirectory("d", perms);
+ INodeDirectory root = new INodeDirectory(INodeId.GRANDFATHER_INODE_ID,
+ INodeDirectory.ROOT_NAME, perms);
+ INodeDirectory dir = new INodeDirectory(INodeId.GRANDFATHER_INODE_ID, "d",
+ perms);
assertEquals("f", inf.getFullPathName());
assertEquals("", inf.getLocalParentDir());
@@ -242,7 +248,7 @@ public class TestINodeFile {
for (int i = 0; i < nCount; i++) {
PermissionStatus perms = new PermissionStatus(userName, null,
FsPermission.getDefault());
- iNodes[i] = new INodeFile(perms, null, replication, 0L, 0L,
+ iNodes[i] = new INodeFile(i, perms, null, replication, 0L, 0L,
preferredBlockSize);
iNodes[i].setLocalName(fileNamePrefix + Integer.toString(i));
BlockInfo newblock = new BlockInfo(replication);
@@ -293,10 +299,10 @@ public class TestINodeFile {
}
{//cast from INodeFile
- final INode from = new INodeFile(
- perm, null, replication, 0L, 0L, preferredBlockSize);
-
- //cast to INodeFile, should success
+ final INode from = new INodeFile(INodeId.GRANDFATHER_INODE_ID, perm,
+ null, replication, 0L, 0L, preferredBlockSize);
+
+ //cast to INodeFile, should success
final INodeFile f = INodeFile.valueOf(from, path);
assertTrue(f == from);
@@ -318,8 +324,9 @@ public class TestINodeFile {
{//cast from INodeFileUnderConstruction
final INode from = new INodeFileUnderConstruction(
- perm, replication, 0L, 0L, "client", "machine", null);
-
+ INodeId.GRANDFATHER_INODE_ID, perm, replication, 0L, 0L, "client",
+ "machine", null);
+
//cast to INodeFile, should success
final INodeFile f = INodeFile.valueOf(from, path);
assertTrue(f == from);
@@ -338,7 +345,8 @@ public class TestINodeFile {
}
{//cast from INodeDirectory
- final INode from = new INodeDirectory(perm, 0L);
+ final INode from = new INodeDirectory(INodeId.GRANDFATHER_INODE_ID, perm,
+ 0L);
//cast to INodeFile, should fail
try {
@@ -361,4 +369,47 @@ public class TestINodeFile {
assertTrue(d == from);
}
}
+
+ /**
+ * Verify that root always has inode id 1001 and that a newly formatted
+ * fsimage has last allocated inode id 1000. Validate that the correct
+ * lastInodeId is persisted.
+ * @throws IOException
+ */
+ @Test
+ public void testInodeId() throws IOException {
+
+ Configuration conf = new Configuration();
+ conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
+ DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT);
+ MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
+ .build();
+ cluster.waitActive();
+
+ FSNamesystem fsn = cluster.getNamesystem();
+ long lastId = fsn.getLastInodeId();
+
+ assertTrue(lastId == 1001);
+
+ // Create one directory; the last inode id should increase to 1002
+ FileSystem fs = cluster.getFileSystem();
+ Path path = new Path("/test1");
+ assertTrue(fs.mkdirs(path));
+ assertTrue(fsn.getLastInodeId() == 1002);
+
+ Path filePath = new Path("/test1/file");
+ fs.create(filePath);
+ assertTrue(fsn.getLastInodeId() == 1003);
+
+ // Rename doesn't increase inode id
+ Path renamedPath = new Path("/test2");
+ fs.rename(path, renamedPath);
+ assertTrue(fsn.getLastInodeId() == 1003);
+
+ cluster.restartNameNode();
+ cluster.waitActive();
+ // Make sure an empty edit log can be handled
+ cluster.restartNameNode();
+ cluster.waitActive();
+ assertTrue(fsn.getLastInodeId() == 1003);
+ }
}
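testInodeId() pins down the allocator contract introduced by this patch: a freshly formatted fsimage reports last allocated id 1000, the root directory takes 1001, every new file or directory bumps the counter by one, and rename leaves it alone. A minimal sketch of an allocator honoring that contract (the GRANDFATHER_INODE_ID value shown is an assumption for illustration; the patch defines the real sentinel in INodeId.java):

    import java.util.concurrent.atomic.AtomicLong;

    public final class InodeIdAllocatorSketch {
      // Sentinel for inodes that predate per-inode ids (value assumed here).
      static final long GRANDFATHER_INODE_ID = 0;

      // Freshly formatted image: last allocated id is 1000, per the test above.
      private final AtomicLong lastInodeId = new AtomicLong(1000);

      /** First allocation (the root directory) therefore returns 1001. */
      long allocateNewInodeId() {
        return lastInodeId.incrementAndGet();
      }

      long getLastInodeId() {
        return lastInodeId.get();
      }
    }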
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogsDuringFailover.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogsDuringFailover.java?rev=1428601&r1=1428600&r2=1428601&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogsDuringFailover.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogsDuringFailover.java Thu Jan 3 21:23:58 2013
@@ -36,6 +36,7 @@ import org.apache.hadoop.hdfs.MiniDFSClu
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NNStorage;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.io.IOUtils;
@@ -139,7 +140,9 @@ public class TestEditLogsDuringFailover
// Create a fake in-progress edit-log in the shared directory
URI sharedUri = cluster.getSharedEditsDir(0, 1);
File sharedDir = new File(sharedUri.getPath(), "current");
- FSImageTestUtil.createAbortedLogWithMkdirs(sharedDir, NUM_DIRS_IN_LOG, 1);
+ FSNamesystem fsn = cluster.getNamesystem(0);
+ FSImageTestUtil.createAbortedLogWithMkdirs(sharedDir, NUM_DIRS_IN_LOG, 1,
+ fsn.getLastInodeId() + 1);
assertEditFiles(Collections.singletonList(sharedUri),
NNStorage.getInProgressEditsFileName(1));
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java?rev=1428601&r1=1428600&r2=1428601&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java Thu Jan 3 21:23:58 2013
@@ -204,8 +204,9 @@ public class TestStandbyCheckpoints {
File sharedDir = new File(sharedUri.getPath(), "current");
File tmpDir = new File(MiniDFSCluster.getBaseDirectory(),
"testCheckpointCancellation-tmp");
- FSImageTestUtil.createAbortedLogWithMkdirs(tmpDir, NUM_DIRS_IN_LOG,
- 3);
+ FSNamesystem fsn = cluster.getNamesystem(0);
+ FSImageTestUtil.createAbortedLogWithMkdirs(tmpDir, NUM_DIRS_IN_LOG, 3,
+ fsn.getLastInodeId() + 1);
String fname = NNStorage.getInProgressEditsFileName(3);
new File(tmpDir, fname).renameTo(new File(sharedDir, fname));