You are viewing a plain text version of this content. The canonical link for it is here.
Posted to hdfs-commits@hadoop.apache.org by sz...@apache.org on 2013/07/10 23:20:24 UTC
svn commit: r1502007 [1/2] - in
/hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs:
./ src/main/java/ src/main/java/org/apache/hadoop/hdfs/protocol/
src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/
src/main/java/org/a...
Author: szetszwo
Date: Wed Jul 10 21:20:23 2013
New Revision: 1502007
URL: http://svn.apache.org/r1502007
Log:
svn merge -c 1501993 from branch-2 for HDFS-4645. Move from randomly generated block ID to sequentially generated block ID.
Added:
hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/OutOfV1GenerationStampsException.java
- copied unchanged from r1501993, hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/OutOfV1GenerationStampsException.java
hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SequentialBlockIdGenerator.java
- copied unchanged from r1501993, hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SequentialBlockIdGenerator.java
hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSequentialBlockId.java
- copied unchanged from r1501993, hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSequentialBlockId.java
Removed:
hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RandomBlockIdGenerator.java
Modified:
hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/ (props changed)
hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/ (props changed)
hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java
hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java
hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java
hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java
hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageVisitor.java
hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java
hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java
hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java
hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored
hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
Propchange: hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
Merged /hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs:r1501993
Modified: hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1502007&r1=1502006&r2=1502007&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Wed Jul 10 21:20:23 2013
@@ -179,6 +179,9 @@ Release 2.1.0-beta - 2013-07-02
HDFS-4908. Reduce snapshot inode memory usage. (szetszwo)
+ HDFS-4645. Move from randomly generated block ID to sequentially generated
+ block ID. (Arpit Agarwal via szetszwo)
+
OPTIMIZATIONS
BUG FIXES
Propchange: hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
Merged /hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1501993
Modified: hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java?rev=1502007&r1=1502006&r2=1502007&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java (original)
+++ hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java Wed Jul 10 21:20:23 2013
@@ -86,6 +86,10 @@ public class HdfsConstants {
// An invalid transaction ID that will never be seen in a real namesystem.
public static final long INVALID_TXID = -12345;
+ // Number of generation stamps reserved for legacy blocks.
+ public static final long RESERVED_GENERATION_STAMPS_V1 =
+ 1024L * 1024 * 1024 * 1024;
+
/**
* URI Scheme for hdfs://namenode/ URIs.
*/
Modified: hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java?rev=1502007&r1=1502006&r2=1502007&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java (original)
+++ hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java Wed Jul 10 21:20:23 2013
@@ -102,9 +102,9 @@ public class LayoutVersion {
RESERVED_REL1_3_0(-44, -41,
"Reserved for release 1.3.0", true, ADD_INODE_ID, SNAPSHOT),
OPTIMIZE_SNAPSHOT_INODES(-45, -43,
- "Reduce snapshot inode memory footprint", false);
-
-
+ "Reduce snapshot inode memory footprint", false),
+ SEQUENTIAL_BLOCK_ID(-46, "Allocate block IDs sequentially and store " +
+ "block IDs in the edits log and image files");
final int lv;
final int ancestorLV;
Modified: hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java?rev=1502007&r1=1502006&r2=1502007&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java (original)
+++ hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java Wed Jul 10 21:20:23 2013
@@ -1726,7 +1726,7 @@ public class BlockManager {
ReplicaState reportedState = itBR.getCurrentReplicaState();
if (shouldPostponeBlocksFromFuture &&
- namesystem.isGenStampInFuture(iblk.getGenerationStamp())) {
+ namesystem.isGenStampInFuture(iblk)) {
queueReportedBlock(node, iblk, reportedState,
QUEUE_REASON_FUTURE_GENSTAMP);
continue;
@@ -1848,7 +1848,7 @@ public class BlockManager {
}
if (shouldPostponeBlocksFromFuture &&
- namesystem.isGenStampInFuture(block.getGenerationStamp())) {
+ namesystem.isGenStampInFuture(block)) {
queueReportedBlock(dn, block, reportedState,
QUEUE_REASON_FUTURE_GENSTAMP);
return null;
Modified: hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java?rev=1502007&r1=1502006&r2=1502007&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java (original)
+++ hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java Wed Jul 10 21:20:23 2013
@@ -60,7 +60,6 @@ import org.apache.hadoop.hdfs.server.nam
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameSnapshotOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenewDelegationTokenOp;
-import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetGenstampOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetOwnerOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetPermissionsOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetQuotaOp;
@@ -69,6 +68,9 @@ import org.apache.hadoop.hdfs.server.nam
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.TimesOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateBlocksOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateMasterKeyOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AllocateBlockIdOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetGenstampV1Op;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetGenstampV2Op;
import org.apache.hadoop.hdfs.server.namenode.JournalSet.JournalAndStream;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
@@ -800,12 +802,30 @@ public class FSEditLog implements LogsPu
logEdit(op);
}
- /**
+ /**
+ * Add legacy block generation stamp record to edit log
+ */
+ void logGenerationStampV1(long genstamp) {
+ SetGenstampV1Op op = SetGenstampV1Op.getInstance(cache.get())
+ .setGenerationStamp(genstamp);
+ logEdit(op);
+ }
+
+ /**
* Add generation stamp record to edit log
*/
- void logGenerationStamp(long genstamp) {
- SetGenstampOp op = SetGenstampOp.getInstance(cache.get())
- .setGenerationStamp(genstamp);
+ void logGenerationStampV2(long genstamp) {
+ SetGenstampV2Op op = SetGenstampV2Op.getInstance(cache.get())
+ .setGenerationStamp(genstamp);
+ logEdit(op);
+ }
+
+ /**
+ * Record a newly allocated block ID in the edit log
+ */
+ void logAllocateBlockId(long blockId) {
+ AllocateBlockIdOp op = AllocateBlockIdOp.getInstance(cache.get())
+ .setBlockId(blockId);
logEdit(op);
}
Modified: hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java?rev=1502007&r1=1502006&r2=1502007&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java (original)
+++ hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java Wed Jul 10 21:20:23 2013
@@ -57,7 +57,6 @@ import org.apache.hadoop.hdfs.server.nam
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameSnapshotOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenewDelegationTokenOp;
-import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetGenstampOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetNSQuotaOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetOwnerOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetPermissionsOp;
@@ -67,6 +66,9 @@ import org.apache.hadoop.hdfs.server.nam
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.TimesOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateBlocksOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateMasterKeyOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AllocateBlockIdOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetGenstampV1Op;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetGenstampV2Op;
import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease;
import org.apache.hadoop.hdfs.util.Holder;
@@ -406,9 +408,9 @@ public class FSEditLogLoader {
mkdirOp.timestamp);
break;
}
- case OP_SET_GENSTAMP: {
- SetGenstampOp setGenstampOp = (SetGenstampOp)op;
- fsNamesys.setGenerationStamp(setGenstampOp.genStamp);
+ case OP_SET_GENSTAMP_V1: {
+ SetGenstampV1Op setGenstampV1Op = (SetGenstampV1Op)op;
+ fsNamesys.setGenerationStampV1(setGenstampV1Op.genStampV1);
break;
}
case OP_SET_PERMISSIONS: {
@@ -554,6 +556,16 @@ public class FSEditLogLoader {
disallowSnapshotOp.snapshotRoot);
break;
}
+ case OP_SET_GENSTAMP_V2: {
+ SetGenstampV2Op setGenstampV2Op = (SetGenstampV2Op) op;
+ fsNamesys.setGenerationStampV2(setGenstampV2Op.genStampV2);
+ break;
+ }
+ case OP_ALLOCATE_BLOCK_ID: {
+ AllocateBlockIdOp allocateBlockIdOp = (AllocateBlockIdOp) op;
+ fsNamesys.setLastAllocatedBlockId(allocateBlockIdOp.blockId);
+ break;
+ }
default:
throw new IOException("Invalid operation read " + op.opCode);
}
Modified: hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java?rev=1502007&r1=1502006&r2=1502007&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java (original)
+++ hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java Wed Jul 10 21:20:23 2013
@@ -90,7 +90,7 @@ public abstract class FSEditLogOp {
inst.put(OP_RENAME_OLD, new RenameOldOp());
inst.put(OP_DELETE, new DeleteOp());
inst.put(OP_MKDIR, new MkdirOp());
- inst.put(OP_SET_GENSTAMP, new SetGenstampOp());
+ inst.put(OP_SET_GENSTAMP_V1, new SetGenstampV1Op());
inst.put(OP_SET_PERMISSIONS, new SetPermissionsOp());
inst.put(OP_SET_OWNER, new SetOwnerOp());
inst.put(OP_SET_NS_QUOTA, new SetNSQuotaOp());
@@ -116,6 +116,8 @@ public abstract class FSEditLogOp {
inst.put(OP_CREATE_SNAPSHOT, new CreateSnapshotOp());
inst.put(OP_DELETE_SNAPSHOT, new DeleteSnapshotOp());
inst.put(OP_RENAME_SNAPSHOT, new RenameSnapshotOp());
+ inst.put(OP_SET_GENSTAMP_V2, new SetGenstampV2Op());
+ inst.put(OP_ALLOCATE_BLOCK_ID, new AllocateBlockIdOp());
}
public FSEditLogOp get(FSEditLogOpCodes opcode) {
@@ -1054,39 +1056,39 @@ public abstract class FSEditLogOp {
}
}
- static class SetGenstampOp extends FSEditLogOp {
- long genStamp;
+ static class SetGenstampV1Op extends FSEditLogOp {
+ long genStampV1;
- private SetGenstampOp() {
- super(OP_SET_GENSTAMP);
+ private SetGenstampV1Op() {
+ super(OP_SET_GENSTAMP_V1);
}
- static SetGenstampOp getInstance(OpInstanceCache cache) {
- return (SetGenstampOp)cache.get(OP_SET_GENSTAMP);
+ static SetGenstampV1Op getInstance(OpInstanceCache cache) {
+ return (SetGenstampV1Op)cache.get(OP_SET_GENSTAMP_V1);
}
- SetGenstampOp setGenerationStamp(long genStamp) {
- this.genStamp = genStamp;
+ SetGenstampV1Op setGenerationStamp(long genStamp) {
+ this.genStampV1 = genStamp;
return this;
}
-
+
@Override
- public
+ public
void writeFields(DataOutputStream out) throws IOException {
- FSImageSerialization.writeLong(genStamp, out);
+ FSImageSerialization.writeLong(genStampV1, out);
}
-
+
@Override
void readFields(DataInputStream in, int logVersion)
throws IOException {
- this.genStamp = FSImageSerialization.readLong(in);
+ this.genStampV1 = FSImageSerialization.readLong(in);
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
- builder.append("SetGenstampOp [genStamp=");
- builder.append(genStamp);
+ builder.append("SetGenstampOp [GenStamp=");
+ builder.append(genStampV1);
builder.append(", opCode=");
builder.append(opCode);
builder.append(", txid=");
@@ -1094,15 +1096,119 @@ public abstract class FSEditLogOp {
builder.append("]");
return builder.toString();
}
-
+
@Override
protected void toXml(ContentHandler contentHandler) throws SAXException {
XMLUtils.addSaxString(contentHandler, "GENSTAMP",
- Long.valueOf(genStamp).toString());
+ Long.valueOf(genStampV1).toString());
}
-
+
+ @Override void fromXml(Stanza st) throws InvalidXmlException {
+ this.genStampV1 = Long.valueOf(st.getValue("GENSTAMP"));
+ }
+ }
+
+ static class SetGenstampV2Op extends FSEditLogOp {
+ long genStampV2;
+
+ private SetGenstampV2Op() {
+ super(OP_SET_GENSTAMP_V2);
+ }
+
+ static SetGenstampV2Op getInstance(OpInstanceCache cache) {
+ return (SetGenstampV2Op)cache.get(OP_SET_GENSTAMP_V2);
+ }
+
+ SetGenstampV2Op setGenerationStamp(long genStamp) {
+ this.genStampV2 = genStamp;
+ return this;
+ }
+
+ @Override
+ public
+ void writeFields(DataOutputStream out) throws IOException {
+ FSImageSerialization.writeLong(genStampV2, out);
+ }
+
+ @Override
+ void readFields(DataInputStream in, int logVersion)
+ throws IOException {
+ this.genStampV2 = FSImageSerialization.readLong(in);
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder builder = new StringBuilder();
+ builder.append("SetGenstampV2Op [GenStampV2=");
+ builder.append(genStampV2);
+ builder.append(", opCode=");
+ builder.append(opCode);
+ builder.append(", txid=");
+ builder.append(txid);
+ builder.append("]");
+ return builder.toString();
+ }
+
+ @Override
+ protected void toXml(ContentHandler contentHandler) throws SAXException {
+ XMLUtils.addSaxString(contentHandler, "GENSTAMPV2",
+ Long.valueOf(genStampV2).toString());
+ }
+
+ @Override void fromXml(Stanza st) throws InvalidXmlException {
+ this.genStampV2 = Long.valueOf(st.getValue("GENSTAMPV2"));
+ }
+ }
+
+ static class AllocateBlockIdOp extends FSEditLogOp {
+ long blockId;
+
+ private AllocateBlockIdOp() {
+ super(OP_ALLOCATE_BLOCK_ID);
+ }
+
+ static AllocateBlockIdOp getInstance(OpInstanceCache cache) {
+ return (AllocateBlockIdOp)cache.get(OP_ALLOCATE_BLOCK_ID);
+ }
+
+ AllocateBlockIdOp setBlockId(long blockId) {
+ this.blockId = blockId;
+ return this;
+ }
+
+ @Override
+ public
+ void writeFields(DataOutputStream out) throws IOException {
+ FSImageSerialization.writeLong(blockId, out);
+ }
+
+ @Override
+ void readFields(DataInputStream in, int logVersion)
+ throws IOException {
+ this.blockId = FSImageSerialization.readLong(in);
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder builder = new StringBuilder();
+ builder.append("AllocateBlockIdOp [blockId=");
+ builder.append(blockId);
+ builder.append(", opCode=");
+ builder.append(opCode);
+ builder.append(", txid=");
+ builder.append(txid);
+ builder.append("]");
+ return builder.toString();
+ }
+
+ @Override
+ protected void toXml(ContentHandler contentHandler) throws SAXException {
+ XMLUtils.addSaxString(contentHandler, "BLOCK_ID",
+ Long.valueOf(blockId).toString());
+ }
+
@Override void fromXml(Stanza st) throws InvalidXmlException {
- this.genStamp = Long.valueOf(st.getValue("GENSTAMP"));
+ this.blockId = Long.valueOf(st.getValue("BLOCK_ID"));
}
}
Modified: hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java?rev=1502007&r1=1502006&r2=1502007&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java (original)
+++ hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java Wed Jul 10 21:20:23 2013
@@ -41,7 +41,7 @@ public enum FSEditLogOpCodes {
OP_SET_PERMISSIONS ((byte) 7),
OP_SET_OWNER ((byte) 8),
OP_CLOSE ((byte) 9),
- OP_SET_GENSTAMP ((byte) 10),
+ OP_SET_GENSTAMP_V1 ((byte) 10),
OP_SET_NS_QUOTA ((byte) 11), // obsolete
OP_CLEAR_NS_QUOTA ((byte) 12), // obsolete
OP_TIMES ((byte) 13), // set atime, mtime
@@ -61,8 +61,9 @@ public enum FSEditLogOpCodes {
OP_DELETE_SNAPSHOT ((byte) 27),
OP_RENAME_SNAPSHOT ((byte) 28),
OP_ALLOW_SNAPSHOT ((byte) 29),
- OP_DISALLOW_SNAPSHOT ((byte) 30);
-
+ OP_DISALLOW_SNAPSHOT ((byte) 30),
+ OP_SET_GENSTAMP_V2 ((byte) 31),
+ OP_ALLOCATE_BLOCK_ID ((byte) 32);
private byte opCode;
/**
Modified: hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java?rev=1502007&r1=1502006&r2=1502007&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java (original)
+++ hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java Wed Jul 10 21:20:23 2013
@@ -95,7 +95,6 @@ public class FSImage implements Closeabl
final private Configuration conf;
protected NNStorageRetentionManager archivalManager;
- protected IdGenerator blockIdGenerator;
/**
* Construct an FSImage
@@ -141,9 +140,6 @@ public class FSImage implements Closeabl
Preconditions.checkState(fileCount == 1,
"FSImage.format should be called with an uninitialized namesystem, has " +
fileCount + " files");
- // BlockIdGenerator is defined during formatting
- // currently there is only one BlockIdGenerator
- blockIdGenerator = createBlockIdGenerator(fsn);
NamespaceInfo ns = NNStorage.newNamespaceInfo();
ns.clusterID = clusterId;
@@ -814,9 +810,6 @@ public class FSImage implements Closeabl
FSImageFormat.Loader loader = new FSImageFormat.Loader(
conf, target);
loader.load(curFile);
- // BlockIdGenerator is determined after loading image
- // currently there is only one BlockIdGenerator
- blockIdGenerator = createBlockIdGenerator(target);
target.setBlockPoolId(this.getBlockPoolID());
// Check that the image digest we loaded matches up with what
@@ -1249,12 +1242,4 @@ public class FSImage implements Closeabl
public synchronized long getMostRecentCheckpointTxId() {
return storage.getMostRecentCheckpointTxId();
}
-
- public long getUniqueBlockId() {
- return blockIdGenerator.nextValue();
- }
-
- public IdGenerator createBlockIdGenerator(FSNamesystem fsn) {
- return new RandomBlockIdGenerator(fsn);
- }
}
Modified: hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java?rev=1502007&r1=1502006&r2=1502007&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java (original)
+++ hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java Wed Jul 10 21:20:23 2013
@@ -70,8 +70,10 @@ import org.apache.hadoop.io.Text;
* <pre>
* FSImage {
* layoutVersion: int, namespaceID: int, numberItemsInFSDirectoryTree: long,
- * namesystemGenerationStamp: long, transactionID: long,
- * snapshotCounter: int, numberOfSnapshots: int, numOfSnapshottableDirs: int,
+ * namesystemGenerationStampV1: long, namesystemGenerationStampV2: long,
+ * generationStampAtBlockIdSwitch:long, lastAllocatedBlockId:
+ * long transactionID: long, snapshotCounter: int, numberOfSnapshots: int,
+ * numOfSnapshottableDirs: int,
* {FSDirectoryTree, FilesUnderConstruction, SecretManagerState} (can be compressed)
* }
*
@@ -257,10 +259,30 @@ public class FSImageFormat {
long numFiles = in.readLong();
- // read in the last generation stamp.
+ // read in the last generation stamp for legacy blocks.
long genstamp = in.readLong();
- namesystem.setGenerationStamp(genstamp);
+ namesystem.setGenerationStampV1(genstamp);
+ if (LayoutVersion.supports(Feature.SEQUENTIAL_BLOCK_ID, imgVersion)) {
+ // read the starting generation stamp for sequential block IDs
+ genstamp = in.readLong();
+ namesystem.setGenerationStampV2(genstamp);
+
+ // read the last generation stamp for blocks created after
+ // the switch to sequential block IDs.
+ long stampAtIdSwitch = in.readLong();
+ namesystem.setGenerationStampV1Limit(stampAtIdSwitch);
+
+ // read the max sequential block ID.
+ long maxSequentialBlockId = in.readLong();
+ namesystem.setLastAllocatedBlockId(maxSequentialBlockId);
+ } else {
+ long startingGenStamp = namesystem.upgradeGenerationStampToV2();
+ // This is an upgrade.
+ LOG.info("Upgrading to sequential block IDs. Generation stamp " +
+ "for new blocks set to " + startingGenStamp);
+ }
+
// read the transaction ID of the last edit represented by
// this image
if (LayoutVersion.supports(Feature.STORED_TXIDS, imgVersion)) {
@@ -884,9 +906,13 @@ public class FSImageFormat {
out.writeInt(sourceNamesystem.unprotectedGetNamespaceInfo()
.getNamespaceID());
out.writeLong(fsDir.rootDir.numItemsInTree());
- out.writeLong(sourceNamesystem.getGenerationStamp());
+ out.writeLong(sourceNamesystem.getGenerationStampV1());
+ out.writeLong(sourceNamesystem.getGenerationStampV2());
+ out.writeLong(sourceNamesystem.getGenerationStampAtblockIdSwitch());
+ out.writeLong(sourceNamesystem.getLastAllocatedBlockId());
out.writeLong(context.getTxId());
out.writeLong(sourceNamesystem.getLastInodeId());
+
sourceNamesystem.getSnapshotManager().write(out);
Modified: hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1502007&r1=1502006&r2=1502007&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Wed Jul 10 21:20:23 2013
@@ -156,12 +156,7 @@ import org.apache.hadoop.hdfs.security.t
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
-import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
-import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
-import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStatistics;
+import org.apache.hadoop.hdfs.server.blockmanagement.*;
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
@@ -362,9 +357,32 @@ public class FSNamesystem implements Nam
private final long maxBlocksPerFile; // maximum # of blocks per file
/**
- * The global generation stamp for this file system.
+ * The global generation stamp for legacy blocks with randomly
+ * generated block IDs.
*/
- private final GenerationStamp generationStamp = new GenerationStamp();
+ private final GenerationStamp generationStampV1 = new GenerationStamp();
+
+ /**
+ * The global generation stamp for this file system.
+ */
+ private final GenerationStamp generationStampV2 = new GenerationStamp();
+
+ /**
+ * The value of the generation stamp when the first switch to sequential
+ * block IDs was made. Blocks with generation stamps below this value
+ * have randomly allocated block IDs. Blocks with generation stamps above
+ * this value have sequentially allocated block IDs. Read from the fsImage
+ * (or initialized as an offset from the V1 (legacy) generation stamp on
+ * upgrade).
+ */
+ private long generationStampV1Limit =
+ GenerationStamp.GRANDFATHER_GENERATION_STAMP;
+
+ /**
+ * The global block ID space for this file system.
+ */
+ @VisibleForTesting
+ private final SequentialBlockIdGenerator blockIdGenerator;
// precision of access times.
private final long accessTimePrecision;
@@ -424,7 +442,11 @@ public class FSNamesystem implements Nam
void clear() {
dir.reset();
dtSecretManager.reset();
- generationStamp.setCurrentValue(GenerationStamp.LAST_RESERVED_STAMP);
+ generationStampV1.setCurrentValue(GenerationStamp.LAST_RESERVED_STAMP);
+ generationStampV2.setCurrentValue(GenerationStamp.LAST_RESERVED_STAMP);
+ blockIdGenerator.setCurrentValue(
+ SequentialBlockIdGenerator.LAST_RESERVED_BLOCK_ID);
+ generationStampV1Limit = GenerationStamp.GRANDFATHER_GENERATION_STAMP;
leaseManager.removeAllLeases();
inodeId.setCurrentValue(INodeId.LAST_RESERVED_ID);
}
@@ -526,9 +548,9 @@ public class FSNamesystem implements Nam
*
* Note that this does not load any data off of disk -- if you would
* like that behavior, use {@link #loadFromDisk(Configuration)}
-
- * @param fnImage The FSImage to associate with
+ *
* @param conf configuration
+ * @param fsImage The FSImage to associate with
* @throws IOException on bad configuration
*/
FSNamesystem(Configuration conf, FSImage fsImage) throws IOException {
@@ -539,6 +561,7 @@ public class FSNamesystem implements Nam
this.blockManager = new BlockManager(this, this, conf);
this.datanodeStatistics = blockManager.getDatanodeManager().getDatanodeStatistics();
+ this.blockIdGenerator = new SequentialBlockIdGenerator(this.blockManager);
this.fsOwner = UserGroupInformation.getCurrentUser();
this.fsOwnerShortUserName = fsOwner.getShortUserName();
@@ -2656,9 +2679,9 @@ public class FSNamesystem implements Nam
*/
Block createNewBlock() throws IOException {
assert hasWriteLock();
- Block b = new Block(getFSImage().getUniqueBlockId(), 0, 0);
+ Block b = new Block(nextBlockId(), 0, 0);
// Increment the generation stamp for every new block.
- b.setGenerationStamp(nextGenerationStamp());
+ b.setGenerationStamp(nextGenerationStamp(false));
return b;
}
@@ -3354,7 +3377,7 @@ public class FSNamesystem implements Nam
uc.setExpectedLocations(blockManager.getNodes(lastBlock));
}
// start recovery of the last block for this file
- long blockRecoveryId = nextGenerationStamp();
+ long blockRecoveryId = nextGenerationStamp(isLegacyBlock(uc));
lease = reassignLease(lease, src, recoveryLeaseHolder, pendingFile);
uc.initializeBlockRecovery(blockRecoveryId);
leaseManager.renewLease(lease);
@@ -4988,34 +5011,164 @@ public class FSNamesystem implements Nam
}
/**
- * Sets the generation stamp for this filesystem
+ * Sets the current generation stamp for legacy blocks
+ */
+ void setGenerationStampV1(long stamp) {
+ generationStampV1.setCurrentValue(stamp);
+ }
+
+ /**
+ * Gets the current generation stamp for legacy blocks
+ */
+ long getGenerationStampV1() {
+ return generationStampV1.getCurrentValue();
+ }
+
+ /**
+ * Sets the current generation stamp for this filesystem
+ */
+ void setGenerationStampV2(long stamp) {
+ generationStampV2.setCurrentValue(stamp);
+ }
+
+ /**
+ * Gets the current generation stamp for this filesystem
+ */
+ long getGenerationStampV2() {
+ return generationStampV2.getCurrentValue();
+ }
+
+ /**
+ * Upgrades the generation stamp for the filesystem
+ * by reserving a sufficient range for all existing blocks.
+ * Should be invoked only during the first upgrade to
+ * sequential block IDs.
+ */
+ long upgradeGenerationStampToV2() {
+ Preconditions.checkState(generationStampV2.getCurrentValue() ==
+ GenerationStamp.LAST_RESERVED_STAMP);
+
+ generationStampV2.skipTo(
+ generationStampV1.getCurrentValue() +
+ HdfsConstants.RESERVED_GENERATION_STAMPS_V1);
+
+ generationStampV1Limit = generationStampV2.getCurrentValue();
+ return generationStampV2.getCurrentValue();
+ }
+
+ /**
+ * Sets the generation stamp that delineates random and sequentially
+ * allocated block IDs.
+ * @param stamp the generation stamp value at the switch to sequential block IDs
+ */
+ void setGenerationStampV1Limit(long stamp) {
+ Preconditions.checkState(generationStampV1Limit ==
+ GenerationStamp.GRANDFATHER_GENERATION_STAMP);
+ generationStampV1Limit = stamp;
+ }
+
+ /**
+ * Gets the value of the generation stamp that delineates sequential
+ * and random block IDs.
*/
- void setGenerationStamp(long stamp) {
- generationStamp.setCurrentValue(stamp);
+ long getGenerationStampAtblockIdSwitch() {
+ return generationStampV1Limit;
+ }
+
+ @VisibleForTesting
+ SequentialBlockIdGenerator getBlockIdGenerator() {
+ return blockIdGenerator;
+ }
+
+ /**
+ * Sets the maximum allocated block ID for this filesystem. This is
+ * the basis for allocating new block IDs.
+ */
+ void setLastAllocatedBlockId(long blockId) {
+ blockIdGenerator.skipTo(blockId);
}
/**
- * Gets the generation stamp for this filesystem
+ * Gets the maximum sequentially allocated block ID for this filesystem
*/
- long getGenerationStamp() {
- return generationStamp.getCurrentValue();
+ long getLastAllocatedBlockId() {
+ return blockIdGenerator.getCurrentValue();
}
/**
* Increments, logs and then returns the stamp
*/
- private long nextGenerationStamp() throws SafeModeException {
+ long nextGenerationStamp(boolean legacyBlock)
+ throws IOException, SafeModeException {
assert hasWriteLock();
if (isInSafeMode()) {
throw new SafeModeException(
"Cannot get next generation stamp", safeMode);
}
- final long gs = generationStamp.nextValue();
- getEditLog().logGenerationStamp(gs);
+
+ long gs;
+ if (legacyBlock) {
+ gs = getNextGenerationStampV1();
+ getEditLog().logGenerationStampV1(gs);
+ } else {
+ gs = getNextGenerationStampV2();
+ getEditLog().logGenerationStampV2(gs);
+ }
+
// NB: callers sync the log
return gs;
}
+ @VisibleForTesting
+ long getNextGenerationStampV1() throws IOException {
+ long genStampV1 = generationStampV1.nextValue();
+
+ if (genStampV1 >= generationStampV1Limit) {
+ // We ran out of generation stamps for legacy blocks. In practice, it
+ // is extremely unlikely as we reserved 1T v1 generation stamps. The
+ // result is that we can no longer append to the legacy blocks that
+ // were created before the upgrade to sequential block IDs.
+ throw new OutOfV1GenerationStampsException();
+ }
+
+ return genStampV1;
+ }
+
+ @VisibleForTesting
+ long getNextGenerationStampV2() {
+ return generationStampV2.nextValue();
+ }
+
+ long getGenerationStampV1Limit() {
+ return generationStampV1Limit;
+ }
+
+ /**
+ * Determine whether the block ID was randomly generated (legacy) or
+ * sequentially generated. The generation stamp value is used to
+ * make the distinction.
+ * @param block
+ * @return true if the block ID was randomly generated, false otherwise.
+ */
+ boolean isLegacyBlock(Block block) {
+ return block.getGenerationStamp() < getGenerationStampV1Limit();
+ }
+
+ /**
+ * Increments, logs and then returns the block ID
+ */
+ private long nextBlockId() throws SafeModeException {
+ assert hasWriteLock();
+ if (isInSafeMode()) {
+ throw new SafeModeException(
+ "Cannot get next block ID", safeMode);
+ }
+ final long blockId = blockIdGenerator.nextValue();
+ getEditLog().logAllocateBlockId(blockId);
+ // NB: callers sync the log
+ return blockId;
+ }
+
private INodeFileUnderConstruction checkUCBlock(ExtendedBlock block,
String clientName) throws IOException {
assert hasWriteLock();
@@ -5096,7 +5249,8 @@ public class FSNamesystem implements Nam
checkUCBlock(block, clientName);
// get a new generation stamp and an access token
- block.setGenerationStamp(nextGenerationStamp());
+ block.setGenerationStamp(
+ nextGenerationStamp(isLegacyBlock(block.getLocalBlock())));
locatedBlock = new LocatedBlock(block, new DatanodeInfo[0]);
blockManager.setBlockToken(locatedBlock, AccessMode.WRITE);
} finally {
@@ -5111,7 +5265,7 @@ public class FSNamesystem implements Nam
* Update a pipeline for a block under construction
*
* @param clientName the name of the client
- * @param oldblock and old block
+ * @param oldBlock an old block
* @param newBlock a new block with a new generation stamp and length
* @param newNodes datanodes in the pipeline
* @throws IOException if any error occurs
@@ -5813,9 +5967,14 @@ public class FSNamesystem implements Nam
}
@Override
- public boolean isGenStampInFuture(long genStamp) {
- return (genStamp > getGenerationStamp());
+ public boolean isGenStampInFuture(Block block) {
+ if (isLegacyBlock(block)) {
+ return block.getGenerationStamp() > getGenerationStampV1();
+ } else {
+ return block.getGenerationStamp() > getGenerationStampV2();
+ }
}
+
@VisibleForTesting
public EditLogTailer getEditLogTailer() {
return editLogTailer;
Modified: hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java?rev=1502007&r1=1502006&r2=1502007&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java (original)
+++ hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java Wed Jul 10 21:20:23 2013
@@ -18,6 +18,7 @@
package org.apache.hadoop.hdfs.server.namenode;
import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
import org.apache.hadoop.hdfs.util.RwLock;
import org.apache.hadoop.ipc.StandbyException;
@@ -37,7 +38,7 @@ public interface Namesystem extends RwLo
public boolean isInStandbyState();
- public boolean isGenStampInFuture(long generationStamp);
+ public boolean isGenStampInFuture(Block block);
public void adjustSafeModeBlockTotals(int deltaSafe, int deltaTotal);
Modified: hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java?rev=1502007&r1=1502006&r2=1502007&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java (original)
+++ hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java Wed Jul 10 21:20:23 2013
@@ -126,7 +126,7 @@ class ImageLoaderCurrent implements Imag
new SimpleDateFormat("yyyy-MM-dd HH:mm");
private static int[] versions = { -16, -17, -18, -19, -20, -21, -22, -23,
-24, -25, -26, -27, -28, -30, -31, -32, -33, -34, -35, -36, -37, -38, -39,
- -40, -41, -42, -43, -44, -45};
+ -40, -41, -42, -43, -44, -45, -46 };
private int imageVersion = 0;
private final Map<Long, String> subtreeMap = new HashMap<Long, String>();
@@ -165,6 +165,12 @@ class ImageLoaderCurrent implements Imag
v.visit(ImageElement.GENERATION_STAMP, in.readLong());
+ if (LayoutVersion.supports(Feature.SEQUENTIAL_BLOCK_ID, imageVersion)) {
+ v.visit(ImageElement.GENERATION_STAMP_V2, in.readLong());
+ v.visit(ImageElement.GENERATION_STAMP_V1_LIMIT, in.readLong());
+ v.visit(ImageElement.LAST_ALLOCATED_BLOCK_ID, in.readLong());
+ }
+
if (LayoutVersion.supports(Feature.STORED_TXIDS, imageVersion)) {
v.visit(ImageElement.TRANSACTION_ID, in.readLong());
}
Modified: hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageVisitor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageVisitor.java?rev=1502007&r1=1502006&r2=1502007&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageVisitor.java (original)
+++ hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageVisitor.java Wed Jul 10 21:20:23 2013
@@ -38,6 +38,9 @@ abstract class ImageVisitor {
LAYOUT_VERSION,
NUM_INODES,
GENERATION_STAMP,
+ GENERATION_STAMP_V2,
+ GENERATION_STAMP_V1_LIMIT,
+ LAST_ALLOCATED_BLOCK_ID,
INODES,
INODE,
INODE_PATH,
Modified: hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java?rev=1502007&r1=1502006&r2=1502007&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java (original)
+++ hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java Wed Jul 10 21:20:23 2013
@@ -241,6 +241,39 @@ public class DFSTestUtil {
IOUtils.closeStream(out);
}
}
+
+ public static void createFile(FileSystem fs, Path fileName, int bufferLen,
+ long fileLen, long blockSize, short replFactor, long seed)
+ throws IOException {
+ assert bufferLen > 0;
+ if (!fs.mkdirs(fileName.getParent())) {
+ throw new IOException("Mkdirs failed to create " +
+ fileName.getParent().toString());
+ }
+ FSDataOutputStream out = null;
+ try {
+ out = fs.create(fileName, true, fs.getConf()
+ .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
+ replFactor, blockSize);
+ if (fileLen > 0) {
+ byte[] toWrite = new byte[bufferLen];
+ Random rb = new Random(seed);
+ long bytesToWrite = fileLen;
+ while (bytesToWrite>0) {
+ rb.nextBytes(toWrite);
+ int bytesToWriteNext = (bufferLen < bytesToWrite) ? bufferLen
+ : (int) bytesToWrite;
+
+ out.write(toWrite, 0, bytesToWriteNext);
+ bytesToWrite -= bytesToWriteNext;
+ }
+ }
+ } finally {
+ if (out != null) {
+ out.close();
+ }
+ }
+ }
/** check if the files have been copied correctly. */
public boolean checkFiles(FileSystem fs, String topdir) throws IOException {
@@ -554,7 +587,7 @@ public class DFSTestUtil {
}
public static ExtendedBlock getFirstBlock(FileSystem fs, Path path) throws IOException {
- HdfsDataInputStream in = (HdfsDataInputStream)((DistributedFileSystem)fs).open(path);
+ HdfsDataInputStream in = (HdfsDataInputStream) fs.open(path);
in.readByte();
return in.getCurrentBlock();
}
@@ -564,6 +597,12 @@ public class DFSTestUtil {
return ((HdfsDataInputStream) in).getAllBlocks();
}
+ public static List<LocatedBlock> getAllBlocks(FileSystem fs, Path path)
+ throws IOException {
+ HdfsDataInputStream in = (HdfsDataInputStream) fs.open(path);
+ return in.getAllBlocks();
+ }
+
public static Token<BlockTokenIdentifier> getBlockToken(
FSDataOutputStream out) {
return ((DFSOutputStream) out.getWrappedStream()).getBlockToken();
Modified: hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java?rev=1502007&r1=1502006&r2=1502007&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java (original)
+++ hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java Wed Jul 10 21:20:23 2013
@@ -53,7 +53,9 @@ import com.google.common.base.Joiner;
*/
public class TestDFSUpgrade {
- private static final int EXPECTED_TXID = 45;
+ // TODO: Avoid hard-coding expected_txid. The test should be more robust.
+ private static final int EXPECTED_TXID = 61;
+
private static final Log LOG = LogFactory.getLog(TestDFSUpgrade.class.getName());
private Configuration conf;
private int testCounter = 0;
Modified: hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java?rev=1502007&r1=1502006&r2=1502007&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java (original)
+++ hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java Wed Jul 10 21:20:23 2013
@@ -210,6 +210,7 @@ public class TestDataTransferProtocol {
@Test
public void testOpWrite() throws IOException {
int numDataNodes = 1;
+ final long BLOCK_ID_FUDGE = 128;
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
try {
@@ -260,8 +261,9 @@ public class TestDataTransferProtocol {
"Recover failed close to a finalized replica", false);
firstBlock.setGenerationStamp(newGS);
- /* Test writing to a new block */
- long newBlockId = firstBlock.getBlockId() + 1;
+ // Test writing to a new block. Don't choose the next sequential
+ // block ID to avoid conflicting with IDs chosen by the NN.
+ long newBlockId = firstBlock.getBlockId() + BLOCK_ID_FUDGE;
ExtendedBlock newBlock = new ExtendedBlock(firstBlock.getBlockPoolId(),
newBlockId, 0, firstBlock.getGenerationStamp());
@@ -292,7 +294,7 @@ public class TestDataTransferProtocol {
Path file1 = new Path("dataprotocol1.dat");
DFSTestUtil.createFile(fileSys, file1, 1L, (short)numDataNodes, 0L);
DFSOutputStream out = (DFSOutputStream)(fileSys.append(file1).
- getWrappedStream());
+ getWrappedStream());
out.write(1);
out.hflush();
FSDataInputStream in = fileSys.open(file1);
Modified: hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java?rev=1502007&r1=1502006&r2=1502007&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java (original)
+++ hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java Wed Jul 10 21:20:23 2013
@@ -64,9 +64,6 @@ public class OfflineEditsViewerHelper {
/**
* Generates edits with all op codes and returns the edits filename
- *
- * @param dfsDir DFS directory (where to setup MiniDFS cluster)
- * @param editsFilename where to copy the edits
*/
public String generateEdits() throws IOException {
CheckpointSignature signature = runOperations();
@@ -142,7 +139,7 @@ public class OfflineEditsViewerHelper {
DistributedFileSystem dfs =
(DistributedFileSystem)cluster.getFileSystem();
FileContext fc = FileContext.getFileContext(cluster.getURI(0), config);
- // OP_ADD 0, OP_SET_GENSTAMP 10
+ // OP_ADD 0
Path pathFileCreate = new Path("/file_create_u\1F431");
FSDataOutputStream s = dfs.create(pathFileCreate);
// OP_CLOSE 9
Modified: hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java?rev=1502007&r1=1502006&r2=1502007&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java (original)
+++ hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java Wed Jul 10 21:20:23 2013
@@ -1171,7 +1171,8 @@ public class TestCheckpoint {
throw new IOException(e);
}
- final int EXPECTED_TXNS_FIRST_SEG = 11;
+ // TODO: Fix the test to not require a hard-coded transaction count.
+ final int EXPECTED_TXNS_FIRST_SEG = 13;
// the following steps should have happened:
// edits_inprogress_1 -> edits_1-12 (finalized)
Modified: hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java?rev=1502007&r1=1502006&r2=1502007&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java (original)
+++ hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java Wed Jul 10 21:20:23 2013
@@ -1083,7 +1083,7 @@ public class TestEditLog {
editlog.initJournalsForWrite();
editlog.openForWrite();
for (int i = 2; i < TXNS_PER_ROLL; i++) {
- editlog.logGenerationStamp((long)0);
+ editlog.logGenerationStampV2((long) 0);
}
editlog.logSync();
@@ -1095,7 +1095,7 @@ public class TestEditLog {
for (int i = 0; i < numrolls; i++) {
editlog.rollEditLog();
- editlog.logGenerationStamp((long)i);
+ editlog.logGenerationStampV2((long) i);
editlog.logSync();
while (aborts.size() > 0
@@ -1105,7 +1105,7 @@ public class TestEditLog {
}
for (int j = 3; j < TXNS_PER_ROLL; j++) {
- editlog.logGenerationStamp((long)i);
+ editlog.logGenerationStampV2((long) i);
}
editlog.logSync();
}
Modified: hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java?rev=1502007&r1=1502006&r2=1502007&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java (original)
+++ hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java Wed Jul 10 21:20:23 2013
@@ -18,6 +18,7 @@
package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.*;
+import static org.hamcrest.CoreMatchers.is;
import java.io.IOException;
import java.io.OutputStream;
@@ -51,10 +52,10 @@ public class TestEditLogFileInputStream
// Read the edit log and verify that we got all of the data.
EnumMap<FSEditLogOpCodes, Holder<Integer>> counts =
FSImageTestUtil.countEditLogOpTypes(elis);
- assertEquals(1L, (long)counts.get(FSEditLogOpCodes.OP_ADD).held);
- assertEquals(1L, (long)counts.get(FSEditLogOpCodes.OP_SET_GENSTAMP).held);
- assertEquals(1L, (long)counts.get(FSEditLogOpCodes.OP_CLOSE).held);
-
+ assertThat(counts.get(FSEditLogOpCodes.OP_ADD).held, is(1));
+ assertThat(counts.get(FSEditLogOpCodes.OP_SET_GENSTAMP_V1).held, is(1));
+ assertThat(counts.get(FSEditLogOpCodes.OP_CLOSE).held, is(1));
+
// Check that length header was picked up.
assertEquals(FAKE_LOG_DATA.length, elis.length());
elis.close();
Modified: hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java?rev=1502007&r1=1502006&r2=1502007&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java (original)
+++ hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java Wed Jul 10 21:20:23 2013
@@ -541,7 +541,7 @@ public class TestSaveNamespace {
FSNamesystem spyFsn = spy(fsn);
final FSNamesystem finalFsn = spyFsn;
DelayAnswer delayer = new GenericTestUtils.DelayAnswer(LOG);
- doAnswer(delayer).when(spyFsn).getGenerationStamp();
+ doAnswer(delayer).when(spyFsn).getGenerationStampV2();
ExecutorService pool = Executors.newFixedThreadPool(2);
Modified: hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java?rev=1502007&r1=1502006&r2=1502007&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java (original)
+++ hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java Wed Jul 10 21:20:23 2013
@@ -44,7 +44,13 @@ public class TestOfflineEditsViewer {
private static final Map<FSEditLogOpCodes, Boolean> obsoleteOpCodes =
new HashMap<FSEditLogOpCodes, Boolean>();
- static { initializeObsoleteOpCodes(); }
+ private static final Map<FSEditLogOpCodes, Boolean> missingOpCodes =
+ new HashMap<FSEditLogOpCodes, Boolean>();
+
+ static {
+ initializeObsoleteOpCodes();
+ initializeMissingOpCodes();
+ }
private static String buildDir =
System.getProperty("test.build.data", "build/test/data");
@@ -74,6 +80,16 @@ public class TestOfflineEditsViewer {
obsoleteOpCodes.put(FSEditLogOpCodes.OP_CLEAR_NS_QUOTA, true);
}
+ /**
+ * Initialize missingOpcodes
+ *
+ * Opcodes that are not available except after upgrade from
+ * an older version. We don't test these here.
+ */
+ private static void initializeMissingOpCodes() {
+ obsoleteOpCodes.put(FSEditLogOpCodes.OP_SET_GENSTAMP_V1, true);
+ }
+
@Before
public void setup() {
new File(cacheDir).mkdirs();
@@ -103,6 +119,8 @@ public class TestOfflineEditsViewer {
assertTrue(
"Edits " + edits + " should have all op codes",
hasAllOpCodes(edits));
+ LOG.info("Comparing generated file " + editsReparsed +
+ " with reference file " + edits);
assertTrue(
"Generated edits and reparsed (bin to XML to bin) should be same",
filesEqualIgnoreTrailingZeros(edits, editsReparsed));
@@ -222,9 +240,12 @@ public class TestOfflineEditsViewer {
// don't need to test obsolete opCodes
if(obsoleteOpCodes.containsKey(opCode)) {
continue;
- }
- if (opCode == FSEditLogOpCodes.OP_INVALID)
+ } else if (missingOpCodes.containsKey(opCode)) {
+ continue;
+ } else if (opCode == FSEditLogOpCodes.OP_INVALID) {
continue;
+ }
+
Long count = visitor.getStatistics().get(opCode);
if((count == null) || (count == 0)) {
hasAllOpCodes = false;
Modified: hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1.0-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored?rev=1502007&r1=1502006&r2=1502007&view=diff
==============================================================================
Binary files - no diff available.