You are viewing a plain text version of this content. The canonical link for it is here.
Posted to hdfs-commits@hadoop.apache.org by sz...@apache.org on 2013/04/15 23:42:07 UTC
svn commit: r1468238 - in
/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs: ./
src/main/java/org/apache/hadoop/hdfs/protocol/
src/main/java/org/apache/hadoop/hdfs/server/namenode/
src/main/java/org/apache/hadoop/hdfs/server/namenode/sn...
Author: szetszwo
Date: Mon Apr 15 21:42:06 2013
New Revision: 1468238
URL: http://svn.apache.org/r1468238
Log:
HDFS-4666. Define ".snapshot" as a reserved inode name so that users cannot create a file/directory with ".snapshot" as the name. If ".snapshot" is used in a previous version of HDFS, it must be renamed before upgrade; otherwise, upgrade will fail.
Modified:
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/FSLimitException.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/snapshot/SnapshotDiff.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt?rev=1468238&r1=1468237&r2=1468238&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt Mon Apr 15 21:42:06 2013
@@ -236,3 +236,8 @@ Branch-2802 Snapshot (Unreleased)
szetszwo)
HDFS-4692. Use timestamp as default snapshot names. (szetszwo)
+
+ HDFS-4666. Define ".snapshot" as a reserved inode name so that users cannot
+ create a file/directory with ".snapshot" as the name. If ".snapshot" is used
+ in a previous version of HDFS, it must be renamed before upgrade; otherwise,
+ upgrade will fail. (szetszwo)
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/FSLimitException.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/FSLimitException.java?rev=1468238&r1=1468237&r2=1468238&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/FSLimitException.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/FSLimitException.java Mon Apr 15 21:42:06 2013
@@ -97,4 +97,15 @@ public abstract class FSLimitException e
" is exceeded: limit=" + quota + " items=" + count;
}
}
+
+ /** The given name is illegal. */
+ public static final class IllegalNameException extends FSLimitException {
+ public static final long serialVersionUID = 1L;
+
+ public IllegalNameException() {}
+
+ public IllegalNameException(String msg) {
+ super(msg);
+ }
+ }
}
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java?rev=1468238&r1=1468237&r2=1468238&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java Mon Apr 15 21:42:06 2013
@@ -18,7 +18,9 @@
package org.apache.hadoop.hdfs.protocol;
import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
/************************************
@@ -107,4 +109,10 @@ public class HdfsConstants {
* A special path component contained in the path for a snapshot file/dir
*/
public static final String DOT_SNAPSHOT_DIR = ".snapshot";
+
+ public static final byte[] DOT_SNAPSHOT_DIR_BYTES
+ = DFSUtil.string2Bytes(DOT_SNAPSHOT_DIR);
+
+ public static final String SEPARATOR_DOT_SNAPSHOT_DIR
+ = Path.SEPARATOR + DOT_SNAPSHOT_DIR;
}
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java?rev=1468238&r1=1468237&r2=1468238&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java Mon Apr 15 21:42:06 2013
@@ -98,7 +98,7 @@ public class LayoutVersion {
"add OP_UPDATE_BLOCKS"),
RESERVED_REL1_2_0(-41, -32, "Reserved for release 1.2.0", true, CONCAT),
ADD_INODE_ID(-42, -40, "Assign a unique inode id for each inode", false),
- SNAPSHOT(-43, -42, "Support for snapshot feature", false);
+ SNAPSHOT(-43, "Support for snapshot feature");
final int lv;
final int ancestorLV;
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=1468238&r1=1468237&r2=1468238&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Mon Apr 15 21:42:06 2013
@@ -23,6 +23,7 @@ import java.io.Closeable;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Condition;
@@ -41,11 +42,12 @@ import org.apache.hadoop.fs.permission.F
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
-import org.apache.hadoop.hdfs.protocol.FSLimitException;
+import org.apache.hadoop.hdfs.protocol.FSLimitException.IllegalNameException;
import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException;
import org.apache.hadoop.hdfs.protocol.FSLimitException.PathComponentTooLongException;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -1355,7 +1357,7 @@ public class FSDirectory implements Clos
readLock();
try {
- if (srcs.endsWith(Path.SEPARATOR + HdfsConstants.DOT_SNAPSHOT_DIR)) {
+ if (srcs.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR)) {
return getSnapshotsListing(srcs, startAfter);
}
final INodesInPath inodesInPath = rootDir.getLastINodeInPath(srcs, true);
@@ -1393,10 +1395,10 @@ public class FSDirectory implements Clos
*/
private DirectoryListing getSnapshotsListing(String src, byte[] startAfter)
throws UnresolvedLinkException, IOException {
- assert hasReadLock();
- final String dotSnapshot = Path.SEPARATOR + HdfsConstants.DOT_SNAPSHOT_DIR;
- Preconditions.checkArgument(src.endsWith(dotSnapshot),
- src + " does not end with " + dotSnapshot);
+ Preconditions.checkState(hasReadLock());
+ Preconditions.checkArgument(
+ src.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR),
+ "%s does not end with %s", src, HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR);
final String dirPath = normalizePath(src.substring(0,
src.length() - HdfsConstants.DOT_SNAPSHOT_DIR.length()));
@@ -1428,7 +1430,7 @@ public class FSDirectory implements Clos
String srcs = normalizePath(src);
readLock();
try {
- if (srcs.endsWith(Path.SEPARATOR + HdfsConstants.DOT_SNAPSHOT_DIR)) {
+ if (srcs.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR)) {
return getFileInfo4DotSnapshot(srcs);
}
final INodesInPath inodesInPath = rootDir.getLastINodeInPath(srcs, resolveLink);
@@ -1442,9 +1444,9 @@ public class FSDirectory implements Clos
private HdfsFileStatus getFileInfo4DotSnapshot(String src)
throws UnresolvedLinkException {
- final String dotSnapshot = Path.SEPARATOR + HdfsConstants.DOT_SNAPSHOT_DIR;
- Preconditions.checkArgument(src.endsWith(dotSnapshot),
- src + " does not end with " + dotSnapshot);
+ Preconditions.checkArgument(
+ src.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR),
+ "%s does not end with %s", src, HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR);
final String dirPath = normalizePath(src.substring(0,
src.length() - HdfsConstants.DOT_SNAPSHOT_DIR.length()));
@@ -1927,38 +1929,47 @@ public class FSDirectory implements Clos
verifyQuota(dst, dstIndex, delta.get(Quota.NAMESPACE),
delta.get(Quota.DISKSPACE), src[i - 1]);
}
-
- /**
- * Verify that filesystem limit constraints are not violated
- */
- void verifyFsLimits(INode[] pathComponents, int pos, INode child)
- throws FSLimitException {
- verifyMaxComponentLength(child.getLocalName(), pathComponents, pos);
- verifyMaxDirItems(pathComponents, pos);
+
+ /** Verify if the snapshot name is legal. */
+ void verifySnapshotName(String snapshotName, String path)
+ throws PathComponentTooLongException, IllegalNameException {
+ final byte[] bytes = DFSUtil.string2Bytes(snapshotName);
+ verifyINodeName(bytes);
+ verifyMaxComponentLength(bytes, path, 0);
+ }
+
+ /** Verify if the inode name is legal. */
+ void verifyINodeName(byte[] childName) throws IllegalNameException {
+ if (Arrays.equals(HdfsConstants.DOT_SNAPSHOT_DIR_BYTES, childName)) {
+ String s = "\"" + HdfsConstants.DOT_SNAPSHOT_DIR + "\" is a reserved name.";
+ if (!ready) {
+ s += " Please rename it before upgrade.";
+ }
+ throw new IllegalNameException(s);
+ }
}
/**
* Verify child's name for fs limit.
* @throws PathComponentTooLongException child's name is too long.
*/
- public void verifyMaxComponentLength(String childName,
- Object parentPath, int pos) throws PathComponentTooLongException {
+ void verifyMaxComponentLength(byte[] childName, Object parentPath, int pos)
+ throws PathComponentTooLongException {
if (maxComponentLength == 0) {
return;
}
- final int length = childName.length();
+ final int length = childName.length;
if (length > maxComponentLength) {
final String p = parentPath instanceof INode[]?
getFullPathName((INode[])parentPath, pos - 1): (String)parentPath;
final PathComponentTooLongException e = new PathComponentTooLongException(
- maxComponentLength, length, p, childName);
+ maxComponentLength, length, p, DFSUtil.bytes2String(childName));
if (ready) {
throw e;
} else {
// Do not throw if edits log is still being processed
- NameNode.LOG.error("FSDirectory.verifyMaxComponentLength: "
- + e.getLocalizedMessage());
+ NameNode.LOG.error("ERROR in FSDirectory.verifyINodeName", e);
}
}
}
@@ -1967,7 +1978,7 @@ public class FSDirectory implements Clos
* Verify children size for fs limit.
* @throws MaxDirectoryItemsExceededException too many children.
*/
- private void verifyMaxDirItems(INode[] pathComponents, int pos)
+ void verifyMaxDirItems(INode[] pathComponents, int pos)
throws MaxDirectoryItemsExceededException {
if (maxDirItems == 0) {
return;
@@ -2015,8 +2026,11 @@ public class FSDirectory implements Clos
// original location because a quota violation would cause the item
// to go "poof". The fs limits must be bypassed for the same reason.
if (checkQuota) {
- verifyFsLimits(inodes, pos, child);
+ verifyMaxComponentLength(child.getLocalNameBytes(), inodes, pos);
+ verifyMaxDirItems(inodes, pos);
}
+ // always verify inode name
+ verifyINodeName(child.getLocalNameBytes());
final Quota.Counts counts = child.computeQuotaUsage();
updateCount(iip, pos,
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java?rev=1468238&r1=1468237&r2=1468238&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java Mon Apr 15 21:42:06 2013
@@ -565,6 +565,10 @@ public class FSImageFormat {
INode loadINode(final byte[] localName, boolean isSnapshotINode,
DataInput in) throws IOException {
final int imgVersion = getLayoutVersion();
+ if (LayoutVersion.supports(Feature.SNAPSHOT, imgVersion)) {
+ namesystem.getFSDirectory().verifyINodeName(localName);
+ }
+
long inodeId = LayoutVersion.supports(Feature.ADD_INODE_ID, imgVersion) ?
in.readLong() : namesystem.allocateNewInodeId();
@@ -903,7 +907,7 @@ public class FSImageFormat {
* actually leads to.
* @return The snapshot path.
*/
- private String computeSnapshotPath(String nonSnapshotPath,
+ private static String computeSnapshotPath(String nonSnapshotPath,
Snapshot snapshot) {
String snapshotParentFullPath = snapshot.getRoot().getParent()
.getFullPathName();
@@ -911,10 +915,8 @@ public class FSImageFormat {
String relativePath = nonSnapshotPath.equals(snapshotParentFullPath) ?
Path.SEPARATOR : nonSnapshotPath.substring(
snapshotParentFullPath.length());
- String snapshotFullPath = snapshotParentFullPath + Path.SEPARATOR
- + HdfsConstants.DOT_SNAPSHOT_DIR + Path.SEPARATOR + snapshotName
- + relativePath;
- return snapshotFullPath;
+ return Snapshot.getSnapshotPath(snapshotParentFullPath,
+ snapshotName + relativePath);
}
/**
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1468238&r1=1468237&r2=1468238&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Mon Apr 15 21:42:06 2013
@@ -5806,7 +5806,7 @@ public class FSNamesystem implements Nam
if (snapshotName == null || snapshotName.isEmpty()) {
snapshotName = Snapshot.generateDefaultSnapshotName();
}
- dir.verifyMaxComponentLength(snapshotName, snapshotRoot, 0);
+ dir.verifySnapshotName(snapshotName, snapshotRoot);
dir.writeLock();
try {
snapshotPath = snapshotManager.createSnapshot(snapshotRoot, snapshotName);
@@ -5844,7 +5844,7 @@ public class FSNamesystem implements Nam
safeMode);
}
checkOwner(pc, path);
- dir.verifyMaxComponentLength(snapshotNewName, path, 0);
+ dir.verifySnapshotName(snapshotNewName, path);
snapshotManager.renameSnapshot(path, snapshotOldName, snapshotNewName);
getEditLog().logRenameSnapshot(path, snapshotOldName, snapshotNewName);
@@ -5854,12 +5854,9 @@ public class FSNamesystem implements Nam
getEditLog().logSync();
if (auditLog.isInfoEnabled() && isExternalInvocation()) {
- Path oldSnapshotRoot = new Path(path, HdfsConstants.DOT_SNAPSHOT_DIR
- + "/" + snapshotOldName);
- Path newSnapshotRoot = new Path(path, HdfsConstants.DOT_SNAPSHOT_DIR
- + "/" + snapshotNewName);
- logAuditEvent(true, "renameSnapshot", oldSnapshotRoot.toString(),
- newSnapshotRoot.toString(), null);
+ String oldSnapshotRoot = Snapshot.getSnapshotPath(path, snapshotOldName);
+ String newSnapshotRoot = Snapshot.getSnapshotPath(path, snapshotNewName);
+ logAuditEvent(true, "renameSnapshot", oldSnapshotRoot, newSnapshotRoot, null);
}
}
@@ -5959,9 +5956,8 @@ public class FSNamesystem implements Nam
getEditLog().logSync();
if (auditLog.isInfoEnabled() && isExternalInvocation()) {
- Path rootPath = new Path(snapshotRoot, HdfsConstants.DOT_SNAPSHOT_DIR
- + Path.SEPARATOR + snapshotName);
- logAuditEvent(true, "deleteSnapshot", rootPath.toString(), null, null);
+ String rootPath = Snapshot.getSnapshotPath(snapshotRoot, snapshotName);
+ logAuditEvent(true, "deleteSnapshot", rootPath, null, null);
}
}
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java?rev=1468238&r1=1468237&r2=1468238&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java Mon Apr 15 21:42:06 2013
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.na
import java.io.FileNotFoundException;
import java.io.PrintWriter;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
@@ -511,8 +512,8 @@ public class INodeDirectory extends INod
* @return true if path component is {@link HdfsConstants#DOT_SNAPSHOT_DIR}
*/
private static boolean isDotSnapshotDir(byte[] pathComponent) {
- return pathComponent == null ? false : HdfsConstants.DOT_SNAPSHOT_DIR
- .equalsIgnoreCase(DFSUtil.bytes2String(pathComponent));
+ return pathComponent == null ? false
+ : Arrays.equals(HdfsConstants.DOT_SNAPSHOT_DIR_BYTES, pathComponent);
}
/**
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java?rev=1468238&r1=1468237&r2=1468238&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java Mon Apr 15 21:42:06 2013
@@ -49,9 +49,16 @@ public class Snapshot implements Compara
return new SimpleDateFormat(DEFAULT_SNAPSHOT_NAME_PATTERN).format(new Date());
}
- static String getSnapshotPath(String snapshottableDir, String snapshotName) {
- return new Path(snapshottableDir, HdfsConstants.DOT_SNAPSHOT_DIR
- + Path.SEPARATOR + snapshotName).toString();
+ public static String getSnapshotPath(String snapshottableDir,
+ String snapshotRelativePath) {
+ final StringBuilder b = new StringBuilder(snapshottableDir);
+ if (b.charAt(b.length() - 1) != Path.SEPARATOR_CHAR) {
+ b.append(Path.SEPARATOR);
+ }
+ return b.append(HdfsConstants.DOT_SNAPSHOT_DIR)
+ .append(Path.SEPARATOR)
+ .append(snapshotRelativePath)
+ .toString();
}
/**
@@ -123,9 +130,7 @@ public class Snapshot implements Compara
@Override
public String getFullPathName() {
- return getParent().getFullPathName() + Path.SEPARATOR
- + HdfsConstants.DOT_SNAPSHOT_DIR + Path.SEPARATOR
- + this.getLocalName();
+ return getSnapshotPath(getParent().getFullPathName(), getLocalName());
}
}
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java?rev=1468238&r1=1468237&r2=1468238&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java Mon Apr 15 21:42:06 2013
@@ -123,7 +123,7 @@ class ImageLoaderCurrent implements Imag
new SimpleDateFormat("yyyy-MM-dd HH:mm");
private static int[] versions = { -16, -17, -18, -19, -20, -21, -22, -23,
-24, -25, -26, -27, -28, -30, -31, -32, -33, -34, -35, -36, -37, -38, -39,
- -40, -41, -42};
+ -40, -41, -42, -43};
private int imageVersion = 0;
/* (non-Javadoc)
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/snapshot/SnapshotDiff.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/snapshot/SnapshotDiff.java?rev=1468238&r1=1468237&r2=1468238&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/snapshot/SnapshotDiff.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/snapshot/SnapshotDiff.java Mon Apr 15 21:42:06 2013
@@ -43,14 +43,18 @@ public class SnapshotDiff {
if (Path.CUR_DIR.equals(name)) { // current directory
return "";
}
- if (name.startsWith(HdfsConstants.DOT_SNAPSHOT_DIR + Path.SEPARATOR)
- || name.startsWith(Path.SEPARATOR + HdfsConstants.DOT_SNAPSHOT_DIR
- + Path.SEPARATOR)) {
- // get the snapshot name
- int i = name.indexOf(HdfsConstants.DOT_SNAPSHOT_DIR);
- return name.substring(i + HdfsConstants.DOT_SNAPSHOT_DIR.length() + 1);
+ final int i;
+ if (name.startsWith(HdfsConstants.DOT_SNAPSHOT_DIR + Path.SEPARATOR)) {
+ i = 0;
+ } else if (name.startsWith(
+ HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR + Path.SEPARATOR)) {
+ i = 1;
+ } else {
+ return name;
}
- return name;
+
+ // get the snapshot name
+ return name.substring(i + HdfsConstants.DOT_SNAPSHOT_DIR.length() + 1);
}
public static void main(String[] argv) throws IOException {
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java?rev=1468238&r1=1468237&r2=1468238&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java Mon Apr 15 21:42:06 2013
@@ -33,8 +33,10 @@ import org.apache.hadoop.fs.permission.P
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.FSLimitException.IllegalNameException;
import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException;
import org.apache.hadoop.hdfs.protocol.FSLimitException.PathComponentTooLongException;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.junit.Before;
import org.junit.Test;
@@ -104,6 +106,7 @@ public class TestFsLimits {
addChildWithName("333", null);
addChildWithName("4444", null);
addChildWithName("55555", null);
+ addChildWithName(HdfsConstants.DOT_SNAPSHOT_DIR, IllegalNameException.class);
}
@Test
@@ -143,6 +146,7 @@ public class TestFsLimits {
conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY, 2);
fsIsReady = false;
+ addChildWithName(HdfsConstants.DOT_SNAPSHOT_DIR, IllegalNameException.class);
addChildWithName("1", null);
addChildWithName("22", null);
addChildWithName("333", null);
@@ -159,7 +163,10 @@ public class TestFsLimits {
Class<?> generated = null;
try {
- fs.verifyFsLimits(inodes, 1, child);
+ fs.verifyMaxComponentLength(child.getLocalNameBytes(), inodes, 1);
+ fs.verifyMaxDirItems(inodes, 1);
+ fs.verifyINodeName(child.getLocalNameBytes());
+
rootInode.addChild(child);
} catch (QuotaExceededException e) {
generated = e.getClass();
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored?rev=1468238&r1=1468237&r2=1468238&view=diff
==============================================================================
Binary files - no diff available.
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml?rev=1468238&r1=1468237&r2=1468238&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml Mon Apr 15 21:42:06 2013
@@ -1,6 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?>
<EDITS>
- <EDITS_VERSION>-42</EDITS_VERSION>
+ <EDITS_VERSION>-43</EDITS_VERSION>
<RECORD>
<OPCODE>OP_START_LOG_SEGMENT</OPCODE>
<DATA>
@@ -13,8 +13,8 @@
<TXID>2</TXID>
<DELEGATION_KEY>
<KEY_ID>1</KEY_ID>
- <EXPIRY_DATE>1358580908163</EXPIRY_DATE>
- <KEY>96eeb98b6abe3660</KEY>
+ <EXPIRY_DATE>1366591664956</EXPIRY_DATE>
+ <KEY>f80c8ce0a9ff77d5</KEY>
</DELEGATION_KEY>
</DATA>
</RECORD>
@@ -24,33 +24,26 @@
<TXID>3</TXID>
<DELEGATION_KEY>
<KEY_ID>2</KEY_ID>
- <EXPIRY_DATE>1358580908166</EXPIRY_DATE>
- <KEY>975b880e8601c22e</KEY>
+ <EXPIRY_DATE>1366591664958</EXPIRY_DATE>
+ <KEY>75e0c5176b531b18</KEY>
</DELEGATION_KEY>
</DATA>
</RECORD>
<RECORD>
- <OPCODE>OP_SET_GENSTAMP</OPCODE>
- <DATA>
- <TXID>4</TXID>
- <GENSTAMP>1001</GENSTAMP>
- </DATA>
- </RECORD>
- <RECORD>
<OPCODE>OP_ADD</OPCODE>
<DATA>
- <TXID>5</TXID>
+ <TXID>4</TXID>
<LENGTH>0</LENGTH>
<INODEID>1002</INODEID>
- <PATH>/file_create</PATH>
+ <PATH>/file_create_u\0001;F431</PATH>
<REPLICATION>1</REPLICATION>
- <MTIME>1357889709370</MTIME>
- <ATIME>1357889709370</ATIME>
+ <MTIME>1365900465930</MTIME>
+ <ATIME>1365900465930</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
- <CLIENT_NAME>DFSClient_NONMAPREDUCE_-1898631685_1</CLIENT_NAME>
+ <CLIENT_NAME>DFSClient_NONMAPREDUCE_724142360_1</CLIENT_NAME>
<CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
<PERMISSION_STATUS>
- <USERNAME>jing</USERNAME>
+ <USERNAME>szetszwo</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE>
</PERMISSION_STATUS>
@@ -59,18 +52,18 @@
<RECORD>
<OPCODE>OP_CLOSE</OPCODE>
<DATA>
- <TXID>6</TXID>
+ <TXID>5</TXID>
<LENGTH>0</LENGTH>
<INODEID>0</INODEID>
- <PATH>/file_create</PATH>
+ <PATH>/file_create_u\0001;F431</PATH>
<REPLICATION>1</REPLICATION>
- <MTIME>1357889709398</MTIME>
- <ATIME>1357889709370</ATIME>
+ <MTIME>1365900465946</MTIME>
+ <ATIME>1365900465930</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
<CLIENT_NAME></CLIENT_NAME>
<CLIENT_MACHINE></CLIENT_MACHINE>
<PERMISSION_STATUS>
- <USERNAME>jing</USERNAME>
+ <USERNAME>szetszwo</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE>
</PERMISSION_STATUS>
@@ -79,32 +72,32 @@
<RECORD>
<OPCODE>OP_RENAME_OLD</OPCODE>
<DATA>
- <TXID>7</TXID>
+ <TXID>6</TXID>
<LENGTH>0</LENGTH>
- <SRC>/file_create</SRC>
+ <SRC>/file_create_u\0001;F431</SRC>
<DST>/file_moved</DST>
- <TIMESTAMP>1357889709400</TIMESTAMP>
+ <TIMESTAMP>1365900465949</TIMESTAMP>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_DELETE</OPCODE>
<DATA>
- <TXID>8</TXID>
+ <TXID>7</TXID>
<LENGTH>0</LENGTH>
<PATH>/file_moved</PATH>
- <TIMESTAMP>1357889709595</TIMESTAMP>
+ <TIMESTAMP>1365900465953</TIMESTAMP>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_MKDIR</OPCODE>
<DATA>
- <TXID>9</TXID>
+ <TXID>8</TXID>
<LENGTH>0</LENGTH>
<INODEID>1003</INODEID>
<PATH>/directory_mkdir</PATH>
- <TIMESTAMP>1357889709598</TIMESTAMP>
+ <TIMESTAMP>1365900465956</TIMESTAMP>
<PERMISSION_STATUS>
- <USERNAME>jing</USERNAME>
+ <USERNAME>szetszwo</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>493</MODE>
</PERMISSION_STATUS>
@@ -113,28 +106,28 @@
<RECORD>
<OPCODE>OP_ALLOW_SNAPSHOT</OPCODE>
<DATA>
- <TXID>10</TXID>
+ <TXID>9</TXID>
<SNAPSHOTROOT>/directory_mkdir</SNAPSHOTROOT>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_DISALLOW_SNAPSHOT</OPCODE>
<DATA>
- <TXID>11</TXID>
+ <TXID>10</TXID>
<SNAPSHOTROOT>/directory_mkdir</SNAPSHOTROOT>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_ALLOW_SNAPSHOT</OPCODE>
<DATA>
- <TXID>12</TXID>
+ <TXID>11</TXID>
<SNAPSHOTROOT>/directory_mkdir</SNAPSHOTROOT>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_CREATE_SNAPSHOT</OPCODE>
<DATA>
- <TXID>13</TXID>
+ <TXID>12</TXID>
<SNAPSHOTROOT>/directory_mkdir</SNAPSHOTROOT>
<SNAPSHOTNAME>snapshot1</SNAPSHOTNAME>
</DATA>
@@ -142,7 +135,7 @@
<RECORD>
<OPCODE>OP_RENAME_SNAPSHOT</OPCODE>
<DATA>
- <TXID>14</TXID>
+ <TXID>13</TXID>
<SNAPSHOTROOT>/directory_mkdir</SNAPSHOTROOT>
<SNAPSHOTOLDNAME>snapshot1</SNAPSHOTOLDNAME>
<SNAPSHOTNEWNAME>snapshot2</SNAPSHOTNEWNAME>
@@ -151,33 +144,26 @@
<RECORD>
<OPCODE>OP_DELETE_SNAPSHOT</OPCODE>
<DATA>
- <TXID>15</TXID>
+ <TXID>14</TXID>
<SNAPSHOTROOT>/directory_mkdir</SNAPSHOTROOT>
<SNAPSHOTNAME>snapshot2</SNAPSHOTNAME>
</DATA>
</RECORD>
<RECORD>
- <OPCODE>OP_SET_GENSTAMP</OPCODE>
- <DATA>
- <TXID>16</TXID>
- <GENSTAMP>1002</GENSTAMP>
- </DATA>
- </RECORD>
- <RECORD>
<OPCODE>OP_ADD</OPCODE>
<DATA>
- <TXID>17</TXID>
+ <TXID>15</TXID>
<LENGTH>0</LENGTH>
<INODEID>1004</INODEID>
- <PATH>/file_create</PATH>
+ <PATH>/file_create_u\0001;F431</PATH>
<REPLICATION>1</REPLICATION>
- <MTIME>1357889709615</MTIME>
- <ATIME>1357889709615</ATIME>
+ <MTIME>1365900465976</MTIME>
+ <ATIME>1365900465976</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
- <CLIENT_NAME>DFSClient_NONMAPREDUCE_-1898631685_1</CLIENT_NAME>
+ <CLIENT_NAME>DFSClient_NONMAPREDUCE_724142360_1</CLIENT_NAME>
<CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
<PERMISSION_STATUS>
- <USERNAME>jing</USERNAME>
+ <USERNAME>szetszwo</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE>
</PERMISSION_STATUS>
@@ -186,18 +172,18 @@
<RECORD>
<OPCODE>OP_CLOSE</OPCODE>
<DATA>
- <TXID>18</TXID>
+ <TXID>16</TXID>
<LENGTH>0</LENGTH>
<INODEID>0</INODEID>
- <PATH>/file_create</PATH>
+ <PATH>/file_create_u\0001;F431</PATH>
<REPLICATION>1</REPLICATION>
- <MTIME>1357889709616</MTIME>
- <ATIME>1357889709615</ATIME>
+ <MTIME>1365900465978</MTIME>
+ <ATIME>1365900465976</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
<CLIENT_NAME></CLIENT_NAME>
<CLIENT_MACHINE></CLIENT_MACHINE>
<PERMISSION_STATUS>
- <USERNAME>jing</USERNAME>
+ <USERNAME>szetszwo</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE>
</PERMISSION_STATUS>
@@ -206,33 +192,33 @@
<RECORD>
<OPCODE>OP_SET_REPLICATION</OPCODE>
<DATA>
- <TXID>19</TXID>
- <PATH>/file_create</PATH>
+ <TXID>17</TXID>
+ <PATH>/file_create_u\0001;F431</PATH>
<REPLICATION>1</REPLICATION>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_SET_PERMISSIONS</OPCODE>
<DATA>
- <TXID>20</TXID>
- <SRC>/file_create</SRC>
+ <TXID>18</TXID>
+ <SRC>/file_create_u\0001;F431</SRC>
<MODE>511</MODE>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_SET_OWNER</OPCODE>
<DATA>
- <TXID>21</TXID>
- <SRC>/file_create</SRC>
+ <TXID>19</TXID>
+ <SRC>/file_create_u\0001;F431</SRC>
<USERNAME>newOwner</USERNAME>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_TIMES</OPCODE>
<DATA>
- <TXID>22</TXID>
+ <TXID>20</TXID>
<LENGTH>0</LENGTH>
- <PATH>/file_create</PATH>
+ <PATH>/file_create_u\0001;F431</PATH>
<MTIME>1285195527000</MTIME>
<ATIME>1285195527000</ATIME>
</DATA>
@@ -240,7 +226,7 @@
<RECORD>
<OPCODE>OP_SET_QUOTA</OPCODE>
<DATA>
- <TXID>23</TXID>
+ <TXID>21</TXID>
<SRC>/directory_mkdir</SRC>
<NSQUOTA>1000</NSQUOTA>
<DSQUOTA>-1</DSQUOTA>
@@ -249,36 +235,29 @@
<RECORD>
<OPCODE>OP_RENAME</OPCODE>
<DATA>
- <TXID>24</TXID>
+ <TXID>22</TXID>
<LENGTH>0</LENGTH>
- <SRC>/file_create</SRC>
+ <SRC>/file_create_u\0001;F431</SRC>
<DST>/file_moved</DST>
- <TIMESTAMP>1357889709627</TIMESTAMP>
+ <TIMESTAMP>1365900465991</TIMESTAMP>
<OPTIONS>NONE</OPTIONS>
</DATA>
</RECORD>
<RECORD>
- <OPCODE>OP_SET_GENSTAMP</OPCODE>
- <DATA>
- <TXID>25</TXID>
- <GENSTAMP>1003</GENSTAMP>
- </DATA>
- </RECORD>
- <RECORD>
<OPCODE>OP_ADD</OPCODE>
<DATA>
- <TXID>26</TXID>
+ <TXID>23</TXID>
<LENGTH>0</LENGTH>
<INODEID>1005</INODEID>
<PATH>/file_concat_target</PATH>
<REPLICATION>1</REPLICATION>
- <MTIME>1357889709631</MTIME>
- <ATIME>1357889709631</ATIME>
+ <MTIME>1365900465996</MTIME>
+ <ATIME>1365900465996</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
- <CLIENT_NAME>DFSClient_NONMAPREDUCE_-1898631685_1</CLIENT_NAME>
+ <CLIENT_NAME>DFSClient_NONMAPREDUCE_724142360_1</CLIENT_NAME>
<CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
<PERMISSION_STATUS>
- <USERNAME>jing</USERNAME>
+ <USERNAME>szetszwo</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE>
</PERMISSION_STATUS>
@@ -287,132 +266,125 @@
<RECORD>
<OPCODE>OP_SET_GENSTAMP</OPCODE>
<DATA>
- <TXID>27</TXID>
- <GENSTAMP>1004</GENSTAMP>
+ <TXID>24</TXID>
+ <GENSTAMP>1001</GENSTAMP>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_UPDATE_BLOCKS</OPCODE>
<DATA>
- <TXID>28</TXID>
+ <TXID>25</TXID>
<PATH>/file_concat_target</PATH>
<BLOCK>
- <BLOCK_ID>2251442935629307058</BLOCK_ID>
+ <BLOCK_ID>7730270391831370404</BLOCK_ID>
<NUM_BYTES>0</NUM_BYTES>
- <GENSTAMP>1004</GENSTAMP>
+ <GENSTAMP>1001</GENSTAMP>
</BLOCK>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_SET_GENSTAMP</OPCODE>
<DATA>
- <TXID>29</TXID>
- <GENSTAMP>1005</GENSTAMP>
+ <TXID>26</TXID>
+ <GENSTAMP>1002</GENSTAMP>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_UPDATE_BLOCKS</OPCODE>
<DATA>
- <TXID>30</TXID>
+ <TXID>27</TXID>
<PATH>/file_concat_target</PATH>
<BLOCK>
- <BLOCK_ID>2251442935629307058</BLOCK_ID>
+ <BLOCK_ID>7730270391831370404</BLOCK_ID>
<NUM_BYTES>512</NUM_BYTES>
- <GENSTAMP>1004</GENSTAMP>
+ <GENSTAMP>1001</GENSTAMP>
</BLOCK>
<BLOCK>
- <BLOCK_ID>-2982568627579528939</BLOCK_ID>
+ <BLOCK_ID>7070364572574548346</BLOCK_ID>
<NUM_BYTES>0</NUM_BYTES>
- <GENSTAMP>1005</GENSTAMP>
+ <GENSTAMP>1002</GENSTAMP>
</BLOCK>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_SET_GENSTAMP</OPCODE>
<DATA>
- <TXID>31</TXID>
- <GENSTAMP>1006</GENSTAMP>
+ <TXID>28</TXID>
+ <GENSTAMP>1003</GENSTAMP>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_UPDATE_BLOCKS</OPCODE>
<DATA>
- <TXID>32</TXID>
+ <TXID>29</TXID>
<PATH>/file_concat_target</PATH>
<BLOCK>
- <BLOCK_ID>2251442935629307058</BLOCK_ID>
+ <BLOCK_ID>7730270391831370404</BLOCK_ID>
<NUM_BYTES>512</NUM_BYTES>
- <GENSTAMP>1004</GENSTAMP>
+ <GENSTAMP>1001</GENSTAMP>
</BLOCK>
<BLOCK>
- <BLOCK_ID>-2982568627579528939</BLOCK_ID>
+ <BLOCK_ID>7070364572574548346</BLOCK_ID>
<NUM_BYTES>512</NUM_BYTES>
- <GENSTAMP>1005</GENSTAMP>
+ <GENSTAMP>1002</GENSTAMP>
</BLOCK>
<BLOCK>
- <BLOCK_ID>4155042801019283737</BLOCK_ID>
+ <BLOCK_ID>-2436647467986907584</BLOCK_ID>
<NUM_BYTES>0</NUM_BYTES>
- <GENSTAMP>1006</GENSTAMP>
+ <GENSTAMP>1003</GENSTAMP>
</BLOCK>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_CLOSE</OPCODE>
<DATA>
- <TXID>33</TXID>
+ <TXID>30</TXID>
<LENGTH>0</LENGTH>
<INODEID>0</INODEID>
<PATH>/file_concat_target</PATH>
<REPLICATION>1</REPLICATION>
- <MTIME>1357889709689</MTIME>
- <ATIME>1357889709631</ATIME>
+ <MTIME>1365900466070</MTIME>
+ <ATIME>1365900465996</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
<CLIENT_NAME></CLIENT_NAME>
<CLIENT_MACHINE></CLIENT_MACHINE>
<BLOCK>
- <BLOCK_ID>2251442935629307058</BLOCK_ID>
+ <BLOCK_ID>7730270391831370404</BLOCK_ID>
<NUM_BYTES>512</NUM_BYTES>
- <GENSTAMP>1004</GENSTAMP>
+ <GENSTAMP>1001</GENSTAMP>
</BLOCK>
<BLOCK>
- <BLOCK_ID>-2982568627579528939</BLOCK_ID>
+ <BLOCK_ID>7070364572574548346</BLOCK_ID>
<NUM_BYTES>512</NUM_BYTES>
- <GENSTAMP>1005</GENSTAMP>
+ <GENSTAMP>1002</GENSTAMP>
</BLOCK>
<BLOCK>
- <BLOCK_ID>4155042801019283737</BLOCK_ID>
+ <BLOCK_ID>-2436647467986907584</BLOCK_ID>
<NUM_BYTES>512</NUM_BYTES>
- <GENSTAMP>1006</GENSTAMP>
+ <GENSTAMP>1003</GENSTAMP>
</BLOCK>
<PERMISSION_STATUS>
- <USERNAME>jing</USERNAME>
+ <USERNAME>szetszwo</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE>
</PERMISSION_STATUS>
</DATA>
</RECORD>
<RECORD>
- <OPCODE>OP_SET_GENSTAMP</OPCODE>
- <DATA>
- <TXID>34</TXID>
- <GENSTAMP>1007</GENSTAMP>
- </DATA>
- </RECORD>
- <RECORD>
<OPCODE>OP_ADD</OPCODE>
<DATA>
- <TXID>35</TXID>
+ <TXID>31</TXID>
<LENGTH>0</LENGTH>
<INODEID>1006</INODEID>
<PATH>/file_concat_0</PATH>
<REPLICATION>1</REPLICATION>
- <MTIME>1357889709692</MTIME>
- <ATIME>1357889709692</ATIME>
+ <MTIME>1365900466074</MTIME>
+ <ATIME>1365900466074</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
- <CLIENT_NAME>DFSClient_NONMAPREDUCE_-1898631685_1</CLIENT_NAME>
+ <CLIENT_NAME>DFSClient_NONMAPREDUCE_724142360_1</CLIENT_NAME>
<CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
<PERMISSION_STATUS>
- <USERNAME>jing</USERNAME>
+ <USERNAME>szetszwo</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE>
</PERMISSION_STATUS>
@@ -421,132 +393,125 @@
<RECORD>
<OPCODE>OP_SET_GENSTAMP</OPCODE>
<DATA>
- <TXID>36</TXID>
- <GENSTAMP>1008</GENSTAMP>
+ <TXID>32</TXID>
+ <GENSTAMP>1004</GENSTAMP>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_UPDATE_BLOCKS</OPCODE>
<DATA>
- <TXID>37</TXID>
+ <TXID>33</TXID>
<PATH>/file_concat_0</PATH>
<BLOCK>
- <BLOCK_ID>-1610317934607606165</BLOCK_ID>
+ <BLOCK_ID>-8902070029031700083</BLOCK_ID>
<NUM_BYTES>0</NUM_BYTES>
- <GENSTAMP>1008</GENSTAMP>
+ <GENSTAMP>1004</GENSTAMP>
</BLOCK>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_SET_GENSTAMP</OPCODE>
<DATA>
- <TXID>38</TXID>
- <GENSTAMP>1009</GENSTAMP>
+ <TXID>34</TXID>
+ <GENSTAMP>1005</GENSTAMP>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_UPDATE_BLOCKS</OPCODE>
<DATA>
- <TXID>39</TXID>
+ <TXID>35</TXID>
<PATH>/file_concat_0</PATH>
<BLOCK>
- <BLOCK_ID>-1610317934607606165</BLOCK_ID>
+ <BLOCK_ID>-8902070029031700083</BLOCK_ID>
<NUM_BYTES>512</NUM_BYTES>
- <GENSTAMP>1008</GENSTAMP>
+ <GENSTAMP>1004</GENSTAMP>
</BLOCK>
<BLOCK>
- <BLOCK_ID>-8800031246891481946</BLOCK_ID>
+ <BLOCK_ID>1791253399175285670</BLOCK_ID>
<NUM_BYTES>0</NUM_BYTES>
- <GENSTAMP>1009</GENSTAMP>
+ <GENSTAMP>1005</GENSTAMP>
</BLOCK>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_SET_GENSTAMP</OPCODE>
<DATA>
- <TXID>40</TXID>
- <GENSTAMP>1010</GENSTAMP>
+ <TXID>36</TXID>
+ <GENSTAMP>1006</GENSTAMP>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_UPDATE_BLOCKS</OPCODE>
<DATA>
- <TXID>41</TXID>
+ <TXID>37</TXID>
<PATH>/file_concat_0</PATH>
<BLOCK>
- <BLOCK_ID>-1610317934607606165</BLOCK_ID>
+ <BLOCK_ID>-8902070029031700083</BLOCK_ID>
<NUM_BYTES>512</NUM_BYTES>
- <GENSTAMP>1008</GENSTAMP>
+ <GENSTAMP>1004</GENSTAMP>
</BLOCK>
<BLOCK>
- <BLOCK_ID>-8800031246891481946</BLOCK_ID>
+ <BLOCK_ID>1791253399175285670</BLOCK_ID>
<NUM_BYTES>512</NUM_BYTES>
- <GENSTAMP>1009</GENSTAMP>
+ <GENSTAMP>1005</GENSTAMP>
</BLOCK>
<BLOCK>
- <BLOCK_ID>5107221616119360155</BLOCK_ID>
+ <BLOCK_ID>3333415502075331416</BLOCK_ID>
<NUM_BYTES>0</NUM_BYTES>
- <GENSTAMP>1010</GENSTAMP>
+ <GENSTAMP>1006</GENSTAMP>
</BLOCK>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_CLOSE</OPCODE>
<DATA>
- <TXID>42</TXID>
+ <TXID>38</TXID>
<LENGTH>0</LENGTH>
<INODEID>0</INODEID>
<PATH>/file_concat_0</PATH>
<REPLICATION>1</REPLICATION>
- <MTIME>1357889709710</MTIME>
- <ATIME>1357889709692</ATIME>
+ <MTIME>1365900466094</MTIME>
+ <ATIME>1365900466074</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
<CLIENT_NAME></CLIENT_NAME>
<CLIENT_MACHINE></CLIENT_MACHINE>
<BLOCK>
- <BLOCK_ID>-1610317934607606165</BLOCK_ID>
+ <BLOCK_ID>-8902070029031700083</BLOCK_ID>
<NUM_BYTES>512</NUM_BYTES>
- <GENSTAMP>1008</GENSTAMP>
+ <GENSTAMP>1004</GENSTAMP>
</BLOCK>
<BLOCK>
- <BLOCK_ID>-8800031246891481946</BLOCK_ID>
+ <BLOCK_ID>1791253399175285670</BLOCK_ID>
<NUM_BYTES>512</NUM_BYTES>
- <GENSTAMP>1009</GENSTAMP>
+ <GENSTAMP>1005</GENSTAMP>
</BLOCK>
<BLOCK>
- <BLOCK_ID>5107221616119360155</BLOCK_ID>
+ <BLOCK_ID>3333415502075331416</BLOCK_ID>
<NUM_BYTES>512</NUM_BYTES>
- <GENSTAMP>1010</GENSTAMP>
+ <GENSTAMP>1006</GENSTAMP>
</BLOCK>
<PERMISSION_STATUS>
- <USERNAME>jing</USERNAME>
+ <USERNAME>szetszwo</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE>
</PERMISSION_STATUS>
</DATA>
</RECORD>
<RECORD>
- <OPCODE>OP_SET_GENSTAMP</OPCODE>
- <DATA>
- <TXID>43</TXID>
- <GENSTAMP>1011</GENSTAMP>
- </DATA>
- </RECORD>
- <RECORD>
<OPCODE>OP_ADD</OPCODE>
<DATA>
- <TXID>44</TXID>
+ <TXID>39</TXID>
<LENGTH>0</LENGTH>
<INODEID>1007</INODEID>
<PATH>/file_concat_1</PATH>
<REPLICATION>1</REPLICATION>
- <MTIME>1357889709713</MTIME>
- <ATIME>1357889709713</ATIME>
+ <MTIME>1365900466097</MTIME>
+ <ATIME>1365900466097</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
- <CLIENT_NAME>DFSClient_NONMAPREDUCE_-1898631685_1</CLIENT_NAME>
+ <CLIENT_NAME>DFSClient_NONMAPREDUCE_724142360_1</CLIENT_NAME>
<CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
<PERMISSION_STATUS>
- <USERNAME>jing</USERNAME>
+ <USERNAME>szetszwo</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE>
</PERMISSION_STATUS>
@@ -555,105 +520,105 @@
<RECORD>
<OPCODE>OP_SET_GENSTAMP</OPCODE>
<DATA>
- <TXID>45</TXID>
- <GENSTAMP>1012</GENSTAMP>
+ <TXID>40</TXID>
+ <GENSTAMP>1007</GENSTAMP>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_UPDATE_BLOCKS</OPCODE>
<DATA>
- <TXID>46</TXID>
+ <TXID>41</TXID>
<PATH>/file_concat_1</PATH>
<BLOCK>
- <BLOCK_ID>-8774815971566494617</BLOCK_ID>
+ <BLOCK_ID>-406914295015578364</BLOCK_ID>
<NUM_BYTES>0</NUM_BYTES>
- <GENSTAMP>1012</GENSTAMP>
+ <GENSTAMP>1007</GENSTAMP>
</BLOCK>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_SET_GENSTAMP</OPCODE>
<DATA>
- <TXID>47</TXID>
- <GENSTAMP>1013</GENSTAMP>
+ <TXID>42</TXID>
+ <GENSTAMP>1008</GENSTAMP>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_UPDATE_BLOCKS</OPCODE>
<DATA>
- <TXID>48</TXID>
+ <TXID>43</TXID>
<PATH>/file_concat_1</PATH>
<BLOCK>
- <BLOCK_ID>-8774815971566494617</BLOCK_ID>
+ <BLOCK_ID>-406914295015578364</BLOCK_ID>
<NUM_BYTES>512</NUM_BYTES>
- <GENSTAMP>1012</GENSTAMP>
+ <GENSTAMP>1007</GENSTAMP>
</BLOCK>
<BLOCK>
- <BLOCK_ID>-7548224044764905612</BLOCK_ID>
+ <BLOCK_ID>208049244517243116</BLOCK_ID>
<NUM_BYTES>0</NUM_BYTES>
- <GENSTAMP>1013</GENSTAMP>
+ <GENSTAMP>1008</GENSTAMP>
</BLOCK>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_SET_GENSTAMP</OPCODE>
<DATA>
- <TXID>49</TXID>
- <GENSTAMP>1014</GENSTAMP>
+ <TXID>44</TXID>
+ <GENSTAMP>1009</GENSTAMP>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_UPDATE_BLOCKS</OPCODE>
<DATA>
- <TXID>50</TXID>
+ <TXID>45</TXID>
<PATH>/file_concat_1</PATH>
<BLOCK>
- <BLOCK_ID>-8774815971566494617</BLOCK_ID>
+ <BLOCK_ID>-406914295015578364</BLOCK_ID>
<NUM_BYTES>512</NUM_BYTES>
- <GENSTAMP>1012</GENSTAMP>
+ <GENSTAMP>1007</GENSTAMP>
</BLOCK>
<BLOCK>
- <BLOCK_ID>-7548224044764905612</BLOCK_ID>
+ <BLOCK_ID>208049244517243116</BLOCK_ID>
<NUM_BYTES>512</NUM_BYTES>
- <GENSTAMP>1013</GENSTAMP>
+ <GENSTAMP>1008</GENSTAMP>
</BLOCK>
<BLOCK>
- <BLOCK_ID>-4765510052507711478</BLOCK_ID>
+ <BLOCK_ID>-1546331983133724845</BLOCK_ID>
<NUM_BYTES>0</NUM_BYTES>
- <GENSTAMP>1014</GENSTAMP>
+ <GENSTAMP>1009</GENSTAMP>
</BLOCK>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_CLOSE</OPCODE>
<DATA>
- <TXID>51</TXID>
+ <TXID>46</TXID>
<LENGTH>0</LENGTH>
<INODEID>0</INODEID>
<PATH>/file_concat_1</PATH>
<REPLICATION>1</REPLICATION>
- <MTIME>1357889709744</MTIME>
- <ATIME>1357889709713</ATIME>
+ <MTIME>1365900466121</MTIME>
+ <ATIME>1365900466097</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
<CLIENT_NAME></CLIENT_NAME>
<CLIENT_MACHINE></CLIENT_MACHINE>
<BLOCK>
- <BLOCK_ID>-8774815971566494617</BLOCK_ID>
+ <BLOCK_ID>-406914295015578364</BLOCK_ID>
<NUM_BYTES>512</NUM_BYTES>
- <GENSTAMP>1012</GENSTAMP>
+ <GENSTAMP>1007</GENSTAMP>
</BLOCK>
<BLOCK>
- <BLOCK_ID>-7548224044764905612</BLOCK_ID>
+ <BLOCK_ID>208049244517243116</BLOCK_ID>
<NUM_BYTES>512</NUM_BYTES>
- <GENSTAMP>1013</GENSTAMP>
+ <GENSTAMP>1008</GENSTAMP>
</BLOCK>
<BLOCK>
- <BLOCK_ID>-4765510052507711478</BLOCK_ID>
+ <BLOCK_ID>-1546331983133724845</BLOCK_ID>
<NUM_BYTES>512</NUM_BYTES>
- <GENSTAMP>1014</GENSTAMP>
+ <GENSTAMP>1009</GENSTAMP>
</BLOCK>
<PERMISSION_STATUS>
- <USERNAME>jing</USERNAME>
+ <USERNAME>szetszwo</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE>
</PERMISSION_STATUS>
@@ -662,10 +627,10 @@
<RECORD>
<OPCODE>OP_CONCAT_DELETE</OPCODE>
<DATA>
- <TXID>52</TXID>
+ <TXID>47</TXID>
<LENGTH>0</LENGTH>
<TRG>/file_concat_target</TRG>
- <TIMESTAMP>1357889709747</TIMESTAMP>
+ <TIMESTAMP>1365900466123</TIMESTAMP>
<SOURCES>
<SOURCE1>/file_concat_0</SOURCE1>
<SOURCE2>/file_concat_1</SOURCE2>
@@ -675,15 +640,15 @@
<RECORD>
<OPCODE>OP_SYMLINK</OPCODE>
<DATA>
- <TXID>53</TXID>
+ <TXID>48</TXID>
<LENGTH>0</LENGTH>
<INODEID>1008</INODEID>
<PATH>/file_symlink</PATH>
<VALUE>/file_concat_target</VALUE>
- <MTIME>1357889709751</MTIME>
- <ATIME>1357889709751</ATIME>
+ <MTIME>1365900466141</MTIME>
+ <ATIME>1365900466141</ATIME>
<PERMISSION_STATUS>
- <USERNAME>jing</USERNAME>
+ <USERNAME>szetszwo</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>511</MODE>
</PERMISSION_STATUS>
@@ -692,75 +657,68 @@
<RECORD>
<OPCODE>OP_GET_DELEGATION_TOKEN</OPCODE>
<DATA>
- <TXID>54</TXID>
+ <TXID>49</TXID>
<DELEGATION_TOKEN_IDENTIFIER>
<KIND>HDFS_DELEGATION_TOKEN</KIND>
<SEQUENCE_NUMBER>1</SEQUENCE_NUMBER>
- <OWNER>jing</OWNER>
+ <OWNER>szetszwo</OWNER>
<RENEWER>JobTracker</RENEWER>
<REALUSER></REALUSER>
- <ISSUE_DATE>1357889709754</ISSUE_DATE>
- <MAX_DATE>1358494509754</MAX_DATE>
+ <ISSUE_DATE>1365900466144</ISSUE_DATE>
+ <MAX_DATE>1366505266144</MAX_DATE>
<MASTER_KEY_ID>2</MASTER_KEY_ID>
</DELEGATION_TOKEN_IDENTIFIER>
- <EXPIRY_TIME>1357976109754</EXPIRY_TIME>
+ <EXPIRY_TIME>1365986866144</EXPIRY_TIME>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_RENEW_DELEGATION_TOKEN</OPCODE>
<DATA>
- <TXID>55</TXID>
+ <TXID>50</TXID>
<DELEGATION_TOKEN_IDENTIFIER>
<KIND>HDFS_DELEGATION_TOKEN</KIND>
<SEQUENCE_NUMBER>1</SEQUENCE_NUMBER>
- <OWNER>jing</OWNER>
+ <OWNER>szetszwo</OWNER>
<RENEWER>JobTracker</RENEWER>
<REALUSER></REALUSER>
- <ISSUE_DATE>1357889709754</ISSUE_DATE>
- <MAX_DATE>1358494509754</MAX_DATE>
+ <ISSUE_DATE>1365900466144</ISSUE_DATE>
+ <MAX_DATE>1366505266144</MAX_DATE>
<MASTER_KEY_ID>2</MASTER_KEY_ID>
</DELEGATION_TOKEN_IDENTIFIER>
- <EXPIRY_TIME>1357976109820</EXPIRY_TIME>
+ <EXPIRY_TIME>1365986866231</EXPIRY_TIME>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_CANCEL_DELEGATION_TOKEN</OPCODE>
<DATA>
- <TXID>56</TXID>
+ <TXID>51</TXID>
<DELEGATION_TOKEN_IDENTIFIER>
<KIND>HDFS_DELEGATION_TOKEN</KIND>
<SEQUENCE_NUMBER>1</SEQUENCE_NUMBER>
- <OWNER>jing</OWNER>
+ <OWNER>szetszwo</OWNER>
<RENEWER>JobTracker</RENEWER>
<REALUSER></REALUSER>
- <ISSUE_DATE>1357889709754</ISSUE_DATE>
- <MAX_DATE>1358494509754</MAX_DATE>
+ <ISSUE_DATE>1365900466144</ISSUE_DATE>
+ <MAX_DATE>1366505266144</MAX_DATE>
<MASTER_KEY_ID>2</MASTER_KEY_ID>
</DELEGATION_TOKEN_IDENTIFIER>
</DATA>
</RECORD>
<RECORD>
- <OPCODE>OP_SET_GENSTAMP</OPCODE>
- <DATA>
- <TXID>57</TXID>
- <GENSTAMP>1015</GENSTAMP>
- </DATA>
- </RECORD>
- <RECORD>
<OPCODE>OP_ADD</OPCODE>
<DATA>
- <TXID>58</TXID>
+ <TXID>52</TXID>
<LENGTH>0</LENGTH>
<INODEID>1009</INODEID>
<PATH>/hard-lease-recovery-test</PATH>
<REPLICATION>1</REPLICATION>
- <MTIME>1357889709825</MTIME>
- <ATIME>1357889709825</ATIME>
+ <MTIME>1365900466237</MTIME>
+ <ATIME>1365900466237</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
- <CLIENT_NAME>DFSClient_NONMAPREDUCE_-1898631685_1</CLIENT_NAME>
+ <CLIENT_NAME>DFSClient_NONMAPREDUCE_724142360_1</CLIENT_NAME>
<CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
<PERMISSION_STATUS>
- <USERNAME>jing</USERNAME>
+ <USERNAME>szetszwo</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE>
</PERMISSION_STATUS>
@@ -769,46 +727,46 @@
<RECORD>
<OPCODE>OP_SET_GENSTAMP</OPCODE>
<DATA>
- <TXID>59</TXID>
- <GENSTAMP>1016</GENSTAMP>
+ <TXID>53</TXID>
+ <GENSTAMP>1010</GENSTAMP>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_UPDATE_BLOCKS</OPCODE>
<DATA>
- <TXID>60</TXID>
+ <TXID>54</TXID>
<PATH>/hard-lease-recovery-test</PATH>
<BLOCK>
- <BLOCK_ID>2307430723798727721</BLOCK_ID>
+ <BLOCK_ID>-8246064927003717498</BLOCK_ID>
<NUM_BYTES>0</NUM_BYTES>
- <GENSTAMP>1016</GENSTAMP>
+ <GENSTAMP>1010</GENSTAMP>
</BLOCK>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_UPDATE_BLOCKS</OPCODE>
<DATA>
- <TXID>61</TXID>
+ <TXID>55</TXID>
<PATH>/hard-lease-recovery-test</PATH>
<BLOCK>
- <BLOCK_ID>2307430723798727721</BLOCK_ID>
+ <BLOCK_ID>-8246064927003717498</BLOCK_ID>
<NUM_BYTES>0</NUM_BYTES>
- <GENSTAMP>1016</GENSTAMP>
+ <GENSTAMP>1010</GENSTAMP>
</BLOCK>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_SET_GENSTAMP</OPCODE>
<DATA>
- <TXID>62</TXID>
- <GENSTAMP>1017</GENSTAMP>
+ <TXID>56</TXID>
+ <GENSTAMP>1011</GENSTAMP>
</DATA>
</RECORD>
<RECORD>
<OPCODE>OP_REASSIGN_LEASE</OPCODE>
<DATA>
- <TXID>63</TXID>
- <LEASEHOLDER>DFSClient_NONMAPREDUCE_-1898631685_1</LEASEHOLDER>
+ <TXID>57</TXID>
+ <LEASEHOLDER>DFSClient_NONMAPREDUCE_724142360_1</LEASEHOLDER>
<PATH>/hard-lease-recovery-test</PATH>
<NEWHOLDER>HDFS_NameNode</NEWHOLDER>
</DATA>
@@ -816,23 +774,23 @@
<RECORD>
<OPCODE>OP_CLOSE</OPCODE>
<DATA>
- <TXID>64</TXID>
+ <TXID>58</TXID>
<LENGTH>0</LENGTH>
<INODEID>0</INODEID>
<PATH>/hard-lease-recovery-test</PATH>
<REPLICATION>1</REPLICATION>
- <MTIME>1357889712257</MTIME>
- <ATIME>1357889709825</ATIME>
+ <MTIME>1365900468855</MTIME>
+ <ATIME>1365900466237</ATIME>
<BLOCKSIZE>512</BLOCKSIZE>
<CLIENT_NAME></CLIENT_NAME>
<CLIENT_MACHINE></CLIENT_MACHINE>
<BLOCK>
- <BLOCK_ID>2307430723798727721</BLOCK_ID>
+ <BLOCK_ID>-8246064927003717498</BLOCK_ID>
<NUM_BYTES>11</NUM_BYTES>
- <GENSTAMP>1017</GENSTAMP>
+ <GENSTAMP>1011</GENSTAMP>
</BLOCK>
<PERMISSION_STATUS>
- <USERNAME>jing</USERNAME>
+ <USERNAME>szetszwo</USERNAME>
<GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE>
</PERMISSION_STATUS>
@@ -841,7 +799,7 @@
<RECORD>
<OPCODE>OP_END_LOG_SEGMENT</OPCODE>
<DATA>
- <TXID>65</TXID>
+ <TXID>59</TXID>
</DATA>
</RECORD>
</EDITS>