You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by in...@apache.org on 2019/11/12 18:54:17 UTC
[hadoop] branch trunk updated: HDFS-14922. Prevent snapshot
modification time got change on startup. Contributed by hemanthboyina.
This is an automated email from the ASF dual-hosted git repository.
inigoiri pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git
The following commit(s) were added to refs/heads/trunk by this push:
new 40150da HDFS-14922. Prevent snapshot modification time got change on startup. Contributed by hemanthboyina.
40150da is described below
commit 40150da1e12a41c2e774fe2a277ddc3988bed239
Author: Inigo Goiri <in...@apache.org>
AuthorDate: Tue Nov 12 10:53:54 2019 -0800
HDFS-14922. Prevent snapshot modification time got change on startup. Contributed by hemanthboyina.
---
.../hadoop/hdfs/server/namenode/FSDirSnapshotOp.java | 7 +++++--
.../hadoop/hdfs/server/namenode/FSEditLog.java | 15 ++++++++++++---
.../hadoop/hdfs/server/namenode/FSEditLogLoader.java | 3 ++-
.../hadoop/hdfs/server/namenode/FSEditLogOp.java | 20 ++++++++++++++++++--
.../hadoop/hdfs/server/namenode/INodeDirectory.java | 9 +++++++--
.../snapshot/DirectorySnapshottableFeature.java | 14 ++++++++++----
.../server/namenode/snapshot/SnapshotManager.java | 6 ++++--
.../hdfs/server/namenode/snapshot/TestSnapshot.java | 16 ++++++++++++++++
.../namenode/snapshot/TestSnapshotManager.java | 9 ++++++---
9 files changed, 80 insertions(+), 19 deletions(-)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSnapshotOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSnapshotOp.java
index 4a72f54..49020e5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSnapshotOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSnapshotOp.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotManager;
import org.apache.hadoop.hdfs.util.ReadOnlyList;
import org.apache.hadoop.util.ChunkedArrayList;
+import org.apache.hadoop.util.Time;
import java.io.IOException;
import java.util.ArrayList;
@@ -104,16 +105,18 @@ class FSDirSnapshotOp {
String snapshotPath;
verifySnapshotName(fsd, snapshotName, snapshotRoot);
+ // time of snapshot creation
+ final long now = Time.now();
fsd.writeLock();
try {
snapshotPath = snapshotManager.createSnapshot(
fsd.getFSNamesystem().getLeaseManager(),
- iip, snapshotRoot, snapshotName);
+ iip, snapshotRoot, snapshotName, now);
} finally {
fsd.writeUnlock();
}
fsd.getEditLog().logCreateSnapshot(snapshotRoot, snapshotName,
- logRetryCache);
+ logRetryCache, now);
return snapshotPath;
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
index a76f5b4..ae7101d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
@@ -1116,10 +1116,19 @@ public class FSEditLog implements LogsPurgeable {
.setNewHolder(newHolder);
logEdit(op);
}
-
- void logCreateSnapshot(String snapRoot, String snapName, boolean toLogRpcIds) {
+
+ /**
+ * Log that a snapshot is created.
+ * @param snapRoot Root of the snapshot.
+ * @param snapName Name of the snapshot.
+ * @param toLogRpcIds If it is logging RPC ids.
+ * @param mtime The snapshot creation time set by Time.now().
+ */
+ void logCreateSnapshot(String snapRoot, String snapName, boolean toLogRpcIds,
+ long mtime) {
CreateSnapshotOp op = CreateSnapshotOp.getInstance(cache.get())
- .setSnapshotRoot(snapRoot).setSnapshotName(snapName);
+ .setSnapshotRoot(snapRoot).setSnapshotName(snapName)
+ .setSnapshotMTime(mtime);
logRpcIds(op, toLogRpcIds);
logEdit(op);
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index a8eb0dd..5ed869e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -801,7 +801,8 @@ public class FSEditLogLoader {
INodesInPath iip = fsDir.getINodesInPath(snapshotRoot, DirOp.WRITE);
String path = fsNamesys.getSnapshotManager().createSnapshot(
fsDir.getFSNamesystem().getLeaseManager(),
- iip, snapshotRoot, createSnapshotOp.snapshotName);
+ iip, snapshotRoot, createSnapshotOp.snapshotName,
+ createSnapshotOp.mtime);
if (toAddRetryCache) {
fsNamesys.addCacheEntryWithPayload(createSnapshotOp.rpcClientId,
createSnapshotOp.rpcCallId, path);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
index e278a33..e7f4dcb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
@@ -3437,6 +3437,8 @@ public abstract class FSEditLogOp {
static class CreateSnapshotOp extends FSEditLogOp {
String snapshotRoot;
String snapshotName;
+ /** Modification time of the edit set by Time.now(). */
+ long mtime;
public CreateSnapshotOp() {
super(OP_CREATE_SNAPSHOT);
@@ -3450,22 +3452,32 @@ public abstract class FSEditLogOp {
void resetSubFields() {
snapshotRoot = null;
snapshotName = null;
+ mtime = 0L;
}
+ /* set the name of the snapshot. */
CreateSnapshotOp setSnapshotName(String snapName) {
this.snapshotName = snapName;
return this;
}
+ /* set the directory path where the snapshot is taken. */
public CreateSnapshotOp setSnapshotRoot(String snapRoot) {
snapshotRoot = snapRoot;
return this;
}
-
+
+ /* The snapshot creation time set by Time.now(). */
+ CreateSnapshotOp setSnapshotMTime(long mTime) {
+ this.mtime = mTime;
+ return this;
+ }
+
@Override
void readFields(DataInputStream in, int logVersion) throws IOException {
snapshotRoot = FSImageSerialization.readString(in);
snapshotName = FSImageSerialization.readString(in);
+ mtime = FSImageSerialization.readLong(in);
// read RPC ids if necessary
readRpcIds(in, logVersion);
@@ -3475,6 +3487,7 @@ public abstract class FSEditLogOp {
public void writeFields(DataOutputStream out) throws IOException {
FSImageSerialization.writeString(snapshotRoot, out);
FSImageSerialization.writeString(snapshotName, out);
+ FSImageSerialization.writeLong(mtime, out);
writeRpcIds(rpcClientId, rpcCallId, out);
}
@@ -3482,6 +3495,7 @@ public abstract class FSEditLogOp {
protected void toXml(ContentHandler contentHandler) throws SAXException {
XMLUtils.addSaxString(contentHandler, "SNAPSHOTROOT", snapshotRoot);
XMLUtils.addSaxString(contentHandler, "SNAPSHOTNAME", snapshotName);
+ XMLUtils.addSaxString(contentHandler, "MTIME", Long.toString(mtime));
appendRpcIdsToXml(contentHandler, rpcClientId, rpcCallId);
}
@@ -3489,6 +3503,7 @@ public abstract class FSEditLogOp {
void fromXml(Stanza st) throws InvalidXmlException {
snapshotRoot = st.getValue("SNAPSHOTROOT");
snapshotName = st.getValue("SNAPSHOTNAME");
+ this.mtime = Long.parseLong(st.getValue("MTIME"));
readRpcIdsFromXml(st);
}
@@ -3499,7 +3514,8 @@ public abstract class FSEditLogOp {
builder.append("CreateSnapshotOp [snapshotRoot=")
.append(snapshotRoot)
.append(", snapshotName=")
- .append(snapshotName);
+ .append(snapshotName)
+ .append(", mtime=").append(mtime);
appendRpcIdsToString(builder, rpcClientId, rpcCallId);
builder.append("]");
return builder.toString();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
index 85d5a45..dd3c22c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
@@ -276,12 +276,17 @@ public class INodeDirectory extends INodeWithAdditionalFields
getDirectorySnapshottableFeature().setSnapshotQuota(snapshotQuota);
}
+ /**
+ * Add a snapshot.
+ * @param name Name of the snapshot.
+ * @param mtime The snapshot creation time set by Time.now().
+ */
public Snapshot addSnapshot(int id, String name,
final LeaseManager leaseManager, final boolean captureOpenFiles,
- int maxSnapshotLimit)
+ int maxSnapshotLimit, long mtime)
throws SnapshotException {
return getDirectorySnapshottableFeature().addSnapshot(this, id, name,
- leaseManager, captureOpenFiles, maxSnapshotLimit);
+ leaseManager, captureOpenFiles, maxSnapshotLimit, mtime);
}
public Snapshot removeSnapshot(
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
index 073b88b..b98b9cd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
@@ -166,10 +166,17 @@ public class DirectorySnapshottableFeature extends DirectoryWithSnapshotFeature
this.snapshotsByNames.add(snapshot);
}
- /** Add a snapshot. */
+ /**
+ * Add a snapshot.
+ * @param snapshotRoot Root of the snapshot.
+ * @param name Name of the snapshot.
+ * @param mtime The snapshot creation time set by Time.now().
+ * @throws SnapshotException if a snapshot with the same name already
+ * exists, or if the snapshot quota is exceeded
+ */
public Snapshot addSnapshot(INodeDirectory snapshotRoot, int id, String name,
final LeaseManager leaseManager, final boolean captureOpenFiles,
- int maxSnapshotLimit)
+ int maxSnapshotLimit, long now)
throws SnapshotException {
//check snapshot quota
final int n = getNumSnapshots();
@@ -195,8 +202,7 @@ public class DirectorySnapshottableFeature extends DirectoryWithSnapshotFeature
d.setSnapshotRoot(s.getRoot());
snapshotsByNames.add(-i - 1, s);
- // set modification time
- final long now = Time.now();
+ // modification time is the snapshot creation time
snapshotRoot.updateModificationTime(now, Snapshot.CURRENT_STATE_ID);
s.getRoot().setModificationTime(now, Snapshot.CURRENT_STATE_ID);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
index b908ee9..9244710 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
@@ -312,6 +312,7 @@ public class SnapshotManager implements SnapshotStatsMXBean {
* @param iip the INodes resolved from the snapshottable directory's path
* @param snapshotName
* The name of the snapshot.
+ * @param mtime The snapshot creation time set by Time.now().
* @throws IOException
* Throw IOException when 1) the given path does not lead to an
* existing snapshottable directory, and/or 2) there exists a
@@ -319,7 +320,8 @@ public class SnapshotManager implements SnapshotStatsMXBean {
* snapshot number exceeds quota
*/
public String createSnapshot(final LeaseManager leaseManager,
- final INodesInPath iip, String snapshotRoot, String snapshotName)
+ final INodesInPath iip, String snapshotRoot, String snapshotName,
+ long mtime)
throws IOException {
INodeDirectory srcRoot = getSnapshottableRoot(iip);
@@ -333,7 +335,7 @@ public class SnapshotManager implements SnapshotStatsMXBean {
}
srcRoot.addSnapshot(snapshotCounter, snapshotName, leaseManager,
- this.captureOpenFiles, maxSnapshotLimit);
+ this.captureOpenFiles, maxSnapshotLimit, mtime);
//create success, update id
snapshotCounter++;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java
index 0f78d98..5a5092c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java
@@ -456,6 +456,22 @@ public class TestSnapshot {
assertEquals(0, rootNode.getDirectorySnapshottableFeature().getSnapshotQuota());
}
+ @Test(timeout = 60000)
+ public void testSnapshotMtime() throws Exception {
+ Path dir = new Path("/dir");
+ Path sub = new Path(dir, "sub");
+ Path subFile = new Path(sub, "file");
+ DFSTestUtil.createFile(hdfs, subFile, BLOCKSIZE, REPLICATION, seed);
+
+ hdfs.allowSnapshot(dir);
+ Path snapshotPath = hdfs.createSnapshot(dir, "s1");
+ FileStatus oldSnapshotStatus = hdfs.getFileStatus(snapshotPath);
+ cluster.restartNameNodes();
+ FileStatus newSnapshotStatus = hdfs.getFileStatus(snapshotPath);
+ assertEquals(oldSnapshotStatus.getModificationTime(),
+ newSnapshotStatus.getModificationTime());
+ }
+
/**
* Prepare a list of modifications. A modification may be a file creation,
* file deletion, or a modification operation such as appending to an existing
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotManager.java
index 8bb8c95..e74144a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotManager.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.INodesInPath;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Time;
import org.junit.Assert;
import org.junit.Test;
@@ -60,14 +61,15 @@ public class TestSnapshotManager {
// Create testMaxSnapshotLimit snapshots. These should all succeed.
//
for (Integer i = 0; i < testMaxSnapshotLimit; ++i) {
- sm.createSnapshot(leaseManager, iip, "dummy", i.toString());
+ sm.createSnapshot(leaseManager, iip, "dummy", i.toString(), Time.now());
}
// Attempt to create one more snapshot. This should fail due to snapshot
// ID rollover.
//
try {
- sm.createSnapshot(leaseManager, iip, "dummy", "shouldFailSnapshot");
+ sm.createSnapshot(leaseManager, iip, "dummy", "shouldFailSnapshot",
+ Time.now());
Assert.fail("Expected SnapshotException not thrown");
} catch (SnapshotException se) {
Assert.assertTrue(
@@ -82,7 +84,8 @@ public class TestSnapshotManager {
// to snapshot ID rollover.
//
try {
- sm.createSnapshot(leaseManager, iip, "dummy", "shouldFailSnapshot2");
+ sm.createSnapshot(leaseManager, iip, "dummy", "shouldFailSnapshot2",
+ Time.now());
Assert.fail("Expected SnapshotException not thrown");
} catch (SnapshotException se) {
Assert.assertTrue(
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org