Posted to hdfs-commits@hadoop.apache.org by sz...@apache.org on 2013/04/13 23:41:34 UTC

svn commit: r1467706 - in /hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/org/apache/hadoop/hdfs/ src/main/java/org/apache/hadoop/hdfs/protocol/ src/main/java/org/apache/hadoop/hdfs/protocolPB/ src/main/java/org/apac...

Author: szetszwo
Date: Sat Apr 13 21:41:33 2013
New Revision: 1467706

URL: http://svn.apache.org/r1467706
Log:
HDFS-4692. Use timestamp as default snapshot names.

Modified:
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt?rev=1467706&r1=1467705&r2=1467706&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt Sat Apr 13 21:41:33 2013
@@ -234,3 +234,5 @@ Branch-2802 Snapshot (Unreleased)
 
   HDFS-4675. Fix rename across snapshottable directories.  (Jing Zhao via
   szetszwo)
+
+  HDFS-4692. Use timestamp as default snapshot names.  (szetszwo)

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java?rev=1467706&r1=1467705&r2=1467706&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java Sat Apr 13 21:41:33 2013
@@ -2021,12 +2021,13 @@ public class DFSClient implements java.i
    * 
    * @param snapshotRoot The directory where the snapshot is to be taken
    * @param snapshotName Name of the snapshot
+   * @return the snapshot path.
    * @see ClientProtocol#createSnapshot(String, String)
    */
-  public void createSnapshot(String snapshotRoot, String snapshotName)
+  public String createSnapshot(String snapshotRoot, String snapshotName)
       throws IOException {
     checkOpen();
-    namenode.createSnapshot(snapshotRoot, snapshotName);
+    return namenode.createSnapshot(snapshotRoot, snapshotName);
   }
   
   /**

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java?rev=1467706&r1=1467705&r2=1467706&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java Sat Apr 13 21:41:33 2013
@@ -932,9 +932,9 @@ public class DistributedFileSystem exten
   }
   
   @Override
-  public void createSnapshot(Path path, String snapshotName) 
+  public Path createSnapshot(Path path, String snapshotName) 
       throws IOException {
-    dfs.createSnapshot(getPathName(path), snapshotName);
+    return new Path(dfs.createSnapshot(getPathName(path), snapshotName));
   }
   
   @Override

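The client-facing effect of the DFSClient and DistributedFileSystem changes above is that createSnapshot now returns the path of the snapshot that was created, and the snapshot name may be omitted. A minimal usage sketch (setup elided; the directory path is a placeholder and is assumed to already be snapshottable), mirroring the one-argument call exercised by the TestNestedSnapshots change further below:

  Configuration conf = new Configuration();
  DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
  Path dir = new Path("/user/alice/data");

  // Explicit snapshot name: returns e.g. /user/alice/data/.snapshot/s0
  Path s0 = dfs.createSnapshot(dir, "s0");

  // No name supplied: the NameNode picks a timestamp-based default name,
  // e.g. /user/alice/data/.snapshot/s20130413-214133.000
  Path s1 = dfs.createSnapshot(dir);
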
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java?rev=1467706&r1=1467705&r2=1467706&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java Sat Apr 13 21:41:33 2013
@@ -1000,9 +1000,10 @@ public interface ClientProtocol {
    * Create a snapshot
    * @param snapshotRoot the path that is being snapshotted
    * @param snapshotName name of the snapshot created
+   * @return the snapshot path.
    * @throws IOException
    */
-  public void createSnapshot(String snapshotRoot, String snapshotName)
+  public String createSnapshot(String snapshotRoot, String snapshotName)
       throws IOException;
 
   /**

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java?rev=1467706&r1=1467705&r2=1467706&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java Sat Apr 13 21:41:33 2013
@@ -165,8 +165,6 @@ import com.google.protobuf.ServiceExcept
 public class ClientNamenodeProtocolServerSideTranslatorPB implements
     ClientNamenodeProtocolPB {
   final private ClientProtocol server;
-  static final CreateSnapshotResponseProto VOID_CREATE_SNAPSHOT_RESPONSE =
-      CreateSnapshotResponseProto.newBuilder().build();
   static final DeleteSnapshotResponseProto VOID_DELETE_SNAPSHOT_RESPONSE =
       DeleteSnapshotResponseProto.newBuilder().build();
   static final RenameSnapshotResponseProto VOID_RENAME_SNAPSHOT_RESPONSE =
@@ -898,22 +896,26 @@ public class ClientNamenodeProtocolServe
 
   @Override
   public CreateSnapshotResponseProto createSnapshot(RpcController controller,
-      CreateSnapshotRequestProto request) throws ServiceException {
+      CreateSnapshotRequestProto req) throws ServiceException {
     try {
-      server.createSnapshot(request.getSnapshotRoot(),
-          request.getSnapshotName());
+      final CreateSnapshotResponseProto.Builder builder
+          = CreateSnapshotResponseProto.newBuilder();
+      final String snapshotPath = server.createSnapshot(req.getSnapshotRoot(),
+          req.hasSnapshotName()? req.getSnapshotName(): null);
+      if (snapshotPath != null) {
+        builder.setSnapshotPath(snapshotPath);
+      }
+      return builder.build();
     } catch (IOException e) {
       throw new ServiceException(e);
     }
-    return VOID_CREATE_SNAPSHOT_RESPONSE;
   }
 
   @Override
   public DeleteSnapshotResponseProto deleteSnapshot(RpcController controller,
-      DeleteSnapshotRequestProto request) throws ServiceException {
+      DeleteSnapshotRequestProto req) throws ServiceException {
     try {
-      server
-          .deleteSnapshot(request.getSnapshotRoot(), request.getSnapshotName());
+      server.deleteSnapshot(req.getSnapshotRoot(), req.getSnapshotName());
       return VOID_DELETE_SNAPSHOT_RESPONSE;
     } catch (IOException e) {
       throw new ServiceException(e);

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java?rev=1467706&r1=1467705&r2=1467706&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java Sat Apr 13 21:41:33 2013
@@ -40,13 +40,13 @@ import org.apache.hadoop.hdfs.protocol.D
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
+import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto;
@@ -56,8 +56,8 @@ import org.apache.hadoop.hdfs.protocol.p
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotRequestProto;
@@ -112,7 +112,6 @@ import org.apache.hadoop.hdfs.protocol.p
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.server.namenode.INodeId;
 import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.io.EnumSetWritable;
@@ -882,12 +881,16 @@ public class ClientNamenodeProtocolTrans
   }
 
   @Override
-  public void createSnapshot(String snapshotRoot, String snapshotName)
+  public String createSnapshot(String snapshotRoot, String snapshotName)
       throws IOException {
-    CreateSnapshotRequestProto req = CreateSnapshotRequestProto.newBuilder()
-        .setSnapshotRoot(snapshotRoot).setSnapshotName(snapshotName).build();
+    final CreateSnapshotRequestProto.Builder builder
+        = CreateSnapshotRequestProto.newBuilder().setSnapshotRoot(snapshotRoot);
+    if (snapshotName != null) {
+      builder.setSnapshotName(snapshotName);
+    }
+    final CreateSnapshotRequestProto req = builder.build();
     try {
-      rpcProxy.createSnapshot(null, req);
+      return rpcProxy.createSnapshot(null, req).getSnapshotPath();
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
     }

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1467706&r1=1467705&r2=1467706&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Sat Apr 13 21:41:33 2013
@@ -5731,12 +5731,12 @@ public class FSNamesystem implements Nam
         .shouldAvoidStaleDataNodesForWrite();
   }
   
-  public SnapshotManager getSnapshotManager() {
+  SnapshotManager getSnapshotManager() {
     return snapshotManager;
   }
   
   /** Allow snapshot on a directroy. */
-  public void allowSnapshot(String path) throws SafeModeException, IOException {
+  void allowSnapshot(String path) throws SafeModeException, IOException {
     final FSPermissionChecker pc = getPermissionChecker();
     writeLock();
     try {
@@ -5762,8 +5762,7 @@ public class FSNamesystem implements Nam
   }
   
   /** Disallow snapshot on a directory. */
-  public void disallowSnapshot(String path)
-      throws SafeModeException, IOException {
+  void disallowSnapshot(String path) throws SafeModeException, IOException {
     final FSPermissionChecker pc = getPermissionChecker();
     writeLock();
     try {
@@ -5793,10 +5792,11 @@ public class FSNamesystem implements Nam
    * @param snapshotRoot The directory path where the snapshot is taken
    * @param snapshotName The name of the snapshot
    */
-  public void createSnapshot(String snapshotRoot, String snapshotName)
+  String createSnapshot(String snapshotRoot, String snapshotName)
       throws SafeModeException, IOException {
     final FSPermissionChecker pc = getPermissionChecker();
     writeLock();
+    final String snapshotPath;
     try {
       checkOperation(OperationCategory.WRITE);
       if (isInSafeMode()) {
@@ -5805,9 +5805,13 @@ public class FSNamesystem implements Nam
       }
       checkOwner(pc, snapshotRoot);
 
+      if (snapshotName == null || snapshotName.isEmpty()) {
+        snapshotName = Snapshot.generateDefaultSnapshotName();
+      }
+      dir.verifyMaxComponentLength(snapshotName, snapshotRoot, 0);
       dir.writeLock();
       try {
-        snapshotManager.createSnapshot(snapshotRoot, snapshotName);
+        snapshotPath = snapshotManager.createSnapshot(snapshotRoot, snapshotName);
       } finally {
         dir.writeUnlock();
       }
@@ -5818,11 +5822,9 @@ public class FSNamesystem implements Nam
     getEditLog().logSync();
     
     if (auditLog.isInfoEnabled() && isExternalInvocation()) {
-      Path rootPath = new Path(snapshotRoot, HdfsConstants.DOT_SNAPSHOT_DIR
-          + Path.SEPARATOR + snapshotName);
-      logAuditEvent(true, "createSnapshot", snapshotRoot, rootPath.toString(),
-          null);
+      logAuditEvent(true, "createSnapshot", snapshotRoot, snapshotPath, null);
     }
+    return snapshotPath;
   }
   
   /**
@@ -5833,7 +5835,7 @@ public class FSNamesystem implements Nam
    * @throws SafeModeException
    * @throws IOException 
    */
-  public void renameSnapshot(String path, String snapshotOldName,
+  void renameSnapshot(String path, String snapshotOldName,
       String snapshotNewName) throws SafeModeException, IOException {
     final FSPermissionChecker pc = getPermissionChecker();
     writeLock();
@@ -5844,8 +5846,7 @@ public class FSNamesystem implements Nam
             safeMode);
       }
       checkOwner(pc, path);
-      // TODO: check if the new name is valid. May also need this for
-      // creationSnapshot
+      dir.verifyMaxComponentLength(snapshotNewName, path, 0);
       
       snapshotManager.renameSnapshot(path, snapshotOldName, snapshotNewName);
       getEditLog().logRenameSnapshot(path, snapshotOldName, snapshotNewName);
@@ -5905,7 +5906,7 @@ public class FSNamesystem implements Nam
    *         and labeled as M/-/+/R respectively. 
    * @throws IOException
    */
-  public SnapshotDiffReport getSnapshotDiffReport(String path,
+  SnapshotDiffReport getSnapshotDiffReport(String path,
       String fromSnapshot, String toSnapshot) throws IOException {
     SnapshotDiffInfo diffs = null;
     readLock();
@@ -5931,7 +5932,7 @@ public class FSNamesystem implements Nam
    * @throws SafeModeException
    * @throws IOException
    */
-  public void deleteSnapshot(String snapshotRoot, String snapshotName)
+  void deleteSnapshot(String snapshotRoot, String snapshotName)
       throws SafeModeException, IOException {
     final FSPermissionChecker pc = getPermissionChecker();
     writeLock();

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java?rev=1467706&r1=1467705&r2=1467706&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java Sat Apr 13 21:41:33 2013
@@ -1093,14 +1093,14 @@ class NameNodeRpcServer implements Namen
   }
 
   @Override
-  public void createSnapshot(String snapshotRoot, String snapshotName)
+  public String createSnapshot(String snapshotRoot, String snapshotName)
       throws IOException {
     if (!checkPathLength(snapshotRoot)) {
       throw new IOException("createSnapshot: Pathname too long.  Limit "
           + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
     }
     metrics.incrCreateSnapshotOps();
-    namesystem.createSnapshot(snapshotRoot, snapshotName);
+    return namesystem.createSnapshot(snapshotRoot, snapshotName);
   }
   
   @Override
@@ -1127,6 +1127,9 @@ class NameNodeRpcServer implements Namen
   @Override
   public void renameSnapshot(String snapshotRoot, String snapshotOldName,
       String snapshotNewName) throws IOException {
+    if (snapshotNewName == null || snapshotNewName.isEmpty()) {
+      throw new IOException("The new snapshot name is null or empty.");
+    }
     metrics.incrRenameSnapshotOps();
     namesystem.renameSnapshot(snapshotRoot, snapshotOldName, snapshotNewName);
   }

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java?rev=1467706&r1=1467705&r2=1467706&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java Sat Apr 13 21:41:33 2013
@@ -288,7 +288,7 @@ public class INodeDirectorySnapshottable
     final int i = searchSnapshot(nameBytes);
     if (i >= 0) {
       throw new SnapshotException("Failed to add snapshot: there is already a "
-          + "snapshot with the same name \"" + name + "\".");
+          + "snapshot with the same name \"" + Snapshot.getSnapshotName(s) + "\".");
     }
 
     final DirectoryDiff d = getDiffs().addDiff(s, this);

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java?rev=1467706&r1=1467705&r2=1467706&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java Sat Apr 13 21:41:33 2013
@@ -20,7 +20,9 @@ package org.apache.hadoop.hdfs.server.na
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
+import java.text.SimpleDateFormat;
 import java.util.Comparator;
+import java.util.Date;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.Path;
@@ -38,6 +40,30 @@ public class Snapshot implements Compara
   public static final int INVALID_ID = -1;
   
   /**
+   * The pattern for generating the default snapshot name.
+   * E.g. s20130412-151029.033
+   */
+  private static final String DEFAULT_SNAPSHOT_NAME_PATTERN = "'s'yyyyMMdd-HHmmss.SSS";
+  
+  public static String generateDefaultSnapshotName() {
+    return new SimpleDateFormat(DEFAULT_SNAPSHOT_NAME_PATTERN).format(new Date());
+  }
+
+  static String getSnapshotPath(String snapshottableDir, String snapshotName) {
+    return new Path(snapshottableDir, HdfsConstants.DOT_SNAPSHOT_DIR
+        + Path.SEPARATOR + snapshotName).toString();
+  }
+  
+  /** 
+   * Get the name of the given snapshot. 
+   * @param s The given snapshot.
+   * @return The name of the snapshot, or an empty string if {@code s} is null
+   */
+  static String getSnapshotName(Snapshot s) {
+    return s != null ? s.getRoot().getLocalName() : "";
+  }
+
+  /**
    * Compare snapshot IDs. Null indicates the current status thus is greater
    * than non-null snapshots.
    */
@@ -78,15 +104,6 @@ public class Snapshot implements Compara
     }
     return latest;
   }
-  
-  /** 
-   * Get the name of the given snapshot. 
-   * @param s The given snapshot.
-   * @return The name of the snapshot, or an empty string if {@code s} is null
-   */
-  public static String getSnapshotName(Snapshot s) {
-    return s != null ? s.getRoot().getLocalName() : "";
-  }
 
   /** The root directory of the snapshot. */
   public class Root extends INodeDirectory {

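For reference, the default name is simply the current time rendered with the SimpleDateFormat pattern introduced above. A small self-contained sketch of what generateDefaultSnapshotName() produces (class name and output are illustrative only):

  import java.text.SimpleDateFormat;
  import java.util.Date;

  public class DefaultSnapshotNameDemo {
    public static void main(String[] args) {
      // Same pattern as DEFAULT_SNAPSHOT_NAME_PATTERN; the quoted 's' is a literal prefix.
      SimpleDateFormat format = new SimpleDateFormat("'s'yyyyMMdd-HHmmss.SSS");
      System.out.println(format.format(new Date()));  // e.g. s20130413-214133.000
    }
  }

Creating a new SimpleDateFormat per call, as generateDefaultSnapshotName() does, also sidesteps SimpleDateFormat's lack of thread safety.
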
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java?rev=1467706&r1=1467705&r2=1467706&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java Sat Apr 13 21:41:33 2013
@@ -127,19 +127,19 @@ public class SnapshotManager implements 
    *           snapshot with the given name for the directory, and/or 3)
    *           snapshot number exceeds quota
    */
-  public void createSnapshot(final String path, final String snapshotName
+  public String createSnapshot(final String path, String snapshotName
       ) throws IOException {
     // Find the source root directory path where the snapshot is taken.
     final INodesInPath i = fsdir.getINodesInPath4Write(path);
     final INodeDirectorySnapshottable srcRoot
         = INodeDirectorySnapshottable.valueOf(i.getLastINode(), path);
 
-    fsdir.verifyMaxComponentLength(snapshotName, path, 0);
     srcRoot.addSnapshot(snapshotCounter, snapshotName);
       
     //create success, update id
     snapshotCounter++;
     numSnapshots.getAndIncrement();
+    return Snapshot.getSnapshotPath(path, snapshotName);
   }
   
   /**

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto?rev=1467706&r1=1467705&r2=1467706&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto Sat Apr 13 21:41:33 2013
@@ -450,10 +450,11 @@ message GetDataEncryptionKeyResponseProt
 
 message CreateSnapshotRequestProto {
   required string snapshotRoot = 1;
-  required string snapshotName = 2;
+  optional string snapshotName = 2;
 }
 
-message CreateSnapshotResponseProto { // void response
+message CreateSnapshotResponseProto {
+  required string snapshotPath = 1;
 }
 
 message RenameSnapshotRequestProto {

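With snapshotName now optional in the request, the protobuf translators above rely on standard proto2 optional-field semantics: hasSnapshotName() reports presence, while getSnapshotName() returns the empty string when the field is unset. A small illustrative sketch (the path is a placeholder):

  CreateSnapshotRequestProto req = CreateSnapshotRequestProto.newBuilder()
      .setSnapshotRoot("/user/alice/data")   // snapshotName deliberately left unset
      .build();
  req.hasSnapshotName();   // false
  req.getSnapshotName();   // "" -- proto2 default for an unset optional string

Either way the NameNode sees a null or empty name and substitutes the generated timestamp name in FSNamesystem.createSnapshot().
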
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java?rev=1467706&r1=1467705&r2=1467706&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java Sat Apr 13 21:41:33 2013
@@ -21,6 +21,7 @@ import static org.apache.hadoop.hdfs.ser
 
 import java.io.IOException;
 import java.util.Random;
+import java.util.regex.Pattern;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
@@ -178,7 +179,19 @@ public class TestNestedSnapshots {
     final Path foo = new Path(dir, "foo");
     final Path f1 = new Path(foo, "f1");
     DFSTestUtil.createFile(hdfs, f1, BLOCKSIZE, REPLICATION, SEED);
-    hdfs.createSnapshot(dir, "s0");
+    {
+      //create a snapshot with default snapshot name
+      final Path snapshotPath = hdfs.createSnapshot(dir);
+
+      //check snapshot path and the default snapshot name
+      final String snapshotName = snapshotPath.getName(); 
+      Assert.assertTrue("snapshotName=" + snapshotName, Pattern.matches(
+          "s\\d\\d\\d\\d\\d\\d\\d\\d-\\d\\d\\d\\d\\d\\d\\.\\d\\d\\d",
+          snapshotName));
+      final Path parent = snapshotPath.getParent();
+      Assert.assertEquals(HdfsConstants.DOT_SNAPSHOT_DIR, parent.getName());
+      Assert.assertEquals(dir, parent.getParent());
+    }
     final Path f2 = new Path(foo, "f2");
     DFSTestUtil.createFile(hdfs, f2, BLOCKSIZE, REPLICATION, SEED);
     
@@ -193,7 +206,7 @@ public class TestNestedSnapshots {
 
     try {
       // createSnapshot should fail with quota
-      hdfs.createSnapshot(dir, "s1");
+      hdfs.createSnapshot(dir);
       Assert.fail();
     } catch(RemoteException re) {
       final IOException ioe = re.unwrapRemoteException();