Posted to common-commits@hadoop.apache.org by sh...@apache.org on 2020/10/07 05:04:46 UTC

[hadoop] branch trunk updated: HDFS-15611. Add list Snapshot command in WebHDFS. (#2355)

This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
     new 16aea11  HDFS-15611. Add list Snapshot command in WebHDFS. (#2355)
16aea11 is described below

commit 16aea11c945c84936984e80241dcdd4a0d4b7f58
Author: bshashikant <sh...@apache.org>
AuthorDate: Wed Oct 7 10:34:32 2020 +0530

    HDFS-15611. Add list Snapshot command in WebHDFS. (#2355)
---
 .../hadoop/hdfs/protocol/SnapshotStatus.java       | 20 ++++-
 .../org/apache/hadoop/hdfs/web/JsonUtilClient.java | 36 +++++++++
 .../apache/hadoop/hdfs/web/WebHdfsFileSystem.java  | 14 ++++
 .../hadoop/hdfs/web/resources/GetOpParam.java      |  3 +-
 .../hadoop/fs/http/client/HttpFSFileSystem.java    | 16 +++-
 .../apache/hadoop/fs/http/server/FSOperations.java | 38 +++++++++
 .../fs/http/server/HttpFSParametersProvider.java   |  1 +
 .../apache/hadoop/fs/http/server/HttpFSServer.java |  8 ++
 .../hadoop/fs/http/client/BaseTestHttpFSWith.java  | 51 +++++++++++-
 .../hadoop/fs/http/server/TestHttpFSServer.java    | 47 +++++++++++
 .../web/resources/NamenodeWebHdfsMethods.java      |  7 ++
 .../java/org/apache/hadoop/hdfs/web/JsonUtil.java  | 24 ++++++
 .../hadoop-hdfs/src/site/markdown/WebHDFS.md       | 92 ++++++++++++++++++++++
 .../org/apache/hadoop/hdfs/web/TestWebHDFS.java    | 61 ++++++++++++++
 14 files changed, 412 insertions(+), 6 deletions(-)
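
For orientation, a minimal client-side sketch of how the new operation is
meant to be used (illustrative only, not part of the patch: the webhdfs URI,
directory path, and snapshot setup are assumptions; the directory must
already be snapshottable):

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.protocol.SnapshotStatus;
    import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;

    public class ListSnapshotsExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Assumed NameNode HTTP endpoint; adjust for your cluster.
        FileSystem fs =
            FileSystem.get(URI.create("webhdfs://localhost:9870"), conf);
        WebHdfsFileSystem webHdfs = (WebHdfsFileSystem) fs;
        // Issues the new GETSNAPSHOTLIST op for /foo.
        SnapshotStatus[] snapshots =
            webHdfs.getSnapshotListing(new Path("/foo"));
        for (SnapshotStatus s : snapshots) {
          System.out.println(s.getSnapshotID() + "\t"
              + (s.isDeleted() ? "DELETED" : "ACTIVE") + "\t"
              + s.getDirStatus().getLocalName());
        }
      }
    }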

diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotStatus.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotStatus.java
index 3e2a7ae..8f10db8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotStatus.java
@@ -73,9 +73,18 @@ public class SnapshotStatus {
     this.parentFullPath = parentFullPath;
   }
 
+  public SnapshotStatus(HdfsFileStatus dirStatus,
+                                      int snapshotID, boolean isDeleted,
+                                      byte[] parentFullPath) {
+    this.dirStatus = dirStatus;
+    this.snapshotID = snapshotID;
+    this.isDeleted = isDeleted;
+    this.parentFullPath = parentFullPath;
+  }
+
   /**
-   * sets the prent path name.
-   * @param path parent path
+   * sets the parent path name.
+   * @param path parent path
    */
   public void setParentFullPath(byte[] path) {
     parentFullPath = path;
@@ -174,7 +183,7 @@ public class SnapshotStatus {
     return Math.max(n, String.valueOf(value).length());
   }
 
-  static String getSnapshotPath(String snapshottableDir,
+  public static String getSnapshotPath(String snapshottableDir,
                                 String snapshotRelativePath) {
     String parentFullPathStr =
         snapshottableDir == null || snapshottableDir.isEmpty() ?
@@ -188,4 +197,9 @@ public class SnapshotStatus {
         .append(snapshotRelativePath)
         .toString();
   }
+
+  public static String getParentPath(String snapshotPath) {
+    int index = snapshotPath.indexOf(HdfsConstants.DOT_SNAPSHOT_DIR);
+    return index == -1 ? snapshotPath : snapshotPath.substring(0, index - 1);
+  }
 }
\ No newline at end of file
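
A quick sanity sketch (illustrative, not part of the patch) of the two path
helpers touched above: getSnapshotPath() joins a snapshottable directory and
a snapshot name, and the new getParentPath() recovers the directory by
cutting at the ".snapshot" component ("/foo" and "s1" are assumed names):

    String snapPath = SnapshotStatus.getSnapshotPath("/foo", "s1");
    // snapPath == "/foo/.snapshot/s1"
    String parent = SnapshotStatus.getParentPath(snapPath);
    // parent == "/foo"; the index - 1 also drops the trailing separator
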
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
index e846b56..6ce01a2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
+import org.apache.hadoop.hdfs.protocol.SnapshotStatus;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -872,4 +873,39 @@ public class JsonUtilClient {
             snapshotQuota, parentFullPath);
     return snapshottableDirectoryStatus;
   }
+
+  public static SnapshotStatus[] toSnapshotList(final Map<?, ?> json) {
+    if (json == null) {
+      return null;
+    }
+    List<?> list = (List<?>) json.get("SnapshotList");
+    if (list == null) {
+      return null;
+    }
+    SnapshotStatus[] statuses =
+        new SnapshotStatus[list.size()];
+    for (int i = 0; i < list.size(); i++) {
+      statuses[i] = toSnapshotStatus((Map<?, ?>) list.get(i));
+    }
+    return statuses;
+  }
+
+  private static SnapshotStatus toSnapshotStatus(
+      Map<?, ?> json) {
+    if (json == null) {
+      return null;
+    }
+    int snapshotID = getInt(json, "snapshotID", 0);
+    boolean isDeleted = "DELETED".equalsIgnoreCase(
+        (String)json.get("deletionStatus"));
+    String fullPath = ((String) json.get("fullPath"));
+
+    HdfsFileStatus dirStatus =
+        toFileStatus((Map<?, ?>) json.get("dirStatus"), false);
+    SnapshotStatus snapshotStatus =
+        new SnapshotStatus(dirStatus, snapshotID,
+            isDeleted, DFSUtilClient.string2Bytes(
+                SnapshotStatus.getParentPath(fullPath)));
+    return snapshotStatus;
+  }
 }
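
For reference, a hand-built sketch of the minimal map shape that
toSnapshotList() accepts (illustrative; a real response also carries a
"dirStatus" map, which toFileStatus() decodes; omitting it here simply
yields a null dirStatus):

    Map<String, Object> entry = new HashMap<>();
    entry.put("snapshotID", 0L);            // read via getInt()
    entry.put("deletionStatus", "ACTIVE");  // anything but "DELETED" is active
    entry.put("fullPath", "/foo/.snapshot/s1");
    Map<String, Object> json = new HashMap<>();
    json.put("SnapshotList", Arrays.asList(entry));
    SnapshotStatus[] statuses = JsonUtilClient.toSnapshotList(json);
    // statuses[0].getParentFullPath() decodes back to "/foo"
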
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index 25e7f73..bfcd0f8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -106,6 +106,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
+import org.apache.hadoop.hdfs.protocol.SnapshotStatus;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto;
 import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
@@ -1459,6 +1460,19 @@ public class WebHdfsFileSystem extends FileSystem
     }.run();
   }
 
+  public SnapshotStatus[] getSnapshotListing(final Path snapshotDir)
+      throws IOException {
+    storageStatistics
+        .incrementOpCounter(OpType.GET_SNAPSHOT_LIST);
+    final HttpOpParam.Op op = GetOpParam.Op.GETSNAPSHOTLIST;
+    return new FsPathResponseRunner<SnapshotStatus[]>(op, snapshotDir) {
+      @Override
+      SnapshotStatus[] decodeResponse(Map<?, ?> json) {
+        return JsonUtilClient.toSnapshotList(json);
+      }
+    }.run();
+  }
+
   @Override
   public boolean setReplication(final Path p, final short replication
   ) throws IOException {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
index f9a5fa6..abbef9d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
@@ -62,7 +62,8 @@ public class GetOpParam extends HttpOpParam<GetOpParam.Op> {
     LISTSTATUS_BATCH(false, HttpURLConnection.HTTP_OK),
     GETSERVERDEFAULTS(false, HttpURLConnection.HTTP_OK),
     GETSNAPSHOTDIFF(false, HttpURLConnection.HTTP_OK),
-    GETSNAPSHOTTABLEDIRECTORYLIST(false, HttpURLConnection.HTTP_OK);
+    GETSNAPSHOTTABLEDIRECTORYLIST(false, HttpURLConnection.HTTP_OK),
+    GETSNAPSHOTLIST(false, HttpURLConnection.HTTP_OK);
 
     final boolean redirect;
     final int expectedHttpResponseCode;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
index a9ef653..ccd71d9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
@@ -54,6 +54,7 @@ import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
+import org.apache.hadoop.hdfs.protocol.SnapshotStatus;
 import org.apache.hadoop.hdfs.web.JsonUtilClient;
 import org.apache.hadoop.lib.wsrs.EnumSetParam;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -262,7 +263,8 @@ public class HttpFSFileSystem extends FileSystem
     ALLOWSNAPSHOT(HTTP_PUT), DISALLOWSNAPSHOT(HTTP_PUT),
     CREATESNAPSHOT(HTTP_PUT), DELETESNAPSHOT(HTTP_DELETE),
     RENAMESNAPSHOT(HTTP_PUT), GETSNAPSHOTDIFF(HTTP_GET),
-    GETSNAPSHOTTABLEDIRECTORYLIST(HTTP_GET), GETSERVERDEFAULTS(HTTP_GET),
+    GETSNAPSHOTTABLEDIRECTORYLIST(HTTP_GET), GETSNAPSHOTLIST(HTTP_GET),
+    GETSERVERDEFAULTS(HTTP_GET),
     CHECKACCESS(HTTP_GET), SETECPOLICY(HTTP_PUT), GETECPOLICY(
         HTTP_GET), UNSETECPOLICY(HTTP_POST), SATISFYSTORAGEPOLICY(HTTP_PUT);
 
@@ -1582,6 +1584,18 @@ public class HttpFSFileSystem extends FileSystem
     return JsonUtilClient.toSnapshottableDirectoryList(json);
   }
 
+  public SnapshotStatus[] getSnapshotListing(Path snapshotRoot)
+      throws IOException {
+    Map<String, String> params = new HashMap<String, String>();
+    params.put(OP_PARAM, Operation.GETSNAPSHOTLIST.toString());
+    HttpURLConnection conn = getConnection(
+        Operation.GETSNAPSHOTLIST.getMethod(),
+        params, snapshotRoot, true);
+    HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
+    JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
+    return JsonUtilClient.toSnapshotList(json);
+  }
+
   /**
    * This filesystem's capabilities must be in sync with that of
    * {@code DistributedFileSystem.hasPathCapability()} except
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
index 0f8ea07..6effb83 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
+import org.apache.hadoop.hdfs.protocol.SnapshotStatus;
 import org.apache.hadoop.hdfs.web.JsonUtil;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.lib.service.FileSystemAccess;
@@ -1835,6 +1836,43 @@ public class FSOperations {
   }
 
   /**
+   *  Executor that performs a getSnapshotListing operation.
+   */
+  @InterfaceAudience.Private
+  public static class FSGetSnapshotListing implements
+      FileSystemAccess.FileSystemExecutor<String> {
+    private Path path;
+
+    /**
+     * Creates a getSnapshotListing executor.
+     * @param path directory path of the snapshots to be examined.
+     */
+    public FSGetSnapshotListing(String path) {
+      this.path = new Path(path);
+    }
+
+    /**
+     * Executes the filesystem operation.
+     * @param fs filesystem instance to use.
+     * @return A JSON string of all snapshots for a snapshottable directory.
+     * @throws IOException thrown if an IO error occurred.
+     */
+    @Override
+    public String execute(FileSystem fs) throws IOException {
+      SnapshotStatus[] sds = null;
+      if (fs instanceof DistributedFileSystem) {
+        DistributedFileSystem dfs = (DistributedFileSystem) fs;
+        sds = dfs.getSnapshotListing(path);
+      } else {
+        throw new UnsupportedOperationException("getSnapshotListing is "
+            + "not supported for HttpFs on " + fs.getClass()
+            + ". Please check your fs.defaultFS configuration");
+      }
+      return JsonUtil.toJsonString(sds);
+    }
+  }
+
+  /**
    * Executor that performs a getServerDefaults operation.
    */
   @InterfaceAudience.Private
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java
index 39180fb..f6c84dc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java
@@ -116,6 +116,7 @@ public class HttpFSParametersProvider extends ParametersProvider {
         new Class[] {OldSnapshotNameParam.class,
             SnapshotNameParam.class});
     PARAMS_DEF.put(Operation.GETSNAPSHOTTABLEDIRECTORYLIST, new Class[] {});
+    PARAMS_DEF.put(Operation.GETSNAPSHOTLIST, new Class[] {});
     PARAMS_DEF.put(Operation.GETSERVERDEFAULTS, new Class[] {});
     PARAMS_DEF.put(Operation.CHECKACCESS, new Class[] {FsActionParam.class});
     PARAMS_DEF.put(Operation.SETECPOLICY, new Class[] {ECPolicyParam.class});
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
index f2dd5d8..da559f1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
@@ -458,6 +458,14 @@ public class HttpFSServer {
       response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
       break;
     }
+    case GETSNAPSHOTLIST: {
+      FSOperations.FSGetSnapshotListing command =
+          new FSOperations.FSGetSnapshotListing(path);
+      String js = fsExecute(user, command);
+      AUDIT_LOG.info("[{}]", "/");
+      response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
+      break;
+    }
     case GETSERVERDEFAULTS: {
       FSOperations.FSGetServerDefaults command =
           new FSOperations.FSGetServerDefaults();
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
index 106e475..90e1721 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
@@ -51,6 +51,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
+import org.apache.hadoop.hdfs.protocol.SnapshotStatus;
 import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.web.JsonUtil;
@@ -1158,7 +1159,8 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
     CREATE_SNAPSHOT, RENAME_SNAPSHOT, DELETE_SNAPSHOT,
     ALLOW_SNAPSHOT, DISALLOW_SNAPSHOT, DISALLOW_SNAPSHOT_EXCEPTION,
     FILE_STATUS_ATTR, GET_SNAPSHOT_DIFF, GET_SNAPSHOTTABLE_DIRECTORY_LIST,
-    GET_SERVERDEFAULTS, CHECKACCESS, SETECPOLICY, SATISFYSTORAGEPOLICY
+    GET_SNAPSHOT_LIST, GET_SERVERDEFAULTS, CHECKACCESS, SETECPOLICY,
+    SATISFYSTORAGEPOLICY
   }
 
   private void operation(Operation op) throws Exception {
@@ -1279,6 +1281,9 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
     case GET_SNAPSHOTTABLE_DIRECTORY_LIST:
       testGetSnapshottableDirListing();
       break;
+    case GET_SNAPSHOT_LIST:
+      testGetSnapshotListing();
+      break;
     case GET_SERVERDEFAULTS:
       testGetServerDefaults();
       break;
@@ -1657,6 +1662,50 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
         JsonUtil.toJsonString(dfssds));
   }
 
+  private void testGetSnapshotListing() throws Exception {
+    if (!this.isLocalFS()) {
+      // Create a directory with snapshot allowed
+      Path path = new Path("/tmp/tmp-snap-test");
+      createSnapshotTestsPreconditions(path);
+      // Get the FileSystem instance that's being tested
+      FileSystem fs = this.getHttpFSFileSystem();
+      // Check FileStatus
+      Assert.assertTrue(fs.getFileStatus(path).isSnapshotEnabled());
+      // Create a file and take a snapshot
+      Path file1 = new Path(path, "file1");
+      testCreate(file1, false);
+      fs.createSnapshot(path, "snap1");
+      // Create another file and take a snapshot
+      Path file2 = new Path(path, "file2");
+      testCreate(file2, false);
+      fs.createSnapshot(path, "snap2");
+      // Get snapshot listing
+      SnapshotStatus[] snapshotStatus = null;
+      if (fs instanceof HttpFSFileSystem) {
+        HttpFSFileSystem httpFS = (HttpFSFileSystem) fs;
+        snapshotStatus = httpFS.getSnapshotListing(path);
+      } else if (fs instanceof WebHdfsFileSystem) {
+        WebHdfsFileSystem webHdfsFileSystem = (WebHdfsFileSystem) fs;
+        snapshotStatus = webHdfsFileSystem.getSnapshotListing(path);
+      } else {
+        Assert.fail(fs.getClass().getSimpleName() +
+            " doesn't support getSnapshotDiff");
+      }
+      // Verify result with DFS
+      DistributedFileSystem dfs = (DistributedFileSystem)
+          FileSystem.get(path.toUri(), this.getProxiedFSConf());
+      SnapshotStatus[] dfsStatus =
+          dfs.getSnapshotListing(path);
+      Assert.assertEquals(JsonUtil.toJsonString(snapshotStatus),
+          JsonUtil.toJsonString(dfsStatus));
+      // Cleanup
+      fs.deleteSnapshot(path, "snap2");
+      fs.deleteSnapshot(path, "snap1");
+      fs.delete(path, true);
+    }
+  }
+
+
   private void testGetSnapshottableDirListing() throws Exception {
     if (!this.isLocalFS()) {
       FileSystem fs = this.getHttpFSFileSystem();
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
index 6739393..121a2d2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfierMode;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
+import org.apache.hadoop.hdfs.protocol.SnapshotStatus;
 import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.web.JsonUtil;
@@ -1515,6 +1516,23 @@ public class TestHttpFSServer extends HFSTestCase {
     Assert.assertEquals(dirLst, JsonUtil.toJsonString(dfsDirLst));
   }
 
+  private void verifyGetSnapshotList(DistributedFileSystem dfs, Path path)
+      throws Exception {
+    // Send a request
+    HttpURLConnection conn = sendRequestToHttpFSServer(path.toString(),
+        "GETSNAPSHOTLIST", "");
+    // Should return HTTP_OK
+    Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
+    // Verify the response
+    BufferedReader reader =
+        new BufferedReader(new InputStreamReader(conn.getInputStream()));
+    // The response should be a one-line JSON string.
+    String dirLst = reader.readLine();
+    // Verify the content of status with DFS API.
+    SnapshotStatus[] dfsDirLst = dfs.getSnapshotListing(path);
+    Assert.assertEquals(dirLst, JsonUtil.toJsonString(dfsDirLst));
+  }
+
   @Test
   @TestDir
   @TestJetty
@@ -1550,6 +1568,35 @@ public class TestHttpFSServer extends HFSTestCase {
     verifyGetSnapshottableDirectoryList(dfs);
   }
 
+
+  @Test
+  @TestDir
+  @TestJetty
+  @TestHdfs
+  public void testGetSnapshotList() throws Exception {
+    createHttpFSServer(false, false);
+    // Create test directories
+    String pathStr = "/tmp/tmp-snap-list-test-1";
+    createDirWithHttp(pathStr, "700", null);
+    Path path = new Path(pathStr);
+    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(
+        path.toUri(), TestHdfsHelper.getHdfsConf());
+    // Enable snapshot for the directory
+    dfs.allowSnapshot(path);
+    Assert.assertTrue(dfs.getFileStatus(path).isSnapshotEnabled());
+    // Verify response when there are no snapshots yet
+    verifyGetSnapshotList(dfs, path);
+    // Create a file and take a snapshot
+    String file1 = pathStr + "/file1";
+    createWithHttp(file1, null);
+    dfs.createSnapshot(path, "snap1");
+    // Create another file and take a snapshot
+    String file2 = pathStr + "/file2";
+    createWithHttp(file2, null);
+    dfs.createSnapshot(path, "snap2");
+    verifyGetSnapshotList(dfs, path);
+  }
+
   @Test
   @TestDir
   @TestJetty
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
index 9baed4f..634efda 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
@@ -86,6 +86,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
+import org.apache.hadoop.hdfs.protocol.SnapshotStatus;
 import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
@@ -1340,6 +1341,12 @@ public class NamenodeWebHdfsMethods {
       final String js = JsonUtil.toJsonString(snapshottableDirectoryList);
       return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
     }
+    case GETSNAPSHOTLIST: {
+      SnapshotStatus[] snapshotList =
+          cp.getSnapshotListing(fullpath);
+      final String js = JsonUtil.toJsonString(snapshotList);
+      return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
+    }
     default:
       throw new UnsupportedOperationException(op + " is not supported");
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
index bf5568b..5a1f304 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
@@ -588,6 +588,17 @@ public class JsonUtil {
     return toJsonString("SnapshottableDirectoryList", a);
   }
 
+  public static String toJsonString(SnapshotStatus[] snapshotList) {
+    if (snapshotList == null) {
+      return toJsonString("SnapshotList", null);
+    }
+    Object[] a = new Object[snapshotList.length];
+    for (int i = 0; i < snapshotList.length; i++) {
+      a[i] = toJsonMap(snapshotList[i]);
+    }
+    return toJsonString("SnapshotList", a);
+  }
+
   private static Object toJsonMap(
       SnapshottableDirectoryStatus snapshottableDirectoryStatus) {
     final Map<String, Object> m = new TreeMap<String, Object>();
@@ -599,6 +610,19 @@ public class JsonUtil {
     return m;
   }
 
+  private static Object toJsonMap(
+      SnapshotStatus snapshotStatus) {
+    final Map<String, Object> m = new TreeMap<String, Object>();
+    HdfsFileStatus status = snapshotStatus.getDirStatus();
+    m.put("snapshotID", snapshotStatus.getSnapshotID());
+    m.put("deletionStatus", snapshotStatus.isDeleted() ? "DELETED" : "ACTIVE");
+    m.put("fullPath", SnapshotStatus.getSnapshotPath(
+            DFSUtilClient.bytes2String(snapshotStatus.getParentFullPath()),
+        status.getLocalName()));
+    m.put("dirStatus", toJsonMap(snapshotStatus.getDirStatus()));
+    return m;
+  }
+
   private static Map<String, Object> toJsonMap(
       final BlockLocation blockLocation) throws IOException {
     if (blockLocation == null) {
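
A round-trip sketch of the serializer above (illustrative, not part of the
patch; it assumes HdfsFileStatus.Builder defaults are acceptable for a bare
directory status):

    HdfsFileStatus dir = new HdfsFileStatus.Builder()
        .isdir(true)
        .path(DFSUtilClient.string2Bytes("s1"))  // snapshot name as local name
        .build();
    SnapshotStatus st = new SnapshotStatus(
        dir, 1, false, DFSUtilClient.string2Bytes("/foo"));
    String js = JsonUtil.toJsonString(new SnapshotStatus[] {st});
    // js wraps one entry under "SnapshotList", with
    // "fullPath":"/foo/.snapshot/s1", "snapshotID":1, "deletionStatus":"ACTIVE"
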
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
index e9c5ad0..3fb68d4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
@@ -53,6 +53,7 @@ The HTTP REST API supports the complete [FileSystem](../../api/org/apache/hadoop
     * [`GETSTORAGEPOLICY`](#Get_Storage_Policy) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getStoragePolicy)
     * [`GETSNAPSHOTDIFF`](#Get_Snapshot_Diff)
     * [`GETSNAPSHOTTABLEDIRECTORYLIST`](#Get_Snapshottable_Directory_List)
+    * [`GETSNAPSHOTLIST`](#Get_Snapshot_List)
     * [`GETFILEBLOCKLOCATIONS`](#Get_File_Block_Locations) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getFileBlockLocations)
     * [`GETECPOLICY`](#Get_EC_Policy) (see [HDFSErasureCoding](./HDFSErasureCoding.html#Administrative_commands).getErasureCodingPolicy)
 *   HTTP PUT
@@ -1642,6 +1643,46 @@ See also: [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).renameSna
             ]
         }
 
+### Get Snapshot List
+
+* Submit a HTTP GET request.
+
+        curl -i GET "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=GETSNAPSHOTLIST"
+
+    The call lists the snapshots for a snapshottable directory. The client receives a response with a [`SnapshotList` JSON object](#SnapshotList_JSON_Schema):
+
+        HTTP/1.1 200 OK
+        Content-Type: application/json
+        Transfer-Encoding: chunked
+
+        {
+            "SnapshotList":
+            [
+                {
+                  "dirStatus":
+                    {
+                        "accessTime":0,
+                        "blockSize":0,
+                        "childrenNum":0,
+                        "fileId":16386,
+                        "group":"hadoop",
+                        "length":0,
+                        "modificationTime":1520761889225,
+                        "owner":"random",
+                        "pathSuffix":"bar",
+                        "permission":"755",
+                        "replication":0,
+                        "storagePolicy":0,
+                        "type":"DIRECTORY"
+                    },
+                  "fullPath":"/",
+                  "snapshotID":0,
+                  "deletionStatus":ACTIVE
+                }
+            ]
+        }
+
+
 Delegation Token Operations
 ---------------------------
 
@@ -2675,6 +2716,57 @@ var snapshottableDirectoryStatus =
   }
 }
 ```
+### SnapshotList JSON Schema
+
+```json
+{
+  "name": "SnapshotList",
+  "type": "object",
+  "properties":
+  {
+    "SnapshotList":
+    {
+      "description": "An array of SnapshotStatus",
+      "type"        : "array",
+      "items"       : snapshotStatus,
+      "required"    : true
+    }
+  }
+}
+```
+
+#### SnapshotStatus
+
+JavaScript syntax is used to define `snapshotStatus` so that it can be referred in `SnapshotList` JSON schema.
+
+```javascript
+var snapshotStatus =
+{
+  "type": "object",
+  "properties":
+  {
+    "dirStatus": fileStatusProperties,
+    "fullPath":
+    {
+      "description" : "Full path of the parent of the snapshot",
+      "type"        : "string",
+      "required"    : true
+    },
+    "snapshotID":
+    {
+      "description" : "snapshot ID for the snapshot",
+      "type"        : "integer",
+      "required"    : true
+    },
+    "deletionStatus":
+    {
+      "description" : "Status showing whether the snapshot is active or in deleted state",
+      "type"        : "string",
+      "required"    : true
+    }
+  }
+}
+```
 
 ### BlockLocations JSON Schema
 
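
Outside of the FileSystem API, the endpoint documented above can also be
exercised directly over HTTP; a sketch with plain JDK classes (host, port,
path, and user.name are assumptions):

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;

    public class RawGetSnapshotList {
      public static void main(String[] args) throws Exception {
        URL url = new URL("http://localhost:9870/webhdfs/v1/foo"
            + "?op=GETSNAPSHOTLIST&user.name=hdfs");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("GET");
        // GetOpParam.Op.GETSNAPSHOTLIST expects HTTP 200 with no redirect
        System.out.println(conn.getResponseCode());
        try (BufferedReader r = new BufferedReader(
            new InputStreamReader(conn.getInputStream()))) {
          System.out.println(r.readLine()); // one-line SnapshotList JSON
        }
      }
    }
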
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
index 3dd5f64..9e4f727 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
@@ -106,6 +106,7 @@ import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import static org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType;
 import static org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
+import org.apache.hadoop.hdfs.protocol.SnapshotStatus;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -844,6 +845,66 @@ public class TestWebHDFS {
   }
 
   @Test
+  public void testWebHdfsSnapshotList() throws Exception {
+    final Configuration conf = WebHdfsTestUtil.createConf();
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+      cluster.waitActive();
+      final DistributedFileSystem dfs = cluster.getFileSystem();
+      final WebHdfsFileSystem webHdfs = WebHdfsTestUtil
+          .getWebHdfsFileSystem(conf, WebHdfsConstants.WEBHDFS_SCHEME);
+      final Path foo = new Path("/foo");
+      dfs.mkdirs(foo);
+      dfs.allowSnapshot(foo);
+      webHdfs.createSnapshot(foo, "s1");
+      webHdfs.createSnapshot(foo, "s2");
+      webHdfs.deleteSnapshot(foo, "s2");
+      SnapshotStatus[] statuses = webHdfs.getSnapshotListing(foo);
+      SnapshotStatus[] dfsStatuses = dfs.getSnapshotListing(foo);
+
+      for (int i = 0; i < dfsStatuses.length; i++) {
+        Assert.assertEquals(statuses[i].getSnapshotID(),
+            dfsStatuses[i].getSnapshotID());
+        Assert.assertEquals(statuses[i].isDeleted(),
+            dfsStatuses[i].isDeleted());
+        Assert.assertTrue(Arrays.equals(statuses[i].getParentFullPath(),
+            dfsStatuses[i].getParentFullPath()));
+        Assert.assertEquals(dfsStatuses[i].getDirStatus().getChildrenNum(),
+            statuses[i].getDirStatus().getChildrenNum());
+        Assert.assertEquals(dfsStatuses[i].getDirStatus().getModificationTime(),
+            statuses[i].getDirStatus().getModificationTime());
+        Assert.assertEquals(dfsStatuses[i].getDirStatus().isDir(),
+            statuses[i].getDirStatus().isDir());
+        Assert.assertEquals(dfsStatuses[i].getDirStatus().getAccessTime(),
+            statuses[i].getDirStatus().getAccessTime());
+        Assert.assertEquals(dfsStatuses[i].getDirStatus().getPermission(),
+            statuses[i].getDirStatus().getPermission());
+        Assert.assertEquals(dfsStatuses[i].getDirStatus().getOwner(),
+            statuses[i].getDirStatus().getOwner());
+        Assert.assertEquals(dfsStatuses[i].getDirStatus().getGroup(),
+            statuses[i].getDirStatus().getGroup());
+        Assert.assertEquals(dfsStatuses[i].getDirStatus().getPath(),
+            statuses[i].getDirStatus().getPath());
+        Assert.assertEquals(dfsStatuses[i].getDirStatus().getFileId(),
+            statuses[i].getDirStatus().getFileId());
+        Assert.assertEquals(dfsStatuses[i].getDirStatus().hasAcl(),
+            statuses[i].getDirStatus().hasAcl());
+        Assert.assertEquals(dfsStatuses[i].getDirStatus().isEncrypted(),
+            statuses[i].getDirStatus().isEncrypted());
+        Assert.assertEquals(dfsStatuses[i].getDirStatus().isErasureCoded(),
+            statuses[i].getDirStatus().isErasureCoded());
+        Assert.assertEquals(dfsStatuses[i].getDirStatus().isSnapshotEnabled(),
+            statuses[i].getDirStatus().isSnapshotEnabled());
+      }
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
+
+  @Test
   public void testWebHdfsCreateNonRecursive() throws IOException, URISyntaxException {
     final Configuration conf = WebHdfsTestUtil.createConf();
     WebHdfsFileSystem webHdfs = null;

