You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by we...@apache.org on 2019/10/03 23:59:17 UTC
[hadoop] branch branch-3.1 updated: HDFS-14124. EC : Support EC
Commands (set/get/unset EcPolicy) via WebHdfs. Contributed by Ayush Saxena.
This is an automated email from the ASF dual-hosted git repository.
weichiu pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git
The following commit(s) were added to refs/heads/branch-3.1 by this push:
new 1a7cb7a HDFS-14124. EC : Support EC Commands (set/get/unset EcPolicy) via WebHdfs. Contributed by Ayush Saxena.
1a7cb7a is described below
commit 1a7cb7aba199ad0aaf36a7840021db9917b97b34
Author: Vinayakumar B <vi...@apache.org>
AuthorDate: Tue Dec 11 17:59:04 2018 +0530
HDFS-14124. EC : Support EC Commands (set/get/unset EcPolicy) via WebHdfs. Contributed by Ayush Saxena.
(cherry picked from commit 39dc7345b80e27ba8bd1ff4c19ca241aef5ac0fc)
(cherry picked from commit abe14d32d41b4a2bf630e8a89d794d729cee119b)
Conflicts:
hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
---
.../org/apache/hadoop/hdfs/web/JsonUtilClient.java | 11 +++
.../apache/hadoop/hdfs/web/WebHdfsFileSystem.java | 33 ++++++++
.../hadoop/hdfs/web/resources/GetOpParam.java | 2 +
.../hadoop/hdfs/web/resources/PostOpParam.java | 2 +
.../hadoop/hdfs/web/resources/PutOpParam.java | 1 +
.../web/resources/NamenodeWebHdfsMethods.java | 15 +++-
.../hadoop-hdfs/src/site/markdown/WebHDFS.md | 89 +++++++++++++++++++++-
.../org/apache/hadoop/hdfs/web/TestWebHDFS.java | 18 ++++-
8 files changed, 164 insertions(+), 7 deletions(-)
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
index 458e013..2d1d411 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
@@ -709,6 +709,17 @@ class JsonUtilClient {
replicationFallbacks, copyOnCreateFile.booleanValue());
}
+ public static ErasureCodingPolicy toECPolicy(Map<?, ?> m) {
+ byte id = ((Number) m.get("id")).byteValue();
+ String name = (String) m.get("name");
+ String codec = (String) m.get("codecName");
+ int cellsize = ((Number) m.get("cellSize")).intValue();
+ int dataunits = ((Number) m.get("numDataUnits")).intValue();
+ int parityunits = ((Number) m.get("numParityUnits")).intValue();
+ ECSchema ecs = new ECSchema(codec, dataunits, parityunits);
+ return new ErasureCodingPolicy(name, ecs, cellsize, id);
+ }
+
private static StorageType[] toStorageTypes(List<?> list) {
if (list == null) {
return null;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index c2f6c39..6fa7c97 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -93,6 +93,7 @@ import org.apache.hadoop.hdfs.HdfsKMSUtil;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
@@ -1309,15 +1310,47 @@ public class WebHdfsFileSystem extends FileSystem
}
public void enableECPolicy(String policyName) throws IOException {
+ statistics.incrementWriteOps(1);
+ storageStatistics.incrementOpCounter(OpType.ENABLE_EC_POLICY);
final HttpOpParam.Op op = PutOpParam.Op.ENABLEECPOLICY;
new FsPathRunner(op, null, new ECPolicyParam(policyName)).run();
}
public void disableECPolicy(String policyName) throws IOException {
+ statistics.incrementWriteOps(1);
+ storageStatistics.incrementOpCounter(OpType.DISABLE_EC_POLICY);
final HttpOpParam.Op op = PutOpParam.Op.DISABLEECPOLICY;
new FsPathRunner(op, null, new ECPolicyParam(policyName)).run();
}
+ public void setErasureCodingPolicy(Path p, String policyName)
+ throws IOException {
+ statistics.incrementWriteOps(1);
+ storageStatistics.incrementOpCounter(OpType.SET_EC_POLICY);
+ final HttpOpParam.Op op = PutOpParam.Op.SETECPOLICY;
+ new FsPathRunner(op, p, new ECPolicyParam(policyName)).run();
+ }
+
+ public void unsetErasureCodingPolicy(Path p) throws IOException {
+ statistics.incrementWriteOps(1);
+ storageStatistics.incrementOpCounter(OpType.UNSET_EC_POLICY);
+ final HttpOpParam.Op op = PostOpParam.Op.UNSETECPOLICY;
+ new FsPathRunner(op, p).run();
+ }
+
+ public ErasureCodingPolicy getErasureCodingPolicy(Path p)
+ throws IOException {
+ statistics.incrementReadOps(1);
+ storageStatistics.incrementOpCounter(OpType.GET_EC_POLICY);
+    final HttpOpParam.Op op = GetOpParam.Op.GETECPOLICY;
+ return new FsPathResponseRunner<ErasureCodingPolicy>(op, p) {
+ @Override
+ ErasureCodingPolicy decodeResponse(Map<?, ?> json) throws IOException {
+ return JsonUtilClient.toECPolicy((Map<?, ?>) json);
+ }
+ }.run();
+ }
+
@Override
public Path createSnapshot(final Path path, final String snapshotName)
throws IOException {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
index 7cf572f..6dff47a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
@@ -44,6 +44,8 @@ public class GetOpParam extends HttpOpParam<GetOpParam.Op> {
GETALLSTORAGEPOLICY(false, HttpURLConnection.HTTP_OK),
GETSTORAGEPOLICY(false, HttpURLConnection.HTTP_OK),
+ GETECPOLICY(false, HttpURLConnection.HTTP_OK),
+
NULL(false, HttpURLConnection.HTTP_NOT_IMPLEMENTED),
CHECKACCESS(false, HttpURLConnection.HTTP_OK),
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/PostOpParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/PostOpParam.java
index 305db46..cda1c11 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/PostOpParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/PostOpParam.java
@@ -29,6 +29,8 @@ public class PostOpParam extends HttpOpParam<PostOpParam.Op> {
TRUNCATE(false, HttpURLConnection.HTTP_OK),
+ UNSETECPOLICY(false, HttpURLConnection.HTTP_OK),
+
UNSETSTORAGEPOLICY(false, HttpURLConnection.HTTP_OK),
NULL(false, HttpURLConnection.HTTP_NOT_IMPLEMENTED);
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java
index 7bbd361..75b1899 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java
@@ -48,6 +48,7 @@ public class PutOpParam extends HttpOpParam<PutOpParam.Op> {
ENABLEECPOLICY(false, HttpURLConnection.HTTP_OK),
DISABLEECPOLICY(false, HttpURLConnection.HTTP_OK),
+ SETECPOLICY(false, HttpURLConnection.HTTP_OK),
ALLOWSNAPSHOT(false, HttpURLConnection.HTTP_OK),
DISALLOWSNAPSHOT(false, HttpURLConnection.HTTP_OK),
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
index 13f9521..62a643a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
@@ -78,6 +78,7 @@ import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
@@ -812,12 +813,14 @@ public class NamenodeWebHdfsMethods {
validateOpParams(op, ecpolicy);
cp.enableErasureCodingPolicy(ecpolicy.getValue());
return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
-
case DISABLEECPOLICY:
validateOpParams(op, ecpolicy);
cp.disableErasureCodingPolicy(ecpolicy.getValue());
return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
-
+ case SETECPOLICY:
+ validateOpParams(op, ecpolicy);
+ cp.setErasureCodingPolicy(fullpath, ecpolicy.getValue());
+ return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
default:
throw new UnsupportedOperationException(op + " is not supported");
}
@@ -945,6 +948,9 @@ public class NamenodeWebHdfsMethods {
cp.unsetStoragePolicy(fullpath);
return Response.ok().build();
}
+ case UNSETECPOLICY:
+ cp.unsetErasureCodingPolicy(fullpath);
+ return Response.ok().build();
default:
throw new UnsupportedOperationException(op + " is not supported");
}
@@ -1246,6 +1252,11 @@ public class NamenodeWebHdfsMethods {
final String js = JsonUtil.toJsonString(storagePolicy);
return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
}
+ case GETECPOLICY: {
+ ErasureCodingPolicy ecpolicy = cp.getErasureCodingPolicy(fullpath);
+ final String js = JsonUtil.toJsonString(ecpolicy);
+ return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
+ }
case GETSERVERDEFAULTS: {
// Since none of the server defaults values are hot reloaded, we can
// cache the output of serverDefaults.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
index 051ba9f..1253a04 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
@@ -53,6 +53,7 @@ The HTTP REST API supports the complete [FileSystem](../../api/org/apache/hadoop
* [`GETSTORAGEPOLICY`](#Get_Storage_Policy) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getStoragePolicy)
* [`GETSNAPSHOTDIFF`](#Get_Snapshot_Diff)
* [`GETSNAPSHOTTABLEDIRECTORYLIST`](#Get_Snapshottable_Directory_List)
+ * [`GETECPOLICY`](#Get_EC_Policy) (see [HDFSErasureCoding](./HDFSErasureCoding.html#Administrative_commands).getErasureCodingPolicy)
* HTTP PUT
* [`CREATE`](#Create_and_Write_to_a_File) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).create)
* [`MKDIRS`](#Make_a_Directory) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).mkdirs)
@@ -71,11 +72,13 @@ The HTTP REST API supports the complete [FileSystem](../../api/org/apache/hadoop
* [`SETSTORAGEPOLICY`](#Set_Storage_Policy) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).setStoragePolicy)
* [`ENABLEECPOLICY`](#Enable_EC_Policy) (see [HDFSErasureCoding](./HDFSErasureCoding.html#Administrative_commands).enablePolicy)
* [`DISABLEECPOLICY`](#Disable_EC_Policy) (see [HDFSErasureCoding](./HDFSErasureCoding.html#Administrative_commands).disablePolicy)
+ * [`SETECPOLICY`](#Set_EC_Policy) (see [HDFSErasureCoding](./HDFSErasureCoding.html#Administrative_commands).setErasureCodingPolicy)
* HTTP POST
* [`APPEND`](#Append_to_a_File) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).append)
* [`CONCAT`](#Concat_Files) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).concat)
* [`TRUNCATE`](#Truncate_a_File) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).truncate)
* [`UNSETSTORAGEPOLICY`](#Unset_Storage_Policy) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).unsetStoragePolicy)
+ * [`UNSETECPOLICY`](#Unset_EC_Policy) (see [HDFSErasureCoding](./HDFSErasureCoding.html#Administrative_commands).unsetErasureCodingPolicy)
* HTTP DELETE
* [`DELETE`](#Delete_a_FileDirectory) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).delete)
* [`DELETESNAPSHOT`](#Delete_Snapshot) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).deleteSnapshot)
@@ -1333,7 +1336,7 @@ Erasure Coding Operations
HTTP/1.1 200 OK
Content-Length: 0
-See also: [HDFSErasureCoding](./HDFSErasureCoding.html#Administrative_commands).enablePolicy)
+See also: [HDFSErasureCoding](./HDFSErasureCoding.html#Administrative_commands).enablePolicy
### Disable EC Policy
@@ -1347,7 +1350,68 @@ See also: [HDFSErasureCoding](./HDFSErasureCoding.html#Administrative_commands).
HTTP/1.1 200 OK
Content-Length: 0
-See also: [HDFSErasureCoding](./HDFSErasureCoding.html#Administrative_commands).disablePolicy)
+See also: [HDFSErasureCoding](./HDFSErasureCoding.html#Administrative_commands).disablePolicy
+
+### Set EC Policy
+
+* Submit a HTTP PUT request.
+
+ curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=SETECPOLICY
+ &ecpolicy=<policy>"
+
+ The client receives a response with zero content length:
+
+ HTTP/1.1 200 OK
+ Content-Length: 0
+
+See also: [HDFSErasureCoding](./HDFSErasureCoding.html#Administrative_commands).setErasureCodingPolicy
+
+### Get EC Policy
+
+* Submit a HTTP GET request.
+
+ curl -i -X GET "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=GETECPOLICY
+ "
+
+    The client receives a response with an [`ECPolicy` JSON object](#ECPolicy_JSON_Schema):
+
+
+ {
+ "name": "RS-10-4-1024k",
+ "schema":
+ {
+ "codecName": "rs",
+ "numDataUnits": 10,
+ "numParityUnits": 4,
+ "extraOptions": {}
+          },
+ "cellSize": 1048576,
+ "id":5,
+ "codecname":"rs",
+ "numDataUnits": 10,
+ "numParityUnits": 4,
+ "replicationpolicy":false,
+ "systemPolicy":true
+
+ }
+
+
+
+See also: [HDFSErasureCoding](./HDFSErasureCoding.html#Administrative_commands).getErasureCodingPolicy
+
+### Unset EC Policy
+
+* Submit a HTTP POST request.
+
+ curl -i -X POST "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=UNSETECPOLICY
+ "
+
+ The client receives a response with zero content length:
+
+ HTTP/1.1 200 OK
+ Content-Length: 0
+
+See also: [HDFSErasureCoding](./HDFSErasureCoding.html#Administrative_commands).unsetErasureCodingPolicy
Snapshot Operations
-------------------
@@ -2307,6 +2371,26 @@ var blockStoragePolicyProperties =
}
};
```
+### ECPolicy JSON Schema
+
+```json
+{
+ "name": "RS-10-4-1024k",
+    "schema": {
+        "codecName": "rs",
+        "numDataUnits": 10,
+        "numParityUnits": 4,
+        "extraOptions": {}
+    },
+    "cellSize": 1048576,
+ "id":5,
+ "codecname":"rs",
+ "numDataUnits": 10,
+ "numParityUnits": 4,
+ "replicationpolicy":false,
+ "systemPolicy":true
+}
+```
### BlockStoragePolicies JSON Schema
@@ -2378,6 +2462,7 @@ A `BlockStoragePolicies` JSON object represents an array of `BlockStoragePolicy`
}
```
+
#### DiffReport Entries
JavaScript syntax is used to define `diffReportEntries` so that it can be referred in `SnapshotDiffReport` JSON schema.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
index 082664d..a22a23e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
@@ -1573,7 +1573,7 @@ public class TestWebHDFS {
// Test For Enable/Disable EC Policy in DFS.
@Test
- public void testEnableDisableECPolicy() throws Exception {
+ public void testECPolicyCommands() throws Exception {
Configuration conf = new HdfsConfiguration();
try (MiniDFSCluster cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(0).build()) {
@@ -1582,12 +1582,24 @@ public class TestWebHDFS {
final WebHdfsFileSystem webHdfs = WebHdfsTestUtil
.getWebHdfsFileSystem(conf, WebHdfsConstants.WEBHDFS_SCHEME);
String policy = "RS-10-4-1024k";
-
// Check for Enable EC policy via WEBHDFS.
dfs.disableErasureCodingPolicy(policy);
checkECPolicyState(dfs.getAllErasureCodingPolicies(), policy, "disable");
- webHdfs.enableECPolicy("RS-10-4-1024k");
+ webHdfs.enableECPolicy(policy);
checkECPolicyState(dfs.getAllErasureCodingPolicies(), policy, "enable");
+ Path dir = new Path("/tmp");
+ dfs.mkdirs(dir);
+ // Check for Set EC policy via WEBHDFS
+ assertNull(dfs.getErasureCodingPolicy(dir));
+ webHdfs.setErasureCodingPolicy(dir, policy);
+ assertEquals(policy, dfs.getErasureCodingPolicy(dir).getName());
+
+ // Check for Get EC policy via WEBHDFS
+ assertEquals(policy, webHdfs.getErasureCodingPolicy(dir).getName());
+
+ // Check for Unset EC policy via WEBHDFS
+ webHdfs.unsetErasureCodingPolicy(dir);
+ assertNull(dfs.getErasureCodingPolicy(dir));
// Check for Disable EC policy via WEBHDFS.
webHdfs.disableECPolicy(policy);
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org