You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by ar...@apache.org on 2018/05/16 18:30:52 UTC

[1/2] hadoop git commit: HDFS-13512. WebHdfs getFileStatus doesn't return ecPolicy. Contributed by Ajay Kumar.

Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 e12ddd939 -> 45dd200a2
  refs/heads/trunk 55d554908 -> 0fc988e6a


HDFS-13512. WebHdfs getFileStatus doesn't return ecPolicy. Contributed by Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0fc988e6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0fc988e6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0fc988e6

Branch: refs/heads/trunk
Commit: 0fc988e6a3dc6b435cbeea680549c06ef6147e3f
Parents: 55d5549
Author: Arpit Agarwal <ar...@apache.org>
Authored: Wed May 16 11:28:39 2018 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Wed May 16 11:28:39 2018 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hdfs/web/JsonUtilClient.java  | 16 +++++++
 .../org/apache/hadoop/hdfs/web/JsonUtil.java    | 19 ++++++++
 .../apache/hadoop/hdfs/web/TestJsonUtil.java    | 48 +++++++++++++++++++-
 3 files changed, 82 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0fc988e6/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
index 13c5226..9bb1846 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
@@ -48,6 +49,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
+import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
@@ -143,6 +145,19 @@ class JsonUtilClient {
       f.add(HdfsFileStatus.Flags.HAS_EC);
     }
 
+    Map<String, Object> ecPolicyObj = (Map) m.get("ecPolicyObj");
+    ErasureCodingPolicy ecPolicy = null;
+    if (ecPolicyObj != null) {
+      Map<String, String> extraOptions = (Map) ecPolicyObj.get("extraOptions");
+      ECSchema ecSchema = new ECSchema((String) ecPolicyObj.get("codecName"),
+          (int) ecPolicyObj.get("numDataUnits"),
+          (int) ecPolicyObj.get("numParityUnits"), extraOptions);
+      ecPolicy = new ErasureCodingPolicy((String) ecPolicyObj.get("name"),
+          ecSchema, (int) ecPolicyObj.get("cellSize"),
+          (byte) (int) ecPolicyObj.get("id"));
+
+    }
+
     final long aTime = ((Number) m.get("accessTime")).longValue();
     final long mTime = ((Number) m.get("modificationTime")).longValue();
     final long blockSize = ((Number) m.get("blockSize")).longValue();
@@ -170,6 +185,7 @@ class JsonUtilClient {
       .fileId(fileId)
       .children(childrenNum)
       .storagePolicy(storagePolicy)
+      .ecPolicy(ecPolicy)
       .build();
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0fc988e6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
index 43a252b..5c810bf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.web;
 
+import com.google.common.collect.ImmutableMap;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FileChecksum;
@@ -135,7 +136,10 @@ public class JsonUtil {
     if (status.isErasureCoded()) {
       m.put("ecBit", true);
       if (status.getErasureCodingPolicy() != null) {
+        // to maintain backward compatibility
         m.put("ecPolicy", status.getErasureCodingPolicy().getName());
+        // to re-construct HdfsFileStatus object via WebHdfs
+        m.put("ecPolicyObj", getEcPolicyAsMap(status.getErasureCodingPolicy()));
       }
     }
     if (status.isSnapshotEnabled()) {
@@ -152,6 +156,21 @@ public class JsonUtil {
     return m;
   }
 
+  private static Map<String, Object> getEcPolicyAsMap(
+      final ErasureCodingPolicy ecPolicy) {
+    /** Convert an ErasureCodingPolicy to a map. */
+    ImmutableMap.Builder<String, Object> builder = ImmutableMap.builder();
+    builder.put("name", ecPolicy.getName())
+        .put("cellSize", ecPolicy.getCellSize())
+        .put("numDataUnits", ecPolicy.getNumDataUnits())
+        .put("numParityUnits", ecPolicy.getNumParityUnits())
+        .put("codecName", ecPolicy.getCodecName())
+        .put("id", ecPolicy.getId())
+        .put("extraOptions", ecPolicy.getSchema().getExtraOptions());
+    return builder.build();
+
+  }
+
   /** Convert an ExtendedBlock to a Json map. */
   private static Map<String, Object> toJsonMap(final ExtendedBlock extendedblock) {
     if (extendedblock == null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0fc988e6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
index 2d9c8b1..e1dc271 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
@@ -23,6 +23,7 @@ import static org.apache.hadoop.fs.permission.FsAction.*;
 import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.*;
 
 import java.io.IOException;
+import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
@@ -40,8 +41,11 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus.Flags;
+import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.util.Time;
 import org.junit.Assert;
 import org.junit.Test;
@@ -66,9 +70,11 @@ public class TestJsonUtil {
   }
 
   @Test
-  public void testHdfsFileStatus() throws IOException {
+  public void testHdfsFileStatusWithEcPolicy() throws IOException {
     final long now = Time.now();
     final String parent = "/dir";
+    ErasureCodingPolicy dummyEcPolicy = new ErasureCodingPolicy("ecPolicy1",
+        new ECSchema("EcSchema", 1, 1), 1024 * 2, (byte) 1);
     final HdfsFileStatus status = new HdfsFileStatus.Builder()
         .length(1001L)
         .replication(3)
@@ -81,7 +87,46 @@ public class TestJsonUtil {
         .symlink(DFSUtil.string2Bytes("bar"))
         .path(DFSUtil.string2Bytes("foo"))
         .fileId(HdfsConstants.GRANDFATHER_INODE_ID)
+        .ecPolicy(dummyEcPolicy)
+        .flags(EnumSet.allOf(Flags.class))
         .build();
+
+    final FileStatus fstatus = toFileStatus(status, parent);
+    System.out.println("status  = " + status);
+    System.out.println("fstatus = " + fstatus);
+    final String json = JsonUtil.toJsonString(status, true);
+    System.out.println("json    = " + json.replace(",", ",\n  "));
+    final HdfsFileStatus s2 =
+        JsonUtilClient.toFileStatus((Map<?, ?>) READER.readValue(json), true);
+    final FileStatus fs2 = toFileStatus(s2, parent);
+    System.out.println("s2      = " + s2);
+    System.out.println("fs2     = " + fs2);
+    Assert.assertEquals(status.getErasureCodingPolicy(),
+        s2.getErasureCodingPolicy());
+    Assert.assertEquals(fstatus, fs2);
+  }
+
+  @Test
+  public void testHdfsFileStatusWithoutEcPolicy() throws IOException {
+    final long now = Time.now();
+    final String parent = "/dir";
+    ErasureCodingPolicy dummyEcPolicy = new ErasureCodingPolicy("ecPolicy1",
+        new ECSchema("EcSchema", 1, 1), 1024 * 2, (byte) 1);
+    final HdfsFileStatus status = new HdfsFileStatus.Builder()
+        .length(1001L)
+        .replication(3)
+        .blocksize(1L << 26)
+        .mtime(now)
+        .atime(now + 10)
+        .perm(new FsPermission((short) 0644))
+        .owner("user")
+        .group("group")
+        .symlink(DFSUtil.string2Bytes("bar"))
+        .path(DFSUtil.string2Bytes("foo"))
+        .fileId(HdfsConstants.GRANDFATHER_INODE_ID)
+        .build();
+    Assert.assertTrue(status.getErasureCodingPolicy() == null);
+
     final FileStatus fstatus = toFileStatus(status, parent);
     System.out.println("status  = " + status);
     System.out.println("fstatus = " + fstatus);
@@ -92,6 +137,7 @@ public class TestJsonUtil {
     final FileStatus fs2 = toFileStatus(s2, parent);
     System.out.println("s2      = " + s2);
     System.out.println("fs2     = " + fs2);
+
     Assert.assertEquals(fstatus, fs2);
   }
   


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[2/2] hadoop git commit: HDFS-13512. WebHdfs getFileStatus doesn't return ecPolicy. Contributed by Ajay Kumar.

Posted by ar...@apache.org.
HDFS-13512. WebHdfs getFileStatus doesn't return ecPolicy. Contributed by Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/45dd200a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/45dd200a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/45dd200a

Branch: refs/heads/branch-3.1
Commit: 45dd200a2c74b57f7f9945af7b0a393f6eceb346
Parents: e12ddd9
Author: Arpit Agarwal <ar...@apache.org>
Authored: Wed May 16 11:28:39 2018 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Wed May 16 11:28:51 2018 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hdfs/web/JsonUtilClient.java  | 16 +++++++
 .../org/apache/hadoop/hdfs/web/JsonUtil.java    | 19 ++++++++
 .../apache/hadoop/hdfs/web/TestJsonUtil.java    | 48 +++++++++++++++++++-
 3 files changed, 82 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/45dd200a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
index 13c5226..9bb1846 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
@@ -48,6 +49,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
+import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
@@ -143,6 +145,19 @@ class JsonUtilClient {
       f.add(HdfsFileStatus.Flags.HAS_EC);
     }
 
+    Map<String, Object> ecPolicyObj = (Map) m.get("ecPolicyObj");
+    ErasureCodingPolicy ecPolicy = null;
+    if (ecPolicyObj != null) {
+      Map<String, String> extraOptions = (Map) ecPolicyObj.get("extraOptions");
+      ECSchema ecSchema = new ECSchema((String) ecPolicyObj.get("codecName"),
+          (int) ecPolicyObj.get("numDataUnits"),
+          (int) ecPolicyObj.get("numParityUnits"), extraOptions);
+      ecPolicy = new ErasureCodingPolicy((String) ecPolicyObj.get("name"),
+          ecSchema, (int) ecPolicyObj.get("cellSize"),
+          (byte) (int) ecPolicyObj.get("id"));
+
+    }
+
     final long aTime = ((Number) m.get("accessTime")).longValue();
     final long mTime = ((Number) m.get("modificationTime")).longValue();
     final long blockSize = ((Number) m.get("blockSize")).longValue();
@@ -170,6 +185,7 @@ class JsonUtilClient {
       .fileId(fileId)
       .children(childrenNum)
       .storagePolicy(storagePolicy)
+      .ecPolicy(ecPolicy)
       .build();
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/45dd200a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
index 43a252b..5c810bf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.web;
 
+import com.google.common.collect.ImmutableMap;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FileChecksum;
@@ -135,7 +136,10 @@ public class JsonUtil {
     if (status.isErasureCoded()) {
       m.put("ecBit", true);
       if (status.getErasureCodingPolicy() != null) {
+        // to maintain backward compatibility
         m.put("ecPolicy", status.getErasureCodingPolicy().getName());
+        // to re-construct HdfsFileStatus object via WebHdfs
+        m.put("ecPolicyObj", getEcPolicyAsMap(status.getErasureCodingPolicy()));
       }
     }
     if (status.isSnapshotEnabled()) {
@@ -152,6 +156,21 @@ public class JsonUtil {
     return m;
   }
 
+  private static Map<String, Object> getEcPolicyAsMap(
+      final ErasureCodingPolicy ecPolicy) {
+    /** Convert an ErasureCodingPolicy to a map. */
+    ImmutableMap.Builder<String, Object> builder = ImmutableMap.builder();
+    builder.put("name", ecPolicy.getName())
+        .put("cellSize", ecPolicy.getCellSize())
+        .put("numDataUnits", ecPolicy.getNumDataUnits())
+        .put("numParityUnits", ecPolicy.getNumParityUnits())
+        .put("codecName", ecPolicy.getCodecName())
+        .put("id", ecPolicy.getId())
+        .put("extraOptions", ecPolicy.getSchema().getExtraOptions());
+    return builder.build();
+
+  }
+
   /** Convert an ExtendedBlock to a Json map. */
   private static Map<String, Object> toJsonMap(final ExtendedBlock extendedblock) {
     if (extendedblock == null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/45dd200a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
index 2d9c8b1..e1dc271 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
@@ -23,6 +23,7 @@ import static org.apache.hadoop.fs.permission.FsAction.*;
 import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.*;
 
 import java.io.IOException;
+import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
@@ -40,8 +41,11 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus.Flags;
+import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.util.Time;
 import org.junit.Assert;
 import org.junit.Test;
@@ -66,9 +70,11 @@ public class TestJsonUtil {
   }
 
   @Test
-  public void testHdfsFileStatus() throws IOException {
+  public void testHdfsFileStatusWithEcPolicy() throws IOException {
     final long now = Time.now();
     final String parent = "/dir";
+    ErasureCodingPolicy dummyEcPolicy = new ErasureCodingPolicy("ecPolicy1",
+        new ECSchema("EcSchema", 1, 1), 1024 * 2, (byte) 1);
     final HdfsFileStatus status = new HdfsFileStatus.Builder()
         .length(1001L)
         .replication(3)
@@ -81,7 +87,46 @@ public class TestJsonUtil {
         .symlink(DFSUtil.string2Bytes("bar"))
         .path(DFSUtil.string2Bytes("foo"))
         .fileId(HdfsConstants.GRANDFATHER_INODE_ID)
+        .ecPolicy(dummyEcPolicy)
+        .flags(EnumSet.allOf(Flags.class))
         .build();
+
+    final FileStatus fstatus = toFileStatus(status, parent);
+    System.out.println("status  = " + status);
+    System.out.println("fstatus = " + fstatus);
+    final String json = JsonUtil.toJsonString(status, true);
+    System.out.println("json    = " + json.replace(",", ",\n  "));
+    final HdfsFileStatus s2 =
+        JsonUtilClient.toFileStatus((Map<?, ?>) READER.readValue(json), true);
+    final FileStatus fs2 = toFileStatus(s2, parent);
+    System.out.println("s2      = " + s2);
+    System.out.println("fs2     = " + fs2);
+    Assert.assertEquals(status.getErasureCodingPolicy(),
+        s2.getErasureCodingPolicy());
+    Assert.assertEquals(fstatus, fs2);
+  }
+
+  @Test
+  public void testHdfsFileStatusWithoutEcPolicy() throws IOException {
+    final long now = Time.now();
+    final String parent = "/dir";
+    ErasureCodingPolicy dummyEcPolicy = new ErasureCodingPolicy("ecPolicy1",
+        new ECSchema("EcSchema", 1, 1), 1024 * 2, (byte) 1);
+    final HdfsFileStatus status = new HdfsFileStatus.Builder()
+        .length(1001L)
+        .replication(3)
+        .blocksize(1L << 26)
+        .mtime(now)
+        .atime(now + 10)
+        .perm(new FsPermission((short) 0644))
+        .owner("user")
+        .group("group")
+        .symlink(DFSUtil.string2Bytes("bar"))
+        .path(DFSUtil.string2Bytes("foo"))
+        .fileId(HdfsConstants.GRANDFATHER_INODE_ID)
+        .build();
+    Assert.assertTrue(status.getErasureCodingPolicy() == null);
+
     final FileStatus fstatus = toFileStatus(status, parent);
     System.out.println("status  = " + status);
     System.out.println("fstatus = " + fstatus);
@@ -92,6 +137,7 @@ public class TestJsonUtil {
     final FileStatus fs2 = toFileStatus(s2, parent);
     System.out.println("s2      = " + s2);
     System.out.println("fs2     = " + fs2);
+
     Assert.assertEquals(fstatus, fs2);
   }
   


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org