Posted to common-commits@hadoop.apache.org by ae...@apache.org on 2019/08/13 23:37:37 UTC

[hadoop] branch ozone-0.4.1 updated (237a208 -> e6b744b)

This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a change to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


    from 237a208  HDDS-1891. Ozone fs shell command should work with default port when port number is not specified
     new 3eec5e1  HDDS-1961. TestStorageContainerManager#testScmProcessDatanodeHeartbeat is flaky.
     new e6b744b  HDDS-1917. TestOzoneRpcClientAbstract is failing.

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../hadoop/ozone/TestStorageContainerManager.java  | 25 +++++------
 .../client/rpc/TestOzoneRpcClientAbstract.java     | 52 +++++++++++-----------
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java |  7 ++-
 3 files changed, 43 insertions(+), 41 deletions(-)




[hadoop] 01/02: HDDS-1961. TestStorageContainerManager#testScmProcessDatanodeHeartbeat is flaky.

Posted by ae...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 3eec5e1d61918e83b1f94ebfa0d864826c03465f
Author: Nanda kumar <na...@apache.org>
AuthorDate: Tue Aug 13 22:04:03 2019 +0530

    HDDS-1961. TestStorageContainerManager#testScmProcessDatanodeHeartbeat is flaky.
    
    Signed-off-by: Anu Engineer <ae...@apache.org>
    (cherry picked from commit cb390dff87a86eae22c432576be90d39f84a6ee8)
---
 .../hadoop/ozone/TestStorageContainerManager.java  | 25 +++++++++++-----------
 1 file changed, 12 insertions(+), 13 deletions(-)

diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
index 3ac5993..55b184a 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
@@ -69,6 +69,7 @@ import org.apache.hadoop.hdds.scm.server.SCMStorageConfig;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
 import org.apache.hadoop.net.DNSToSwitchMapping;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.StaticMapping;
 import org.apache.hadoop.ozone.container.ContainerTestHelper;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
@@ -500,7 +501,9 @@ public class TestStorageContainerManager {
     String scmId = UUID.randomUUID().toString();
     conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
         StaticMapping.class, DNSToSwitchMapping.class);
-    StaticMapping.addNodeToRack(HddsUtils.getHostName(conf), "/rack1");
+    StaticMapping.addNodeToRack(NetUtils.normalizeHostNames(
+        Collections.singleton(HddsUtils.getHostName(conf))).get(0),
+        "/rack1");
 
     final int datanodeNum = 3;
     MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
@@ -520,21 +523,17 @@ public class TestStorageContainerManager {
       Thread.sleep(heartbeatCheckerIntervalMs * 2);
 
       List<DatanodeDetails> allNodes = scm.getScmNodeManager().getAllNodes();
-      Assert.assertTrue(allNodes.size() == datanodeNum);
-      for (int i = 0; i < allNodes.size(); i++) {
+      Assert.assertEquals(datanodeNum, allNodes.size());
+      for (DatanodeDetails node : allNodes) {
         DatanodeInfo datanodeInfo = (DatanodeInfo) scm.getScmNodeManager()
-            .getNodeByUuid(allNodes.get(i).getUuidString());
-        Assert.assertTrue((datanodeInfo.getLastHeartbeatTime() - start)
-            >= heartbeatCheckerIntervalMs);
-        Assert.assertTrue(datanodeInfo.getUuidString()
-            .equals(datanodeInfo.getNetworkName()));
-        Assert.assertTrue(datanodeInfo.getNetworkLocation()
-            .equals("/rack1"));
+            .getNodeByUuid(node.getUuidString());
+        Assert.assertTrue(datanodeInfo.getLastHeartbeatTime() > start);
+        Assert.assertEquals(datanodeInfo.getUuidString(),
+            datanodeInfo.getNetworkName());
+        Assert.assertEquals("/rack1", datanodeInfo.getNetworkLocation());
       }
     } finally {
-      if (cluster != null) {
-        cluster.shutdown();
-      }
+      cluster.shutdown();
     }
   }
 

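For context on the first hunk: StaticMapping is a plain name-to-rack map, so the key registered for "/rack1" must be the exact normalized string later used to look the node up; if the raw configured name and the normalized name differ, the lookup can fall back to the default rack and the "/rack1" assertion fails intermittently. A minimal standalone sketch of the call pattern the patch switches to, with a hard-coded "localhost" standing in for HddsUtils.getHostName(conf), would look roughly like this:

    // Standalone sketch of the normalization the patch relies on. The
    // "localhost" literal is a stand-in for HddsUtils.getHostName(conf);
    // the rest uses the same stock Hadoop classes the test imports.
    import java.util.Collections;

    import org.apache.hadoop.net.DNSToSwitchMapping;
    import org.apache.hadoop.net.NetUtils;
    import org.apache.hadoop.net.StaticMapping;

    public class RackMappingSketch {
      public static void main(String[] args) {
        String configuredHost = "localhost";  // stand-in for HddsUtils.getHostName(conf)

        // normalizeHostNames resolves each name to its address form (falling
        // back to the name itself if it cannot be resolved), so the key stored
        // in StaticMapping matches the normalized name used later.
        String normalized = NetUtils.normalizeHostNames(
            Collections.singleton(configuredHost)).get(0);
        StaticMapping.addNodeToRack(normalized, "/rack1");

        // Looking the node up with the same normalized key returns the rack;
        // an unknown key would fall back to the default rack instead.
        DNSToSwitchMapping mapping = new StaticMapping();
        System.out.println(
            mapping.resolve(Collections.singletonList(normalized)));
        // expected output: [/rack1]
      }
    }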



[hadoop] 02/02: HDDS-1917. TestOzoneRpcClientAbstract is failing.

Posted by ae...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit e6b744b8f8519342a4e4bdc15cb4088e13e855c6
Author: Nanda kumar <na...@apache.org>
AuthorDate: Tue Aug 6 14:32:13 2019 +0530

    HDDS-1917. TestOzoneRpcClientAbstract is failing.
    
    Signed-off-by: Anu Engineer <ae...@apache.org>
    (cherry picked from commit 3cff73aff476977775f6a48a36878191409f050f)
---
 .../client/rpc/TestOzoneRpcClientAbstract.java     | 52 +++++++++++-----------
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java |  7 ++-
 2 files changed, 31 insertions(+), 28 deletions(-)

diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
index 4e426ba..c203fec 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
@@ -26,6 +26,7 @@ import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.Optional;
 import java.util.TreeMap;
 import java.util.UUID;
 import java.util.concurrent.CountDownLatch;
@@ -2533,31 +2534,30 @@ public abstract class TestOzoneRpcClientAbstract {
           ACLType.READ_ACL, ACCESS);
       // Verify that operation successful.
       assertTrue(store.addAcl(ozObj, newAcl));
-      List<OzoneAcl> acls = store.getAcl(ozObj);
-
-      assertTrue(acls.size() == expectedAcls.size());
-      boolean aclVerified = false;
-      for(OzoneAcl acl: acls) {
-        if(acl.getName().equals(newAcl.getName())) {
-          assertTrue(acl.getAclList().contains(ACLType.READ_ACL));
-          aclVerified = true;
-        }
-      }
-      assertTrue("New acl expected but not found.", aclVerified);
-      aclVerified = false;
+
+      assertEquals(expectedAcls.size(), store.getAcl(ozObj).size());
+      final Optional<OzoneAcl> readAcl = store.getAcl(ozObj).stream()
+          .filter(acl -> acl.getName().equals(newAcl.getName())
+              && acl.getType().equals(newAcl.getType()))
+          .findFirst();
+      assertTrue("New acl expected but not found.", readAcl.isPresent());
+      assertTrue("READ_ACL should exist in current acls:"
+          + readAcl.get(),
+          readAcl.get().getAclList().contains(ACLType.READ_ACL));
+
 
       // Case:2 Remove newly added acl permission.
       assertTrue(store.removeAcl(ozObj, newAcl));
-      acls = store.getAcl(ozObj);
-      assertTrue(acls.size() == expectedAcls.size());
-      for(OzoneAcl acl: acls) {
-        if(acl.getName().equals(newAcl.getName())) {
-          assertFalse("READ_ACL should not exist in current acls:" +
-              acls, acl.getAclList().contains(ACLType.READ_ACL));
-          aclVerified = true;
-        }
-      }
-      assertTrue("New acl expected but not found.", aclVerified);
+
+      assertEquals(expectedAcls.size(), store.getAcl(ozObj).size());
+      final Optional<OzoneAcl> nonReadAcl = store.getAcl(ozObj).stream()
+          .filter(acl -> acl.getName().equals(newAcl.getName())
+              && acl.getType().equals(newAcl.getType()))
+          .findFirst();
+      assertTrue("New acl expected but not found.", nonReadAcl.isPresent());
+      assertFalse("READ_ACL should not exist in current acls:"
+              + nonReadAcl.get(),
+          nonReadAcl.get().getAclList().contains(ACLType.READ_ACL));
     } else {
       fail("Default acl should not be empty.");
     }
@@ -2570,17 +2570,17 @@ public abstract class TestOzoneRpcClientAbstract {
       store.removeAcl(ozObj, a);
     }
     List<OzoneAcl> newAcls = store.getAcl(ozObj);
-    assertTrue(newAcls.size() == 0);
+    assertEquals(0, newAcls.size());
 
     // Add acl's and then call getAcl.
     int aclCount = 0;
     for (OzoneAcl a : expectedAcls) {
       aclCount++;
       assertTrue(store.addAcl(ozObj, a));
-      assertTrue(store.getAcl(ozObj).size() == aclCount);
+      assertEquals(aclCount, store.getAcl(ozObj).size());
     }
     newAcls = store.getAcl(ozObj);
-    assertTrue(newAcls.size() == expectedAcls.size());
+    assertEquals(expectedAcls.size(), newAcls.size());
     List<OzoneAcl> finalNewAcls = newAcls;
     expectedAcls.forEach(a -> assertTrue(finalNewAcls.contains(a)));
 
@@ -2591,7 +2591,7 @@ public abstract class TestOzoneRpcClientAbstract {
         ACLType.ALL, ACCESS);
     store.setAcl(ozObj, Arrays.asList(ua, ug));
     newAcls = store.getAcl(ozObj);
-    assertTrue(newAcls.size() == 2);
+    assertEquals(2, newAcls.size());
     assertTrue(newAcls.contains(ua));
     assertTrue(newAcls.contains(ug));
   }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
index 557904a..06a4f2c 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -77,6 +77,7 @@ import org.apache.hadoop.ozone.om.helpers.OmPartInfo;
 import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
 import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo.OzoneAclType;
 import org.apache.hadoop.ozone.security.OzoneBlockTokenSecretManager;
 import org.apache.hadoop.ozone.security.acl.OzoneObj;
 import org.apache.hadoop.ozone.security.acl.RequestContext;
@@ -1404,7 +1405,8 @@ public class KeyManagerImpl implements KeyManager {
       List<OzoneAclInfo> newAcls = new ArrayList<>(keyInfo.getAcls());
       OzoneAclInfo newAcl = null;
       for(OzoneAclInfo a: keyInfo.getAcls()) {
-        if(a.getName().equals(acl.getName())) {
+        if (a.getName().equals(acl.getName()) &&
+            a.getType().equals(OzoneAclType.valueOf(acl.getType().name()))) {
           BitSet currentAcls = BitSet.valueOf(a.getRights().toByteArray());
           currentAcls.or(acl.getAclBitSet());
 
@@ -1481,7 +1483,8 @@ public class KeyManagerImpl implements KeyManager {
       } else {
         // Acl to be removed might be a subset of existing acls.
         for(OzoneAclInfo a: keyInfo.getAcls()) {
-          if(a.getName().equals(acl.getName())) {
+          if (a.getName().equals(acl.getName()) &&
+              a.getType().equals(OzoneAclType.valueOf(acl.getType().name()))) {
             BitSet currentAcls = BitSet.valueOf(a.getRights().toByteArray());
             acl.getAclBitSet().xor(currentAcls);
             currentAcls.and(acl.getAclBitSet());
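
The common thread in both files of this patch is the matching rule: an ACL entry is identified by the pair (name, type) rather than by name alone, presumably since a USER entry and a GROUP entry may carry the same name. A small self-contained sketch of that rule, using a stand-in Acl class instead of the real OzoneAcl/OzoneAclInfo types, would look roughly like this:

    // Self-contained illustration of matching on (name, type); the Acl class
    // below is a stand-in and not part of Ozone.
    import java.util.Arrays;
    import java.util.List;
    import java.util.Optional;

    public class AclMatchSketch {

      enum IdentityType { USER, GROUP }

      static final class Acl {
        final IdentityType type;
        final String name;
        Acl(IdentityType type, String name) {
          this.type = type;
          this.name = name;
        }
        @Override
        public String toString() {
          return type + ":" + name;
        }
      }

      // Mirrors the stream filter added in the test and the extra type check
      // added in KeyManagerImpl: both name and type must match.
      static Optional<Acl> find(List<Acl> acls, Acl wanted) {
        return acls.stream()
            .filter(a -> a.name.equals(wanted.name) && a.type == wanted.type)
            .findFirst();
      }

      public static void main(String[] args) {
        List<Acl> acls = Arrays.asList(
            new Acl(IdentityType.USER, "hadoop"),
            new Acl(IdentityType.GROUP, "hadoop"));
        // A name-only match could return either entry; matching on the pair
        // picks the intended USER entry.
        System.out.println(find(acls, new Acl(IdentityType.USER, "hadoop")).get());
        // expected output: USER:hadoop
      }
    }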


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org