Posted to common-commits@hadoop.apache.org by el...@apache.org on 2019/08/08 14:12:39 UTC

[hadoop] branch ozone-0.4.1 updated: HDDS-1926. The new caching layer is used for old OM requests but not updated

This is an automated email from the ASF dual-hosted git repository.

elek pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/ozone-0.4.1 by this push:
     new eb828dc  HDDS-1926. The new caching layer is used for old OM requests but not updated
eb828dc is described below

commit eb828dc1e40bf526b9b7d3e0a40b228b68bc76c8
Author: Bharat Viswanadham <bh...@apache.org>
AuthorDate: Thu Aug 8 15:52:04 2019 +0200

    HDDS-1926. The new caching layer is used for old OM requests but not updated
    
    Closes #1247
---
 .../hadoop/ozone/om/TestOzoneManagerRestart.java   | 214 +++++++++++++++++++++
 .../hadoop/ozone/om/OmMetadataManagerImpl.java     |  18 +-
 2 files changed, 230 insertions(+), 2 deletions(-)
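
For context, the behavioral change in OmMetadataManagerImpl is small: the
cleanup policy for the volume and bucket table caches is now derived from the
Ratis setting instead of being hard-coded to NEVER. A minimal sketch of that
selection (the class and constant names below are the ones used in the diff;
the surrounding wiring is simplified):

    // Simplified sketch mirroring the OmMetadataManagerImpl hunk below.
    boolean isRatisEnabled = conf.getBoolean(
        OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY,
        OMConfigKeys.OZONE_OM_RATIS_ENABLE_DEFAULT);

    // HA (Ratis) requests keep the cache up to date, so entries are never
    // cleaned up. The old non-HA handlers do not update the cache, so its
    // entries are cleaned up manually instead.
    TableCacheImpl.CacheCleanupPolicy cleanupPolicy = isRatisEnabled
        ? TableCacheImpl.CacheCleanupPolicy.NEVER
        : TableCacheImpl.CacheCleanupPolicy.MANUAL;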

diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestart.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestart.java
new file mode 100644
index 0000000..76841dd
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestart.java
@@ -0,0 +1,214 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om;
+
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.hadoop.hdds.client.ReplicationFactor;
+import org.apache.hadoop.hdds.client.ReplicationType;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.client.ObjectStore;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.OzoneKey;
+import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
+import org.apache.hadoop.ozone.web.handlers.UserArgs;
+import org.apache.hadoop.ozone.web.utils.OzoneUtils;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.UUID;
+
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS_WILDCARD;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS;
+import static org.junit.Assert.fail;
+
+/**
+ * Tests client operations after the cluster starts, then restarts the cluster
+ * and verifies that subsequent client operations behave as expected.
+ */
+public class TestOzoneManagerRestart {
+  private MiniOzoneCluster cluster = null;
+  private UserArgs userArgs;
+  private OzoneConfiguration conf;
+  private String clusterId;
+  private String scmId;
+  private String omId;
+
+  @Rule
+  public Timeout timeout = new Timeout(60000);
+
+  /**
+   * Create a MiniOzoneCluster for testing.
+   * <p>
+   * ACLs are enabled and a short open-key expiry threshold is configured.
+   *
+   * @throws Exception
+   */
+  @Before
+  public void init() throws Exception {
+    conf = new OzoneConfiguration();
+    clusterId = UUID.randomUUID().toString();
+    scmId = UUID.randomUUID().toString();
+    omId = UUID.randomUUID().toString();
+    conf.setBoolean(OZONE_ACL_ENABLED, true);
+    conf.setInt(OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS, 2);
+    conf.set(OZONE_ADMINISTRATORS, OZONE_ADMINISTRATORS_WILDCARD);
+    cluster =  MiniOzoneCluster.newBuilder(conf)
+        .setClusterId(clusterId)
+        .setScmId(scmId)
+        .setOmId(omId)
+        .build();
+    cluster.waitForClusterToBeReady();
+    userArgs = new UserArgs(null, OzoneUtils.getRequestID(),
+        null, null, null, null);
+  }
+
+  /**
+   * Shut down the MiniOzoneCluster.
+   */
+  @After
+  public void shutdown() {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  @Test
+  public void testRestartOMWithVolumeOperation() throws Exception {
+    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+
+    OzoneClient client = cluster.getClient();
+
+    ObjectStore objectStore = client.getObjectStore();
+
+    objectStore.createVolume(volumeName);
+
+    OzoneVolume ozoneVolume = objectStore.getVolume(volumeName);
+    Assert.assertTrue(ozoneVolume.getName().equals(volumeName));
+
+    cluster.restartOzoneManager();
+    cluster.restartStorageContainerManager(true);
+
+    // After restart, creating the same volume again should fail.
+    try {
+      objectStore.createVolume(volumeName);
+      fail("testRestartOM failed");
+    } catch (IOException ex) {
+      GenericTestUtils.assertExceptionContains("VOLUME_ALREADY_EXISTS", ex);
+    }
+
+    // Get Volume.
+    ozoneVolume = objectStore.getVolume(volumeName);
+    Assert.assertTrue(ozoneVolume.getName().equals(volumeName));
+
+  }
+
+
+  @Test
+  public void testRestartOMWithBucketOperation() throws Exception {
+    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
+
+    OzoneClient client = cluster.getClient();
+
+    ObjectStore objectStore = client.getObjectStore();
+
+    objectStore.createVolume(volumeName);
+
+    OzoneVolume ozoneVolume = objectStore.getVolume(volumeName);
+    Assert.assertTrue(ozoneVolume.getName().equals(volumeName));
+
+    ozoneVolume.createBucket(bucketName);
+
+    OzoneBucket ozoneBucket = ozoneVolume.getBucket(bucketName);
+    Assert.assertTrue(ozoneBucket.getName().equals(bucketName));
+
+    cluster.restartOzoneManager();
+    cluster.restartStorageContainerManager(true);
+
+    // After restart, creating the same bucket again should fail.
+    try {
+      ozoneVolume.createBucket(bucketName);
+      fail("testRestartOMWithBucketOperation failed");
+    } catch (IOException ex) {
+      GenericTestUtils.assertExceptionContains("BUCKET_ALREADY_EXISTS", ex);
+    }
+
+    // Get bucket.
+    ozoneBucket = ozoneVolume.getBucket(bucketName);
+    Assert.assertTrue(ozoneBucket.getName().equals(bucketName));
+
+  }
+
+
+  @Test
+  public void testRestartOMWithKeyOperation() throws Exception {
+    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
+    String key = "key" + RandomStringUtils.randomNumeric(5);
+
+    OzoneClient client = cluster.getClient();
+
+    ObjectStore objectStore = client.getObjectStore();
+
+    objectStore.createVolume(volumeName);
+
+    OzoneVolume ozoneVolume = objectStore.getVolume(volumeName);
+    Assert.assertTrue(ozoneVolume.getName().equals(volumeName));
+
+    ozoneVolume.createBucket(bucketName);
+
+    OzoneBucket ozoneBucket = ozoneVolume.getBucket(bucketName);
+    Assert.assertTrue(ozoneBucket.getName().equals(bucketName));
+
+    String data = "random data";
+    OzoneOutputStream ozoneOutputStream = ozoneBucket.createKey(key,
+        data.length(), ReplicationType.RATIS, ReplicationFactor.ONE,
+        new HashMap<>());
+
+    ozoneOutputStream.write(data.getBytes(), 0, data.length());
+    ozoneOutputStream.close();
+
+    cluster.restartOzoneManager();
+    cluster.restartStorageContainerManager(true);
+
+
+    // Keys may be overwritten, so re-creating the key is not tested here.
+    // Instead, verify that the key still exists after the restart.
+
+    // Get key.
+    OzoneKey ozoneKey = ozoneBucket.getKey(key);
+    Assert.assertTrue(ozoneKey.getName().equals(key));
+    Assert.assertTrue(ozoneKey.getReplicationType().equals(
+        ReplicationType.RATIS));
+  }
+
+
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
index c7d6bb4..460e115 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
@@ -140,12 +140,19 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
   private Table s3SecretTable;
   private Table dTokenTable;
   private Table prefixTable;
+  private boolean isRatisEnabled;
 
   public OmMetadataManagerImpl(OzoneConfiguration conf) throws IOException {
     this.lock = new OzoneManagerLock(conf);
     this.openKeyExpireThresholdMS = 1000L * conf.getInt(
         OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS,
         OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS_DEFAULT);
+    // TODO: This is a temporary check. Once fully implemented, all OM state
+    //  change should go through Ratis - be it standalone (for non-HA) or
+    //  replicated (for HA).
+    isRatisEnabled = conf.getBoolean(
+        OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY,
+        OMConfigKeys.OZONE_OM_RATIS_ENABLE_DEFAULT);
     start(conf);
   }
 
@@ -269,14 +276,21 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
     userTable =
         this.store.getTable(USER_TABLE, String.class, VolumeList.class);
     checkTableStatus(userTable, USER_TABLE);
+
+    // Cache cleanup policies are now available. The non-HA code path does not
+    // maintain the cache, so its cleanup policy is set to MANUAL.
+    TableCacheImpl.CacheCleanupPolicy cleanupPolicy = isRatisEnabled ?
+        TableCacheImpl.CacheCleanupPolicy.NEVER :
+        TableCacheImpl.CacheCleanupPolicy.MANUAL;
+
     volumeTable =
         this.store.getTable(VOLUME_TABLE, String.class, OmVolumeArgs.class,
-            TableCacheImpl.CacheCleanupPolicy.NEVER);
+            cleanupPolicy);
     checkTableStatus(volumeTable, VOLUME_TABLE);
 
     bucketTable =
         this.store.getTable(BUCKET_TABLE, String.class, OmBucketInfo.class,
-            TableCacheImpl.CacheCleanupPolicy.NEVER);
+            cleanupPolicy);
 
     checkTableStatus(bucketTable, BUCKET_TABLE);
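
To run only the new integration test locally, a standard surefire-style
invocation along these lines should work (the module directory is taken from
the paths in the diff; adjust the command to your usual Ozone build flow):

    mvn -pl hadoop-ozone/integration-test test -Dtest=TestOzoneManagerRestart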
 


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org