Posted to common-commits@hadoop.apache.org by bh...@apache.org on 2018/07/09 20:19:49 UTC

[13/50] [abbrv] hadoop git commit: HDDS-167. Rename KeySpaceManager to OzoneManager. Contributed by Arpit Agarwal.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/ozoneManager.js
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/ozoneManager.js b/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/ozoneManager.js
new file mode 100644
index 0000000..ca03554
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/ozoneManager.js
@@ -0,0 +1,110 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+(function () {
+    "use strict";
+
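+    // JMX housekeeping attributes ('name', 'modelerType', 'tag.*') carry no
+    // metric values, so they are filtered out of the dashboard.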
+    var isIgnoredJmxKeys = function (key) {
+        return key === 'name' || key === 'modelerType' || key.match(/tag.*/);
+    };
+
+    angular.module('ozoneManager', ['ozone', 'nvd3']);
+    angular.module('ozoneManager').config(function ($routeProvider) {
+        $routeProvider
+            .when("/metrics/ozoneManager", {
+                template: "<om-metrics></om-metrics>"
+            });
+    });
+    angular.module('ozoneManager').component('omMetrics', {
+        templateUrl: 'om-metrics.html',
+        controller: function ($http) {
+            var ctrl = this;
+
+            ctrl.graphOptions = {
+                chart: {
+                    type: 'pieChart',
+                    height: 500,
+                    x: function (d) {
+                        return d.key;
+                    },
+                    y: function (d) {
+                        return d.value;
+                    },
+                    showLabels: true,
+                    labelType: 'value',
+                    duration: 500,
+                    labelThreshold: 0.01,
+                    valueFormat: function(d) {
+                        return d3.format('d')(d);
+                    },
+                    legend: {
+                        margin: {
+                            top: 5,
+                            right: 35,
+                            bottom: 5,
+                            left: 0
+                        }
+                    }
+                }
+            };
+
+
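+            // Fetch OzoneManager metrics from Hadoop's JMX JSON servlet.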
+            $http.get("jmx?qry=Hadoop:service=OzoneManager,name=OMMetrics")
+                .then(function (result) {
+
+                    var groupedMetrics = {others: [], nums: {}};
+                    var metrics = result.data.beans[0];
+                    for (var key in metrics) {
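+                        // Split counter names such as "NumKeyCreateFails" into a
+                        // type ("Key"), an operation name ("Create") and an
+                        // optional "Fails" suffix marking failure counters.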
+                        var numericalStatistic = key.match(/Num([A-Z][a-z]+)(.+?)(Fails)?$/);
+                        if (numericalStatistic) {
+                            var type = numericalStatistic[1];
+                            var name = numericalStatistic[2];
+                            var failed = numericalStatistic[3];
+                            groupedMetrics.nums[type] = groupedMetrics.nums[type] || {
+                                    failures: [],
+                                    all: []
+                                };
+                            if (failed) {
+                                groupedMetrics.nums[type].failures.push({
+                                    key: name,
+                                    value: metrics[key]
+                                });
+                            } else {
+                                if (name === "Ops") {
+                                    groupedMetrics.nums[type].ops = metrics[key];
+                                } else {
+                                    groupedMetrics.nums[type].all.push({
+                                        key: name,
+                                        value: metrics[key]
+                                    });
+                                }
+                            }
+                        } else if (isIgnoredJmxKeys(key)) {
+                            //ignore
+                        } else {
+                            groupedMetrics.others.push({
+                                'key': key,
+                                'value': metrics[key]
+                            });
+                        }
+                    }
+                    ctrl.metrics = groupedMetrics;
+                });
+        }
+    });
+
+})();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/TestBucketManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/TestBucketManagerImpl.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/TestBucketManagerImpl.java
deleted file mode 100644
index 0b43bf9..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/TestBucketManagerImpl.java
+++ /dev/null
@@ -1,395 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.ksm;
-
-import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.ozone.ksm.helpers.KsmBucketArgs;
-import org.apache.hadoop.ozone.ksm.helpers.KsmBucketInfo;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.ksm.exceptions.KSMException;
-import org.apache.hadoop.ozone.ksm.exceptions
-    .KSMException.ResultCodes;
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.junit.Assert;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-import org.junit.runner.RunWith;
-import org.mockito.Mockito;
-import org.mockito.invocation.InvocationOnMock;
-import org.mockito.runners.MockitoJUnitRunner;
-import org.mockito.stubbing.Answer;
-
-import java.io.IOException;
-import java.util.Map;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.LinkedList;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-
-import static org.mockito.Mockito.any;
-
-/**
- * Tests BucketManagerImpl, mocks KSMMetadataManager for testing.
- */
-@RunWith(MockitoJUnitRunner.class)
-public class TestBucketManagerImpl {
-  @Rule
-  public ExpectedException thrown = ExpectedException.none();
-
-  private KSMMetadataManager getMetadataManagerMock(String... volumesToCreate)
-      throws IOException {
-    KSMMetadataManager metadataManager = Mockito.mock(KSMMetadataManager.class);
-    Map<String, byte[]> metadataDB = new HashMap<>();
-    ReadWriteLock lock = new ReentrantReadWriteLock();
-
-    Mockito.when(metadataManager.writeLock()).thenReturn(lock.writeLock());
-    Mockito.when(metadataManager.readLock()).thenReturn(lock.readLock());
-    Mockito.when(metadataManager.getVolumeKey(any(String.class))).thenAnswer(
-        (InvocationOnMock invocation) ->
-            DFSUtil.string2Bytes(
-                OzoneConsts.KSM_VOLUME_PREFIX + invocation.getArguments()[0]));
-    Mockito.when(metadataManager
-        .getBucketKey(any(String.class), any(String.class))).thenAnswer(
-            (InvocationOnMock invocation) ->
-                DFSUtil.string2Bytes(
-                    OzoneConsts.KSM_VOLUME_PREFIX
-                        + invocation.getArguments()[0]
-                        + OzoneConsts.KSM_BUCKET_PREFIX
-                        + invocation.getArguments()[1]));
-
-    Mockito.doAnswer(
-        new Answer<Boolean>() {
-          @Override
-          public Boolean answer(InvocationOnMock invocation)
-              throws Throwable {
-            String keyRootName =  OzoneConsts.KSM_KEY_PREFIX
-                + invocation.getArguments()[0]
-                + OzoneConsts.KSM_KEY_PREFIX
-                + invocation.getArguments()[1]
-                + OzoneConsts.KSM_KEY_PREFIX;
-            Iterator<String> keyIterator = metadataDB.keySet().iterator();
-            while(keyIterator.hasNext()) {
-              if(keyIterator.next().startsWith(keyRootName)) {
-                return false;
-              }
-            }
-            return true;
-          }
-        }).when(metadataManager).isBucketEmpty(any(String.class),
-        any(String.class));
-
-    Mockito.doAnswer(
-        new Answer<Void>() {
-          @Override
-          public Void answer(InvocationOnMock invocation) throws Throwable {
-            metadataDB.put(DFSUtil.bytes2String(
-                (byte[])invocation.getArguments()[0]),
-                (byte[])invocation.getArguments()[1]);
-            return null;
-          }
-        }).when(metadataManager).put(any(byte[].class), any(byte[].class));
-
-    Mockito.when(metadataManager.get(any(byte[].class))).thenAnswer(
-        (InvocationOnMock invocation) ->
-            metadataDB.get(DFSUtil.bytes2String(
-                (byte[])invocation.getArguments()[0]))
-    );
-    Mockito.doAnswer(
-        new Answer<Void>() {
-          @Override
-          public Void answer(InvocationOnMock invocation) throws Throwable {
-            metadataDB.remove(DFSUtil.bytes2String(
-                (byte[])invocation.getArguments()[0]));
-            return null;
-          }
-        }).when(metadataManager).delete(any(byte[].class));
-
-    for(String volumeName : volumesToCreate) {
-      byte[] dummyVolumeInfo = DFSUtil.string2Bytes(volumeName);
-      metadataDB.put(OzoneConsts.KSM_VOLUME_PREFIX + volumeName,
-                     dummyVolumeInfo);
-    }
-    return metadataManager;
-  }
-
-  @Test
-  public void testCreateBucketWithoutVolume() throws IOException {
-    thrown.expectMessage("Volume doesn't exist");
-    KSMMetadataManager metaMgr = getMetadataManagerMock();
-    try {
-      BucketManager bucketManager = new BucketManagerImpl(metaMgr);
-      KsmBucketInfo bucketInfo = KsmBucketInfo.newBuilder()
-          .setVolumeName("sampleVol")
-          .setBucketName("bucketOne")
-          .build();
-      bucketManager.createBucket(bucketInfo);
-    } catch(KSMException ksmEx) {
-      Assert.assertEquals(ResultCodes.FAILED_VOLUME_NOT_FOUND,
-          ksmEx.getResult());
-      throw ksmEx;
-    }
-  }
-
-  @Test
-  public void testCreateBucket() throws IOException {
-    KSMMetadataManager metaMgr = getMetadataManagerMock("sampleVol");
-    BucketManager bucketManager = new BucketManagerImpl(metaMgr);
-    KsmBucketInfo bucketInfo = KsmBucketInfo.newBuilder()
-        .setVolumeName("sampleVol")
-        .setBucketName("bucketOne")
-        .build();
-    bucketManager.createBucket(bucketInfo);
-    Assert.assertNotNull(bucketManager.getBucketInfo("sampleVol", "bucketOne"));
-  }
-
-  @Test
-  public void testCreateAlreadyExistingBucket() throws IOException {
-    thrown.expectMessage("Bucket already exist");
-    KSMMetadataManager metaMgr = getMetadataManagerMock("sampleVol");
-    try {
-      BucketManager bucketManager = new BucketManagerImpl(metaMgr);
-      KsmBucketInfo bucketInfo = KsmBucketInfo.newBuilder()
-          .setVolumeName("sampleVol")
-          .setBucketName("bucketOne")
-          .build();
-      bucketManager.createBucket(bucketInfo);
-      bucketManager.createBucket(bucketInfo);
-    } catch(KSMException ksmEx) {
-      Assert.assertEquals(ResultCodes.FAILED_BUCKET_ALREADY_EXISTS,
-          ksmEx.getResult());
-      throw ksmEx;
-    }
-  }
-
-  @Test
-  public void testGetBucketInfoForInvalidBucket() throws IOException {
-    thrown.expectMessage("Bucket not found");
-    try {
-      KSMMetadataManager metaMgr = getMetadataManagerMock("sampleVol");
-      BucketManager bucketManager = new BucketManagerImpl(metaMgr);
-      bucketManager.getBucketInfo("sampleVol", "bucketOne");
-    } catch(KSMException ksmEx) {
-      Assert.assertEquals(ResultCodes.FAILED_BUCKET_NOT_FOUND,
-          ksmEx.getResult());
-      throw ksmEx;
-    }
-  }
-
-  @Test
-  public void testGetBucketInfo() throws IOException {
-    KSMMetadataManager metaMgr = getMetadataManagerMock("sampleVol");
-    BucketManager bucketManager = new BucketManagerImpl(metaMgr);
-    KsmBucketInfo bucketInfo = KsmBucketInfo.newBuilder()
-        .setVolumeName("sampleVol")
-        .setBucketName("bucketOne")
-        .setStorageType(StorageType.DISK)
-        .setIsVersionEnabled(false)
-        .build();
-    bucketManager.createBucket(bucketInfo);
-    KsmBucketInfo result = bucketManager.getBucketInfo(
-        "sampleVol", "bucketOne");
-    Assert.assertEquals("sampleVol", result.getVolumeName());
-    Assert.assertEquals("bucketOne", result.getBucketName());
-    Assert.assertEquals(StorageType.DISK,
-        result.getStorageType());
-    Assert.assertEquals(false, result.getIsVersionEnabled());
-  }
-
-  @Test
-  public void testSetBucketPropertyAddACL() throws IOException {
-    KSMMetadataManager metaMgr = getMetadataManagerMock("sampleVol");
-    List<OzoneAcl> acls = new LinkedList<>();
-    OzoneAcl ozoneAcl = new OzoneAcl(OzoneAcl.OzoneACLType.USER,
-        "root", OzoneAcl.OzoneACLRights.READ);
-    acls.add(ozoneAcl);
-    BucketManager bucketManager = new BucketManagerImpl(metaMgr);
-    KsmBucketInfo bucketInfo = KsmBucketInfo.newBuilder()
-        .setVolumeName("sampleVol")
-        .setBucketName("bucketOne")
-        .setAcls(acls)
-        .setStorageType(StorageType.DISK)
-        .setIsVersionEnabled(false)
-        .build();
-    bucketManager.createBucket(bucketInfo);
-    KsmBucketInfo result = bucketManager.getBucketInfo(
-        "sampleVol", "bucketOne");
-    Assert.assertEquals("sampleVol", result.getVolumeName());
-    Assert.assertEquals("bucketOne", result.getBucketName());
-    Assert.assertEquals(1, result.getAcls().size());
-    List<OzoneAcl> addAcls = new LinkedList<>();
-    OzoneAcl newAcl = new OzoneAcl(OzoneAcl.OzoneACLType.USER,
-        "ozone", OzoneAcl.OzoneACLRights.READ);
-    addAcls.add(newAcl);
-    KsmBucketArgs bucketArgs = KsmBucketArgs.newBuilder()
-        .setVolumeName("sampleVol")
-        .setBucketName("bucketOne")
-        .setAddAcls(addAcls)
-        .build();
-    bucketManager.setBucketProperty(bucketArgs);
-    KsmBucketInfo updatedResult = bucketManager.getBucketInfo(
-        "sampleVol", "bucketOne");
-    Assert.assertEquals(2, updatedResult.getAcls().size());
-    Assert.assertTrue(updatedResult.getAcls().contains(newAcl));
-  }
-
-  @Test
-  public void testSetBucketPropertyRemoveACL() throws IOException {
-    KSMMetadataManager metaMgr = getMetadataManagerMock("sampleVol");
-    List<OzoneAcl> acls = new LinkedList<>();
-    OzoneAcl aclOne = new OzoneAcl(OzoneAcl.OzoneACLType.USER,
-        "root", OzoneAcl.OzoneACLRights.READ);
-    OzoneAcl aclTwo = new OzoneAcl(OzoneAcl.OzoneACLType.USER,
-        "ozone", OzoneAcl.OzoneACLRights.READ);
-    acls.add(aclOne);
-    acls.add(aclTwo);
-    BucketManager bucketManager = new BucketManagerImpl(metaMgr);
-    KsmBucketInfo bucketInfo = KsmBucketInfo.newBuilder()
-        .setVolumeName("sampleVol")
-        .setBucketName("bucketOne")
-        .setAcls(acls)
-        .setStorageType(StorageType.DISK)
-        .setIsVersionEnabled(false)
-        .build();
-    bucketManager.createBucket(bucketInfo);
-    KsmBucketInfo result = bucketManager.getBucketInfo(
-        "sampleVol", "bucketOne");
-    Assert.assertEquals(2, result.getAcls().size());
-    List<OzoneAcl> removeAcls = new LinkedList<>();
-    removeAcls.add(aclTwo);
-    KsmBucketArgs bucketArgs = KsmBucketArgs.newBuilder()
-        .setVolumeName("sampleVol")
-        .setBucketName("bucketOne")
-        .setRemoveAcls(removeAcls)
-        .build();
-    bucketManager.setBucketProperty(bucketArgs);
-    KsmBucketInfo updatedResult = bucketManager.getBucketInfo(
-        "sampleVol", "bucketOne");
-    Assert.assertEquals(1, updatedResult.getAcls().size());
-    Assert.assertFalse(updatedResult.getAcls().contains(aclTwo));
-  }
-
-  @Test
-  public void testSetBucketPropertyChangeStorageType() throws IOException {
-    KSMMetadataManager metaMgr = getMetadataManagerMock("sampleVol");
-    BucketManager bucketManager = new BucketManagerImpl(metaMgr);
-    KsmBucketInfo bucketInfo = KsmBucketInfo.newBuilder()
-        .setVolumeName("sampleVol")
-        .setBucketName("bucketOne")
-        .setStorageType(StorageType.DISK)
-        .build();
-    bucketManager.createBucket(bucketInfo);
-    KsmBucketInfo result = bucketManager.getBucketInfo(
-        "sampleVol", "bucketOne");
-    Assert.assertEquals(StorageType.DISK,
-        result.getStorageType());
-    KsmBucketArgs bucketArgs = KsmBucketArgs.newBuilder()
-        .setVolumeName("sampleVol")
-        .setBucketName("bucketOne")
-        .setStorageType(StorageType.SSD)
-        .build();
-    bucketManager.setBucketProperty(bucketArgs);
-    KsmBucketInfo updatedResult = bucketManager.getBucketInfo(
-        "sampleVol", "bucketOne");
-    Assert.assertEquals(StorageType.SSD,
-        updatedResult.getStorageType());
-  }
-
-  @Test
-  public void testSetBucketPropertyChangeVersioning() throws IOException {
-    KSMMetadataManager metaMgr = getMetadataManagerMock("sampleVol");
-    BucketManager bucketManager = new BucketManagerImpl(metaMgr);
-    KsmBucketInfo bucketInfo = KsmBucketInfo.newBuilder()
-        .setVolumeName("sampleVol")
-        .setBucketName("bucketOne")
-        .setIsVersionEnabled(false)
-        .build();
-    bucketManager.createBucket(bucketInfo);
-    KsmBucketInfo result = bucketManager.getBucketInfo(
-        "sampleVol", "bucketOne");
-    Assert.assertFalse(result.getIsVersionEnabled());
-    KsmBucketArgs bucketArgs = KsmBucketArgs.newBuilder()
-        .setVolumeName("sampleVol")
-        .setBucketName("bucketOne")
-        .setIsVersionEnabled(true)
-        .build();
-    bucketManager.setBucketProperty(bucketArgs);
-    KsmBucketInfo updatedResult = bucketManager.getBucketInfo(
-        "sampleVol", "bucketOne");
-    Assert.assertTrue(updatedResult.getIsVersionEnabled());
-  }
-
-  @Test
-  public void testDeleteBucket() throws IOException {
-    thrown.expectMessage("Bucket not found");
-    KSMMetadataManager metaMgr = getMetadataManagerMock("sampleVol");
-    BucketManager bucketManager = new BucketManagerImpl(metaMgr);
-    for(int i = 0; i < 5; i++) {
-      KsmBucketInfo bucketInfo = KsmBucketInfo.newBuilder()
-          .setVolumeName("sampleVol")
-          .setBucketName("bucket_" + i)
-          .build();
-      bucketManager.createBucket(bucketInfo);
-    }
-    for(int i = 0; i < 5; i++) {
-      Assert.assertEquals("bucket_" + i,
-          bucketManager.getBucketInfo(
-              "sampleVol", "bucket_" + i).getBucketName());
-    }
-    try {
-      bucketManager.deleteBucket("sampleVol", "bucket_1");
-      Assert.assertNotNull(bucketManager.getBucketInfo(
-          "sampleVol", "bucket_2"));
-    } catch(IOException ex) {
-      Assert.fail(ex.getMessage());
-    }
-    try {
-      bucketManager.getBucketInfo("sampleVol", "bucket_1");
-    } catch(KSMException ksmEx) {
-      Assert.assertEquals(ResultCodes.FAILED_BUCKET_NOT_FOUND,
-          ksmEx.getResult());
-      throw ksmEx;
-    }
-  }
-
-  @Test
-  public void testDeleteNonEmptyBucket() throws IOException {
-    thrown.expectMessage("Bucket is not empty");
-    KSMMetadataManager metaMgr = getMetadataManagerMock("sampleVol");
-    BucketManager bucketManager = new BucketManagerImpl(metaMgr);
-    KsmBucketInfo bucketInfo = KsmBucketInfo.newBuilder()
-        .setVolumeName("sampleVol")
-        .setBucketName("bucketOne")
-        .build();
-    bucketManager.createBucket(bucketInfo);
-    //Create keys in bucket
-    metaMgr.put(DFSUtil.string2Bytes("/sampleVol/bucketOne/key_one"),
-        DFSUtil.string2Bytes("value_one"));
-    metaMgr.put(DFSUtil.string2Bytes("/sampleVol/bucketOne/key_two"),
-        DFSUtil.string2Bytes("value_two"));
-    try {
-      bucketManager.deleteBucket("sampleVol", "bucketOne");
-    } catch(KSMException ksmEx) {
-      Assert.assertEquals(ResultCodes.FAILED_BUCKET_NOT_EMPTY,
-          ksmEx.getResult());
-      throw ksmEx;
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/TestChunkStreams.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/TestChunkStreams.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/TestChunkStreams.java
deleted file mode 100644
index e6158bd..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/TestChunkStreams.java
+++ /dev/null
@@ -1,234 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.ksm;
-
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.hadoop.ozone.client.io.ChunkGroupInputStream;
-import org.apache.hadoop.ozone.client.io.ChunkGroupOutputStream;
-import org.apache.hadoop.hdds.scm.storage.ChunkInputStream;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.OutputStream;
-import java.io.IOException;
-import java.util.ArrayList;
-
-import static org.junit.Assert.assertEquals;
-
-/**
- * This class tests ChunkGroupInputStream and ChunkGroupOutStream.
- */
-public class TestChunkStreams {
-
-  @Rule
-  public ExpectedException exception = ExpectedException.none();
-
-  /**
-   * This test uses ByteArrayOutputStream as the underlying stream to test
-   * the correctness of ChunkGroupOutputStream.
-   *
-   * @throws Exception
-   */
-  @Test
-  public void testWriteGroupOutputStream() throws Exception {
-    try (ChunkGroupOutputStream groupOutputStream =
-             new ChunkGroupOutputStream()) {
-      ArrayList<OutputStream> outputStreams = new ArrayList<>();
-
-      // 5 byte streams, each 100 bytes. write 500 bytes means writing to each
-      // of them with 100 bytes.
-      for (int i = 0; i < 5; i++) {
-        ByteArrayOutputStream out = new ByteArrayOutputStream(100);
-        outputStreams.add(out);
-        groupOutputStream.addStream(out, 100);
-      }
-      assertEquals(0, groupOutputStream.getByteOffset());
-
-      String dataString = RandomStringUtils.randomAscii(500);
-      byte[] data = dataString.getBytes();
-      groupOutputStream.write(data, 0, data.length);
-      assertEquals(500, groupOutputStream.getByteOffset());
-
-      String res = "";
-      int offset = 0;
-      for (OutputStream stream : outputStreams) {
-        String subString = stream.toString();
-        res += subString;
-        assertEquals(dataString.substring(offset, offset + 100), subString);
-        offset += 100;
-      }
-      assertEquals(dataString, res);
-    }
-  }
-
-  @Test
-  public void testErrorWriteGroupOutputStream() throws Exception {
-    try (ChunkGroupOutputStream groupOutputStream =
-             new ChunkGroupOutputStream()) {
-      ArrayList<OutputStream> outputStreams = new ArrayList<>();
-
-      // 5 byte streams, each 100 bytes. write 500 bytes means writing to each
-      // of them with 100 bytes. all 5 streams makes up a ChunkGroupOutputStream
-      // with a total of 500 bytes in size
-      for (int i = 0; i < 5; i++) {
-        ByteArrayOutputStream out = new ByteArrayOutputStream(100);
-        outputStreams.add(out);
-        groupOutputStream.addStream(out, 100);
-      }
-      assertEquals(0, groupOutputStream.getByteOffset());
-
-      // first writes of 100 bytes should succeed
-      groupOutputStream.write(RandomStringUtils.randomAscii(100).getBytes());
-      assertEquals(100, groupOutputStream.getByteOffset());
-
-      // second writes of 500 bytes should fail, as there should be only 400
-      // bytes space left
-      // TODO : if we decide to take the 400 bytes instead in the future,
-      // other add more informative error code rather than exception, need to
-      // change this part.
-      exception.expect(Exception.class);
-      groupOutputStream.write(RandomStringUtils.randomAscii(500).getBytes());
-      assertEquals(100, groupOutputStream.getByteOffset());
-    }
-  }
-
-  @Test
-  public void testReadGroupInputStream() throws Exception {
-    try (ChunkGroupInputStream groupInputStream = new ChunkGroupInputStream()) {
-      ArrayList<ChunkInputStream> inputStreams = new ArrayList<>();
-
-      String dataString = RandomStringUtils.randomAscii(500);
-      byte[] buf = dataString.getBytes();
-      int offset = 0;
-      for (int i = 0; i < 5; i++) {
-        int tempOffset = offset;
-        ChunkInputStream in =
-            new ChunkInputStream(null, null, null, new ArrayList<>(), null) {
-              private ByteArrayInputStream in =
-                  new ByteArrayInputStream(buf, tempOffset, 100);
-
-              @Override
-              public void seek(long pos) throws IOException {
-                throw new UnsupportedOperationException();
-              }
-
-              @Override
-              public long getPos() throws IOException {
-                throw new UnsupportedOperationException();
-              }
-
-              @Override
-              public boolean seekToNewSource(long targetPos)
-                  throws IOException {
-                throw new UnsupportedOperationException();
-              }
-
-              @Override
-              public int read() throws IOException {
-                return in.read();
-              }
-
-              @Override
-              public int read(byte[] b, int off, int len) throws IOException {
-                return in.read(b, off, len);
-              }
-            };
-        inputStreams.add(in);
-        offset += 100;
-        groupInputStream.addStream(in, 100);
-      }
-
-      byte[] resBuf = new byte[500];
-      int len = groupInputStream.read(resBuf, 0, 500);
-
-      assertEquals(500, len);
-      assertEquals(dataString, new String(resBuf));
-    }
-  }
-
-  @Test
-  public void testErrorReadGroupInputStream() throws Exception {
-    try (ChunkGroupInputStream groupInputStream = new ChunkGroupInputStream()) {
-      ArrayList<ChunkInputStream> inputStreams = new ArrayList<>();
-
-      String dataString = RandomStringUtils.randomAscii(500);
-      byte[] buf = dataString.getBytes();
-      int offset = 0;
-      for (int i = 0; i < 5; i++) {
-        int tempOffset = offset;
-        ChunkInputStream in =
-            new ChunkInputStream(null, null, null, new ArrayList<>(), null) {
-              private ByteArrayInputStream in =
-                  new ByteArrayInputStream(buf, tempOffset, 100);
-
-              @Override
-              public void seek(long pos) throws IOException {
-                throw new UnsupportedOperationException();
-              }
-
-              @Override
-              public long getPos() throws IOException {
-                throw new UnsupportedOperationException();
-              }
-
-              @Override
-              public boolean seekToNewSource(long targetPos)
-                  throws IOException {
-                throw new UnsupportedOperationException();
-              }
-
-              @Override
-              public int read() throws IOException {
-                return in.read();
-              }
-
-              @Override
-              public int read(byte[] b, int off, int len) throws IOException {
-                return in.read(b, off, len);
-              }
-            };
-        inputStreams.add(in);
-        offset += 100;
-        groupInputStream.addStream(in, 100);
-      }
-
-      byte[] resBuf = new byte[600];
-      // read 300 bytes first
-      int len = groupInputStream.read(resBuf, 0, 340);
-      assertEquals(3, groupInputStream.getCurrentStreamIndex());
-      assertEquals(60, groupInputStream.getRemainingOfIndex(3));
-      assertEquals(340, len);
-      assertEquals(dataString.substring(0, 340),
-          new String(resBuf).substring(0, 340));
-
-      // read following 300 bytes, but only 200 left
-      len = groupInputStream.read(resBuf, 340, 260);
-      assertEquals(5, groupInputStream.getCurrentStreamIndex());
-      assertEquals(0, groupInputStream.getRemainingOfIndex(4));
-      assertEquals(160, len);
-      assertEquals(dataString, new String(resBuf).substring(0, 500));
-
-      // further read should get EOF
-      len = groupInputStream.read(resBuf, 0, 1);
-      // reached EOF, further read should get -1
-      assertEquals(-1, len);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManagerHttpServer.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManagerHttpServer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManagerHttpServer.java
deleted file mode 100644
index b263df5..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManagerHttpServer.java
+++ /dev/null
@@ -1,141 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.ksm;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.web.URLConnectionFactory;
-import org.apache.hadoop.http.HttpConfig;
-import org.apache.hadoop.http.HttpConfig.Policy;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-import org.junit.runners.Parameterized.Parameters;
-
-import java.io.File;
-import java.net.InetSocketAddress;
-import java.net.URL;
-import java.net.URLConnection;
-import java.util.Arrays;
-import java.util.Collection;
-
-/**
- * Test http server os KSM with various HTTP option.
- */
-@RunWith(value = Parameterized.class)
-public class TestKeySpaceManagerHttpServer {
-  private static final String BASEDIR = GenericTestUtils
-      .getTempPath(TestKeySpaceManagerHttpServer.class.getSimpleName());
-  private static String keystoresDir;
-  private static String sslConfDir;
-  private static Configuration conf;
-  private static URLConnectionFactory connectionFactory;
-
-  @Parameters public static Collection<Object[]> policy() {
-    Object[][] params = new Object[][] {
-        {HttpConfig.Policy.HTTP_ONLY},
-        {HttpConfig.Policy.HTTPS_ONLY},
-        {HttpConfig.Policy.HTTP_AND_HTTPS} };
-    return Arrays.asList(params);
-  }
-
-  private final HttpConfig.Policy policy;
-
-  public TestKeySpaceManagerHttpServer(Policy policy) {
-    super();
-    this.policy = policy;
-  }
-
-  @BeforeClass public static void setUp() throws Exception {
-    File base = new File(BASEDIR);
-    FileUtil.fullyDelete(base);
-    base.mkdirs();
-    conf = new Configuration();
-    keystoresDir = new File(BASEDIR).getAbsolutePath();
-    sslConfDir = KeyStoreTestUtil.getClasspathDir(
-        TestKeySpaceManagerHttpServer.class);
-    KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
-    connectionFactory =
-        URLConnectionFactory.newDefaultURLConnectionFactory(conf);
-    conf.set(DFSConfigKeys.DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY,
-        KeyStoreTestUtil.getClientSSLConfigFileName());
-    conf.set(DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
-        KeyStoreTestUtil.getServerSSLConfigFileName());
-  }
-
-  @AfterClass public static void tearDown() throws Exception {
-    FileUtil.fullyDelete(new File(BASEDIR));
-    KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
-  }
-
-  @Test public void testHttpPolicy() throws Exception {
-    conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, policy.name());
-    conf.set(ScmConfigKeys.OZONE_SCM_HTTPS_ADDRESS_KEY, "localhost:0");
-
-    InetSocketAddress addr = InetSocketAddress.createUnresolved("localhost", 0);
-    KeySpaceManagerHttpServer server = null;
-    try {
-      server = new KeySpaceManagerHttpServer(conf, null);
-      server.start();
-
-      Assert.assertTrue(implies(policy.isHttpEnabled(),
-          canAccess("http", server.getHttpAddress())));
-      Assert.assertTrue(
-          implies(!policy.isHttpEnabled(), server.getHttpAddress() == null));
-
-      Assert.assertTrue(implies(policy.isHttpsEnabled(),
-          canAccess("https", server.getHttpsAddress())));
-      Assert.assertTrue(
-          implies(!policy.isHttpsEnabled(), server.getHttpsAddress() == null));
-
-    } finally {
-      if (server != null) {
-        server.stop();
-      }
-    }
-  }
-
-  private static boolean canAccess(String scheme, InetSocketAddress addr) {
-    if (addr == null) {
-      return false;
-    }
-    try {
-      URL url =
-          new URL(scheme + "://" + NetUtils.getHostPortString(addr) + "/jmx");
-      URLConnection conn = connectionFactory.openConnection(url);
-      conn.connect();
-      conn.getContent();
-    } catch (Exception e) {
-      return false;
-    }
-    return true;
-  }
-
-  private static boolean implies(boolean a, boolean b) {
-    return !a || b;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/package-info.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/package-info.java
deleted file mode 100644
index 089ff4b..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.ksm;
-/**
- * KSM tests
- */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java
new file mode 100644
index 0000000..1ecac7f
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java
@@ -0,0 +1,394 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om;
+
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.ozone.om.helpers.OmBucketArgs;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
+import org.apache.hadoop.ozone.OzoneAcl;
+import org.junit.Assert;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+import org.junit.runner.RunWith;
+import org.mockito.Mockito;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.runners.MockitoJUnitRunner;
+import org.mockito.stubbing.Answer;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.LinkedList;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import static org.mockito.Mockito.any;
+
+/**
+ * Tests BucketManagerImpl, mocks OMMetadataManager for testing.
+ */
+@RunWith(MockitoJUnitRunner.class)
+public class TestBucketManagerImpl {
+  @Rule
+  public ExpectedException thrown = ExpectedException.none();
+
+  private OMMetadataManager getMetadataManagerMock(String... volumesToCreate)
+      throws IOException {
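+    // Build a mock OMMetadataManager backed by an in-memory map so bucket
+    // operations can be exercised without a real key-value store.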
+    OMMetadataManager metadataManager = Mockito.mock(OMMetadataManager.class);
+    Map<String, byte[]> metadataDB = new HashMap<>();
+    ReadWriteLock lock = new ReentrantReadWriteLock();
+
+    Mockito.when(metadataManager.writeLock()).thenReturn(lock.writeLock());
+    Mockito.when(metadataManager.readLock()).thenReturn(lock.readLock());
+    Mockito.when(metadataManager.getVolumeKey(any(String.class))).thenAnswer(
+        (InvocationOnMock invocation) ->
+            DFSUtil.string2Bytes(
+                OzoneConsts.OM_VOLUME_PREFIX + invocation.getArguments()[0]));
+    Mockito.when(metadataManager
+        .getBucketKey(any(String.class), any(String.class))).thenAnswer(
+            (InvocationOnMock invocation) ->
+                DFSUtil.string2Bytes(
+                    OzoneConsts.OM_VOLUME_PREFIX
+                        + invocation.getArguments()[0]
+                        + OzoneConsts.OM_BUCKET_PREFIX
+                        + invocation.getArguments()[1]));
+
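+    // isBucketEmpty: a bucket is empty iff no stored key starts with the
+    // "/volume/bucket/" prefix assembled below.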
+    Mockito.doAnswer(
+        new Answer<Boolean>() {
+          @Override
+          public Boolean answer(InvocationOnMock invocation)
+              throws Throwable {
+            String keyRootName = OzoneConsts.OM_KEY_PREFIX
+                + invocation.getArguments()[0]
+                + OzoneConsts.OM_KEY_PREFIX
+                + invocation.getArguments()[1]
+                + OzoneConsts.OM_KEY_PREFIX;
+            Iterator<String> keyIterator = metadataDB.keySet().iterator();
+            while(keyIterator.hasNext()) {
+              if(keyIterator.next().startsWith(keyRootName)) {
+                return false;
+              }
+            }
+            return true;
+          }
+        }).when(metadataManager).isBucketEmpty(any(String.class),
+        any(String.class));
+
+    Mockito.doAnswer(
+        new Answer<Void>() {
+          @Override
+          public Void answer(InvocationOnMock invocation) throws Throwable {
+            metadataDB.put(DFSUtil.bytes2String(
+                (byte[])invocation.getArguments()[0]),
+                (byte[])invocation.getArguments()[1]);
+            return null;
+          }
+        }).when(metadataManager).put(any(byte[].class), any(byte[].class));
+
+    Mockito.when(metadataManager.get(any(byte[].class))).thenAnswer(
+        (InvocationOnMock invocation) ->
+            metadataDB.get(DFSUtil.bytes2String(
+                (byte[])invocation.getArguments()[0]))
+    );
+    Mockito.doAnswer(
+        new Answer<Void>() {
+          @Override
+          public Void answer(InvocationOnMock invocation) throws Throwable {
+            metadataDB.remove(DFSUtil.bytes2String(
+                (byte[])invocation.getArguments()[0]));
+            return null;
+          }
+        }).when(metadataManager).delete(any(byte[].class));
+
+    for(String volumeName : volumesToCreate) {
+      byte[] dummyVolumeInfo = DFSUtil.string2Bytes(volumeName);
+      metadataDB.put(OzoneConsts.OM_VOLUME_PREFIX + volumeName,
+                     dummyVolumeInfo);
+    }
+    return metadataManager;
+  }
+
+  @Test
+  public void testCreateBucketWithoutVolume() throws IOException {
+    thrown.expectMessage("Volume doesn't exist");
+    OMMetadataManager metaMgr = getMetadataManagerMock();
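+    // Assert the result code inside the catch, then rethrow so the
+    // ExpectedException rule can also verify the message.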
+    try {
+      BucketManager bucketManager = new BucketManagerImpl(metaMgr);
+      OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
+          .setVolumeName("sampleVol")
+          .setBucketName("bucketOne")
+          .build();
+      bucketManager.createBucket(bucketInfo);
+    } catch(OMException omEx) {
+      Assert.assertEquals(ResultCodes.FAILED_VOLUME_NOT_FOUND,
+          omEx.getResult());
+      throw omEx;
+    }
+  }
+
+  @Test
+  public void testCreateBucket() throws IOException {
+    OMMetadataManager metaMgr = getMetadataManagerMock("sampleVol");
+    BucketManager bucketManager = new BucketManagerImpl(metaMgr);
+    OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
+        .setVolumeName("sampleVol")
+        .setBucketName("bucketOne")
+        .build();
+    bucketManager.createBucket(bucketInfo);
+    Assert.assertNotNull(bucketManager.getBucketInfo("sampleVol", "bucketOne"));
+  }
+
+  @Test
+  public void testCreateAlreadyExistingBucket() throws IOException {
+    thrown.expectMessage("Bucket already exist");
+    OMMetadataManager metaMgr = getMetadataManagerMock("sampleVol");
+    try {
+      BucketManager bucketManager = new BucketManagerImpl(metaMgr);
+      OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
+          .setVolumeName("sampleVol")
+          .setBucketName("bucketOne")
+          .build();
+      bucketManager.createBucket(bucketInfo);
+      bucketManager.createBucket(bucketInfo);
+    } catch(OMException omEx) {
+      Assert.assertEquals(ResultCodes.FAILED_BUCKET_ALREADY_EXISTS,
+          omEx.getResult());
+      throw omEx;
+    }
+  }
+
+  @Test
+  public void testGetBucketInfoForInvalidBucket() throws IOException {
+    thrown.expectMessage("Bucket not found");
+    try {
+      OMMetadataManager metaMgr = getMetadataManagerMock("sampleVol");
+      BucketManager bucketManager = new BucketManagerImpl(metaMgr);
+      bucketManager.getBucketInfo("sampleVol", "bucketOne");
+    } catch(OMException omEx) {
+      Assert.assertEquals(ResultCodes.FAILED_BUCKET_NOT_FOUND,
+          omEx.getResult());
+      throw omEx;
+    }
+  }
+
+  @Test
+  public void testGetBucketInfo() throws IOException {
+    OMMetadataManager metaMgr = getMetadataManagerMock("sampleVol");
+    BucketManager bucketManager = new BucketManagerImpl(metaMgr);
+    OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
+        .setVolumeName("sampleVol")
+        .setBucketName("bucketOne")
+        .setStorageType(StorageType.DISK)
+        .setIsVersionEnabled(false)
+        .build();
+    bucketManager.createBucket(bucketInfo);
+    OmBucketInfo result = bucketManager.getBucketInfo(
+        "sampleVol", "bucketOne");
+    Assert.assertEquals("sampleVol", result.getVolumeName());
+    Assert.assertEquals("bucketOne", result.getBucketName());
+    Assert.assertEquals(StorageType.DISK,
+        result.getStorageType());
+    Assert.assertFalse(result.getIsVersionEnabled());
+  }
+
+  @Test
+  public void testSetBucketPropertyAddACL() throws IOException {
+    OMMetadataManager metaMgr = getMetadataManagerMock("sampleVol");
+    List<OzoneAcl> acls = new LinkedList<>();
+    OzoneAcl ozoneAcl = new OzoneAcl(OzoneAcl.OzoneACLType.USER,
+        "root", OzoneAcl.OzoneACLRights.READ);
+    acls.add(ozoneAcl);
+    BucketManager bucketManager = new BucketManagerImpl(metaMgr);
+    OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
+        .setVolumeName("sampleVol")
+        .setBucketName("bucketOne")
+        .setAcls(acls)
+        .setStorageType(StorageType.DISK)
+        .setIsVersionEnabled(false)
+        .build();
+    bucketManager.createBucket(bucketInfo);
+    OmBucketInfo result = bucketManager.getBucketInfo(
+        "sampleVol", "bucketOne");
+    Assert.assertEquals("sampleVol", result.getVolumeName());
+    Assert.assertEquals("bucketOne", result.getBucketName());
+    Assert.assertEquals(1, result.getAcls().size());
+    List<OzoneAcl> addAcls = new LinkedList<>();
+    OzoneAcl newAcl = new OzoneAcl(OzoneAcl.OzoneACLType.USER,
+        "ozone", OzoneAcl.OzoneACLRights.READ);
+    addAcls.add(newAcl);
+    OmBucketArgs bucketArgs = OmBucketArgs.newBuilder()
+        .setVolumeName("sampleVol")
+        .setBucketName("bucketOne")
+        .setAddAcls(addAcls)
+        .build();
+    bucketManager.setBucketProperty(bucketArgs);
+    OmBucketInfo updatedResult = bucketManager.getBucketInfo(
+        "sampleVol", "bucketOne");
+    Assert.assertEquals(2, updatedResult.getAcls().size());
+    Assert.assertTrue(updatedResult.getAcls().contains(newAcl));
+  }
+
+  @Test
+  public void testSetBucketPropertyRemoveACL() throws IOException {
+    OMMetadataManager metaMgr = getMetadataManagerMock("sampleVol");
+    List<OzoneAcl> acls = new LinkedList<>();
+    OzoneAcl aclOne = new OzoneAcl(OzoneAcl.OzoneACLType.USER,
+        "root", OzoneAcl.OzoneACLRights.READ);
+    OzoneAcl aclTwo = new OzoneAcl(OzoneAcl.OzoneACLType.USER,
+        "ozone", OzoneAcl.OzoneACLRights.READ);
+    acls.add(aclOne);
+    acls.add(aclTwo);
+    BucketManager bucketManager = new BucketManagerImpl(metaMgr);
+    OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
+        .setVolumeName("sampleVol")
+        .setBucketName("bucketOne")
+        .setAcls(acls)
+        .setStorageType(StorageType.DISK)
+        .setIsVersionEnabled(false)
+        .build();
+    bucketManager.createBucket(bucketInfo);
+    OmBucketInfo result = bucketManager.getBucketInfo(
+        "sampleVol", "bucketOne");
+    Assert.assertEquals(2, result.getAcls().size());
+    List<OzoneAcl> removeAcls = new LinkedList<>();
+    removeAcls.add(aclTwo);
+    OmBucketArgs bucketArgs = OmBucketArgs.newBuilder()
+        .setVolumeName("sampleVol")
+        .setBucketName("bucketOne")
+        .setRemoveAcls(removeAcls)
+        .build();
+    bucketManager.setBucketProperty(bucketArgs);
+    OmBucketInfo updatedResult = bucketManager.getBucketInfo(
+        "sampleVol", "bucketOne");
+    Assert.assertEquals(1, updatedResult.getAcls().size());
+    Assert.assertFalse(updatedResult.getAcls().contains(aclTwo));
+  }
+
+  @Test
+  public void testSetBucketPropertyChangeStorageType() throws IOException {
+    OMMetadataManager metaMgr = getMetadataManagerMock("sampleVol");
+    BucketManager bucketManager = new BucketManagerImpl(metaMgr);
+    OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
+        .setVolumeName("sampleVol")
+        .setBucketName("bucketOne")
+        .setStorageType(StorageType.DISK)
+        .build();
+    bucketManager.createBucket(bucketInfo);
+    OmBucketInfo result = bucketManager.getBucketInfo(
+        "sampleVol", "bucketOne");
+    Assert.assertEquals(StorageType.DISK,
+        result.getStorageType());
+    OmBucketArgs bucketArgs = OmBucketArgs.newBuilder()
+        .setVolumeName("sampleVol")
+        .setBucketName("bucketOne")
+        .setStorageType(StorageType.SSD)
+        .build();
+    bucketManager.setBucketProperty(bucketArgs);
+    OmBucketInfo updatedResult = bucketManager.getBucketInfo(
+        "sampleVol", "bucketOne");
+    Assert.assertEquals(StorageType.SSD,
+        updatedResult.getStorageType());
+  }
+
+  @Test
+  public void testSetBucketPropertyChangeVersioning() throws IOException {
+    OMMetadataManager metaMgr = getMetadataManagerMock("sampleVol");
+    BucketManager bucketManager = new BucketManagerImpl(metaMgr);
+    OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
+        .setVolumeName("sampleVol")
+        .setBucketName("bucketOne")
+        .setIsVersionEnabled(false)
+        .build();
+    bucketManager.createBucket(bucketInfo);
+    OmBucketInfo result = bucketManager.getBucketInfo(
+        "sampleVol", "bucketOne");
+    Assert.assertFalse(result.getIsVersionEnabled());
+    OmBucketArgs bucketArgs = OmBucketArgs.newBuilder()
+        .setVolumeName("sampleVol")
+        .setBucketName("bucketOne")
+        .setIsVersionEnabled(true)
+        .build();
+    bucketManager.setBucketProperty(bucketArgs);
+    OmBucketInfo updatedResult = bucketManager.getBucketInfo(
+        "sampleVol", "bucketOne");
+    Assert.assertTrue(updatedResult.getIsVersionEnabled());
+  }
+
+  @Test
+  public void testDeleteBucket() throws IOException {
+    thrown.expectMessage("Bucket not found");
+    OMMetadataManager metaMgr = getMetadataManagerMock("sampleVol");
+    BucketManager bucketManager = new BucketManagerImpl(metaMgr);
+    for(int i = 0; i < 5; i++) {
+      OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
+          .setVolumeName("sampleVol")
+          .setBucketName("bucket_" + i)
+          .build();
+      bucketManager.createBucket(bucketInfo);
+    }
+    for(int i = 0; i < 5; i++) {
+      Assert.assertEquals("bucket_" + i,
+          bucketManager.getBucketInfo(
+              "sampleVol", "bucket_" + i).getBucketName());
+    }
+    try {
+      bucketManager.deleteBucket("sampleVol", "bucket_1");
+      Assert.assertNotNull(bucketManager.getBucketInfo(
+          "sampleVol", "bucket_2"));
+    } catch(IOException ex) {
+      Assert.fail(ex.getMessage());
+    }
+    try {
+      bucketManager.getBucketInfo("sampleVol", "bucket_1");
+    } catch(OMException omEx) {
+      Assert.assertEquals(ResultCodes.FAILED_BUCKET_NOT_FOUND,
+          omEx.getResult());
+      throw omEx;
+    }
+  }
+
+  @Test
+  public void testDeleteNonEmptyBucket() throws IOException {
+    thrown.expectMessage("Bucket is not empty");
+    OMMetadataManager metaMgr = getMetadataManagerMock("sampleVol");
+    BucketManager bucketManager = new BucketManagerImpl(metaMgr);
+    OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
+        .setVolumeName("sampleVol")
+        .setBucketName("bucketOne")
+        .build();
+    bucketManager.createBucket(bucketInfo);
+    //Create keys in bucket
+    metaMgr.put(DFSUtil.string2Bytes("/sampleVol/bucketOne/key_one"),
+        DFSUtil.string2Bytes("value_one"));
+    metaMgr.put(DFSUtil.string2Bytes("/sampleVol/bucketOne/key_two"),
+        DFSUtil.string2Bytes("value_two"));
+    try {
+      bucketManager.deleteBucket("sampleVol", "bucketOne");
+    } catch(OMException omEx) {
+      Assert.assertEquals(ResultCodes.FAILED_BUCKET_NOT_EMPTY,
+          omEx.getResult());
+      throw omEx;
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestChunkStreams.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestChunkStreams.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestChunkStreams.java
new file mode 100644
index 0000000..7ce916a
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestChunkStreams.java
@@ -0,0 +1,234 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om;
+
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.hadoop.ozone.client.io.ChunkGroupInputStream;
+import org.apache.hadoop.ozone.client.io.ChunkGroupOutputStream;
+import org.apache.hadoop.hdds.scm.storage.ChunkInputStream;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.OutputStream;
+import java.io.IOException;
+import java.util.ArrayList;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * This class tests ChunkGroupInputStream and ChunkGroupOutputStream.
+ */
+public class TestChunkStreams {
+
+  @Rule
+  public ExpectedException exception = ExpectedException.none();
+
+  /**
+   * This test uses ByteArrayOutputStream as the underlying stream to test
+   * the correctness of ChunkGroupOutputStream.
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testWriteGroupOutputStream() throws Exception {
+    try (ChunkGroupOutputStream groupOutputStream =
+             new ChunkGroupOutputStream()) {
+      ArrayList<OutputStream> outputStreams = new ArrayList<>();
+
+      // five streams of 100 bytes each; writing 500 bytes writes 100 bytes
+      // to each of them
+      for (int i = 0; i < 5; i++) {
+        ByteArrayOutputStream out = new ByteArrayOutputStream(100);
+        outputStreams.add(out);
+        groupOutputStream.addStream(out, 100);
+      }
+      assertEquals(0, groupOutputStream.getByteOffset());
+
+      String dataString = RandomStringUtils.randomAscii(500);
+      byte[] data = dataString.getBytes();
+      groupOutputStream.write(data, 0, data.length);
+      assertEquals(500, groupOutputStream.getByteOffset());
+
+      String res = "";
+      int offset = 0;
+      for (OutputStream stream : outputStreams) {
+        String subString = stream.toString();
+        res += subString;
+        assertEquals(dataString.substring(offset, offset + 100), subString);
+        offset += 100;
+      }
+      assertEquals(dataString, res);
+    }
+  }
+
+  @Test
+  public void testErrorWriteGroupOutputStream() throws Exception {
+    try (ChunkGroupOutputStream groupOutputStream =
+             new ChunkGroupOutputStream()) {
+      ArrayList<OutputStream> outputStreams = new ArrayList<>();
+
+      // five streams of 100 bytes each; writing 500 bytes writes 100 bytes
+      // to each of them, so together the five streams give the
+      // ChunkGroupOutputStream a total capacity of 500 bytes
+      for (int i = 0; i < 5; i++) {
+        ByteArrayOutputStream out = new ByteArrayOutputStream(100);
+        outputStreams.add(out);
+        groupOutputStream.addStream(out, 100);
+      }
+      assertEquals(0, groupOutputStream.getByteOffset());
+
+      // the first write of 100 bytes should succeed
+      groupOutputStream.write(RandomStringUtils.randomAscii(100).getBytes());
+      assertEquals(100, groupOutputStream.getByteOffset());
+
+      // a second write of 500 bytes should fail, as only 400 bytes of space
+      // are left
+      // TODO : if we decide to accept the remaining 400 bytes instead, or to
+      // return a more informative error code rather than an exception, this
+      // part needs to change.
+      exception.expect(Exception.class);
+      groupOutputStream.write(RandomStringUtils.randomAscii(500).getBytes());
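+      // not reached: the write above is expected to throw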
+      assertEquals(100, groupOutputStream.getByteOffset());
+    }
+  }
+
+  @Test
+  public void testReadGroupInputStream() throws Exception {
+    try (ChunkGroupInputStream groupInputStream = new ChunkGroupInputStream()) {
+      ArrayList<ChunkInputStream> inputStreams = new ArrayList<>();
+
+      String dataString = RandomStringUtils.randomAscii(500);
+      byte[] buf = dataString.getBytes();
+      int offset = 0;
+      for (int i = 0; i < 5; i++) {
+        int tempOffset = offset;
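+        // stub ChunkInputStream that serves a fixed 100-byte window of buf
+        // through a ByteArrayInputStream; seek operations are unsupported
+        // since the group stream only reads sequentially in this test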
+        ChunkInputStream in =
+            new ChunkInputStream(null, null, null, new ArrayList<>(), null) {
+              private ByteArrayInputStream in =
+                  new ByteArrayInputStream(buf, tempOffset, 100);
+
+              @Override
+              public void seek(long pos) throws IOException {
+                throw new UnsupportedOperationException();
+              }
+
+              @Override
+              public long getPos() throws IOException {
+                throw new UnsupportedOperationException();
+              }
+
+              @Override
+              public boolean seekToNewSource(long targetPos)
+                  throws IOException {
+                throw new UnsupportedOperationException();
+              }
+
+              @Override
+              public int read() throws IOException {
+                return in.read();
+              }
+
+              @Override
+              public int read(byte[] b, int off, int len) throws IOException {
+                return in.read(b, off, len);
+              }
+            };
+        inputStreams.add(in);
+        offset += 100;
+        groupInputStream.addStream(in, 100);
+      }
+
+      byte[] resBuf = new byte[500];
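+      // a single group read of 500 bytes should drain all five sub-streams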
+      int len = groupInputStream.read(resBuf, 0, 500);
+
+      assertEquals(500, len);
+      assertEquals(dataString, new String(resBuf));
+    }
+  }
+
+  @Test
+  public void testErrorReadGroupInputStream() throws Exception {
+    try (ChunkGroupInputStream groupInputStream = new ChunkGroupInputStream()) {
+      ArrayList<ChunkInputStream> inputStreams = new ArrayList<>();
+
+      String dataString = RandomStringUtils.randomAscii(500);
+      byte[] buf = dataString.getBytes();
+      int offset = 0;
+      for (int i = 0; i < 5; i++) {
+        int tempOffset = offset;
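+        // stub ChunkInputStream that serves a fixed 100-byte window of buf
+        // through a ByteArrayInputStream; seek operations are unsupported
+        // since the group stream only reads sequentially in this test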
+        ChunkInputStream in =
+            new ChunkInputStream(null, null, null, new ArrayList<>(), null) {
+              private ByteArrayInputStream in =
+                  new ByteArrayInputStream(buf, tempOffset, 100);
+
+              @Override
+              public void seek(long pos) throws IOException {
+                throw new UnsupportedOperationException();
+              }
+
+              @Override
+              public long getPos() throws IOException {
+                throw new UnsupportedOperationException();
+              }
+
+              @Override
+              public boolean seekToNewSource(long targetPos)
+                  throws IOException {
+                throw new UnsupportedOperationException();
+              }
+
+              @Override
+              public int read() throws IOException {
+                return in.read();
+              }
+
+              @Override
+              public int read(byte[] b, int off, int len) throws IOException {
+                return in.read(b, off, len);
+              }
+            };
+        inputStreams.add(in);
+        offset += 100;
+        groupInputStream.addStream(in, 100);
+      }
+
+      byte[] resBuf = new byte[600];
+      // read 340 bytes first: three full 100-byte streams plus 40 bytes of
+      // the fourth, so the current stream index is 3 with 60 bytes remaining
+      int len = groupInputStream.read(resBuf, 0, 340);
+      assertEquals(3, groupInputStream.getCurrentStreamIndex());
+      assertEquals(60, groupInputStream.getRemainingOfIndex(3));
+      assertEquals(340, len);
+      assertEquals(dataString.substring(0, 340),
+          new String(resBuf).substring(0, 340));
+
+      // request the next 260 bytes, but only 160 remain; the group stream
+      // advances past the last stream with nothing left of index 4
+      len = groupInputStream.read(resBuf, 340, 260);
+      assertEquals(5, groupInputStream.getCurrentStreamIndex());
+      assertEquals(0, groupInputStream.getRemainingOfIndex(4));
+      assertEquals(160, len);
+      assertEquals(dataString, new String(resBuf).substring(0, 500));
+
+      // we have reached EOF; any further read should return -1
+      len = groupInputStream.read(resBuf, 0, 1);
+      assertEquals(-1, len);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHttpServer.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHttpServer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHttpServer.java
new file mode 100644
index 0000000..3e11a13
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHttpServer.java
@@ -0,0 +1,141 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.web.URLConnectionFactory;
+import org.apache.hadoop.http.HttpConfig;
+import org.apache.hadoop.http.HttpConfig.Policy;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
+
+import java.io.File;
+import java.net.InetSocketAddress;
+import java.net.URL;
+import java.net.URLConnection;
+import java.util.Arrays;
+import java.util.Collection;
+
+/**
+ * Tests the HTTP server of the OM under each HTTP policy option.
+ */
+@RunWith(value = Parameterized.class)
+public class TestOzoneManagerHttpServer {
+  private static final String BASEDIR = GenericTestUtils
+      .getTempPath(TestOzoneManagerHttpServer.class.getSimpleName());
+  private static String keystoresDir;
+  private static String sslConfDir;
+  private static Configuration conf;
+  private static URLConnectionFactory connectionFactory;
+
+  @Parameters public static Collection<Object[]> policy() {
+    Object[][] params = new Object[][] {
+        {HttpConfig.Policy.HTTP_ONLY},
+        {HttpConfig.Policy.HTTPS_ONLY},
+        {HttpConfig.Policy.HTTP_AND_HTTPS} };
+    return Arrays.asList(params);
+  }
+
+  private final HttpConfig.Policy policy;
+
+  public TestOzoneManagerHttpServer(Policy policy) {
+    super();
+    this.policy = policy;
+  }
+
+  @BeforeClass public static void setUp() throws Exception {
+    File base = new File(BASEDIR);
+    FileUtil.fullyDelete(base);
+    base.mkdirs();
+    conf = new Configuration();
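+    // generate keystores and an SSL client/server configuration so the
+    // HTTPS-enabled policies can be exercised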
+    keystoresDir = new File(BASEDIR).getAbsolutePath();
+    sslConfDir = KeyStoreTestUtil.getClasspathDir(
+        TestOzoneManagerHttpServer.class);
+    KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
+    connectionFactory =
+        URLConnectionFactory.newDefaultURLConnectionFactory(conf);
+    conf.set(DFSConfigKeys.DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY,
+        KeyStoreTestUtil.getClientSSLConfigFileName());
+    conf.set(DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
+        KeyStoreTestUtil.getServerSSLConfigFileName());
+  }
+
+  @AfterClass public static void tearDown() throws Exception {
+    FileUtil.fullyDelete(new File(BASEDIR));
+    KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
+  }
+
+  @Test public void testHttpPolicy() throws Exception {
+    conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, policy.name());
+    conf.set(ScmConfigKeys.OZONE_SCM_HTTPS_ADDRESS_KEY, "localhost:0");
+
+    OzoneManagerHttpServer server = null;
+    try {
+      server = new OzoneManagerHttpServer(conf, null);
+      server.start();
+
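+      // verify the policy in both directions: an enabled scheme must be
+      // reachable, and a disabled scheme must not expose an address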
+      Assert.assertTrue(implies(policy.isHttpEnabled(),
+          canAccess("http", server.getHttpAddress())));
+      Assert.assertTrue(
+          implies(!policy.isHttpEnabled(), server.getHttpAddress() == null));
+
+      Assert.assertTrue(implies(policy.isHttpsEnabled(),
+          canAccess("https", server.getHttpsAddress())));
+      Assert.assertTrue(
+          implies(!policy.isHttpsEnabled(), server.getHttpsAddress() == null));
+
+    } finally {
+      if (server != null) {
+        server.stop();
+      }
+    }
+  }
+
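+  /**
+   * Returns true if a GET of the /jmx endpoint over the given scheme and
+   * address succeeds.
+   */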
+  private static boolean canAccess(String scheme, InetSocketAddress addr) {
+    if (addr == null) {
+      return false;
+    }
+    try {
+      URL url =
+          new URL(scheme + "://" + NetUtils.getHostPortString(addr) + "/jmx");
+      URLConnection conn = connectionFactory.openConnection(url);
+      conn.connect();
+      conn.getContent();
+    } catch (Exception e) {
+      return false;
+    }
+    return true;
+  }
+
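+  // logical implication: false only when 'a' holds and 'b' does not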
+  private static boolean implies(boolean a, boolean b) {
+    return !a || b;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/package-info.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/package-info.java
new file mode 100644
index 0000000..12fcf7c
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/package-info.java
@@ -0,0 +1,21 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+/**
+ * OM tests.
+ */
+package org.apache.hadoop.ozone.om;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java
index 8417e46..b63e182 100644
--- a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java
+++ b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java
@@ -33,7 +33,7 @@ import org.apache.hadoop.ozone.web.handlers.UserArgs;
 import org.apache.hadoop.ozone.web.handlers.VolumeArgs;
 import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
 import org.apache.hadoop.ozone.web.utils.OzoneUtils;
-import org.apache.hadoop.ozone.ksm.KSMConfigKeys;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.junit.Assert;
 
@@ -109,7 +109,7 @@ class OzoneContract extends AbstractFSContract {
     String uri = String.format("%s://%s.%s/",
         OzoneConsts.OZONE_URI_SCHEME, bucketName, volumeName);
     getConf().set("fs.defaultFS", uri);
-    copyClusterConfigs(KSMConfigKeys.OZONE_KSM_ADDRESS_KEY);
+    copyClusterConfigs(OMConfigKeys.OZONE_OM_ADDRESS_KEY);
     copyClusterConfigs(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY);
     return FileSystem.get(getConf());
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
index 26776c5..3884edd 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
@@ -32,11 +32,11 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.OzoneAclInfo;
-import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.BucketInfo;
-import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.KeyInfo;
-import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.VolumeInfo;
-import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.VolumeList;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeList;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.util.Tool;
@@ -60,10 +60,10 @@ import java.util.HashSet;
 import java.util.Set;
 
 import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_SUFFIX;
-import static org.apache.hadoop.ozone.OzoneConsts.KSM_DB_NAME;
-import static org.apache.hadoop.ozone.OzoneConsts.KSM_USER_PREFIX;
-import static org.apache.hadoop.ozone.OzoneConsts.KSM_BUCKET_PREFIX;
-import static org.apache.hadoop.ozone.OzoneConsts.KSM_VOLUME_PREFIX;
+import static org.apache.hadoop.ozone.OzoneConsts.OM_DB_NAME;
+import static org.apache.hadoop.ozone.OzoneConsts.OM_USER_PREFIX;
+import static org.apache.hadoop.ozone.OzoneConsts.OM_BUCKET_PREFIX;
+import static org.apache.hadoop.ozone.OzoneConsts.OM_VOLUME_PREFIX;
 import static org.apache.hadoop.ozone.OzoneConsts.OPEN_CONTAINERS_DB;
 
 /**
@@ -120,7 +120,7 @@ public class SQLCLI  extends Configured implements Tool {
       "INSERT INTO openContainer (containerName, containerUsed) " +
           "VALUES (\"%s\", \"%s\")";
 
-  // for ksm.db
+  // for om.db
   private static final String CREATE_VOLUME_LIST =
       "CREATE TABLE volumeList (" +
           "userName TEXT NOT NULL," +
@@ -278,9 +278,9 @@ public class SQLCLI  extends Configured implements Tool {
     } else if (dbName.toString().equals(OPEN_CONTAINERS_DB)) {
       LOG.info("Converting open container DB");
       convertOpenContainerDB(dbPath, outPath);
-    } else if (dbName.toString().equals(KSM_DB_NAME)) {
-      LOG.info("Converting ksm DB");
-      convertKSMDB(dbPath, outPath);
+    } else if (dbName.toString().equals(OM_DB_NAME)) {
+      LOG.info("Converting om DB");
+      convertOMDB(dbPath, outPath);
     } else {
       LOG.error("Unrecognized db name {}", dbName);
     }
@@ -301,7 +301,7 @@ public class SQLCLI  extends Configured implements Tool {
   }
 
   /**
-   * Convert ksm.db to sqlite db file. With following schema.
+   * Convert om.db to a SQLite db file, with the following schema.
    * (* for primary key)
    *
 * 1. for key type USER, it contains a username and a list of volumes
@@ -341,8 +341,8 @@ public class SQLCLI  extends Configured implements Tool {
    * @param outPath
    * @throws Exception
    */
-  private void convertKSMDB(Path dbPath, Path outPath) throws Exception {
-    LOG.info("Create tables for sql ksm db.");
+  private void convertOMDB(Path dbPath, Path outPath) throws Exception {
+    LOG.info("Create tables for sql om db.");
     File dbFile = dbPath.toFile();
     try (MetadataStore dbStore = MetadataStoreBuilder.newBuilder()
         .setConf(conf).setDbFile(dbFile).build();
@@ -357,7 +357,7 @@ public class SQLCLI  extends Configured implements Tool {
         String keyString = DFSUtilClient.bytes2String(key);
         KeyType type = getKeyType(keyString);
         try {
-          insertKSMDB(conn, type, keyString, value);
+          insertOMDB(conn, type, keyString, value);
         } catch (IOException | SQLException ex) {
           LOG.error("Exception inserting key {} type {}", keyString, type, ex);
         }
@@ -366,8 +366,8 @@ public class SQLCLI  extends Configured implements Tool {
     }
   }
 
-  private void insertKSMDB(Connection conn, KeyType type, String keyName,
-      byte[] value) throws IOException, SQLException {
+  private void insertOMDB(Connection conn, KeyType type, String keyName,
+                          byte[] value) throws IOException, SQLException {
     switch (type) {
     case USER:
       VolumeList volumeList = VolumeList.parseFrom(value);
@@ -412,16 +412,16 @@ public class SQLCLI  extends Configured implements Tool {
       executeSQL(conn, insertKeyInfo);
       break;
     default:
-      throw new IOException("Unknown key from ksm.db");
+      throw new IOException("Unknown key from om.db");
     }
   }
 
   private KeyType getKeyType(String key) {
-    if (key.startsWith(KSM_USER_PREFIX)) {
+    if (key.startsWith(OM_USER_PREFIX)) {
       return KeyType.USER;
-    } else if (key.startsWith(KSM_VOLUME_PREFIX)) {
-      return key.replaceFirst(KSM_VOLUME_PREFIX, "")
-          .contains(KSM_BUCKET_PREFIX) ? KeyType.BUCKET : KeyType.VOLUME;
+    } else if (key.startsWith(OM_VOLUME_PREFIX)) {
+      return key.replaceFirst(OM_VOLUME_PREFIX, "")
+          .contains(OM_BUCKET_PREFIX) ? KeyType.BUCKET : KeyType.VOLUME;
     }else {
       return KeyType.KEY;
     }


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org