Posted to common-commits@hadoop.apache.org by bh...@apache.org on 2019/03/15 04:17:06 UTC

[hadoop] branch trunk updated: HDDS-761. Create S3 subcommand to run S3 related operations.

This is an automated email from the ASF dual-hosted git repository.

bharat pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
     new 9001508  HDDS-761. Create S3 subcommand to run S3 related operations.
9001508 is described below

commit 90015084850298803d8e165a0f6f9719ba724ea3
Author: Vivek Ratnavel Subramanian <vi...@gmail.com>
AuthorDate: Thu Mar 14 21:16:59 2019 -0700

    HDDS-761. Create S3 subcommand to run S3 related operations.
    
    Closes #579
---
 hadoop-hdds/docs/content/BucketCommands.md         |   4 +-
 hadoop-hdds/docs/content/S3.md                     |   2 +-
 hadoop-hdds/docs/content/S3Commands.md             |   2 +-
 hadoop-hdds/docs/content/SparkOzoneFSK8S.md        |   6 +-
 hadoop-ozone/common/src/main/bin/ozone             |   5 +
 .../src/main/smoketest/security/ozone-secure.robot |   2 +-
 .../hadoop/ozone/ozShell/TestOzoneShell.java       |  63 -----
 .../apache/hadoop/ozone/ozShell/TestS3Shell.java   | 292 +++++++++++++++++++++
 .../org/apache/hadoop/ozone/web/ozShell/Shell.java |   4 +-
 .../ozone/web/ozShell/bucket/BucketCommands.java   |   3 +-
 .../ozone/web/ozShell/s3/GetS3SecretHandler.java   |   2 +-
 .../hadoop/ozone/web/ozShell/s3/S3Commands.java    |  60 -----
 .../hadoop/ozone/web/ozShell/s3/S3Shell.java       |  56 ++++
 13 files changed, 364 insertions(+), 137 deletions(-)
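
In practical terms, the commit moves the S3-related operations out from under `ozone sh` and into a dedicated top-level `ozone s3` subcommand. A rough before/after sketch of the CLI surface (the bucket name is a placeholder):

```
# Before this commit:
ozone sh bucket path <s3Bucket>
ozone sh s3 getsecret

# After this commit:
ozone s3 path <s3Bucket>
ozone s3 getsecret
```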

diff --git a/hadoop-hdds/docs/content/BucketCommands.md b/hadoop-hdds/docs/content/BucketCommands.md
index 5afd999..6153762 100644
--- a/hadoop-hdds/docs/content/BucketCommands.md
+++ b/hadoop-hdds/docs/content/BucketCommands.md
@@ -119,10 +119,10 @@ ozone sh bucket update --addAcl=user:bilbo:rw /hive/jan
 The above command gives user bilbo read/write permission to the bucket.
 
 ### path
-The bucket command to provide ozone mapping for s3 bucket(Created via aws cli)
+The bucket command provides the ozone mapping for an s3 bucket (created via the aws cli)
 
 {{< highlight bash >}}
-ozone sh bucket path <<s3Bucket>>
+ozone s3 path <<s3Bucket>>
 {{< /highlight >}}
 
 The above command will print VolumeName and the mapping created for s3Bucket.
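
For illustration, a hedged example of the lookup (bucket and volume names are hypothetical; the quoted output fragments are the ones asserted by the new TestS3Shell further down):

```
ozone s3 path bucket1
# Output is expected to contain lines like:
#   Volume name for S3Bucket is : <volumeName>
#   ... o3://bucket1.<volumeName> ...
```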
diff --git a/hadoop-hdds/docs/content/S3.md b/hadoop-hdds/docs/content/S3.md
index a451310..bca2d30 100644
--- a/hadoop-hdds/docs/content/S3.md
+++ b/hadoop-hdds/docs/content/S3.md
@@ -94,7 +94,7 @@ Security is not yet implemented, you can *use* any AWS_ACCESS_KEY_ID and AWS_SEC
 
 Note: Ozone has a notion of 'volumes', which is missing from the S3 REST endpoint. Under the hood, S3 bucket names are mapped to Ozone 'volume/bucket' locations (depending on the given authentication information).
 
-To show the storage location of a S3 bucket, use the `ozone sh bucket path <bucketname>` command.
+To show the storage location of a S3 bucket, use the `ozone s3 path <bucketname>` command.
 
 ```
 aws s3api --endpoint-url http://localhost:9878 create-bucket --bucket=bucket1
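
A minimal sketch of the round trip described here, reusing the endpoint and bucket from the docs (output format as asserted by TestS3Shell below):

```
# Create a bucket through the S3 gateway...
aws s3api --endpoint-url http://localhost:9878 create-bucket --bucket=bucket1
# ...then resolve its backing Ozone volume/bucket with the new subcommand:
ozone s3 path bucket1
```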
diff --git a/hadoop-hdds/docs/content/S3Commands.md b/hadoop-hdds/docs/content/S3Commands.md
index 1a1d6ee..23936e6 100644
--- a/hadoop-hdds/docs/content/S3Commands.md
+++ b/hadoop-hdds/docs/content/S3Commands.md
 The user should get a Kerberos ticket before using this option.
 
 
 {{< highlight bash >}}
-ozone sh s3 getkey
+ozone s3 getsecret
 {{< /highlight >}}
 Prints the AWS_SECRET_ACCESS_KEY and AWS_ACCESS_KEY_ID for the current user.
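
A hedged usage sketch; the key=value output format is the one the secure-cluster smoketest parses below (values are per-user secrets, shown here as placeholders):

```
ozone s3 getsecret
# Expected output:
#   awsAccessKey=<AWS_ACCESS_KEY_ID>
#   awsSecret=<AWS_SECRET_ACCESS_KEY>
```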
 
diff --git a/hadoop-hdds/docs/content/SparkOzoneFSK8S.md b/hadoop-hdds/docs/content/SparkOzoneFSK8S.md
index ef88d61..3e598d9 100644
--- a/hadoop-hdds/docs/content/SparkOzoneFSK8S.md
+++ b/hadoop-hdds/docs/content/SparkOzoneFSK8S.md
@@ -118,7 +118,7 @@ Download any text file and put it to the `/tmp/alice.txt` first.
 kubectl port-forward s3g-0 9878:9878
 aws s3api --endpoint http://localhost:9878 create-bucket --bucket=test
 aws s3api --endpoint http://localhost:9878 put-object --bucket test --key alice.txt --body /tmp/alice.txt
-kubectl exec -it scm-0 ozone sh bucket path test
+kubectl exec -it scm-0 ozone s3 path test
 ```
 
 The output of the last command is something like this:
@@ -138,13 +138,13 @@ kubectl create clusterrolebinding spark-role --clusterrole=edit --serviceaccount
 ```
 ## Execute the job
 
-Execute the following spar-submit command, but change at least the following values:
+Execute the following spark-submit command, but change at least the following values:
 
  * the kubernetes master url (you can check your ~/.kube/config to find the actual value)
  * the kubernetes namespace (yournamespace in this example)
 * serviceAccountName (you can use the _spark_ value if you followed the previous steps)
  * container.image (in this example this is myrepo/spark-ozone. This is pushed to the registry in the previous steps)
- * location of the input file (o3fs://...), use the string which is identified earlier with the `ozone sh bucket path` command
+ * location of the input file (o3fs://...), use the string which is identified earlier with the `ozone s3 path <bucketname>` command
 
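For the last item, the o3fs:// input location follows from the same mapping lookup; a hedged sketch (the volume name is whatever the command reports):

```
kubectl exec -it scm-0 ozone s3 path test
# Suppose the reported volume is <volume>; the job input then becomes:
#   o3fs://test.<volume>/alice.txt
```
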
 ```
 bin/spark-submit \
diff --git a/hadoop-ozone/common/src/main/bin/ozone b/hadoop-ozone/common/src/main/bin/ozone
index 3d1e96f..c6a2d83 100755
--- a/hadoop-ozone/common/src/main/bin/ozone
+++ b/hadoop-ozone/common/src/main/bin/ozone
@@ -48,6 +48,7 @@ function hadoop_usage
   hadoop_add_subcommand "s3g" daemon "run the S3 compatible REST gateway"
   hadoop_add_subcommand "scmcli" client "run the CLI of the Storage Container Manager"
   hadoop_add_subcommand "sh" client "command line interface for object store operations"
+  hadoop_add_subcommand "s3" client "command line interface for s3 related operations"
   hadoop_add_subcommand "version" client "print the version"
 
   hadoop_generate_usage "${HADOOP_SHELL_EXECNAME}" false
@@ -129,6 +130,10 @@ function ozonecmd_case
       HADOOP_CLASSNAME=org.apache.hadoop.ozone.web.ozShell.Shell
       OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-ozone-manager"
     ;;
+    s3)
+      HADOOP_CLASSNAME=org.apache.hadoop.ozone.web.ozShell.s3.S3Shell
+      OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-ozone-manager"
+    ;;
     scm)
       HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
       HADOOP_CLASSNAME='org.apache.hadoop.hdds.scm.server.StorageContainerManager'
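
Conceptually, the new case arm dispatches like the other client subcommands: it picks a main class and the generic launcher runs it with the remaining arguments. A simplified sketch (not the actual launcher code):

```
subcmd=$1; shift
case "${subcmd}" in
  s3)
    HADOOP_CLASSNAME=org.apache.hadoop.ozone.web.ozShell.s3.S3Shell
  ;;
esac
# The launcher ultimately execs: java ... ${HADOOP_CLASSNAME} "$@"
# so "ozone s3 getsecret" hands ["getsecret"] to S3Shell.main.
```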
diff --git a/hadoop-ozone/dist/src/main/smoketest/security/ozone-secure.robot b/hadoop-ozone/dist/src/main/smoketest/security/ozone-secure.robot
index a81c093..9c481e6 100644
--- a/hadoop-ozone/dist/src/main/smoketest/security/ozone-secure.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/security/ozone-secure.robot
@@ -37,7 +37,7 @@ Install aws cli
 Setup credentials
     ${hostname}=        Execute                    hostname
     Execute             kinit -k testuser/${hostname}@EXAMPLE.COM -t /etc/security/keytabs/testuser.keytab
-    ${result} =         Execute                    ozone sh s3 getsecret
+    ${result} =         Execute                    ozone s3 getsecret
     ${accessKey} =      Get Regexp Matches         ${result}     (?<=awsAccessKey=).*
     ${secret} =         Get Regexp Matches	       ${result}     (?<=awsSecret=).*
                         Execute                    aws configure set default.s3.signature_version s3v4
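
A plain-shell equivalent of the robot keywords above (the principal, keytab path, and lookbehind regexes are taken verbatim from the test; `grep -P` assumes GNU grep):

```
kinit -k testuser/$(hostname)@EXAMPLE.COM -t /etc/security/keytabs/testuser.keytab
result=$(ozone s3 getsecret)
accessKey=$(grep -oP '(?<=awsAccessKey=).*' <<< "${result}")
secret=$(grep -oP '(?<=awsSecret=).*' <<< "${result}")
aws configure set default.s3.signature_version s3v4
```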
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
index 13cf0e4..7f77f87 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
@@ -76,13 +76,11 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.S3_BUCKET_NOT_FOUND;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Assert;
 
-import static org.apache.hadoop.ozone.web.ozShell.s3.GetS3SecretHandler.OZONE_GETS3SECRET_ERROR;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
@@ -1176,67 +1174,6 @@ public class TestOzoneShell {
     executeWithError(shell, args, "the length should be a positive number");
   }
 
-  @Test
-  public void testS3BucketMapping() throws  IOException {
-    String setOmAddress =
-        "--set=" + OZONE_OM_ADDRESS_KEY + "=" + getOmAddress();
-
-    String s3Bucket = "bucket1";
-    String commandOutput;
-    createS3Bucket("ozone", s3Bucket);
-
-    //WHEN
-    String[] args =
-        new String[] {setOmAddress, "bucket",
-            "path", s3Bucket};
-    execute(shell, args);
-
-    //THEN
-    commandOutput = out.toString();
-    String volumeName = client.getOzoneVolumeName(s3Bucket);
-    assertTrue(commandOutput.contains("Volume name for S3Bucket is : " +
-        volumeName));
-    assertTrue(commandOutput.contains(OzoneConsts.OZONE_URI_SCHEME + "://" +
-        s3Bucket + "." + volumeName));
-    out.reset();
-
-    //Trying to get map for an unknown bucket
-    args = new String[] {setOmAddress, "bucket", "path",
-        "unknownbucket"};
-    executeWithError(shell, args, S3_BUCKET_NOT_FOUND);
-
-    // No bucket name
-    args = new String[] {setOmAddress, "bucket", "path"};
-    executeWithError(shell, args, "Missing required parameter");
-
-    // Invalid bucket name
-    args = new String[] {setOmAddress, "bucket", "path", "/asd/multipleslash"};
-    executeWithError(shell, args, S3_BUCKET_NOT_FOUND);
-  }
-
-  @Test
-  public void testS3Secret() throws Exception {
-    String setOmAddress =
-        "--set=" + OZONE_OM_ADDRESS_KEY + "=" + getOmAddress();
-
-    String output;
-
-    String[] args = new String[] {setOmAddress, "s3", "getsecret"};
-    execute(shell, args);
-    // Get the first line of output
-    output = out.toString().split("\n")[0];
-
-    assertTrue(output.equals(OZONE_GETS3SECRET_ERROR));
-  }
-
-  private void createS3Bucket(String userName, String s3Bucket) {
-    try {
-      client.createS3Bucket("ozone", s3Bucket);
-    } catch (IOException ex) {
-      GenericTestUtils.assertExceptionContains("S3_BUCKET_ALREADY_EXISTS", ex);
-    }
-  }
-
   private OzoneVolume creatVolume() throws OzoneException, IOException {
     String volumeName = RandomStringUtils.randomNumeric(5) + "volume";
     VolumeArgs volumeArgs = VolumeArgs.newBuilder()
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestS3Shell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestS3Shell.java
new file mode 100644
index 0000000..4e22856
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestS3Shell.java
@@ -0,0 +1,292 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.ozShell;
+
+import com.google.common.base.Strings;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdds.client.ReplicationFactor;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
+import org.apache.hadoop.ozone.client.rpc.RpcClient;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.helpers.ServiceInfo;
+import org.apache.hadoop.ozone.web.ozShell.s3.S3Shell;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import picocli.CommandLine;
+import picocli.CommandLine.ExecutionException;
+import picocli.CommandLine.IExceptionHandler2;
+import picocli.CommandLine.ParameterException;
+import picocli.CommandLine.ParseResult;
+import picocli.CommandLine.RunLast;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServicePort;
+
+
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.util.Arrays;
+import java.util.List;
+
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.S3_BUCKET_NOT_FOUND;
+import static org.apache.hadoop.ozone.web.ozShell.s3.GetS3SecretHandler.OZONE_GETS3SECRET_ERROR;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+/**
+ * Test class for the Ozone s3Shell command.
+ */
+public class TestS3Shell {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestS3Shell.class);
+
+  /**
+   * Set the timeout for every test.
+   */
+  @Rule
+  public Timeout testTimeout = new Timeout(300000);
+
+  private static String url;
+  private static File baseDir;
+  private static OzoneConfiguration conf = null;
+  private static MiniOzoneCluster cluster = null;
+  private static ClientProtocol client = null;
+  private static S3Shell s3Shell = null;
+
+  private final ByteArrayOutputStream out = new ByteArrayOutputStream();
+  private final ByteArrayOutputStream err = new ByteArrayOutputStream();
+  private static final PrintStream OLD_OUT = System.out;
+  private static final PrintStream OLD_ERR = System.err;
+
+  /**
+   * Create a MiniOzoneCluster for testing, using the distributed Ozone
+   * handler type.
+   *
+   * @throws Exception
+   */
+  @BeforeClass
+  public static void init() throws Exception {
+    conf = new OzoneConfiguration();
+
+    String path = GenericTestUtils.getTempPath(
+        TestS3Shell.class.getSimpleName());
+    baseDir = new File(path);
+    baseDir.mkdirs();
+
+    s3Shell = new S3Shell();
+
+    cluster = MiniOzoneCluster.newBuilder(conf)
+        .setNumDatanodes(3)
+        .build();
+    conf.setInt(OZONE_REPLICATION, ReplicationFactor.THREE.getValue());
+    conf.setQuietMode(false);
+    client = new RpcClient(conf);
+    cluster.waitForClusterToBeReady();
+  }
+
+  /**
+   * shutdown MiniOzoneCluster.
+   */
+  @AfterClass
+  public static void shutdown() {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+
+    if (baseDir != null) {
+      FileUtil.fullyDelete(baseDir, true);
+    }
+  }
+
+  @Before
+  public void setup() {
+    System.setOut(new PrintStream(out));
+    System.setErr(new PrintStream(err));
+    url = "o3://" + getOmAddress();
+  }
+
+  @After
+  public void reset() {
+    // reset stream after each unit test
+    out.reset();
+    err.reset();
+
+    // restore system streams
+    System.setOut(OLD_OUT);
+    System.setErr(OLD_ERR);
+  }
+
+  @Test
+  public void testS3BucketMapping() throws IOException {
+    String setOmAddress =
+        "--set=" + OZONE_OM_ADDRESS_KEY + "=" + getOmAddress();
+
+    String s3Bucket = "bucket1";
+    String commandOutput;
+    createS3Bucket("ozone", s3Bucket);
+
+    // WHEN
+    String[] args =
+        new String[] {setOmAddress, "path", s3Bucket};
+    execute(s3Shell, args);
+
+    // THEN
+    commandOutput = out.toString();
+    String volumeName = client.getOzoneVolumeName(s3Bucket);
+    assertTrue(commandOutput.contains("Volume name for S3Bucket is : " +
+        volumeName));
+    assertTrue(commandOutput.contains(OzoneConsts.OZONE_URI_SCHEME + "://" +
+        s3Bucket + "." + volumeName));
+    out.reset();
+
+    // Trying to get map for an unknown bucket
+    args = new String[] {setOmAddress, "path", "unknownbucket"};
+    executeWithError(s3Shell, args, S3_BUCKET_NOT_FOUND);
+
+    // No bucket name
+    args = new String[] {setOmAddress, "path"};
+    executeWithError(s3Shell, args, "Missing required parameter");
+
+    // Invalid bucket name
+    args = new String[] {setOmAddress, "path", "/asd/multipleslash"};
+    executeWithError(s3Shell, args, S3_BUCKET_NOT_FOUND);
+  }
+
+  @Test
+  public void testS3SecretUnsecuredCluster() throws Exception {
+    String setOmAddress =
+        "--set=" + OZONE_OM_ADDRESS_KEY + "=" + getOmAddress();
+
+    String output;
+
+    String[] args = new String[] {setOmAddress, "getsecret"};
+    execute(s3Shell, args);
+    // Get the first line of output
+    output = out.toString().split("\n")[0];
+
+    assertTrue(output.equals(OZONE_GETS3SECRET_ERROR));
+  }
+
+  private void createS3Bucket(String userName, String s3Bucket) {
+    try {
+      client.createS3Bucket("ozone", s3Bucket);
+    } catch (IOException ex) {
+      GenericTestUtils.assertExceptionContains("S3_BUCKET_ALREADY_EXISTS", ex);
+    }
+  }
+
+  private void execute(S3Shell shell, String[] args) {
+    LOG.info("Executing s3Shell command with args {}", Arrays.asList(args));
+    CommandLine cmd = shell.getCmd();
+
+    IExceptionHandler2<List<Object>> exceptionHandler =
+        new IExceptionHandler2<List<Object>>() {
+          @Override
+          public List<Object> handleParseException(ParameterException ex,
+                                                   String[] args) {
+            throw ex;
+          }
+
+          @Override
+          public List<Object> handleExecutionException(ExecutionException ex,
+                                                       ParseResult parseRes) {
+            throw ex;
+          }
+        };
+    cmd.parseWithHandlers(new RunLast(),
+        exceptionHandler, args);
+  }
+
+  /**
+   * Execute the command and assert that it fails with the expected
+   * OMException result code.
+   */
+  private void executeWithError(S3Shell shell, String[] args,
+                                OMException.ResultCodes code) {
+    try {
+      execute(shell, args);
+      fail("Exception is expected from command execution " + Arrays
+          .asList(args));
+    } catch (Exception ex) {
+      Assert.assertEquals(OMException.class, ex.getCause().getClass());
+      Assert.assertEquals(code, ((OMException) ex.getCause()).getResult());
+    }
+  }
+
+  /**
+   * Execute the command and assert that the resulting exception message
+   * contains the expected error text (or just execute it if none is given).
+   */
+  private void executeWithError(S3Shell shell, String[] args,
+                                String expectedError) {
+    if (Strings.isNullOrEmpty(expectedError)) {
+      execute(shell, args);
+    } else {
+      try {
+        execute(shell, args);
+        fail("Exception is expected from command execution " + Arrays
+            .asList(args));
+      } catch (Exception ex) {
+        if (!Strings.isNullOrEmpty(expectedError)) {
+          Throwable exceptionToCheck = ex;
+          if (exceptionToCheck.getCause() != null) {
+            exceptionToCheck = exceptionToCheck.getCause();
+          }
+          Assert.assertTrue(
+              String.format(
+                  "Error of s3Shell code doesn't contain the " +
+                      "exception [%s] in [%s]",
+                  expectedError, exceptionToCheck.getMessage()),
+              exceptionToCheck.getMessage().contains(expectedError));
+        }
+      }
+    }
+  }
+
+  private String getOmAddress() {
+    List<ServiceInfo> services;
+    try {
+      services = cluster.getOzoneManager().getServiceList();
+    } catch (IOException e) {
+      fail("Could not get service list from OM");
+      return null;
+    }
+
+    return services.stream()
+        .filter(a -> HddsProtos.NodeType.OM.equals(a.getNodeType()))
+        .findFirst()
+        .map(s -> s.getServiceAddress(ServicePort.Type.RPC))
+        .orElseThrow(IllegalStateException::new);
+  }
+}
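
From the shell, the entry points this test drives programmatically look roughly like the following (the OM address is hypothetical; `--set` is the generic CLI option the test passes as its first argument):

```
ozone s3 --set=ozone.om.address=om-host:9862 path bucket1
ozone s3 --set=ozone.om.address=om-host:9862 getsecret
```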
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Shell.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Shell.java
index c8cb7c7..18fcdbd 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Shell.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Shell.java
@@ -24,7 +24,6 @@ import org.apache.hadoop.hdds.tracing.TracingUtil;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.web.ozShell.bucket.BucketCommands;
 import org.apache.hadoop.ozone.web.ozShell.keys.KeyCommands;
-import org.apache.hadoop.ozone.web.ozShell.s3.S3Commands;
 import org.apache.hadoop.ozone.web.ozShell.token.TokenCommands;
 import org.apache.hadoop.ozone.web.ozShell.volume.VolumeCommands;
 
@@ -46,8 +45,7 @@ import picocli.CommandLine.Command;
         VolumeCommands.class,
         BucketCommands.class,
         KeyCommands.class,
-        TokenCommands.class,
-        S3Commands.class
+        TokenCommands.class
     },
     versionProvider = HddsVersionProvider.class,
     mixinStandardHelpOptions = true)
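
One behavioral consequence of unregistering S3Commands here: the old `ozone sh s3 ...` spelling no longer parses. A hedged sketch (the error wording is picocli's unmatched-argument failure, roughly):

```
ozone sh s3 getsecret   # now rejected by picocli as an unmatched argument
ozone s3 getsecret      # the replacement
```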
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/BucketCommands.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/BucketCommands.java
index 9e8bbca..525e07f 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/BucketCommands.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/BucketCommands.java
@@ -39,8 +39,7 @@ import picocli.CommandLine.ParentCommand;
         ListBucketHandler.class,
         CreateBucketHandler.class,
         UpdateBucketHandler.class,
-        DeleteBucketHandler.class,
-        S3BucketMapping.class
+        DeleteBucketHandler.class
     },
     mixinStandardHelpOptions = true,
     versionProvider = HddsVersionProvider.class)
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/s3/GetS3SecretHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/s3/GetS3SecretHandler.java
index de9c3ec..1a359e2 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/s3/GetS3SecretHandler.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/s3/GetS3SecretHandler.java
@@ -30,7 +30,7 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY
  * Executes getsecret calls.
  */
 @Command(name = "getsecret",
-    description = "returns s3 secret for current user")
+    description = "Returns s3 secret for current user")
 public class GetS3SecretHandler extends Handler {
 
   public static final String OZONE_GETS3SECRET_ERROR = "This command is not" +
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/s3/S3Commands.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/s3/S3Commands.java
deleted file mode 100644
index 2279a64..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/s3/S3Commands.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.web.ozShell.s3;
-
-import org.apache.hadoop.hdds.cli.GenericParentCommand;
-import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-import org.apache.hadoop.hdds.cli.MissingSubcommandException;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.web.ozShell.Shell;
-import picocli.CommandLine.Command;
-import picocli.CommandLine.ParentCommand;
-
-import java.util.concurrent.Callable;
-
-/**
- * Subcommand to group s3 related operations.
- */
-@Command(name = "s3",
-    description = "S3 specific operations",
-    subcommands = {
-        GetS3SecretHandler.class
-    },
-    mixinStandardHelpOptions = true,
-    versionProvider = HddsVersionProvider.class)
-public class S3Commands implements GenericParentCommand, Callable<Void> {
-
-  @ParentCommand
-  private Shell shell;
-
-  @Override
-  public Void call() throws Exception {
-    throw new MissingSubcommandException(
-        this.shell.getCmd().getSubcommands().get("s3").getUsageMessage());
-  }
-
-  @Override
-  public boolean isVerbose() {
-    return shell.isVerbose();
-  }
-
-  @Override
-  public OzoneConfiguration createOzoneConfiguration() {
-    return shell.createOzoneConfiguration();
-  }
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/s3/S3Shell.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/s3/S3Shell.java
new file mode 100644
index 0000000..ebb9d6e
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/s3/S3Shell.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.ozone.web.ozShell.s3;
+
+import io.opentracing.Scope;
+import io.opentracing.util.GlobalTracer;
+import org.apache.hadoop.hdds.tracing.TracingUtil;
+import org.apache.hadoop.ozone.web.ozShell.Shell;
+import org.apache.hadoop.ozone.web.ozShell.bucket.S3BucketMapping;
+import picocli.CommandLine.Command;
+
+/**
+ * Shell for s3 related operations.
+ */
+@Command(name = "ozone s3",
+    description = "Shell for S3 specific operations",
+    subcommands = {
+        GetS3SecretHandler.class,
+        S3BucketMapping.class
+    })
+
+public class S3Shell extends Shell {
+
+  @Override
+  public void execute(String[] argv) {
+    TracingUtil.initTracing("s3shell");
+    try (Scope scope = GlobalTracer.get().buildSpan("main").startActive(true)) {
+      super.execute(argv);
+    }
+  }
+
+  /**
+   * Main for the S3Shell Command handling.
+   *
+   * @param argv - System Args Strings[]
+   * @throws Exception
+   */
+  public static void main(String[] argv) throws Exception {
+    new S3Shell().run(argv);
+  }
+}

