Posted to commits@ozone.apache.org by el...@apache.org on 2019/11/21 14:35:26 UTC
[hadoop-ozone] 01/02: revert files added by a merge to this branch
This is an automated email from the ASF dual-hosted git repository.
elek pushed a commit to branch HDDS-2587
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git
commit d1dfbc4cb3102b6687e97a527e9d67a13d0bda8c
Author: Márton Elek <el...@apache.org>
AuthorDate: Thu Nov 21 15:23:37 2019 +0100
revert files added by a merge to this branch
---
.../hdds/scm/client/ContainerOperationClient.java | 2 +-
.../hadoop/hdds/scm/client/HddsClientUtils.java | 5 +
.../apache/hadoop/hdds/scm/storage/BufferPool.java | 2 +-
.../hadoop/hdds/scm/storage/CommitWatcher.java | 4 +-
.../hadoop/hdds/scm/net/NodeSchemaLoader.java | 27 +-
.../apache/hadoop/hdds/tracing/TraceAllMethod.java | 5 -
.../common/src/main/resources/ozone-default.xml | 11 +-
.../hadoop/hdds/scm/net/TestNodeSchemaLoader.java | 151 ++++----
.../common/src/test/resources/log4j.properties | 23 --
.../networkTopologyTestFiles/external-entity.xml | 52 ---
.../hadoop/hdds/conf/ConfigFileAppender.java | 21 +-
.../hdds/scm/server/StorageContainerManager.java | 74 ++--
.../org/apache/hadoop/ozone/om/OMConfigKeys.java | 3 -
.../hadoop/ozone/om/exceptions/OMException.java | 6 +-
.../src/main/proto/OzoneManagerProtocol.proto | 1 -
.../hadoop/ozone/MiniOzoneHAClusterImpl.java | 6 +-
.../hadoop/ozone/TestSecureOzoneCluster.java | 399 ++++++++++++---------
.../hadoop/ozone/client/rpc/TestCommitWatcher.java | 9 +-
.../apache/hadoop/ozone/om/TestKeyManagerImpl.java | 133 +------
.../org/apache/hadoop/ozone/om/KeyManagerImpl.java | 66 ++--
.../apache/hadoop/ozone/om/ha/OMHANodeDetails.java | 21 +-
.../ozone/web/ozShell/keys/GetKeyHandler.java | 44 +--
.../ozone/web/ozShell/keys/PutKeyHandler.java | 35 +-
.../endpoint/MultiDeleteRequestUnmarshaller.java | 8 +-
.../ozone/s3/util/RangeHeaderParserUtil.java | 4 +-
.../ozone/s3/util/TestRangeHeaderParserUtil.java | 6 +-
.../ozone/audit/parser/common/DatabaseHelper.java | 157 ++++----
.../audit/parser/handler/LoadCommandHandler.java | 11 +-
.../audit/parser/handler/QueryCommandHandler.java | 11 +-
.../parser/handler/TemplateCommandHandler.java | 11 +-
30 files changed, 554 insertions(+), 754 deletions(-)
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
index 6e2fd59..b1d4538 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
@@ -221,7 +221,7 @@ public class ContainerOperationClient implements ScmClient {
// That makes sense, but it is not needed for the client to work.
if (LOG.isDebugEnabled()) {
LOG.debug("Pipeline creation successful. Pipeline: {}",
- pipeline);
+ pipeline.toString());
}
}
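The hunk above reverts a parameterized SLF4J call back to an explicit toString(). For background only, a minimal sketch of the two idioms; the Pipeline argument is replaced by a plain Object stand-in:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class LoggingIdiom {
  private static final Logger LOG =
      LoggerFactory.getLogger(LoggingIdiom.class);

  void logPipeline(Object pipeline) {
    // Placeholder form: toString() is invoked by SLF4J only when the
    // debug level is actually enabled.
    LOG.debug("Pipeline creation successful. Pipeline: {}", pipeline);

    // Reverted form: toString() runs eagerly at the call site, which is
    // why it is usually paired with an isDebugEnabled() guard.
    if (LOG.isDebugEnabled()) {
      LOG.debug("Pipeline creation successful. Pipeline: {}",
          pipeline.toString());
    }
  }
}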
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java
index d7e3f4d..8c9a11a 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java
@@ -49,6 +49,8 @@ import org.apache.ratis.protocol.AlreadyClosedException;
import org.apache.ratis.protocol.GroupMismatchException;
import org.apache.ratis.protocol.NotReplicatedException;
import org.apache.ratis.protocol.RaftRetryFailureException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.net.InetSocketAddress;
@@ -74,6 +76,9 @@ import java.util.concurrent.TimeoutException;
@InterfaceStability.Unstable
public final class HddsClientUtils {
+ private static final Logger LOG = LoggerFactory.getLogger(
+ HddsClientUtils.class);
+
private HddsClientUtils() {
}
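The revert also re-adds a class-level logger to the HddsClientUtils utility class. As a reference for the shape involved, a sketch of the conventional final-class-with-private-constructor pattern; the helper method here is hypothetical:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public final class ClientUtilsSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(ClientUtilsSketch.class);

  // Utility class: no instances, only static helpers.
  private ClientUtilsSketch() {
  }

  // Hypothetical helper, included only to show the logger in use.
  public static void warnOnRetry(int attempt) {
    LOG.warn("Retrying request, attempt {}", attempt);
  }
}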
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BufferPool.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BufferPool.java
index f57700e..f986fe3 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BufferPool.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BufferPool.java
@@ -94,7 +94,7 @@ public class BufferPool {
// always remove from head of the list and append at last
final ChunkBuffer buffer = bufferList.remove(0);
// Ensure the buffer to be removed is always at the head of the list.
- Preconditions.checkArgument(buffer == chunkBuffer);
+ Preconditions.checkArgument(buffer.equals(chunkBuffer));
buffer.clear();
bufferList.add(buffer);
Preconditions.checkArgument(currentBufferIndex >= 0);
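This hunk relaxes an identity check (==) into an equals() comparison. For a pool that recycles the very same buffer objects, identity is the stronger guarantee; a self-contained illustration of the difference, with ByteBuffer standing in for ChunkBuffer:

import java.nio.ByteBuffer;

class IdentityVsEquals {
  public static void main(String[] args) {
    ByteBuffer a = ByteBuffer.allocate(4);
    ByteBuffer b = ByteBuffer.allocate(4);

    // Two distinct buffers with identical (all-zero) content:
    System.out.println(a == b);      // false: different objects
    System.out.println(a.equals(b)); // true: content comparison

    // equals() can therefore pass even if the pool handed back a
    // different instance; == passes only for the exact same object.
  }
}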
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/CommitWatcher.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/CommitWatcher.java
index 3960e33..6311991 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/CommitWatcher.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/CommitWatcher.java
@@ -34,7 +34,6 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
-import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
@@ -111,8 +110,7 @@ public class CommitWatcher {
}
public void updateCommitInfoMap(long index, List<ChunkBuffer> buffers) {
- commitIndex2flushedDataMap.computeIfAbsent(index, k -> new LinkedList<>())
- .addAll(buffers);
+ commitIndex2flushedDataMap.put(index, buffers);
}
int getCommitInfoMapSize() {
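The reverted updateCommitInfoMap swaps list accumulation for a plain put, which replaces whatever list was already stored under the same commit index. A minimal sketch of the behavioral difference:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;

class PutVsAccumulate {
  public static void main(String[] args) {
    Map<Long, List<String>> byIndex = new HashMap<>();

    // Accumulating form: both updates for index 1 land in one list.
    byIndex.computeIfAbsent(1L, k -> new LinkedList<>()).addAll(List.of("a"));
    byIndex.computeIfAbsent(1L, k -> new LinkedList<>()).addAll(List.of("b"));
    System.out.println(byIndex.get(1L)); // [a, b]

    // put() form: the second update discards the first list.
    byIndex.put(2L, new ArrayList<>(List.of("a")));
    byIndex.put(2L, new ArrayList<>(List.of("b")));
    System.out.println(byIndex.get(2L)); // [b]
  }
}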
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaLoader.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaLoader.java
index fee0852..8d7abed 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaLoader.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaLoader.java
@@ -1,4 +1,4 @@
-/*
+/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -27,7 +27,6 @@ import org.w3c.dom.NodeList;
import org.w3c.dom.Text;
import org.xml.sax.SAXException;
-import javax.xml.XMLConstants;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;
@@ -102,7 +101,7 @@ public final class NodeSchemaLoader {
* @return all valid node schemas defined in schema file
*/
public NodeSchemaLoadResult loadSchemaFromFile(String schemaFilePath)
- throws FileNotFoundException {
+ throws IllegalArgumentException, FileNotFoundException {
try {
File schemaFile = new File(schemaFilePath);
@@ -123,7 +122,7 @@ public final class NodeSchemaLoader {
try (InputStream stream = classloader
.getResourceAsStream(schemaFilePath)) {
if (stream != null) {
- LOG.info("Loading file from {}", classloader
+ LOG.info("Loading file from " + classloader
.getResources(schemaFilePath));
return loadSchemaFromStream(schemaFilePath, stream);
}
@@ -172,7 +171,6 @@ public final class NodeSchemaLoader {
LOG.info("Loading network topology layer schema file");
// Read and parse the schema file.
DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
- dbf.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true);
dbf.setIgnoringComments(true);
DocumentBuilder builder = dbf.newDocumentBuilder();
Document doc = builder.parse(inputStream);
@@ -216,6 +214,9 @@ public final class NodeSchemaLoader {
* Load network topology layer schemas from a YAML configuration file.
* @param schemaFile as inputStream
* @return all valid node schemas defined in schema file
+ * @throws ParserConfigurationException ParserConfigurationException happen
+ * @throws IOException no such schema file
+ * @throws SAXException xml file has some invalid elements
* @throws IllegalArgumentException xml file content is logically invalid
*/
private NodeSchemaLoadResult loadSchemaFromYaml(InputStream schemaFile) {
@@ -296,7 +297,7 @@ public final class NodeSchemaLoader {
*/
private Map<String, NodeSchema> loadLayersSection(Element root) {
NodeList elements = root.getElementsByTagName(LAYER_TAG);
- Map<String, NodeSchema> schemas = new HashMap<>();
+ Map<String, NodeSchema> schemas = new HashMap<String, NodeSchema>();
for (int i = 0; i < elements.getLength(); i++) {
Node node = elements.item(i);
if (node instanceof Element) {
@@ -360,8 +361,8 @@ public final class NodeSchemaLoader {
Map<String, NodeSchema> schemas) {
NodeList elements = root.getElementsByTagName(TOPOLOGY_TAG)
.item(0).getChildNodes();
- List<NodeSchema> schemaList = new ArrayList<>();
- boolean enforcePrefix = false;
+ List<NodeSchema> schemaList = new ArrayList<NodeSchema>();
+ boolean enforecePrefix = false;
for (int i = 0; i < elements.getLength(); i++) {
Node node = elements.item(i);
if (node instanceof Element) {
@@ -382,10 +383,10 @@ public final class NodeSchemaLoader {
}
if (TOPOLOGY_PATH.equals(tagName)) {
if(value.startsWith(NetConstants.PATH_SEPARATOR_STR)) {
- value = value.substring(1);
+ value = value.substring(1, value.length());
}
String[] layerIDs = value.split(NetConstants.PATH_SEPARATOR_STR);
- if (layerIDs.length != schemas.size()) {
+ if (layerIDs == null || layerIDs.length != schemas.size()) {
throw new IllegalArgumentException("Topology path depth doesn't "
+ "match layer element numbers");
}
@@ -408,7 +409,7 @@ public final class NodeSchemaLoader {
schemaList.add(schemas.get(layerIDs[j]));
}
} else if (TOPOLOGY_ENFORCE_PREFIX.equalsIgnoreCase(tagName)) {
- enforcePrefix = Boolean.parseBoolean(value);
+ enforecePrefix = Boolean.parseBoolean(value);
} else {
throw new IllegalArgumentException("Unsupported Element <" +
tagName + ">");
@@ -416,7 +417,7 @@ public final class NodeSchemaLoader {
}
}
// Integrity check
- if (enforcePrefix) {
+ if (enforecePrefix) {
// Every InnerNode should have prefix defined
for (NodeSchema schema: schemas.values()) {
if (schema.getType() == LayerType.INNER_NODE &&
@@ -426,7 +427,7 @@ public final class NodeSchemaLoader {
}
}
}
- return new NodeSchemaLoadResult(schemaList, enforcePrefix);
+ return new NodeSchemaLoadResult(schemaList, enforecePrefix);
}
/**
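Note that this revert also removes the FEATURE_SECURE_PROCESSING flag from the DocumentBuilderFactory, and further down it deletes the external-entity.xml fixture that exercised that protection. For background, a sketch of a DOM parser hardened against external entities; the exact feature URIs honored vary by JAXP implementation, so treat the second flag as an assumption about a Xerces-style parser:

import javax.xml.XMLConstants;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;

class SecureXmlParsing {
  static DocumentBuilder newHardenedBuilder()
      throws ParserConfigurationException {
    DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
    // Master switch for the parser's secure-processing limits.
    dbf.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true);
    // Xerces-specific: reject any DOCTYPE, which blocks XXE payloads
    // like the <!ENTITY xxe SYSTEM "file:///etc/passwd"> fixture below.
    dbf.setFeature("http://apache.org/xml/features/disallow-doctype-decl",
        true);
    dbf.setExpandEntityReferences(false);
    return dbf.newDocumentBuilder();
  }
}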
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/TraceAllMethod.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/TraceAllMethod.java
index 6dc9f96..8bdf638 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/TraceAllMethod.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/TraceAllMethod.java
@@ -59,11 +59,6 @@ public class TraceAllMethod<T> implements InvocationHandler {
public Object invoke(Object proxy, Method method, Object[] args)
throws Throwable {
Method delegateMethod = findDelegatedMethod(method);
- if (delegateMethod == null) {
- throw new NoSuchMethodException("Method not found: " +
- method.getName());
- }
-
try (Scope scope = GlobalTracer.get().buildSpan(
name + "." + method.getName())
.startActive(true)) {
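Here the revert drops a defensive null check on the reflective method lookup; without it, a miss surfaces later as a NullPointerException rather than a descriptive NoSuchMethodException. A condensed sketch of the guarded pattern (keyed by method name only, so overloads are ignored, unlike the real class):

import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Method;
import java.util.HashMap;
import java.util.Map;

class GuardedDelegation implements InvocationHandler {
  private final Object delegate;
  private final Map<String, Method> methods = new HashMap<>();

  GuardedDelegation(Object delegate) {
    this.delegate = delegate;
    for (Method m : delegate.getClass().getMethods()) {
      methods.put(m.getName(), m);
    }
  }

  @Override
  public Object invoke(Object proxy, Method method, Object[] args)
      throws Throwable {
    Method target = methods.get(method.getName());
    if (target == null) {
      // Fail fast with a clear message instead of an NPE downstream.
      throw new NoSuchMethodException("Method not found: " + method.getName());
    }
    return target.invoke(delegate, args);
  }
}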
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 8110242..2f9ce31 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -494,20 +494,11 @@
<description>
Comma-separated list of OM service Ids.
- If not set, the default value of "omServiceIdDefault" is assigned as the
+ If not set, the default value of "om-service-value" is assigned as the
OM service ID.
</description>
</property>
<property>
- <name>ozone.om.internal.service.id</name>
- <value></value>
- <tag>OM, HA</tag>
- <description>
- Service ID of the Ozone Manager. If this is not set fall back to
- ozone.om.service.ids to find the service ID it belongs to.
- </description>
- </property>
- <property>
<name>ozone.om.nodes.EXAMPLEOMSERVICEID</name>
<value></value>
<tag>OM, HA</tag>
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNodeSchemaLoader.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNodeSchemaLoader.java
index aa65737..0c20353 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNodeSchemaLoader.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNodeSchemaLoader.java
@@ -1,4 +1,4 @@
-/*
+/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -19,116 +19,85 @@ package org.apache.hadoop.hdds.scm.net;
import org.junit.Rule;
import org.junit.Test;
-import org.junit.experimental.runners.Enclosed;
import org.junit.rules.Timeout;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
-import java.io.FileNotFoundException;
-import java.net.URL;
import java.util.Arrays;
import java.util.Collection;
-import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
-import static org.junit.jupiter.api.Assertions.assertThrows;
/** Test the node schema loader. */
-@RunWith(Enclosed.class)
+@RunWith(Parameterized.class)
public class TestNodeSchemaLoader {
-
- /**
- * Parameterized test cases for various error conditions.
- */
- @RunWith(Parameterized.class)
- public static class ParameterizedTests {
-
- private final String schemaFile;
- private final String errMsg;
-
- @Rule
- public Timeout testTimeout = new Timeout(2000);
-
- @Parameters
- public static Collection<Object[]> getSchemaFiles() {
- Object[][] schemaFiles = new Object[][]{
- {"enforce-error.xml", "layer without prefix defined"},
- {"invalid-cost.xml", "Cost should be positive number or 0"},
- {"multiple-leaf.xml", "Multiple LEAF layers are found"},
- {"multiple-root.xml", "Multiple ROOT layers are found"},
- {"no-leaf.xml", "No LEAF layer is found"},
- {"no-root.xml", "No ROOT layer is found"},
- {"path-layers-size-mismatch.xml",
- "Topology path depth doesn't match layer element numbers"},
- {"path-with-id-reference-failure.xml",
- "No layer found for id"},
- {"unknown-layer-type.xml", "Unsupported layer type"},
- {"wrong-path-order-1.xml",
- "Topology path doesn't start with ROOT layer"},
- {"wrong-path-order-2.xml",
- "Topology path doesn't end with LEAF layer"},
- {"no-topology.xml", "no or multiple <topology> element"},
- {"multiple-topology.xml", "no or multiple <topology> element"},
- {"invalid-version.xml", "Bad layoutversion value"},
- {"external-entity.xml", "accessExternalDTD"},
- };
- return Arrays.asList(schemaFiles);
- }
-
- public ParameterizedTests(String schemaFile, String errMsg) {
- this.schemaFile = schemaFile;
- this.errMsg = errMsg;
- }
-
- @Test
- public void testInvalid() {
- String filePath = getClassloaderResourcePath(schemaFile);
- Exception e = assertThrows(IllegalArgumentException.class,
- () -> NodeSchemaLoader.getInstance().loadSchemaFromFile(filePath));
- assertMessageContains(e.getMessage(), errMsg, schemaFile);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(TestNodeSchemaLoader.class);
+ private ClassLoader classLoader =
+ Thread.currentThread().getContextClassLoader();
+
+ public TestNodeSchemaLoader(String schemaFile, String errMsg) {
+ try {
+ String filePath = classLoader.getResource(
+ "./networkTopologyTestFiles/" + schemaFile).getPath();
+ NodeSchemaLoader.getInstance().loadSchemaFromFile(filePath);
+ fail("expect exceptions");
+ } catch (Throwable e) {
+ assertTrue(e.getMessage().contains(errMsg));
}
}
- /**
- * Test cases that do not use the parameters, should be executed only once.
- */
- public static class NonParameterizedTests {
-
- private static final String VALID_SCHEMA_FILE = "good.xml";
-
- @Rule
- public Timeout testTimeout = new Timeout(2000);
+ @Rule
+ public Timeout testTimeout = new Timeout(30000);
+
+ @Parameters
+ public static Collection<Object[]> getSchemaFiles() {
+ Object[][] schemaFiles = new Object[][]{
+ {"enforce-error.xml", "layer without prefix defined"},
+ {"invalid-cost.xml", "Cost should be positive number or 0"},
+ {"multiple-leaf.xml", "Multiple LEAF layers are found"},
+ {"multiple-root.xml", "Multiple ROOT layers are found"},
+ {"no-leaf.xml", "No LEAF layer is found"},
+ {"no-root.xml", "No ROOT layer is found"},
+ {"path-layers-size-mismatch.xml",
+ "Topology path depth doesn't match layer element numbers"},
+ {"path-with-id-reference-failure.xml",
+ "No layer found for id"},
+ {"unknown-layer-type.xml", "Unsupported layer type"},
+ {"wrong-path-order-1.xml",
+ "Topology path doesn't start with ROOT layer"},
+ {"wrong-path-order-2.xml", "Topology path doesn't end with LEAF layer"},
+ {"no-topology.xml", "no or multiple <topology> element"},
+ {"multiple-topology.xml", "no or multiple <topology> element"},
+ {"invalid-version.xml", "Bad layoutversion value"},
+ };
+ return Arrays.asList(schemaFiles);
+ }
- @Test
- public void testGood() throws Exception {
- String filePath = getClassloaderResourcePath(VALID_SCHEMA_FILE);
+ @Test
+ public void testGood() {
+ try {
+ String filePath = classLoader.getResource(
+ "./networkTopologyTestFiles/good.xml").getPath();
NodeSchemaLoader.getInstance().loadSchemaFromFile(filePath);
- }
-
- @Test
- public void testNotExist() {
- String filePath = getClassloaderResourcePath(VALID_SCHEMA_FILE)
- .replace(VALID_SCHEMA_FILE, "non-existent.xml");
- Exception e = assertThrows(FileNotFoundException.class,
- () -> NodeSchemaLoader.getInstance().loadSchemaFromFile(filePath));
- assertMessageContains(e.getMessage(), "not found", "non-existent.xml");
+ } catch (Throwable e) {
+ fail("should succeed");
}
}
- private static void assertMessageContains(
- String actual, String expected, String testCase) {
- if (!actual.contains(expected)) {
- fail(String.format(
- "Expected message for '%s' to contain '%s', but got: '%s'",
- testCase, expected, actual));
+ @Test
+ public void testNotExist() {
+ String filePath = classLoader.getResource(
+ "./networkTopologyTestFiles/good.xml").getPath() + ".backup";
+ try {
+ NodeSchemaLoader.getInstance().loadSchemaFromFile(filePath);
+ fail("should fail");
+ } catch (Throwable e) {
+ assertTrue(e.getMessage().contains("not found"));
}
}
-
- private static String getClassloaderResourcePath(String file) {
- URL resource = Thread.currentThread().getContextClassLoader()
- .getResource("networkTopologyTestFiles/" + file);
- assertNotNull(resource);
- return resource.getPath();
- }
}
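The test class above moves back from assertThrows to the older try/fail/catch style. Both check the same contract; a side-by-side sketch, where loadSchema is a hypothetical stand-in for the loader call:

import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.junit.jupiter.api.Assertions.assertThrows;

class ExceptionAssertionStyles {
  // Hypothetical stand-in that always rejects its input.
  static void loadSchema(String path) {
    throw new IllegalArgumentException("No LEAF layer is found");
  }

  void tryFailCatchStyle() {
    try {
      loadSchema("no-leaf.xml");
      fail("expected an exception");
    } catch (IllegalArgumentException e) {
      assertTrue(e.getMessage().contains("No LEAF layer is found"));
    }
  }

  void assertThrowsStyle() {
    Exception e = assertThrows(IllegalArgumentException.class,
        () -> loadSchema("no-leaf.xml"));
    assertTrue(e.getMessage().contains("No LEAF layer is found"));
  }
}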
diff --git a/hadoop-hdds/common/src/test/resources/log4j.properties b/hadoop-hdds/common/src/test/resources/log4j.properties
deleted file mode 100644
index bb5cbe5..0000000
--- a/hadoop-hdds/common/src/test/resources/log4j.properties
+++ /dev/null
@@ -1,23 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# log4j configuration used during build and unit tests
-
-log4j.rootLogger=INFO,stdout
-log4j.threshold=ALL
-log4j.appender.stdout=org.apache.log4j.ConsoleAppender
-log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
-log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
diff --git a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/external-entity.xml b/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/external-entity.xml
deleted file mode 100644
index f93cb0f..0000000
--- a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/external-entity.xml
+++ /dev/null
@@ -1,52 +0,0 @@
-<?xml version="1.0"?>
-<!DOCTYPE foo [
-<!ENTITY xxe SYSTEM "file:///etc/passwd" >
-]>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements. See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership. The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<configuration>
- <layoutversion>1</layoutversion>
- <layers>
- <layer id="datacenter">
- <prefix></prefix>
- <cost>1</cost>
- <type>Root</type>
- </layer>
- <layer id="rack">
- <prefix>rack</prefix>
- <cost>1</cost>
- <type>InnerNode</type>
- <default>/default-rack</default>
- </layer>
- <layer id="nodegroup">
- <prefix>nodegroup</prefix>
- <cost>1</cost>
- <type>InnerNode</type>
- <default>/default-nodegroup</default>
- </layer>
- <layer id="node">
- <prefix></prefix>
- <cost>0</cost>
- <type>&xxe;</type>
- </layer>
- </layers>
- <topology>
- <path>/datacenter/rack/nodegroup/node</path>
- <enforceprefix>true</enforceprefix>
- </topology>
-</configuration>
diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigFileAppender.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigFileAppender.java
index 9f1c087..9463f42 100644
--- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigFileAppender.java
+++ b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigFileAppender.java
@@ -1,4 +1,4 @@
-/*
+/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -17,7 +17,6 @@
*/
package org.apache.hadoop.hdds.conf;
-import javax.xml.XMLConstants;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.transform.OutputKeys;
@@ -49,7 +48,6 @@ public class ConfigFileAppender {
public ConfigFileAppender() {
try {
DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
- factory.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true);
builder = factory.newDocumentBuilder();
} catch (Exception ex) {
throw new ConfigurationException("Can initialize new configuration", ex);
@@ -93,7 +91,7 @@ public class ConfigFileAppender {
addXmlElement(propertyElement, "description", description);
- String tagsAsString = Arrays.stream(tags).map(Enum::name)
+ String tagsAsString = Arrays.stream(tags).map(tag -> tag.name())
.collect(Collectors.joining(", "));
addXmlElement(propertyElement, "tag", tagsAsString);
@@ -113,16 +111,15 @@ public class ConfigFileAppender {
*/
public void write(Writer writer) {
try {
- TransformerFactory factory = TransformerFactory.newInstance();
- factory.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true);
- Transformer transformer = factory.newTransformer();
+ TransformerFactory transformerFactory = TransformerFactory.newInstance();
+ Transformer transf = transformerFactory.newTransformer();
- transformer.setOutputProperty(OutputKeys.ENCODING, "UTF-8");
- transformer.setOutputProperty(OutputKeys.INDENT, "yes");
- transformer.setOutputProperty("{http://xml.apache.org/xslt}indent-amount",
- "2");
+ transf.setOutputProperty(OutputKeys.ENCODING, "UTF-8");
+ transf.setOutputProperty(OutputKeys.INDENT, "yes");
+ transf
+ .setOutputProperty("{http://xml.apache.org/xslt}indent-amount", "2");
- transformer.transform(new DOMSource(document), new StreamResult(writer));
+ transf.transform(new DOMSource(document), new StreamResult(writer));
} catch (TransformerException e) {
throw new ConfigurationException("Can't write the configuration xml", e);
}
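One of the hunks above trades the Enum::name method reference for an equivalent explicit lambda. The two compile to the same behavior; a minimal demonstration, with a hypothetical local enum in place of the real config tags:

import java.util.Arrays;
import java.util.stream.Collectors;

class MethodRefVsLambda {
  enum Tag { OM, SCM, SECURITY }

  public static void main(String[] args) {
    Tag[] tags = {Tag.OM, Tag.SECURITY};

    String viaMethodRef = Arrays.stream(tags).map(Enum::name)
        .collect(Collectors.joining(", "));
    String viaLambda = Arrays.stream(tags).map(tag -> tag.name())
        .collect(Collectors.joining(", "));

    System.out.println(viaMethodRef); // OM, SECURITY
    System.out.println(viaMethodRef.equals(viaLambda)); // true
  }
}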
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index 0249d7e..21127f4 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -25,6 +25,7 @@ import com.google.common.annotations.VisibleForTesting;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.RemovalListener;
+import com.google.common.cache.RemovalNotification;
import com.google.protobuf.BlockingService;
import java.util.Objects;
import org.apache.hadoop.classification.InterfaceAudience;
@@ -177,7 +178,7 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
/**
* Key = DatanodeUuid, value = ContainerStat.
*/
- private final Cache<String, ContainerStat> containerReportCache;
+ private Cache<String, ContainerStat> containerReportCache;
private ReplicationManager replicationManager;
@@ -231,8 +232,7 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
configuration = conf;
initMetrics();
- containerReportCache = buildContainerReportCache();
-
+ initContainerReportCache(conf);
/**
* It is assumed the scm --init command creates the SCM Storage Config.
*/
@@ -478,6 +478,10 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
scmMetadataStore = configurator.getMetadataStore();
} else {
scmMetadataStore = new SCMMetadataStoreRDBImpl(conf);
+ if (scmMetadataStore == null) {
+ throw new SCMException("Unable to initialize metadata store",
+ ResultCodes.SCM_NOT_INITIALIZED);
+ }
}
}
@@ -613,7 +617,7 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
scmStorageConfig.setClusterId(clusterId);
}
scmStorageConfig.initialize();
- LOG.info(
+ System.out.println(
"SCM initialization succeeded."
+ "Current cluster id for sd="
+ scmStorageConfig.getStorageDir()
@@ -625,7 +629,7 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
return false;
}
} else {
- LOG.info(
+ System.out.println(
"SCM already initialized. Reusing existing"
+ " cluster id for sd="
+ scmStorageConfig.getStorageDir()
@@ -671,25 +675,28 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
/**
* Initialize container reports cache that sent from datanodes.
+ *
+ * @param conf
*/
- @SuppressWarnings("UnstableApiUsage")
- private Cache<String, ContainerStat> buildContainerReportCache() {
- return
+ private void initContainerReportCache(OzoneConfiguration conf) {
+ containerReportCache =
CacheBuilder.newBuilder()
.expireAfterAccess(Long.MAX_VALUE, TimeUnit.MILLISECONDS)
.maximumSize(Integer.MAX_VALUE)
- .removalListener((
- RemovalListener<String, ContainerStat>) removalNotification -> {
- synchronized (containerReportCache) {
- ContainerStat stat = removalNotification.getValue();
- if (stat != null) {
- // TODO: Are we doing the right thing here?
+ .removalListener(
+ new RemovalListener<String, ContainerStat>() {
+ @Override
+ public void onRemoval(
+ RemovalNotification<String, ContainerStat>
+ removalNotification) {
+ synchronized (containerReportCache) {
+ ContainerStat stat = removalNotification.getValue();
// remove invalid container report
metrics.decrContainerStat(stat);
- }
- if (LOG.isDebugEnabled()) {
- LOG.debug("Remove expired container stat entry for " +
- "datanode: {}.", removalNotification.getKey());
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Remove expired container stat entry for " +
+ "datanode: {}.", removalNotification.getKey());
+ }
}
}
})
@@ -756,27 +763,22 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
* Start service.
*/
public void start() throws IOException {
- if (LOG.isInfoEnabled()) {
- LOG.info(buildRpcServerStartMessage(
- "StorageContainerLocationProtocol RPC server",
- getClientRpcAddress()));
- }
+ LOG.info(
+ buildRpcServerStartMessage(
+ "StorageContainerLocationProtocol RPC server",
+ getClientRpcAddress()));
ms = HddsUtils.initializeMetrics(configuration, "StorageContainerManager");
commandWatcherLeaseManager.start();
getClientProtocolServer().start();
- if (LOG.isInfoEnabled()) {
- LOG.info(buildRpcServerStartMessage("ScmBlockLocationProtocol RPC " +
- "server", getBlockProtocolServer().getBlockRpcAddress()));
- }
+ LOG.info(buildRpcServerStartMessage("ScmBlockLocationProtocol RPC " +
+ "server", getBlockProtocolServer().getBlockRpcAddress()));
getBlockProtocolServer().start();
- if (LOG.isInfoEnabled()) {
- LOG.info(buildRpcServerStartMessage("ScmDatanodeProtocl RPC " +
- "server", getDatanodeProtocolServer().getDatanodeRpcAddress()));
- }
+ LOG.info(buildRpcServerStartMessage("ScmDatanodeProtocl RPC " +
+ "server", getDatanodeProtocolServer().getDatanodeRpcAddress()));
getDatanodeProtocolServer().start();
if (getSecurityProtocolServer() != null) {
getSecurityProtocolServer().start();
@@ -969,10 +971,12 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
}
public void checkAdminAccess(String remoteUser) throws IOException {
- if (remoteUser != null && !scmAdminUsernames.contains(remoteUser)) {
- throw new IOException(
- "Access denied for user " + remoteUser + ". Superuser privilege " +
- "is required.");
+ if (remoteUser != null) {
+ if (!scmAdminUsernames.contains(remoteUser)) {
+ throw new IOException(
+ "Access denied for user " + remoteUser + ". Superuser privilege " +
+ "is required.");
+ }
}
}
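Several hunks in this file widen code back out: a lambda RemovalListener becomes an anonymous inner class, and a combined admin-access condition becomes nested ifs. The forms are functionally equivalent; a compact sketch of the Guava cache listener in both styles, with String and Long standing in for the real key and stat types:

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.RemovalListener;
import com.google.common.cache.RemovalNotification;

class RemovalListenerStyles {
  Cache<String, Long> withLambda() {
    return CacheBuilder.newBuilder()
        .maximumSize(1000)
        .removalListener((RemovalListener<String, Long>) notification ->
            System.out.println("evicted " + notification.getKey()))
        .build();
  }

  Cache<String, Long> withAnonymousClass() {
    return CacheBuilder.newBuilder()
        .maximumSize(1000)
        .removalListener(new RemovalListener<String, Long>() {
          @Override
          public void onRemoval(
              RemovalNotification<String, Long> notification) {
            System.out.println("evicted " + notification.getKey());
          }
        })
        .build();
  }
}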
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
index 8bbfa82..dcb9b5c 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
@@ -39,9 +39,6 @@ public final class OMConfigKeys {
"ozone.om.handler.count.key";
public static final int OZONE_OM_HANDLER_COUNT_DEFAULT = 20;
- public static final String OZONE_OM_INTERNAL_SERVICE_ID =
- "ozone.om.internal.service.id";
-
public static final String OZONE_OM_SERVICE_IDS_KEY =
"ozone.om.service.ids";
public static final String OZONE_OM_NODES_KEY =
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java
index a594f41..df44e0b 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java
@@ -216,9 +216,7 @@ public class OMException extends IOException {
INVALID_PART, // When part name is not found or not matching with partname
// in OM MPU partInfo.
- INVALID_PART_ORDER, // When list of parts mentioned to complete MPU are not
- // given in ascending order.
-
- SCM_GET_PIPELINE_EXCEPTION
+ INVALID_PART_ORDER // When list of parts mentioned to complete MPU are not
+ // given in ascending order.
}
}
diff --git a/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto b/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto
index 21577be..62b7190 100644
--- a/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto
+++ b/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto
@@ -306,7 +306,6 @@ enum Status {
INVALID_PART = 55;
INVALID_PART_ORDER = 56;
- SCM_GET_PIPELINE_EXCEPTION = 57;
}
/**
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java
index d756457..006d854 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java
@@ -307,12 +307,8 @@ public final class MiniOzoneHAClusterImpl extends MiniOzoneClusterImpl {
* Initialize HA related configurations.
*/
private void initHAConfig(int basePort) throws IOException {
- // Set configurations required for starting OM HA service, because that
- // is the serviceID being passed to start Ozone HA cluster.
- // Here setting internal service and OZONE_OM_SERVICE_IDS_KEY, in this
- // way in OM start it uses internal service id to find it's service id.
+ // Set configurations required for starting OM HA service
conf.set(OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY, omServiceId);
- conf.set(OMConfigKeys.OZONE_OM_INTERNAL_SERVICE_ID, omServiceId);
String omNodesKey = OmUtils.addKeySuffixes(
OMConfigKeys.OZONE_OM_NODES_KEY, omServiceId);
StringBuilder omNodesKeyValue = new StringBuilder();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
index b7dab79..9cfd218 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
@@ -1,4 +1,4 @@
-/*
+/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -20,7 +20,6 @@ package org.apache.hadoop.ozone;
import java.io.File;
import java.io.IOException;
import java.net.InetAddress;
-import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.security.KeyPair;
@@ -29,13 +28,16 @@ import java.util.Properties;
import java.util.UUID;
import java.util.concurrent.Callable;
-import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol;
import org.apache.hadoop.hdds.scm.HddsTestUtils;
import org.apache.hadoop.hdds.scm.ScmConfig;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.ScmInfo;
import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
import org.apache.hadoop.hdds.scm.server.SCMHTTPServerConfig;
@@ -53,8 +55,10 @@ import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.minikdc.MiniKdc;
import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.net.ServerSocketUtil;
import org.apache.hadoop.ozone.client.CertificateClientTestImpl;
import org.apache.hadoop.ozone.common.Storage;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
import org.apache.hadoop.ozone.om.OMStorage;
import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.om.exceptions.OMException;
@@ -75,6 +79,7 @@ import org.bouncycastle.asn1.x500.X500Name;
import org.bouncycastle.asn1.x500.style.BCStyle;
import org.bouncycastle.cert.jcajce.JcaX509CertificateHolder;
import org.junit.After;
+import org.junit.Assert;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
@@ -89,29 +94,10 @@ import java.time.LocalDateTime;
import java.time.temporal.ChronoUnit;
import java.util.Date;
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;
+import static junit.framework.TestCase.assertNotNull;
import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS;
-import static org.apache.hadoop.hdds.scm.ScmConfig.ConfigStrings.HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY;
-import static org.apache.hadoop.hdds.scm.ScmConfig.ConfigStrings.HDDS_SCM_KERBEROS_PRINCIPAL_KEY;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_PORT_KEY;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_PORT_DEFAULT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_PORT_KEY;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_PORT_KEY;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_PORT_DEFAULT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_PORT_KEY;
-import static org.apache.hadoop.hdds.scm.server.SCMHTTPServerConfig.ConfigStrings.HDDS_SCM_HTTP_KERBEROS_KEYTAB_FILE_KEY;
-import static org.apache.hadoop.hdds.scm.server.SCMHTTPServerConfig.ConfigStrings.HDDS_SCM_HTTP_KERBEROS_PRINCIPAL_KEY;
-import static org.apache.hadoop.net.ServerSocketUtil.getPort;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.DELEGATION_TOKEN_MAX_LIFETIME_KEY;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HTTP_KERBEROS_KEYTAB_FILE;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HTTP_KERBEROS_PRINCIPAL_KEY;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_KERBEROS_KEYTAB_FILE_KEY;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY;
import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_AUTH_METHOD;
import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TOKEN_ERROR_OTHER;
import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TOKEN_EXPIRED;
@@ -119,7 +105,6 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLU
import static org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod.KERBEROS;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
@@ -133,64 +118,82 @@ public final class TestSecureOzoneCluster {
private static final String TEST_USER = "testUgiUser@EXAMPLE.COM";
private static final String COMPONENT = "test";
- private static final int CLIENT_TIMEOUT = 2_000;
- private static final String OM_CERT_SERIAL_ID = "9879877970576";
- private static final Logger LOG = LoggerFactory
+ private static final int CLIENT_TIMEOUT = 2 * 1000;
+ private Logger logger = LoggerFactory
.getLogger(TestSecureOzoneCluster.class);
@Rule
- public Timeout timeout = new Timeout(80_000);
-
- @Rule
- public TemporaryFolder folder= new TemporaryFolder();
+ public Timeout timeout = new Timeout(80000);
private MiniKdc miniKdc;
private OzoneConfiguration conf;
private File workDir;
+ private static Properties securityProperties;
private File scmKeytab;
private File spnegoKeytab;
private File omKeyTab;
private File testUserKeytab;
+ private String curUser;
private String testUserPrincipal;
+ private UserGroupInformation testKerberosUgi;
private StorageContainerManager scm;
private OzoneManager om;
private String host;
- private String clusterId;
- private String scmId;
- private String omId;
+
+ private static String clusterId;
+ private static String scmId;
+ private static String omId;
private OzoneManagerProtocolClientSideTranslatorPB omClient;
+ private KeyPair keyPair;
+ private Path metaDirPath;
+ @Rule
+ public TemporaryFolder folder= new TemporaryFolder();
+ private String omCertSerialId = "9879877970576";
@Before
public void init() {
try {
conf = new OzoneConfiguration();
- conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, "localhost");
-
- conf.setInt(OZONE_SCM_CLIENT_PORT_KEY,
- getPort(OZONE_SCM_CLIENT_PORT_DEFAULT, 100));
- conf.setInt(OZONE_SCM_DATANODE_PORT_KEY,
- getPort(OZONE_SCM_DATANODE_PORT_DEFAULT, 100));
- conf.setInt(OZONE_SCM_BLOCK_CLIENT_PORT_KEY,
- getPort(OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT, 100));
- conf.setInt(OZONE_SCM_SECURITY_SERVICE_PORT_KEY,
- getPort(OZONE_SCM_SECURITY_SERVICE_PORT_DEFAULT, 100));
+ conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "localhost");
+
+ conf.setInt(ScmConfigKeys.OZONE_SCM_CLIENT_PORT_KEY, ServerSocketUtil
+ .getPort(ScmConfigKeys.OZONE_SCM_CLIENT_PORT_DEFAULT, 100));
+ conf.setInt(ScmConfigKeys.OZONE_SCM_DATANODE_PORT_KEY, ServerSocketUtil
+ .getPort(ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT, 100));
+ conf.setInt(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_PORT_KEY,
+ ServerSocketUtil.getPort(ScmConfigKeys
+ .OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT, 100));
+ conf.setInt(ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_PORT_KEY,
+ ServerSocketUtil.getPort(ScmConfigKeys
+ .OZONE_SCM_SECURITY_SERVICE_PORT_DEFAULT, 100));
+
+ conf.set(OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY, "OMMarketingCluster001");
+ conf.set(OMConfigKeys.OZONE_OM_NODES_KEY+".OMMarketingCluster001",
+ "node1,node2,node3");
+ conf.set(OMConfigKeys.OZONE_OM_ADDRESS_KEY+".OMMarketingCluster001.node1",
+ "localhost:9862");
+ conf.set(OMConfigKeys.OZONE_OM_ADDRESS_KEY+".OMMarketingCluster001.node2",
+ "google.com:9863");
+ conf.set(OMConfigKeys.OZONE_OM_ADDRESS_KEY+".OMMarketingCluster001.node3",
+ "yahoo.com:9864");
DefaultMetricsSystem.setMiniClusterMode(true);
final String path = folder.newFolder().toString();
- Path metaDirPath = Paths.get(path, "om-meta");
+ metaDirPath = Paths.get(path, "om-meta");
conf.set(OZONE_METADATA_DIRS, metaDirPath.toString());
conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true);
- conf.set(HADOOP_SECURITY_AUTHENTICATION, KERBEROS.name());
-
- workDir = GenericTestUtils.getTestDir(getClass().getSimpleName());
+ conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
+ KERBEROS.toString());
startMiniKdc();
- setSecureConfig();
- createCredentialsInKDC();
- generateKeyPair();
+ setSecureConfig(conf);
+ createCredentialsInKDC(conf, miniKdc);
+ generateKeyPair(conf);
// OzoneManager.setTestSecureOmFlag(true);
+ } catch (IOException e) {
+ logger.error("Failed to initialize TestSecureOzoneCluster", e);
} catch (Exception e) {
- LOG.error("Failed to initialize TestSecureOzoneCluster", e);
+ logger.error("Failed to initialize TestSecureOzoneCluster", e);
}
}
@@ -201,22 +204,30 @@ public final class TestSecureOzoneCluster {
if (scm != null) {
scm.stop();
}
- IOUtils.closeQuietly(om);
- IOUtils.closeQuietly(omClient);
+ if (om != null) {
+ om.stop();
+ }
+ if (omClient != null) {
+ omClient.close();
+ }
} catch (Exception e) {
- LOG.error("Failed to stop TestSecureOzoneCluster", e);
+ logger.error("Failed to stop TestSecureOzoneCluster", e);
}
}
- private void createCredentialsInKDC() throws Exception {
- ScmConfig scmConfig = conf.getObject(ScmConfig.class);
+ private void createCredentialsInKDC(Configuration configuration,
+ MiniKdc kdc) throws Exception {
+ OzoneConfiguration ozoneConfiguration =
+ new OzoneConfiguration(configuration);
SCMHTTPServerConfig httpServerConfig =
- conf.getObject(SCMHTTPServerConfig.class);
- createPrincipal(scmKeytab, scmConfig.getKerberosPrincipal());
- createPrincipal(spnegoKeytab, httpServerConfig.getKerberosPrincipal());
+ ozoneConfiguration.getObject(SCMHTTPServerConfig.class);
+ createPrincipal(scmKeytab,
+ httpServerConfig.getKerberosPrincipal());
+ createPrincipal(spnegoKeytab,
+ httpServerConfig.getKerberosKeytab());
createPrincipal(testUserKeytab, testUserPrincipal);
createPrincipal(omKeyTab,
- conf.get(OZONE_OM_KERBEROS_PRINCIPAL_KEY));
+ configuration.get(OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY));
}
private void createPrincipal(File keytab, String... principal)
@@ -225,7 +236,9 @@ public final class TestSecureOzoneCluster {
}
private void startMiniKdc() throws Exception {
- Properties securityProperties = MiniKdc.createConf();
+ workDir = GenericTestUtils
+ .getTestDir(TestSecureOzoneCluster.class.getSimpleName());
+ securityProperties = MiniKdc.createConf();
miniKdc = new MiniKdc(securityProperties, workDir);
miniKdc.start();
}
@@ -234,22 +247,31 @@ public final class TestSecureOzoneCluster {
miniKdc.stop();
}
- private void setSecureConfig() throws IOException {
- conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true);
+ private void setSecureConfig(Configuration configuration) throws IOException {
+ SCMHTTPServerConfig httpServerConfig =
+ conf.getObject(SCMHTTPServerConfig.class);
+ ScmConfig scmConfig = conf.getObject(ScmConfig.class);
+ configuration.setBoolean(OZONE_SECURITY_ENABLED_KEY, true);
host = InetAddress.getLocalHost().getCanonicalHostName()
.toLowerCase();
-
- conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
-
- String curUser = UserGroupInformation.getCurrentUser().getUserName();
- conf.set(OZONE_ADMINISTRATORS, curUser);
-
String realm = miniKdc.getRealm();
- String hostAndRealm = host + "@" + realm;
- conf.set(HDDS_SCM_KERBEROS_PRINCIPAL_KEY, "scm/" + hostAndRealm);
- conf.set(HDDS_SCM_HTTP_KERBEROS_PRINCIPAL_KEY, "HTTP_SCM/" + hostAndRealm);
- conf.set(OZONE_OM_KERBEROS_PRINCIPAL_KEY, "om/" + hostAndRealm);
- conf.set(OZONE_OM_HTTP_KERBEROS_PRINCIPAL_KEY, "HTTP_OM/" + hostAndRealm);
+ curUser = UserGroupInformation.getCurrentUser()
+ .getUserName();
+ configuration.set(
+ CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
+ "kerberos");
+ configuration.set(OZONE_ADMINISTRATORS, curUser);
+
+ configuration.set(ScmConfig.ConfigStrings.HDDS_SCM_KERBEROS_PRINCIPAL_KEY,
+ "scm/" + host + "@" + realm);
+ configuration.set(SCMHTTPServerConfig.ConfigStrings
+ .HDDS_SCM_HTTP_KERBEROS_PRINCIPAL_KEY,
+ "HTTP_SCM/" + host + "@" + realm);
+
+ configuration.set(OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY,
+ "om/" + host + "@" + realm);
+ configuration.set(OMConfigKeys.OZONE_OM_HTTP_KERBEROS_PRINCIPAL_KEY,
+ "HTTP_OM/" + host + "@" + realm);
scmKeytab = new File(workDir, "scm.keytab");
spnegoKeytab = new File(workDir, "http.keytab");
@@ -257,13 +279,15 @@ public final class TestSecureOzoneCluster {
testUserKeytab = new File(workDir, "testuser.keytab");
testUserPrincipal = "test@" + realm;
- conf.set(HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY,
+ configuration.set(ScmConfig.ConfigStrings.HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY,
scmKeytab.getAbsolutePath());
- conf.set(HDDS_SCM_HTTP_KERBEROS_KEYTAB_FILE_KEY,
+ configuration.set(
+ SCMHTTPServerConfig.ConfigStrings
+ .HDDS_SCM_HTTP_KERBEROS_KEYTAB_FILE_KEY,
spnegoKeytab.getAbsolutePath());
- conf.set(OZONE_OM_KERBEROS_KEYTAB_FILE_KEY,
+ configuration.set(OMConfigKeys.OZONE_OM_KERBEROS_KEYTAB_FILE_KEY,
omKeyTab.getAbsolutePath());
- conf.set(OZONE_OM_HTTP_KERBEROS_KEYTAB_FILE,
+ conf.set(OMConfigKeys.OZONE_OM_HTTP_KERBEROS_KEYTAB_FILE,
spnegoKeytab.getAbsolutePath());
}
@@ -274,8 +298,8 @@ public final class TestSecureOzoneCluster {
scm = StorageContainerManager.createSCM(conf);
//Reads the SCM Info from SCM instance
ScmInfo scmInfo = scm.getClientProtocolServer().getScmInfo();
- assertEquals(clusterId, scmInfo.getClusterId());
- assertEquals(scmId, scmInfo.getScmId());
+ Assert.assertEquals(clusterId, scmInfo.getClusterId());
+ Assert.assertEquals(scmId, scmInfo.getScmId());
}
@Test
@@ -296,9 +320,9 @@ public final class TestSecureOzoneCluster {
HddsClientUtils.getScmSecurityClient(conf, ugi);
assertNotNull(scmSecurityProtocolClient);
String caCert = scmSecurityProtocolClient.getCACertificate();
- assertNotNull(caCert);
LambdaTestUtils.intercept(RemoteException.class, "Certificate not found",
() -> scmSecurityProtocolClient.getCertificate("1"));
+ assertNotNull(caCert);
// Case 2: User without Kerberos credentials should fail.
ugi = UserGroupInformation.createRemoteUser("test");
@@ -306,10 +330,11 @@ public final class TestSecureOzoneCluster {
SCMSecurityProtocol finalScmSecurityProtocolClient =
HddsClientUtils.getScmSecurityClient(conf, ugi);
- String cannotAuthMessage = "Client cannot authenticate via:[KERBEROS]";
- LambdaTestUtils.intercept(IOException.class, cannotAuthMessage,
- finalScmSecurityProtocolClient::getCACertificate);
- LambdaTestUtils.intercept(IOException.class, cannotAuthMessage,
+ LambdaTestUtils.intercept(IOException.class, "Client cannot" +
+ " authenticate via:[KERBEROS]",
+ () -> finalScmSecurityProtocolClient.getCACertificate());
+ LambdaTestUtils.intercept(IOException.class, "Client cannot" +
+ " authenticate via:[KERBEROS]",
() -> finalScmSecurityProtocolClient.getCertificate("1"));
} finally {
if (scm != null) {
@@ -318,15 +343,20 @@ public final class TestSecureOzoneCluster {
}
}
- private void initSCM() throws IOException {
+ private void initSCM()
+ throws IOException, AuthenticationException {
+
clusterId = UUID.randomUUID().toString();
scmId = UUID.randomUUID().toString();
omId = UUID.randomUUID().toString();
final String path = folder.newFolder().toString();
Path scmPath = Paths.get(path, "scm-meta");
- Files.createDirectories(scmPath);
- conf.set(OZONE_METADATA_DIRS, scmPath.toString());
+ File temp = scmPath.toFile();
+ if(!temp.exists()) {
+ temp.mkdirs();
+ }
+ conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString());
SCMStorageConfig scmStore = new SCMStorageConfig(conf);
scmStore.setClusterId(clusterId);
scmStore.setScmId(scmId);
@@ -337,16 +367,19 @@ public final class TestSecureOzoneCluster {
@Test
public void testSecureScmStartupFailure() throws Exception {
initSCM();
- conf.set(HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY, "");
- conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
+ conf.set(ScmConfig.ConfigStrings.HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY, "");
+ conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
+ "kerberos");
LambdaTestUtils.intercept(IOException.class,
"Running in secure mode, but config doesn't have a keytab",
- () -> StorageContainerManager.createSCM(conf));
+ () -> {
+ StorageContainerManager.createSCM(conf);
+ });
- conf.set(HDDS_SCM_KERBEROS_PRINCIPAL_KEY,
+ conf.set(ScmConfig.ConfigStrings.HDDS_SCM_KERBEROS_PRINCIPAL_KEY,
"scm/_HOST@EXAMPLE.com");
- conf.set(HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY,
+ conf.set(ScmConfig.ConfigStrings.HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY,
"/etc/security/keytabs/scm.keytab");
testCommonKerberosFailures(
@@ -354,26 +387,27 @@ public final class TestSecureOzoneCluster {
}
- private void testCommonKerberosFailures(Callable<?> test) throws Exception {
- LambdaTestUtils.intercept(KerberosAuthException.class,
- "failure to login: for principal:",
- test);
+ private void testCommonKerberosFailures(Callable callable) throws Exception {
+ LambdaTestUtils.intercept(KerberosAuthException.class, "failure "
+ + "to login: for principal:", callable);
+ conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
+ "OAuth2");
- String invalidValue = "OAuth2";
- conf.set(HADOOP_SECURITY_AUTHENTICATION, invalidValue);
- LambdaTestUtils.intercept(IllegalArgumentException.class,
- "Invalid attribute value for " + HADOOP_SECURITY_AUTHENTICATION
- + " of " + invalidValue,
- test);
+ LambdaTestUtils.intercept(IllegalArgumentException.class, "Invalid"
+ + " attribute value for hadoop.security.authentication of OAuth2",
+ callable);
- conf.set(HADOOP_SECURITY_AUTHENTICATION, "KERBEROS_SSL");
+ conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
+ "KERBEROS_SSL");
LambdaTestUtils.intercept(AuthenticationException.class,
"KERBEROS_SSL authentication method not",
- test);
+ callable);
}
/**
* Tests the secure om Initialization Failure.
+ *
+ * @throws IOException
*/
@Test
public void testSecureOMInitializationFailure() throws Exception {
@@ -381,21 +415,23 @@ public final class TestSecureOzoneCluster {
// Create a secure SCM instance as om client will connect to it
scm = StorageContainerManager.createSCM(conf);
setupOm(conf);
- conf.set(OZONE_OM_KERBEROS_PRINCIPAL_KEY,
+ conf.set(OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY,
"non-existent-user@EXAMPLE.com");
testCommonKerberosFailures(() -> OzoneManager.createOm(conf));
}
/**
* Tests the secure om Initialization success.
+ *
+ * @throws IOException
*/
@Test
public void testSecureOmInitializationSuccess() throws Exception {
initSCM();
// Create a secure SCM instance as om client will connect to it
scm = StorageContainerManager.createSCM(conf);
- LogCapturer logs = LogCapturer.captureLogs(OzoneManager.getLogger());
- GenericTestUtils.setLogLevel(OzoneManager.getLogger(), INFO);
+ LogCapturer logs = LogCapturer.captureLogs(OzoneManager.LOG);
+ GenericTestUtils.setLogLevel(OzoneManager.LOG, INFO);
setupOm(conf);
try {
@@ -403,7 +439,8 @@ public final class TestSecureOzoneCluster {
} catch (Exception ex) {
// Expects timeout failure from scmClient in om but om user login via
// kerberos should succeed.
- assertTrue(logs.getOutput().contains("Ozone Manager login successful"));
+ assertTrue(logs.getOutput().contains("Ozone Manager login"
+ + " successful"));
}
}
@@ -415,6 +452,8 @@ public final class TestSecureOzoneCluster {
* 4. Delegation token renewal without Kerberos auth fails.
* 5. Test success of token cancellation.
* 5. Test failure of token cancellation.
+ *
+ * @throws Exception
*/
@Test
public void testDelegationToken() throws Exception {
@@ -456,10 +495,9 @@ public final class TestSecureOzoneCluster {
assertTrue(renewalTime > 0);
// Check if token is of right kind and renewer is running om instance
- assertNotNull(token);
- assertEquals("OzoneToken", token.getKind().toString());
- assertEquals(OmUtils.getOmRpcAddress(conf),
- token.getService().toString());
+ Assert.assertEquals(token.getKind().toString(), "OzoneToken");
+ Assert.assertEquals(token.getService().toString(),
+ OmUtils.getOmRpcAddress(conf));
omClient.close();
// Create a remote ugi and set its authentication method to Token
@@ -470,13 +508,16 @@ public final class TestSecureOzoneCluster {
UserGroupInformation.setLoginUser(testUser);
// Get Om client, this time authentication should happen via Token
- testUser.doAs((PrivilegedExceptionAction<Void>) () -> {
- omClient = new OzoneManagerProtocolClientSideTranslatorPB(
- RPC.getProxy(OzoneManagerProtocolPB.class, omVersion,
- OmUtils.getOmAddress(conf), testUser, conf,
- NetUtils.getDefaultSocketFactory(conf), CLIENT_TIMEOUT),
- RandomStringUtils.randomAscii(5));
- return null;
+ testUser.doAs(new PrivilegedExceptionAction<Void>() {
+ @Override
+ public Void run() throws Exception {
+ omClient = new OzoneManagerProtocolClientSideTranslatorPB(
+ RPC.getProxy(OzoneManagerProtocolPB.class, omVersion,
+ OmUtils.getOmAddress(conf), testUser, conf,
+ NetUtils.getDefaultSocketFactory(conf), CLIENT_TIMEOUT),
+ RandomStringUtils.randomAscii(5));
+ return null;
+ }
});
// Case 3: Test Client can authenticate using token.
@@ -492,10 +533,15 @@ public final class TestSecureOzoneCluster {
// initial connection via DT succeeded
omLogs.clearOutput();
- OMException ex = LambdaTestUtils.intercept(OMException.class,
- "INVALID_AUTH_METHOD",
- () -> omClient.renewDelegationToken(token));
- assertEquals(INVALID_AUTH_METHOD, ex.getResult());
+ LambdaTestUtils.intercept(OMException.class, "INVALID_AUTH_METHOD",
+ () -> {
+ try {
+ omClient.renewDelegationToken(token);
+ } catch (OMException ex) {
+ assertTrue(ex.getResult().equals(INVALID_AUTH_METHOD));
+ throw ex;
+ }
+ });
assertTrue(logs.getOutput().contains(
"Auth successful for " + username + " (auth:TOKEN)"));
omLogs.clearOutput();
@@ -524,10 +570,17 @@ public final class TestSecureOzoneCluster {
OmUtils.getOmAddress(conf), testUser, conf,
NetUtils.getDefaultSocketFactory(conf),
Client.getRpcTimeout(conf)), RandomStringUtils.randomAscii(5));
- ex = LambdaTestUtils.intercept(OMException.class,
- "Cancel delegation token failed",
- () -> omClient.cancelDelegationToken(token));
- assertEquals(TOKEN_ERROR_OTHER, ex.getResult());
+ LambdaTestUtils.intercept(OMException.class, "Cancel delegation " +
+ "token failed",
+ () -> {
+ try {
+ omClient.cancelDelegationToken(token);
+ } catch (OMException ex) {
+ assertTrue(ex.getResult().equals(TOKEN_ERROR_OTHER));
+ throw ex;
+ }
+ });
+
assertTrue(logs.getOutput().contains("Auth failed for"));
} finally {
om.stop();
@@ -535,15 +588,17 @@ public final class TestSecureOzoneCluster {
}
}
- private void generateKeyPair() throws Exception {
+ private void generateKeyPair(OzoneConfiguration config) throws Exception {
HDDSKeyGenerator keyGenerator = new HDDSKeyGenerator(conf);
- KeyPair keyPair = keyGenerator.generateKey();
- KeyCodec pemWriter = new KeyCodec(new SecurityConfig(conf), COMPONENT);
+ keyPair = keyGenerator.generateKey();
+ KeyCodec pemWriter = new KeyCodec(new SecurityConfig(config), COMPONENT);
pemWriter.writeKey(keyPair, true);
}
/**
* Tests delegation token renewal.
+ *
+ * @throws Exception
*/
@Test
public void testDelegationTokenRenewal() throws Exception {
@@ -553,8 +608,7 @@ public final class TestSecureOzoneCluster {
// Setup secure OM for start.
OzoneConfiguration newConf = new OzoneConfiguration(conf);
- int tokenMaxLifetime = 500;
- newConf.setLong(DELEGATION_TOKEN_MAX_LIFETIME_KEY, tokenMaxLifetime);
+ newConf.setLong(OMConfigKeys.DELEGATION_TOKEN_MAX_LIFETIME_KEY, 500);
setupOm(newConf);
long omVersion =
RPC.getProtocolVersion(OzoneManagerProtocolPB.class);
@@ -578,10 +632,9 @@ public final class TestSecureOzoneCluster {
new Text("om"));
// Check if token is of right kind and renewer is running om instance
- assertNotNull(token);
- assertEquals("OzoneToken", token.getKind().toString());
- assertEquals(OmUtils.getOmRpcAddress(conf),
- token.getService().toString());
+ Assert.assertEquals(token.getKind().toString(), "OzoneToken");
+ Assert.assertEquals(token.getService().toString(), OmUtils
+ .getOmRpcAddress(conf));
// Renew delegation token
long expiryTime = omClient.renewDelegationToken(token);
@@ -590,18 +643,23 @@ public final class TestSecureOzoneCluster {
// Test failure of delegation renewal
// 1. When token maxExpiryTime exceeds
- Thread.sleep(tokenMaxLifetime);
- OMException ex = LambdaTestUtils.intercept(OMException.class,
+ Thread.sleep(500);
+ LambdaTestUtils.intercept(OMException.class,
"TOKEN_EXPIRED",
- () -> omClient.renewDelegationToken(token));
- assertEquals(TOKEN_EXPIRED, ex.getResult());
+ () -> {
+ try {
+ omClient.renewDelegationToken(token);
+ } catch (OMException ex) {
+ assertTrue(ex.getResult().equals(TOKEN_EXPIRED));
+ throw ex;
+ }
+ });
+
omLogs.clearOutput();
// 2. When renewer doesn't match (implicitly covers when renewer is
// null or empty )
- Token<OzoneTokenIdentifier> token2 = omClient.getDelegationToken(
- new Text("randomService"));
- assertNotNull(token2);
+ Token token2 = omClient.getDelegationToken(new Text("randomService"));
LambdaTestUtils.intercept(OMException.class,
"Delegation token renewal failed",
() -> omClient.renewDelegationToken(token2));
@@ -634,7 +692,7 @@ public final class TestSecureOzoneCluster {
OMStorage omStore = new OMStorage(config);
omStore.setClusterId("testClusterId");
omStore.setScmId("testScmId");
- omStore.setOmCertSerialId(OM_CERT_SERIAL_ID);
+ omStore.setOmCertSerialId(omCertSerialId);
// writes the version file properties
omStore.initialize();
OzoneManager.setTestSecureOmFlag(true);
@@ -663,16 +721,20 @@ public final class TestSecureOzoneCluster {
CLIENT_TIMEOUT), RandomStringUtils.randomAscii(5));
//Creates a secret since it does not exist
- S3SecretValue attempt1 = omClient.getS3Secret(username);
+ S3SecretValue firstAttempt = omClient
+ .getS3Secret(UserGroupInformation.getCurrentUser().getUserName());
//Fetches the secret from db since it was created in previous step
- S3SecretValue attempt2 = omClient.getS3Secret(username);
+ S3SecretValue secondAttempt = omClient
+ .getS3Secret(UserGroupInformation.getCurrentUser().getUserName());
//secret fetched on both attempts must be same
- assertEquals(attempt1.getAwsSecret(), attempt2.getAwsSecret());
+ assertTrue(firstAttempt.getAwsSecret()
+ .equals(secondAttempt.getAwsSecret()));
//access key fetched on both attempts must be same
- assertEquals(attempt1.getAwsAccessKey(), attempt2.getAwsAccessKey());
+ assertTrue(firstAttempt.getAwsAccessKey()
+ .equals(secondAttempt.getAwsAccessKey()));
try {
@@ -682,7 +744,9 @@ public final class TestSecureOzoneCluster {
GenericTestUtils.assertExceptionContains("USER_MISMATCH", ex);
}
} finally {
- IOUtils.closeQuietly(om);
+ if(om != null){
+ om.stop();
+ }
}
}
@@ -695,7 +759,7 @@ public final class TestSecureOzoneCluster {
LogCapturer.captureLogs(OzoneManager.getLogger());
omLogs.clearOutput();
- /*
+ /**
* As all these processes run inside the same JVM, there are issues around
* the Hadoop UGI if different processes run with different principals.
* In this test, the OM has to contact the SCM to download certs. SCM runs
@@ -708,10 +772,10 @@ public final class TestSecureOzoneCluster {
* address this problem.
*/
String realm = miniKdc.getRealm();
- conf.set(OZONE_OM_KERBEROS_PRINCIPAL_KEY,
+ conf.set(OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY,
"scm/" + host + "@" + realm);
omKeyTab = new File(workDir, "scm.keytab");
- conf.set(OZONE_OM_KERBEROS_KEYTAB_FILE_KEY,
+ conf.set(OMConfigKeys.OZONE_OM_KERBEROS_KEYTAB_FILE_KEY,
omKeyTab.getAbsolutePath());
initSCM();
@@ -734,10 +798,10 @@ public final class TestSecureOzoneCluster {
om.stop();
om = OzoneManager.createOm(conf);
- assertNotNull(om.getCertificateClient());
- assertNotNull(om.getCertificateClient().getPublicKey());
- assertNotNull(om.getCertificateClient().getPrivateKey());
- assertNotNull(om.getCertificateClient().getCertificate());
+ Assert.assertNotNull(om.getCertificateClient());
+ Assert.assertNotNull(om.getCertificateClient().getPublicKey());
+ Assert.assertNotNull(om.getCertificateClient().getPrivateKey());
+ Assert.assertNotNull(om.getCertificateClient().getCertificate());
assertTrue(omLogs.getOutput().contains("Init response: GETCERT"));
assertTrue(omLogs.getOutput().contains("Successfully stored " +
"SCM signed certificate"));
@@ -770,10 +834,10 @@ public final class TestSecureOzoneCluster {
OzoneManager.setTestSecureOmFlag(true);
om = OzoneManager.createOm(conf);
- assertNotNull(om.getCertificateClient());
- assertNotNull(om.getCertificateClient().getPublicKey());
- assertNotNull(om.getCertificateClient().getPrivateKey());
- assertNotNull(om.getCertificateClient().getCertificate());
+ Assert.assertNotNull(om.getCertificateClient());
+ Assert.assertNotNull(om.getCertificateClient().getPublicKey());
+ Assert.assertNotNull(om.getCertificateClient().getPrivateKey());
+ Assert.assertNotNull(om.getCertificateClient().getCertificate());
assertTrue(omLogs.getOutput().contains("Init response: GETCERT"));
assertTrue(omLogs.getOutput().contains("Successfully stored " +
"SCM signed certificate"));
@@ -789,7 +853,10 @@ public final class TestSecureOzoneCluster {
if (scm != null) {
scm.stop();
}
- IOUtils.closeQuietly(om);
+ if (om != null) {
+ om.stop();
+ }
+
}
}
@@ -801,11 +868,11 @@ public final class TestSecureOzoneCluster {
RDN cn = x500Issuer.getRDNs(BCStyle.CN)[0];
String hostName = InetAddress.getLocalHost().getHostName();
String scmUser = "scm@" + hostName;
- assertEquals(scmUser, cn.getFirst().getValue().toString());
+ Assert.assertEquals(scmUser, cn.getFirst().getValue().toString());
// Subject name should be om login user in real world but in this test
// UGI has scm user context.
- assertEquals(scmUser, cn.getFirst().getValue().toString());
+ Assert.assertEquals(scmUser, cn.getFirst().getValue().toString());
LocalDate today = LocalDateTime.now().toLocalDate();
Date invalidDate;
@@ -827,7 +894,7 @@ public final class TestSecureOzoneCluster {
// Verify that certificate matches the public key.
String encodedKey1 = cert.getPublicKey().toString();
String encodedKey2 = om.getCertificateClient().getPublicKey().toString();
- assertEquals(encodedKey1, encodedKey2);
+ Assert.assertEquals(encodedKey1, encodedKey2);
}
private void initializeOmStorage(OMStorage omStorage) throws IOException {
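
The hunks above trade the compact intercept-and-assert style for try/catch blocks that re-assert inside the lambda. For reference, a minimal, self-contained sketch of the compact pattern; CodedException is a made-up stand-in, and only hadoop-common's test artifact (LambdaTestUtils) plus JUnit are assumed:

    import java.util.concurrent.Callable;
    import org.apache.hadoop.test.LambdaTestUtils;
    import static org.junit.Assert.assertEquals;

    public class InterceptSketch {
      static class CodedException extends RuntimeException {
        private final String code;
        CodedException(String code) { super(code); this.code = code; }
        String getCode() { return code; }
      }

      public static void main(String[] args) throws Exception {
        // intercept(...) runs the callable, checks the exception type and
        // that its text contains the given string, then returns the caught
        // exception so follow-up asserts can inspect it directly.
        CodedException ex = LambdaTestUtils.intercept(CodedException.class,
            "INVALID_AUTH_METHOD",
            (Callable<Void>) () -> {
              throw new CodedException("INVALID_AUTH_METHOD");
            });
        assertEquals("INVALID_AUTH_METHOD", ex.getCode());
      }
    }
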
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCommitWatcher.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCommitWatcher.java
index 344c51e..275dc62 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCommitWatcher.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCommitWatcher.java
@@ -52,7 +52,6 @@ import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
-import static java.util.Collections.singletonList;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
@@ -141,11 +140,13 @@ public class TestCommitWatcher {
XceiverClientRatis ratisClient = (XceiverClientRatis) xceiverClient;
CommitWatcher watcher = new CommitWatcher(bufferPool, ratisClient, 10000);
BlockID blockID = ContainerTestHelper.getTestBlockID(containerId);
+ final List<ChunkBuffer> bufferList = new ArrayList<>();
List<XceiverClientReply> replies = new ArrayList<>();
long length = 0;
List<CompletableFuture<ContainerProtos.ContainerCommandResponseProto>>
futures = new ArrayList<>();
for (int i = 0; i < capacity; i++) {
+ bufferList.clear();
ContainerProtos.ContainerCommandRequestProto writeChunkRequest =
ContainerTestHelper
.getWriteChunkRequest(pipeline, blockID, chunkSize, null);
@@ -157,7 +158,7 @@ public class TestCommitWatcher {
ContainerTestHelper
.getPutBlockRequest(pipeline, writeChunkRequest.getWriteChunk());
XceiverClientReply reply = ratisClient.sendCommandAsync(putBlockRequest);
- final List<ChunkBuffer> bufferList = singletonList(byteBuffer);
+ bufferList.add(byteBuffer);
length += byteBuffer.position();
CompletableFuture<ContainerProtos.ContainerCommandResponseProto> future =
reply.getResponse().thenApply(v -> {
@@ -215,11 +216,13 @@ public class TestCommitWatcher {
XceiverClientRatis ratisClient = (XceiverClientRatis) xceiverClient;
CommitWatcher watcher = new CommitWatcher(bufferPool, ratisClient, 10000);
BlockID blockID = ContainerTestHelper.getTestBlockID(containerId);
+ final List<ChunkBuffer> bufferList = new ArrayList<>();
List<XceiverClientReply> replies = new ArrayList<>();
long length = 0;
List<CompletableFuture<ContainerProtos.ContainerCommandResponseProto>>
futures = new ArrayList<>();
for (int i = 0; i < capacity; i++) {
+ bufferList.clear();
ContainerProtos.ContainerCommandRequestProto writeChunkRequest =
ContainerTestHelper
.getWriteChunkRequest(pipeline, blockID, chunkSize, null);
@@ -231,7 +234,7 @@ public class TestCommitWatcher {
ContainerTestHelper
.getPutBlockRequest(pipeline, writeChunkRequest.getWriteChunk());
XceiverClientReply reply = ratisClient.sendCommandAsync(putBlockRequest);
- final List<ChunkBuffer> bufferList = singletonList(byteBuffer);
+ bufferList.add(byteBuffer);
length += byteBuffer.position();
CompletableFuture<ContainerProtos.ContainerCommandResponseProto> future =
reply.getResponse().thenApply(v -> {
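
The change above replaces a fresh singletonList(byteBuffer) per reply with one shared bufferList that is cleared each iteration, so every reply ends up referencing the same mutable list. A toy sketch of that aliasing difference (names are illustrative):

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;

    public class BufferListSketch {
      public static void main(String[] args) {
        List<String> shared = new ArrayList<>();
        List<List<String>> perReply = new ArrayList<>();

        for (int i = 0; i < 3; i++) {
          shared.clear();
          shared.add("chunk-" + i);
          perReply.add(shared);  // every reply aliases the same list
          // Safe alternative: one immutable list per iteration, e.g.
          // perReply.add(Collections.singletonList("chunk-" + i));
        }
        // All three captured entries now show only the last chunk:
        System.out.println(perReply);  // [[chunk-2], [chunk-2], [chunk-2]]
      }
    }
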
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java
index 22cc0ad..44a386a 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java
@@ -44,7 +44,6 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
import org.apache.hadoop.hdds.scm.TestUtils;
import org.apache.hadoop.hdds.scm.container.MockNodeManager;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
import org.apache.hadoop.hdds.scm.exceptions.SCMException;
import org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes;
@@ -54,12 +53,9 @@ import org.apache.hadoop.hdds.scm.net.NodeSchema;
import org.apache.hadoop.hdds.scm.net.NodeSchemaManager;
import org.apache.hadoop.hdds.scm.node.NodeManager;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
-import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
import org.apache.hadoop.hdds.scm.server.SCMConfigurator;
import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.OzoneTestUtils;
@@ -106,14 +102,7 @@ import static org.apache.hadoop.hdds.scm.net.NetConstants.LEAF_SCHEMA;
import static org.apache.hadoop.hdds.scm.net.NetConstants.RACK_SCHEMA;
import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT_SCHEMA;
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.SCM_GET_PIPELINE_EXCEPTION;
import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.ALL;
-import static org.mockito.Matchers.anyLong;
-import static org.mockito.Mockito.doThrow;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
/**
* Test class for @{@link KeyManagerImpl}.
@@ -142,7 +131,7 @@ public class TestKeyManagerImpl {
dir = GenericTestUtils.getRandomizedTestDir();
conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, dir.toString());
conf.set(OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY, "true");
- mockScmBlockLocationProtocol = mock(ScmBlockLocationProtocol.class);
+ mockScmBlockLocationProtocol = Mockito.mock(ScmBlockLocationProtocol.class);
metadataManager = new OmMetadataManagerImpl(conf);
nodeManager = new MockNodeManager(true, 10);
NodeSchema[] schemas = new NodeSchema[]
@@ -340,10 +329,7 @@ public class TestKeyManagerImpl {
.setKeyName(keyName)
.build();
keyManager.createDirectory(keyArgs);
- OzoneFileStatus fileStatus = keyManager.getFileStatus(keyArgs);
- Assert.assertTrue(fileStatus.isDirectory());
- Assert.assertTrue(fileStatus.getKeyInfo().getKeyLocationVersions().get(0)
- .getLocationList().isEmpty());
+ Assert.assertTrue(keyManager.getFileStatus(keyArgs).isDirectory());
}
@Test
@@ -865,121 +851,6 @@ public class TestKeyManagerImpl {
}
}
- @Test
- public void testRefreshPipeline() throws Exception {
-
- MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf).build();
- cluster.waitForClusterToBeReady();
- OzoneManager ozoneManager = cluster.getOzoneManager();
-
- StorageContainerLocationProtocol sclProtocolMock = mock(
- StorageContainerLocationProtocol.class);
- ContainerWithPipeline containerWithPipelineMock =
- mock(ContainerWithPipeline.class);
- when(containerWithPipelineMock.getPipeline())
- .thenReturn(getRandomPipeline());
- when(sclProtocolMock.getContainerWithPipeline(anyLong()))
- .thenReturn(containerWithPipelineMock);
-
- ScmClient scmClientMock = mock(ScmClient.class);
- when(scmClientMock.getContainerClient()).thenReturn(sclProtocolMock);
-
- OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo("v1",
- "b1", "k1", ReplicationType.RATIS,
- ReplicationFactor.THREE);
-
- // Add block to key.
- List<OmKeyLocationInfo> omKeyLocationInfoList = new ArrayList<>();
- Pipeline pipeline = getRandomPipeline();
-
- OmKeyLocationInfo omKeyLocationInfo =
- new OmKeyLocationInfo.Builder().setBlockID(
- new BlockID(100L, 1000L))
- .setOffset(0).setLength(100L).setPipeline(pipeline).build();
-
- omKeyLocationInfoList.add(omKeyLocationInfo);
-
- OmKeyLocationInfo omKeyLocationInfo2 =
- new OmKeyLocationInfo.Builder().setBlockID(
- new BlockID(200L, 1000L))
- .setOffset(0).setLength(100L).setPipeline(pipeline).build();
- omKeyLocationInfoList.add(omKeyLocationInfo2);
-
- OmKeyLocationInfo omKeyLocationInfo3 =
- new OmKeyLocationInfo.Builder().setBlockID(
- new BlockID(100L, 2000L))
- .setOffset(0).setLength(100L).setPipeline(pipeline).build();
- omKeyLocationInfoList.add(omKeyLocationInfo3);
-
- omKeyInfo.appendNewBlocks(omKeyLocationInfoList, false);
-
- KeyManagerImpl keyManagerImpl =
- new KeyManagerImpl(ozoneManager, scmClientMock, conf, "om1");
-
- keyManagerImpl.refreshPipeline(omKeyInfo);
-
- verify(sclProtocolMock, times(2)).getContainerWithPipeline(anyLong());
- verify(sclProtocolMock, times(1)).getContainerWithPipeline(100L);
- verify(sclProtocolMock, times(1)).getContainerWithPipeline(200L);
- }
-
-
- @Test
- public void testRefreshPipelineException() throws Exception {
-
- MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf).build();
- cluster.waitForClusterToBeReady();
- OzoneManager ozoneManager = cluster.getOzoneManager();
-
- String errorMessage = "Cannot find container!!";
- StorageContainerLocationProtocol sclProtocolMock = mock(
- StorageContainerLocationProtocol.class);
- doThrow(new IOException(errorMessage)).when(sclProtocolMock)
- .getContainerWithPipeline(anyLong());
-
- ScmClient scmClientMock = mock(ScmClient.class);
- when(scmClientMock.getContainerClient()).thenReturn(sclProtocolMock);
-
- OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo("v1",
- "b1", "k1", ReplicationType.RATIS,
- ReplicationFactor.THREE);
-
- // Add block to key.
- List<OmKeyLocationInfo> omKeyLocationInfoList = new ArrayList<>();
- Pipeline pipeline = getRandomPipeline();
-
- OmKeyLocationInfo omKeyLocationInfo =
- new OmKeyLocationInfo.Builder().setBlockID(
- new BlockID(100L, 1000L))
- .setOffset(0).setLength(100L).setPipeline(pipeline).build();
- omKeyLocationInfoList.add(omKeyLocationInfo);
- omKeyInfo.appendNewBlocks(omKeyLocationInfoList, false);
-
- KeyManagerImpl keyManagerImpl =
- new KeyManagerImpl(ozoneManager, scmClientMock, conf, "om1");
-
- try {
- keyManagerImpl.refreshPipeline(omKeyInfo);
- Assert.fail();
- } catch (OMException omEx) {
- Assert.assertEquals(SCM_GET_PIPELINE_EXCEPTION, omEx.getResult());
- Assert.assertTrue(omEx.getMessage().equals(errorMessage));
- }
- }
-
- /**
- * Get Random pipeline.
- * @return pipeline
- */
- private Pipeline getRandomPipeline() {
- return Pipeline.newBuilder()
- .setState(Pipeline.PipelineState.OPEN)
- .setId(PipelineID.randomId())
- .setType(ReplicationType.RATIS)
- .setFactor(ReplicationFactor.THREE)
- .setNodes(new ArrayList<>())
- .build();
- }
/**
* Creates a depth two directory.
*
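
The removed tests above lean on Mockito's stub-then-verify cycle (when(...).thenReturn(...), then verify(mock, times(n))). A minimal sketch of that cycle with a made-up interface, assuming only mockito-core on the classpath:

    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.times;
    import static org.mockito.Mockito.verify;
    import static org.mockito.Mockito.when;

    public class VerifySketch {
      interface ContainerLocator {
        String pipelineFor(long containerId);
      }

      public static void main(String[] args) {
        ContainerLocator locator = mock(ContainerLocator.class);
        when(locator.pipelineFor(100L)).thenReturn("pipeline-A");

        locator.pipelineFor(100L);
        locator.pipelineFor(100L);

        // Exact call-count assertions, as in the removed testRefreshPipeline.
        verify(locator, times(2)).pipelineFor(100L);
      }
    }
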
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
index 16e878d..6c06fc8 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -122,7 +122,6 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE
import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INTERNAL_ERROR;
import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_KMS_PROVIDER;
import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.SCM_GET_PIPELINE_EXCEPTION;
import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND;
import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
import static org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType.KEY;
@@ -654,17 +653,37 @@ public class KeyManagerImpl implements KeyManager {
}
// Refresh container pipeline info from SCM
// based on OmKeyArgs.refreshPipeline flag
+ // 1. Client send initial read request OmKeyArgs.refreshPipeline = false
+ // and uses the pipeline cached in OM to access datanode
+ // 2. If succeeded, done.
+ // 3. If failed due to pipeline does not exist or invalid pipeline state
+ // exception, client should retry lookupKey with
+ // OmKeyArgs.refreshPipeline = true
if (args.getRefreshPipeline()) {
- refreshPipeline(value);
+ for (OmKeyLocationInfoGroup key : value.getKeyLocationVersions()) {
+ key.getLocationList().forEach(k -> {
+ // TODO: fix Some tests that may not initialize container client
+ // The production should always have containerClient initialized.
+ if (scmClient.getContainerClient() != null) {
+ try {
+ ContainerWithPipeline cp = scmClient.getContainerClient()
+ .getContainerWithPipeline(k.getContainerID());
+ if (!cp.getPipeline().equals(k.getPipeline())) {
+ k.setPipeline(cp.getPipeline());
+ }
+ } catch (IOException e) {
+ LOG.error("Unable to update pipeline for container:{}",
+ k.getContainerID());
+ }
+ }
+ });
+ }
}
if (args.getSortDatanodes()) {
sortDatanodeInPipeline(value, clientAddress);
}
return value;
} catch (IOException ex) {
- if (ex instanceof OMException) {
- throw ex;
- }
LOG.debug("Get key failed for volume:{} bucket:{} key:{}",
volumeName, bucketName, keyName, ex);
throw new OMException(ex.getMessage(),
@@ -675,43 +694,6 @@ public class KeyManagerImpl implements KeyManager {
}
}
- /**
- * Refresh pipeline info in OM by asking SCM.
- * @param value OmKeyInfo
- */
- @VisibleForTesting
- protected void refreshPipeline(OmKeyInfo value) throws IOException {
- Map<Long, ContainerWithPipeline> containerWithPipelineMap = new HashMap<>();
- for (OmKeyLocationInfoGroup key : value.getKeyLocationVersions()) {
- for (OmKeyLocationInfo k : key.getLocationList()) {
- // TODO: fix Some tests that may not initialize container client
- // The production should always have containerClient initialized.
- if (scmClient.getContainerClient() != null) {
- try {
- if (!containerWithPipelineMap.containsKey(k.getContainerID())) {
- ContainerWithPipeline containerWithPipeline = scmClient
- .getContainerClient()
- .getContainerWithPipeline(k.getContainerID());
- containerWithPipelineMap.put(k.getContainerID(),
- containerWithPipeline);
- }
- } catch (IOException ioEx) {
- LOG.debug("Get containerPipeline failed for volume:{} bucket:{} " +
- "key:{}", value.getVolumeName(), value.getBucketName(),
- value.getKeyName(), ioEx);
- throw new OMException(ioEx.getMessage(),
- SCM_GET_PIPELINE_EXCEPTION);
- }
- ContainerWithPipeline cp =
- containerWithPipelineMap.get(k.getContainerID());
- if (!cp.getPipeline().equals(k.getPipeline())) {
- k.setPipeline(cp.getPipeline());
- }
- }
- }
- }
- }
-
@Override
public void renameKey(OmKeyArgs args, String toKeyName) throws IOException {
Preconditions.checkNotNull(args);
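
The deleted refreshPipeline deduplicated SCM lookups with a per-call map keyed by container ID, while the reverted inline loop issues one getContainerWithPipeline RPC per block. A sketch of the memoized variant, with a hypothetical lookup function standing in for the SCM client:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.function.Function;

    public class PipelineCacheSketch {
      public static void main(String[] args) {
        // Stand-in for scmClient.getContainerClient()
        //     .getContainerWithPipeline(id).
        Function<Long, String> scmLookup = id -> {
          System.out.println("SCM RPC for container " + id);
          return "pipeline-for-" + id;
        };

        long[] blockContainers = {100L, 200L, 100L}; // two blocks share 100
        Map<Long, String> cache = new HashMap<>();
        for (long id : blockContainers) {
          // One RPC per distinct container, as containerWithPipelineMap
          // ensured in the removed method.
          String pipeline = cache.computeIfAbsent(id, scmLookup);
          System.out.println("block in container " + id + " -> " + pipeline);
        }
      }
    }
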
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ha/OMHANodeDetails.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ha/OMHANodeDetails.java
index bdfd69f..8d9e709 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ha/OMHANodeDetails.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ha/OMHANodeDetails.java
@@ -30,11 +30,9 @@ import org.slf4j.LoggerFactory;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.Collection;
-import java.util.Collections;
import java.util.List;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_INTERNAL_SERVICE_ID;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_NODES_KEY;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_NODE_ID_KEY;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_PORT_DEFAULT;
@@ -93,23 +91,8 @@ public class OMHANodeDetails {
String localOMServiceId = null;
String localOMNodeId = null;
int localRatisPort = 0;
-
- Collection<String> omServiceIds;
-
- localOMServiceId = conf.getTrimmed(OZONE_OM_INTERNAL_SERVICE_ID);
-
- if (localOMServiceId == null) {
- // There is no internal om service id is being set, fall back to ozone
- // .om.service.ids.
- LOG.info(OZONE_OM_INTERNAL_SERVICE_ID + " is not defined, falling back " +
- "to " + OZONE_OM_SERVICE_IDS_KEY + " to find serviceID for " +
- "OzoneManager if it is HA enabled cluster");
- omServiceIds = conf.getTrimmedStringCollection(
- OZONE_OM_SERVICE_IDS_KEY);
- } else {
- LOG.info("ServiceID for OzoneManager is {}", localOMServiceId);
- omServiceIds = Collections.singletonList(localOMServiceId);
- }
+ Collection<String> omServiceIds = conf.getTrimmedStringCollection(
+ OZONE_OM_SERVICE_IDS_KEY);
String knownOMNodeId = conf.get(OZONE_OM_NODE_ID_KEY);
int found = 0;
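
The removed block documented a two-step resolution: use the internal service id when configured, otherwise consider every id in OZONE_OM_SERVICE_IDS_KEY. A sketch of that fallback in isolation (method and variable names are illustrative):

    import java.util.Arrays;
    import java.util.Collection;
    import java.util.Collections;

    public class ServiceIdFallbackSketch {
      // Mirrors the removed logic: a configured internal service id pins
      // the candidate list to one entry; otherwise all ids are candidates.
      static Collection<String> candidateServiceIds(String internalServiceId,
          Collection<String> allServiceIds) {
        if (internalServiceId == null) {
          return allServiceIds;
        }
        return Collections.singletonList(internalServiceId);
      }

      public static void main(String[] args) {
        Collection<String> all = Arrays.asList("omServiceA", "omServiceB");
        System.out.println(candidateServiceIds(null, all));
        System.out.println(candidateServiceIds("omServiceA", all));
      }
    }
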
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/GetKeyHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/GetKeyHandler.java
index 1eb91d5..fec789a 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/GetKeyHandler.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/GetKeyHandler.java
@@ -21,8 +21,8 @@ package org.apache.hadoop.ozone.web.ozShell.keys;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
-import java.io.InputStream;
-import java.io.OutputStream;
+import java.nio.file.Path;
+import java.nio.file.Paths;
import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -31,6 +31,7 @@ import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientException;
import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.io.OzoneInputStream;
import org.apache.hadoop.ozone.web.ozShell.Handler;
import org.apache.hadoop.ozone.web.ozShell.OzoneAddress;
import org.apache.hadoop.ozone.web.ozShell.Shell;
@@ -38,7 +39,6 @@ import org.apache.hadoop.ozone.web.ozShell.Shell;
import org.apache.commons.codec.digest.DigestUtils;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_DEFAULT;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY;
-
import picocli.CommandLine.Command;
import picocli.CommandLine.Parameters;
@@ -65,10 +65,8 @@ public class GetKeyHandler extends Handler {
OzoneAddress address = new OzoneAddress(uri);
address.ensureKeyAddress();
-
- OzoneConfiguration conf = createOzoneConfiguration();
-
- try (OzoneClient client = address.createClient(conf)) {
+ try (OzoneClient client =
+ address.createClient(createOzoneConfiguration())) {
String volumeName = address.getVolumeName();
String bucketName = address.getBucketName();
@@ -80,6 +78,7 @@ public class GetKeyHandler extends Handler {
System.out.printf("Key Name : %s%n", keyName);
}
+ Path dataFilePath = Paths.get(fileName);
File dataFile = new File(fileName);
if (dataFile.exists() && dataFile.isDirectory()) {
@@ -87,25 +86,30 @@ public class GetKeyHandler extends Handler {
}
if (dataFile.exists()) {
- throw new OzoneClientException(dataFile.getPath() + " exists."
- + " Download would overwrite an existing file. Aborting.");
+ throw new OzoneClientException(
+ fileName + " exists. Download will overwrite an "
+ + "existing file. Aborting.");
}
- int chunkSize = (int) conf.getStorageSize(OZONE_SCM_CHUNK_SIZE_KEY,
- OZONE_SCM_CHUNK_SIZE_DEFAULT, StorageUnit.BYTES);
-
OzoneVolume vol = client.getObjectStore().getVolume(volumeName);
OzoneBucket bucket = vol.getBucket(bucketName);
- try (OutputStream output = new FileOutputStream(dataFile);
- InputStream input = bucket.readKey(keyName)) {
- IOUtils.copyBytes(input, output, chunkSize);
+ OzoneInputStream keyInputStream = bucket.readKey(keyName);
+ if (dataFilePath != null) {
+ FileOutputStream outputStream = new FileOutputStream(dataFile);
+ IOUtils.copyBytes(keyInputStream, outputStream,
+ (int) new OzoneConfiguration()
+ .getStorageSize(OZONE_SCM_CHUNK_SIZE_KEY,
+ OZONE_SCM_CHUNK_SIZE_DEFAULT, StorageUnit.BYTES));
+ outputStream.close();
+ } else {
+ throw new OzoneClientException(
+ "Can not access the file \"" + fileName + "\"");
}
-
if (isVerbose()) {
- try (InputStream stream = new FileInputStream(dataFile)) {
- String hash = DigestUtils.md5Hex(stream);
- System.out.printf("Downloaded file hash : %s%n", hash);
- }
+ FileInputStream stream = new FileInputStream(dataFile);
+ String hash = DigestUtils.md5Hex(stream);
+ System.out.printf("Downloaded file hash : %s%n", hash);
+ stream.close();
}
}
return null;
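
The reverted download path opens the FileOutputStream and the key stream without try-with-resources, so a failed copy leaks both handles; the removed lines closed them automatically. A dependency-free sketch of the safer shape (file names are placeholders):

    import java.io.IOException;
    import java.io.InputStream;
    import java.io.OutputStream;
    import java.nio.file.Files;
    import java.nio.file.Paths;

    public class SafeCopySketch {
      public static void main(String[] args) throws IOException {
        // Both streams close on success and on failure, in reverse order.
        try (InputStream in = Files.newInputStream(Paths.get("src.bin"));
             OutputStream out = Files.newOutputStream(Paths.get("dst.bin"))) {
          byte[] buf = new byte[8192];  // chunk-sized buffer, as copyBytes uses
          int n;
          while ((n = in.read(buf)) != -1) {
            out.write(buf, 0, n);
          }
        }
      }
    }
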
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/PutKeyHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/PutKeyHandler.java
index 7bde7f8..2829dd6 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/PutKeyHandler.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/PutKeyHandler.java
@@ -20,8 +20,6 @@ package org.apache.hadoop.ozone.web.ozShell.keys;
import java.io.File;
import java.io.FileInputStream;
-import java.io.InputStream;
-import java.io.OutputStream;
import java.util.HashMap;
import java.util.Map;
@@ -35,6 +33,7 @@ import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
import org.apache.hadoop.ozone.web.ozShell.Handler;
import org.apache.hadoop.ozone.web.ozShell.OzoneAddress;
import org.apache.hadoop.ozone.web.ozShell.Shell;
@@ -73,9 +72,9 @@ public class PutKeyHandler extends Handler {
*/
@Override
public Void call() throws Exception {
+
OzoneAddress address = new OzoneAddress(uri);
address.ensureKeyAddress();
-
try (OzoneClient client =
address.createClient(createOzoneConfiguration())) {
@@ -92,10 +91,10 @@ public class PutKeyHandler extends Handler {
File dataFile = new File(fileName);
if (isVerbose()) {
- try (InputStream stream = new FileInputStream(dataFile)) {
- String hash = DigestUtils.md5Hex(stream);
- System.out.printf("File Hash : %s%n", hash);
- }
+ FileInputStream stream = new FileInputStream(dataFile);
+ String hash = DigestUtils.md5Hex(stream);
+ System.out.printf("File Hash : %s%n", hash);
+ stream.close();
}
Configuration conf = new OzoneConfiguration();
@@ -108,20 +107,20 @@ public class PutKeyHandler extends Handler {
conf.get(OZONE_REPLICATION_TYPE, OZONE_REPLICATION_TYPE_DEFAULT));
OzoneVolume vol = client.getObjectStore().getVolume(volumeName);
OzoneBucket bucket = vol.getBucket(bucketName);
-
Map<String, String> keyMetadata = new HashMap<>();
- String gdprEnabled = bucket.getMetadata().get(OzoneConsts.GDPR_FLAG);
- if (Boolean.parseBoolean(gdprEnabled)) {
+ if (Boolean.valueOf(bucket.getMetadata().get(OzoneConsts.GDPR_FLAG))) {
keyMetadata.put(OzoneConsts.GDPR_FLAG, Boolean.TRUE.toString());
}
-
- int chunkSize = (int) conf.getStorageSize(OZONE_SCM_CHUNK_SIZE_KEY,
- OZONE_SCM_CHUNK_SIZE_DEFAULT, StorageUnit.BYTES);
- try (InputStream input = new FileInputStream(dataFile);
- OutputStream output = bucket.createKey(keyName, dataFile.length(),
- replicationType, replicationFactor, keyMetadata)) {
- IOUtils.copyBytes(input, output, chunkSize);
- }
+ OzoneOutputStream outputStream = bucket
+ .createKey(keyName, dataFile.length(), replicationType,
+ replicationFactor, keyMetadata);
+ FileInputStream fileInputStream = new FileInputStream(dataFile);
+ IOUtils.copyBytes(fileInputStream, outputStream, (int) conf
+ .getStorageSize(OZONE_SCM_CHUNK_SIZE_KEY,
+ OZONE_SCM_CHUNK_SIZE_DEFAULT,
+ StorageUnit.BYTES));
+ outputStream.close();
+ fileInputStream.close();
}
return null;
}
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultiDeleteRequestUnmarshaller.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultiDeleteRequestUnmarshaller.java
index f5745a8..e8ed515 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultiDeleteRequestUnmarshaller.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultiDeleteRequestUnmarshaller.java
@@ -1,4 +1,4 @@
-/*
+/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -23,10 +23,10 @@ import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.MultivaluedMap;
import javax.ws.rs.ext.MessageBodyReader;
import javax.ws.rs.ext.Provider;
-import javax.xml.XMLConstants;
import javax.xml.bind.JAXBContext;
import javax.xml.bind.UnmarshallerHandler;
import javax.xml.parsers.SAXParserFactory;
+import java.io.IOException;
import java.io.InputStream;
import java.lang.annotation.Annotation;
import java.lang.reflect.Type;
@@ -49,7 +49,6 @@ public class MultiDeleteRequestUnmarshaller
try {
context = JAXBContext.newInstance(MultiDeleteRequest.class);
SAXParserFactory saxParserFactory = SAXParserFactory.newInstance();
- saxParserFactory.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true);
xmlReader = saxParserFactory.newSAXParser().getXMLReader();
} catch (Exception ex) {
throw new AssertionError("Can't instantiate MultiDeleteRequest parser",
@@ -66,7 +65,8 @@ public class MultiDeleteRequestUnmarshaller
@Override
public MultiDeleteRequest readFrom(Class<MultiDeleteRequest> type,
Type genericType, Annotation[] annotations, MediaType mediaType,
- MultivaluedMap<String, String> httpHeaders, InputStream entityStream) {
+ MultivaluedMap<String, String> httpHeaders, InputStream entityStream)
+ throws IOException, WebApplicationException {
try {
UnmarshallerHandler unmarshallerHandler =
context.createUnmarshaller().getUnmarshallerHandler();
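
The revert drops FEATURE_SECURE_PROCESSING from the SAX parser that handles client-supplied delete requests; that feature bounds entity expansion and similar resource-exhaustion tricks in untrusted XML. A minimal sketch of enabling it on a stock JDK parser:

    import javax.xml.XMLConstants;
    import javax.xml.parsers.SAXParserFactory;
    import org.xml.sax.XMLReader;

    public class SecureSaxSketch {
      public static void main(String[] args) throws Exception {
        SAXParserFactory factory = SAXParserFactory.newInstance();
        // Caps entity expansion and related limits for untrusted input.
        factory.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true);
        XMLReader reader = factory.newSAXParser().getXMLReader();
        System.out.println("parser ready: " + reader.getClass().getName());
      }
    }
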
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/RangeHeaderParserUtil.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/RangeHeaderParserUtil.java
index 2e1b86c..b1b61cc 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/RangeHeaderParserUtil.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/RangeHeaderParserUtil.java
@@ -50,12 +50,12 @@ public final class RangeHeaderParserUtil {
Matcher matcher = RANGE_HEADER_MATCH_PATTERN.matcher(rangeHeaderVal);
if (matcher.matches()) {
if (!matcher.group("start").equals("")) {
- start = Long.parseLong(matcher.group("start"));
+ start = Integer.parseInt(matcher.group("start"));
} else {
noStart = true;
}
if (!matcher.group("end").equals("")) {
- end = Long.parseLong(matcher.group("end"));
+ end = Integer.parseInt(matcher.group("end"));
} else {
end = length - 1;
}
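
Reverting Long.parseLong to Integer.parseInt caps usable range offsets at 2^31 - 1 bytes; the regression test removed in the next hunk exercised an offset near 5 GB. A two-line demonstration of the failure mode:

    public class RangeParseSketch {
      public static void main(String[] args) {
        String end = "4977248768";               // offset from the removed test
        System.out.println(Long.parseLong(end)); // 4977248768
        try {
          Integer.parseInt(end);                 // the reverted code path
        } catch (NumberFormatException e) {
          System.out.println("int parse fails: " + e.getMessage());
        }
      }
    }
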
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestRangeHeaderParserUtil.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestRangeHeaderParserUtil.java
index 439b8b0..03c91bf 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestRangeHeaderParserUtil.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestRangeHeaderParserUtil.java
@@ -86,11 +86,7 @@ public class TestRangeHeaderParserUtil {
assertEquals(9, rangeHeader.getEndOffset());
assertEquals(false, rangeHeader.isInValidRange());
- rangeHeader = RangeHeaderParserUtil.parseRangeHeader("bytes=3977248768" +
- "-4977248768", 4977248769L);
- assertEquals(3977248768L, rangeHeader.getStartOffset());
- assertEquals(4977248768L, rangeHeader.getEndOffset());
- assertEquals(false, rangeHeader.isInValidRange());
+
}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/common/DatabaseHelper.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/common/DatabaseHelper.java
index a215a9e..a7282b2 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/common/DatabaseHelper.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/common/DatabaseHelper.java
@@ -16,31 +16,18 @@
*/
package org.apache.hadoop.ozone.audit.parser.common;
-import static java.nio.charset.StandardCharsets.UTF_8;
-
-import java.io.BufferedReader;
-import java.io.FileInputStream;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.InputStreamReader;
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.sql.ResultSetMetaData;
-import java.sql.Statement;
-import java.util.ArrayList;
-import java.util.Map;
-import java.util.Properties;
-import java.util.stream.Collectors;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.ozone.audit.parser.model.AuditEntry;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import java.io.*;
+import java.sql.*;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.Properties;
+import java.util.stream.Collectors;
+
/**
* Database helper for ozone audit parser tool.
*/
@@ -55,17 +42,28 @@ public final class DatabaseHelper {
LoggerFactory.getLogger(DatabaseHelper.class);
private static Map<String, String> properties;
- public static boolean setup(String dbName, String logs) throws Exception {
- if (createAuditTable(dbName)) {
+ public static boolean setup(String dbName, String logs) {
+ //loadProperties();
+ if(createAuditTable(dbName)) {
return insertAudits(dbName, logs);
} else {
return false;
}
}
- private static Connection getConnection(String dbName) throws Exception {
- Class.forName(ParserConsts.DRIVER);
- return DriverManager.getConnection(ParserConsts.CONNECTION_PREFIX + dbName);
+ private static Connection getConnection(String dbName) {
+
+ Connection connection = null;
+ try{
+ Class.forName(ParserConsts.DRIVER);
+ connection = DriverManager.getConnection(
+ ParserConsts.CONNECTION_PREFIX + dbName);
+ } catch (ClassNotFoundException e) {
+ LOG.error(e.getMessage());
+ } catch (SQLException e) {
+ LOG.error(e.getMessage());
+ }
+ return connection;
}
private static void loadProperties() {
@@ -91,25 +89,32 @@ public final class DatabaseHelper {
}
- private static boolean createAuditTable(String dbName) throws Exception {
- try (Connection connection = getConnection(dbName);
- Statement st = connection.createStatement()) {
+ private static boolean createAuditTable(String dbName) {
+
+ try(Connection connection = getConnection(dbName);
+ Statement st = connection.createStatement()) {
+
st.executeUpdate(properties.get(ParserConsts.CREATE_AUDIT_TABLE));
+ } catch (SQLException e) {
+ LOG.error(e.getMessage());
+ return false;
}
return true;
}
- @SuppressFBWarnings("REC_CATCH_EXCEPTION")
- private static boolean insertAudits(String dbName, String logs)
- throws Exception {
- try (Connection connection = getConnection(dbName);
- PreparedStatement preparedStatement = connection.prepareStatement(
- properties.get(ParserConsts.INSERT_AUDITS))) {
+ private static boolean insertAudits(String dbName, String logs) {
+
+ try(Connection connection = getConnection(dbName);
+ PreparedStatement preparedStatement = connection.prepareStatement(
+ properties.get(ParserConsts.INSERT_AUDITS))) {
+
ArrayList<AuditEntry> auditEntries = parseAuditLogs(logs);
+
final int batchSize = 1000;
int count = 0;
+
//Insert list to db
- for (AuditEntry audit : auditEntries) {
+ for(AuditEntry audit : auditEntries) {
preparedStatement.setString(1, audit.getTimestamp());
preparedStatement.setString(2, audit.getLevel());
preparedStatement.setString(3, audit.getLogger());
@@ -119,39 +124,45 @@ public final class DatabaseHelper {
preparedStatement.setString(7, audit.getParams());
preparedStatement.setString(8, audit.getResult());
preparedStatement.setString(9, audit.getException());
+
preparedStatement.addBatch();
- if (++count % batchSize == 0) {
+
+ if(++count % batchSize == 0) {
preparedStatement.executeBatch();
}
}
- if (!auditEntries.isEmpty()) {
+ if(auditEntries.size() > 0) {
preparedStatement.executeBatch(); // insert remaining records
}
+ } catch (Exception e) {
+ LOG.error(e.getMessage());
+ return false;
}
return true;
}
- @SuppressWarnings("squid:S3776")
private static ArrayList<AuditEntry> parseAuditLogs(String filePath)
- throws IOException {
- ArrayList<AuditEntry> listResult = new ArrayList<>();
- try (FileInputStream fis = new FileInputStream(filePath);
- InputStreamReader isr = new InputStreamReader(fis, UTF_8);
+ throws Exception {
+ ArrayList<AuditEntry> listResult = new ArrayList<AuditEntry>();
+ try(FileInputStream fis = new FileInputStream(filePath);
+ InputStreamReader isr = new InputStreamReader(fis, "UTF-8");
BufferedReader bReader = new BufferedReader(isr)) {
- String currentLine = bReader.readLine();
- String nextLine = bReader.readLine();
- String[] entry;
+ String currentLine = null;
+ String[] entry = null;
AuditEntry tempEntry = null;
+ String nextLine = null;
+ currentLine = bReader.readLine();
+ nextLine = bReader.readLine();
- while (true) {
- if (tempEntry == null){
+ while(true) {
+ if(tempEntry == null){
tempEntry = new AuditEntry();
}
- if (currentLine == null) {
+ if(currentLine == null) {
break;
} else {
- if (!currentLine.matches(ParserConsts.DATE_REGEX)){
+ if(!currentLine.matches(ParserConsts.DATE_REGEX)){
tempEntry.appendException(currentLine);
} else {
entry = StringUtils.stripAll(currentLine.split("\\|"));
@@ -167,11 +178,11 @@ public final class DatabaseHelper {
.setParams(ops[1])
.setResult(entry[6].substring(entry[6].indexOf('=') + 1))
.build();
- if (entry.length == 8){
+ if(entry.length == 8){
tempEntry.setException(entry[7]);
}
}
- if (nextLine == null || nextLine.matches(ParserConsts.DATE_REGEX)){
+ if(nextLine == null || nextLine.matches(ParserConsts.DATE_REGEX)){
listResult.add(tempEntry);
tempEntry = null;
}
@@ -179,38 +190,50 @@ public final class DatabaseHelper {
nextLine = bReader.readLine();
}
}
+ } catch (RuntimeException rx) {
+ throw rx;
+ } catch (Exception ex) {
+ throw ex;
}
return listResult;
}
public static String executeCustomQuery(String dbName, String query)
- throws Exception {
+ throws SQLException {
return executeStatement(dbName, query);
}
public static String executeTemplate(String dbName, String template)
- throws Exception {
- return executeStatement(dbName, properties.get(template));
+ throws SQLException {
+ return executeStatement(dbName,
+ properties.get(template));
}
private static String executeStatement(String dbName, String sql)
- throws Exception {
+ throws SQLException {
StringBuilder result = new StringBuilder();
- ResultSetMetaData rsm;
- try (Connection connection = getConnection(dbName);
- Statement st = connection.createStatement();
- ResultSet rs = st.executeQuery(sql)) {
- if (rs != null) {
- rsm = rs.getMetaData();
- int cols = rsm.getColumnCount();
- while (rs.next()){
- for (int index = 1; index <= cols; index++){
- result.append(rs.getObject(index));
- result.append("\t");
+ ResultSet rs = null;
+ Statement st = null;
+ ResultSetMetaData rsm = null;
+ try(Connection connection = getConnection(dbName)) {
+ //loadProperties();
+
+ if(connection != null){
+ st = connection.createStatement();
+ rs = st.executeQuery(sql);
+ if(rs != null) {
+ rsm = rs.getMetaData();
+ int cols = rsm.getColumnCount();
+ while(rs.next()){
+ for(int index =1; index<=cols; index++){
+ result.append(rs.getObject(index) + "\t");
+ }
+ result.append("\n");
}
- result.append("\n");
}
+ st.close();
+ rs.close();
}
}
return result.toString();
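
The DatabaseHelper changes above keep the batched insert but fold resource handling back into broad catch blocks. For reference, a self-contained sketch of the batch-flush pattern; it assumes the sqlite-jdbc driver on the classpath and uses a made-up table:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.PreparedStatement;
    import java.sql.Statement;

    public class BatchInsertSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn =
                 DriverManager.getConnection("jdbc:sqlite::memory:")) {
          try (Statement st = conn.createStatement()) {
            st.executeUpdate("CREATE TABLE audit (ts TEXT, op TEXT)");
          }
          final int batchSize = 1000;
          int count = 0;
          try (PreparedStatement ps =
                   conn.prepareStatement("INSERT INTO audit VALUES (?, ?)")) {
            for (int i = 0; i < 2500; i++) {
              ps.setString(1, "2019-11-21 15:23:37," + i);
              ps.setString(2, "READ_KEY");
              ps.addBatch();
              if (++count % batchSize == 0) {
                ps.executeBatch();   // flush every full batch
              }
            }
            ps.executeBatch();       // flush the remainder
          }
        }
      }
    }
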
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/handler/LoadCommandHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/handler/LoadCommandHandler.java
index c5e28a4..ec67bad 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/handler/LoadCommandHandler.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/handler/LoadCommandHandler.java
@@ -16,15 +16,14 @@
*/
package org.apache.hadoop.ozone.audit.parser.handler;
-import java.util.concurrent.Callable;
-
import org.apache.hadoop.hdds.cli.HddsVersionProvider;
import org.apache.hadoop.ozone.audit.parser.AuditParser;
import org.apache.hadoop.ozone.audit.parser.common.DatabaseHelper;
-
+import picocli.CommandLine.*;
import picocli.CommandLine.Command;
import picocli.CommandLine.Parameters;
-import picocli.CommandLine.ParentCommand;
+
+import java.util.concurrent.Callable;
/**
* Load command handler for ozone audit parser.
@@ -42,8 +41,8 @@ public class LoadCommandHandler implements Callable<Void> {
@ParentCommand
private AuditParser auditParser;
- public Void call() throws Exception {
- if (DatabaseHelper.setup(auditParser.getDatabase(), logs)) {
+ public Void call() {
+ if(DatabaseHelper.setup(auditParser.getDatabase(), logs)) {
System.out.println(logs + " has been loaded successfully");
} else {
System.out.println("Failed to load " + logs);
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/handler/QueryCommandHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/handler/QueryCommandHandler.java
index 461026e..fe083ca 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/handler/QueryCommandHandler.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/handler/QueryCommandHandler.java
@@ -16,16 +16,15 @@
*/
package org.apache.hadoop.ozone.audit.parser.handler;
-import java.sql.SQLException;
-import java.util.concurrent.Callable;
-
import org.apache.hadoop.hdds.cli.HddsVersionProvider;
import org.apache.hadoop.ozone.audit.parser.AuditParser;
import org.apache.hadoop.ozone.audit.parser.common.DatabaseHelper;
-
+import picocli.CommandLine.*;
import picocli.CommandLine.Command;
import picocli.CommandLine.Parameters;
-import picocli.CommandLine.ParentCommand;
+
+import java.sql.SQLException;
+import java.util.concurrent.Callable;
/**
* Custom query command handler for ozone audit parser.
@@ -45,7 +44,7 @@ public class QueryCommandHandler implements Callable<Void> {
@ParentCommand
private AuditParser auditParser;
- public Void call() throws Exception {
+ public Void call() {
try {
System.out.println(
DatabaseHelper.executeCustomQuery(auditParser.getDatabase(), query)
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/handler/TemplateCommandHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/handler/TemplateCommandHandler.java
index 4e726ec..9b97ee9 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/handler/TemplateCommandHandler.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/handler/TemplateCommandHandler.java
@@ -16,16 +16,15 @@
*/
package org.apache.hadoop.ozone.audit.parser.handler;
-import java.sql.SQLException;
-import java.util.concurrent.Callable;
-
import org.apache.hadoop.hdds.cli.HddsVersionProvider;
import org.apache.hadoop.ozone.audit.parser.AuditParser;
import org.apache.hadoop.ozone.audit.parser.common.DatabaseHelper;
-
+import picocli.CommandLine.*;
import picocli.CommandLine.Command;
import picocli.CommandLine.Parameters;
-import picocli.CommandLine.ParentCommand;
+
+import java.sql.SQLException;
+import java.util.concurrent.Callable;
/**
* Template command handler for ozone audit parser.
@@ -43,7 +42,7 @@ public class TemplateCommandHandler implements Callable<Void> {
@ParentCommand
private AuditParser auditParser;
- public Void call() throws Exception {
+ public Void call() {
try {
if(DatabaseHelper.validateTemplate(template)) {
System.out.println(