Posted to commits@ozone.apache.org by bh...@apache.org on 2019/11/21 08:54:16 UTC
[hadoop-ozone] branch HDDS-2587 updated: Add sonar java classes property in pom.xml (#246)
This is an automated email from the ASF dual-hosted git repository.
bharat pushed a commit to branch HDDS-2587
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git
The following commit(s) were added to refs/heads/HDDS-2587 by this push:
new ed8cbd4 Add sonar java classes property in pom.xml (#246)
ed8cbd4 is described below
commit ed8cbd4236f20981a9c60fbc803b8c181998fdd5
Author: Vivek Ratnavel Subramanian <vi...@gmail.com>
AuthorDate: Thu Nov 21 00:54:09 2019 -0800
Add sonar java classes property in pom.xml (#246)
---
.../hdds/scm/client/ContainerOperationClient.java | 2 +-
.../hadoop/hdds/scm/client/HddsClientUtils.java | 5 -
.../apache/hadoop/hdds/scm/storage/BufferPool.java | 2 +-
.../hadoop/hdds/scm/storage/CommitWatcher.java | 4 +-
.../hadoop/hdds/scm/net/NodeSchemaLoader.java | 27 +-
.../apache/hadoop/hdds/tracing/TraceAllMethod.java | 5 +
.../common/src/main/resources/ozone-default.xml | 11 +-
.../hadoop/hdds/scm/net/TestNodeSchemaLoader.java | 151 ++++----
.../common/src/test/resources/log4j.properties | 23 ++
.../networkTopologyTestFiles/external-entity.xml | 52 +++
.../hadoop/hdds/conf/ConfigFileAppender.java | 21 +-
.../hdds/scm/server/StorageContainerManager.java | 74 ++--
.../org/apache/hadoop/ozone/om/OMConfigKeys.java | 3 +
.../hadoop/ozone/om/exceptions/OMException.java | 6 +-
.../src/main/proto/OzoneManagerProtocol.proto | 1 +
hadoop-ozone/dev-support/checks/sonar.sh | 2 +-
.../hadoop/ozone/MiniOzoneHAClusterImpl.java | 6 +-
.../hadoop/ozone/TestSecureOzoneCluster.java | 399 +++++++++------------
.../hadoop/ozone/client/rpc/TestCommitWatcher.java | 9 +-
.../apache/hadoop/ozone/om/TestKeyManagerImpl.java | 133 ++++++-
.../org/apache/hadoop/ozone/om/KeyManagerImpl.java | 66 ++--
.../apache/hadoop/ozone/om/ha/OMHANodeDetails.java | 21 +-
.../ozone/web/ozShell/keys/GetKeyHandler.java | 44 ++-
.../ozone/web/ozShell/keys/PutKeyHandler.java | 35 +-
.../endpoint/MultiDeleteRequestUnmarshaller.java | 8 +-
.../ozone/s3/util/RangeHeaderParserUtil.java | 4 +-
.../ozone/s3/util/TestRangeHeaderParserUtil.java | 6 +-
.../ozone/audit/parser/common/DatabaseHelper.java | 157 ++++----
.../audit/parser/handler/LoadCommandHandler.java | 11 +-
.../audit/parser/handler/QueryCommandHandler.java | 11 +-
.../parser/handler/TemplateCommandHandler.java | 11 +-
pom.xml | 1 +
32 files changed, 756 insertions(+), 555 deletions(-)
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
index b1d4538..6e2fd59 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
@@ -221,7 +221,7 @@ public class ContainerOperationClient implements ScmClient {
// That makes sense, but it is not needed for the client to work.
if (LOG.isDebugEnabled()) {
LOG.debug("Pipeline creation successful. Pipeline: {}",
- pipeline.toString());
+ pipeline);
}
}
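
The change above relies on SLF4J parameterized logging: the {} placeholder defers stringifying the argument until the message is actually built, so the explicit toString() call was redundant. A minimal, self-contained sketch of the pattern (the pipeline object here is a stand-in, not the real Ozone Pipeline class):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public final class LoggingSketch {
      private static final Logger LOG =
          LoggerFactory.getLogger(LoggingSketch.class);

      public static void main(String[] args) {
        Object pipeline = "pipeline-1"; // stand-in for a Pipeline instance
        // The guard skips the whole call when DEBUG is off; the placeholder
        // defers toString() until the message is actually formatted.
        if (LOG.isDebugEnabled()) {
          LOG.debug("Pipeline creation successful. Pipeline: {}", pipeline);
        }
      }
    }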
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java
index 8c9a11a..d7e3f4d 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java
@@ -49,8 +49,6 @@ import org.apache.ratis.protocol.AlreadyClosedException;
import org.apache.ratis.protocol.GroupMismatchException;
import org.apache.ratis.protocol.NotReplicatedException;
import org.apache.ratis.protocol.RaftRetryFailureException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.net.InetSocketAddress;
@@ -76,9 +74,6 @@ import java.util.concurrent.TimeoutException;
@InterfaceStability.Unstable
public final class HddsClientUtils {
- private static final Logger LOG = LoggerFactory.getLogger(
- HddsClientUtils.class);
-
private HddsClientUtils() {
}
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BufferPool.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BufferPool.java
index f986fe3..f57700e 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BufferPool.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BufferPool.java
@@ -94,7 +94,7 @@ public class BufferPool {
// always remove from head of the list and append at last
final ChunkBuffer buffer = bufferList.remove(0);
// Ensure the buffer to be removed is always at the head of the list.
- Preconditions.checkArgument(buffer.equals(chunkBuffer));
+ Preconditions.checkArgument(buffer == chunkBuffer);
buffer.clear();
bufferList.add(buffer);
Preconditions.checkArgument(currentBufferIndex >= 0);
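
Note that the precondition above is deliberately tightened from value equality to reference identity: the pool must get back the very ChunkBuffer instance it handed out, not merely an equal one. A small sketch of the distinction, using ByteBuffer as a stand-in for ChunkBuffer and a plain throw in place of Guava's Preconditions:

    import java.nio.ByteBuffer;
    import java.util.ArrayList;
    import java.util.List;

    public final class IdentityCheckSketch {
      public static void main(String[] args) {
        List<ByteBuffer> bufferList = new ArrayList<>();
        ByteBuffer chunkBuffer = ByteBuffer.allocate(16);
        bufferList.add(chunkBuffer);

        ByteBuffer head = bufferList.remove(0);
        // equals() compares buffer contents, so two distinct empty buffers
        // would pass; == verifies this is the exact instance we expect.
        if (head != chunkBuffer) {
          throw new IllegalArgumentException(
              "buffer is not at the head of the list");
        }
      }
    }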
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/CommitWatcher.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/CommitWatcher.java
index 6311991..3960e33 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/CommitWatcher.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/CommitWatcher.java
@@ -34,6 +34,7 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
+import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
@@ -110,7 +111,8 @@ public class CommitWatcher {
}
public void updateCommitInfoMap(long index, List<ChunkBuffer> buffers) {
- commitIndex2flushedDataMap.put(index, buffers);
+ commitIndex2flushedDataMap.computeIfAbsent(index, k -> new LinkedList<>())
+ .addAll(buffers);
}
int getCommitInfoMapSize() {
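
The updateCommitInfoMap change swaps a plain put(), which would silently replace any buffers already recorded for the same commit index, for an append via computeIfAbsent. A minimal sketch of the merge pattern with a simplified value type:

    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.LinkedList;
    import java.util.List;
    import java.util.Map;

    public final class MergeSketch {
      public static void main(String[] args) {
        Map<Long, List<String>> commitIndexMap = new HashMap<>();
        // First flush for index 7: the list is created on demand.
        commitIndexMap.computeIfAbsent(7L, k -> new LinkedList<>())
            .addAll(Arrays.asList("buf-1", "buf-2"));
        // Second flush for the same index: appended, not overwritten.
        commitIndexMap.computeIfAbsent(7L, k -> new LinkedList<>())
            .addAll(Arrays.asList("buf-3"));
        System.out.println(commitIndexMap.get(7L)); // [buf-1, buf-2, buf-3]
      }
    }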
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaLoader.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaLoader.java
index 8d7abed..fee0852 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaLoader.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaLoader.java
@@ -1,4 +1,4 @@
-/**
+/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -27,6 +27,7 @@ import org.w3c.dom.NodeList;
import org.w3c.dom.Text;
import org.xml.sax.SAXException;
+import javax.xml.XMLConstants;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;
@@ -101,7 +102,7 @@ public final class NodeSchemaLoader {
* @return all valid node schemas defined in schema file
*/
public NodeSchemaLoadResult loadSchemaFromFile(String schemaFilePath)
- throws IllegalArgumentException, FileNotFoundException {
+ throws FileNotFoundException {
try {
File schemaFile = new File(schemaFilePath);
@@ -122,7 +123,7 @@ public final class NodeSchemaLoader {
try (InputStream stream = classloader
.getResourceAsStream(schemaFilePath)) {
if (stream != null) {
- LOG.info("Loading file from " + classloader
+ LOG.info("Loading file from {}", classloader
.getResources(schemaFilePath));
return loadSchemaFromStream(schemaFilePath, stream);
}
@@ -171,6 +172,7 @@ public final class NodeSchemaLoader {
LOG.info("Loading network topology layer schema file");
// Read and parse the schema file.
DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
+ dbf.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true);
dbf.setIgnoringComments(true);
DocumentBuilder builder = dbf.newDocumentBuilder();
Document doc = builder.parse(inputStream);
@@ -214,9 +216,6 @@ public final class NodeSchemaLoader {
* Load network topology layer schemas from a YAML configuration file.
* @param schemaFile as inputStream
* @return all valid node schemas defined in schema file
- * @throws ParserConfigurationException ParserConfigurationException happen
- * @throws IOException no such schema file
- * @throws SAXException xml file has some invalid elements
* @throws IllegalArgumentException xml file content is logically invalid
*/
private NodeSchemaLoadResult loadSchemaFromYaml(InputStream schemaFile) {
@@ -297,7 +296,7 @@ public final class NodeSchemaLoader {
*/
private Map<String, NodeSchema> loadLayersSection(Element root) {
NodeList elements = root.getElementsByTagName(LAYER_TAG);
- Map<String, NodeSchema> schemas = new HashMap<String, NodeSchema>();
+ Map<String, NodeSchema> schemas = new HashMap<>();
for (int i = 0; i < elements.getLength(); i++) {
Node node = elements.item(i);
if (node instanceof Element) {
@@ -361,8 +360,8 @@ public final class NodeSchemaLoader {
Map<String, NodeSchema> schemas) {
NodeList elements = root.getElementsByTagName(TOPOLOGY_TAG)
.item(0).getChildNodes();
- List<NodeSchema> schemaList = new ArrayList<NodeSchema>();
- boolean enforecePrefix = false;
+ List<NodeSchema> schemaList = new ArrayList<>();
+ boolean enforcePrefix = false;
for (int i = 0; i < elements.getLength(); i++) {
Node node = elements.item(i);
if (node instanceof Element) {
@@ -383,10 +382,10 @@ public final class NodeSchemaLoader {
}
if (TOPOLOGY_PATH.equals(tagName)) {
if(value.startsWith(NetConstants.PATH_SEPARATOR_STR)) {
- value = value.substring(1, value.length());
+ value = value.substring(1);
}
String[] layerIDs = value.split(NetConstants.PATH_SEPARATOR_STR);
- if (layerIDs == null || layerIDs.length != schemas.size()) {
+ if (layerIDs.length != schemas.size()) {
throw new IllegalArgumentException("Topology path depth doesn't "
+ "match layer element numbers");
}
@@ -409,7 +408,7 @@ public final class NodeSchemaLoader {
schemaList.add(schemas.get(layerIDs[j]));
}
} else if (TOPOLOGY_ENFORCE_PREFIX.equalsIgnoreCase(tagName)) {
- enforecePrefix = Boolean.parseBoolean(value);
+ enforcePrefix = Boolean.parseBoolean(value);
} else {
throw new IllegalArgumentException("Unsupported Element <" +
tagName + ">");
@@ -417,7 +416,7 @@ public final class NodeSchemaLoader {
}
}
// Integrity check
- if (enforecePrefix) {
+ if (enforcePrefix) {
// Every InnerNode should have prefix defined
for (NodeSchema schema: schemas.values()) {
if (schema.getType() == LayerType.INNER_NODE &&
@@ -427,7 +426,7 @@ public final class NodeSchemaLoader {
}
}
}
- return new NodeSchemaLoadResult(schemaList, enforecePrefix);
+ return new NodeSchemaLoadResult(schemaList, enforcePrefix);
}
/**
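
The FEATURE_SECURE_PROCESSING line is the security fix in this file: with secure processing on, the JDK parser denies access to external DTDs and entities, which is what the new external-entity.xml test (later in this commit) expects to see rejected with an accessExternalDTD error. A minimal sketch of secure parsing; the inline XML is illustrative only:

    import java.io.ByteArrayInputStream;
    import java.nio.charset.StandardCharsets;
    import javax.xml.XMLConstants;
    import javax.xml.parsers.DocumentBuilder;
    import javax.xml.parsers.DocumentBuilderFactory;
    import org.w3c.dom.Document;

    public final class SecureParseSketch {
      public static void main(String[] args) throws Exception {
        DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
        // Restricts entity expansion and external DTD/schema access.
        dbf.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true);
        dbf.setIgnoringComments(true);
        DocumentBuilder builder = dbf.newDocumentBuilder();
        Document doc = builder.parse(new ByteArrayInputStream(
            "<configuration><layoutversion>1</layoutversion></configuration>"
                .getBytes(StandardCharsets.UTF_8)));
        System.out.println(doc.getDocumentElement().getTagName());
      }
    }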
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/TraceAllMethod.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/TraceAllMethod.java
index 8bdf638..6dc9f96 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/TraceAllMethod.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/TraceAllMethod.java
@@ -59,6 +59,11 @@ public class TraceAllMethod<T> implements InvocationHandler {
public Object invoke(Object proxy, Method method, Object[] args)
throws Throwable {
Method delegateMethod = findDelegatedMethod(method);
+ if (delegateMethod == null) {
+ throw new NoSuchMethodException("Method not found: " +
+ method.getName());
+ }
+
try (Scope scope = GlobalTracer.get().buildSpan(
name + "." + method.getName())
.startActive(true)) {
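
The new null check turns a would-be NullPointerException (invoking a null Method when the lookup fails) into a descriptive NoSuchMethodException. A stripped-down sketch of the guard in a delegating InvocationHandler; this is an illustration, not the real TraceAllMethod, and it ignores overloads for brevity:

    import java.lang.reflect.InvocationHandler;
    import java.lang.reflect.Method;
    import java.util.HashMap;
    import java.util.Map;

    public final class DelegatingHandler implements InvocationHandler {
      private final Object delegate;
      private final Map<String, Method> methods = new HashMap<>();

      public DelegatingHandler(Object delegate) {
        this.delegate = delegate;
        for (Method m : delegate.getClass().getMethods()) {
          methods.put(m.getName(), m); // simplified: keyed by name only
        }
      }

      @Override
      public Object invoke(Object proxy, Method method, Object[] args)
          throws Throwable {
        Method delegateMethod = methods.get(method.getName());
        if (delegateMethod == null) {
          // Fail fast instead of NPE-ing on the reflective call below.
          throw new NoSuchMethodException(
              "Method not found: " + method.getName());
        }
        return delegateMethod.invoke(delegate, args);
      }
    }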
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 2f9ce31..8110242 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -494,11 +494,20 @@
<description>
Comma-separated list of OM service Ids.
- If not set, the default value of "om-service-value" is assigned as the
+ If not set, the default value of "omServiceIdDefault" is assigned as the
OM service ID.
</description>
</property>
<property>
+ <name>ozone.om.internal.service.id</name>
+ <value></value>
+ <tag>OM, HA</tag>
+ <description>
+ Service ID of the Ozone Manager. If this is not set, the OM falls back
+ to ozone.om.service.ids to find the service ID it belongs to.
+ </description>
+ </property>
+ <property>
<name>ozone.om.nodes.EXAMPLEOMSERVICEID</name>
<value></value>
<tag>OM, HA</tag>
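
The description above implies a two-step resolution: use ozone.om.internal.service.id when present, otherwise fall back to ozone.om.service.ids. A hedged sketch of that logic; the method and the Map-based stand-in for OzoneConfiguration are illustrative, not the actual OM code:

    import java.util.Arrays;
    import java.util.List;
    import java.util.Map;

    public final class ServiceIdSketch {
      static String resolveServiceId(Map<String, String> conf) {
        String internal = conf.get("ozone.om.internal.service.id");
        if (internal != null && !internal.isEmpty()) {
          return internal; // the explicit per-OM setting wins
        }
        // Fall back to ozone.om.service.ids; only unambiguous when the
        // list contains a single entry.
        String ids = conf.getOrDefault("ozone.om.service.ids", "");
        List<String> serviceIds = Arrays.asList(ids.split(","));
        if (serviceIds.size() != 1) {
          throw new IllegalStateException(
              "Cannot determine OM service id from: " + ids);
        }
        return serviceIds.get(0);
      }
    }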
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNodeSchemaLoader.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNodeSchemaLoader.java
index 0c20353..aa65737 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNodeSchemaLoader.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNodeSchemaLoader.java
@@ -1,4 +1,4 @@
-/**
+/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -19,85 +19,116 @@ package org.apache.hadoop.hdds.scm.net;
import org.junit.Rule;
import org.junit.Test;
+import org.junit.experimental.runners.Enclosed;
import org.junit.rules.Timeout;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+import java.io.FileNotFoundException;
+import java.net.URL;
import java.util.Arrays;
import java.util.Collection;
-import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertThrows;
/** Test the node schema loader. */
-@RunWith(Parameterized.class)
+@RunWith(Enclosed.class)
public class TestNodeSchemaLoader {
- private static final Logger LOG =
- LoggerFactory.getLogger(TestNodeSchemaLoader.class);
- private ClassLoader classLoader =
- Thread.currentThread().getContextClassLoader();
-
- public TestNodeSchemaLoader(String schemaFile, String errMsg) {
- try {
- String filePath = classLoader.getResource(
- "./networkTopologyTestFiles/" + schemaFile).getPath();
- NodeSchemaLoader.getInstance().loadSchemaFromFile(filePath);
- fail("expect exceptions");
- } catch (Throwable e) {
- assertTrue(e.getMessage().contains(errMsg));
+
+ /**
+ * Parameterized test cases for various error conditions.
+ */
+ @RunWith(Parameterized.class)
+ public static class ParameterizedTests {
+
+ private final String schemaFile;
+ private final String errMsg;
+
+ @Rule
+ public Timeout testTimeout = new Timeout(2000);
+
+ @Parameters
+ public static Collection<Object[]> getSchemaFiles() {
+ Object[][] schemaFiles = new Object[][]{
+ {"enforce-error.xml", "layer without prefix defined"},
+ {"invalid-cost.xml", "Cost should be positive number or 0"},
+ {"multiple-leaf.xml", "Multiple LEAF layers are found"},
+ {"multiple-root.xml", "Multiple ROOT layers are found"},
+ {"no-leaf.xml", "No LEAF layer is found"},
+ {"no-root.xml", "No ROOT layer is found"},
+ {"path-layers-size-mismatch.xml",
+ "Topology path depth doesn't match layer element numbers"},
+ {"path-with-id-reference-failure.xml",
+ "No layer found for id"},
+ {"unknown-layer-type.xml", "Unsupported layer type"},
+ {"wrong-path-order-1.xml",
+ "Topology path doesn't start with ROOT layer"},
+ {"wrong-path-order-2.xml",
+ "Topology path doesn't end with LEAF layer"},
+ {"no-topology.xml", "no or multiple <topology> element"},
+ {"multiple-topology.xml", "no or multiple <topology> element"},
+ {"invalid-version.xml", "Bad layoutversion value"},
+ {"external-entity.xml", "accessExternalDTD"},
+ };
+ return Arrays.asList(schemaFiles);
+ }
+
+ public ParameterizedTests(String schemaFile, String errMsg) {
+ this.schemaFile = schemaFile;
+ this.errMsg = errMsg;
}
- }
- @Rule
- public Timeout testTimeout = new Timeout(30000);
-
- @Parameters
- public static Collection<Object[]> getSchemaFiles() {
- Object[][] schemaFiles = new Object[][]{
- {"enforce-error.xml", "layer without prefix defined"},
- {"invalid-cost.xml", "Cost should be positive number or 0"},
- {"multiple-leaf.xml", "Multiple LEAF layers are found"},
- {"multiple-root.xml", "Multiple ROOT layers are found"},
- {"no-leaf.xml", "No LEAF layer is found"},
- {"no-root.xml", "No ROOT layer is found"},
- {"path-layers-size-mismatch.xml",
- "Topology path depth doesn't match layer element numbers"},
- {"path-with-id-reference-failure.xml",
- "No layer found for id"},
- {"unknown-layer-type.xml", "Unsupported layer type"},
- {"wrong-path-order-1.xml",
- "Topology path doesn't start with ROOT layer"},
- {"wrong-path-order-2.xml", "Topology path doesn't end with LEAF layer"},
- {"no-topology.xml", "no or multiple <topology> element"},
- {"multiple-topology.xml", "no or multiple <topology> element"},
- {"invalid-version.xml", "Bad layoutversion value"},
- };
- return Arrays.asList(schemaFiles);
+ @Test
+ public void testInvalid() {
+ String filePath = getClassloaderResourcePath(schemaFile);
+ Exception e = assertThrows(IllegalArgumentException.class,
+ () -> NodeSchemaLoader.getInstance().loadSchemaFromFile(filePath));
+ assertMessageContains(e.getMessage(), errMsg, schemaFile);
+ }
}
- @Test
- public void testGood() {
- try {
- String filePath = classLoader.getResource(
- "./networkTopologyTestFiles/good.xml").getPath();
+ /**
+ * Test cases that do not use the parameters; these run only once.
+ */
+ public static class NonParameterizedTests {
+
+ private static final String VALID_SCHEMA_FILE = "good.xml";
+
+ @Rule
+ public Timeout testTimeout = new Timeout(2000);
+
+ @Test
+ public void testGood() throws Exception {
+ String filePath = getClassloaderResourcePath(VALID_SCHEMA_FILE);
NodeSchemaLoader.getInstance().loadSchemaFromFile(filePath);
- } catch (Throwable e) {
- fail("should succeed");
+ }
+
+ @Test
+ public void testNotExist() {
+ String filePath = getClassloaderResourcePath(VALID_SCHEMA_FILE)
+ .replace(VALID_SCHEMA_FILE, "non-existent.xml");
+ Exception e = assertThrows(FileNotFoundException.class,
+ () -> NodeSchemaLoader.getInstance().loadSchemaFromFile(filePath));
+ assertMessageContains(e.getMessage(), "not found", "non-existent.xml");
}
}
- @Test
- public void testNotExist() {
- String filePath = classLoader.getResource(
- "./networkTopologyTestFiles/good.xml").getPath() + ".backup";
- try {
- NodeSchemaLoader.getInstance().loadSchemaFromFile(filePath);
- fail("should fail");
- } catch (Throwable e) {
- assertTrue(e.getMessage().contains("not found"));
+ private static void assertMessageContains(
+ String actual, String expected, String testCase) {
+ if (!actual.contains(expected)) {
+ fail(String.format(
+ "Expected message for '%s' to contain '%s', but got: '%s'",
+ testCase, expected, actual));
}
}
+
+ private static String getClassloaderResourcePath(String file) {
+ URL resource = Thread.currentThread().getContextClassLoader()
+ .getResource("networkTopologyTestFiles/" + file);
+ assertNotNull(resource);
+ return resource.getPath();
+ }
}
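
The rewritten test uses JUnit 4's Enclosed runner so the parameterized failure cases and the one-off cases share one top-level class without the one-off tests re-running for every parameter row. A minimal sketch of that structure with toy assertions:

    import static org.junit.Assert.assertEquals;

    import java.util.Arrays;
    import java.util.Collection;
    import org.junit.Test;
    import org.junit.experimental.runners.Enclosed;
    import org.junit.runner.RunWith;
    import org.junit.runners.Parameterized;
    import org.junit.runners.Parameterized.Parameters;

    @RunWith(Enclosed.class)
    public class EnclosedSketchTest {

      /** Runs once per parameter row. */
      @RunWith(Parameterized.class)
      public static class ParameterizedTests {
        private final int input;

        public ParameterizedTests(int input) {
          this.input = input;
        }

        @Parameters
        public static Collection<Object[]> data() {
          return Arrays.asList(new Object[][]{{1}, {2}, {3}});
        }

        @Test
        public void squareIsNonNegative() {
          assertEquals(input * input, Math.abs(input * input));
        }
      }

      /** Runs exactly once. */
      public static class NonParameterizedTests {
        @Test
        public void plainCase() {
          assertEquals(4, 2 + 2);
        }
      }
    }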
diff --git a/hadoop-hdds/common/src/test/resources/log4j.properties b/hadoop-hdds/common/src/test/resources/log4j.properties
new file mode 100644
index 0000000..bb5cbe5
--- /dev/null
+++ b/hadoop-hdds/common/src/test/resources/log4j.properties
@@ -0,0 +1,23 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# log4j configuration used during build and unit tests
+
+log4j.rootLogger=INFO,stdout
+log4j.threshold=ALL
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
diff --git a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/external-entity.xml b/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/external-entity.xml
new file mode 100644
index 0000000..f93cb0f
--- /dev/null
+++ b/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/external-entity.xml
@@ -0,0 +1,52 @@
+<?xml version="1.0"?>
+<!DOCTYPE foo [
+<!ENTITY xxe SYSTEM "file:///etc/passwd" >
+]>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements. See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership. The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<configuration>
+ <layoutversion>1</layoutversion>
+ <layers>
+ <layer id="datacenter">
+ <prefix></prefix>
+ <cost>1</cost>
+ <type>Root</type>
+ </layer>
+ <layer id="rack">
+ <prefix>rack</prefix>
+ <cost>1</cost>
+ <type>InnerNode</type>
+ <default>/default-rack</default>
+ </layer>
+ <layer id="nodegroup">
+ <prefix>nodegroup</prefix>
+ <cost>1</cost>
+ <type>InnerNode</type>
+ <default>/default-nodegroup</default>
+ </layer>
+ <layer id="node">
+ <prefix></prefix>
+ <cost>0</cost>
+ <type>&xxe;</type>
+ </layer>
+ </layers>
+ <topology>
+ <path>/datacenter/rack/nodegroup/node</path>
+ <enforceprefix>true</enforceprefix>
+ </topology>
+</configuration>
diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigFileAppender.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigFileAppender.java
index 9463f42..9f1c087 100644
--- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigFileAppender.java
+++ b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigFileAppender.java
@@ -1,4 +1,4 @@
-/**
+/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hdds.conf;
+import javax.xml.XMLConstants;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.transform.OutputKeys;
@@ -48,6 +49,7 @@ public class ConfigFileAppender {
public ConfigFileAppender() {
try {
DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
+ factory.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true);
builder = factory.newDocumentBuilder();
} catch (Exception ex) {
throw new ConfigurationException("Can initialize new configuration", ex);
@@ -91,7 +93,7 @@ public class ConfigFileAppender {
addXmlElement(propertyElement, "description", description);
- String tagsAsString = Arrays.stream(tags).map(tag -> tag.name())
+ String tagsAsString = Arrays.stream(tags).map(Enum::name)
.collect(Collectors.joining(", "));
addXmlElement(propertyElement, "tag", tagsAsString);
@@ -111,15 +113,16 @@ public class ConfigFileAppender {
*/
public void write(Writer writer) {
try {
- TransformerFactory transformerFactory = TransformerFactory.newInstance();
- Transformer transf = transformerFactory.newTransformer();
+ TransformerFactory factory = TransformerFactory.newInstance();
+ factory.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true);
+ Transformer transformer = factory.newTransformer();
- transf.setOutputProperty(OutputKeys.ENCODING, "UTF-8");
- transf.setOutputProperty(OutputKeys.INDENT, "yes");
- transf
- .setOutputProperty("{http://xml.apache.org/xslt}indent-amount", "2");
+ transformer.setOutputProperty(OutputKeys.ENCODING, "UTF-8");
+ transformer.setOutputProperty(OutputKeys.INDENT, "yes");
+ transformer.setOutputProperty("{http://xml.apache.org/xslt}indent-amount",
+ "2");
- transf.transform(new DOMSource(document), new StreamResult(writer));
+ transformer.transform(new DOMSource(document), new StreamResult(writer));
} catch (TransformerException e) {
throw new ConfigurationException("Can't write the configuration xml", e);
}
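
The write path gets the same FEATURE_SECURE_PROCESSING hardening as the parser, applied here to the TransformerFactory. A short sketch of writing a DOM document through a secured, indenting transformer:

    import java.io.StringWriter;
    import javax.xml.XMLConstants;
    import javax.xml.parsers.DocumentBuilderFactory;
    import javax.xml.transform.OutputKeys;
    import javax.xml.transform.Transformer;
    import javax.xml.transform.TransformerFactory;
    import javax.xml.transform.dom.DOMSource;
    import javax.xml.transform.stream.StreamResult;
    import org.w3c.dom.Document;

    public final class SecureWriteSketch {
      public static void main(String[] args) throws Exception {
        Document document = DocumentBuilderFactory.newInstance()
            .newDocumentBuilder().newDocument();
        document.appendChild(document.createElement("configuration"));

        TransformerFactory factory = TransformerFactory.newInstance();
        // Restricts external access during the transform, as above.
        factory.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true);
        Transformer transformer = factory.newTransformer();
        transformer.setOutputProperty(OutputKeys.ENCODING, "UTF-8");
        transformer.setOutputProperty(OutputKeys.INDENT, "yes");
        transformer.setOutputProperty(
            "{http://xml.apache.org/xslt}indent-amount", "2");

        StringWriter writer = new StringWriter();
        transformer.transform(
            new DOMSource(document), new StreamResult(writer));
        System.out.println(writer);
      }
    }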
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index 21127f4..0249d7e 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -25,7 +25,6 @@ import com.google.common.annotations.VisibleForTesting;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.RemovalListener;
-import com.google.common.cache.RemovalNotification;
import com.google.protobuf.BlockingService;
import java.util.Objects;
import org.apache.hadoop.classification.InterfaceAudience;
@@ -178,7 +177,7 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
/**
* Key = DatanodeUuid, value = ContainerStat.
*/
- private Cache<String, ContainerStat> containerReportCache;
+ private final Cache<String, ContainerStat> containerReportCache;
private ReplicationManager replicationManager;
@@ -232,7 +231,8 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
configuration = conf;
initMetrics();
- initContainerReportCache(conf);
+ containerReportCache = buildContainerReportCache();
+
/**
* It is assumed the scm --init command creates the SCM Storage Config.
*/
@@ -478,10 +478,6 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
scmMetadataStore = configurator.getMetadataStore();
} else {
scmMetadataStore = new SCMMetadataStoreRDBImpl(conf);
- if (scmMetadataStore == null) {
- throw new SCMException("Unable to initialize metadata store",
- ResultCodes.SCM_NOT_INITIALIZED);
- }
}
}
@@ -617,7 +613,7 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
scmStorageConfig.setClusterId(clusterId);
}
scmStorageConfig.initialize();
- System.out.println(
+ LOG.info(
"SCM initialization succeeded."
+ "Current cluster id for sd="
+ scmStorageConfig.getStorageDir()
@@ -629,7 +625,7 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
return false;
}
} else {
- System.out.println(
+ LOG.info(
"SCM already initialized. Reusing existing"
+ " cluster id for sd="
+ scmStorageConfig.getStorageDir()
@@ -675,28 +671,25 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
/**
* Initialize container reports cache that sent from datanodes.
- *
- * @param conf
*/
- private void initContainerReportCache(OzoneConfiguration conf) {
- containerReportCache =
+ @SuppressWarnings("UnstableApiUsage")
+ private Cache<String, ContainerStat> buildContainerReportCache() {
+ return
CacheBuilder.newBuilder()
.expireAfterAccess(Long.MAX_VALUE, TimeUnit.MILLISECONDS)
.maximumSize(Integer.MAX_VALUE)
- .removalListener(
- new RemovalListener<String, ContainerStat>() {
- @Override
- public void onRemoval(
- RemovalNotification<String, ContainerStat>
- removalNotification) {
- synchronized (containerReportCache) {
- ContainerStat stat = removalNotification.getValue();
+ .removalListener((
+ RemovalListener<String, ContainerStat>) removalNotification -> {
+ synchronized (containerReportCache) {
+ ContainerStat stat = removalNotification.getValue();
+ if (stat != null) {
+ // TODO: Are we doing the right thing here?
// remove invalid container report
metrics.decrContainerStat(stat);
- if (LOG.isDebugEnabled()) {
- LOG.debug("Remove expired container stat entry for " +
- "datanode: {}.", removalNotification.getKey());
- }
+ }
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Remove expired container stat entry for " +
+ "datanode: {}.", removalNotification.getKey());
}
}
})
@@ -763,22 +756,27 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
* Start service.
*/
public void start() throws IOException {
- LOG.info(
- buildRpcServerStartMessage(
- "StorageContainerLocationProtocol RPC server",
- getClientRpcAddress()));
+ if (LOG.isInfoEnabled()) {
+ LOG.info(buildRpcServerStartMessage(
+ "StorageContainerLocationProtocol RPC server",
+ getClientRpcAddress()));
+ }
ms = HddsUtils.initializeMetrics(configuration, "StorageContainerManager");
commandWatcherLeaseManager.start();
getClientProtocolServer().start();
- LOG.info(buildRpcServerStartMessage("ScmBlockLocationProtocol RPC " +
- "server", getBlockProtocolServer().getBlockRpcAddress()));
+ if (LOG.isInfoEnabled()) {
+ LOG.info(buildRpcServerStartMessage("ScmBlockLocationProtocol RPC " +
+ "server", getBlockProtocolServer().getBlockRpcAddress()));
+ }
getBlockProtocolServer().start();
- LOG.info(buildRpcServerStartMessage("ScmDatanodeProtocl RPC " +
- "server", getDatanodeProtocolServer().getDatanodeRpcAddress()));
+ if (LOG.isInfoEnabled()) {
+ LOG.info(buildRpcServerStartMessage("ScmDatanodeProtocl RPC " +
+ "server", getDatanodeProtocolServer().getDatanodeRpcAddress()));
+ }
getDatanodeProtocolServer().start();
if (getSecurityProtocolServer() != null) {
getSecurityProtocolServer().start();
@@ -971,12 +969,10 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
}
public void checkAdminAccess(String remoteUser) throws IOException {
- if (remoteUser != null) {
- if (!scmAdminUsernames.contains(remoteUser)) {
- throw new IOException(
- "Access denied for user " + remoteUser + ". Superuser privilege " +
- "is required.");
- }
+ if (remoteUser != null && !scmAdminUsernames.contains(remoteUser)) {
+ throw new IOException(
+ "Access denied for user " + remoteUser + ". Superuser privilege " +
+ "is required.");
}
}
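
Besides converting the removal listener to a lambda, the rewrite adds a null check: a Guava RemovalNotification's value can be null (for example when values are weakly or softly referenced and already collected), which the old code never guarded against. A hedged sketch of the pattern with an AtomicInteger standing in for the real metrics object:

    import com.google.common.cache.Cache;
    import com.google.common.cache.CacheBuilder;
    import com.google.common.cache.RemovalListener;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicInteger;

    public final class ReportCacheSketch {
      public static void main(String[] args) {
        AtomicInteger statGauge = new AtomicInteger(); // stand-in metrics
        Cache<String, Integer> cache = CacheBuilder.newBuilder()
            .expireAfterAccess(Long.MAX_VALUE, TimeUnit.MILLISECONDS)
            .maximumSize(Integer.MAX_VALUE)
            .removalListener(
                (RemovalListener<String, Integer>) notification -> {
                  Integer stat = notification.getValue();
                  if (stat != null) { // value may already be gone
                    statGauge.addAndGet(-stat);
                  }
                })
            .build();

        cache.put("datanode-1", 5);
        statGauge.addAndGet(5);
        cache.invalidate("datanode-1"); // listener runs, gauge back to 0
        System.out.println(statGauge.get());
      }
    }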
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
index dcb9b5c..8bbfa82 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
@@ -39,6 +39,9 @@ public final class OMConfigKeys {
"ozone.om.handler.count.key";
public static final int OZONE_OM_HANDLER_COUNT_DEFAULT = 20;
+ public static final String OZONE_OM_INTERNAL_SERVICE_ID =
+ "ozone.om.internal.service.id";
+
public static final String OZONE_OM_SERVICE_IDS_KEY =
"ozone.om.service.ids";
public static final String OZONE_OM_NODES_KEY =
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java
index df44e0b..a594f41 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java
@@ -216,7 +216,9 @@ public class OMException extends IOException {
INVALID_PART, // When part name is not found or not matching with partname
// in OM MPU partInfo.
- INVALID_PART_ORDER // When list of parts mentioned to complete MPU are not
- // given in ascending order.
+ INVALID_PART_ORDER, // When list of parts mentioned to complete MPU are not
+ // given in ascending order.
+
+ SCM_GET_PIPELINE_EXCEPTION
}
}
diff --git a/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto b/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto
index 62b7190..21577be 100644
--- a/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto
+++ b/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto
@@ -306,6 +306,7 @@ enum Status {
INVALID_PART = 55;
INVALID_PART_ORDER = 56;
+ SCM_GET_PIPELINE_EXCEPTION = 57;
}
/**
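
The new result code lands in two places that must stay in sync: the OMException.ResultCodes enum and the protobuf Status enum (value 57), both changed in the hunks above. A hedged sketch of a name-based translation between the two, using stand-in enums rather than the real protobuf-generated classes:

    public final class StatusMappingSketch {
      // Stand-ins: the real types are OMException.ResultCodes and the
      // generated OzoneManagerProtocolProtos.Status.
      enum ResultCodes { INVALID_PART, INVALID_PART_ORDER,
          SCM_GET_PIPELINE_EXCEPTION }
      enum Status { INVALID_PART, INVALID_PART_ORDER,
          SCM_GET_PIPELINE_EXCEPTION }

      static Status toProtoStatus(ResultCodes code) {
        // Name-based mapping works only if every ResultCodes constant
        // has a same-named Status constant -- hence the paired additions.
        return Status.valueOf(code.name());
      }

      public static void main(String[] args) {
        System.out.println(
            toProtoStatus(ResultCodes.SCM_GET_PIPELINE_EXCEPTION));
      }
    }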
diff --git a/hadoop-ozone/dev-support/checks/sonar.sh b/hadoop-ozone/dev-support/checks/sonar.sh
index 8183506..135a47e 100755
--- a/hadoop-ozone/dev-support/checks/sonar.sh
+++ b/hadoop-ozone/dev-support/checks/sonar.sh
@@ -20,4 +20,4 @@ if [ ! "$SONAR_TOKEN" ]; then
echo "SONAR_TOKEN environment variable should be set"
exit 1
fi
-mvn org.sonarsource.scanner.maven:sonar-maven-plugin:3.6.0.1398:sonar -Dsonar.host.url=https://sonarcloud.io -Dsonar.organization=apache -Dsonar.projectKey=hadoop-ozone -Dsonar.java.binaries=hadoop-hdds/tools/target/classes,hadoop-hdds/framework/target/classes,hadoop-hdds/config/target/classes,hadoop-hdds/server-scm/target/classes,hadoop-hdds/docs/target/classes,hadoop-hdds/common/target/classes,hadoop-hdds/container-service/target/classes,hadoop-hdds/client/target/classes,hadoop-ozone/o [...]
+mvn org.sonarsource.scanner.maven:sonar-maven-plugin:3.6.0.1398:sonar -Dsonar.host.url=https://sonarcloud.io -Dsonar.organization=apache -Dsonar.projectKey=hadoop-ozone
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java
index 006d854..d756457 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java
@@ -307,8 +307,12 @@ public final class MiniOzoneHAClusterImpl extends MiniOzoneClusterImpl {
* Initialize HA related configurations.
*/
private void initHAConfig(int basePort) throws IOException {
- // Set configurations required for starting OM HA service
+ // Set configurations required for starting OM HA service, because that
+ // is the serviceID being passed to start the Ozone HA cluster.
+ // Both the internal service ID and OZONE_OM_SERVICE_IDS_KEY are set, so
+ // that at startup the OM uses the internal service ID to find its own.
conf.set(OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY, omServiceId);
+ conf.set(OMConfigKeys.OZONE_OM_INTERNAL_SERVICE_ID, omServiceId);
String omNodesKey = OmUtils.addKeySuffixes(
OMConfigKeys.OZONE_OM_NODES_KEY, omServiceId);
StringBuilder omNodesKeyValue = new StringBuilder();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
index 9cfd218..b7dab79 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
@@ -1,4 +1,4 @@
-/**
+/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -20,6 +20,7 @@ package org.apache.hadoop.ozone;
import java.io.File;
import java.io.IOException;
import java.net.InetAddress;
+import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.security.KeyPair;
@@ -28,16 +29,13 @@ import java.util.Properties;
import java.util.UUID;
import java.util.concurrent.Callable;
+import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol;
import org.apache.hadoop.hdds.scm.HddsTestUtils;
import org.apache.hadoop.hdds.scm.ScmConfig;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.ScmInfo;
import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
import org.apache.hadoop.hdds.scm.server.SCMHTTPServerConfig;
@@ -55,10 +53,8 @@ import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.minikdc.MiniKdc;
import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.net.ServerSocketUtil;
import org.apache.hadoop.ozone.client.CertificateClientTestImpl;
import org.apache.hadoop.ozone.common.Storage;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
import org.apache.hadoop.ozone.om.OMStorage;
import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.om.exceptions.OMException;
@@ -79,7 +75,6 @@ import org.bouncycastle.asn1.x500.X500Name;
import org.bouncycastle.asn1.x500.style.BCStyle;
import org.bouncycastle.cert.jcajce.JcaX509CertificateHolder;
import org.junit.After;
-import org.junit.Assert;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
@@ -94,10 +89,29 @@ import java.time.LocalDateTime;
import java.time.temporal.ChronoUnit;
import java.util.Date;
-import static junit.framework.TestCase.assertNotNull;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;
import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS;
+import static org.apache.hadoop.hdds.scm.ScmConfig.ConfigStrings.HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfig.ConfigStrings.HDDS_SCM_KERBEROS_PRINCIPAL_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_PORT_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_PORT_DEFAULT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_PORT_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_PORT_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_PORT_DEFAULT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_PORT_KEY;
+import static org.apache.hadoop.hdds.scm.server.SCMHTTPServerConfig.ConfigStrings.HDDS_SCM_HTTP_KERBEROS_KEYTAB_FILE_KEY;
+import static org.apache.hadoop.hdds.scm.server.SCMHTTPServerConfig.ConfigStrings.HDDS_SCM_HTTP_KERBEROS_PRINCIPAL_KEY;
+import static org.apache.hadoop.net.ServerSocketUtil.getPort;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.DELEGATION_TOKEN_MAX_LIFETIME_KEY;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HTTP_KERBEROS_KEYTAB_FILE;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HTTP_KERBEROS_PRINCIPAL_KEY;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_KERBEROS_KEYTAB_FILE_KEY;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY;
import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_AUTH_METHOD;
import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TOKEN_ERROR_OTHER;
import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TOKEN_EXPIRED;
@@ -105,6 +119,7 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLU
import static org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod.KERBEROS;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
@@ -118,82 +133,64 @@ public final class TestSecureOzoneCluster {
private static final String TEST_USER = "testUgiUser@EXAMPLE.COM";
private static final String COMPONENT = "test";
- private static final int CLIENT_TIMEOUT = 2 * 1000;
- private Logger logger = LoggerFactory
+ private static final int CLIENT_TIMEOUT = 2_000;
+ private static final String OM_CERT_SERIAL_ID = "9879877970576";
+ private static final Logger LOG = LoggerFactory
.getLogger(TestSecureOzoneCluster.class);
@Rule
- public Timeout timeout = new Timeout(80000);
+ public Timeout timeout = new Timeout(80_000);
+
+ @Rule
+ public TemporaryFolder folder = new TemporaryFolder();
private MiniKdc miniKdc;
private OzoneConfiguration conf;
private File workDir;
- private static Properties securityProperties;
private File scmKeytab;
private File spnegoKeytab;
private File omKeyTab;
private File testUserKeytab;
- private String curUser;
private String testUserPrincipal;
- private UserGroupInformation testKerberosUgi;
private StorageContainerManager scm;
private OzoneManager om;
private String host;
-
- private static String clusterId;
- private static String scmId;
- private static String omId;
+ private String clusterId;
+ private String scmId;
+ private String omId;
private OzoneManagerProtocolClientSideTranslatorPB omClient;
- private KeyPair keyPair;
- private Path metaDirPath;
- @Rule
- public TemporaryFolder folder= new TemporaryFolder();
- private String omCertSerialId = "9879877970576";
@Before
public void init() {
try {
conf = new OzoneConfiguration();
- conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "localhost");
-
- conf.setInt(ScmConfigKeys.OZONE_SCM_CLIENT_PORT_KEY, ServerSocketUtil
- .getPort(ScmConfigKeys.OZONE_SCM_CLIENT_PORT_DEFAULT, 100));
- conf.setInt(ScmConfigKeys.OZONE_SCM_DATANODE_PORT_KEY, ServerSocketUtil
- .getPort(ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT, 100));
- conf.setInt(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_PORT_KEY,
- ServerSocketUtil.getPort(ScmConfigKeys
- .OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT, 100));
- conf.setInt(ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_PORT_KEY,
- ServerSocketUtil.getPort(ScmConfigKeys
- .OZONE_SCM_SECURITY_SERVICE_PORT_DEFAULT, 100));
-
- conf.set(OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY, "OMMarketingCluster001");
- conf.set(OMConfigKeys.OZONE_OM_NODES_KEY+".OMMarketingCluster001",
- "node1,node2,node3");
- conf.set(OMConfigKeys.OZONE_OM_ADDRESS_KEY+".OMMarketingCluster001.node1",
- "localhost:9862");
- conf.set(OMConfigKeys.OZONE_OM_ADDRESS_KEY+".OMMarketingCluster001.node2",
- "google.com:9863");
- conf.set(OMConfigKeys.OZONE_OM_ADDRESS_KEY+".OMMarketingCluster001.node3",
- "yahoo.com:9864");
+ conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, "localhost");
+
+ conf.setInt(OZONE_SCM_CLIENT_PORT_KEY,
+ getPort(OZONE_SCM_CLIENT_PORT_DEFAULT, 100));
+ conf.setInt(OZONE_SCM_DATANODE_PORT_KEY,
+ getPort(OZONE_SCM_DATANODE_PORT_DEFAULT, 100));
+ conf.setInt(OZONE_SCM_BLOCK_CLIENT_PORT_KEY,
+ getPort(OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT, 100));
+ conf.setInt(OZONE_SCM_SECURITY_SERVICE_PORT_KEY,
+ getPort(OZONE_SCM_SECURITY_SERVICE_PORT_DEFAULT, 100));
DefaultMetricsSystem.setMiniClusterMode(true);
final String path = folder.newFolder().toString();
- metaDirPath = Paths.get(path, "om-meta");
+ Path metaDirPath = Paths.get(path, "om-meta");
conf.set(OZONE_METADATA_DIRS, metaDirPath.toString());
conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true);
- conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
- KERBEROS.toString());
+ conf.set(HADOOP_SECURITY_AUTHENTICATION, KERBEROS.name());
+
+ workDir = GenericTestUtils.getTestDir(getClass().getSimpleName());
startMiniKdc();
- setSecureConfig(conf);
- createCredentialsInKDC(conf, miniKdc);
- generateKeyPair(conf);
+ setSecureConfig();
+ createCredentialsInKDC();
+ generateKeyPair();
// OzoneManager.setTestSecureOmFlag(true);
- } catch (IOException e) {
- logger.error("Failed to initialize TestSecureOzoneCluster", e);
} catch (Exception e) {
- logger.error("Failed to initialize TestSecureOzoneCluster", e);
+ LOG.error("Failed to initialize TestSecureOzoneCluster", e);
}
}
@@ -204,30 +201,22 @@ public final class TestSecureOzoneCluster {
if (scm != null) {
scm.stop();
}
- if (om != null) {
- om.stop();
- }
- if (omClient != null) {
- omClient.close();
- }
+ IOUtils.closeQuietly(om);
+ IOUtils.closeQuietly(omClient);
} catch (Exception e) {
- logger.error("Failed to stop TestSecureOzoneCluster", e);
+ LOG.error("Failed to stop TestSecureOzoneCluster", e);
}
}
- private void createCredentialsInKDC(Configuration configuration,
- MiniKdc kdc) throws Exception {
- OzoneConfiguration ozoneConfiguration =
- new OzoneConfiguration(configuration);
+ private void createCredentialsInKDC() throws Exception {
+ ScmConfig scmConfig = conf.getObject(ScmConfig.class);
SCMHTTPServerConfig httpServerConfig =
- ozoneConfiguration.getObject(SCMHTTPServerConfig.class);
- createPrincipal(scmKeytab,
- httpServerConfig.getKerberosPrincipal());
- createPrincipal(spnegoKeytab,
- httpServerConfig.getKerberosKeytab());
+ conf.getObject(SCMHTTPServerConfig.class);
+ createPrincipal(scmKeytab, scmConfig.getKerberosPrincipal());
+ createPrincipal(spnegoKeytab, httpServerConfig.getKerberosPrincipal());
createPrincipal(testUserKeytab, testUserPrincipal);
createPrincipal(omKeyTab,
- configuration.get(OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY));
+ conf.get(OZONE_OM_KERBEROS_PRINCIPAL_KEY));
}
private void createPrincipal(File keytab, String... principal)
@@ -236,9 +225,7 @@ public final class TestSecureOzoneCluster {
}
private void startMiniKdc() throws Exception {
- workDir = GenericTestUtils
- .getTestDir(TestSecureOzoneCluster.class.getSimpleName());
- securityProperties = MiniKdc.createConf();
+ Properties securityProperties = MiniKdc.createConf();
miniKdc = new MiniKdc(securityProperties, workDir);
miniKdc.start();
}
@@ -247,31 +234,22 @@ public final class TestSecureOzoneCluster {
miniKdc.stop();
}
- private void setSecureConfig(Configuration configuration) throws IOException {
- SCMHTTPServerConfig httpServerConfig =
- conf.getObject(SCMHTTPServerConfig.class);
- ScmConfig scmConfig = conf.getObject(ScmConfig.class);
- configuration.setBoolean(OZONE_SECURITY_ENABLED_KEY, true);
+ private void setSecureConfig() throws IOException {
+ conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true);
host = InetAddress.getLocalHost().getCanonicalHostName()
.toLowerCase();
- String realm = miniKdc.getRealm();
- curUser = UserGroupInformation.getCurrentUser()
- .getUserName();
- configuration.set(
- CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
- "kerberos");
- configuration.set(OZONE_ADMINISTRATORS, curUser);
-
- configuration.set(ScmConfig.ConfigStrings.HDDS_SCM_KERBEROS_PRINCIPAL_KEY,
- "scm/" + host + "@" + realm);
- configuration.set(SCMHTTPServerConfig.ConfigStrings
- .HDDS_SCM_HTTP_KERBEROS_PRINCIPAL_KEY,
- "HTTP_SCM/" + host + "@" + realm);
- configuration.set(OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY,
- "om/" + host + "@" + realm);
- configuration.set(OMConfigKeys.OZONE_OM_HTTP_KERBEROS_PRINCIPAL_KEY,
- "HTTP_OM/" + host + "@" + realm);
+ conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
+
+ String curUser = UserGroupInformation.getCurrentUser().getUserName();
+ conf.set(OZONE_ADMINISTRATORS, curUser);
+
+ String realm = miniKdc.getRealm();
+ String hostAndRealm = host + "@" + realm;
+ conf.set(HDDS_SCM_KERBEROS_PRINCIPAL_KEY, "scm/" + hostAndRealm);
+ conf.set(HDDS_SCM_HTTP_KERBEROS_PRINCIPAL_KEY, "HTTP_SCM/" + hostAndRealm);
+ conf.set(OZONE_OM_KERBEROS_PRINCIPAL_KEY, "om/" + hostAndRealm);
+ conf.set(OZONE_OM_HTTP_KERBEROS_PRINCIPAL_KEY, "HTTP_OM/" + hostAndRealm);
scmKeytab = new File(workDir, "scm.keytab");
spnegoKeytab = new File(workDir, "http.keytab");
@@ -279,15 +257,13 @@ public final class TestSecureOzoneCluster {
testUserKeytab = new File(workDir, "testuser.keytab");
testUserPrincipal = "test@" + realm;
- configuration.set(ScmConfig.ConfigStrings.HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY,
+ conf.set(HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY,
scmKeytab.getAbsolutePath());
- configuration.set(
- SCMHTTPServerConfig.ConfigStrings
- .HDDS_SCM_HTTP_KERBEROS_KEYTAB_FILE_KEY,
+ conf.set(HDDS_SCM_HTTP_KERBEROS_KEYTAB_FILE_KEY,
spnegoKeytab.getAbsolutePath());
- configuration.set(OMConfigKeys.OZONE_OM_KERBEROS_KEYTAB_FILE_KEY,
+ conf.set(OZONE_OM_KERBEROS_KEYTAB_FILE_KEY,
omKeyTab.getAbsolutePath());
- conf.set(OMConfigKeys.OZONE_OM_HTTP_KERBEROS_KEYTAB_FILE,
+ conf.set(OZONE_OM_HTTP_KERBEROS_KEYTAB_FILE,
spnegoKeytab.getAbsolutePath());
}
@@ -298,8 +274,8 @@ public final class TestSecureOzoneCluster {
scm = StorageContainerManager.createSCM(conf);
//Reads the SCM Info from SCM instance
ScmInfo scmInfo = scm.getClientProtocolServer().getScmInfo();
- Assert.assertEquals(clusterId, scmInfo.getClusterId());
- Assert.assertEquals(scmId, scmInfo.getScmId());
+ assertEquals(clusterId, scmInfo.getClusterId());
+ assertEquals(scmId, scmInfo.getScmId());
}
@Test
@@ -320,9 +296,9 @@ public final class TestSecureOzoneCluster {
HddsClientUtils.getScmSecurityClient(conf, ugi);
assertNotNull(scmSecurityProtocolClient);
String caCert = scmSecurityProtocolClient.getCACertificate();
+ assertNotNull(caCert);
LambdaTestUtils.intercept(RemoteException.class, "Certificate not found",
() -> scmSecurityProtocolClient.getCertificate("1"));
- assertNotNull(caCert);
// Case 2: User without Kerberos credentials should fail.
ugi = UserGroupInformation.createRemoteUser("test");
@@ -330,11 +306,10 @@ public final class TestSecureOzoneCluster {
SCMSecurityProtocol finalScmSecurityProtocolClient =
HddsClientUtils.getScmSecurityClient(conf, ugi);
- LambdaTestUtils.intercept(IOException.class, "Client cannot" +
- " authenticate via:[KERBEROS]",
- () -> finalScmSecurityProtocolClient.getCACertificate());
- LambdaTestUtils.intercept(IOException.class, "Client cannot" +
- " authenticate via:[KERBEROS]",
+ String cannotAuthMessage = "Client cannot authenticate via:[KERBEROS]";
+ LambdaTestUtils.intercept(IOException.class, cannotAuthMessage,
+ finalScmSecurityProtocolClient::getCACertificate);
+ LambdaTestUtils.intercept(IOException.class, cannotAuthMessage,
() -> finalScmSecurityProtocolClient.getCertificate("1"));
} finally {
if (scm != null) {
@@ -343,20 +318,15 @@ public final class TestSecureOzoneCluster {
}
}
- private void initSCM()
- throws IOException, AuthenticationException {
-
+ private void initSCM() throws IOException {
clusterId = UUID.randomUUID().toString();
scmId = UUID.randomUUID().toString();
omId = UUID.randomUUID().toString();
final String path = folder.newFolder().toString();
Path scmPath = Paths.get(path, "scm-meta");
- File temp = scmPath.toFile();
- if(!temp.exists()) {
- temp.mkdirs();
- }
- conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString());
+ Files.createDirectories(scmPath);
+ conf.set(OZONE_METADATA_DIRS, scmPath.toString());
SCMStorageConfig scmStore = new SCMStorageConfig(conf);
scmStore.setClusterId(clusterId);
scmStore.setScmId(scmId);
@@ -367,19 +337,16 @@ public final class TestSecureOzoneCluster {
@Test
public void testSecureScmStartupFailure() throws Exception {
initSCM();
- conf.set(ScmConfig.ConfigStrings.HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY, "");
- conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
- "kerberos");
+ conf.set(HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY, "");
+ conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
LambdaTestUtils.intercept(IOException.class,
"Running in secure mode, but config doesn't have a keytab",
- () -> {
- StorageContainerManager.createSCM(conf);
- });
+ () -> StorageContainerManager.createSCM(conf));
- conf.set(ScmConfig.ConfigStrings.HDDS_SCM_KERBEROS_PRINCIPAL_KEY,
+ conf.set(HDDS_SCM_KERBEROS_PRINCIPAL_KEY,
"scm/_HOST@EXAMPLE.com");
- conf.set(ScmConfig.ConfigStrings.HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY,
+ conf.set(HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY,
"/etc/security/keytabs/scm.keytab");
testCommonKerberosFailures(
@@ -387,27 +354,26 @@ public final class TestSecureOzoneCluster {
}
- private void testCommonKerberosFailures(Callable callable) throws Exception {
- LambdaTestUtils.intercept(KerberosAuthException.class, "failure "
- + "to login: for principal:", callable);
- conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
- "OAuth2");
+ private void testCommonKerberosFailures(Callable<?> test) throws Exception {
+ LambdaTestUtils.intercept(KerberosAuthException.class,
+ "failure to login: for principal:",
+ test);
- LambdaTestUtils.intercept(IllegalArgumentException.class, "Invalid"
- + " attribute value for hadoop.security.authentication of OAuth2",
- callable);
+ String invalidValue = "OAuth2";
+ conf.set(HADOOP_SECURITY_AUTHENTICATION, invalidValue);
+ LambdaTestUtils.intercept(IllegalArgumentException.class,
+ "Invalid attribute value for " + HADOOP_SECURITY_AUTHENTICATION
+ + " of " + invalidValue,
+ test);
- conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
- "KERBEROS_SSL");
+ conf.set(HADOOP_SECURITY_AUTHENTICATION, "KERBEROS_SSL");
LambdaTestUtils.intercept(AuthenticationException.class,
"KERBEROS_SSL authentication method not",
- callable);
+ test);
}
/**
* Tests the secure om Initialization Failure.
- *
- * @throws IOException
*/
@Test
public void testSecureOMInitializationFailure() throws Exception {
@@ -415,23 +381,21 @@ public final class TestSecureOzoneCluster {
// Create a secure SCM instance as om client will connect to it
scm = StorageContainerManager.createSCM(conf);
setupOm(conf);
- conf.set(OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY,
+ conf.set(OZONE_OM_KERBEROS_PRINCIPAL_KEY,
"non-existent-user@EXAMPLE.com");
testCommonKerberosFailures(() -> OzoneManager.createOm(conf));
}
/**
* Tests the secure om Initialization success.
- *
- * @throws IOException
*/
@Test
public void testSecureOmInitializationSuccess() throws Exception {
initSCM();
// Create a secure SCM instance as om client will connect to it
scm = StorageContainerManager.createSCM(conf);
- LogCapturer logs = LogCapturer.captureLogs(OzoneManager.LOG);
- GenericTestUtils.setLogLevel(OzoneManager.LOG, INFO);
+ LogCapturer logs = LogCapturer.captureLogs(OzoneManager.getLogger());
+ GenericTestUtils.setLogLevel(OzoneManager.getLogger(), INFO);
setupOm(conf);
try {
@@ -439,8 +403,7 @@ public final class TestSecureOzoneCluster {
} catch (Exception ex) {
// Expects timeout failure from scmClient in om but om user login via
// kerberos should succeed.
- assertTrue(logs.getOutput().contains("Ozone Manager login"
- + " successful"));
+ assertTrue(logs.getOutput().contains("Ozone Manager login successful"));
}
}
@@ -452,8 +415,6 @@ public final class TestSecureOzoneCluster {
* 4. Delegation token renewal without Kerberos auth fails.
* 5. Test success of token cancellation.
* 5. Test failure of token cancellation.
- *
- * @throws Exception
*/
@Test
public void testDelegationToken() throws Exception {
@@ -495,9 +456,10 @@ public final class TestSecureOzoneCluster {
assertTrue(renewalTime > 0);
// Check if token is of right kind and renewer is running om instance
- Assert.assertEquals(token.getKind().toString(), "OzoneToken");
- Assert.assertEquals(token.getService().toString(),
- OmUtils.getOmRpcAddress(conf));
+ assertNotNull(token);
+ assertEquals("OzoneToken", token.getKind().toString());
+ assertEquals(OmUtils.getOmRpcAddress(conf),
+ token.getService().toString());
omClient.close();
// Create a remote ugi and set its authentication method to Token
@@ -508,16 +470,13 @@ public final class TestSecureOzoneCluster {
UserGroupInformation.setLoginUser(testUser);
// Get Om client, this time authentication should happen via Token
- testUser.doAs(new PrivilegedExceptionAction<Void>() {
- @Override
- public Void run() throws Exception {
- omClient = new OzoneManagerProtocolClientSideTranslatorPB(
- RPC.getProxy(OzoneManagerProtocolPB.class, omVersion,
- OmUtils.getOmAddress(conf), testUser, conf,
- NetUtils.getDefaultSocketFactory(conf), CLIENT_TIMEOUT),
- RandomStringUtils.randomAscii(5));
- return null;
- }
+ testUser.doAs((PrivilegedExceptionAction<Void>) () -> {
+ omClient = new OzoneManagerProtocolClientSideTranslatorPB(
+ RPC.getProxy(OzoneManagerProtocolPB.class, omVersion,
+ OmUtils.getOmAddress(conf), testUser, conf,
+ NetUtils.getDefaultSocketFactory(conf), CLIENT_TIMEOUT),
+ RandomStringUtils.randomAscii(5));
+ return null;
});
// Case 3: Test Client can authenticate using token.
@@ -533,15 +492,10 @@ public final class TestSecureOzoneCluster {
// initial connection via DT succeeded
omLogs.clearOutput();
- LambdaTestUtils.intercept(OMException.class, "INVALID_AUTH_METHOD",
- () -> {
- try {
- omClient.renewDelegationToken(token);
- } catch (OMException ex) {
- assertTrue(ex.getResult().equals(INVALID_AUTH_METHOD));
- throw ex;
- }
- });
+ OMException ex = LambdaTestUtils.intercept(OMException.class,
+ "INVALID_AUTH_METHOD",
+ () -> omClient.renewDelegationToken(token));
+ assertEquals(INVALID_AUTH_METHOD, ex.getResult());
assertTrue(logs.getOutput().contains(
"Auth successful for " + username + " (auth:TOKEN)"));
omLogs.clearOutput();
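For reference, the intercept() refactoring above works because LambdaTestUtils.intercept both asserts that the lambda throws and returns the caught exception for further inspection, removing the nested try/catch. A minimal sketch of the pattern, assuming hadoop-common's test artifact on the classpath; the exception type and message here are illustrative:

    import java.util.concurrent.Callable;
    import org.apache.hadoop.test.LambdaTestUtils;

    public class InterceptSketch {
      public static void main(String[] args) throws Exception {
        // intercept() runs the callable, fails unless the expected exception
        // type (with the given message substring) is thrown, and returns the
        // caught exception so the caller can assert on its state afterwards.
        IllegalStateException ex = LambdaTestUtils.intercept(
            IllegalStateException.class, "not ready",
            (Callable<Void>) () -> {
              throw new IllegalStateException("service not ready");
            });
        System.out.println("caught: " + ex.getMessage());
      }
    }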
@@ -570,17 +524,10 @@ public final class TestSecureOzoneCluster {
OmUtils.getOmAddress(conf), testUser, conf,
NetUtils.getDefaultSocketFactory(conf),
Client.getRpcTimeout(conf)), RandomStringUtils.randomAscii(5));
- LambdaTestUtils.intercept(OMException.class, "Cancel delegation " +
- "token failed",
- () -> {
- try {
- omClient.cancelDelegationToken(token);
- } catch (OMException ex) {
- assertTrue(ex.getResult().equals(TOKEN_ERROR_OTHER));
- throw ex;
- }
- });
-
+ ex = LambdaTestUtils.intercept(OMException.class,
+ "Cancel delegation token failed",
+ () -> omClient.cancelDelegationToken(token));
+ assertEquals(TOKEN_ERROR_OTHER, ex.getResult());
assertTrue(logs.getOutput().contains("Auth failed for"));
} finally {
om.stop();
@@ -588,17 +535,15 @@ public final class TestSecureOzoneCluster {
}
}
- private void generateKeyPair(OzoneConfiguration config) throws Exception {
+ private void generateKeyPair() throws Exception {
HDDSKeyGenerator keyGenerator = new HDDSKeyGenerator(conf);
- keyPair = keyGenerator.generateKey();
- KeyCodec pemWriter = new KeyCodec(new SecurityConfig(config), COMPONENT);
+ KeyPair keyPair = keyGenerator.generateKey();
+ KeyCodec pemWriter = new KeyCodec(new SecurityConfig(conf), COMPONENT);
pemWriter.writeKey(keyPair, true);
}
/**
* Tests delegation token renewal.
- *
- * @throws Exception
*/
@Test
public void testDelegationTokenRenewal() throws Exception {
@@ -608,7 +553,8 @@ public final class TestSecureOzoneCluster {
// Setup secure OM for start.
OzoneConfiguration newConf = new OzoneConfiguration(conf);
- newConf.setLong(OMConfigKeys.DELEGATION_TOKEN_MAX_LIFETIME_KEY, 500);
+ int tokenMaxLifetime = 500;
+ newConf.setLong(DELEGATION_TOKEN_MAX_LIFETIME_KEY, tokenMaxLifetime);
setupOm(newConf);
long omVersion =
RPC.getProtocolVersion(OzoneManagerProtocolPB.class);
@@ -632,9 +578,10 @@ public final class TestSecureOzoneCluster {
new Text("om"));
// Check if token is of right kind and renewer is running om instance
- Assert.assertEquals(token.getKind().toString(), "OzoneToken");
- Assert.assertEquals(token.getService().toString(), OmUtils
- .getOmRpcAddress(conf));
+ assertNotNull(token);
+ assertEquals("OzoneToken", token.getKind().toString());
+ assertEquals(OmUtils.getOmRpcAddress(conf),
+ token.getService().toString());
// Renew delegation token
long expiryTime = omClient.renewDelegationToken(token);
@@ -643,23 +590,18 @@ public final class TestSecureOzoneCluster {
// Test failure of delegation renewal
// 1. When the token's maxExpiryTime is exceeded
- Thread.sleep(500);
- LambdaTestUtils.intercept(OMException.class,
+ Thread.sleep(tokenMaxLifetime);
+ OMException ex = LambdaTestUtils.intercept(OMException.class,
"TOKEN_EXPIRED",
- () -> {
- try {
- omClient.renewDelegationToken(token);
- } catch (OMException ex) {
- assertTrue(ex.getResult().equals(TOKEN_EXPIRED));
- throw ex;
- }
- });
-
+ () -> omClient.renewDelegationToken(token));
+ assertEquals(TOKEN_EXPIRED, ex.getResult());
omLogs.clearOutput();
// 2. When renewer doesn't match (implicitly covers when renewer is
// null or empty)
- Token token2 = omClient.getDelegationToken(new Text("randomService"));
+ Token<OzoneTokenIdentifier> token2 = omClient.getDelegationToken(
+ new Text("randomService"));
+ assertNotNull(token2);
LambdaTestUtils.intercept(OMException.class,
"Delegation token renewal failed",
() -> omClient.renewDelegationToken(token2));
@@ -692,7 +634,7 @@ public final class TestSecureOzoneCluster {
OMStorage omStore = new OMStorage(config);
omStore.setClusterId("testClusterId");
omStore.setScmId("testScmId");
- omStore.setOmCertSerialId(omCertSerialId);
+ omStore.setOmCertSerialId(OM_CERT_SERIAL_ID);
// writes the version file properties
omStore.initialize();
OzoneManager.setTestSecureOmFlag(true);
@@ -721,20 +663,16 @@ public final class TestSecureOzoneCluster {
CLIENT_TIMEOUT), RandomStringUtils.randomAscii(5));
//Creates a secret since it does not exist
- S3SecretValue firstAttempt = omClient
- .getS3Secret(UserGroupInformation.getCurrentUser().getUserName());
+ S3SecretValue attempt1 = omClient.getS3Secret(username);
//Fetches the secret from db since it was created in previous step
- S3SecretValue secondAttempt = omClient
- .getS3Secret(UserGroupInformation.getCurrentUser().getUserName());
+ S3SecretValue attempt2 = omClient.getS3Secret(username);
//secret fetched on both attempts must be same
- assertTrue(firstAttempt.getAwsSecret()
- .equals(secondAttempt.getAwsSecret()));
+ assertEquals(attempt1.getAwsSecret(), attempt2.getAwsSecret());
//access key fetched on both attempts must be same
- assertTrue(firstAttempt.getAwsAccessKey()
- .equals(secondAttempt.getAwsAccessKey()));
+ assertEquals(attempt1.getAwsAccessKey(), attempt2.getAwsAccessKey());
try {
@@ -744,9 +682,7 @@ public final class TestSecureOzoneCluster {
GenericTestUtils.assertExceptionContains("USER_MISMATCH", ex);
}
} finally {
- if(om != null){
- om.stop();
- }
+ IOUtils.closeQuietly(om);
}
}
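The teardown simplification above relies on closeQuietly being null-safe. A tiny sketch, assuming the commons-io IOUtils and with a plain java.io.Closeable standing in for the Ozone services:

    import java.io.Closeable;
    import org.apache.commons.io.IOUtils;

    public class CloseQuietlySketch {
      public static void main(String[] args) {
        Closeable maybeNull = null;
        // No-op on null and swallows IOException, replacing the usual
        // "if (om != null) { om.stop(); }" boilerplate in finally blocks.
        IOUtils.closeQuietly(maybeNull);
        System.out.println("no NullPointerException, no checked exception");
      }
    }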
@@ -759,7 +695,7 @@ public final class TestSecureOzoneCluster {
LogCapturer.captureLogs(OzoneManager.getLogger());
omLogs.clearOutput();
- /**
+ /*
* As all these processes run inside the same JVM, there are issues around
* the Hadoop UGI if different processes run with different principals.
* In this test, the OM has to contact the SCM to download certs. SCM runs
@@ -772,10 +708,10 @@ public final class TestSecureOzoneCluster {
* address this problem.
*/
String realm = miniKdc.getRealm();
- conf.set(OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY,
+ conf.set(OZONE_OM_KERBEROS_PRINCIPAL_KEY,
"scm/" + host + "@" + realm);
omKeyTab = new File(workDir, "scm.keytab");
- conf.set(OMConfigKeys.OZONE_OM_KERBEROS_KEYTAB_FILE_KEY,
+ conf.set(OZONE_OM_KERBEROS_KEYTAB_FILE_KEY,
omKeyTab.getAbsolutePath());
initSCM();
@@ -798,10 +734,10 @@ public final class TestSecureOzoneCluster {
om.stop();
om = OzoneManager.createOm(conf);
- Assert.assertNotNull(om.getCertificateClient());
- Assert.assertNotNull(om.getCertificateClient().getPublicKey());
- Assert.assertNotNull(om.getCertificateClient().getPrivateKey());
- Assert.assertNotNull(om.getCertificateClient().getCertificate());
+ assertNotNull(om.getCertificateClient());
+ assertNotNull(om.getCertificateClient().getPublicKey());
+ assertNotNull(om.getCertificateClient().getPrivateKey());
+ assertNotNull(om.getCertificateClient().getCertificate());
assertTrue(omLogs.getOutput().contains("Init response: GETCERT"));
assertTrue(omLogs.getOutput().contains("Successfully stored " +
"SCM signed certificate"));
@@ -834,10 +770,10 @@ public final class TestSecureOzoneCluster {
OzoneManager.setTestSecureOmFlag(true);
om = OzoneManager.createOm(conf);
- Assert.assertNotNull(om.getCertificateClient());
- Assert.assertNotNull(om.getCertificateClient().getPublicKey());
- Assert.assertNotNull(om.getCertificateClient().getPrivateKey());
- Assert.assertNotNull(om.getCertificateClient().getCertificate());
+ assertNotNull(om.getCertificateClient());
+ assertNotNull(om.getCertificateClient().getPublicKey());
+ assertNotNull(om.getCertificateClient().getPrivateKey());
+ assertNotNull(om.getCertificateClient().getCertificate());
assertTrue(omLogs.getOutput().contains("Init response: GETCERT"));
assertTrue(omLogs.getOutput().contains("Successfully stored " +
"SCM signed certificate"));
@@ -853,10 +789,7 @@ public final class TestSecureOzoneCluster {
if (scm != null) {
scm.stop();
}
- if (om != null) {
- om.stop();
- }
-
+ IOUtils.closeQuietly(om);
}
}
@@ -868,11 +801,11 @@ public final class TestSecureOzoneCluster {
RDN cn = x500Issuer.getRDNs(BCStyle.CN)[0];
String hostName = InetAddress.getLocalHost().getHostName();
String scmUser = "scm@" + hostName;
- Assert.assertEquals(scmUser, cn.getFirst().getValue().toString());
+ assertEquals(scmUser, cn.getFirst().getValue().toString());
// Subject name should be om login user in real world but in this test
// UGI has scm user context.
- Assert.assertEquals(scmUser, cn.getFirst().getValue().toString());
+ assertEquals(scmUser, cn.getFirst().getValue().toString());
LocalDate today = LocalDateTime.now().toLocalDate();
Date invalidDate;
@@ -894,7 +827,7 @@ public final class TestSecureOzoneCluster {
// Verify that certificate matches the public key.
String encodedKey1 = cert.getPublicKey().toString();
String encodedKey2 = om.getCertificateClient().getPublicKey().toString();
- Assert.assertEquals(encodedKey1, encodedKey2);
+ assertEquals(encodedKey1, encodedKey2);
}
private void initializeOmStorage(OMStorage omStorage) throws IOException {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCommitWatcher.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCommitWatcher.java
index 275dc62..344c51e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCommitWatcher.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCommitWatcher.java
@@ -52,6 +52,7 @@ import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
+import static java.util.Collections.singletonList;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
@@ -140,13 +141,11 @@ public class TestCommitWatcher {
XceiverClientRatis ratisClient = (XceiverClientRatis) xceiverClient;
CommitWatcher watcher = new CommitWatcher(bufferPool, ratisClient, 10000);
BlockID blockID = ContainerTestHelper.getTestBlockID(containerId);
- final List<ChunkBuffer> bufferList = new ArrayList<>();
List<XceiverClientReply> replies = new ArrayList<>();
long length = 0;
List<CompletableFuture<ContainerProtos.ContainerCommandResponseProto>>
futures = new ArrayList<>();
for (int i = 0; i < capacity; i++) {
- bufferList.clear();
ContainerProtos.ContainerCommandRequestProto writeChunkRequest =
ContainerTestHelper
.getWriteChunkRequest(pipeline, blockID, chunkSize, null);
@@ -158,7 +157,7 @@ public class TestCommitWatcher {
ContainerTestHelper
.getPutBlockRequest(pipeline, writeChunkRequest.getWriteChunk());
XceiverClientReply reply = ratisClient.sendCommandAsync(putBlockRequest);
- bufferList.add(byteBuffer);
+ final List<ChunkBuffer> bufferList = singletonList(byteBuffer);
length += byteBuffer.position();
CompletableFuture<ContainerProtos.ContainerCommandResponseProto> future =
reply.getResponse().thenApply(v -> {
@@ -216,13 +215,11 @@ public class TestCommitWatcher {
XceiverClientRatis ratisClient = (XceiverClientRatis) xceiverClient;
CommitWatcher watcher = new CommitWatcher(bufferPool, ratisClient, 10000);
BlockID blockID = ContainerTestHelper.getTestBlockID(containerId);
- final List<ChunkBuffer> bufferList = new ArrayList<>();
List<XceiverClientReply> replies = new ArrayList<>();
long length = 0;
List<CompletableFuture<ContainerProtos.ContainerCommandResponseProto>>
futures = new ArrayList<>();
for (int i = 0; i < capacity; i++) {
- bufferList.clear();
ContainerProtos.ContainerCommandRequestProto writeChunkRequest =
ContainerTestHelper
.getWriteChunkRequest(pipeline, blockID, chunkSize, null);
@@ -234,7 +231,7 @@ public class TestCommitWatcher {
ContainerTestHelper
.getPutBlockRequest(pipeline, writeChunkRequest.getWriteChunk());
XceiverClientReply reply = ratisClient.sendCommandAsync(putBlockRequest);
- bufferList.add(byteBuffer);
+ final List<ChunkBuffer> bufferList = singletonList(byteBuffer);
length += byteBuffer.position();
CompletableFuture<ContainerProtos.ContainerCommandResponseProto> future =
reply.getResponse().thenApply(v -> {
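The bufferList change in both tests swaps a shared ArrayList that was cleared each iteration for a fresh Collections.singletonList per iteration, so state captured by earlier replies cannot be mutated later. Note that singletonList is fixed-size; a standalone illustration:

    import java.util.Collections;
    import java.util.List;

    public class SingletonListSketch {
      public static void main(String[] args) {
        List<String> buffers = Collections.singletonList("chunk-0");
        System.out.println(buffers.size()); // always exactly 1
        try {
          buffers.add("chunk-1");
        } catch (UnsupportedOperationException e) {
          // Immutable: callers get a new one-element list per iteration
          // instead of sharing and clearing a mutable one.
          System.out.println("singletonList is immutable");
        }
      }
    }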
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java
index 44a386a..22cc0ad 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
import org.apache.hadoop.hdds.scm.TestUtils;
import org.apache.hadoop.hdds.scm.container.MockNodeManager;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
import org.apache.hadoop.hdds.scm.exceptions.SCMException;
import org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes;
@@ -53,9 +54,12 @@ import org.apache.hadoop.hdds.scm.net.NodeSchema;
import org.apache.hadoop.hdds.scm.net.NodeSchemaManager;
import org.apache.hadoop.hdds.scm.node.NodeManager;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
+import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
import org.apache.hadoop.hdds.scm.server.SCMConfigurator;
import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.OzoneTestUtils;
@@ -102,7 +106,14 @@ import static org.apache.hadoop.hdds.scm.net.NetConstants.LEAF_SCHEMA;
import static org.apache.hadoop.hdds.scm.net.NetConstants.RACK_SCHEMA;
import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT_SCHEMA;
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.SCM_GET_PIPELINE_EXCEPTION;
import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.ALL;
+import static org.mockito.Matchers.anyLong;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
/**
* Test class for @{@link KeyManagerImpl}.
@@ -131,7 +142,7 @@ public class TestKeyManagerImpl {
dir = GenericTestUtils.getRandomizedTestDir();
conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, dir.toString());
conf.set(OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY, "true");
- mockScmBlockLocationProtocol = Mockito.mock(ScmBlockLocationProtocol.class);
+ mockScmBlockLocationProtocol = mock(ScmBlockLocationProtocol.class);
metadataManager = new OmMetadataManagerImpl(conf);
nodeManager = new MockNodeManager(true, 10);
NodeSchema[] schemas = new NodeSchema[]
@@ -329,7 +340,10 @@ public class TestKeyManagerImpl {
.setKeyName(keyName)
.build();
keyManager.createDirectory(keyArgs);
- Assert.assertTrue(keyManager.getFileStatus(keyArgs).isDirectory());
+ OzoneFileStatus fileStatus = keyManager.getFileStatus(keyArgs);
+ Assert.assertTrue(fileStatus.isDirectory());
+ Assert.assertTrue(fileStatus.getKeyInfo().getKeyLocationVersions().get(0)
+ .getLocationList().isEmpty());
}
@Test
@@ -851,6 +865,121 @@ public class TestKeyManagerImpl {
}
}
+ @Test
+ public void testRefreshPipeline() throws Exception {
+
+ MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf).build();
+ cluster.waitForClusterToBeReady();
+ OzoneManager ozoneManager = cluster.getOzoneManager();
+
+ StorageContainerLocationProtocol sclProtocolMock = mock(
+ StorageContainerLocationProtocol.class);
+ ContainerWithPipeline containerWithPipelineMock =
+ mock(ContainerWithPipeline.class);
+ when(containerWithPipelineMock.getPipeline())
+ .thenReturn(getRandomPipeline());
+ when(sclProtocolMock.getContainerWithPipeline(anyLong()))
+ .thenReturn(containerWithPipelineMock);
+
+ ScmClient scmClientMock = mock(ScmClient.class);
+ when(scmClientMock.getContainerClient()).thenReturn(sclProtocolMock);
+
+ OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo("v1",
+ "b1", "k1", ReplicationType.RATIS,
+ ReplicationFactor.THREE);
+
+ // Add block to key.
+ List<OmKeyLocationInfo> omKeyLocationInfoList = new ArrayList<>();
+ Pipeline pipeline = getRandomPipeline();
+
+ OmKeyLocationInfo omKeyLocationInfo =
+ new OmKeyLocationInfo.Builder().setBlockID(
+ new BlockID(100L, 1000L))
+ .setOffset(0).setLength(100L).setPipeline(pipeline).build();
+
+ omKeyLocationInfoList.add(omKeyLocationInfo);
+
+ OmKeyLocationInfo omKeyLocationInfo2 =
+ new OmKeyLocationInfo.Builder().setBlockID(
+ new BlockID(200L, 1000L))
+ .setOffset(0).setLength(100L).setPipeline(pipeline).build();
+ omKeyLocationInfoList.add(omKeyLocationInfo2);
+
+ OmKeyLocationInfo omKeyLocationInfo3 =
+ new OmKeyLocationInfo.Builder().setBlockID(
+ new BlockID(100L, 2000L))
+ .setOffset(0).setLength(100L).setPipeline(pipeline).build();
+ omKeyLocationInfoList.add(omKeyLocationInfo3);
+
+ omKeyInfo.appendNewBlocks(omKeyLocationInfoList, false);
+
+ KeyManagerImpl keyManagerImpl =
+ new KeyManagerImpl(ozoneManager, scmClientMock, conf, "om1");
+
+ keyManagerImpl.refreshPipeline(omKeyInfo);
+
+ verify(sclProtocolMock, times(2)).getContainerWithPipeline(anyLong());
+ verify(sclProtocolMock, times(1)).getContainerWithPipeline(100L);
+ verify(sclProtocolMock, times(1)).getContainerWithPipeline(200L);
+ }
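The new test drives its assertions through Mockito's invocation counting. A minimal sketch of the verify/times idiom, assuming mockito-core on the classpath:

    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.times;
    import static org.mockito.Mockito.verify;

    import java.util.List;

    public class VerifySketch {
      @SuppressWarnings("unchecked")
      public static void main(String[] args) {
        List<String> mockList = mock(List.class);
        mockList.get(0);
        mockList.get(0);
        mockList.get(1);
        // Exact per-argument call counts, as the test above does for
        // container IDs: get(0) happened twice, get(1) once.
        verify(mockList, times(2)).get(0);
        verify(mockList, times(1)).get(1);
      }
    }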
+
+
+ @Test
+ public void testRefreshPipelineException() throws Exception {
+
+ MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf).build();
+ cluster.waitForClusterToBeReady();
+ OzoneManager ozoneManager = cluster.getOzoneManager();
+
+ String errorMessage = "Cannot find container!!";
+ StorageContainerLocationProtocol sclProtocolMock = mock(
+ StorageContainerLocationProtocol.class);
+ doThrow(new IOException(errorMessage)).when(sclProtocolMock)
+ .getContainerWithPipeline(anyLong());
+
+ ScmClient scmClientMock = mock(ScmClient.class);
+ when(scmClientMock.getContainerClient()).thenReturn(sclProtocolMock);
+
+ OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo("v1",
+ "b1", "k1", ReplicationType.RATIS,
+ ReplicationFactor.THREE);
+
+ // Add block to key.
+ List<OmKeyLocationInfo> omKeyLocationInfoList = new ArrayList<>();
+ Pipeline pipeline = getRandomPipeline();
+
+ OmKeyLocationInfo omKeyLocationInfo =
+ new OmKeyLocationInfo.Builder().setBlockID(
+ new BlockID(100L, 1000L))
+ .setOffset(0).setLength(100L).setPipeline(pipeline).build();
+ omKeyLocationInfoList.add(omKeyLocationInfo);
+ omKeyInfo.appendNewBlocks(omKeyLocationInfoList, false);
+
+ KeyManagerImpl keyManagerImpl =
+ new KeyManagerImpl(ozoneManager, scmClientMock, conf, "om1");
+
+ try {
+ keyManagerImpl.refreshPipeline(omKeyInfo);
+ Assert.fail();
+ } catch (OMException omEx) {
+ Assert.assertEquals(SCM_GET_PIPELINE_EXCEPTION, omEx.getResult());
+ Assert.assertEquals(errorMessage, omEx.getMessage());
+ }
+ }
+
+ /**
+ * Gets a random pipeline.
+ * @return pipeline
+ */
+ private Pipeline getRandomPipeline() {
+ return Pipeline.newBuilder()
+ .setState(Pipeline.PipelineState.OPEN)
+ .setId(PipelineID.randomId())
+ .setType(ReplicationType.RATIS)
+ .setFactor(ReplicationFactor.THREE)
+ .setNodes(new ArrayList<>())
+ .build();
+ }
/**
* Creates a depth two directory.
*
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
index 6c06fc8..16e878d 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -122,6 +122,7 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE
import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INTERNAL_ERROR;
import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_KMS_PROVIDER;
import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.SCM_GET_PIPELINE_EXCEPTION;
import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND;
import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
import static org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType.KEY;
@@ -653,37 +654,17 @@ public class KeyManagerImpl implements KeyManager {
}
// Refresh container pipeline info from SCM
// based on OmKeyArgs.refreshPipeline flag
- // 1. Client send initial read request OmKeyArgs.refreshPipeline = false
- // and uses the pipeline cached in OM to access datanode
- // 2. If succeeded, done.
- // 3. If failed due to pipeline does not exist or invalid pipeline state
- // exception, client should retry lookupKey with
- // OmKeyArgs.refreshPipeline = true
if (args.getRefreshPipeline()) {
- for (OmKeyLocationInfoGroup key : value.getKeyLocationVersions()) {
- key.getLocationList().forEach(k -> {
- // TODO: fix Some tests that may not initialize container client
- // The production should always have containerClient initialized.
- if (scmClient.getContainerClient() != null) {
- try {
- ContainerWithPipeline cp = scmClient.getContainerClient()
- .getContainerWithPipeline(k.getContainerID());
- if (!cp.getPipeline().equals(k.getPipeline())) {
- k.setPipeline(cp.getPipeline());
- }
- } catch (IOException e) {
- LOG.error("Unable to update pipeline for container:{}",
- k.getContainerID());
- }
- }
- });
- }
+ refreshPipeline(value);
}
if (args.getSortDatanodes()) {
sortDatanodeInPipeline(value, clientAddress);
}
return value;
} catch (IOException ex) {
+ if (ex instanceof OMException) {
+ throw ex;
+ }
LOG.debug("Get key failed for volume:{} bucket:{} key:{}",
volumeName, bucketName, keyName, ex);
throw new OMException(ex.getMessage(),
@@ -694,6 +675,43 @@ public class KeyManagerImpl implements KeyManager {
}
}
+ /**
+ * Refreshes pipeline information in OM by asking SCM.
+ * @param value OmKeyInfo whose block locations are refreshed
+ */
+ @VisibleForTesting
+ protected void refreshPipeline(OmKeyInfo value) throws IOException {
+ Map<Long, ContainerWithPipeline> containerWithPipelineMap = new HashMap<>();
+ for (OmKeyLocationInfoGroup key : value.getKeyLocationVersions()) {
+ for (OmKeyLocationInfo k : key.getLocationList()) {
+ // TODO: fix Some tests that may not initialize container client
+ // The production should always have containerClient initialized.
+ if (scmClient.getContainerClient() != null) {
+ try {
+ if (!containerWithPipelineMap.containsKey(k.getContainerID())) {
+ ContainerWithPipeline containerWithPipeline = scmClient
+ .getContainerClient()
+ .getContainerWithPipeline(k.getContainerID());
+ containerWithPipelineMap.put(k.getContainerID(),
+ containerWithPipeline);
+ }
+ } catch (IOException ioEx) {
+ LOG.debug("Get containerPipeline failed for volume:{} bucket:{} " +
+ "key:{}", value.getVolumeName(), value.getBucketName(),
+ value.getKeyName(), ioEx);
+ throw new OMException(ioEx.getMessage(),
+ SCM_GET_PIPELINE_EXCEPTION);
+ }
+ ContainerWithPipeline cp =
+ containerWithPipelineMap.get(k.getContainerID());
+ if (!cp.getPipeline().equals(k.getPipeline())) {
+ k.setPipeline(cp.getPipeline());
+ }
+ }
+ }
+ }
+ }
+
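containerWithPipelineMap acts as a per-call memo: each distinct container ID costs at most one SCM round trip, even when a key has many blocks in the same container, which is what the new test's times(...) verifications pin down. The caching idea in isolation, with a hypothetical expensiveLookup standing in for the SCM call:

    import java.util.HashMap;
    import java.util.Map;

    public class LookupCacheSketch {
      // Hypothetical stand-in for
      // scmClient.getContainerClient().getContainerWithPipeline(id).
      static String expensiveLookup(long id) {
        System.out.println("remote lookup for container " + id);
        return "pipeline-for-" + id;
      }

      public static void main(String[] args) {
        Map<Long, String> cache = new HashMap<>();
        long[] blockContainerIds = {100L, 200L, 100L}; // repeat hits the cache
        for (long id : blockContainerIds) {
          String pipeline =
              cache.computeIfAbsent(id, LookupCacheSketch::expensiveLookup);
          System.out.println(id + " -> " + pipeline);
        }
        // Only two "remote lookup" lines are printed for three blocks.
      }
    }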
@Override
public void renameKey(OmKeyArgs args, String toKeyName) throws IOException {
Preconditions.checkNotNull(args);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ha/OMHANodeDetails.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ha/OMHANodeDetails.java
index 8d9e709..bdfd69f 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ha/OMHANodeDetails.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ha/OMHANodeDetails.java
@@ -30,9 +30,11 @@ import org.slf4j.LoggerFactory;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.Collection;
+import java.util.Collections;
import java.util.List;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_INTERNAL_SERVICE_ID;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_NODES_KEY;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_NODE_ID_KEY;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_PORT_DEFAULT;
@@ -91,8 +93,23 @@ public class OMHANodeDetails {
String localOMServiceId = null;
String localOMNodeId = null;
int localRatisPort = 0;
- Collection<String> omServiceIds = conf.getTrimmedStringCollection(
- OZONE_OM_SERVICE_IDS_KEY);
+
+ Collection<String> omServiceIds;
+
+ localOMServiceId = conf.getTrimmed(OZONE_OM_INTERNAL_SERVICE_ID);
+
+ if (localOMServiceId == null) {
+ // No internal OM service id is set; fall back to ozone.om.service.ids.
+ LOG.info(OZONE_OM_INTERNAL_SERVICE_ID + " is not defined, falling back " +
+ "to " + OZONE_OM_SERVICE_IDS_KEY + " to find the serviceID for " +
+ "OzoneManager in an HA-enabled cluster");
+ omServiceIds = conf.getTrimmedStringCollection(
+ OZONE_OM_SERVICE_IDS_KEY);
+ } else {
+ LOG.info("ServiceID for OzoneManager is {}", localOMServiceId);
+ omServiceIds = Collections.singletonList(localOMServiceId);
+ }
String knownOMNodeId = conf.get(OZONE_OM_NODE_ID_KEY);
int found = 0;
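The resolution order above prefers the node-local internal service id and only falls back to enumerating the cluster-wide service ids. A sketch with a plain Hadoop Configuration; the literal key strings are the conventional values of the two constants and should be treated as assumptions here:

    import java.util.Collection;
    import java.util.Collections;
    import org.apache.hadoop.conf.Configuration;

    public class ServiceIdFallbackSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration(false);
        conf.set("ozone.om.service.ids", "omservice1,omservice2");
        // Uncomment to short-circuit the fallback:
        // conf.set("ozone.om.internal.service.id", "omservice1");

        String internalId = conf.getTrimmed("ozone.om.internal.service.id");
        Collection<String> candidates = (internalId == null)
            ? conf.getTrimmedStringCollection("ozone.om.service.ids")
            : Collections.singletonList(internalId);
        System.out.println("candidate service ids: " + candidates);
      }
    }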
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/GetKeyHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/GetKeyHandler.java
index fec789a..1eb91d5 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/GetKeyHandler.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/GetKeyHandler.java
@@ -21,8 +21,8 @@ package org.apache.hadoop.ozone.web.ozShell.keys;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
-import java.nio.file.Path;
-import java.nio.file.Paths;
+import java.io.InputStream;
+import java.io.OutputStream;
import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -31,7 +31,6 @@ import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientException;
import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.ozone.client.io.OzoneInputStream;
import org.apache.hadoop.ozone.web.ozShell.Handler;
import org.apache.hadoop.ozone.web.ozShell.OzoneAddress;
import org.apache.hadoop.ozone.web.ozShell.Shell;
@@ -39,6 +38,7 @@ import org.apache.hadoop.ozone.web.ozShell.Shell;
import org.apache.commons.codec.digest.DigestUtils;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_DEFAULT;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY;
+
import picocli.CommandLine.Command;
import picocli.CommandLine.Parameters;
@@ -65,8 +65,10 @@ public class GetKeyHandler extends Handler {
OzoneAddress address = new OzoneAddress(uri);
address.ensureKeyAddress();
- try (OzoneClient client =
- address.createClient(createOzoneConfiguration())) {
+
+ OzoneConfiguration conf = createOzoneConfiguration();
+
+ try (OzoneClient client = address.createClient(conf)) {
String volumeName = address.getVolumeName();
String bucketName = address.getBucketName();
@@ -78,7 +80,6 @@ public class GetKeyHandler extends Handler {
System.out.printf("Key Name : %s%n", keyName);
}
- Path dataFilePath = Paths.get(fileName);
File dataFile = new File(fileName);
if (dataFile.exists() && dataFile.isDirectory()) {
@@ -86,30 +87,25 @@ public class GetKeyHandler extends Handler {
}
if (dataFile.exists()) {
- throw new OzoneClientException(
- fileName + " exists. Download will overwrite an "
- + "existing file. Aborting.");
+ throw new OzoneClientException(dataFile.getPath() + " exists."
+ + " Download would overwrite an existing file. Aborting.");
}
+ int chunkSize = (int) conf.getStorageSize(OZONE_SCM_CHUNK_SIZE_KEY,
+ OZONE_SCM_CHUNK_SIZE_DEFAULT, StorageUnit.BYTES);
+
OzoneVolume vol = client.getObjectStore().getVolume(volumeName);
OzoneBucket bucket = vol.getBucket(bucketName);
- OzoneInputStream keyInputStream = bucket.readKey(keyName);
- if (dataFilePath != null) {
- FileOutputStream outputStream = new FileOutputStream(dataFile);
- IOUtils.copyBytes(keyInputStream, outputStream,
- (int) new OzoneConfiguration()
- .getStorageSize(OZONE_SCM_CHUNK_SIZE_KEY,
- OZONE_SCM_CHUNK_SIZE_DEFAULT, StorageUnit.BYTES));
- outputStream.close();
- } else {
- throw new OzoneClientException(
- "Can not access the file \"" + fileName + "\"");
+ try (OutputStream output = new FileOutputStream(dataFile);
+ InputStream input = bucket.readKey(keyName)) {
+ IOUtils.copyBytes(input, output, chunkSize);
}
+
if (isVerbose()) {
- FileInputStream stream = new FileInputStream(dataFile);
- String hash = DigestUtils.md5Hex(stream);
- System.out.printf("Downloaded file hash : %s%n", hash);
- stream.close();
+ try (InputStream stream = new FileInputStream(dataFile)) {
+ String hash = DigestUtils.md5Hex(stream);
+ System.out.printf("Downloaded file hash : %s%n", hash);
+ }
}
}
return null;
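The download path now wraps both streams in try-with-resources and reads the chunk size from the handler's own configuration. The bare copy idiom, file-to-file with a hard-coded buffer size for illustration, using Hadoop's IOUtils.copyBytes:

    import java.io.FileInputStream;
    import java.io.FileOutputStream;
    import java.io.IOException;
    import java.io.InputStream;
    import java.io.OutputStream;
    import org.apache.hadoop.io.IOUtils;

    public class StreamCopySketch {
      public static void main(String[] args) throws IOException {
        try (InputStream input = new FileInputStream("src.bin");
             OutputStream output = new FileOutputStream("dst.bin")) {
          // Both streams are closed, in reverse order, even if the copy
          // throws; the old manual close() calls leaked on failure.
          IOUtils.copyBytes(input, output, 4096);
        }
      }
    }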
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/PutKeyHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/PutKeyHandler.java
index 2829dd6..7bde7f8 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/PutKeyHandler.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/PutKeyHandler.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.ozone.web.ozShell.keys;
import java.io.File;
import java.io.FileInputStream;
+import java.io.InputStream;
+import java.io.OutputStream;
import java.util.HashMap;
import java.util.Map;
@@ -33,7 +35,6 @@ import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
import org.apache.hadoop.ozone.web.ozShell.Handler;
import org.apache.hadoop.ozone.web.ozShell.OzoneAddress;
import org.apache.hadoop.ozone.web.ozShell.Shell;
@@ -72,9 +73,9 @@ public class PutKeyHandler extends Handler {
*/
@Override
public Void call() throws Exception {
-
OzoneAddress address = new OzoneAddress(uri);
address.ensureKeyAddress();
+
try (OzoneClient client =
address.createClient(createOzoneConfiguration())) {
@@ -91,10 +92,10 @@ public class PutKeyHandler extends Handler {
File dataFile = new File(fileName);
if (isVerbose()) {
- FileInputStream stream = new FileInputStream(dataFile);
- String hash = DigestUtils.md5Hex(stream);
- System.out.printf("File Hash : %s%n", hash);
- stream.close();
+ try (InputStream stream = new FileInputStream(dataFile)) {
+ String hash = DigestUtils.md5Hex(stream);
+ System.out.printf("File Hash : %s%n", hash);
+ }
}
Configuration conf = new OzoneConfiguration();
@@ -107,20 +108,20 @@ public class PutKeyHandler extends Handler {
conf.get(OZONE_REPLICATION_TYPE, OZONE_REPLICATION_TYPE_DEFAULT));
OzoneVolume vol = client.getObjectStore().getVolume(volumeName);
OzoneBucket bucket = vol.getBucket(bucketName);
+
Map<String, String> keyMetadata = new HashMap<>();
- if (Boolean.valueOf(bucket.getMetadata().get(OzoneConsts.GDPR_FLAG))) {
+ String gdprEnabled = bucket.getMetadata().get(OzoneConsts.GDPR_FLAG);
+ if (Boolean.parseBoolean(gdprEnabled)) {
keyMetadata.put(OzoneConsts.GDPR_FLAG, Boolean.TRUE.toString());
}
- OzoneOutputStream outputStream = bucket
- .createKey(keyName, dataFile.length(), replicationType,
- replicationFactor, keyMetadata);
- FileInputStream fileInputStream = new FileInputStream(dataFile);
- IOUtils.copyBytes(fileInputStream, outputStream, (int) conf
- .getStorageSize(OZONE_SCM_CHUNK_SIZE_KEY,
- OZONE_SCM_CHUNK_SIZE_DEFAULT,
- StorageUnit.BYTES));
- outputStream.close();
- fileInputStream.close();
+
+ int chunkSize = (int) conf.getStorageSize(OZONE_SCM_CHUNK_SIZE_KEY,
+ OZONE_SCM_CHUNK_SIZE_DEFAULT, StorageUnit.BYTES);
+ try (InputStream input = new FileInputStream(dataFile);
+ OutputStream output = bucket.createKey(keyName, dataFile.length(),
+ replicationType, replicationFactor, keyMetadata)) {
+ IOUtils.copyBytes(input, output, chunkSize);
+ }
}
return null;
}
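The GDPR-flag check now uses Boolean.parseBoolean, which returns a primitive and tolerates a missing metadata entry. A two-line illustration:

    public class GdprFlagSketch {
      public static void main(String[] args) {
        String missing = null; // e.g. bucket metadata with no GDPR entry
        System.out.println(Boolean.parseBoolean(missing)); // false, no NPE
        System.out.println(Boolean.parseBoolean("TRUE"));  // true, case-insensitive
      }
    }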
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultiDeleteRequestUnmarshaller.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultiDeleteRequestUnmarshaller.java
index e8ed515..f5745a8 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultiDeleteRequestUnmarshaller.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultiDeleteRequestUnmarshaller.java
@@ -1,4 +1,4 @@
-/**
+/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -23,10 +23,10 @@ import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.MultivaluedMap;
import javax.ws.rs.ext.MessageBodyReader;
import javax.ws.rs.ext.Provider;
+import javax.xml.XMLConstants;
import javax.xml.bind.JAXBContext;
import javax.xml.bind.UnmarshallerHandler;
import javax.xml.parsers.SAXParserFactory;
-import java.io.IOException;
import java.io.InputStream;
import java.lang.annotation.Annotation;
import java.lang.reflect.Type;
@@ -49,6 +49,7 @@ public class MultiDeleteRequestUnmarshaller
try {
context = JAXBContext.newInstance(MultiDeleteRequest.class);
SAXParserFactory saxParserFactory = SAXParserFactory.newInstance();
+ saxParserFactory.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true);
xmlReader = saxParserFactory.newSAXParser().getXMLReader();
} catch (Exception ex) {
throw new AssertionError("Can't instantiate MultiDeleteRequest parser",
@@ -65,8 +66,7 @@ public class MultiDeleteRequestUnmarshaller
@Override
public MultiDeleteRequest readFrom(Class<MultiDeleteRequest> type,
Type genericType, Annotation[] annotations, MediaType mediaType,
- MultivaluedMap<String, String> httpHeaders, InputStream entityStream)
- throws IOException, WebApplicationException {
+ MultivaluedMap<String, String> httpHeaders, InputStream entityStream) {
try {
UnmarshallerHandler unmarshallerHandler =
context.createUnmarshaller().getUnmarshallerHandler();
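The added setFeature call enables the JAXP secure-processing limits before the XMLReader is created, hardening the S3 endpoint against XXE-style payloads in untrusted request bodies. The feature toggle in isolation:

    import javax.xml.XMLConstants;
    import javax.xml.parsers.SAXParserFactory;

    public class SecureSaxSketch {
      public static void main(String[] args) throws Exception {
        SAXParserFactory factory = SAXParserFactory.newInstance();
        // Constrains entity expansion and external resource access
        // during parsing.
        factory.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true);
        System.out.println(
            factory.getFeature(XMLConstants.FEATURE_SECURE_PROCESSING));
      }
    }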
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/RangeHeaderParserUtil.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/RangeHeaderParserUtil.java
index b1b61cc..2e1b86c 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/RangeHeaderParserUtil.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/RangeHeaderParserUtil.java
@@ -50,12 +50,12 @@ public final class RangeHeaderParserUtil {
Matcher matcher = RANGE_HEADER_MATCH_PATTERN.matcher(rangeHeaderVal);
if (matcher.matches()) {
if (!matcher.group("start").equals("")) {
- start = Integer.parseInt(matcher.group("start"));
+ start = Long.parseLong(matcher.group("start"));
} else {
noStart = true;
}
if (!matcher.group("end").equals("")) {
- end = Integer.parseInt(matcher.group("end"));
+ end = Long.parseLong(matcher.group("end"));
} else {
end = length - 1;
}
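The parseInt-to-parseLong switch matters for objects larger than 2 GiB: a byte offset in a Range header can exceed Integer.MAX_VALUE, which the new test case added below exercises. A one-method demonstration:

    public class RangeOverflowSketch {
      public static void main(String[] args) {
        String offset = "3977248768"; // > Integer.MAX_VALUE (2147483647)
        // Integer.parseInt(offset) would throw NumberFormatException here;
        // Long.parseLong covers the full range of S3 byte offsets.
        System.out.println(Long.parseLong(offset));
      }
    }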
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestRangeHeaderParserUtil.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestRangeHeaderParserUtil.java
index 03c91bf..439b8b0 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestRangeHeaderParserUtil.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestRangeHeaderParserUtil.java
@@ -86,7 +86,11 @@ public class TestRangeHeaderParserUtil {
assertEquals(9, rangeHeader.getEndOffset());
assertEquals(false, rangeHeader.isInValidRange());
-
+ rangeHeader = RangeHeaderParserUtil.parseRangeHeader("bytes=3977248768" +
+ "-4977248768", 4977248769L);
+ assertEquals(3977248768L, rangeHeader.getStartOffset());
+ assertEquals(4977248768L, rangeHeader.getEndOffset());
+ assertEquals(false, rangeHeader.isInValidRange());
}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/common/DatabaseHelper.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/common/DatabaseHelper.java
index a7282b2..a215a9e 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/common/DatabaseHelper.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/common/DatabaseHelper.java
@@ -16,17 +16,30 @@
*/
package org.apache.hadoop.ozone.audit.parser.common;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.hadoop.ozone.audit.parser.model.AuditEntry;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.*;
-import java.sql.*;
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+import java.io.BufferedReader;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.ResultSetMetaData;
+import java.sql.Statement;
import java.util.ArrayList;
import java.util.Map;
import java.util.Properties;
import java.util.stream.Collectors;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
+
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.ozone.audit.parser.model.AuditEntry;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Database helper for ozone audit parser tool.
@@ -42,28 +55,17 @@ public final class DatabaseHelper {
LoggerFactory.getLogger(DatabaseHelper.class);
private static Map<String, String> properties;
- public static boolean setup(String dbName, String logs) {
- //loadProperties();
- if(createAuditTable(dbName)) {
+ public static boolean setup(String dbName, String logs) throws Exception {
+ if (createAuditTable(dbName)) {
return insertAudits(dbName, logs);
} else {
return false;
}
}
- private static Connection getConnection(String dbName) {
-
- Connection connection = null;
- try{
- Class.forName(ParserConsts.DRIVER);
- connection = DriverManager.getConnection(
- ParserConsts.CONNECTION_PREFIX + dbName);
- } catch (ClassNotFoundException e) {
- LOG.error(e.getMessage());
- } catch (SQLException e) {
- LOG.error(e.getMessage());
- }
- return connection;
+ private static Connection getConnection(String dbName) throws Exception {
+ Class.forName(ParserConsts.DRIVER);
+ return DriverManager.getConnection(ParserConsts.CONNECTION_PREFIX + dbName);
}
private static void loadProperties() {
@@ -89,32 +91,25 @@ public final class DatabaseHelper {
}
- private static boolean createAuditTable(String dbName) {
-
- try(Connection connection = getConnection(dbName);
- Statement st = connection.createStatement()) {
-
+ private static boolean createAuditTable(String dbName) throws Exception {
+ try (Connection connection = getConnection(dbName);
+ Statement st = connection.createStatement()) {
st.executeUpdate(properties.get(ParserConsts.CREATE_AUDIT_TABLE));
- } catch (SQLException e) {
- LOG.error(e.getMessage());
- return false;
}
return true;
}
- private static boolean insertAudits(String dbName, String logs) {
-
- try(Connection connection = getConnection(dbName);
- PreparedStatement preparedStatement = connection.prepareStatement(
- properties.get(ParserConsts.INSERT_AUDITS))) {
-
+ @SuppressFBWarnings("REC_CATCH_EXCEPTION")
+ private static boolean insertAudits(String dbName, String logs)
+ throws Exception {
+ try (Connection connection = getConnection(dbName);
+ PreparedStatement preparedStatement = connection.prepareStatement(
+ properties.get(ParserConsts.INSERT_AUDITS))) {
ArrayList<AuditEntry> auditEntries = parseAuditLogs(logs);
-
final int batchSize = 1000;
int count = 0;
-
//Insert list to db
- for(AuditEntry audit : auditEntries) {
+ for (AuditEntry audit : auditEntries) {
preparedStatement.setString(1, audit.getTimestamp());
preparedStatement.setString(2, audit.getLevel());
preparedStatement.setString(3, audit.getLogger());
@@ -124,45 +119,39 @@ public final class DatabaseHelper {
preparedStatement.setString(7, audit.getParams());
preparedStatement.setString(8, audit.getResult());
preparedStatement.setString(9, audit.getException());
-
preparedStatement.addBatch();
-
- if(++count % batchSize == 0) {
+ if (++count % batchSize == 0) {
preparedStatement.executeBatch();
}
}
- if(auditEntries.size() > 0) {
+ if (!auditEntries.isEmpty()) {
preparedStatement.executeBatch(); // insert remaining records
}
- } catch (Exception e) {
- LOG.error(e.getMessage());
- return false;
}
return true;
}
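insertAudits keeps the standard JDBC batching shape: queue rows with addBatch, flush every batchSize rows, then flush the remainder. A compact standalone sketch; the in-memory SQLite URL assumes a driver on the classpath, as the audit parser itself does:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.PreparedStatement;
    import java.sql.Statement;

    public class BatchInsertSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn =
                 DriverManager.getConnection("jdbc:sqlite::memory:");
             Statement st = conn.createStatement()) {
          st.executeUpdate("CREATE TABLE audit (entry TEXT)");
          final int batchSize = 1000;
          int count = 0;
          try (PreparedStatement ps =
                   conn.prepareStatement("INSERT INTO audit VALUES (?)")) {
            for (int i = 0; i < 2500; i++) {
              ps.setString(1, "entry-" + i);
              ps.addBatch();
              if (++count % batchSize == 0) {
                ps.executeBatch(); // flush each full batch
              }
            }
            ps.executeBatch(); // flush the trailing partial batch
          }
        }
      }
    }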
+ @SuppressWarnings("squid:S3776")
private static ArrayList<AuditEntry> parseAuditLogs(String filePath)
- throws Exception {
- ArrayList<AuditEntry> listResult = new ArrayList<AuditEntry>();
- try(FileInputStream fis = new FileInputStream(filePath);
- InputStreamReader isr = new InputStreamReader(fis, "UTF-8");
+ throws IOException {
+ ArrayList<AuditEntry> listResult = new ArrayList<>();
+ try (FileInputStream fis = new FileInputStream(filePath);
+ InputStreamReader isr = new InputStreamReader(fis, UTF_8);
BufferedReader bReader = new BufferedReader(isr)) {
- String currentLine = null;
- String[] entry = null;
+ String currentLine = bReader.readLine();
+ String nextLine = bReader.readLine();
+ String[] entry;
AuditEntry tempEntry = null;
- String nextLine = null;
- currentLine = bReader.readLine();
- nextLine = bReader.readLine();
- while(true) {
- if(tempEntry == null){
+ while (true) {
+ if (tempEntry == null) {
tempEntry = new AuditEntry();
}
- if(currentLine == null) {
+ if (currentLine == null) {
break;
} else {
- if(!currentLine.matches(ParserConsts.DATE_REGEX)){
+ if (!currentLine.matches(ParserConsts.DATE_REGEX)) {
tempEntry.appendException(currentLine);
} else {
entry = StringUtils.stripAll(currentLine.split("\\|"));
@@ -178,11 +167,11 @@ public final class DatabaseHelper {
.setParams(ops[1])
.setResult(entry[6].substring(entry[6].indexOf('=') + 1))
.build();
- if(entry.length == 8){
+ if (entry.length == 8) {
tempEntry.setException(entry[7]);
}
}
- if(nextLine == null || nextLine.matches(ParserConsts.DATE_REGEX)){
+ if (nextLine == null || nextLine.matches(ParserConsts.DATE_REGEX)) {
listResult.add(tempEntry);
tempEntry = null;
}
@@ -190,50 +179,38 @@ public final class DatabaseHelper {
nextLine = bReader.readLine();
}
}
- } catch (RuntimeException rx) {
- throw rx;
- } catch (Exception ex) {
- throw ex;
}
return listResult;
}
public static String executeCustomQuery(String dbName, String query)
- throws SQLException {
+ throws Exception {
return executeStatement(dbName, query);
}
public static String executeTemplate(String dbName, String template)
- throws SQLException {
- return executeStatement(dbName,
- properties.get(template));
+ throws Exception {
+ return executeStatement(dbName, properties.get(template));
}
private static String executeStatement(String dbName, String sql)
- throws SQLException {
+ throws Exception {
StringBuilder result = new StringBuilder();
- ResultSet rs = null;
- Statement st = null;
- ResultSetMetaData rsm = null;
- try(Connection connection = getConnection(dbName)) {
- //loadProperties();
-
- if(connection != null){
- st = connection.createStatement();
- rs = st.executeQuery(sql);
- if(rs != null) {
- rsm = rs.getMetaData();
- int cols = rsm.getColumnCount();
- while(rs.next()){
- for(int index =1; index<=cols; index++){
- result.append(rs.getObject(index) + "\t");
- }
- result.append("\n");
+ ResultSetMetaData rsm;
+ try (Connection connection = getConnection(dbName);
+ Statement st = connection.createStatement();
+ ResultSet rs = st.executeQuery(sql)) {
+ if (rs != null) {
+ rsm = rs.getMetaData();
+ int cols = rsm.getColumnCount();
+ while (rs.next()) {
+ for (int index = 1; index <= cols; index++) {
+ result.append(rs.getObject(index));
+ result.append("\t");
}
+ result.append("\n");
}
- st.close();
- rs.close();
}
}
return result.toString();
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/handler/LoadCommandHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/handler/LoadCommandHandler.java
index ec67bad..c5e28a4 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/handler/LoadCommandHandler.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/handler/LoadCommandHandler.java
@@ -16,14 +16,15 @@
*/
package org.apache.hadoop.ozone.audit.parser.handler;
+import java.util.concurrent.Callable;
+
import org.apache.hadoop.hdds.cli.HddsVersionProvider;
import org.apache.hadoop.ozone.audit.parser.AuditParser;
import org.apache.hadoop.ozone.audit.parser.common.DatabaseHelper;
-import picocli.CommandLine.*;
+
import picocli.CommandLine.Command;
import picocli.CommandLine.Parameters;
-
-import java.util.concurrent.Callable;
+import picocli.CommandLine.ParentCommand;
/**
* Load command handler for ozone audit parser.
@@ -41,8 +42,8 @@ public class LoadCommandHandler implements Callable<Void> {
@ParentCommand
private AuditParser auditParser;
- public Void call() {
- if(DatabaseHelper.setup(auditParser.getDatabase(), logs)) {
+ public Void call() throws Exception {
+ if (DatabaseHelper.setup(auditParser.getDatabase(), logs)) {
System.out.println(logs + " has been loaded successfully");
} else {
System.out.println("Failed to load " + logs);
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/handler/QueryCommandHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/handler/QueryCommandHandler.java
index fe083ca..461026e 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/handler/QueryCommandHandler.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/handler/QueryCommandHandler.java
@@ -16,15 +16,16 @@
*/
package org.apache.hadoop.ozone.audit.parser.handler;
+import java.sql.SQLException;
+import java.util.concurrent.Callable;
+
import org.apache.hadoop.hdds.cli.HddsVersionProvider;
import org.apache.hadoop.ozone.audit.parser.AuditParser;
import org.apache.hadoop.ozone.audit.parser.common.DatabaseHelper;
-import picocli.CommandLine.*;
+
import picocli.CommandLine.Command;
import picocli.CommandLine.Parameters;
-
-import java.sql.SQLException;
-import java.util.concurrent.Callable;
+import picocli.CommandLine.ParentCommand;
/**
* Custom query command handler for ozone audit parser.
@@ -44,7 +45,7 @@ public class QueryCommandHandler implements Callable<Void> {
@ParentCommand
private AuditParser auditParser;
- public Void call() {
+ public Void call() throws Exception {
try {
System.out.println(
DatabaseHelper.executeCustomQuery(auditParser.getDatabase(), query)
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/handler/TemplateCommandHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/handler/TemplateCommandHandler.java
index 9b97ee9..4e726ec 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/handler/TemplateCommandHandler.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/handler/TemplateCommandHandler.java
@@ -16,15 +16,16 @@
*/
package org.apache.hadoop.ozone.audit.parser.handler;
+import java.sql.SQLException;
+import java.util.concurrent.Callable;
+
import org.apache.hadoop.hdds.cli.HddsVersionProvider;
import org.apache.hadoop.ozone.audit.parser.AuditParser;
import org.apache.hadoop.ozone.audit.parser.common.DatabaseHelper;
-import picocli.CommandLine.*;
+
import picocli.CommandLine.Command;
import picocli.CommandLine.Parameters;
-
-import java.sql.SQLException;
-import java.util.concurrent.Callable;
+import picocli.CommandLine.ParentCommand;
/**
* Template command handler for ozone audit parser.
@@ -42,7 +43,7 @@ public class TemplateCommandHandler implements Callable<Void> {
@ParentCommand
private AuditParser auditParser;
- public Void call() {
+ public Void call() throws Exception {
try {
if(DatabaseHelper.validateTemplate(template)) {
System.out.println(
diff --git a/pom.xml b/pom.xml
index 4c03e2b..d6e2da6 100644
--- a/pom.xml
+++ b/pom.xml
@@ -229,6 +229,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
<snakeyaml.version>1.16</snakeyaml.version>
<hbase.one.version>1.2.6</hbase.one.version>
<hbase.two.version>2.0.0-beta-1</hbase.two.version>
+ <sonar.java.binaries>${basedir}/target/classes</sonar.java.binaries>
</properties>