You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ozone.apache.org by xy...@apache.org on 2020/03/06 22:17:13 UTC
[hadoop-ozone] branch master updated: HDDS-3071. Datanodes unable
to connect to recon in Secure Environment (#629)
This is an automated email from the ASF dual-hosted git repository.
xyao pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git
The following commit(s) were added to refs/heads/master by this push:
new dc8bfc2 HDDS-3071. Datanodes unable to connect to recon in Secure Environment (#629)
dc8bfc2 is described below
commit dc8bfc261e36f9898122ca06e62f5c890e867a5e
Author: avijayanhwx <14...@users.noreply.github.com>
AuthorDate: Fri Mar 6 14:17:06 2020 -0800
HDDS-3071. Datanodes unable to connect to recon in Secure Environment (#629)
---
.../org/apache/hadoop/hdds/recon/ReconConfig.java | 93 +++++++++++++++++
.../apache/hadoop/hdds/recon/ReconConfigKeys.java | 4 -
.../org/apache/hadoop/hdds/conf/ConfigTag.java | 3 +-
.../common/statemachine/SCMConnectionManager.java | 64 ++++++++----
.../ozone/protocol/ReconDatanodeProtocol.java | 33 ++++++
.../ozone/protocolPB/ReconDatanodeProtocolPB.java | 38 +++++++
.../hdds/scm/server/SCMDatanodeProtocolServer.java | 114 ++++++++++++++++-----
.../dist/src/main/compose/ozonesecure/test.sh | 3 +-
.../dist/src/main/smoketest/recon/recon-api.robot | 7 +-
hadoop-ozone/recon/pom.xml | 5 +
.../org/apache/hadoop/ozone/recon/ReconServer.java | 30 +++---
.../ozone/recon/scm/ReconContainerManager.java | 30 +++++-
.../recon/scm/ReconContainerReportHandler.java | 29 ++----
.../recon/scm/ReconDatanodeProtocolServer.java | 27 ++++-
.../ReconIncrementalContainerReportHandler.java | 31 ++----
.../ozone/recon/scm/ReconPolicyProvider.java | 66 ++++++++++++
.../scm/ReconStorageContainerManagerFacade.java | 8 +-
.../scm/AbstractReconContainerManagerTest.java | 38 ++++++-
.../ozone/recon/scm/TestReconContainerManager.java | 16 +++
...TestReconIncrementalContainerReportHandler.java | 36 +------
20 files changed, 517 insertions(+), 158 deletions(-)
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/recon/ReconConfig.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/recon/ReconConfig.java
new file mode 100644
index 0000000..49e7836
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/recon/ReconConfig.java
@@ -0,0 +1,93 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.recon;
+
+import org.apache.hadoop.hdds.conf.Config;
+import org.apache.hadoop.hdds.conf.ConfigGroup;
+import org.apache.hadoop.hdds.conf.ConfigTag;
+import org.apache.hadoop.hdds.conf.ConfigType;
+
+/**
+ * The configuration class for the Recon service.
+ */
+@ConfigGroup(prefix = "ozone.recon")
+public class ReconConfig {
+
+ @Config(key = "kerberos.principal",
+ type = ConfigType.STRING,
+ defaultValue = "",
+ tags = { ConfigTag.SECURITY, ConfigTag.RECON, ConfigTag.OZONE },
+ description = "This Kerberos principal is used by the Recon service."
+ )
+ private String principal;
+
+ @Config(key = "kerberos.keytab.file",
+ type = ConfigType.STRING,
+ defaultValue = "",
+ tags = { ConfigTag.SECURITY, ConfigTag.RECON, ConfigTag.OZONE },
+ description = "The keytab file used by Recon daemon to login as "+
+ "its service principal."
+ )
+ private String keytab;
+
+ @Config(key = "security.client.datanode.container.protocol.acl",
+ type = ConfigType.STRING,
+ defaultValue = "*",
+ tags = { ConfigTag.SECURITY, ConfigTag.RECON, ConfigTag.OZONE },
+ description = "Comma separated acls (users, groups) allowing clients " +
+ "accessing datanode container protocol"
+ )
+ private String acl;
+
+ public void setKerberosPrincipal(String kerberosPrincipal) {
+ this.principal = kerberosPrincipal;
+ }
+
+ public void setKerberosKeytab(String kerberosKeytab) {
+ this.keytab = kerberosKeytab;
+ }
+
+ public String getKerberosPrincipal() {
+ return this.principal;
+ }
+
+ public String getKerberosKeytab() {
+ return this.keytab;
+ }
+
+ public String getAcl() {
+ return acl;
+ }
+
+ public void setAcl(String acl) {
+ this.acl = acl;
+ }
+
+ /**
+ * Config Keys for Recon.
+ */
+ public static class ConfigStrings {
+ public static final String OZONE_RECON_KERBEROS_PRINCIPAL_KEY =
+ "ozone.recon.kerberos.principal";
+ public static final String OZONE_RECON_KERBEROS_KEYTAB_FILE_KEY =
+ "ozone.recon.kerberos.keytab.file";
+ public static final String
+ OZONE_RECON_SECURITY_CLIENT_DATANODE_CONTAINER_PROTOCOL_ACL =
+ "ozone.recon.security.client.datanode.container.protocol.acl";
+ }
+}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/recon/ReconConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/recon/ReconConfigKeys.java
index e184c61..c0b1d5d 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/recon/ReconConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/recon/ReconConfigKeys.java
@@ -41,8 +41,4 @@ public final class ReconConfigKeys {
public static final String OZONE_RECON_DATANODE_BIND_HOST_DEFAULT =
"0.0.0.0";
public static final int OZONE_RECON_DATANODE_PORT_DEFAULT = 9891;
- public static final String OZONE_RECON_KERBEROS_KEYTAB_FILE_KEY =
- "ozone.recon.kerberos.keytab.file";
- public static final String OZONE_RECON_KERBEROS_PRINCIPAL_KEY =
- "ozone.recon.kerberos.principal";
}
diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigTag.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigTag.java
index b969a68..385840a 100644
--- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigTag.java
+++ b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigTag.java
@@ -41,5 +41,6 @@ public enum ConfigTag {
PIPELINE,
STANDALONE,
S3GATEWAY,
- DATANODE
+ DATANODE,
+ RECON
}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManager.java
index 31f3064..814eeb4 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManager.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManager.java
@@ -24,6 +24,7 @@ import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.metrics2.util.MBeans;
import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.ozone.protocolPB.ReconDatanodeProtocolPB;
import org.apache.hadoop.ozone.protocolPB
.StorageContainerDatanodeProtocolClientSideTranslatorPB;
import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolPB;
@@ -130,7 +131,40 @@ public class SCMConnectionManager
* @throws IOException
*/
public void addSCMServer(InetSocketAddress address) throws IOException {
- addSCMServer(address, 1000, false);
+ writeLock();
+ try {
+ if (scmMachines.containsKey(address)) {
+ LOG.warn("Trying to add an existing SCM Machine to Machines group. " +
+ "Ignoring the request.");
+ return;
+ }
+
+ RPC.setProtocolEngine(conf, StorageContainerDatanodeProtocolPB.class,
+ ProtobufRpcEngine.class);
+ long version =
+ RPC.getProtocolVersion(StorageContainerDatanodeProtocolPB.class);
+
+ RetryPolicy retryPolicy =
+ RetryPolicies.retryForeverWithFixedSleep(
+ 1000, TimeUnit.MILLISECONDS);
+
+ StorageContainerDatanodeProtocolPB rpcProxy = RPC.getProtocolProxy(
+ StorageContainerDatanodeProtocolPB.class, version,
+ address, UserGroupInformation.getCurrentUser(), conf,
+ NetUtils.getDefaultSocketFactory(conf), getRpcTimeout(),
+ retryPolicy).getProxy();
+
+ StorageContainerDatanodeProtocolClientSideTranslatorPB rpcClient =
+ new StorageContainerDatanodeProtocolClientSideTranslatorPB(
+ rpcProxy);
+
+ EndpointStateMachine endPoint =
+ new EndpointStateMachine(address, rpcClient, conf);
+ endPoint.setPassive(false);
+ scmMachines.put(address, endPoint);
+ } finally {
+ writeUnlock();
+ }
}
/**
@@ -140,19 +174,6 @@ public class SCMConnectionManager
*/
public void addReconServer(InetSocketAddress address) throws IOException {
LOG.info("Adding Recon Server : {}", address.toString());
- addSCMServer(address, 60000, true);
- }
-
- /**
- * Add scm server helper method.
- * @param address address
- * @param sleepTime sleepTime
- * @param passiveScm flag to specify passive SCM or not. Recon is passive SCM.
- * @throws IOException
- */
- private void addSCMServer(InetSocketAddress address, long sleepTime,
- boolean passiveScm)
- throws IOException {
writeLock();
try {
if (scmMachines.containsKey(address)) {
@@ -160,16 +181,17 @@ public class SCMConnectionManager
"Ignoring the request.");
return;
}
- RPC.setProtocolEngine(conf, StorageContainerDatanodeProtocolPB.class,
+
+ RPC.setProtocolEngine(conf, ReconDatanodeProtocolPB.class,
ProtobufRpcEngine.class);
long version =
- RPC.getProtocolVersion(StorageContainerDatanodeProtocolPB.class);
+ RPC.getProtocolVersion(ReconDatanodeProtocolPB.class);
RetryPolicy retryPolicy =
- RetryPolicies.retryForeverWithFixedSleep(
- sleepTime, TimeUnit.MILLISECONDS);
- StorageContainerDatanodeProtocolPB rpcProxy = RPC.getProtocolProxy(
- StorageContainerDatanodeProtocolPB.class, version,
+ RetryPolicies.retryUpToMaximumCountWithFixedSleep(10,
+ 60000, TimeUnit.MILLISECONDS);
+ ReconDatanodeProtocolPB rpcProxy = RPC.getProtocolProxy(
+ ReconDatanodeProtocolPB.class, version,
address, UserGroupInformation.getCurrentUser(), conf,
NetUtils.getDefaultSocketFactory(conf), getRpcTimeout(),
retryPolicy).getProxy();
@@ -179,7 +201,7 @@ public class SCMConnectionManager
EndpointStateMachine endPoint =
new EndpointStateMachine(address, rpcClient, conf);
- endPoint.setPassive(passiveScm);
+ endPoint.setPassive(true);
scmMachines.put(address, endPoint);
} finally {
writeUnlock();
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/ReconDatanodeProtocol.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/ReconDatanodeProtocol.java
new file mode 100644
index 0000000..c44a66f
--- /dev/null
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/ReconDatanodeProtocol.java
@@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.protocol;
+
+import org.apache.hadoop.hdds.annotation.InterfaceAudience;
+import org.apache.hadoop.hdds.recon.ReconConfig;
+import org.apache.hadoop.security.KerberosInfo;
+
+/**
+ * The protocol spoken between datanodes and Recon. For specifics please see
+ * the Protoc file that defines the parent protocol.
+ */
+@KerberosInfo(serverPrincipal =
+ ReconConfig.ConfigStrings.OZONE_RECON_KERBEROS_PRINCIPAL_KEY)
+@InterfaceAudience.Private
+public interface ReconDatanodeProtocol
+ extends StorageContainerDatanodeProtocol {
+}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/ReconDatanodeProtocolPB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/ReconDatanodeProtocolPB.java
new file mode 100644
index 0000000..dbe8786
--- /dev/null
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/ReconDatanodeProtocolPB.java
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.protocolPB;
+
+import static org.apache.hadoop.hdds.recon.ReconConfig.ConfigStrings.OZONE_RECON_KERBEROS_PRINCIPAL_KEY;
+
+import org.apache.hadoop.hdds.DFSConfigKeysLegacy;
+import org.apache.hadoop.ipc.ProtocolInfo;
+import org.apache.hadoop.security.KerberosInfo;
+
+/**
+ * Protocol used from a datanode to Recon. This extends
+ * the Protocol Buffers service interface to add Hadoop-specific annotations.
+ */
+@ProtocolInfo(protocolName =
+ "org.apache.hadoop.ozone.protocol.ReconDatanodeProtocol",
+ protocolVersion = 1)
+@KerberosInfo(
+ serverPrincipal = OZONE_RECON_KERBEROS_PRINCIPAL_KEY,
+ clientPrincipal = DFSConfigKeysLegacy.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY)
+public interface ReconDatanodeProtocolPB extends
+ StorageContainerDatanodeProtocolPB {
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
index a68fb1c..abc2f67 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
@@ -95,6 +95,8 @@ import static org.apache.hadoop.hdds.scm.events.SCMEvents.PIPELINE_REPORT;
import static org.apache.hadoop.hdds.scm.server.StorageContainerManager.startRpcServer;
import static org.apache.hadoop.hdds.server.ServerUtils.getRemoteUserName;
import static org.apache.hadoop.hdds.server.ServerUtils.updateRPCListenAddress;
+
+import org.apache.hadoop.security.authorize.PolicyProvider;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -113,37 +115,38 @@ public class SCMDatanodeProtocolServer implements
/**
* The RPC server that listens to requests from DataNodes.
*/
- private final RPC.Server datanodeRpcServer;
+ private RPC.Server datanodeRpcServer;
private final OzoneStorageContainerManager scm;
private final InetSocketAddress datanodeRpcAddress;
private final SCMDatanodeHeartbeatDispatcher heartbeatDispatcher;
private final EventPublisher eventPublisher;
- private final ProtocolMessageMetrics protocolMessageMetrics;
+ private ProtocolMessageMetrics protocolMessageMetrics;
public SCMDatanodeProtocolServer(final OzoneConfiguration conf,
OzoneStorageContainerManager scm,
EventPublisher eventPublisher)
throws IOException {
+ // This constructor has broken down to smaller methods so that Recon's
+ // passive SCM server can override them.
Preconditions.checkNotNull(scm, "SCM cannot be null");
Preconditions.checkNotNull(eventPublisher, "EventPublisher cannot be null");
this.scm = scm;
this.eventPublisher = eventPublisher;
- final int handlerCount =
- conf.getInt(OZONE_SCM_HANDLER_COUNT_KEY,
- OZONE_SCM_HANDLER_COUNT_DEFAULT);
heartbeatDispatcher = new SCMDatanodeHeartbeatDispatcher(
scm.getScmNodeManager(), eventPublisher);
- RPC.setProtocolEngine(conf, StorageContainerDatanodeProtocolPB.class,
- ProtobufRpcEngine.class);
+ InetSocketAddress datanodeRpcAddr = getDataNodeBindAddress(conf);
- protocolMessageMetrics = ProtocolMessageMetrics
- .create("SCMDatanodeProtocol", "SCM Datanode protocol",
- StorageContainerDatanodeProtocolProtos.Type.values());
+ protocolMessageMetrics = getProtocolMessageMetrics();
+
+ final int handlerCount = conf.getInt(OZONE_SCM_HANDLER_COUNT_KEY,
+ OZONE_SCM_HANDLER_COUNT_DEFAULT);
+
+ RPC.setProtocolEngine(conf, getProtocolClass(), ProtobufRpcEngine.class);
BlockingService dnProtoPbService =
StorageContainerDatanodeProtocolProtos
@@ -152,25 +155,20 @@ public class SCMDatanodeProtocolServer implements
new StorageContainerDatanodeProtocolServerSideTranslatorPB(
this, protocolMessageMetrics));
- InetSocketAddress datanodeRpcAddr = getDataNodeBindAddress(conf);
-
- datanodeRpcServer =
- startRpcServer(
- conf,
- datanodeRpcAddr,
- StorageContainerDatanodeProtocolPB.class,
- dnProtoPbService,
- handlerCount);
+ datanodeRpcServer = startRpcServer(
+ conf,
+ datanodeRpcAddr,
+ getProtocolClass(),
+ dnProtoPbService,
+ handlerCount);
- datanodeRpcAddress =
- updateRPCListenAddress(
- conf, getScmDatanodeAddressKey(), datanodeRpcAddr,
+ datanodeRpcAddress = updateRPCListenAddress(
+ conf, getDatanodeAddressKey(), datanodeRpcAddr,
datanodeRpcServer);
if (conf.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,
false)) {
- datanodeRpcServer.refreshServiceAcl(conf,
- SCMPolicyProvider.getInstance());
+ datanodeRpcServer.refreshServiceAcl(conf, getPolicyProvider());
}
}
@@ -407,15 +405,81 @@ public class SCMDatanodeProtocolServer implements
.replaceAll(" +", " ");
}
- protected String getScmDatanodeAddressKey() {
+ /**
+ * Get the RPC server for this SCM server. Contains the protocol bindings.
+ * @param conf ozone conf
+ * @param datanodeRpcAddr Datanode RPC address for this server
+ * @param metrics ProtocolMessageMetrics
+ * @return RPC Server
+ * @throws IOException on error.
+ */
+ protected RPC.Server getRpcServer(OzoneConfiguration conf,
+ InetSocketAddress datanodeRpcAddr,
+ ProtocolMessageMetrics metrics) throws IOException {
+
+ final int handlerCount = conf.getInt(OZONE_SCM_HANDLER_COUNT_KEY,
+ OZONE_SCM_HANDLER_COUNT_DEFAULT);
+
+ RPC.setProtocolEngine(conf, getProtocolClass(), ProtobufRpcEngine.class);
+
+ BlockingService dnProtoPbService =
+ StorageContainerDatanodeProtocolProtos
+ .StorageContainerDatanodeProtocolService
+ .newReflectiveBlockingService(
+ new StorageContainerDatanodeProtocolServerSideTranslatorPB(
+ this, metrics));
+ return startRpcServer(
+ conf,
+ datanodeRpcAddr,
+ getProtocolClass(),
+ dnProtoPbService,
+ handlerCount);
+ }
+
+ /**
+ * Get the ProtocolMessageMetrics for this server.
+ * @return ProtocolMessageMetrics
+ */
+ protected ProtocolMessageMetrics getProtocolMessageMetrics() {
+ return ProtocolMessageMetrics
+ .create("SCMDatanodeProtocol", "SCM Datanode protocol",
+ StorageContainerDatanodeProtocolProtos.Type.values());
+ }
+
+ /**
+ * Get Key associated with Datanode address for this server.
+ * @return
+ */
+ protected String getDatanodeAddressKey() {
return OZONE_SCM_DATANODE_ADDRESS_KEY;
}
+ /**
+ * Get Datanode bind address for SCM.
+ * @param conf ozone configuration
+ * @return InetSocketAddress
+ */
protected InetSocketAddress getDataNodeBindAddress(OzoneConfiguration conf) {
return HddsServerUtil.getScmDataNodeBindAddress(conf);
}
/**
+ * Get the authorization provider for this SCM server.
+ * @return SCM policy provider.
+ */
+ protected PolicyProvider getPolicyProvider() {
+ return SCMPolicyProvider.getInstance();
+ }
+
+ /**
+ * Get protocol class type for this RPC Server.
+ * @return Class type.
+ */
+ protected Class getProtocolClass() {
+ return StorageContainerDatanodeProtocolPB.class;
+ }
+
+ /**
* Wrapper class for events with the datanode origin.
*/
public static class NodeRegistrationContainerReport extends
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh b/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh
index 9685a9f..d80d778 100755
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh
+++ b/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh
@@ -37,8 +37,7 @@ execute_robot_test s3g s3
execute_robot_test scm scmcli
-# TODO: https://issues.apache.org/jira/browse/HDDS-3071
-# execute_robot_test scm recon
+execute_robot_test scm recon
stop_docker_env
diff --git a/hadoop-ozone/dist/src/main/smoketest/recon/recon-api.robot b/hadoop-ozone/dist/src/main/smoketest/recon/recon-api.robot
index a809261..c0cf269 100644
--- a/hadoop-ozone/dist/src/main/smoketest/recon/recon-api.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/recon/recon-api.robot
@@ -32,9 +32,9 @@ Recon REST API
Should contain ${result} containers
${result} = Execute curl --negotiate -u : -v ${API_ENDPOINT_URL}/datanodes
Should contain ${result} datanodes
- Should contain ${result} ozone_datanode_1.ozone_default
- Should contain ${result} ozone_datanode_2.ozone_default
- Should contain ${result} ozone_datanode_3.ozone_default
+ Should contain ${result} datanode_1
+ Should contain ${result} datanode_2
+ Should contain ${result} datanode_3
${result} = Execute curl --negotiate -u : -v ${API_ENDPOINT_URL}/pipelines
Should contain ${result} pipelines
Should contain ${result} RATIS
@@ -43,5 +43,6 @@ Recon REST API
Should contain ${result} datanode_2
Should contain ${result} datanode_3
Recon Web UI
+ Run Keyword if '${SECURITY_ENABLED}' == 'true' Kinit HTTP user
${result} = Execute curl --negotiate -u : -v ${ENDPOINT_URL}
Should contain ${result} Ozone Recon
\ No newline at end of file
diff --git a/hadoop-ozone/recon/pom.xml b/hadoop-ozone/recon/pom.xml
index ed4886b..e07e12b 100644
--- a/hadoop-ozone/recon/pom.xml
+++ b/hadoop-ozone/recon/pom.xml
@@ -334,5 +334,10 @@
<artifactId>activation</artifactId>
<version>1.1.1</version>
</dependency>
+ <dependency>
+ <groupId>com.github.spotbugs</groupId>
+ <artifactId>spotbugs</artifactId>
+ <scope>provided</scope>
+ </dependency>
</dependencies>
</project>
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java
index 0d61233..9f0c17e 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java
@@ -18,9 +18,13 @@
package org.apache.hadoop.ozone.recon;
+import static org.apache.hadoop.hdds.recon.ReconConfig.ConfigStrings.OZONE_RECON_KERBEROS_KEYTAB_FILE_KEY;
+import static org.apache.hadoop.hdds.recon.ReconConfig.ConfigStrings.OZONE_RECON_KERBEROS_PRINCIPAL_KEY;
+
import org.apache.hadoop.hdds.HddsUtils;
import org.apache.hadoop.hdds.cli.GenericCli;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.recon.ReconConfig;
import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
import org.apache.hadoop.ozone.OzoneSecurityUtil;
import org.apache.hadoop.ozone.recon.spi.ContainerDBServiceProvider;
@@ -40,10 +44,6 @@ import com.google.inject.Injector;
import java.io.IOException;
import java.net.InetSocketAddress;
-import static org.apache.hadoop.hdds.recon.ReconConfigKeys.OZONE_RECON_KERBEROS_KEYTAB_FILE_KEY;
-import static org.apache.hadoop.hdds.recon.ReconConfigKeys.OZONE_RECON_KERBEROS_PRINCIPAL_KEY;
-
-
/**
* Recon server main class that stops and starts recon services.
*/
@@ -179,21 +179,21 @@ public class ReconServer extends GenericCli {
if (SecurityUtil.getAuthenticationMethod(conf).equals(
UserGroupInformation.AuthenticationMethod.KERBEROS)) {
-
- LOG.info("Ozone security is enabled. Attempting login for Recon user. "
- + "Principal: {}, keytab: {}", conf.get(
- OZONE_RECON_KERBEROS_PRINCIPAL_KEY),
- conf.get(OZONE_RECON_KERBEROS_KEYTAB_FILE_KEY));
-
+ ReconConfig reconConfig = conf.getObject(ReconConfig.class);
+ LOG.info("Ozone security is enabled. Attempting login for Recon service. "
+ + "Principal: {}, keytab: {}",
+ reconConfig.getKerberosPrincipal(),
+ reconConfig.getKerberosKeytab());
UserGroupInformation.setConfiguration(conf);
-
InetSocketAddress socAddr = HddsUtils.getReconAddresses(conf);
- SecurityUtil.login(conf, OZONE_RECON_KERBEROS_KEYTAB_FILE_KEY,
- OZONE_RECON_KERBEROS_PRINCIPAL_KEY, socAddr.getHostName());
+ SecurityUtil.login(conf,
+ OZONE_RECON_KERBEROS_KEYTAB_FILE_KEY,
+ OZONE_RECON_KERBEROS_PRINCIPAL_KEY,
+ socAddr.getHostName());
} else {
throw new AuthenticationException(SecurityUtil.getAuthenticationMethod(
- conf) + " authentication method not supported. Recon user login "
- + "failed.");
+ conf) + " authentication method not supported. "
+ + "Recon service login failed.");
}
LOG.info("Recon login successful.");
}
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerManager.java
index 02eb398..75d8ea3 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerManager.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerManager.java
@@ -24,12 +24,14 @@ import java.io.File;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.SCMContainerManager;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
import org.apache.hadoop.ozone.recon.ReconUtils;
+import org.apache.hadoop.ozone.recon.spi.StorageContainerServiceProvider;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -40,6 +42,7 @@ public class ReconContainerManager extends SCMContainerManager {
private static final Logger LOG =
LoggerFactory.getLogger(ReconContainerManager.class);
+ private StorageContainerServiceProvider scmClient;
/**
* Constructs a mapping class that creates mapping between container names
@@ -54,8 +57,10 @@ public class ReconContainerManager extends SCMContainerManager {
* @throws IOException on Failure.
*/
public ReconContainerManager(
- Configuration conf, PipelineManager pipelineManager) throws IOException {
+ Configuration conf, PipelineManager pipelineManager,
+ StorageContainerServiceProvider scm) throws IOException {
super(conf, pipelineManager);
+ this.scmClient = scm;
}
@Override
@@ -65,6 +70,29 @@ public class ReconContainerManager extends SCMContainerManager {
}
/**
+ * Check and add new container if not already present in Recon.
+ * @param containerID containerID to check.
+ * @param datanodeDetails Datanode from where we got this container.
+ * @throws IOException on Error.
+ */
+ public void checkAndAddNewContainer(ContainerID containerID,
+ DatanodeDetails datanodeDetails)
+ throws IOException {
+ if (!exists(containerID)) {
+ LOG.info("New container {} got from {}.", containerID,
+ datanodeDetails.getHostName());
+ ContainerWithPipeline containerWithPipeline =
+ scmClient.getContainerWithPipeline(containerID.getId());
+ LOG.debug("Verified new container from SCM {} ",
+ containerWithPipeline.getContainerInfo().containerID());
+ // If no other client added this, go ahead and add this container.
+ if (!exists(containerID)) {
+ addNewContainer(containerID.getId(), containerWithPipeline);
+ }
+ }
+ }
+
+ /**
* Adds a new container to Recon's container manager.
* @param containerId id
* @param containerWithPipeline containerInfo with pipeline info
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerReportHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerReportHandler.java
index 3c2cdc8..fdde7d6 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerReportHandler.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerReportHandler.java
@@ -26,11 +26,9 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolPro
import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.container.ContainerManager;
import org.apache.hadoop.hdds.scm.container.ContainerReportHandler;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
import org.apache.hadoop.hdds.scm.node.NodeManager;
import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.ContainerReportFromDatanode;
import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.apache.hadoop.ozone.recon.spi.StorageContainerServiceProvider;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -41,13 +39,10 @@ public class ReconContainerReportHandler extends ContainerReportHandler {
private static final Logger LOG =
LoggerFactory.getLogger(ReconContainerReportHandler.class);
- private StorageContainerServiceProvider scmClient;
public ReconContainerReportHandler(NodeManager nodeManager,
- ContainerManager containerManager,
- StorageContainerServiceProvider scm) {
+ ContainerManager containerManager) {
super(nodeManager, containerManager);
- this.scmClient = scm;
}
@Override
@@ -63,22 +58,12 @@ public class ReconContainerReportHandler extends ContainerReportHandler {
for (ContainerReplicaProto containerReplicaProto : reportsList) {
final ContainerID id = ContainerID.valueof(
containerReplicaProto.getContainerID());
- synchronized (containerManager) {
- if (!containerManager.exists(id)) {
- LOG.info("New container {} got from {}.", id,
- reportFromDatanode.getDatanodeDetails());
- try {
- ContainerWithPipeline containerWithPipeline =
- scmClient.getContainerWithPipeline(id.getId());
- LOG.debug("Verified new container from SCM {} ",
- containerWithPipeline.getContainerInfo().containerID());
- containerManager.addNewContainer(id.getId(), containerWithPipeline);
- } catch (IOException ioEx) {
- LOG.error("Exception while getting new container info from SCM",
- ioEx);
- return;
- }
- }
+ try {
+ containerManager.checkAndAddNewContainer(id,
+ reportFromDatanode.getDatanodeDetails());
+ } catch (IOException ioEx) {
+ LOG.error("Exception while checking and adding new container.", ioEx);
+ return;
}
LOG.debug("Got container report for containerID {} ",
containerReplicaProto.getContainerID());
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconDatanodeProtocolServer.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconDatanodeProtocolServer.java
index 4ff8676..07a0013 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconDatanodeProtocolServer.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconDatanodeProtocolServer.java
@@ -22,17 +22,23 @@ import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
import org.apache.hadoop.hdds.utils.HddsServerUtil;
import org.apache.hadoop.hdds.scm.server.SCMDatanodeProtocolServer;
import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
import org.apache.hadoop.hdds.server.events.EventPublisher;
+import org.apache.hadoop.hdds.utils.ProtocolMessageMetrics;
+import org.apache.hadoop.ozone.protocol.ReconDatanodeProtocol;
+import org.apache.hadoop.ozone.protocolPB.ReconDatanodeProtocolPB;
+import org.apache.hadoop.security.authorize.PolicyProvider;
import static org.apache.hadoop.hdds.recon.ReconConfigKeys.OZONE_RECON_DATANODE_ADDRESS_KEY;
/**
* Recon's Datanode protocol server extended from SCM.
*/
-public class ReconDatanodeProtocolServer extends SCMDatanodeProtocolServer {
+public class ReconDatanodeProtocolServer extends SCMDatanodeProtocolServer
+ implements ReconDatanodeProtocol {
public ReconDatanodeProtocolServer(OzoneConfiguration conf,
OzoneStorageContainerManager scm,
@@ -42,7 +48,14 @@ public class ReconDatanodeProtocolServer extends SCMDatanodeProtocolServer {
}
@Override
- protected String getScmDatanodeAddressKey() {
+ public ProtocolMessageMetrics getProtocolMessageMetrics() {
+ return ProtocolMessageMetrics
+ .create("ReconDatanodeProtocol", "Recon Datanode protocol",
+ StorageContainerDatanodeProtocolProtos.Type.values());
+ }
+
+ @Override
+ protected String getDatanodeAddressKey() {
return OZONE_RECON_DATANODE_ADDRESS_KEY;
}
@@ -50,4 +63,14 @@ public class ReconDatanodeProtocolServer extends SCMDatanodeProtocolServer {
public InetSocketAddress getDataNodeBindAddress(OzoneConfiguration conf) {
return HddsServerUtil.getReconDataNodeBindAddress(conf);
}
+
+ @Override
+ protected PolicyProvider getPolicyProvider() {
+ return ReconPolicyProvider.getInstance();
+ }
+
+ protected Class<ReconDatanodeProtocolPB> getProtocolClass() {
+ return ReconDatanodeProtocolPB.class;
+ }
+
}
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconIncrementalContainerReportHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconIncrementalContainerReportHandler.java
index 0312344..8117bf1 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconIncrementalContainerReportHandler.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconIncrementalContainerReportHandler.java
@@ -26,12 +26,10 @@ import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.container.ContainerManager;
import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException;
import org.apache.hadoop.hdds.scm.container.IncrementalContainerReportHandler;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
import org.apache.hadoop.hdds.scm.node.NodeManager;
import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.IncrementalContainerReportFromDatanode;
import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.apache.hadoop.ozone.recon.spi.StorageContainerServiceProvider;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -44,13 +42,9 @@ public class ReconIncrementalContainerReportHandler
private static final Logger LOG = LoggerFactory.getLogger(
ReconIncrementalContainerReportHandler.class);
- private StorageContainerServiceProvider scmClient;
-
public ReconIncrementalContainerReportHandler(NodeManager nodeManager,
- ContainerManager containerManager,
- StorageContainerServiceProvider scmClient) {
+ ContainerManager containerManager) {
super(nodeManager, containerManager);
- this.scmClient = scmClient;
}
@Override
@@ -70,23 +64,12 @@ public class ReconIncrementalContainerReportHandler
final DatanodeDetails dd = report.getDatanodeDetails();
final ContainerID id = ContainerID.valueof(
replicaProto.getContainerID());
- synchronized (containerManager) {
- if (!containerManager.exists(id)) {
- LOG.info("New container {} got from {}.", id,
- report.getDatanodeDetails());
- try {
- ContainerWithPipeline containerWithPipeline =
- scmClient.getContainerWithPipeline(id.getId());
- LOG.info("Verified new container from SCM {} ",
- containerWithPipeline.getContainerInfo().containerID());
- containerManager
- .addNewContainer(id.getId(), containerWithPipeline);
- } catch (IOException ioEx) {
- LOG.error("Exception while getting new container info from SCM",
- ioEx);
- return;
- }
- }
+ try {
+ containerManager.checkAndAddNewContainer(id,
+ report.getDatanodeDetails());
+ } catch (IOException ioEx) {
+ LOG.error("Exception while checking and adding new container.", ioEx);
+ return;
}
getNodeManager().addContainer(dd, id);
processContainerReplica(dd, replicaProto);
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconPolicyProvider.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconPolicyProvider.java
new file mode 100644
index 0000000..d825901
--- /dev/null
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconPolicyProvider.java
@@ -0,0 +1,66 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.recon.scm;
+
+import static org.apache.hadoop.hdds.recon.ReconConfig.ConfigStrings.OZONE_RECON_SECURITY_CLIENT_DATANODE_CONTAINER_PROTOCOL_ACL;
+
+import java.util.concurrent.atomic.AtomicReference;
+
+import org.apache.hadoop.hdds.annotation.InterfaceAudience;
+import org.apache.hadoop.hdds.annotation.InterfaceStability;
+import org.apache.hadoop.ozone.protocol.ReconDatanodeProtocol;
+import org.apache.hadoop.security.authorize.PolicyProvider;
+import org.apache.hadoop.security.authorize.Service;
+
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
+
+/**
+ * {@link PolicyProvider} for Recon protocols.
+ */
+public final class ReconPolicyProvider extends PolicyProvider {
+
+ private static AtomicReference<ReconPolicyProvider> atomicReference =
+ new AtomicReference<>();
+
+ private ReconPolicyProvider() {
+ }
+
+ @InterfaceAudience.Private
+ @InterfaceStability.Unstable
+ public static ReconPolicyProvider getInstance() {
+ if (atomicReference.get() == null) {
+ atomicReference.compareAndSet(null, new ReconPolicyProvider());
+ }
+ return atomicReference.get();
+ }
+
+ private static final Service[] RECON_SERVICES =
+ new Service[]{
+ new Service(
+ OZONE_RECON_SECURITY_CLIENT_DATANODE_CONTAINER_PROTOCOL_ACL,
+ ReconDatanodeProtocol.class)
+ };
+
+ @SuppressFBWarnings("EI_EXPOSE_REP")
+ @Override
+ public Service[] getServices() {
+ return RECON_SERVICES;
+ }
+
+}
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java
index c480593..d774d7b 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java
@@ -97,7 +97,8 @@ public class ReconStorageContainerManagerFacade
conf, this, eventQueue);
this.pipelineManager =
new ReconPipelineManager(conf, nodeManager, eventQueue);
- this.containerManager = new ReconContainerManager(conf, pipelineManager);
+ this.containerManager = new ReconContainerManager(conf, pipelineManager,
+ scmServiceProvider);
this.scmServiceProvider = scmServiceProvider;
NodeReportHandler nodeReportHandler =
@@ -117,12 +118,11 @@ public class ReconStorageContainerManagerFacade
pipelineManager, containerManager);
ContainerReportHandler containerReportHandler =
- new ReconContainerReportHandler(nodeManager, containerManager,
- scmServiceProvider);
+ new ReconContainerReportHandler(nodeManager, containerManager);
IncrementalContainerReportHandler icrHandler =
new ReconIncrementalContainerReportHandler(nodeManager,
- containerManager, scmServiceProvider);
+ containerManager);
CloseContainerEventHandler closeContainerHandler =
new CloseContainerEventHandler(pipelineManager, containerManager);
ContainerActionsHandler actionsHandler = new ContainerActionsHandler();
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java
index d7745c4..fce22b2 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java
@@ -18,18 +18,29 @@
package org.apache.hadoop.ozone.recon.scm;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.OPEN;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.STAND_ALONE;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_NAMES;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_DIRS;
+import static org.apache.hadoop.ozone.recon.AbstractOMMetadataManagerTest.getRandomPipeline;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
import java.io.IOException;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.scm.container.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
import org.apache.hadoop.hdds.scm.net.NetworkTopology;
import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl;
import org.apache.hadoop.hdds.scm.node.NodeManager;
import org.apache.hadoop.hdds.scm.node.SCMNodeManager;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.hadoop.hdds.scm.server.SCMStorageConfig;
import org.apache.hadoop.hdds.server.events.EventQueue;
+import org.apache.hadoop.ozone.recon.spi.StorageContainerServiceProvider;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
@@ -60,7 +71,8 @@ public class AbstractReconContainerManagerTest {
NodeManager nodeManager =
new SCMNodeManager(conf, scmStorageConfig, eventQueue, clusterMap);
pipelineManager = new ReconPipelineManager(conf, nodeManager, eventQueue);
- containerManager = new ReconContainerManager(conf, pipelineManager);
+ containerManager = new ReconContainerManager(conf, pipelineManager,
+ getScmServiceProvider());
}
@After
@@ -81,4 +93,28 @@ public class AbstractReconContainerManagerTest {
return containerManager;
}
+ private StorageContainerServiceProvider getScmServiceProvider()
+ throws IOException {
+ Pipeline pipeline = getRandomPipeline();
+ getPipelineManager().addPipeline(pipeline);
+
+ ContainerID containerID = new ContainerID(100L);
+ ContainerInfo containerInfo =
+ new ContainerInfo.Builder()
+ .setContainerID(containerID.getId())
+ .setNumberOfKeys(10)
+ .setPipelineID(pipeline.getId())
+ .setReplicationFactor(ONE)
+ .setOwner("test")
+ .setState(OPEN)
+ .setReplicationType(STAND_ALONE)
+ .build();
+ ContainerWithPipeline containerWithPipeline =
+ new ContainerWithPipeline(containerInfo, pipeline);
+ StorageContainerServiceProvider scmServiceProviderMock = mock(
+ StorageContainerServiceProvider.class);
+ when(scmServiceProviderMock.getContainerWithPipeline(100L))
+ .thenReturn(containerWithPipeline);
+ return scmServiceProviderMock;
+ }
}
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconContainerManager.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconContainerManager.java
index 742a258..5fd6389 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconContainerManager.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconContainerManager.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.ozone.recon.scm;
+import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.OPEN;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.STAND_ALONE;
@@ -30,6 +31,7 @@ import java.io.IOException;
import java.util.List;
import java.util.NavigableSet;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
@@ -78,4 +80,18 @@ public class TestReconContainerManager
assertEquals(containerID, containersInPipeline.first());
}
+ @Test
+ public void testCheckAndAddNewContainer() throws IOException {
+ ContainerID containerID = new ContainerID(100L);
+ ReconContainerManager containerManager = getContainerManager();
+ assertFalse(containerManager.exists(containerID));
+ DatanodeDetails datanodeDetails = randomDatanodeDetails();
+ containerManager.checkAndAddNewContainer(containerID, datanodeDetails);
+ assertTrue(containerManager.exists(containerID));
+
+ // Doing it one more time should not change any state.
+ containerManager.checkAndAddNewContainer(containerID, datanodeDetails);
+ assertTrue(containerManager.exists(containerID));
+ }
+
}
\ No newline at end of file
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java
index 768de8f..d4f28c0 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java
@@ -19,10 +19,6 @@
package org.apache.hadoop.ozone.recon.scm;
import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.OPEN;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.STAND_ALONE;
-import static org.apache.hadoop.ozone.recon.AbstractOMMetadataManagerTest.getRandomPipeline;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock;
@@ -37,14 +33,10 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolPro
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.IncrementalContainerReportProto;
import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
import org.apache.hadoop.hdds.scm.node.NodeManager;
import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.IncrementalContainerReportFromDatanode;
import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.apache.hadoop.ozone.recon.spi.StorageContainerServiceProvider;
import org.junit.Test;
/**
@@ -56,29 +48,7 @@ public class TestReconIncrementalContainerReportHandler
@Test
public void testProcessICR() throws IOException, NodeNotFoundException {
- Pipeline pipeline = getRandomPipeline();
- ReconPipelineManager pipelineManager = getPipelineManager();
- pipelineManager.addPipeline(pipeline);
-
ContainerID containerID = new ContainerID(100L);
- ContainerInfo containerInfo =
- new ContainerInfo.Builder()
- .setContainerID(containerID.getId())
- .setNumberOfKeys(10)
- .setPipelineID(pipeline.getId())
- .setReplicationFactor(ONE)
- .setOwner("test")
- .setState(OPEN)
- .setReplicationType(STAND_ALONE)
- .build();
- ContainerWithPipeline containerWithPipeline =
- new ContainerWithPipeline(containerInfo, pipeline);
-
- StorageContainerServiceProvider scmServiceProviderMock = mock(
- StorageContainerServiceProvider.class);
- when(scmServiceProviderMock.getContainerWithPipeline(100L))
- .thenReturn(containerWithPipeline);
-
DatanodeDetails datanodeDetails = randomDatanodeDetails();
IncrementalContainerReportFromDatanode reportMock =
mock(IncrementalContainerReportFromDatanode.class);
@@ -92,12 +62,12 @@ public class TestReconIncrementalContainerReportHandler
NodeManager nodeManagerMock = mock(NodeManager.class);
ReconContainerManager containerManager = getContainerManager();
- ReconIncrementalContainerReportHandler recconIcr =
+ ReconIncrementalContainerReportHandler reconIcr =
new ReconIncrementalContainerReportHandler(nodeManagerMock,
- containerManager, scmServiceProviderMock);
+ containerManager);
EventPublisher eventPublisherMock = mock(EventPublisher.class);
- recconIcr.onMessage(reportMock, eventPublisherMock);
+ reconIcr.onMessage(reportMock, eventPublisherMock);
verify(nodeManagerMock, times(1))
.addContainer(datanodeDetails, containerID);
assertTrue(containerManager.exists(containerID));
---------------------------------------------------------------------
To unsubscribe, e-mail: ozone-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: ozone-commits-help@hadoop.apache.org