Posted to commits@ozone.apache.org by xy...@apache.org on 2020/07/24 23:37:20 UTC

[hadoop-ozone] branch master updated: HDDS-3996. Missing TLS client configurations to allow ozone.grpc.tls.… (#1234)

This is an automated email from the ASF dual-hosted git repository.

xyao pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new facf36e  HDDS-3996. Missing TLS client configurations to allow ozone.grpc.tls.… (#1234)
facf36e is described below

commit facf36edb49660652b08aeaaec36d3467e0fd179
Author: Xiaoyu Yao <xy...@apache.org>
AuthorDate: Fri Jul 24 16:37:04 2020 -0700

    HDDS-3996. Missing TLS client configurations to allow ozone.grpc.tls.… (#1234)
---
 .../org/apache/hadoop/hdds/ratis/RatisHelper.java  |  4 +--
 .../apache/hadoop/ozone/HddsDatanodeService.java   | 10 ++++++--
 .../common/statemachine/DatanodeStateMachine.java  |  4 ++-
 .../CreatePipelineCommandHandler.java              |  3 ++-
 .../transport/server/ratis/XceiverServerRatis.java |  4 ++-
 .../ozone/container/ozoneimpl/OzoneContainer.java  |  9 +++++++
 .../replication/GrpcReplicationClient.java         | 29 ++++++++++++++++-----
 .../replication/SimpleContainerDownloader.java     | 30 +++++++++++++++-------
 8 files changed, 71 insertions(+), 22 deletions(-)

diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java
index 505b6c9..8bd22a1 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java
@@ -170,9 +170,9 @@ public final class RatisHelper {
   }
 
   public static RaftClient newRaftClient(RaftPeer leader,
-      ConfigurationSource conf) {
+      ConfigurationSource conf, GrpcTlsConfig tlsConfig) {
     return newRaftClient(getRpcType(conf), leader,
-        RatisHelper.createRetryPolicy(conf), conf);
+        RatisHelper.createRetryPolicy(conf), tlsConfig, conf);
   }
 
   public static RaftClient newRaftClient(RpcType rpcType, RaftPeer leader,
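
For readers tracking the API change, here is a minimal sketch of the new overload (illustrative only, not part of the commit; the helper class is made up, and passing a null GrpcTlsConfig should match the plaintext behaviour of the old two-argument form):

import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.ratis.RatisHelper;
import org.apache.ratis.client.RaftClient;
import org.apache.ratis.grpc.GrpcTlsConfig;
import org.apache.ratis.protocol.RaftPeer;

// Illustrative only: not part of this commit.
final class NewRaftClientOverloadSketch {
  static RaftClient clientFor(DatanodeDetails target,
      ConfigurationSource conf, GrpcTlsConfig tlsClientConfig) {
    RaftPeer peer = RatisHelper.toRaftPeer(target);
    // tlsClientConfig is typically OzoneContainer#getTlsClientConfig();
    // it is null when gRPC TLS is disabled, which should preserve the
    // plaintext behaviour of the old two-argument overload.
    return RatisHelper.newRaftClient(peer, conf, tlsClientConfig);
  }
}
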
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
index ac6fba4..aee0f03 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
@@ -352,9 +352,15 @@ public class HddsDatanodeService extends GenericCli implements ServicePlugin {
         dnCertClient.storeCertificate(pemEncodedCert, true);
         dnCertClient.storeCertificate(response.getX509CACertificate(), true,
             true);
-        datanodeDetails.setCertSerialId(getX509Certificate(pemEncodedCert).
-            getSerialNumber().toString());
+        String dnCertSerialId = getX509Certificate(pemEncodedCert).
+            getSerialNumber().toString();
+        datanodeDetails.setCertSerialId(dnCertSerialId);
         persistDatanodeDetails(datanodeDetails);
+        // Rebuild dnCertClient with the new CSR result so that the default
+        // certSerialId and the x509Certificate can be updated.
+        dnCertClient = new DNCertificateClient(
+            new SecurityConfig(config), dnCertSerialId);
+
       } else {
         throw new RuntimeException("Unable to retrieve datanode certificate " +
             "chain");
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
index 27e814b..1f61f15 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
@@ -126,7 +126,9 @@ public class DatanodeStateMachine implements Closeable {
     ContainerReplicator replicator =
         new DownloadAndImportReplicator(container.getContainerSet(),
             container.getController(),
-            new SimpleContainerDownloader(conf), new TarContainerPacker());
+            new SimpleContainerDownloader(conf,
+                dnCertClient != null ? dnCertClient.getCACertificate() : null),
+            new TarContainerPacker());
 
     supervisor =
         new ReplicationSupervisor(container.getContainerSet(), replicator,
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CreatePipelineCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CreatePipelineCommandHandler.java
index c60c112..78059fe 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CreatePipelineCommandHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CreatePipelineCommandHandler.java
@@ -98,7 +98,8 @@ public class CreatePipelineCommandHandler implements CommandHandler {
             d -> !d.getUuid().equals(dn.getUuid()))
             .forEach(d -> {
               final RaftPeer peer = RatisHelper.toRaftPeer(d);
-              try (RaftClient client = RatisHelper.newRaftClient(peer, conf)) {
+              try (RaftClient client = RatisHelper.newRaftClient(peer, conf,
+                  ozoneContainer.getTlsClientConfig())) {
                 client.groupAdd(group, peer.getId());
               } catch (AlreadyExistsException ae) {
                 // do not log
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
index c751c5b..c1d8df6 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
@@ -422,12 +422,14 @@ public final class XceiverServerRatis implements XceiverServerSpi {
   // In summary:
   // authenticate from server to client is via TLS.
   // authenticate from client to server is via block token (or container token).
+  // The DN Ratis server acts as both SSL client and server, so we must
+  // pass TLS configuration for both roles.
   static GrpcTlsConfig createTlsServerConfigForDN(SecurityConfig conf,
       CertificateClient caClient) {
     if (conf.isSecurityEnabled() && conf.isGrpcTlsEnabled()) {
       return new GrpcTlsConfig(
           caClient.getPrivateKey(), caClient.getCertificate(),
-          null, false);
+          caClient.getCACertificate(), false);
     }
     return null;
   }
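
For orientation, a compact sketch (not part of the commit) of how the two TLS roles described in the comment above fit together, using only helpers that appear in this diff; the class and method names are invented for illustration:

import org.apache.hadoop.hdds.ratis.RatisHelper;
import org.apache.hadoop.hdds.security.x509.SecurityConfig;
import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
import org.apache.ratis.grpc.GrpcTlsConfig;

// Illustrative only: not part of this commit.
final class DatanodeTlsRolesSketch {

  // Server role: present the DN certificate and trust the cluster CA.
  // The last argument (mutual TLS) stays false because clients
  // authenticate with block/container tokens rather than certificates.
  static GrpcTlsConfig serverRole(SecurityConfig secConf,
      CertificateClient certClient) {
    if (secConf.isSecurityEnabled() && secConf.isGrpcTlsEnabled()) {
      return new GrpcTlsConfig(
          certClient.getPrivateKey(), certClient.getCertificate(),
          certClient.getCACertificate(), false);
    }
    return null;
  }

  // Client role: only a trust anchor is needed so this DN can verify the
  // remote Ratis server when it dials out as a gRPC client.
  static GrpcTlsConfig clientRole(SecurityConfig secConf,
      CertificateClient certClient) {
    return RatisHelper.createTlsClientConfig(secConf,
        certClient != null ? certClient.getCACertificate() : null);
  }
}
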
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
index abe0382..26da487 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolPro
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.IncrementalContainerReportProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
+import org.apache.hadoop.hdds.ratis.RatisHelper;
 import org.apache.hadoop.hdds.security.token.BlockTokenVerifier;
 import org.apache.hadoop.hdds.security.x509.SecurityConfig;
 import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
@@ -59,6 +60,7 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVI
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT;
+import org.apache.ratis.grpc.GrpcTlsConfig;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -82,6 +84,7 @@ public class OzoneContainer {
   private ContainerMetadataScanner metadataScanner;
   private List<ContainerDataScanner> dataScanners;
   private final BlockDeletingService blockDeletingService;
+  private final GrpcTlsConfig tlsClientConfig;
 
   /**
    * Construct OzoneContainer object.
@@ -149,6 +152,12 @@ public class OzoneContainer {
     blockDeletingService =
         new BlockDeletingService(this, svcInterval, serviceTimeout,
             TimeUnit.MILLISECONDS, config);
+    tlsClientConfig = RatisHelper.createTlsClientConfig(
+        secConf, certClient != null ? certClient.getCACertificate() : null);
+  }
+
+  public GrpcTlsConfig getTlsClientConfig() {
+    return tlsClientConfig;
   }
 
   private GrpcReplicationService createReplicationService() {
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationClient.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationClient.java
index 660ba4e..abeaf03 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationClient.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationClient.java
@@ -24,6 +24,7 @@ import java.io.OutputStream;
 import java.io.UncheckedIOException;
 import java.nio.file.Files;
 import java.nio.file.Path;
+import java.security.cert.X509Certificate;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.TimeUnit;
 
@@ -37,10 +38,13 @@ import org.apache.hadoop.hdds.protocol.datanode.proto
     .IntraDatanodeProtocolServiceGrpc.IntraDatanodeProtocolServiceStub;
 
 import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.security.x509.SecurityConfig;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.ratis.thirdparty.io.grpc.ManagedChannel;
+import org.apache.ratis.thirdparty.io.grpc.netty.GrpcSslContexts;
 import org.apache.ratis.thirdparty.io.grpc.netty.NettyChannelBuilder;
 import org.apache.ratis.thirdparty.io.grpc.stub.StreamObserver;
+import org.apache.ratis.thirdparty.io.netty.handler.ssl.SslContextBuilder;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -58,13 +62,26 @@ public class GrpcReplicationClient {
 
   private final Path workingDirectory;
 
-  public GrpcReplicationClient(String host,
-      int port, Path workingDir) {
+  public GrpcReplicationClient(String host, int port, Path workingDir,
+      SecurityConfig secConfig, X509Certificate caCert) throws IOException {
+    NettyChannelBuilder channelBuilder =
+        NettyChannelBuilder.forAddress(host, port)
+            .usePlaintext()
+            .maxInboundMessageSize(OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE);
 
-    channel = NettyChannelBuilder.forAddress(host, port)
-        .usePlaintext()
-        .maxInboundMessageSize(OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE)
-        .build();
+    if (secConfig.isGrpcTlsEnabled()) {
+      channelBuilder.useTransportSecurity();
+
+      SslContextBuilder sslContextBuilder = GrpcSslContexts.forClient();
+      if (caCert != null) {
+        sslContextBuilder.trustManager(caCert);
+      }
+      if (secConfig.useTestCert()) {
+        channelBuilder.overrideAuthority("localhost");
+      }
+      channelBuilder.sslContext(sslContextBuilder.build());
+    }
+    channel = channelBuilder.build();
     client = IntraDatanodeProtocolServiceGrpc.newStub(channel);
     workingDirectory = workingDir;
   }
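
As a hedged usage illustration (not part of the commit), the new constructor could be exercised along these lines; the endpoint, working directory, and container id are placeholders, the container id is assumed to be a long as elsewhere in the replication code, and the sketch is assumed to sit in the same package as GrpcReplicationClient:

import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.security.cert.X509Certificate;
import java.util.concurrent.CompletableFuture;

import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.security.x509.SecurityConfig;

// Illustrative only: not part of this commit.
final class ReplicationClientUsageSketch {
  static CompletableFuture<Path> fetch(ConfigurationSource conf,
      X509Certificate caCert, long containerId) throws IOException {
    // When ozone.grpc.tls is enabled in the SecurityConfig, the client
    // switches the channel to TLS and trusts the supplied CA certificate;
    // otherwise it stays on plaintext.
    GrpcReplicationClient client = new GrpcReplicationClient(
        "datanode-2.example.com", 9859,            // placeholder endpoint
        Paths.get("/tmp/container-copy-workdir"),  // placeholder workdir
        new SecurityConfig(conf), caCert);
    return client.download(containerId);
  }
}
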
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/SimpleContainerDownloader.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/SimpleContainerDownloader.java
index d7666ea..9d7b551 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/SimpleContainerDownloader.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/SimpleContainerDownloader.java
@@ -18,8 +18,10 @@
 
 package org.apache.hadoop.ozone.container.replication;
 
+import java.io.IOException;
 import java.nio.file.Path;
 import java.nio.file.Paths;
+import java.security.cert.X509Certificate;
 import java.util.List;
 import java.util.concurrent.CompletableFuture;
 import java.util.function.Function;
@@ -27,6 +29,7 @@ import java.util.function.Function;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails.Port.Name;
+import org.apache.hadoop.hdds.security.x509.SecurityConfig;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 
 import org.slf4j.Logger;
@@ -45,9 +48,11 @@ public class SimpleContainerDownloader implements ContainerDownloader {
       LoggerFactory.getLogger(SimpleContainerDownloader.class);
 
   private final Path workingDirectory;
+  private final SecurityConfig securityConfig;
+  private final X509Certificate caCert;
 
-  public SimpleContainerDownloader(ConfigurationSource conf) {
-
+  public SimpleContainerDownloader(ConfigurationSource conf,
+      X509Certificate caCert) {
     String workDirString =
         conf.get(OzoneConfigKeys.OZONE_CONTAINER_COPY_WORKDIR);
 
@@ -57,6 +62,8 @@ public class SimpleContainerDownloader implements ContainerDownloader {
     } else {
       workingDirectory = Paths.get(workDirString);
     }
+    securityConfig = new SecurityConfig(conf);
+    this.caCert = caCert;
   }
 
   @Override
@@ -66,22 +73,27 @@ public class SimpleContainerDownloader implements ContainerDownloader {
     CompletableFuture<Path> result = null;
     for (DatanodeDetails datanode : sourceDatanodes) {
       try {
-
         if (result == null) {
           GrpcReplicationClient grpcReplicationClient =
               new GrpcReplicationClient(datanode.getIpAddress(),
                   datanode.getPort(Name.STANDALONE).getValue(),
-                  workingDirectory);
+                  workingDirectory, securityConfig, caCert);
           result = grpcReplicationClient.download(containerId);
         } else {
           result = result.thenApply(CompletableFuture::completedFuture)
               .exceptionally(t -> {
                 LOG.error("Error on replicating container: " + containerId, t);
-                GrpcReplicationClient grpcReplicationClient =
-                    new GrpcReplicationClient(datanode.getIpAddress(),
-                        datanode.getPort(Name.STANDALONE).getValue(),
-                        workingDirectory);
-                return grpcReplicationClient.download(containerId);
+                try {
+                  GrpcReplicationClient grpcReplicationClient =
+                      new GrpcReplicationClient(datanode.getIpAddress(),
+                          datanode.getPort(Name.STANDALONE).getValue(),
+                          workingDirectory, securityConfig, caCert);
+                  return grpcReplicationClient.download(containerId);
+                } catch (IOException e) {
+                  LOG.error("Error on replicating container: " + containerId,
+                      e);
+                  return null;
+                }
               }).thenCompose(Function.identity());
         }
       } catch (Exception ex) {
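
The fallback chaining above relies on a slightly non-obvious CompletableFuture idiom: wrap the current result in a completed future, substitute a fresh attempt when it failed, then flatten with thenCompose(Function.identity()). A self-contained sketch of just that idiom (illustrative only; the datanode names and the attempt helper are invented):

import java.util.Arrays;
import java.util.concurrent.CompletableFuture;
import java.util.function.Function;

// Illustrative only: not part of this commit.
final class FailoverChainSketch {

  // Stand-in for GrpcReplicationClient#download: only the last source works.
  static CompletableFuture<String> attempt(String source) {
    CompletableFuture<String> f = new CompletableFuture<>();
    if ("dn3".equals(source)) {
      f.complete("container data from " + source);
    } else {
      f.completeExceptionally(new RuntimeException("unreachable: " + source));
    }
    return f;
  }

  public static void main(String[] args) {
    CompletableFuture<String> result = null;
    for (String source : Arrays.asList("dn1", "dn2", "dn3")) {
      if (result == null) {
        result = attempt(source);
      } else {
        // Wrap the value in a completed future, replace a failure with a
        // fresh attempt against the next source, then flatten back to a
        // single-level future.
        result = result.thenApply(CompletableFuture::completedFuture)
            .exceptionally(t -> attempt(source))
            .thenCompose(Function.identity());
      }
    }
    System.out.println(result.join()); // prints: container data from dn3
  }
}
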


---------------------------------------------------------------------
To unsubscribe, e-mail: ozone-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: ozone-commits-help@hadoop.apache.org