Posted to issues@ozone.apache.org by GitBox <gi...@apache.org> on 2020/08/26 05:07:59 UTC

[GitHub] [hadoop-ozone] avijayanhwx commented on a change in pull request #1353: HDDS-4148. Add servlet to return SCM DB checkpoint.

avijayanhwx commented on a change in pull request #1353:
URL: https://github.com/apache/hadoop-ozone/pull/1353#discussion_r477027519



##########
File path: hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
##########
@@ -79,6 +79,10 @@
   public static final String OZONE_USER = "user";
   public static final String OZONE_REQUEST = "request";
 
+  // SCM Http server endpoints
+  public static final String OZONE_SCM_DB_CHECKPOINT_HTTP_ENDPOINT =

Review comment:
       Can we avoid duplication and use the same String constant for both OM and SCM servlet endpoints?  
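       For illustration, a minimal sketch of what a shared constant
       could look like (the name and value here are hypothetical, not
       taken from this PR):

           // In OzoneConsts.java -- one hypothetical shared constant;
           // the OM and SCM HTTP servers would both register their
           // checkpoint servlet under the same path.
           public static final String OZONE_DB_CHECKPOINT_HTTP_ENDPOINT =
               "/dbCheckpoint";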

##########
File path: hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMDbCheckpointServlet.java
##########
@@ -0,0 +1,244 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm;
+
+import javax.servlet.ServletContext;
+import javax.servlet.ServletException;
+import javax.servlet.ServletOutputStream;
+import javax.servlet.WriteListener;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.UUID;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMMetrics;
+import org.apache.hadoop.hdds.scm.server.SCMDBCheckpointServlet;
+import org.apache.hadoop.hdds.utils.db.DBCheckpoint;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneConsts;
+
+import org.apache.commons.io.FileUtils;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS;
+import static org.apache.hadoop.ozone.OzoneConsts.OZONE_DB_CHECKPOINT_REQUEST_FLUSH;
+import static org.apache.hadoop.ozone.om.OMDBCheckpointServlet.writeOmDBCheckpointToStream;
+import org.junit.After;
+import org.junit.Assert;
+import static org.junit.Assert.assertNotNull;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+import org.junit.rules.Timeout;
+import org.mockito.Matchers;
+import static org.mockito.Mockito.doCallRealMethod;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+/**
+ * Class used for testing the SCM DB Checkpoint provider servlet.
+ */
+public class TestSCMDbCheckpointServlet {

Review comment:
       If we abstract out the common logic, we don't need 2 different test classes. 
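       For illustration, a single JUnit 4 parameterized test could drive
       both servlets; a rough sketch, assuming the setup and assertions
       move into shared methods (the test class name is hypothetical):

           @RunWith(Parameterized.class)
           public class TestDBCheckpointServlet {

             @Parameterized.Parameters
             public static Collection<Object[]> servlets() {
               // Each entry names the servlet under test; the test body
               // wires up the matching component's DB store and metrics.
               return Arrays.asList(new Object[][] {
                   {OMDBCheckpointServlet.class},
                   {SCMDBCheckpointServlet.class},
               });
             }

             // ... one copy of the mock wiring and assertions from this
             // class, switching only on the servlet under test ...
           }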

##########
File path: hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDBCheckpointServlet.java
##########
@@ -0,0 +1,216 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.server;
+
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServlet;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.time.Duration;
+import java.time.Instant;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMMetrics;
+import org.apache.hadoop.hdds.utils.db.DBCheckpoint;
+import org.apache.hadoop.hdds.utils.db.DBStore;
+import org.apache.hadoop.hdfs.util.DataTransferThrottler;
+import org.apache.hadoop.ozone.OzoneConsts;
+
+import org.apache.commons.compress.archivers.ArchiveEntry;
+import org.apache.commons.compress.archivers.ArchiveOutputStream;
+import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream;
+import org.apache.commons.compress.compressors.CompressorException;
+import org.apache.commons.compress.compressors.CompressorOutputStream;
+import org.apache.commons.compress.compressors.CompressorStreamFactory;
+import org.apache.commons.compress.utils.IOUtils;
+import org.apache.commons.lang3.StringUtils;
+import static org.apache.hadoop.ozone.OzoneConsts.OZONE_DB_CHECKPOINT_REQUEST_FLUSH;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Provides the current checkpoint snapshot of the SCM DB (tar.gz).
+ */
+public class SCMDBCheckpointServlet extends HttpServlet {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(SCMDBCheckpointServlet.class);
+  private static final long serialVersionUID = 1L;
+
+  private transient StorageContainerManager scm;
+  private transient DBStore scmDbStore;
+  private transient SCMMetrics scmMetrics;
+  private transient DataTransferThrottler throttler = null;
+
+  @Override
+  public void init() throws ServletException {
+
+    scm = (StorageContainerManager) getServletContext()
+        .getAttribute(OzoneConsts.SCM_CONTEXT_ATTRIBUTE);
+
+    if (scm == null) {
+      LOG.error("Unable to initialize SCMDBCheckpointServlet. SCM is null");
+      return;
+    }
+
+    scmDbStore = scm.getScmMetadataStore().getStore();
+    scmMetrics = scm.getMetrics();
+
+    OzoneConfiguration configuration = scm.getConfiguration();
+    long transferBandwidth = configuration.getLongBytes(
+        ScmConfigKeys.SCM_DB_CHECKPOINT_TRANSFER_RATE_KEY,
+        ScmConfigKeys.SCM_DB_CHECKPOINT_TRANSFER_RATE_DEFAULT);
+
+    if (transferBandwidth > 0) {
+      throttler = new DataTransferThrottler(transferBandwidth);
+    }
+  }
+
+  /**
+   * Process a GET request for the SCM DB checkpoint snapshot.
+   *
+   * @param request  The servlet request we are processing
+   * @param response The servlet response we are creating
+   */
+  @Override
+  public void doGet(HttpServletRequest request, HttpServletResponse response) {
+
+    LOG.info("Received request to obtain SCM DB checkpoint snapshot");
+    if (scmDbStore == null) {
+      LOG.error(
+          "Unable to process metadata snapshot request. DB Store is null");
+      response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
+      return;
+    }
+
+    DBCheckpoint checkpoint = null;
+    try {
+
+      boolean flush = false;
+      String flushParam =
+          request.getParameter(OZONE_DB_CHECKPOINT_REQUEST_FLUSH);
+      if (StringUtils.isNotEmpty(flushParam)) {
+        flush = Boolean.parseBoolean(flushParam);
+      }
+
+      checkpoint = scmDbStore.getCheckpoint(flush);
+      if (checkpoint == null || checkpoint.getCheckpointLocation() == null) {
+        LOG.error("Unable to process metadata snapshot request. " +
+            "Checkpoint request returned null.");
+        response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
+        return;
+      }
+      scmMetrics.setLastCheckpointCreationTimeTaken(
+          checkpoint.checkpointCreationTimeTaken());

Review comment:
       Same here. All this code is common. We should avoid duplication. 
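       As a side note, Boolean.parseBoolean(null) already returns false,
       so the StringUtils.isNotEmpty guard becomes unnecessary once this
       moves into a shared helper. A rough sketch (the helper name is
       hypothetical):

           // Shared helper both servlets could call from doGet(): parse
           // the optional flush parameter and take the DB checkpoint.
           private static DBCheckpoint takeCheckpoint(DBStore store,
               HttpServletRequest request) throws IOException {
             boolean flush = Boolean.parseBoolean(
                 request.getParameter(OZONE_DB_CHECKPOINT_REQUEST_FLUSH));
             return store.getCheckpoint(flush);
           }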

##########
File path: hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDBCheckpointServlet.java
##########
@@ -0,0 +1,216 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.server;
+
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServlet;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.time.Duration;
+import java.time.Instant;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMMetrics;
+import org.apache.hadoop.hdds.utils.db.DBCheckpoint;
+import org.apache.hadoop.hdds.utils.db.DBStore;
+import org.apache.hadoop.hdfs.util.DataTransferThrottler;
+import org.apache.hadoop.ozone.OzoneConsts;
+
+import org.apache.commons.compress.archivers.ArchiveEntry;
+import org.apache.commons.compress.archivers.ArchiveOutputStream;
+import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream;
+import org.apache.commons.compress.compressors.CompressorException;
+import org.apache.commons.compress.compressors.CompressorOutputStream;
+import org.apache.commons.compress.compressors.CompressorStreamFactory;
+import org.apache.commons.compress.utils.IOUtils;
+import org.apache.commons.lang3.StringUtils;
+import static org.apache.hadoop.ozone.OzoneConsts.OZONE_DB_CHECKPOINT_REQUEST_FLUSH;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Provides the current checkpoint snapshot of the SCM DB (tar.gz).
+ */
+public class SCMDBCheckpointServlet extends HttpServlet {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(SCMDBCheckpointServlet.class);
+  private static final long serialVersionUID = 1L;
+
+  private transient StorageContainerManager scm;
+  private transient DBStore scmDbStore;
+  private transient SCMMetrics scmMetrics;
+  private transient DataTransferThrottler throttler = null;
+
+  @Override
+  public void init() throws ServletException {

Review comment:
       Most of the code between the OM and SCM servlets is common. We should be able to abstract out the common logic between the two and keep just one copy. How about a DBCheckpointServlet super class from which both servlets inherit that logic?
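       A rough sketch of what that super class could look like (the
       class and method names below are hypothetical, not from this PR;
       imports as in the quoted SCMDBCheckpointServlet above):

           public abstract class DBCheckpointServlet extends HttpServlet {

             private static final Logger LOG =
                 LoggerFactory.getLogger(DBCheckpointServlet.class);

             private transient DBStore dbStore;
             private transient DataTransferThrottler throttler;

             // Each subclass calls this from its init() with its own
             // DBStore and configured transfer bandwidth.
             protected void initialize(DBStore store,
                 long transferBandwidth) {
               this.dbStore = store;
               if (transferBandwidth > 0) {
                 this.throttler =
                     new DataTransferThrottler(transferBandwidth);
               }
             }

             // Hook for the component-specific metrics update (OM/SCM).
             protected abstract void updateCheckpointMetrics(
                 DBCheckpoint checkpoint);

             @Override
             public void doGet(HttpServletRequest request,
                 HttpServletResponse response) {
               if (dbStore == null) {
                 response.setStatus(
                     HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
                 return;
               }
               try {
                 boolean flush = Boolean.parseBoolean(
                     request.getParameter(
                         OZONE_DB_CHECKPOINT_REQUEST_FLUSH));
                 DBCheckpoint checkpoint = dbStore.getCheckpoint(flush);
                 if (checkpoint == null
                     || checkpoint.getCheckpointLocation() == null) {
                   response.setStatus(
                       HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
                   return;
                 }
                 updateCheckpointMetrics(checkpoint);
                 // ... stream the tar.gz of the checkpoint directory to
                 // the response, throttled by 'throttler', then clean
                 // up the checkpoint ...
               } catch (Exception e) {
                 LOG.error("Unable to process checkpoint request", e);
                 response.setStatus(
                     HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
               }
             }
           }

       SCMDBCheckpointServlet would then shrink to an init() that fetches
       the SCM's DBStore and bandwidth config and calls initialize(),
       plus a one-line updateCheckpointMetrics() override; likewise for
       OMDBCheckpointServlet.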




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
users@infra.apache.org



---------------------------------------------------------------------
To unsubscribe, e-mail: ozone-issues-unsubscribe@hadoop.apache.org
For additional commands, e-mail: ozone-issues-help@hadoop.apache.org