You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@druid.apache.org by GitBox <gi...@apache.org> on 2020/03/24 10:44:35 UTC

[GitHub] [druid] frnidito commented on a change in pull request #8987: Adding support for autoscaling in GCE

frnidito commented on a change in pull request #8987: Adding support for autoscaling in GCE
URL: https://github.com/apache/druid/pull/8987#discussion_r397057336
 
 

 ##########
 File path: extensions-contrib/gce-extensions/src/main/java/org/apache/druid/indexing/overlord/autoscaling/gce/GceAutoScaler.java
 ##########
 @@ -0,0 +1,539 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.druid.indexing.overlord.autoscaling.gce;
+
+import com.fasterxml.jackson.annotation.JacksonInject;
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.annotation.JsonTypeName;
+import com.google.api.client.googleapis.auth.oauth2.GoogleCredential;
+import com.google.api.client.googleapis.javanet.GoogleNetHttpTransport;
+import com.google.api.client.http.HttpTransport;
+import com.google.api.client.json.JsonFactory;
+import com.google.api.client.json.jackson2.JacksonFactory;
+import com.google.api.services.compute.Compute;
+import com.google.api.services.compute.ComputeScopes;
+import com.google.api.services.compute.model.Instance;
+import com.google.api.services.compute.model.InstanceGroupManagersDeleteInstancesRequest;
+import com.google.api.services.compute.model.InstanceGroupManagersListManagedInstancesResponse;
+import com.google.api.services.compute.model.InstanceList;
+import com.google.api.services.compute.model.ManagedInstance;
+import com.google.api.services.compute.model.NetworkInterface;
+import com.google.api.services.compute.model.Operation;
+import com.google.common.base.Preconditions;
+import com.google.common.net.InetAddresses;
+import org.apache.curator.shaded.com.google.common.annotations.VisibleForTesting;
+import org.apache.druid.indexing.overlord.autoscaling.AutoScaler;
+import org.apache.druid.indexing.overlord.autoscaling.AutoScalingData;
+import org.apache.druid.indexing.overlord.autoscaling.SimpleWorkerProvisioningConfig;
+import org.apache.druid.java.util.common.StringUtils;
+import org.apache.druid.java.util.emitter.EmittingLogger;
+
+import java.io.IOException;
+import java.security.GeneralSecurityException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Objects;
+
+/**
+ * This module permits the autoscaling of the workers in GCE
+ *
+ * General notes:
+ * - The IPs are IPs as in Internet Protocol, and they look like 1.2.3.4
+ * - The IDs are the names of the instances created; they look like prefix-abcd,
+ *   where the prefix is chosen by you and abcd is a suffix assigned by GCE
+ */
+@JsonTypeName("gce")
+public class GceAutoScaler implements AutoScaler<GceEnvironmentConfig>
+{
+  private static final EmittingLogger log = new EmittingLogger(GceAutoScaler.class);
+
+  private final GceEnvironmentConfig envConfig;
+  private final int minNumWorkers;
+  private final int maxNumWorkers;
+  private final SimpleWorkerProvisioningConfig config;  // For future use
+
+  private Compute cachedComputeService = null;
+
+  private static final long POLL_INTERVAL_MS = 5 * 1000;  // 5 sec
+  private static final int RUNNING_INSTANCES_MAX_RETRIES = 10;
+  private static final int OPERATION_END_MAX_RETRIES = 10;
+
+  /**
+   * Creates the autoscaler from its JSON configuration.
+   *
+   * @param minNumWorkers lower bound on the number of workers; must be > 0
+   * @param maxNumWorkers upper bound on the number of workers; must be strictly
+   *                      greater than {@code minNumWorkers}
+   * @param envConfig     GCE-specific settings (project id, zone, managed instance
+   *                      group name, instances per scaling step)
+   * @param config        injected provisioning config, currently unused
+   */
+  @JsonCreator
+  public GceAutoScaler(
+          @JsonProperty("minNumWorkers") int minNumWorkers,
+          @JsonProperty("maxNumWorkers") int maxNumWorkers,
+          @JsonProperty("envConfig") GceEnvironmentConfig envConfig,
+          @JacksonInject SimpleWorkerProvisioningConfig config
+  )
+  {
+    Preconditions.checkArgument(minNumWorkers > 0,
+                                "minNumWorkers must be greater than 0");
+    this.minNumWorkers = minNumWorkers;
+    Preconditions.checkArgument(maxNumWorkers > 0,
+                                "maxNumWorkers must be greater than 0");
+    // NOTE(review): the strict '>' rejects min == max, so a fixed-size pool
+    // cannot be configured — confirm that is intended.
+    Preconditions.checkArgument(maxNumWorkers > minNumWorkers,
+                                "maxNumWorkers must be greater than minNumWorkers");
+    this.maxNumWorkers = maxNumWorkers;
+    this.envConfig = envConfig;
+    this.config = config;
+  }
+
+  /**
+   * CAVEAT: this is meant to be used only for testing passing a mock version of Compute
+   */
+  @VisibleForTesting
+  public GceAutoScaler(
+          int minNumWorkers,
+          int maxNumWorkers,
+          GceEnvironmentConfig envConfig,
+          SimpleWorkerProvisioningConfig config,
+          Compute compute
+  )
+  {
+    this(minNumWorkers, maxNumWorkers, envConfig, config);
+    // Pre-populating the cache means createComputeService() never builds a real client.
+    this.cachedComputeService = compute;
+  }
+
+  /** Configured lower bound on the number of workers. */
+  @Override
+  @JsonProperty
+  public int getMinNumWorkers()
+  {
+    return minNumWorkers;
+  }
+
+  /** Configured upper bound on the number of workers; provision() never grows past this. */
+  @Override
+  @JsonProperty
+  public int getMaxNumWorkers()
+  {
+    return maxNumWorkers;
+  }
+
+  /** GCE environment settings (project id, zone, managed instance group, step size). */
+  @Override
+  @JsonProperty
+  public GceEnvironmentConfig getEnvConfig()
+  {
+    return envConfig;
+  }
+
+  /**
+   * Lazily builds and caches the GCE {@link Compute} client using application
+   * default credentials, retrying transient failures up to 5 times with a
+   * fixed delay between attempts.
+   *
+   * NOTE(review): the previous version caught Throwable and rethrew it
+   * immediately, so the retry loop could never perform a second attempt (and
+   * {@code retries} was only incremented on success). Transient IO/security
+   * failures are now retried and only rethrown once attempts are exhausted.
+   *
+   * @return the cached Compute client (built at most once)
+   * @throws GceServiceException if the credential is not a service account
+   *                             (not retried — retrying cannot fix it)
+   */
+  private synchronized Compute createComputeService()
+      throws IOException, GeneralSecurityException, InterruptedException, GceServiceException
+  {
+    final int maxRetries = 5;
+
+    int retries = 0;
+    while (cachedComputeService == null && retries < maxRetries) {
+      if (retries > 0) {
+        Thread.sleep(POLL_INTERVAL_MS);  // back off between attempts
+      }
+      retries++;
+
+      log.info("Creating new ComputeService [%d/%d]", retries, maxRetries);
+
+      try {
+        HttpTransport httpTransport = GoogleNetHttpTransport.newTrustedTransport();
+        JsonFactory jsonFactory = JacksonFactory.getDefaultInstance();
+        GoogleCredential credential = GoogleCredential.getApplicationDefault(
+                httpTransport,
+                jsonFactory
+        );
+        if (credential.createScopedRequired()) {
+          List<String> scopes = new ArrayList<>();
+          scopes.add(ComputeScopes.CLOUD_PLATFORM);
+          scopes.add(ComputeScopes.COMPUTE);
+          credential = credential.createScoped(scopes);
+        }
+
+        // Only service accounts are supported; a user credential carries
+        // client authentication and is rejected outright (not retried).
+        if (credential.getClientAuthentication() != null) {
+          throw new GceServiceException("Not using a service account");
+        }
+
+        cachedComputeService = new Compute.Builder(httpTransport, jsonFactory, credential)
+                .setApplicationName("DruidAutoscaler")
+                .build();
+      }
+      catch (IOException | GeneralSecurityException e) {
+        log.error(e, "Got Exception in creating the ComputeService [%d/%d]", retries, maxRetries);
+        if (retries >= maxRetries) {
+          throw e;  // out of attempts, propagate the last failure
+        }
+      }
+    }
+    return cachedComputeService;
+  }
+
+  // Polls the zone operation until it reports status "DONE" (or we run out of
+  // retries) and returns its error field; null means success. A timeout is
+  // signalled as InterruptedException, which callers treat as a failure.
+  private Operation.Error waitForOperationEnd(
+      Compute compute,
+      Operation operation) throws Exception
+  {
+    // Guard first: the previous version read getStatus()/getName() before its
+    // own null check, which would NPE on a null operation.
+    if (operation == null) {
+      return null;
+    }
+    String status = operation.getStatus();
+    String opId = operation.getName();
+    for (int i = 0; i < OPERATION_END_MAX_RETRIES; i++) {
+      if (operation == null || "DONE".equals(status)) {
+        return operation == null ? null : operation.getError();
+      }
+      log.info("Waiting for operation %s to end", opId);
+      Thread.sleep(POLL_INTERVAL_MS);
+      Compute.ZoneOperations.Get get = compute.zoneOperations().get(
+          envConfig.getProjectId(),
+          envConfig.getZoneName(),
+          opId
+      );
+      operation = get.execute();
+      if (operation != null) {
+        status = operation.getStatus();
+      }
+    }
+    throw new InterruptedException(
+        StringUtils.format("Timed out waiting for operation %s to complete", opId)
+    );
+  }
+
+  /**
+   * Resizes envConfig.getManagedInstanceGroupName() upward by
+   * envConfig.getNumInstances() workers, capped at getMaxNumWorkers().
+   * Returns the IDs (instance names) of the workers created, or an empty
+   * AutoScalingData when nothing was (or could be) provisioned.
+   */
+  @Override
+  public AutoScalingData provision()
+  {
+    final String project = envConfig.getProjectId();
+    final String zone = envConfig.getZoneName();
+    final int numInstances = envConfig.getNumInstances();
+    final String managedInstanceGroupName = envConfig.getManagedInstanceGroupName();
+
+    try {
+      List<String> before = getRunningInstances();
+      log.debug("Existing instances [%s]", String.join(",", before));
+
+      // never grow past the configured ceiling
+      int toSize = Math.min(before.size() + numInstances, getMaxNumWorkers());
+      if (before.size() >= toSize) {
+        // already at (or above) the target size, nothing to scale
+        return new AutoScalingData(new ArrayList<>());
+      }
+      log.info("Asked to provision instances, will resize to %d", toSize);
+
+      Compute computeService = createComputeService();
+      Compute.InstanceGroupManagers.Resize request =
+              computeService.instanceGroupManagers().resize(project, zone,
+                      managedInstanceGroupName, toSize);
+
+      Operation response = request.execute();
+      Operation.Error err = waitForOperationEnd(computeService, response);
+      if (err == null || err.isEmpty()) {
+        List<String> after = new ArrayList<>();
+        // waitForOperationEnd only waits for the resize to be scheduled; poll
+        // until the machines are actually reported as running (bounded number
+        // of checks — if they are still not up we proceed with the latest
+        // snapshot and let the next provisioning round reconcile)
+        for (int i = 0; i < RUNNING_INSTANCES_MAX_RETRIES; i++) {
+          after = getRunningInstances();
+          if (after.size() == toSize) {
+            break;
+          }
+          log.info("Machines not up yet, waiting");
+          Thread.sleep(POLL_INTERVAL_MS);
+        }
+        after.removeAll(before); // whatever is new since the resize
+        log.info("Added instances [%s]", String.join(",", after));
+        return new AutoScalingData(after);
+      } else {
+        log.error("Unable to provision instances: %s", err.toPrettyString());
+      }
+    }
+    catch (InterruptedException e) {
+      // restore the interrupt flag so callers can observe the interruption
+      // (previously swallowed by the generic Exception handler)
+      Thread.currentThread().interrupt();
+      log.error(e, "Unable to provision any gce instances.");
+    }
+    catch (Exception e) {
+      log.error(e, "Unable to provision any gce instances.");
+    }
+
+    return new AutoScalingData(new ArrayList<>());
+  }
+
+  /**
+   * Terminates the instances whose IPs are in the given list and returns the
+   * IPs of those actually terminated. Entries that are not IPs are passed
+   * through the lookup unchanged.
+   */
+  @Override
+  public AutoScalingData terminate(List<String> ips)
+  {
+    log.info("Asked to terminate: [%s]", String.join(",", ips));
+
+    if (ips.isEmpty()) {
+      return new AutoScalingData(new ArrayList<>());
+    }
+
+    // Translate IPs to instance names; a null lookup result degrades to
+    // terminating nothing rather than failing.
+    List<String> nodeIds = ipToIdLookup(ips);
+    if (nodeIds == null) {
+      nodeIds = new ArrayList<>();
+    }
+
+    try {
+      AutoScalingData terminated = terminateWithIds(nodeIds);
+      return new AutoScalingData(idToIpLookup(terminated.getNodeIds()));
+    }
+    catch (Exception e) {
+      log.error(e, "Unable to terminate any instances.");
+    }
+
+    return new AutoScalingData(new ArrayList<>());
+  }
+
+  /** Converts bare instance names into the zone-scoped resource paths the GCE API expects. */
+  private List<String> namesToInstances(List<String> names)
+  {
+    List<String> paths = new ArrayList<>(names.size());
+    for (String name : names) {
+      // e.g. "zones/us-central1-a/instances/prefix-abcd"
+      paths.add(StringUtils.format("zones/%s/instances/%s", envConfig.getZoneName(), name));
+    }
+    return paths;
+  }
+
+  /**
+   * Terminates the instances in the list of IDs provided by the caller
+   */
+  @Override
+  public AutoScalingData terminateWithIds(List<String> ids)
+  {
+    log.info("Asked to terminate IDs: [%s]", String.join(",", ids));
+
+    if (ids.isEmpty()) {
+      return new AutoScalingData(new ArrayList<>());
+    }
+
+    try {
+      final String project = envConfig.getProjectId();
+      final String zone = envConfig.getZoneName();
+      final String managedInstanceGroupName = envConfig.getManagedInstanceGroupName();
+
+      List<String> before = getRunningInstances();
+
+      InstanceGroupManagersDeleteInstancesRequest requestBody =
+              new InstanceGroupManagersDeleteInstancesRequest();
+      requestBody.setInstances(namesToInstances(ids));
+
+      Compute computeService = createComputeService();
+      Compute.InstanceGroupManagers.DeleteInstances request =
+              computeService
+                      .instanceGroupManagers()
+                      .deleteInstances(project, zone, managedInstanceGroupName, requestBody);
+
+      Operation response = request.execute();
+      Operation.Error err = waitForOperationEnd(computeService, response);
+      if (err == null || err.isEmpty()) {
+        List<String> after = null;
+        // as the waitForOperationEnd only waits for the operation to be scheduled
+        // this loop waits until the requested machines actually go down (or up to a
+        // certain amount of retries in checking)
+        for (int i = 0; i < RUNNING_INSTANCES_MAX_RETRIES; i++) {
+          after = getRunningInstances();
+          if (after.size() == (before.size() - ids.size())) {
 
 Review comment:
   > I imagine this is a super edge case, and it's fine if it isn't supported
   
   I would expect that to fail in this call but be picked up at the next round, after the overlord already has the list of the instances that are actually there and decides what to actually do. Had it been a `while(true)` loop, I would have been far more concerned :) 
   
   > Have you tested the latest version of this PR in an actual cluster to make sure the changes are good?
   
   Yes, that has been tested :)

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
users@infra.apache.org


With regards,
Apache Git Services

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@druid.apache.org
For additional commands, e-mail: commits-help@druid.apache.org