Posted to commits@ozone.apache.org by el...@apache.org on 2020/08/11 13:10:11 UTC

[hadoop-ozone] branch master updated: HDDS-3833. Use Pipeline choose policy to choose pipeline from exist pipeline list (#1096)

This is an automated email from the ASF dual-hosted git repository.

elek pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new a79dfae  HDDS-3833. Use Pipeline choose policy to choose pipeline from exist pipeline list (#1096)
a79dfae is described below

commit a79dfae611e06d7fc30e90ffe61ca62909ae9e35
Author: maobaolong <30...@qq.com>
AuthorDate: Tue Aug 11 21:09:57 2020 +0800

    HDDS-3833. Use Pipeline choose policy to choose pipeline from exist pipeline list (#1096)
---
 .../hadoop/hdds/scm/PipelineChoosePolicy.java      |  37 +++++++
 .../hdds/scm/PipelineRequestInformation.java       |  59 ++++++++++++
 .../java/org/apache/hadoop/hdds/scm/ScmConfig.java |  23 +++++
 .../org/apache/hadoop/hdds/scm/ScmConfigKeys.java  |   1 +
 .../hadoop/hdds/scm/exceptions/SCMException.java   |   3 +-
 .../src/main/proto/ScmServerProtocol.proto         |   1 +
 .../hadoop/hdds/scm/block/BlockManagerImpl.java    |  14 ++-
 .../algorithms/HealthyPipelineChoosePolicy.java    |  46 +++++++++
 .../algorithms/PipelineChoosePolicyFactory.java    | 106 +++++++++++++++++++++
 .../algorithms/RandomPipelineChoosePolicy.java     |  38 ++++++++
 .../pipeline/choose/algorithms/package-info.java   |  18 ++++
 .../hdds/scm/server/StorageContainerManager.java   |   8 ++
 .../TestPipelineChoosePolicyFactory.java           |  94 ++++++++++++++++++
 13 files changed, 443 insertions(+), 5 deletions(-)

diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/PipelineChoosePolicy.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/PipelineChoosePolicy.java
new file mode 100644
index 0000000..c829e2e
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/PipelineChoosePolicy.java
@@ -0,0 +1,37 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.scm;
+
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+
+import java.util.List;
+
+/**
+ * A {@link PipelineChoosePolicy} chooses a pipeline from an existing list.
+ */
+public interface PipelineChoosePolicy {
+
+  /**
+   * Given an initial list of pipelines, return one of the pipelines.
+   *
+   * @param pipelineList list of pipelines.
+   * @return one of the pipelines.
+   */
+  Pipeline choosePipeline(List<Pipeline> pipelineList,
+      PipelineRequestInformation pri);
+}
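
The interface above is the new extension point: a policy only has to implement
choosePipeline(). As a minimal sketch (not part of this commit), a hypothetical
policy that always returns the first pipeline in the list could look like this:

    package org.apache.hadoop.hdds.scm.pipeline.choose.algorithms;

    import java.util.List;

    import org.apache.hadoop.hdds.scm.PipelineChoosePolicy;
    import org.apache.hadoop.hdds.scm.PipelineRequestInformation;
    import org.apache.hadoop.hdds.scm.pipeline.Pipeline;

    /** Hypothetical example policy: always pick the first candidate. */
    public class FirstPipelineChoosePolicy implements PipelineChoosePolicy {
      @Override
      public Pipeline choosePipeline(List<Pipeline> pipelineList,
          PipelineRequestInformation pri) {
        // Assumes the caller passes a non-empty candidate list, as
        // BlockManagerImpl does further down in this patch.
        return pipelineList.get(0);
      }
    }

An accessible no-argument constructor is required, because the factory added
later in this patch instantiates the configured class through reflection.
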
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/PipelineRequestInformation.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/PipelineRequestInformation.java
new file mode 100644
index 0000000..ac0cfbe
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/PipelineRequestInformation.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm;
+
+/**
+ * Information about a pipeline request.
+ */
+public final class PipelineRequestInformation {
+  private long size;
+
+  /**
+   * Builder for PipelineRequestInformation.
+   */
+  public static class Builder {
+    private long size;
+
+    public static Builder getBuilder() {
+      return new Builder();
+    }
+
+    /**
+     * sets the size.
+     * @param sz request size
+     * @return Builder for PipelineRequestInformation
+     */
+    public Builder setSize(long sz) {
+      this.size = sz;
+      return this;
+    }
+
+    public PipelineRequestInformation build() {
+      return new PipelineRequestInformation(size);
+    }
+  }
+
+  private PipelineRequestInformation(long size) {
+    this.size = size;
+  }
+
+  public long getSize() {
+    return size;
+  }
+}
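
PipelineRequestInformation currently carries only the requested size and is
built through its Builder. The call pattern below mirrors how BlockManagerImpl
uses it later in this patch (requestedSize is a placeholder for whatever size
the caller asks for):

    PipelineRequestInformation pri =
        PipelineRequestInformation.Builder.getBuilder()
            .setSize(requestedSize)
            .build();
    long size = pri.getSize();
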
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfig.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfig.java
index 73701ea..3084bb4 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfig.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfig.java
@@ -59,6 +59,21 @@ public class ScmConfig {
   )
   private String action;
 
+  @Config(key = "pipeline.choose.policy.impl",
+      type = ConfigType.STRING,
+      defaultValue = "org.apache.hadoop.hdds.scm.pipeline.choose.algorithms" +
+          ".RandomPipelineChoosePolicy",
+      tags = { ConfigTag.SCM, ConfigTag.PIPELINE },
+      description =
+          "The fully qualified name of a class which implements "
+          + "org.apache.hadoop.hdds.scm.PipelineChoosePolicy. "
+          + "The class decides which pipeline will be used to find or "
+          + "allocate a container. If not set, "
+          + "org.apache.hadoop.hdds.scm.pipeline.choose.algorithms."
+          + "RandomPipelineChoosePolicy is used as the default."
+  )
+  private String pipelineChoosePolicyName;
+
   public void setKerberosPrincipal(String kerberosPrincipal) {
     this.principal = kerberosPrincipal;
   }
@@ -72,6 +87,10 @@ public class ScmConfig {
     this.action = unknownContainerAction;
   }
 
+  public void setPipelineChoosePolicyName(String pipelineChoosePolicyName) {
+    this.pipelineChoosePolicyName = pipelineChoosePolicyName;
+  }
+
   public String getKerberosPrincipal() {
     return this.principal;
   }
@@ -84,6 +103,10 @@ public class ScmConfig {
     return this.action;
   }
 
+  public String getPipelineChoosePolicyName() {
+    return pipelineChoosePolicyName;
+  }
+
   /**
    * Configuration strings class.
    * required for SCMSecurityProtocol where the KerberosInfo references
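
The new pipeline.choose.policy.impl property is read through ScmConfig and
resolved by the factory added below. A minimal sketch (not part of this commit)
of selecting the bundled HealthyPipelineChoosePolicy; the full key shown
assumes the ScmConfig group places this property under the hdds.scm prefix, so
check the generated ozone-default.xml for the effective name:

    OzoneConfiguration conf = new OzoneConfiguration();
    // Assumed full key: "hdds.scm" group prefix + "pipeline.choose.policy.impl".
    conf.set("hdds.scm.pipeline.choose.policy.impl",
        "org.apache.hadoop.hdds.scm.pipeline.choose.algorithms."
            + "HealthyPipelineChoosePolicy");
    PipelineChoosePolicy policy =
        PipelineChoosePolicyFactory.getPolicy(conf); // may throw SCMException

When instantiating a non-default class fails, the factory logs the error and
falls back to the default RandomPipelineChoosePolicy.
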
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index ddb988a..4e624c6 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -289,6 +289,7 @@ public final class ScmConfigKeys {
   public static final String OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT =
       "ozone.scm.pipeline.owner.container.count";
   public static final int OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT_DEFAULT = 3;
+
   // Pipeline placement policy:
   // Upper limit for how many pipelines a datanode can engage in.
   public static final String OZONE_DATANODE_PIPELINE_LIMIT =
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java
index db1f82a..0146eae 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java
@@ -122,6 +122,7 @@ public class SCMException extends IOException {
     FAILED_TO_FIND_ACTIVE_PIPELINE,
     FAILED_TO_INIT_CONTAINER_PLACEMENT_POLICY,
     FAILED_TO_ALLOCATE_ENOUGH_BLOCKS,
-    INTERNAL_ERROR
+    INTERNAL_ERROR,
+    FAILED_TO_INIT_PIPELINE_CHOOSE_POLICY
   }
 }
diff --git a/hadoop-hdds/interface-server/src/main/proto/ScmServerProtocol.proto b/hadoop-hdds/interface-server/src/main/proto/ScmServerProtocol.proto
index fc7a598..682d4d9 100644
--- a/hadoop-hdds/interface-server/src/main/proto/ScmServerProtocol.proto
+++ b/hadoop-hdds/interface-server/src/main/proto/ScmServerProtocol.proto
@@ -114,6 +114,7 @@ enum Status {
   FAILED_TO_INIT_CONTAINER_PLACEMENT_POLICY = 26;
   FAILED_TO_ALLOCATE_ENOUGH_BLOCKS = 27;
   INTERNAL_ERROR = 29;
+  FAILED_TO_INIT_PIPELINE_CHOOSE_POLICY = 30;
 }
 
 /**
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
index 7387585..b5b2aaf 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
@@ -32,6 +32,8 @@ import org.apache.hadoop.hdds.conf.StorageUnit;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ScmOps;
+import org.apache.hadoop.hdds.scm.PipelineChoosePolicy;
+import org.apache.hadoop.hdds.scm.PipelineRequestInformation;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.ScmUtils;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
@@ -77,6 +79,7 @@ public class BlockManagerImpl implements BlockManager, BlockmanagerMXBean {
 
   private ObjectName mxBean;
   private SafeModePrecheck safeModePrecheck;
+  private PipelineChoosePolicy pipelineChoosePolicy;
 
   /**
    * Constructor.
@@ -90,7 +93,7 @@ public class BlockManagerImpl implements BlockManager, BlockmanagerMXBean {
     Objects.requireNonNull(scm, "SCM cannot be null");
     this.pipelineManager = scm.getPipelineManager();
     this.containerManager = scm.getContainerManager();
-
+    this.pipelineChoosePolicy = scm.getPipelineChoosePolicy();
     this.containerSize = (long)conf.getStorageSize(
         ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE,
         ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT,
@@ -222,9 +225,12 @@ public class BlockManagerImpl implements BlockManager, BlockmanagerMXBean {
       }
 
       if (null == pipeline) {
-        // TODO: #CLUTIL Make the selection policy driven.
-        pipeline = availablePipelines
-            .get((int) (Math.random() * availablePipelines.size()));
+        PipelineRequestInformation pri =
+            PipelineRequestInformation.Builder.getBuilder()
+                .setSize(size)
+                .build();
+        pipeline = pipelineChoosePolicy.choosePipeline(
+            availablePipelines, pri);
       }
 
       // look for OPEN containers that match the criteria.
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/HealthyPipelineChoosePolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/HealthyPipelineChoosePolicy.java
new file mode 100644
index 0000000..9f77caa
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/HealthyPipelineChoosePolicy.java
@@ -0,0 +1,46 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.scm.pipeline.choose.algorithms;
+
+import org.apache.hadoop.hdds.scm.PipelineRequestInformation;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+
+import java.util.List;
+
+/**
+ * A pipeline choose policy that keeps picking pipelines at random until it
+ * finds a healthy one, falling back to the last unhealthy pipeline when no
+ * healthy pipeline is available.
+ */
+public class HealthyPipelineChoosePolicy extends RandomPipelineChoosePolicy {
+
+  @Override
+  public Pipeline choosePipeline(List<Pipeline> pipelineList,
+      PipelineRequestInformation pri) {
+    Pipeline fallback = null;
+    while (pipelineList.size() > 0) {
+      Pipeline pipeline = super.choosePipeline(pipelineList, pri);
+      if (pipeline.isHealthy()) {
+        return pipeline;
+      } else {
+        fallback = pipeline;
+        pipelineList.remove(pipeline);
+      }
+    }
+    return fallback;
+  }
+}
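
Note that choosePipeline() above removes unhealthy entries from the list it is
given, so callers that still need the original list should pass a copy. A small
sketch (not part of this commit; availablePipelines and pri stand for whatever
the caller already holds):

    // Defensive copy: the policy mutates its input list while searching for a
    // healthy pipeline.
    List<Pipeline> candidates = new ArrayList<>(availablePipelines);
    Pipeline chosen = new HealthyPipelineChoosePolicy()
        .choosePipeline(candidates, pri);
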
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/PipelineChoosePolicyFactory.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/PipelineChoosePolicyFactory.java
new file mode 100644
index 0000000..b24091f
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/PipelineChoosePolicyFactory.java
@@ -0,0 +1,106 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.pipeline.choose.algorithms;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.scm.PipelineChoosePolicy;
+import org.apache.hadoop.hdds.scm.ScmConfig;
+import org.apache.hadoop.hdds.scm.exceptions.SCMException;
+import org.jetbrains.annotations.NotNull;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.lang.reflect.Constructor;
+
+/**
+ * A factory that creates a pipeline choose policy instance based on the
+ * {@link ScmConfig} configuration.
+ */
+public final class PipelineChoosePolicyFactory {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(PipelineChoosePolicyFactory.class);
+
+  @VisibleForTesting
+  public static final Class<? extends PipelineChoosePolicy>
+      OZONE_SCM_PIPELINE_CHOOSE_POLICY_IMPL_DEFAULT =
+      RandomPipelineChoosePolicy.class;
+
+  private PipelineChoosePolicyFactory() {
+  }
+
+  public static PipelineChoosePolicy getPolicy(
+      ConfigurationSource conf) throws SCMException {
+    ScmConfig scmConfig = conf.getObject(ScmConfig.class);
+    Class<? extends PipelineChoosePolicy> policyClass = getClass(
+        scmConfig.getPipelineChoosePolicyName(), PipelineChoosePolicy.class);
+
+    try {
+      return createPipelineChoosePolicyFromClass(policyClass);
+    } catch (Exception e) {
+      if (policyClass != OZONE_SCM_PIPELINE_CHOOSE_POLICY_IMPL_DEFAULT) {
+        LOG.error("Met an exception while create pipeline choose policy "
+            + "for the given class " + policyClass.getName()
+            + ". Fallback to the default pipeline choose policy "
+            + OZONE_SCM_PIPELINE_CHOOSE_POLICY_IMPL_DEFAULT, e);
+        return createPipelineChoosePolicyFromClass(
+            OZONE_SCM_PIPELINE_CHOOSE_POLICY_IMPL_DEFAULT);
+      }
+      throw e;
+    }
+  }
+
+  @NotNull
+  private static PipelineChoosePolicy createPipelineChoosePolicyFromClass(
+      Class<? extends PipelineChoosePolicy> policyClass) throws SCMException {
+    Constructor<? extends PipelineChoosePolicy> constructor;
+    try {
+      constructor = policyClass.getDeclaredConstructor();
+      LOG.info("Create pipeline choose policy of type {}",
+          policyClass.getCanonicalName());
+    } catch (NoSuchMethodException e) {
+      String msg = "Failed to find constructor() for class " +
+          policyClass.getCanonicalName();
+      LOG.error(msg);
+      throw new SCMException(msg,
+          SCMException.ResultCodes.FAILED_TO_INIT_PIPELINE_CHOOSE_POLICY);
+    }
+
+    try {
+      return constructor.newInstance();
+    } catch (Exception e) {
+      throw new RuntimeException("Failed to instantiate class " +
+          policyClass.getCanonicalName() + " for " + e.getMessage());
+    }
+  }
+
+  private static <U> Class<? extends U> getClass(String name,
+      Class<U> xface) {
+    try {
+      Class<?> theClass = Class.forName(name);
+      if (!xface.isAssignableFrom(theClass)) {
+        throw new RuntimeException(theClass + " not " + xface.getName());
+      } else {
+        return theClass.asSubclass(xface);
+      }
+    } catch (Exception e) {
+      throw new RuntimeException(e);
+    }
+  }
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/RandomPipelineChoosePolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/RandomPipelineChoosePolicy.java
new file mode 100644
index 0000000..080ea96
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/RandomPipelineChoosePolicy.java
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.scm.pipeline.choose.algorithms;
+
+import org.apache.hadoop.hdds.scm.PipelineChoosePolicy;
+import org.apache.hadoop.hdds.scm.PipelineRequestInformation;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+
+import java.util.List;
+
+/**
+ * A choose policy that picks a pipeline at random.
+ * That is, containers are placed on a randomly selected pipeline without any
+ * consideration of utilization.
+ */
+public class RandomPipelineChoosePolicy implements PipelineChoosePolicy {
+
+  @Override
+  public Pipeline choosePipeline(List<Pipeline> pipelineList,
+      PipelineRequestInformation pri) {
+    return pipelineList.get((int) (Math.random() * pipelineList.size()));
+  }
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/package-info.java
new file mode 100644
index 0000000..7ca0b7d
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/package-info.java
@@ -0,0 +1,18 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.hdds.scm.pipeline.choose.algorithms;
+// Various pipeline choosing algorithms.
\ No newline at end of file
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index b36ca87..352e34a 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
+import org.apache.hadoop.hdds.scm.PipelineChoosePolicy;
 import org.apache.hadoop.hdds.scm.PlacementPolicy;
 import org.apache.hadoop.hdds.scm.ScmConfig;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
@@ -82,6 +83,7 @@ import org.apache.hadoop.hdds.scm.pipeline.PipelineActionHandler;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineReportHandler;
 import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager;
+import org.apache.hadoop.hdds.scm.pipeline.choose.algorithms.PipelineChoosePolicyFactory;
 import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager;
 import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
 import org.apache.hadoop.hdds.security.x509.SecurityConfig;
@@ -199,6 +201,7 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
    *  Network topology Map.
    */
   private NetworkTopology clusterMap;
+  private PipelineChoosePolicy pipelineChoosePolicy;
 
   /**
    * Creates a new StorageContainerManager. Configuration will be
@@ -422,6 +425,7 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
               pipelineManager);
     }
 
+    pipelineChoosePolicy = PipelineChoosePolicyFactory.getPolicy(conf);
     if (configurator.getScmBlockManager() != null) {
       scmBlockManager = configurator.getScmBlockManager();
     } else {
@@ -1133,4 +1137,8 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
     }
     return map;
   }
+
+  public PipelineChoosePolicy getPipelineChoosePolicy() {
+    return this.pipelineChoosePolicy;
+  }
 }
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/TestPipelineChoosePolicyFactory.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/TestPipelineChoosePolicyFactory.java
new file mode 100644
index 0000000..804c5bb
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/TestPipelineChoosePolicyFactory.java
@@ -0,0 +1,94 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.scm.pipeline.choose.algorithms;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.scm.PipelineChoosePolicy;
+import org.apache.hadoop.hdds.scm.PipelineRequestInformation;
+import org.apache.hadoop.hdds.scm.ScmConfig;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.scm.exceptions.SCMException;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.List;
+
+import static org.apache.hadoop.hdds.scm.pipeline.choose.algorithms.PipelineChoosePolicyFactory.OZONE_SCM_PIPELINE_CHOOSE_POLICY_IMPL_DEFAULT;
+
+/**
+ * Tests for the SCM pipeline choose policy factory.
+ */
+public class TestPipelineChoosePolicyFactory {
+
+  private OzoneConfiguration conf;
+
+  private ScmConfig scmConfig;
+
+  @Before
+  public void setup() {
+    // initialize the configuration and the derived ScmConfig object
+    conf = new OzoneConfiguration();
+    scmConfig = conf.getObject(ScmConfig.class);
+  }
+
+  @Test
+  public void testDefaultPolicy() throws IOException {
+    PipelineChoosePolicy policy = PipelineChoosePolicyFactory
+        .getPolicy(conf);
+    Assert.assertSame(OZONE_SCM_PIPELINE_CHOOSE_POLICY_IMPL_DEFAULT,
+        policy.getClass());
+  }
+
+
+  /**
+   * A dummy pipeline choose policy implementation for test.
+   */
+  public static class DummyImpl implements PipelineChoosePolicy {
+
+    public DummyImpl(String dummy) {
+    }
+
+    @Override
+    public Pipeline choosePipeline(List<Pipeline> pipelineList,
+        PipelineRequestInformation pri) {
+      return null;
+    }
+  }
+
+  @Test
+  public void testConstuctorNotFound() throws SCMException {
+    // set a policy class which doesn't have the required no-arg constructor
+    scmConfig.setPipelineChoosePolicyName(DummyImpl.class.getName());
+    PipelineChoosePolicy policy = PipelineChoosePolicyFactory.getPolicy(conf);
+    Assert.assertSame(OZONE_SCM_PIPELINE_CHOOSE_POLICY_IMPL_DEFAULT,
+        policy.getClass());
+  }
+
+  @Test
+  public void testClassNotImplemented() throws SCMException {
+    // set a placement class not implemented
+    conf.set(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY,
+        "org.apache.hadoop.hdds.scm.pipeline.choose.policy.HelloWorld");
+    PipelineChoosePolicy policy = PipelineChoosePolicyFactory.getPolicy(conf);
+    Assert.assertSame(OZONE_SCM_PIPELINE_CHOOSE_POLICY_IMPL_DEFAULT,
+        policy.getClass());
+  }
+}


---------------------------------------------------------------------
To unsubscribe, e-mail: ozone-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: ozone-commits-help@hadoop.apache.org