You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ozone.apache.org by so...@apache.org on 2022/03/16 16:02:16 UTC
[ozone] branch HDDS-3816-ec updated: HDDS-6248. EC: Container list command should allow filtering of EC containers (#3179)
This is an automated email from the ASF dual-hosted git repository.
sodonnell pushed a commit to branch HDDS-3816-ec
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/HDDS-3816-ec by this push:
new 06f1eb5 HDDS-6248. EC: Container list command should allow filtering of EC containers (#3179)
06f1eb5 is described below
commit 06f1eb5ad6f57765ecbdaaf7294139808602beeb
Author: Stephen O'Donnell <st...@gmail.com>
AuthorDate: Wed Mar 16 15:56:00 2022 +0000
HDDS-6248. EC: Container list command should allow filtering of EC containers (#3179)
---
.../apache/hadoop/hdds/scm/client/ScmClient.java | 7 +-
.../protocol/StorageContainerLocationProtocol.java | 23 +++++++
...inerLocationProtocolClientSideTranslatorPB.java | 36 ++++++++--
.../src/main/proto/ScmAdminProtocol.proto | 2 +
...inerLocationProtocolServerSideTranslatorPB.java | 34 +++++++++-
.../hdds/scm/server/SCMClientProtocolServer.java | 76 +++++++++++++++++++++-
.../hdds/scm/cli/ContainerOperationClient.java | 6 +-
.../hdds/scm/cli/container/ListSubcommand.java | 28 ++++++--
.../src/main/smoketest/admincli/container.robot | 4 +-
.../hadoop/ozone/shell/TestOzoneShellHA.java | 8 +--
10 files changed, 200 insertions(+), 24 deletions(-)
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
index f1885f8..f014251 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdds.scm.client;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.hdds.annotation.InterfaceStability;
+import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.scm.DatanodeAdminError;
import org.apache.hadoop.hdds.scm.container.ContainerReplicaInfo;
import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport;
@@ -126,12 +127,14 @@ public interface ScmClient extends Closeable {
* @param startContainerID start containerID.
* @param count count must be {@literal >} 0.
* @param state Container of this state will be returned.
- * @param factor container factor.
+ * @param replicationConfig container replication Config.
* @return a list of containers.
* @throws IOException
*/
List<ContainerInfo> listContainer(long startContainerID, int count,
- HddsProtos.LifeCycleState state, HddsProtos.ReplicationFactor factor)
+ HddsProtos.LifeCycleState state,
+ HddsProtos.ReplicationType replicationType,
+ ReplicationConfig replicationConfig)
throws IOException;
/**
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
index 9f78b31..45e5142 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.hdds.scm.protocol;
import org.apache.commons.lang3.tuple.Pair;
+import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.Type;
import org.apache.hadoop.hdds.scm.DatanodeAdminError;
@@ -183,6 +184,28 @@ public interface StorageContainerLocationProtocol extends Closeable {
int count, HddsProtos.LifeCycleState state,
HddsProtos.ReplicationFactor factor) throws IOException;
+
+ /**
+ * Ask SCM for a list of containers with a range of container ID, state
+ * and replication config, and the limit of count.
+ * The containers are returned from startID (exclusive), and
+ * filtered by state and replication config. The returned list is limited to
+ * count entries.
+ *
+ * @param startContainerID start container ID.
+ * @param count count, if count {@literal <} 0, the max size is unlimited.
+ * (Usually the count will be replaced with a very big
+ * value instead of being unlimited in case the db is very big.)
+ * @param state Container with this state will be returned.
+ * @param replicationConfig Replication config for the containers
+ * @return a list of container.
+ * @throws IOException
+ */
+ List<ContainerInfo> listContainer(long startContainerID,
+ int count, HddsProtos.LifeCycleState state,
+ HddsProtos.ReplicationType replicationType,
+ ReplicationConfig replicationConfig) throws IOException;
+
/**
* Deletes a container in SCM.
*
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
index 77ef3f0..0ba9f3b 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
@@ -21,6 +21,9 @@ import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.hdds.annotation.InterfaceAudience;
+import org.apache.hadoop.hdds.client.ECReplicationConfig;
+import org.apache.hadoop.hdds.client.ReplicatedReplicationConfig;
+import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.GetScmInfoResponseProto;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.UpgradeFinalizationStatus;
@@ -111,6 +114,8 @@ import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.function.Consumer;
+
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.EC;
import static org.apache.hadoop.ozone.ClientVersions.CURRENT_VERSION;
/**
@@ -357,18 +362,20 @@ public final class StorageContainerLocationProtocolClientSideTranslatorPB
@Override
public List<ContainerInfo> listContainer(long startContainerID, int count)
throws IOException {
- return listContainer(startContainerID, count, null, null);
+ return listContainer(startContainerID, count, null, null, null);
}
@Override
public List<ContainerInfo> listContainer(long startContainerID, int count,
HddsProtos.LifeCycleState state) throws IOException {
- return listContainer(startContainerID, count, state, null);
+ return listContainer(startContainerID, count, state, null, null);
}
@Override
public List<ContainerInfo> listContainer(long startContainerID, int count,
- HddsProtos.LifeCycleState state, HddsProtos.ReplicationFactor factor)
+ HddsProtos.LifeCycleState state,
+ HddsProtos.ReplicationType replicationType,
+ ReplicationConfig replicationConfig)
throws IOException {
Preconditions.checkState(startContainerID >= 0,
"Container ID cannot be negative.");
@@ -382,8 +389,18 @@ public final class StorageContainerLocationProtocolClientSideTranslatorPB
if (state != null) {
builder.setState(state);
}
- if (factor != null) {
- builder.setFactor(factor);
+ if (replicationConfig != null) {
+ if (replicationConfig.getReplicationType() == EC) {
+ builder.setType(EC);
+ builder.setEcReplicationConfig(
+ ((ECReplicationConfig)replicationConfig).toProto());
+ } else {
+ builder.setType(replicationConfig.getReplicationType());
+ builder.setFactor(((ReplicatedReplicationConfig)replicationConfig)
+ .getReplicationFactor());
+ }
+ } else if (replicationType != null) {
+ builder.setType(replicationType);
}
SCMListContainerRequestProto request = builder.build();
@@ -400,6 +417,15 @@ public final class StorageContainerLocationProtocolClientSideTranslatorPB
return containerList;
}
+ @Deprecated
+ @Override
+ public List<ContainerInfo> listContainer(long startContainerID, int count,
+ HddsProtos.LifeCycleState state, HddsProtos.ReplicationFactor factor)
+ throws IOException {
+ throw new UnsupportedOperationException("Should no longer be called from " +
+ "the client side");
+ }
+
/**
* Ask SCM to delete a container by name. SCM will remove
* the container mapping in its database.
diff --git a/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto b/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto
index e2d7b16..1d7ebf5 100644
--- a/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto
+++ b/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto
@@ -265,6 +265,8 @@ message SCMListContainerRequestProto {
optional string traceID = 3;
optional LifeCycleState state = 4;
optional ReplicationFactor factor = 5;
+ optional ReplicationType type = 6;
+ optional ECReplicationConfig ecReplicationConfig = 7;
}
message SCMListContainerResponseProto {
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java
index 5d6ee5b..66fa4ae 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java
@@ -22,6 +22,8 @@ import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.hdds.annotation.InterfaceAudience;
+import org.apache.hadoop.hdds.client.ECReplicationConfig;
+import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.UpgradeFinalizationStatus;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos;
@@ -525,14 +527,40 @@ public final class StorageContainerLocationProtocolServerSideTranslatorPB
count = request.getCount();
HddsProtos.LifeCycleState state = null;
HddsProtos.ReplicationFactor factor = null;
+ HddsProtos.ReplicationType replicationType = null;
+ ReplicationConfig repConfig = null;
if (request.hasState()) {
state = request.getState();
}
- if (request.hasFactor()) {
+ if (request.hasType()) {
+ replicationType = request.getType();
+ }
+ if (replicationType != null) {
+ // This must come from an upgraded client as the older version never
+ // passed Type. Therefore, we must check for replicationConfig.
+ if (replicationType == HddsProtos.ReplicationType.EC) {
+ if (request.hasEcReplicationConfig()) {
+ repConfig = new ECReplicationConfig(request.getEcReplicationConfig());
+ }
+ } else {
+ if (request.hasFactor()) {
+ repConfig = ReplicationConfig
+ .fromProtoTypeAndFactor(request.getType(), request.getFactor());
+ }
+ }
+ } else if (request.hasFactor()) {
factor = request.getFactor();
}
- List<ContainerInfo> containerList =
- impl.listContainer(startContainerID, count, state, factor);
+ List<ContainerInfo> containerList;
+ if (factor != null) {
+ // Call from a legacy client
+ containerList =
+ impl.listContainer(startContainerID, count, state, factor);
+ } else {
+ containerList =
+ impl.listContainer(startContainerID, count, state, replicationType,
+ repConfig);
+ }
SCMListContainerResponseProto.Builder builder =
SCMListContainerResponseProto.newBuilder();
for (ContainerInfo container : containerList) {
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
index a5d9d3e..c818d4f 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
@@ -88,6 +88,7 @@ import java.util.Optional;
import java.util.Set;
import java.util.TreeSet;
import java.util.stream.Collectors;
+import java.util.stream.Stream;
import static org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StorageContainerLocationProtocolService.newReflectiveBlockingService;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HANDLER_COUNT_DEFAULT;
@@ -382,7 +383,7 @@ public class SCMClientProtocolServer implements
@Override
public List<ContainerInfo> listContainer(long startContainerID,
int count) throws IOException {
- return listContainer(startContainerID, count, null, null);
+ return listContainer(startContainerID, count, null, null, null);
}
/**
@@ -398,7 +399,7 @@ public class SCMClientProtocolServer implements
@Override
public List<ContainerInfo> listContainer(long startContainerID,
int count, HddsProtos.LifeCycleState state) throws IOException {
- return listContainer(startContainerID, count, state, null);
+ return listContainer(startContainerID, count, state, null, null);
}
/**
@@ -412,6 +413,7 @@ public class SCMClientProtocolServer implements
* @throws IOException
*/
@Override
+ @Deprecated
public List<ContainerInfo> listContainer(long startContainerID,
int count, HddsProtos.LifeCycleState state,
HddsProtos.ReplicationFactor factor) throws IOException {
@@ -467,6 +469,76 @@ public class SCMClientProtocolServer implements
}
}
+ /**
+ * Lists a range of containers and get their info.
+ *
+ * @param startContainerID start containerID.
+ * @param count count must be {@literal >} 0.
+ * @param state Container with this state will be returned.
+ * @param repConfig Replication Config for the container.
+ * @return a list of containers.
+ * @throws IOException
+ */
+ @Override
+ public List<ContainerInfo> listContainer(long startContainerID,
+ int count, HddsProtos.LifeCycleState state,
+ HddsProtos.ReplicationType replicationType,
+ ReplicationConfig repConfig) throws IOException {
+ boolean auditSuccess = true;
+ Map<String, String> auditMap = Maps.newHashMap();
+ auditMap.put("startContainerID", String.valueOf(startContainerID));
+ auditMap.put("count", String.valueOf(count));
+ if (state != null) {
+ auditMap.put("state", state.name());
+ }
+ if (replicationType != null) {
+ auditMap.put("replicationType", replicationType.toString());
+ }
+ if (repConfig != null) {
+ auditMap.put("replicationConfig", repConfig.toString());
+ }
+ try {
+ final ContainerID containerId = ContainerID.valueOf(startContainerID);
+ if (state == null && replicationType == null && repConfig == null) {
+ // No filters, so just return everything
+ return scm.getContainerManager().getContainers(containerId, count);
+ }
+
+ List<ContainerInfo> containerList;
+ if (state != null) {
+ containerList = scm.getContainerManager().getContainers(state);
+ } else {
+ containerList = scm.getContainerManager().getContainers();
+ }
+
+ Stream<ContainerInfo> containerStream = containerList.stream()
+ .filter(info -> info.containerID().getId() >= startContainerID);
+ // If we have repConfig filter by it, as it includes repType too.
+ // Otherwise, we may have a filter just for repType, eg all EC containers
+ // without filtering on their replication scheme
+ if (repConfig != null) {
+ containerStream = containerStream
+ .filter(info -> info.getReplicationConfig().equals(repConfig));
+ } else if (replicationType != null) {
+ containerStream = containerStream
+ .filter(info -> info.getReplicationType() == replicationType);
+ }
+ return containerStream.sorted()
+ .limit(count)
+ .collect(Collectors.toList());
+ } catch (Exception ex) {
+ auditSuccess = false;
+ AUDIT.logReadFailure(
+ buildAuditMessageForFailure(SCMAction.LIST_CONTAINER, auditMap, ex));
+ throw ex;
+ } finally {
+ if (auditSuccess) {
+ AUDIT.logReadSuccess(
+ buildAuditMessageForSuccess(SCMAction.LIST_CONTAINER, auditMap));
+ }
+ }
+ }
+
@Override
public void deleteContainer(long containerID) throws IOException {
boolean auditSuccess = true;
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java
index 4fbabf9..e530934 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java
@@ -21,6 +21,7 @@ import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.conf.StorageUnit;
+import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto;
@@ -408,9 +409,10 @@ public class ContainerOperationClient implements ScmClient {
@Override
public List<ContainerInfo> listContainer(long startContainerID,
int count, HddsProtos.LifeCycleState state,
- HddsProtos.ReplicationFactor factor) throws IOException {
+ HddsProtos.ReplicationType repType,
+ ReplicationConfig replicationConfig) throws IOException {
return storageContainerLocationClient.listContainer(
- startContainerID, count, state, factor);
+ startContainerID, count, state, repType, replicationConfig);
}
/**
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java
index 3d776bb..b120fe4 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java
@@ -20,7 +20,11 @@ package org.apache.hadoop.hdds.scm.cli.container;
import java.io.IOException;
import java.util.List;
+import com.google.common.base.Strings;
import org.apache.hadoop.hdds.cli.HddsVersionProvider;
+import org.apache.hadoop.hdds.client.ReplicationConfig;
+import org.apache.hadoop.hdds.client.ReplicationType;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.cli.ScmSubcommand;
import org.apache.hadoop.hdds.scm.client.ScmClient;
@@ -65,9 +69,14 @@ public class ListSubcommand extends ScmSubcommand {
"DELETING, DELETED)")
private HddsProtos.LifeCycleState state;
- @Option(names = {"--factor"},
- description = "Container factor(ONE, THREE)")
- private HddsProtos.ReplicationFactor factor;
+ @Option(names = {"-t", "--type"},
+ description = "Replication Type (RATIS, STAND_ALONE or EC)")
+ private HddsProtos.ReplicationType type;
+
+ @Option(names = {"-r", "--replication", "--factor"},
+ description = "Container replication (ONE, THREE for Ratis, " +
+ "rs-6-3-1024k for EC)")
+ private String replication;
private static final ObjectWriter WRITER;
@@ -90,8 +99,19 @@ public class ListSubcommand extends ScmSubcommand {
@Override
public void execute(ScmClient scmClient) throws IOException {
+ if (!Strings.isNullOrEmpty(replication) && type == null) {
+ // Set type to RATIS as that is what any command prior to this change
+ // would have expected.
+ type = HddsProtos.ReplicationType.RATIS;
+ }
+ ReplicationConfig repConfig = null;
+ if (!Strings.isNullOrEmpty(replication)) {
+ repConfig = ReplicationConfig.parse(
+ ReplicationType.fromProto(type),
+ replication, new OzoneConfiguration());
+ }
List<ContainerInfo> containerList =
- scmClient.listContainer(startId, count, state, factor);
+ scmClient.listContainer(startId, count, state, type, repConfig);
// Output data list
for (ContainerInfo container : containerList) {
diff --git a/hadoop-ozone/dist/src/main/smoketest/admincli/container.robot b/hadoop-ozone/dist/src/main/smoketest/admincli/container.robot
index 5752eed..f0d8fa8 100644
--- a/hadoop-ozone/dist/src/main/smoketest/admincli/container.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/admincli/container.robot
@@ -54,11 +54,11 @@ List containers with container state
Should Not contain ${output} OPEN
List containers with replication factor ONE
- ${output} = Execute ozone admin container list --factor=ONE
+ ${output} = Execute ozone admin container list -t RATIS -r ONE
Should Not contain ${output} THREE
List containers with replication factor THREE
- ${output} = Execute ozone admin container list --factor=THREE
+ ${output} = Execute ozone admin container list -t RATIS -r THREE
Should Not contain ${output} ONE
Container info
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java
index 52b0b31..dc5c8bb 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java
@@ -508,17 +508,17 @@ public class TestOzoneShellHA {
execute(ozoneAdminShell, args);
// Test case 3: list THREE replica container
- String factor = "--factor=THREE";
+ String factor = "--replication=THREE";
args = new String[] {"container", "list", "--scm",
"localhost:" + cluster.getStorageContainerManager().getClientRpcPort(),
- factor};
+ factor, "--type=RATIS"};
execute(ozoneAdminShell, args);
// Test case 4: list ONE replica container
- factor = "--factor=ONE";
+ factor = "--replication=ONE";
args = new String[] {"container", "list", "--scm",
"localhost:" + cluster.getStorageContainerManager().getClientRpcPort(),
- factor};
+ factor, "--type=RATIS"};
execute(ozoneAdminShell, args);
}
---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org