You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by in...@apache.org on 2017/09/07 20:20:39 UTC
[36/40] hadoop git commit: HDFS-10880. Federation Mount Table State
Store internal API. Contributed by Jason Kace and Inigo Goiri.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/83f2aa35/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/RemoveMountTableEntryRequestPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/RemoveMountTableEntryRequestPBImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/RemoveMountTableEntryRequestPBImpl.java
new file mode 100644
index 0000000..7f7c998
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/RemoveMountTableEntryRequestPBImpl.java
@@ -0,0 +1,76 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RemoveMountTableEntryRequestProto;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RemoveMountTableEntryRequestProtoOrBuilder;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryRequest;
+import org.apache.hadoop.hdfs.server.federation.store.records.impl.pb.PBRecord;
+
+import com.google.protobuf.Message;
+
+/**
+ * Protobuf implementation of the state store API object
+ * RemoveMountTableEntryRequest.
+ */
+public class RemoveMountTableEntryRequestPBImpl
+    extends RemoveMountTableEntryRequest implements PBRecord {
+
+  /** Translator bridging this record and its protobuf counterpart. */
+  private FederationProtocolPBTranslator<RemoveMountTableEntryRequestProto,
+      RemoveMountTableEntryRequestProto.Builder,
+      RemoveMountTableEntryRequestProtoOrBuilder> translator =
+          new FederationProtocolPBTranslator<>(
+              RemoveMountTableEntryRequestProto.class);
+
+  public RemoveMountTableEntryRequestPBImpl() {
+  }
+
+  public RemoveMountTableEntryRequestPBImpl(
+      RemoveMountTableEntryRequestProto proto) {
+    setProto(proto);
+  }
+
+  @Override
+  public RemoveMountTableEntryRequestProto getProto() {
+    return translator.build();
+  }
+
+  @Override
+  public void setProto(Message proto) {
+    translator.setProto(proto);
+  }
+
+  @Override
+  public void readInstance(String base64String) throws IOException {
+    translator.readInstance(base64String);
+  }
+
+  /** Get the source path of the mount entry to remove. */
+  @Override
+  public String getSrcPath() {
+    return translator.getProtoOrBuilder().getSrcPath();
+  }
+
+  /** Set the source path of the mount entry to remove. */
+  @Override
+  public void setSrcPath(String path) {
+    translator.getBuilder().setSrcPath(path);
+  }
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/83f2aa35/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/RemoveMountTableEntryResponsePBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/RemoveMountTableEntryResponsePBImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/RemoveMountTableEntryResponsePBImpl.java
new file mode 100644
index 0000000..0c943ac
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/RemoveMountTableEntryResponsePBImpl.java
@@ -0,0 +1,76 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RemoveMountTableEntryResponseProto;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RemoveMountTableEntryResponseProto.Builder;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RemoveMountTableEntryResponseProtoOrBuilder;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryResponse;
+import org.apache.hadoop.hdfs.server.federation.store.records.impl.pb.PBRecord;
+
+import com.google.protobuf.Message;
+
+/**
+ * Protobuf implementation of the state store API object
+ * RemoveMountTableEntryResponse.
+ */
+public class RemoveMountTableEntryResponsePBImpl
+    extends RemoveMountTableEntryResponse implements PBRecord {
+
+  /** Translator bridging this record and its protobuf counterpart. */
+  private FederationProtocolPBTranslator<RemoveMountTableEntryResponseProto,
+      Builder, RemoveMountTableEntryResponseProtoOrBuilder> translator =
+          new FederationProtocolPBTranslator<>(
+              RemoveMountTableEntryResponseProto.class);
+
+  public RemoveMountTableEntryResponsePBImpl() {
+  }
+
+  public RemoveMountTableEntryResponsePBImpl(
+      RemoveMountTableEntryResponseProto proto) {
+    setProto(proto);
+  }
+
+  @Override
+  public RemoveMountTableEntryResponseProto getProto() {
+    return translator.build();
+  }
+
+  @Override
+  public void setProto(Message proto) {
+    translator.setProto(proto);
+  }
+
+  @Override
+  public void readInstance(String base64String) throws IOException {
+    translator.readInstance(base64String);
+  }
+
+  /** Get whether the remove operation succeeded. */
+  @Override
+  public boolean getStatus() {
+    return translator.getProtoOrBuilder().getStatus();
+  }
+
+  /** Set whether the remove operation succeeded. */
+  @Override
+  public void setStatus(boolean result) {
+    translator.getBuilder().setStatus(result);
+  }
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/83f2aa35/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/UpdateMountTableEntryRequestPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/UpdateMountTableEntryRequestPBImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/UpdateMountTableEntryRequestPBImpl.java
new file mode 100644
index 0000000..621bb3a
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/UpdateMountTableEntryRequestPBImpl.java
@@ -0,0 +1,96 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.MountTableRecordProto;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.UpdateMountTableEntryRequestProto;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.UpdateMountTableEntryRequestProtoOrBuilder;
+import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreSerializer;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryRequest;
+import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
+import org.apache.hadoop.hdfs.server.federation.store.records.impl.pb.MountTablePBImpl;
+import org.apache.hadoop.hdfs.server.federation.store.records.impl.pb.PBRecord;
+
+import com.google.protobuf.Message;
+
+/**
+ * Protobuf implementation of the state store API object
+ * UpdateMountTableEntryRequest.
+ */
+public class UpdateMountTableEntryRequestPBImpl
+ extends UpdateMountTableEntryRequest implements PBRecord {
+
+ private FederationProtocolPBTranslator<UpdateMountTableEntryRequestProto,
+ UpdateMountTableEntryRequestProto.Builder,
+ UpdateMountTableEntryRequestProtoOrBuilder> translator =
+ new FederationProtocolPBTranslator<UpdateMountTableEntryRequestProto,
+ UpdateMountTableEntryRequestProto.Builder,
+ UpdateMountTableEntryRequestProtoOrBuilder>(
+ UpdateMountTableEntryRequestProto.class);
+
+ public UpdateMountTableEntryRequestPBImpl() {
+ }
+
+ public UpdateMountTableEntryRequestPBImpl(
+ UpdateMountTableEntryRequestProto proto) {
+ this.translator.setProto(proto);
+ }
+
+ @Override
+ public UpdateMountTableEntryRequestProto getProto() {
+ return this.translator.build();
+ }
+
+ @Override
+ public void setProto(Message proto) {
+ this.translator.setProto(proto);
+ }
+
+ @Override
+ public void readInstance(String base64String) throws IOException {
+ this.translator.readInstance(base64String);
+ }
+
+ @Override
+ public MountTable getEntry() throws IOException {
+ MountTableRecordProto statsProto =
+ this.translator.getProtoOrBuilder().getEntry();
+ MountTable stats = StateStoreSerializer.newRecord(MountTable.class);
+ if (stats instanceof MountTablePBImpl) {
+ MountTablePBImpl entryPB = (MountTablePBImpl)stats;
+ entryPB.setProto(statsProto);
+ return entryPB;
+ } else {
+ throw new IOException("Cannot get stats for the membership");
+ }
+ }
+
+ @Override
+ public void setEntry(MountTable mount) throws IOException {
+ if (mount instanceof MountTablePBImpl) {
+ MountTablePBImpl mountPB = (MountTablePBImpl)mount;
+ MountTableRecordProto mountProto =
+ (MountTableRecordProto)mountPB.getProto();
+ this.translator.getBuilder().setEntry(mountProto);
+ } else {
+ throw new IOException("Cannot set mount table entry");
+ }
+ }
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/83f2aa35/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/UpdateMountTableEntryResponsePBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/UpdateMountTableEntryResponsePBImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/UpdateMountTableEntryResponsePBImpl.java
new file mode 100644
index 0000000..5d566d6
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/UpdateMountTableEntryResponsePBImpl.java
@@ -0,0 +1,76 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.UpdateMountTableEntryResponseProto;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.UpdateMountTableEntryResponseProtoOrBuilder;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryResponse;
+import org.apache.hadoop.hdfs.server.federation.store.records.impl.pb.PBRecord;
+
+import com.google.protobuf.Message;
+
+/**
+ * Protobuf implementation of the state store API object
+ * UpdateMountTableEntryResponse.
+ */
+public class UpdateMountTableEntryResponsePBImpl
+    extends UpdateMountTableEntryResponse implements PBRecord {
+
+  /** Translator bridging this record and its protobuf counterpart. */
+  private FederationProtocolPBTranslator<UpdateMountTableEntryResponseProto,
+      UpdateMountTableEntryResponseProto.Builder,
+      UpdateMountTableEntryResponseProtoOrBuilder> translator =
+          new FederationProtocolPBTranslator<>(
+              UpdateMountTableEntryResponseProto.class);
+
+  public UpdateMountTableEntryResponsePBImpl() {
+  }
+
+  public UpdateMountTableEntryResponsePBImpl(
+      UpdateMountTableEntryResponseProto proto) {
+    setProto(proto);
+  }
+
+  @Override
+  public UpdateMountTableEntryResponseProto getProto() {
+    return translator.build();
+  }
+
+  @Override
+  public void setProto(Message proto) {
+    translator.setProto(proto);
+  }
+
+  @Override
+  public void readInstance(String base64String) throws IOException {
+    translator.readInstance(base64String);
+  }
+
+  /** Get whether the update operation succeeded. */
+  @Override
+  public boolean getStatus() {
+    return translator.getProtoOrBuilder().getStatus();
+  }
+
+  /** Set whether the update operation succeeded. */
+  @Override
+  public void setStatus(boolean result) {
+    translator.getBuilder().setStatus(result);
+  }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/83f2aa35/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MountTable.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MountTable.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MountTable.java
new file mode 100644
index 0000000..16f2b8b
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MountTable.java
@@ -0,0 +1,301 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.store.records;
+
+import java.io.IOException;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.SortedMap;
+import java.util.TreeMap;
+
+import org.apache.commons.lang.builder.HashCodeBuilder;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation;
+import org.apache.hadoop.hdfs.server.federation.resolver.order.DestinationOrder;
+import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreSerializer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Data schema for
+ * {@link org.apache.hadoop.hdfs.server.federation.store.
+ * MountTableStore FederationMountTableStore} data stored in the
+ * {@link org.apache.hadoop.hdfs.server.federation.store.
+ * StateStoreService FederationStateStoreService}. Supports string
+ * serialization.
+ */
+public abstract class MountTable extends BaseRecord {
+
+  private static final Logger LOG = LoggerFactory.getLogger(MountTable.class);
+
+
+  /**
+   * Default constructor for a mount table entry.
+   */
+  public MountTable() {
+    super();
+  }
+
+  /**
+   * Create an empty mount table record through the configured serializer.
+   *
+   * @return New initialized mount table record.
+   */
+  public static MountTable newInstance() {
+    MountTable record = StateStoreSerializer.newRecord(MountTable.class);
+    record.init();
+    return record;
+  }
+
+  /**
+   * Constructor for a mount table entry with explicit creation and
+   * modification dates.
+   *
+   * @param src Source path in the mount entry.
+   * @param destinations Nameservice destinations of the mount point.
+   * @param dateCreated Created date.
+   * @param dateModified Modified date.
+   * @throws IOException If the entry cannot be created.
+   */
+  public static MountTable newInstance(final String src,
+      final Map<String, String> destinations,
+      long dateCreated, long dateModified) throws IOException {
+
+    MountTable record = newInstance(src, destinations);
+    record.setDateCreated(dateCreated);
+    record.setDateModified(dateModified);
+    return record;
+  }
+
+  /**
+   * Constructor for a mount table entry with one or more destinations.
+   *
+   * @param src Source path in the mount entry.
+   * @param destinations Nameservice destinations of the mount point
+   *                     (nameservice id -> path).
+   * @throws IOException If the entry cannot be created.
+   */
+  public static MountTable newInstance(final String src,
+      final Map<String, String> destinations) throws IOException {
+    MountTable record = newInstance();
+
+    // Normalize the mount path
+    record.setSourcePath(normalizeFileSystemPath(src));
+
+    // Build a list of remote locations
+    final List<RemoteLocation> locations = new LinkedList<>();
+    for (Entry<String, String> entry : destinations.entrySet()) {
+      String nsId = entry.getKey();
+      String path = normalizeFileSystemPath(entry.getValue());
+      RemoteLocation location = new RemoteLocation(nsId, path);
+      locations.add(location);
+    }
+
+    // Set the serialized dest string
+    record.setDestinations(locations);
+
+    // Validate
+    record.validate();
+    return record;
+  }
+
+  /**
+   * Get source path in the federated namespace.
+   *
+   * @return Source path in the federated namespace.
+   */
+  public abstract String getSourcePath();
+
+  /**
+   * Set source path in the federated namespace.
+   *
+   * @param path Source path in the federated namespace.
+   */
+  public abstract void setSourcePath(String path);
+
+  /**
+   * Get a list of destinations (namespace + path) present for this entry.
+   *
+   * @return List of RemoteLocation destinations. Null if no destinations.
+   */
+  public abstract List<RemoteLocation> getDestinations();
+
+  /**
+   * Set the destination paths.
+   *
+   * @param dests Destination paths.
+   */
+  public abstract void setDestinations(List<RemoteLocation> dests);
+
+  /**
+   * Add a new destination to this mount table entry.
+   *
+   * @param nsId Nameservice identifier of the destination.
+   * @param path Path in the destination nameservice.
+   * @return True if the destination was added, false if already present.
+   */
+  public abstract boolean addDestination(String nsId, String path);
+
+  /**
+   * Check if the entry is read only.
+   *
+   * @return If the entry is read only.
+   */
+  public abstract boolean isReadOnly();
+
+  /**
+   * Set an entry to be read only.
+   *
+   * @param ro If the entry is read only.
+   */
+  public abstract void setReadOnly(boolean ro);
+
+  /**
+   * Get the order of the destinations for this mount table entry.
+   *
+   * @return Order of the destinations.
+   */
+  public abstract DestinationOrder getDestOrder();
+
+  /**
+   * Set the order of the destinations for this mount table entry.
+   *
+   * @param order Order of the destinations.
+   */
+  public abstract void setDestOrder(DestinationOrder order);
+
+  /**
+   * Get the default location (the first destination).
+   * @return The default location, or null if there are no destinations.
+   */
+  public RemoteLocation getDefaultLocation() {
+    List<RemoteLocation> dests = this.getDestinations();
+    if (dests == null || dests.isEmpty()) {
+      return null;
+    }
+    return dests.get(0);
+  }
+
+  @Override
+  public boolean like(final BaseRecord o) {
+    if (o instanceof MountTable) {
+      MountTable other = (MountTable)o;
+      // Null source path or destinations act as wildcards
+      if (getSourcePath() != null &&
+          !getSourcePath().equals(other.getSourcePath())) {
+        return false;
+      }
+      if (getDestinations() != null &&
+          !getDestinations().equals(other.getDestinations())) {
+        return false;
+      }
+      return true;
+    }
+    return false;
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder();
+    sb.append(this.getSourcePath());
+    sb.append("->");
+    List<RemoteLocation> destinations = this.getDestinations();
+    sb.append(destinations);
+    if (destinations != null && destinations.size() > 1) {
+      sb.append("[" + this.getDestOrder() + "]");
+    }
+    if (this.isReadOnly()) {
+      sb.append("[RO]");
+    }
+    return sb.toString();
+  }
+
+  @Override
+  public SortedMap<String, String> getPrimaryKeys() {
+    // The source path uniquely identifies a mount table entry
+    SortedMap<String, String> map = new TreeMap<>();
+    map.put("sourcePath", this.getSourcePath());
+    return map;
+  }
+
+  @Override
+  public boolean validate() {
+    boolean ret = super.validate();
+    // Guard against null before dereferencing; the previous version threw
+    // NPE on a missing source path or missing destinations.
+    String sourcePath = this.getSourcePath();
+    if (sourcePath == null || sourcePath.length() == 0) {
+      LOG.error("Invalid entry, no source path specified: {}", this);
+      ret = false;
+    } else if (!sourcePath.startsWith("/")) {
+      LOG.error("Invalid entry, all mount points must start with /: {}", this);
+      ret = false;
+    }
+    List<RemoteLocation> destinations = this.getDestinations();
+    if (destinations == null || destinations.size() == 0) {
+      LOG.error("Invalid entry, no destination paths specified: {}", this);
+      ret = false;
+    } else {
+      for (RemoteLocation loc : destinations) {
+        String nsId = loc.getNameserviceId();
+        if (nsId == null || nsId.length() == 0) {
+          LOG.error("Invalid entry, invalid destination nameservice: {}", this);
+          ret = false;
+        }
+        if (loc.getDest() == null || loc.getDest().length() == 0) {
+          LOG.error("Invalid entry, invalid destination path: {}", this);
+          ret = false;
+        } else if (!loc.getDest().startsWith("/")) {
+          LOG.error("Invalid entry, all destination must start with /: {}",
+              this);
+          ret = false;
+        }
+      }
+    }
+    return ret;
+  }
+
+  @Override
+  public long getExpirationMs() {
+    // Mount table entries do not expire
+    return 0;
+  }
+
+  @Override
+  public int hashCode() {
+    return new HashCodeBuilder(17, 31)
+        .append(this.getSourcePath())
+        .append(this.getDestinations())
+        .append(this.isReadOnly())
+        .append(this.getDestOrder())
+        .toHashCode();
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    // NOTE(review): assumes source path, destinations and dest order are
+    // non-null, which holds for validated records - confirm for raw ones.
+    if (obj instanceof MountTable) {
+      MountTable other = (MountTable)obj;
+      if (!this.getSourcePath().equals(other.getSourcePath())) {
+        return false;
+      } else if (!this.getDestinations().equals(other.getDestinations())) {
+        return false;
+      } else if (this.isReadOnly() != other.isReadOnly()) {
+        return false;
+      } else if (!this.getDestOrder().equals(other.getDestOrder())) {
+        return false;
+      }
+      return true;
+    }
+    return false;
+  }
+
+  /**
+   * Normalize a path for that filesystem.
+   *
+   * @param path Path to normalize.
+   * @return Normalized path.
+   */
+  private static String normalizeFileSystemPath(final String path) {
+    Path normalizedPath = new Path(path);
+    return normalizedPath.toString();
+  }
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/83f2aa35/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/impl/pb/MountTablePBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/impl/pb/MountTablePBImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/impl/pb/MountTablePBImpl.java
new file mode 100644
index 0000000..d2870bd
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/impl/pb/MountTablePBImpl.java
@@ -0,0 +1,213 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.store.records.impl.pb;
+
+import java.io.IOException;
+import java.util.LinkedList;
+import java.util.List;
+
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.MountTableRecordProto;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.MountTableRecordProto.Builder;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.MountTableRecordProto.DestOrder;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.MountTableRecordProtoOrBuilder;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RemoteLocationProto;
+import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation;
+import org.apache.hadoop.hdfs.server.federation.resolver.order.DestinationOrder;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.FederationProtocolPBTranslator;
+import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
+
+import com.google.protobuf.Message;
+
+/**
+ * Protobuf implementation of the MountTable record.
+ */
+public class MountTablePBImpl extends MountTable implements PBRecord {
+
+  /** Translator bridging this record and its protobuf counterpart. */
+  private FederationProtocolPBTranslator<MountTableRecordProto, Builder,
+      MountTableRecordProtoOrBuilder> translator =
+          new FederationProtocolPBTranslator<MountTableRecordProto, Builder,
+              MountTableRecordProtoOrBuilder>(MountTableRecordProto.class);
+
+  public MountTablePBImpl() {
+  }
+
+  public MountTablePBImpl(MountTableRecordProto proto) {
+    this.setProto(proto);
+  }
+
+  @Override
+  public MountTableRecordProto getProto() {
+    return this.translator.build();
+  }
+
+  @Override
+  public void setProto(Message proto) {
+    this.translator.setProto(proto);
+  }
+
+  @Override
+  public void readInstance(String base64String) throws IOException {
+    this.translator.readInstance(base64String);
+  }
+
+  @Override
+  public String getSourcePath() {
+    MountTableRecordProtoOrBuilder proto = this.translator.getProtoOrBuilder();
+    if (!proto.hasSrcPath()) {
+      return null;
+    }
+    return proto.getSrcPath();
+  }
+
+  @Override
+  public void setSourcePath(String path) {
+    Builder builder = this.translator.getBuilder();
+    if (path == null) {
+      builder.clearSrcPath();
+    } else {
+      builder.setSrcPath(path);
+    }
+  }
+
+  @Override
+  public List<RemoteLocation> getDestinations() {
+    // Per the MountTable contract, return null when there are no destinations
+    MountTableRecordProtoOrBuilder proto = this.translator.getProtoOrBuilder();
+    if (proto.getDestinationsCount() == 0) {
+      return null;
+    }
+
+    final List<RemoteLocation> ret = new LinkedList<>();
+    final List<RemoteLocationProto> destList = proto.getDestinationsList();
+    for (RemoteLocationProto dest : destList) {
+      String nsId = dest.getNameserviceId();
+      String path = dest.getPath();
+      RemoteLocation loc = new RemoteLocation(nsId, path);
+      ret.add(loc);
+    }
+    return ret;
+  }
+
+  @Override
+  public void setDestinations(final List<RemoteLocation> dests) {
+    Builder builder = this.translator.getBuilder();
+    builder.clearDestinations();
+    for (RemoteLocation dest : dests) {
+      RemoteLocationProto.Builder itemBuilder =
+          RemoteLocationProto.newBuilder();
+      String nsId = dest.getNameserviceId();
+      String path = dest.getDest();
+      itemBuilder.setNameserviceId(nsId);
+      itemBuilder.setPath(path);
+      RemoteLocationProto item = itemBuilder.build();
+      builder.addDestinations(item);
+    }
+  }
+
+  @Override
+  public boolean addDestination(String nsId, String path) {
+    // Check if the location is already there. getDestinations() returns
+    // null (not an empty list) for a record with no destinations, so guard
+    // against NPE before iterating.
+    List<RemoteLocation> dests = getDestinations();
+    if (dests != null) {
+      for (RemoteLocation dest : dests) {
+        if (dest.getNameserviceId().equals(nsId) &&
+            dest.getDest().equals(path)) {
+          return false;
+        }
+      }
+    }
+
+    // Add it to the existing list
+    Builder builder = this.translator.getBuilder();
+    RemoteLocationProto.Builder itemBuilder =
+        RemoteLocationProto.newBuilder();
+    itemBuilder.setNameserviceId(nsId);
+    itemBuilder.setPath(path);
+    RemoteLocationProto item = itemBuilder.build();
+    builder.addDestinations(item);
+    return true;
+  }
+
+  @Override
+  public void setDateModified(long time) {
+    this.translator.getBuilder().setDateModified(time);
+  }
+
+  @Override
+  public long getDateModified() {
+    return this.translator.getProtoOrBuilder().getDateModified();
+  }
+
+  @Override
+  public void setDateCreated(long time) {
+    this.translator.getBuilder().setDateCreated(time);
+  }
+
+  @Override
+  public long getDateCreated() {
+    return this.translator.getProtoOrBuilder().getDateCreated();
+  }
+
+  @Override
+  public boolean isReadOnly() {
+    MountTableRecordProtoOrBuilder proto = this.translator.getProtoOrBuilder();
+    if (!proto.hasReadOnly()) {
+      return false;
+    }
+    return proto.getReadOnly();
+  }
+
+  @Override
+  public void setReadOnly(boolean ro) {
+    this.translator.getBuilder().setReadOnly(ro);
+  }
+
+  @Override
+  public DestinationOrder getDestOrder() {
+    MountTableRecordProtoOrBuilder proto = this.translator.getProtoOrBuilder();
+    return convert(proto.getDestOrder());
+  }
+
+  @Override
+  public void setDestOrder(DestinationOrder order) {
+    Builder builder = this.translator.getBuilder();
+    if (order == null) {
+      builder.clearDestOrder();
+    } else {
+      builder.setDestOrder(convert(order));
+    }
+  }
+
+  /** Convert the protobuf order enum to the resolver order enum. */
+  private DestinationOrder convert(DestOrder order) {
+    switch (order) {
+    case LOCAL:
+      return DestinationOrder.LOCAL;
+    case RANDOM:
+      return DestinationOrder.RANDOM;
+    default:
+      return DestinationOrder.HASH;
+    }
+  }
+
+  /** Convert the resolver order enum to the protobuf order enum. */
+  private DestOrder convert(DestinationOrder order) {
+    switch (order) {
+    case LOCAL:
+      return DestOrder.LOCAL;
+    case RANDOM:
+      return DestOrder.RANDOM;
+    default:
+      return DestOrder.HASH;
+    }
+  }
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/83f2aa35/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/FederationProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/FederationProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/FederationProtocol.proto
index 487fe46..32a6250 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/FederationProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/FederationProtocol.proto
@@ -104,4 +104,63 @@ message NamenodeHeartbeatRequestProto {
message NamenodeHeartbeatResponseProto {
optional bool status = 1;
-}
\ No newline at end of file
+}
+
+
+/////////////////////////////////////////////////
+// Mount table
+/////////////////////////////////////////////////
+
+// One destination of a mount point: a sub-namespace (nameservice) and a
+// path inside that namespace.
+message RemoteLocationProto {
+ optional string nameserviceId = 1;
+ optional string path = 2;
+}
+
+// A mount table entry: a source path in the global namespace and one or
+// more destinations in sub-namespaces.
+message MountTableRecordProto {
+ optional string srcPath = 1;
+ repeated RemoteLocationProto destinations = 2;
+ // Creation/modification times (uint64; unit chosen by the writer —
+ // presumably milliseconds since epoch).
+ optional uint64 dateCreated = 3;
+ optional uint64 dateModified = 4;
+ optional bool readOnly = 5 [default = false];
+
+ // Policy for choosing among multiple destinations.
+ enum DestOrder {
+ HASH = 0;
+ LOCAL = 1;
+ RANDOM = 2;
+ }
+ optional DestOrder destOrder = 6 [default = HASH];
+}
+
+// Add a new mount table entry.
+message AddMountTableEntryRequestProto {
+ optional MountTableRecordProto entry = 1;
+}
+
+message AddMountTableEntryResponseProto {
+ optional bool status = 1;
+}
+
+// Update (replace) an existing mount table entry.
+message UpdateMountTableEntryRequestProto {
+ optional MountTableRecordProto entry = 1;
+}
+
+message UpdateMountTableEntryResponseProto {
+ optional bool status = 1;
+}
+
+// Remove the mount table entry whose source path matches srcPath.
+message RemoveMountTableEntryRequestProto {
+ optional string srcPath = 1;
+}
+
+message RemoveMountTableEntryResponseProto {
+ optional bool status = 1;
+}
+
+// List the mount table entries at or below srcPath.
+message GetMountTableEntriesRequestProto {
+ optional string srcPath = 1;
+}
+
+message GetMountTableEntriesResponseProto {
+ repeated MountTableRecordProto entries = 1;
+ // Store-side time of the query, usable for cache-consistency checks.
+ optional uint64 timestamp = 2;
+}
+
http://git-wip-us.apache.org/repos/asf/hadoop/blob/83f2aa35/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
index 87427fd..a481553 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.hdfs.server.federation.resolver.NamenodePriorityCompara
import org.apache.hadoop.hdfs.server.federation.resolver.NamenodeStatusReport;
import org.apache.hadoop.hdfs.server.federation.resolver.PathLocation;
import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation;
+import org.apache.hadoop.hdfs.server.federation.router.Router;
import org.apache.hadoop.hdfs.server.federation.store.StateStoreService;
import org.apache.hadoop.util.Time;
@@ -68,6 +69,10 @@ public class MockResolver
this();
}
+ // Constructor matching the (Configuration, Router) signature of real
+ // resolvers — presumably needed for reflective instantiation; both
+ // arguments are ignored by this mock. TODO confirm against the factory.
+ public MockResolver(Configuration conf, Router router) {
+ this();
+ }
+
public void addLocation(String mount, String nsId, String location) {
List<RemoteLocation> locationsList = this.locations.get(mount);
if (locationsList == null) {
@@ -258,7 +263,6 @@ public class MockResolver
@Override
public PathLocation getDestinationForPath(String path) throws IOException {
- Set<String> namespaceSet = new HashSet<>();
List<RemoteLocation> remoteLocations = new LinkedList<>();
for (String key : this.locations.keySet()) {
if (path.startsWith(key)) {
@@ -268,7 +272,6 @@ public class MockResolver
RemoteLocation remoteLocation =
new RemoteLocation(nameservice, finalPath);
remoteLocations.add(remoteLocation);
- namespaceSet.add(nameservice);
}
break;
}
@@ -277,7 +280,7 @@ public class MockResolver
// Path isn't supported, mimic resolver behavior.
return null;
}
- return new PathLocation(path, remoteLocations, namespaceSet);
+ return new PathLocation(path, remoteLocations);
}
@Override
http://git-wip-us.apache.org/repos/asf/hadoop/blob/83f2aa35/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java
new file mode 100644
index 0000000..682d569
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java
@@ -0,0 +1,396 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.resolver;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.server.federation.router.Router;
+import org.apache.hadoop.hdfs.server.federation.store.MountTableStore;
+import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Test the {@link MountTableStore} from the {@link Router}.
+ */
+public class TestMountTableResolver {
+
+ private static final Logger LOG =
+ LoggerFactory.getLogger(TestMountTableResolver.class);
+
+ private MountTableResolver mountTable;
+
+ /**
+ * Build a single-entry destination map (nameservice id -> path) used to
+ * construct a mount table record.
+ */
+ private Map<String, String> getMountTableEntry(
+ String subcluster, String path) {
+ Map<String, String> destinations = new HashMap<>();
+ destinations.put(subcluster, path);
+ return destinations;
+ }
+
+ /**
+ * Setup the mount table.
+ * / -> 1:/
+ * __tmp -> 2:/tmp
+ * __user -> 3:/user
+ * ____a -> 2:/user/test
+ * ______demo
+ * ________test
+ * __________a -> 1:/user/test
+ * __________b -> 3:/user/test
+ * ____b
+ * ______file1.txt -> 4:/user/file1.txt
+ * __usr
+ * ____bin -> 2:/bin
+ *
+ * @throws IOException If it cannot set the mount table.
+ */
+ private void setupMountTable() throws IOException {
+ // Resolver starts empty with a default configuration; entries below
+ // are added one record at a time.
+ Configuration conf = new Configuration();
+ mountTable = new MountTableResolver(conf);
+
+ // Root mount point
+ Map<String, String> map = getMountTableEntry("1", "/");
+ mountTable.addEntry(MountTable.newInstance("/", map));
+
+ // /tmp
+ map = getMountTableEntry("2", "/");
+ mountTable.addEntry(MountTable.newInstance("/tmp", map));
+
+ // /user
+ map = getMountTableEntry("3", "/user");
+ mountTable.addEntry(MountTable.newInstance("/user", map));
+
+ // /usr/bin
+ map = getMountTableEntry("2", "/bin");
+ mountTable.addEntry(MountTable.newInstance("/usr/bin", map));
+
+ // /user/a
+ map = getMountTableEntry("2", "/user/test");
+ mountTable.addEntry(MountTable.newInstance("/user/a", map));
+
+ // /user/b/file1.txt
+ map = getMountTableEntry("4", "/user/file1.txt");
+ mountTable.addEntry(MountTable.newInstance("/user/b/file1.txt", map));
+
+ // /user/a/demo/test/a
+ map = getMountTableEntry("1", "/user/test");
+ mountTable.addEntry(MountTable.newInstance("/user/a/demo/test/a", map));
+
+ // /user/a/demo/test/b
+ map = getMountTableEntry("3", "/user/test");
+ mountTable.addEntry(MountTable.newInstance("/user/a/demo/test/b", map));
+ }
+
+ @Before
+ public void setup() throws IOException {
+ // Rebuild the full mount table before every test case.
+ setupMountTable();
+ }
+
+ @Test
+ public void testDestination() throws IOException {
+
+ // Resolve client paths against the table from setupMountTable();
+ // expected format is "nameserviceId->remotePath".
+ // Check files ("tesfile1" typo fixed on both sides; "/" is mounted,
+ // so any root-level name resolves the same way)
+ assertEquals("1->/testfile1.txt",
+ mountTable.getDestinationForPath("/testfile1.txt").toString());
+
+ assertEquals("3->/user/testfile2.txt",
+ mountTable.getDestinationForPath("/user/testfile2.txt").toString());
+
+ assertEquals("2->/user/test/testfile3.txt",
+ mountTable.getDestinationForPath("/user/a/testfile3.txt").toString());
+
+ assertEquals("3->/user/b/testfile4.txt",
+ mountTable.getDestinationForPath("/user/b/testfile4.txt").toString());
+
+ assertEquals("1->/share/file5.txt",
+ mountTable.getDestinationForPath("/share/file5.txt").toString());
+
+ assertEquals("2->/bin/file7.txt",
+ mountTable.getDestinationForPath("/usr/bin/file7.txt").toString());
+
+ assertEquals("1->/usr/file8.txt",
+ mountTable.getDestinationForPath("/usr/file8.txt").toString());
+
+ assertEquals("2->/user/test/demo/file9.txt",
+ mountTable.getDestinationForPath("/user/a/demo/file9.txt").toString());
+
+ // Check folders
+ assertEquals("3->/user/testfolder",
+ mountTable.getDestinationForPath("/user/testfolder").toString());
+
+ assertEquals("2->/user/test/b",
+ mountTable.getDestinationForPath("/user/a/b").toString());
+
+ assertEquals("3->/user/test/a",
+ mountTable.getDestinationForPath("/user/test/a").toString());
+
+ }
+
+ // Assert list1 holds exactly the items of list2. Relies on equal sizes
+ // plus membership, so it assumes the expected items are distinct.
+ private void compareLists(List<String> list1, String[] list2) {
+ assertEquals(list1.size(), list2.length);
+ for (String item : list2) {
+ assertTrue(list1.contains(item));
+ }
+ }
+
+ @Test
+ public void testGetMountPoints() throws IOException {
+
+ // Check getting all mount points (virtual and real) beneath a path
+ List<String> mounts = mountTable.getMountPoints("/");
+ assertEquals(3, mounts.size());
+ compareLists(mounts, new String[] {"tmp", "user", "usr"});
+
+ mounts = mountTable.getMountPoints("/user");
+ assertEquals(2, mounts.size());
+ compareLists(mounts, new String[] {"a", "b"});
+
+ mounts = mountTable.getMountPoints("/user/a");
+ assertEquals(1, mounts.size());
+ compareLists(mounts, new String[] {"demo"});
+
+ mounts = mountTable.getMountPoints("/user/a/demo");
+ assertEquals(1, mounts.size());
+ compareLists(mounts, new String[] {"test"});
+
+ mounts = mountTable.getMountPoints("/user/a/demo/test");
+ assertEquals(2, mounts.size());
+ compareLists(mounts, new String[] {"a", "b"});
+
+ // A real mount point with no children yields an empty list
+ mounts = mountTable.getMountPoints("/tmp");
+ assertEquals(0, mounts.size());
+
+ // Paths that do not exist in the mount tree yield null
+ mounts = mountTable.getMountPoints("/t");
+ assertNull(mounts);
+
+ mounts = mountTable.getMountPoints("/unknownpath");
+ assertNull(mounts);
+ }
+
+ /**
+ * Assert that the records in list1 are exactly the source paths named in
+ * list2. The original returned from the whole method on the first match,
+ * so it passed whenever ANY single expected entry was present; every
+ * expected path is now verified individually.
+ */
+ private void compareRecords(List<MountTable> list1, String[] list2) {
+ assertEquals(list1.size(), list2.length);
+ for (String item : list2) {
+ boolean found = false;
+ for (MountTable record : list1) {
+ if (record.getSourcePath().equals(item)) {
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ fail("Mount table entry not found: " + item);
+ }
+ }
+ }
+
+ @Test
+ public void testGetMounts() throws IOException {
+
+ // Check listing the mount table records at or beneath a path.
+ // Expected paths fixed to match setupMountTable(): "user/a" was missing
+ // its leading slash and "/user/a/demo/{a,b}" omitted the "test" level;
+ // the early-return bug in compareRecords() previously masked this.
+ List<MountTable> records = mountTable.getMounts("/");
+ assertEquals(8, records.size());
+ compareRecords(records, new String[] {"/", "/tmp", "/user", "/usr/bin",
+ "/user/a", "/user/a/demo/test/a", "/user/a/demo/test/b",
+ "/user/b/file1.txt"});
+
+ records = mountTable.getMounts("/user");
+ assertEquals(5, records.size());
+ compareRecords(records, new String[] {"/user", "/user/a/demo/test/a",
+ "/user/a/demo/test/b", "/user/a", "/user/b/file1.txt"});
+
+ records = mountTable.getMounts("/user/a");
+ assertEquals(3, records.size());
+ compareRecords(records,
+ new String[] {"/user/a/demo/test/a", "/user/a/demo/test/b",
+ "/user/a"});
+
+ records = mountTable.getMounts("/tmp");
+ assertEquals(1, records.size());
+ compareRecords(records, new String[] {"/tmp"});
+ }
+
+ @Test
+ public void testRemoveSubTree()
+ throws UnsupportedOperationException, IOException {
+
+ // Removing a real mount point drops it from the tree; resolution then
+ // falls back to the nearest remaining ancestor entry.
+ // 3 mount points are present /tmp, /user, /usr
+ compareLists(mountTable.getMountPoints("/"),
+ new String[] {"user", "usr", "tmp"});
+
+ // /tmp currently points to namespace 2
+ assertEquals("2", mountTable.getDestinationForPath("/tmp/testfile.txt")
+ .getDefaultLocation().getNameserviceId());
+
+ // Remove tmp
+ mountTable.removeEntry("/tmp");
+
+ // Now 2 mount points are present /user, /usr
+ compareLists(mountTable.getMountPoints("/"),
+ new String[] {"user", "usr"});
+
+ // /tmp no longer exists, uses default namespace for mapping /
+ assertEquals("1", mountTable.getDestinationForPath("/tmp/testfile.txt")
+ .getDefaultLocation().getNameserviceId());
+ }
+
+ @Test
+ public void testRemoveVirtualNode()
+ throws UnsupportedOperationException, IOException {
+
+ // Removing a purely virtual (intermediate) node must be a no-op: /usr
+ // exists only as the parent of the real mount /usr/bin.
+ // 3 mount points are present /tmp, /user, /usr
+ compareLists(mountTable.getMountPoints("/"),
+ new String[] {"user", "usr", "tmp"});
+
+ // /usr is virtual, uses namespace 1->/
+ assertEquals("1", mountTable.getDestinationForPath("/usr/testfile.txt")
+ .getDefaultLocation().getNameserviceId());
+
+ // Attempt to remove /usr
+ mountTable.removeEntry("/usr");
+
+ // Verify the remove failed
+ compareLists(mountTable.getMountPoints("/"),
+ new String[] {"user", "usr", "tmp"});
+ }
+
+ @Test
+ public void testRemoveLeafNode()
+ throws UnsupportedOperationException, IOException {
+
+ // Removing a leaf entry must not disturb its virtual ancestors or its
+ // sibling entries.
+ // /user/a/demo/test/a currently points to namespace 1
+ assertEquals("1", mountTable.getDestinationForPath("/user/a/demo/test/a")
+ .getDefaultLocation().getNameserviceId());
+
+ // Remove /user/a/demo/test/a
+ mountTable.removeEntry("/user/a/demo/test/a");
+
+ // Now /user/a/demo/test/a points to namespace 2 using the entry for /user/a
+ assertEquals("2", mountTable.getDestinationForPath("/user/a/demo/test/a")
+ .getDefaultLocation().getNameserviceId());
+
+ // Verify the virtual node at /user/a/demo still exists and was not deleted
+ compareLists(mountTable.getMountPoints("/user/a"), new String[] {"demo"});
+
+ // Verify the sibling node was unaffected and still points to ns 3
+ assertEquals("3", mountTable.getDestinationForPath("/user/a/demo/test/b")
+ .getDefaultLocation().getNameserviceId());
+ }
+
+ @Test
+ public void testRefreshEntries()
+ throws UnsupportedOperationException, IOException {
+
+ // Initial table loaded
+ testDestination();
+ assertEquals(8, mountTable.getMounts("/").size());
+
+ // Replace table with /1 and /2
+ List<MountTable> records = new ArrayList<>();
+ Map<String, String> map1 = getMountTableEntry("1", "/");
+ records.add(MountTable.newInstance("/1", map1));
+ Map<String, String> map2 = getMountTableEntry("2", "/");
+ records.add(MountTable.newInstance("/2", map2));
+ mountTable.refreshEntries(records);
+
+ // Verify addition
+ PathLocation destination1 = mountTable.getDestinationForPath("/1");
+ RemoteLocation defaultLoc1 = destination1.getDefaultLocation();
+ assertEquals("1", defaultLoc1.getNameserviceId());
+
+ PathLocation destination2 = mountTable.getDestinationForPath("/2");
+ RemoteLocation defaultLoc2 = destination2.getDefaultLocation();
+ assertEquals("2", defaultLoc2.getNameserviceId());
+
+ // Verify existing entries were removed: the / entry is gone, so the
+ // old resolution checks must now fail.
+ assertEquals(2, mountTable.getMounts("/").size());
+ boolean assertionThrown = false;
+ try {
+ testDestination();
+ } catch (AssertionError e) {
+ assertionThrown = true;
+ }
+ // NOTE: the original also called fail() inside the try block, but that
+ // AssertionError was swallowed by the catch, so the test passed even
+ // when testDestination() did not throw. The flag alone is correct.
+ assertTrue(assertionThrown);
+ }
+
+ @Test
+ public void testMountTableScalability() throws IOException {
+
+ // Sanity-check the resolver with large flat, deep, and deep-and-wide
+ // mount trees (100k flat, 1000 deep, 100k wide).
+ List<MountTable> emptyList = new ArrayList<>();
+ mountTable.refreshEntries(emptyList);
+
+ // Add 100,000 entries in flat list
+ for (int i = 0; i < 100000; i++) {
+ Map<String, String> map = getMountTableEntry("1", "/" + i);
+ MountTable record = MountTable.newInstance("/" + i, map);
+ mountTable.addEntry(record);
+ if (i % 10000 == 0) {
+ LOG.info("Adding flat mount record {}: {}", i, record);
+ }
+ }
+
+ assertEquals(100000, mountTable.getMountPoints("/").size());
+ assertEquals(100000, mountTable.getMounts("/").size());
+
+ // Add 1000 entries in deep list
+ mountTable.refreshEntries(emptyList);
+ String parent = "/";
+ for (int i = 0; i < 1000; i++) {
+ final int index = i;
+ Map<String, String> map = getMountTableEntry("1", "/" + index);
+ if (i > 0) {
+ parent = parent + "/";
+ }
+ parent = parent + i;
+ MountTable record = MountTable.newInstance(parent, map);
+ mountTable.addEntry(record);
+ }
+
+ assertEquals(1, mountTable.getMountPoints("/").size());
+ assertEquals(1000, mountTable.getMounts("/").size());
+
+ // Add 100,000 entries in deep and wide tree
+ mountTable.refreshEntries(emptyList);
+ Random rand = new Random();
+ parent = "/" + Integer.toString(rand.nextInt());
+ int numRootTrees = 1;
+ for (int i = 0; i < 100000; i++) {
+ final int index = i;
+ Map<String, String> map = getMountTableEntry("1", "/" + index);
+ parent = parent + "/" + i;
+ // Paths longer than 2000 characters start a fresh root tree, which
+ // bounds the depth of any single branch.
+ if (parent.length() > 2000) {
+ // Start new tree
+ parent = "/" + Integer.toString(rand.nextInt());
+ numRootTrees++;
+ }
+ MountTable record = MountTable.newInstance(parent, map);
+ mountTable.addEntry(record);
+ }
+
+ assertEquals(numRootTrees, mountTable.getMountPoints("/").size());
+ assertEquals(100000, mountTable.getMounts("/").size());
+ }
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/83f2aa35/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/FederationStateStoreTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/FederationStateStoreTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/FederationStateStoreTestUtils.java
index 598b9cf..dbb8f3f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/FederationStateStoreTestUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/FederationStateStoreTestUtils.java
@@ -25,7 +25,9 @@ import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
+import java.util.HashMap;
import java.util.List;
+import java.util.Map;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
@@ -40,6 +42,7 @@ import org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreFile
import org.apache.hadoop.hdfs.server.federation.store.records.BaseRecord;
import org.apache.hadoop.hdfs.server.federation.store.records.MembershipState;
import org.apache.hadoop.hdfs.server.federation.store.records.MembershipStats;
+import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
import org.apache.hadoop.util.Time;
/**
@@ -234,6 +237,19 @@ public final class FederationStateStoreTestUtils {
return false;
}
+ /**
+ * Create one mock mount table record per nameservice, mapping
+ * /&lt;ns&gt; to &lt;ns&gt;:/target-&lt;ns&gt;.
+ *
+ * @param nameservices Nameservice identifiers to build entries for.
+ * @return One mount table record per nameservice.
+ * @throws IOException If a record cannot be instantiated.
+ */
+ public static List<MountTable> createMockMountTable(
+ List<String> nameservices) throws IOException {
+ // create table entries
+ List<MountTable> entries = new ArrayList<>();
+ for (String ns : nameservices) {
+ Map<String, String> destMap = new HashMap<>();
+ destMap.put(ns, "/target-" + ns);
+ MountTable entry = MountTable.newInstance("/" + ns, destMap);
+ entries.add(entry);
+ }
+ return entries;
+ }
+
public static MembershipState createMockRegistrationForNamenode(
String nameserviceId, String namenodeId,
FederationNamenodeServiceState state) throws IOException {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/83f2aa35/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/TestStateStoreMountTable.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/TestStateStoreMountTable.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/TestStateStoreMountTable.java
new file mode 100644
index 0000000..d30d6ba
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/TestStateStoreMountTable.java
@@ -0,0 +1,250 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.store;
+
+import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.NAMESERVICES;
+import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.verifyException;
+import static org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils.clearRecords;
+import static org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils.createMockMountTable;
+import static org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils.synchronizeRecords;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryResponse;
+import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
+import org.apache.hadoop.hdfs.server.federation.store.records.QueryResult;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * Test the basic {@link StateStoreService}
+ * {@link MountTableStore} functionality.
+ */
+public class TestStateStoreMountTable extends TestStateStoreBase {
+
+ private static List<String> nameservices;
+ private static MountTableStore mountStore;
+
+ @BeforeClass
+ public static void create() throws IOException {
+ // Use the first two nameservices from the shared test fixture.
+ nameservices = new ArrayList<>();
+ nameservices.add(NAMESERVICES[0]);
+ nameservices.add(NAMESERVICES[1]);
+ }
+
+ @Before
+ public void setup() throws IOException, InterruptedException {
+ // Fetch the mount table store API registered with the state store.
+ mountStore =
+ getStateStore().getRegisteredRecordStore(MountTableStore.class);
+ // Clear Mount table registrations
+ assertTrue(clearRecords(getStateStore(), MountTable.class));
+ }
+
+ @Test
+ public void testStateStoreDisconnected() throws Exception {
+
+ // Close the data store driver
+ getStateStore().closeDriver();
+ assertFalse(getStateStore().isDriverReady());
+
+ // Test APIs that access the store to check they throw the correct exception
+ AddMountTableEntryRequest addRequest =
+ AddMountTableEntryRequest.newInstance();
+ verifyException(mountStore, "addMountTableEntry",
+ StateStoreUnavailableException.class,
+ new Class[] {AddMountTableEntryRequest.class},
+ new Object[] {addRequest});
+
+ UpdateMountTableEntryRequest updateRequest =
+ UpdateMountTableEntryRequest.newInstance();
+ verifyException(mountStore, "updateMountTableEntry",
+ StateStoreUnavailableException.class,
+ new Class[] {UpdateMountTableEntryRequest.class},
+ new Object[] {updateRequest});
+
+ RemoveMountTableEntryRequest removeRequest =
+ RemoveMountTableEntryRequest.newInstance();
+ verifyException(mountStore, "removeMountTableEntry",
+ StateStoreUnavailableException.class,
+ new Class[] {RemoveMountTableEntryRequest.class},
+ new Object[] {removeRequest});
+
+ GetMountTableEntriesRequest getRequest =
+ GetMountTableEntriesRequest.newInstance();
+ // Force a cache refresh so the read path also observes the closed
+ // driver (presumably reads would otherwise be served from cache —
+ // confirm against MountTableStore.loadCache).
+ mountStore.loadCache(true);
+ verifyException(mountStore, "getMountTableEntries",
+ StateStoreUnavailableException.class,
+ new Class[] {GetMountTableEntriesRequest.class},
+ new Object[] {getRequest});
+ }
+
+ @Test
+ public void testSynchronizeMountTable() throws IOException {
+ // Synchronize the store with mock entries, then verify each one is
+ // retrievable through the mount table store API.
+ List<MountTable> entries = createMockMountTable(nameservices);
+ assertTrue(synchronizeRecords(getStateStore(), entries, MountTable.class));
+ // Refresh the local cache once; the original reloaded it on every
+ // loop iteration, which only added redundant store reads.
+ mountStore.loadCache(true);
+ for (MountTable e : entries) {
+ MountTable entry = getMountTableEntry(e.getSourcePath());
+ assertNotNull(entry);
+ assertEquals(e.getDefaultLocation().getDest(),
+ entry.getDefaultLocation().getDest());
+ }
+ }
+
+ @Test
+ public void testAddMountTableEntry() throws IOException {
+
+ // Add 1
+ List<MountTable> entries = createMockMountTable(nameservices);
+ // Store starts empty after setup()
+ List<MountTable> entries1 = getMountTableEntries("/").getRecords();
+ assertEquals(0, entries1.size());
+ MountTable entry0 = entries.get(0);
+ AddMountTableEntryRequest request =
+ AddMountTableEntryRequest.newInstance(entry0);
+ AddMountTableEntryResponse response =
+ mountStore.addMountTableEntry(request);
+ assertTrue(response.getStatus());
+
+ // Refresh the cache so the added record is visible to reads
+ mountStore.loadCache(true);
+ List<MountTable> entries2 = getMountTableEntries("/").getRecords();
+ assertEquals(1, entries2.size());
+ }
+
+ @Test
+ public void testRemoveMountTableEntry() throws IOException {
+
+ // Add many
+ List<MountTable> entries = createMockMountTable(nameservices);
+ synchronizeRecords(getStateStore(), entries, MountTable.class);
+ mountStore.loadCache(true);
+ List<MountTable> entries1 = getMountTableEntries("/").getRecords();
+ assertEquals(entries.size(), entries1.size());
+
+ // Remove 1
+ RemoveMountTableEntryRequest request =
+ RemoveMountTableEntryRequest.newInstance();
+ request.setSrcPath(entries.get(0).getSourcePath());
+ assertTrue(mountStore.removeMountTableEntry(request).getStatus());
+
+ // Verify remove (cache refreshed so the deletion is visible)
+ mountStore.loadCache(true);
+ List<MountTable> entries2 = getMountTableEntries("/").getRecords();
+ assertEquals(entries.size() - 1, entries2.size());
+ }
+
+ @Test
+ public void testUpdateMountTableEntry() throws IOException {
+
+ // Add 1
+ List<MountTable> entries = createMockMountTable(nameservices);
+ MountTable entry0 = entries.get(0);
+ String srcPath = entry0.getSourcePath();
+ String nsId = entry0.getDefaultLocation().getNameserviceId();
+ AddMountTableEntryRequest request =
+ AddMountTableEntryRequest.newInstance(entry0);
+ AddMountTableEntryResponse response =
+ mountStore.addMountTableEntry(request);
+ assertTrue(response.getStatus());
+
+ // Verify
+ mountStore.loadCache(true);
+ MountTable matchingEntry0 = getMountTableEntry(srcPath);
+ assertNotNull(matchingEntry0);
+ assertEquals(nsId, matchingEntry0.getDefaultLocation().getNameserviceId());
+
+ // Edit destination nameservice for source path: updating an entry
+ // replaces the whole record for that srcPath
+ Map<String, String> destMap =
+ Collections.singletonMap("testnameservice", "/");
+ MountTable replacement =
+ MountTable.newInstance(srcPath, destMap);
+ UpdateMountTableEntryRequest updateRequest =
+ UpdateMountTableEntryRequest.newInstance(replacement);
+ UpdateMountTableEntryResponse updateResponse =
+ mountStore.updateMountTableEntry(updateRequest);
+ assertTrue(updateResponse.getStatus());
+
+ // Verify
+ mountStore.loadCache(true);
+ MountTable matchingEntry1 = getMountTableEntry(srcPath);
+ assertNotNull(matchingEntry1);
+ assertEquals("testnameservice",
+ matchingEntry1.getDefaultLocation().getNameserviceId());
+ }
+
+ /**
+ * Gets an existing mount table record from the state store.
+ *
+ * @param mount The mount point of the record to look up.
+ * @return The matching record if found, null if it is not found.
+ * @throws IOException If the state store could not be accessed.
+ */
+ private MountTable getMountTableEntry(String mount) throws IOException {
+ GetMountTableEntriesRequest request =
+ GetMountTableEntriesRequest.newInstance(mount);
+ GetMountTableEntriesResponse response =
+ mountStore.getMountTableEntries(request);
+ List<MountTable> results = response.getEntries();
+ if (results.size() > 0) {
+ // First result is sorted to have the shortest mount string length
+ return results.get(0);
+ }
+ return null;
+ }
+
+ /**
+ * Fetch all mount table records beneath a root path.
+ *
+ * @param mount The root search path, enter "/" to return all mount table
+ * records.
+ *
+ * @return A query result with all mount table records found below the
+ * root mount plus the timestamp of the query.
+ *
+ * @throws IOException If the state store could not be accessed.
+ */
+ private QueryResult<MountTable> getMountTableEntries(String mount)
+ throws IOException {
+ if (mount == null) {
+ throw new IOException("Please specify a root search path");
+ }
+ GetMountTableEntriesRequest request =
+ GetMountTableEntriesRequest.newInstance();
+ request.setSrcPath(mount);
+ GetMountTableEntriesResponse response =
+ mountStore.getMountTableEntries(request);
+ List<MountTable> records = response.getEntries();
+ long timestamp = response.getTimestamp();
+ return new QueryResult<MountTable>(records, timestamp);
+ }
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/83f2aa35/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreDriverBase.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreDriverBase.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreDriverBase.java
index dc51ee9..8239fb1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreDriverBase.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreDriverBase.java
@@ -27,6 +27,7 @@ import java.io.IOException;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.Collection;
+import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -39,6 +40,7 @@ import org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUt
import org.apache.hadoop.hdfs.server.federation.store.StateStoreService;
import org.apache.hadoop.hdfs.server.federation.store.records.BaseRecord;
import org.apache.hadoop.hdfs.server.federation.store.records.MembershipState;
+import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
import org.apache.hadoop.hdfs.server.federation.store.records.Query;
import org.apache.hadoop.hdfs.server.federation.store.records.QueryResult;
import org.junit.AfterClass;
@@ -109,6 +111,11 @@ public class TestStateStoreDriverBase {
generateRandomString(), generateRandomString(),
generateRandomString(), generateRandomString(),
generateRandomEnum(FederationNamenodeServiceState.class), false);
+ } else if (recordClass == MountTable.class) {
+ String src = "/" + generateRandomString();
+ Map<String, String> destMap = Collections.singletonMap(
+ generateRandomString(), "/" + generateRandomString());
+ return (T) MountTable.newInstance(src, destMap);
}
return null;
@@ -155,6 +162,7 @@ public class TestStateStoreDriverBase {
public static void removeAll(StateStoreDriver driver) throws IOException {
driver.removeAll(MembershipState.class);
+ driver.removeAll(MountTable.class);
}
public <T extends BaseRecord> void testInsert(
@@ -347,22 +355,26 @@ public class TestStateStoreDriverBase {
public void testInsert(StateStoreDriver driver)
throws IllegalArgumentException, IllegalAccessException, IOException {
testInsert(driver, MembershipState.class);
+ testInsert(driver, MountTable.class);
}
public void testPut(StateStoreDriver driver)
throws IllegalArgumentException, ReflectiveOperationException,
IOException, SecurityException {
testPut(driver, MembershipState.class);
+ testPut(driver, MountTable.class);
}
public void testRemove(StateStoreDriver driver)
throws IllegalArgumentException, IllegalAccessException, IOException {
testRemove(driver, MembershipState.class);
+ testRemove(driver, MountTable.class);
}
public void testFetchErrors(StateStoreDriver driver)
throws IllegalArgumentException, IllegalAccessException, IOException {
testFetchErrors(driver, MembershipState.class);
+ testFetchErrors(driver, MountTable.class);
}
/**
http://git-wip-us.apache.org/repos/asf/hadoop/blob/83f2aa35/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/records/TestMountTable.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/records/TestMountTable.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/records/TestMountTable.java
new file mode 100644
index 0000000..b6f91cf
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/records/TestMountTable.java
@@ -0,0 +1,176 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.store.records;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.LinkedHashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation;
+import org.apache.hadoop.hdfs.server.federation.resolver.order.DestinationOrder;
+import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreSerializer;
+import org.junit.Test;
+
+/**
+ * Test the Mount Table entry in the State Store.
+ */
+public class TestMountTable {
+
+ private static final String SRC = "/test";
+ private static final String DST_NS_0 = "ns0";
+ private static final String DST_NS_1 = "ns1";
+ private static final String DST_PATH_0 = "/path1";
+ private static final String DST_PATH_1 = "/path/path2";
+ private static final List<RemoteLocation> DST = new LinkedList<>();
+ static {
+ DST.add(new RemoteLocation(DST_NS_0, DST_PATH_0));
+ DST.add(new RemoteLocation(DST_NS_1, DST_PATH_1));
+ }
+ private static final Map<String, String> DST_MAP = new LinkedHashMap<>();
+ static {
+ DST_MAP.put(DST_NS_0, DST_PATH_0);
+ DST_MAP.put(DST_NS_1, DST_PATH_1);
+ }
+
+ private static final long DATE_CREATED = 100;
+ private static final long DATE_MOD = 200;
+
+
+ @Test
+ public void testGetterSetter() throws IOException {
+
+ MountTable record = MountTable.newInstance(SRC, DST_MAP);
+
+ validateDestinations(record);
+ assertEquals(SRC, record.getSourcePath());
+ assertEquals(DST, record.getDestinations());
+ assertTrue(DATE_CREATED > 0);
+ assertTrue(DATE_MOD > 0);
+
+ MountTable record2 =
+ MountTable.newInstance(SRC, DST_MAP, DATE_CREATED, DATE_MOD);
+
+ validateDestinations(record2);
+ assertEquals(SRC, record2.getSourcePath());
+ assertEquals(DST, record2.getDestinations());
+ assertEquals(DATE_CREATED, record2.getDateCreated());
+ assertEquals(DATE_MOD, record2.getDateModified());
+ assertFalse(record.isReadOnly());
+ assertEquals(DestinationOrder.HASH, record.getDestOrder());
+ }
+
+ @Test
+ public void testSerialization() throws IOException {
+ testSerialization(DestinationOrder.RANDOM);
+ testSerialization(DestinationOrder.HASH);
+ testSerialization(DestinationOrder.LOCAL);
+ }
+
+ private void testSerialization(final DestinationOrder order)
+ throws IOException {
+
+ MountTable record = MountTable.newInstance(
+ SRC, DST_MAP, DATE_CREATED, DATE_MOD);
+ record.setReadOnly(true);
+ record.setDestOrder(order);
+
+ StateStoreSerializer serializer = StateStoreSerializer.getSerializer();
+ String serializedString = serializer.serializeString(record);
+ MountTable record2 =
+ serializer.deserialize(serializedString, MountTable.class);
+
+ validateDestinations(record2);
+ assertEquals(SRC, record2.getSourcePath());
+ assertEquals(DST, record2.getDestinations());
+ assertEquals(DATE_CREATED, record2.getDateCreated());
+ assertEquals(DATE_MOD, record2.getDateModified());
+ assertTrue(record2.isReadOnly());
+ assertEquals(order, record2.getDestOrder());
+ }
+
+ @Test
+ public void testReadOnly() throws IOException {
+
+ Map<String, String> dest = new HashMap<>();
+ dest.put(DST_NS_0, DST_PATH_0);
+ dest.put(DST_NS_1, DST_PATH_1);
+ MountTable record1 = MountTable.newInstance(SRC, dest);
+ record1.setReadOnly(true);
+
+ validateDestinations(record1);
+ assertEquals(SRC, record1.getSourcePath());
+ assertEquals(DST, record1.getDestinations());
+ assertTrue(DATE_CREATED > 0);
+ assertTrue(DATE_MOD > 0);
+ assertTrue(record1.isReadOnly());
+
+ MountTable record2 = MountTable.newInstance(
+ SRC, DST_MAP, DATE_CREATED, DATE_MOD);
+ record2.setReadOnly(true);
+
+ validateDestinations(record2);
+ assertEquals(SRC, record2.getSourcePath());
+ assertEquals(DST, record2.getDestinations());
+ assertEquals(DATE_CREATED, record2.getDateCreated());
+ assertEquals(DATE_MOD, record2.getDateModified());
+ assertTrue(record2.isReadOnly());
+ }
+
+ @Test
+ public void testOrder() throws IOException {
+ testOrder(DestinationOrder.HASH);
+ testOrder(DestinationOrder.LOCAL);
+ testOrder(DestinationOrder.RANDOM);
+ }
+
+ private void testOrder(final DestinationOrder order)
+ throws IOException {
+
+ MountTable record = MountTable.newInstance(
+ SRC, DST_MAP, DATE_CREATED, DATE_MOD);
+ record.setDestOrder(order);
+
+ validateDestinations(record);
+ assertEquals(SRC, record.getSourcePath());
+ assertEquals(DST, record.getDestinations());
+ assertEquals(DATE_CREATED, record.getDateCreated());
+ assertEquals(DATE_MOD, record.getDateModified());
+ assertEquals(order, record.getDestOrder());
+ }
+
+ private void validateDestinations(MountTable record) {
+
+ assertEquals(SRC, record.getSourcePath());
+ assertEquals(2, record.getDestinations().size());
+
+ RemoteLocation location1 = record.getDestinations().get(0);
+ assertEquals(DST_NS_0, location1.getNameserviceId());
+ assertEquals(DST_PATH_0, location1.getDest());
+
+ RemoteLocation location2 = record.getDestinations().get(1);
+ assertEquals(DST_NS_1, location2.getNameserviceId());
+ assertEquals(DST_PATH_1, location2.getDest());
+ }
+}
\ No newline at end of file
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org