Posted to common-commits@hadoop.apache.org by ae...@apache.org on 2015/09/29 22:30:51 UTC

[49/50] [abbrv] hadoop git commit: Merge branch 'trunk' into HDFS-7240

http://git-wip-us.apache.org/repos/asf/hadoop/blob/28d313d1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/StorageContainerMap.java
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/StorageContainerMap.java
index 1496da2,0000000..cfacd5f
mode 100644,000000..100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/StorageContainerMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/StorageContainerMap.java
@@@ -1,124 -1,0 +1,130 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +
 +package org.apache.hadoop.storagecontainer;
 +
 +import com.google.common.annotations.VisibleForTesting;
 +import com.google.common.base.Preconditions;
 +import org.apache.hadoop.hdfs.protocol.Block;
 +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
 +import org.apache.hadoop.util.GSet;
 +
++import java.util.Collection;
 +import java.util.HashMap;
 +import java.util.Iterator;
 +import java.util.Map;
 +
 +/**
 + * Maps a storage container to its location on datanodes. Similar to
 + * {@link org.apache.hadoop.hdfs.server.blockmanagement.BlocksMap}
 + */
 +public class StorageContainerMap implements GSet<Block, BlockInfo> {
 +
 +  private Map<Long, BitWiseTrieContainerMap> containerPrefixMap
 +      = new HashMap<Long, BitWiseTrieContainerMap>();
 +  private int size;
 +  public static final int PREFIX_LENGTH = 28;
 +
 +  @Override
 +  public int size() {
 +    // TODO: update size when new containers are created
 +    return size;
 +  }
 +
 +  @Override
 +  public boolean contains(Block key) {
 +    return getBlockInfoContiguous(key.getBlockId()) != null;
 +  }
 +
 +  @Override
 +  public BlockInfoContiguous get(Block key) {
 +    return getBlockInfoContiguous(key.getBlockId());
 +  }
 +
 +  @Override
 +  public BlockInfoContiguous put(BlockInfo element) {
 +    BlockInfoContiguous info = getBlockInfoContiguous(element.getBlockId());
 +    if (info == null) {
 +      throw new IllegalStateException(
 +          "Cannot insert a container directly; containers are created by splitting");
 +    }
 +    // TODO: replace
 +    return info;
 +  }
 +
 +  @Override
 +  public BlockInfoContiguous remove(Block key) {
 +    // Removal is not supported; return the existing info, if any
 +    return getBlockInfoContiguous(key.getBlockId());
 +  }
 +
 +  @Override
 +  public void clear() {
 +    containerPrefixMap.clear();
 +  }
 +
 +  @Override
++  public Collection<BlockInfo> values() {
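++    // TODO: Support values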
++    return null;
++  }
++
++  @Override
 +  public Iterator<BlockInfo> iterator() {
 +    // TODO: Support iteration
 +    throw new UnsupportedOperationException("Iteration is not yet supported");
 +  }
 +
 +  /**
 +   * Initialize a new trie for a new bucket.
 +   */
 +  public synchronized void initPrefix(long prefix) {
 +    Preconditions.checkArgument((prefix >>> PREFIX_LENGTH) == 0,
 +        "Prefix shouldn't be longer than "+PREFIX_LENGTH+" bits");
 +    if (getTrieMap(prefix << (64 - PREFIX_LENGTH)) != null) {
 +      // Already initialized
 +      return;
 +    }
 +    BitWiseTrieContainerMap newTrie = new BitWiseTrieContainerMap(prefix,
 +        PREFIX_LENGTH);
 +    containerPrefixMap.put(prefix, newTrie);
 +  }
 +
 +  @VisibleForTesting
 +  synchronized BitWiseTrieContainerMap getTrieMap(long containerId) {
 +    long prefix = containerId >>> (64 - PREFIX_LENGTH);
 +    return containerPrefixMap.get(prefix);
 +  }
 +
 +  @VisibleForTesting
 +  BlockInfoContiguous getBlockInfoContiguous(long containerId) {
 +    BitWiseTrieContainerMap map = getTrieMap(containerId);
 +    if (map == null) {
 +      return null;
 +    }
 +    return map.get(containerId);
 +  }
 +
 +  public void splitContainer(long key) {
 +    BitWiseTrieContainerMap map = getTrieMap(key);
 +    if (map == null) {
 +      throw new IllegalArgumentException("No container trie exists for id " + key);
 +    }
 +    map.addBit(key);
 +  }
 +}

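For orientation, here is a minimal usage sketch of the 28-bit prefix bucketing implemented above. The container id is hypothetical, and BitWiseTrieContainerMap internals are not part of this diff:

    // The top PREFIX_LENGTH (28) bits of a 64-bit container id select the
    // trie bucket; the remaining 36 bits are resolved inside the trie.
    long containerId = 0x1234567890ABCDEFL;                  // hypothetical id
    long prefix = containerId >>> (64 - StorageContainerMap.PREFIX_LENGTH);

    StorageContainerMap scm = new StorageContainerMap();
    scm.initPrefix(prefix);               // create the trie for this bucket
    // Lookups for any id sharing those top 28 bits route to the same trie;
    // this returns false until the trie actually holds an entry.
    boolean present = scm.contains(new Block(containerId));
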
http://git-wip-us.apache.org/repos/asf/hadoop/blob/28d313d1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/StorageContainerNameService.java
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/StorageContainerNameService.java
index 7d1eb66,0000000..60e9c20
mode 100644,000000..100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/StorageContainerNameService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/storagecontainer/StorageContainerNameService.java
@@@ -1,162 -1,0 +1,170 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +
 +package org.apache.hadoop.storagecontainer;
 +
 +import org.apache.hadoop.hdfs.protocol.Block;
++import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
 +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 +import org.apache.hadoop.hdfs.server.namenode.CacheManager;
 +import org.apache.hadoop.hdfs.server.namenode.NameNode;
 +import org.apache.hadoop.hdfs.server.namenode.Namesystem;
++import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 +import org.apache.hadoop.ipc.StandbyException;
 +import org.apache.hadoop.security.AccessControlException;
 +
 +import java.util.concurrent.locks.ReentrantReadWriteLock;
 +
 +/**
 + * Namesystem implementation to be used by StorageContainerManager.
 + */
 +public class StorageContainerNameService implements Namesystem {
 +
 +  private ReentrantReadWriteLock coarseLock = new ReentrantReadWriteLock();
 +  private String blockPoolId;
 +  private volatile boolean serviceRunning = true;
 +
 +  public void shutdown() {
 +    serviceRunning = false;
 +  }
 +
 +  @Override
 +  public boolean isRunning() {
 +    return serviceRunning;
 +  }
 +
 +  @Override
 +  public void checkSuperuserPrivilege() throws AccessControlException {
 +    // TBD
 +  }
 +
 +  @Override
 +  public String getBlockPoolId() {
 +    return blockPoolId;
 +  }
 +
 +  public void setBlockPoolId(String id) {
 +    this.blockPoolId = id;
 +  }
 +
 +  @Override
 +  public boolean isInStandbyState() {
 +    // HA mode is not supported
 +    return false;
 +  }
 +
 +  @Override
 +  public boolean isGenStampInFuture(Block block) {
 +    // HA mode is not supported
 +    return false;
 +  }
 +
 +  @Override
++  public BlockCollection getBlockCollection(long id) {
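++    // TBD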
++    return null;
++  }
++
++  @Override
 +  public void adjustSafeModeBlockTotals(int deltaSafe, int deltaTotal) {
 +    // TBD
 +  }
 +
 +  @Override
 +  public void checkOperation(NameNode.OperationCategory read)
 +    throws StandbyException {
 +    // HA mode is not supported
 +  }
 +
 +  @Override
 +  public boolean isInSnapshot(BlockInfo blockInfo) {
 +    // Snapshots not supported
 +    return false;
 +  }
 +
 +  @Override
 +  public CacheManager getCacheManager() {
 +    // Cache Management is not supported
 +    return null;
 +  }
 +
 +  @Override
++  public HAContext getHAContext() {
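++    // HA mode is not supported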
++    return null;
++  }
++
++  @Override
 +  public void readLock() {
 +    coarseLock.readLock().lock();
 +  }
 +
 +  @Override
 +  public void readUnlock() {
 +    coarseLock.readLock().unlock();
 +  }
 +
 +  @Override
 +  public boolean hasReadLock() {
 +    return coarseLock.getReadHoldCount() > 0 || hasWriteLock();
 +  }
 +
 +  @Override
 +  public void writeLock() {
 +    coarseLock.writeLock().lock();
 +  }
 +
 +  @Override
 +  public void writeLockInterruptibly() throws InterruptedException {
 +    coarseLock.writeLock().lockInterruptibly();
 +  }
 +
 +  @Override
 +  public void writeUnlock() {
 +    coarseLock.writeLock().unlock();
 +  }
 +
 +  @Override
 +  public boolean hasWriteLock() {
 +    return coarseLock.isWriteLockedByCurrentThread();
 +  }
 +
 +  @Override
 +  public void checkSafeMode() {
 +    // TBD
 +  }
 +
 +  @Override
 +  public boolean isInSafeMode() {
 +    return false;
 +  }
 +
 +  @Override
 +  public boolean isInStartupSafeMode() {
 +    return false;
 +  }
 +
-   @Override
-   public boolean isPopulatingReplQueues() {
-     return false;
-   }
 +
 +  @Override
 +  public void incrementSafeBlockCount(int replication) {
 +    // Do nothing
 +  }
 +
 +  @Override
 +  public void decrementSafeBlockCount(BlockInfo b) {
 +    // Do nothing
 +  }
 +}

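StorageContainerNameService exposes the same coarse read/write locking that callers of a Namesystem expect, so work would be bracketed in the usual try/finally pattern. A minimal sketch, with a hypothetical block pool id:

    StorageContainerNameService ns = new StorageContainerNameService();
    ns.setBlockPoolId("BP-0123456789");    // hypothetical block pool id
    ns.readLock();
    try {
      // read-side work against the container namespace goes here
      assert ns.hasReadLock();
    } finally {
      ns.readUnlock();
    }
    ns.shutdown();                         // flips isRunning() to false
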
http://git-wip-us.apache.org/repos/asf/hadoop/blob/28d313d1/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/HdfsServer.proto
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/main/proto/HdfsServer.proto
index 0000000,3b60e51..f2cc36c
mode 000000,100644..100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/HdfsServer.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/HdfsServer.proto
@@@ -1,0 -1,201 +1,209 @@@
+ /**
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ 
+ /**
+  * These .proto interfaces are private and stable.
+  * Please see http://wiki.apache.org/hadoop/Compatibility
+  * for what changes are allowed for a *stable* .proto interface.
+  */
+ 
+ // This file contains protocol buffers that are used throughout HDFS -- i.e.
+ // by the client, server, and data transfer protocols.
+ 
+ 
+ option java_package = "org.apache.hadoop.hdfs.protocol.proto";
+ option java_outer_classname = "HdfsServerProtos";
+ option java_generate_equals_and_hash = true;
+ package hadoop.hdfs;
+ 
+ import "hdfs.proto";
+ 
+ /**
+  * A list of storage IDs.
+  */
+ message StorageUuidsProto {
+   repeated string storageUuids = 1;
+ }
+ 
+ /**
+  * Block access token information
+  */
+ message BlockKeyProto {
+   required uint32 keyId = 1;      // Key identifier
+   required uint64 expiryDate = 2; // Expiry time in milliseconds
+   optional bytes keyBytes = 3;    // Key secret
+ }
+ 
+ /**
+  * Current key and set of block keys at the namenode.
+  */
+ message ExportedBlockKeysProto {
+   required bool isBlockTokenEnabled = 1;
+   required uint64 keyUpdateInterval = 2;
+   required uint64 tokenLifeTime = 3;
+   required BlockKeyProto currentKey = 4;
+   repeated BlockKeyProto allKeys = 5;
+ }
+ 
+ /**
+  * A block and the datanodes where it is located
+  */
+ message BlockWithLocationsProto {
+   required BlockProto block = 1;   // Block
+   repeated string datanodeUuids = 2; // Datanodes with replicas of the block
+   repeated string storageUuids = 3;  // Storages with replicas of the block
+   repeated StorageTypeProto storageTypes = 4;
+ }
+ 
+ /**
+  * List of block with locations
+  */
+ message BlocksWithLocationsProto {
+   repeated BlockWithLocationsProto blocks = 1;
+ }
+ 
+ /**
+  * Editlog information with available transactions
+  */
+ message RemoteEditLogProto {
+   required uint64 startTxId = 1;  // Starting available edit log transaction
+   required uint64 endTxId = 2;    // Ending available edit log transaction
+   optional bool isInProgress = 3 [default = false];
+ }
+ 
+ /**
+  * Enumeration of editlogs available on a remote namenode
+  */
+ message RemoteEditLogManifestProto {
+   repeated RemoteEditLogProto logs = 1;
+ }
+ 
+ /**
+  * Namespace information that describes namespace on a namenode
+  */
+ message NamespaceInfoProto {
+   required string buildVersion = 1;         // Software revision version (e.g. an svn or git revision)
+   required uint32 unused = 2;               // Retained for backward compatibility
+   required string blockPoolID = 3;          // block pool used by the namespace
+   required StorageInfoProto storageInfo = 4;// Node information
+   required string softwareVersion = 5;      // Software version number (e.g. 2.0.0)
+   optional uint64 capabilities = 6 [default = 0]; // feature flags
+ }
+ 
+ /**
+  * State of a block replica at a datanode
+  */
+ enum ReplicaStateProto {
+   FINALIZED = 0;  // State of a replica when it is not modified
+   RBW = 1;        // State of replica that is being written to
+   RWR = 2;        // State of replica that is waiting to be recovered
+   RUR = 3;        // State of replica that is under recovery
+   TEMPORARY = 4;  // State of replica that is created for replication
+ }
+ 
+ /**
+  * A block that needs to be recovered at a given location
+  */
+ message RecoveringBlockProto {
+   required uint64 newGenStamp = 1;        // New genstamp post recovery
+   required LocatedBlockProto block = 2;   // Block to be recovered
+   optional BlockProto truncateBlock = 3;  // New block for recovery (truncate)
+ }
+ 
+ /**
+  * Unique signature to identify checkpoint transactions.
+  */
+ message CheckpointSignatureProto {
+   required string blockPoolId = 1;
+   required uint64 mostRecentCheckpointTxId = 2;
+   required uint64 curSegmentTxId = 3;
+   required StorageInfoProto storageInfo = 4;
+ }
+ 
+ /**
+  * Command returned from primary to checkpointing namenode.
+  * This command has checkpoint signature that identifies
+  * checkpoint transaction and is needed for further
+  * communication related to checkpointing.
+  */
+ message CheckpointCommandProto {
+   // Unique signature to identify checkpoint transaction
+   required CheckpointSignatureProto signature = 1;
+ 
+   // If true, return transfer image to primary upon the completion of checkpoint
+   required bool needToReturnImage = 2;
+ }
+ 
+ /**
+  * Command sent from one namenode to another namenode.
+  */
+ message NamenodeCommandProto {
+   enum Type {
+     NamenodeCommand = 0;      // Base command
+     CheckPointCommand = 1;    // Check point command
+   }
+   required uint32 action = 1;
+   required Type type = 2;
+   optional CheckpointCommandProto checkpointCmd = 3;
+ }
+ 
+ /**
+  * void request
+  */
+ message VersionRequestProto {
+ }
+ 
+ /**
+  * Version response from namenode.
+  */
+ message VersionResponseProto {
+   required NamespaceInfoProto info = 1;
+ }
+ 
+ /**
+  * Common node information shared by all the nodes in the cluster
+  */
+ message StorageInfoProto {
+   required uint32 layoutVersion = 1; // Layout version of the file system
+   required uint32 namespceID = 2;    // File system namespace ID
+   required string clusterID = 3;     // ID of the cluster
+   required uint64 cTime = 4;         // File system creation time
++
++  enum NodeTypeProto {
++    NAME_NODE = 1;
++    DATA_NODE = 2;
++    JOURNAL_NODE = 3;
++    STORAGE_CONTAINER_SERVICE = 4;
++  }
++  optional NodeTypeProto nodeType = 5;
+ }
+ 
+ /**
+  * Information sent by a namenode to identify itself to the primary namenode.
+  */
+ message NamenodeRegistrationProto {
+   required string rpcAddress = 1;    // host:port of the namenode RPC address
+   required string httpAddress = 2;   // host:port of the namenode http server
+   enum NamenodeRoleProto {
+     NAMENODE = 1;
+     BACKUP = 2;
+     CHECKPOINT = 3;
+   }
+   required StorageInfoProto storageInfo = 3;  // Node information
+   optional NamenodeRoleProto role = 4 [default = NAMENODE];        // Namenode role
+ }

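Assuming the standard protobuf-generated Java builders (outer class HdfsServerProtos, per the java_outer_classname option above), populating the new optional nodeType field on StorageInfoProto would look roughly like the sketch below; every value is hypothetical:

    import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto;

    StorageInfoProto info = StorageInfoProto.newBuilder()
        .setLayoutVersion(60)                 // hypothetical layout version
        .setNamespceID(42)                    // field name as declared above
        .setClusterID("CID-example")
        .setCTime(0L)
        .setNodeType(StorageInfoProto.NodeTypeProto.STORAGE_CONTAINER_SERVICE)
        .build();

Because nodeType is optional with no default, readers that predate this change simply ignore the field, which is why it can be added without breaking the stable .proto interface noted above.
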
http://git-wip-us.apache.org/repos/asf/hadoop/blob/28d313d1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/28d313d1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
----------------------------------------------------------------------