Posted to common-commits@hadoop.apache.org by wh...@apache.org on 2015/04/20 09:36:44 UTC

[2/3] hadoop git commit: HDFS-8169. Move LocatedBlocks and related classes to hdfs-client. Contributed by Haohui Mai.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d3a4d51/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java
deleted file mode 100644
index 2dc1d04..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java
+++ /dev/null
@@ -1,243 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.protocol;
-
-import java.io.*;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.io.*;
-
-/**************************************************
- * A Block is a Hadoop FS primitive, identified by a
- * long block ID.
- *
- **************************************************/
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public class Block implements Writable, Comparable<Block> {
-  public static final String BLOCK_FILE_PREFIX = "blk_";
-  public static final String METADATA_EXTENSION = ".meta";
-  static {                                      // register a ctor
-    WritableFactories.setFactory
-      (Block.class,
-       new WritableFactory() {
-         @Override
-         public Writable newInstance() { return new Block(); }
-       });
-  }
-
-  public static final Pattern blockFilePattern = Pattern
-      .compile(BLOCK_FILE_PREFIX + "(-??\\d++)$");
-  public static final Pattern metaFilePattern = Pattern
-      .compile(BLOCK_FILE_PREFIX + "(-??\\d++)_(\\d++)\\" + METADATA_EXTENSION
-          + "$");
-  public static final Pattern metaOrBlockFilePattern = Pattern
-      .compile(BLOCK_FILE_PREFIX + "(-??\\d++)(_(\\d++)\\" + METADATA_EXTENSION
-          + ")?$");
-
-  public static boolean isBlockFilename(File f) {
-    String name = f.getName();
-    return blockFilePattern.matcher(name).matches();
-  }
-
-  public static long filename2id(String name) {
-    Matcher m = blockFilePattern.matcher(name);
-    return m.matches() ? Long.parseLong(m.group(1)) : 0;
-  }
-
-  public static boolean isMetaFilename(String name) {
-    return metaFilePattern.matcher(name).matches();
-  }
-
-  public static File metaToBlockFile(File metaFile) {
-    return new File(metaFile.getParent(), metaFile.getName().substring(
-        0, metaFile.getName().lastIndexOf('_')));
-  }
-
-  /**
-   * Get the generation stamp from the name of the meta file
-   */
-  public static long getGenerationStamp(String metaFile) {
-    Matcher m = metaFilePattern.matcher(metaFile);
-    return m.matches() ? Long.parseLong(m.group(2))
-        : HdfsConstantsClient.GRANDFATHER_GENERATION_STAMP;
-  }
-
-  /**
-   * Get the blockId from the name of the meta or block file
-   */
-  public static long getBlockId(String metaOrBlockFile) {
-    Matcher m = metaOrBlockFilePattern.matcher(metaOrBlockFile);
-    return m.matches() ? Long.parseLong(m.group(1)) : 0;
-  }
-
-  private long blockId;
-  private long numBytes;
-  private long generationStamp;
-
-  public Block() {this(0, 0, 0);}
-
-  public Block(final long blkid, final long len, final long generationStamp) {
-    set(blkid, len, generationStamp);
-  }
-
-  public Block(final long blkid) {
-    this(blkid, 0, HdfsConstantsClient.GRANDFATHER_GENERATION_STAMP);
-  }
-
-  public Block(Block blk) {
-    this(blk.blockId, blk.numBytes, blk.generationStamp);
-  }
-
-  /**
-   * Find the block ID from the given filename
-   */
-  public Block(File f, long len, long genstamp) {
-    this(filename2id(f.getName()), len, genstamp);
-  }
-
-  public void set(long blkid, long len, long genStamp) {
-    this.blockId = blkid;
-    this.numBytes = len;
-    this.generationStamp = genStamp;
-  }
-  /** Get the block ID. */
-  public long getBlockId() {
-    return blockId;
-  }
-  
-  public void setBlockId(long bid) {
-    blockId = bid;
-  }
-
-  /** Get the block file name: BLOCK_FILE_PREFIX followed by the block ID. */
-  public String getBlockName() {
-    return BLOCK_FILE_PREFIX + String.valueOf(blockId);
-  }
-
-  /** Get the number of bytes in the block. */
-  public long getNumBytes() {
-    return numBytes;
-  }
-  public void setNumBytes(long len) {
-    this.numBytes = len;
-  }
-
-  public long getGenerationStamp() {
-    return generationStamp;
-  }
-  
-  public void setGenerationStamp(long stamp) {
-    generationStamp = stamp;
-  }
-
-  /** The block name followed by "_" and the generation stamp. */
-  @Override
-  public String toString() {
-    return getBlockName() + "_" + getGenerationStamp();
-  }
-
-  public void appendStringTo(StringBuilder sb) {
-    sb.append(BLOCK_FILE_PREFIX)
-      .append(blockId)
-      .append("_")
-      .append(getGenerationStamp());
-  }
-
-
-  /////////////////////////////////////
-  // Writable
-  /////////////////////////////////////
-  @Override // Writable
-  public void write(DataOutput out) throws IOException {
-    writeHelper(out);
-  }
-
-  @Override // Writable
-  public void readFields(DataInput in) throws IOException {
-    readHelper(in);
-  }
-  
-  final void writeHelper(DataOutput out) throws IOException {
-    out.writeLong(blockId);
-    out.writeLong(numBytes);
-    out.writeLong(generationStamp);
-  }
-  
-  final void readHelper(DataInput in) throws IOException {
-    this.blockId = in.readLong();
-    this.numBytes = in.readLong();
-    this.generationStamp = in.readLong();
-    if (numBytes < 0) {
-      throw new IOException("Unexpected block size: " + numBytes);
-    }
-  }
-  
-  // write only the identifier part of the block
-  public void writeId(DataOutput out) throws IOException {
-    out.writeLong(blockId);
-    out.writeLong(generationStamp);
-  }
-
-  // Read only the identifier part of the block
-  public void readId(DataInput in) throws IOException {
-    this.blockId = in.readLong();
-    this.generationStamp = in.readLong();
-  }
-
-  @Override // Comparable
-  public int compareTo(Block b) {
-    return blockId < b.blockId ? -1 :
-           blockId > b.blockId ? 1 : 0;
-  }
-
-  @Override // Object
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (!(o instanceof Block)) {
-      return false;
-    }
-    return compareTo((Block)o) == 0;
-  }
-  
-  /**
-   * @return true if the two blocks have the same block ID and the same
-   * generation stamp, or if both blocks are null.
-   */
-  public static boolean matchingIdAndGenStamp(Block a, Block b) {
-    if (a == b) return true; // same block, or both null
-    if (a == null || b == null) return false; // only one null
-    return a.blockId == b.blockId &&
-           a.generationStamp == b.generationStamp;
-  }
-
-  @Override // Object
-  public int hashCode() {
-    //GenerationStamp is IRRELEVANT and should not be used here
-    return (int)(blockId^(blockId>>>32));
-  }
-}
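
For reference, a minimal sketch of how the filename helpers above fit together
(not part of this patch; the block ID, length and generation stamp are
illustrative values):

    import org.apache.hadoop.hdfs.protocol.Block;

    public class BlockNamingExample {
      public static void main(String[] args) {
        // A block with ID 42, length 1024 bytes, generation stamp 1001.
        Block blk = new Block(42L, 1024L, 1001L);

        // getBlockName() yields "blk_42"; a meta file adds "_<genstamp>.meta".
        String blockFile = blk.getBlockName();          // "blk_42"
        String metaFile = blockFile + "_1001.meta";     // matches metaFilePattern

        // The static helpers invert the on-disk naming scheme.
        long id = Block.filename2id(blockFile);         // 42
        long gs = Block.getGenerationStamp(metaFile);   // 1001
        boolean meta = Block.isMetaFilename(metaFile);  // true

        System.out.println(id + " " + gs + " " + meta);
      }
    }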

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d3a4d51/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
deleted file mode 100644
index f91696f..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
+++ /dev/null
@@ -1,279 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfs.protocol;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-import com.google.common.annotations.VisibleForTesting;
-
-/**
- * This class represents the primary identifier for a Datanode.
- * Datanodes are identified by how they can be contacted (hostname
- * and ports) and their storage ID, a unique identifier that associates
- * the Datanode's blocks with a particular Datanode.
- *
- * {@link DatanodeInfo#getName()} should be used to get the network
- * location (for topology) of a datanode, instead of using
- * {@link DatanodeID#getXferAddr()} here. Helpers are defined below
- * for each context in which a DatanodeID is used.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public class DatanodeID implements Comparable<DatanodeID> {
-  public static final DatanodeID[] EMPTY_ARRAY = {};
-
-  private String ipAddr;     // IP address
-  private String hostName;   // hostname claimed by datanode
-  private String peerHostName; // hostname from the actual connection
-  private int xferPort;      // data streaming port
-  private int infoPort;      // info server port
-  private int infoSecurePort; // secure (HTTPS) info server port
-  private int ipcPort;       // IPC server port
-  private String xferAddr;
-
-  /**
-   * UUID identifying a given datanode. For upgraded Datanodes this is the
-   * same as the StorageID that was previously used by this Datanode. 
-   * For newly formatted Datanodes it is a UUID.
-   */
-  private final String datanodeUuid;
-
-  public DatanodeID(DatanodeID from) {
-    this(from.getDatanodeUuid(), from);
-  }
-
-  @VisibleForTesting
-  public DatanodeID(String datanodeUuid, DatanodeID from) {
-    this(from.getIpAddr(),
-        from.getHostName(),
-        datanodeUuid,
-        from.getXferPort(),
-        from.getInfoPort(),
-        from.getInfoSecurePort(),
-        from.getIpcPort());
-    this.peerHostName = from.getPeerHostName();
-  }
-
-  /**
-   * Create a DatanodeID
-   * @param ipAddr IP
-   * @param hostName hostname
-   * @param datanodeUuid the datanode UUID: a UUID for newly formatted
-   *                     Datanodes, possibly the storage ID for pre-UUID
-   *                     datanodes. Null if unknown, e.g. if this is a new
-   *                     datanode, in which case the namenode will assign a
-   *                     new UUID.
-   * @param xferPort data transfer port
-   * @param infoPort info server port
-   * @param infoSecurePort secure (HTTPS) info server port
-   * @param ipcPort ipc server port
-   */
-  public DatanodeID(String ipAddr, String hostName, String datanodeUuid,
-      int xferPort, int infoPort, int infoSecurePort, int ipcPort) {
-    setIpAndXferPort(ipAddr, xferPort);
-    this.hostName = hostName;
-    this.datanodeUuid = checkDatanodeUuid(datanodeUuid);
-    this.infoPort = infoPort;
-    this.infoSecurePort = infoSecurePort;
-    this.ipcPort = ipcPort;
-  }
-  
-  public void setIpAddr(String ipAddr) {
-    //updated during registration, preserve former xferPort
-    setIpAndXferPort(ipAddr, xferPort);
-  }
-
-  private void setIpAndXferPort(String ipAddr, int xferPort) {
-    // build xferAddr string to reduce cost of frequent use
-    this.ipAddr = ipAddr;
-    this.xferPort = xferPort;
-    this.xferAddr = ipAddr + ":" + xferPort;
-  }
-
-  public void setPeerHostName(String peerHostName) {
-    this.peerHostName = peerHostName;
-  }
-  
-  /**
-   * @return data node ID.
-   */
-  public String getDatanodeUuid() {
-    return datanodeUuid;
-  }
-
-  private String checkDatanodeUuid(String uuid) {
-    if (uuid == null || uuid.isEmpty()) {
-      return null;
-    } else {
-      return uuid;
-    }
-  }
-
-  /**
-   * @return the IP address
-   */
-  public String getIpAddr() {
-    return ipAddr;
-  }
-
-  /**
-   * @return hostname
-   */
-  public String getHostName() {
-    return hostName;
-  }
-
-  /**
-   * @return hostname from the actual connection 
-   */
-  public String getPeerHostName() {
-    return peerHostName;
-  }
-  
-  /**
-   * @return IP:xferPort string
-   */
-  public String getXferAddr() {
-    return xferAddr;
-  }
-
-  /**
-   * @return IP:ipcPort string
-   */
-  private String getIpcAddr() {
-    return ipAddr + ":" + ipcPort;
-  }
-
-  /**
-   * @return IP:infoPort string
-   */
-  public String getInfoAddr() {
-    return ipAddr + ":" + infoPort;
-  }
-
-  /**
-   * @return IP:infoSecurePort string
-   */
-  public String getInfoSecureAddr() {
-    return ipAddr + ":" + infoSecurePort;
-  }
-
-  /**
-   * @return hostname:xferPort
-   */
-  public String getXferAddrWithHostname() {
-    return hostName + ":" + xferPort;
-  }
-
-  /**
-   * @return hostname:ipcPort
-   */
-  private String getIpcAddrWithHostname() {
-    return hostName + ":" + ipcPort;
-  }
-
-  /**
-   * @param useHostname true to use the DN hostname, use the IP otherwise
-   * @return name:xferPort
-   */
-  public String getXferAddr(boolean useHostname) {
-    return useHostname ? getXferAddrWithHostname() : getXferAddr();
-  }
-
-  /**
-   * @param useHostname true to use the DN hostname, use the IP otherwise
-   * @return name:ipcPort
-   */
-  public String getIpcAddr(boolean useHostname) {
-    return useHostname ? getIpcAddrWithHostname() : getIpcAddr();
-  }
-
-  /**
-   * @return xferPort (the port for data streaming)
-   */
-  public int getXferPort() {
-    return xferPort;
-  }
-
-  /**
-   * @return infoPort (the port to which the HTTP server is bound)
-   */
-  public int getInfoPort() {
-    return infoPort;
-  }
-
-  /**
-   * @return infoSecurePort (the port to which the HTTPS server is bound)
-   */
-  public int getInfoSecurePort() {
-    return infoSecurePort;
-  }
-
-  /**
-   * @return ipcPort (the port to which the IPC server is bound)
-   */
-  public int getIpcPort() {
-    return ipcPort;
-  }
-
-  @Override
-  public boolean equals(Object to) {
-    if (this == to) {
-      return true;
-    }
-    if (!(to instanceof DatanodeID)) {
-      return false;
-    }
-    return (getXferAddr().equals(((DatanodeID)to).getXferAddr()) &&
-        datanodeUuid.equals(((DatanodeID)to).getDatanodeUuid()));
-  }
-  
-  @Override
-  public int hashCode() {
-    return datanodeUuid.hashCode();
-  }
-  
-  @Override
-  public String toString() {
-    return getXferAddr();
-  }
-  
-  /**
-   * Update fields when a new registration request comes in.
-   * Note that this does not update the datanode UUID.
-   */
-  public void updateRegInfo(DatanodeID nodeReg) {
-    setIpAndXferPort(nodeReg.getIpAddr(), nodeReg.getXferPort());
-    hostName = nodeReg.getHostName();
-    peerHostName = nodeReg.getPeerHostName();
-    infoPort = nodeReg.getInfoPort();
-    infoSecurePort = nodeReg.getInfoSecurePort();
-    ipcPort = nodeReg.getIpcPort();
-  }
-    
-  /**
-   * Compare based on data transfer address.
-   *
-   * @param that datanode to compare with
-   * @return as specified by Comparable
-   */
-  @Override
-  public int compareTo(DatanodeID that) {
-    return getXferAddr().compareTo(that.getXferAddr());
-  }
-}
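
For reference, a hypothetical construction of a DatanodeID using the
constructor above (not part of this patch; the addresses, ports and UUID are
example values only):

    import org.apache.hadoop.hdfs.protocol.DatanodeID;

    public class DatanodeIDExample {
      public static void main(String[] args) {
        DatanodeID dn = new DatanodeID("10.0.0.5", "dn1.example.com",
            "11111111-2222-3333-4444-555555555555",
            50010, 50075, 50475, 50020);

        // xferAddr is pre-built as "IP:xferPort" to avoid repeated string work.
        System.out.println(dn.getXferAddr());      // "10.0.0.5:50010"
        System.out.println(dn.getXferAddr(true));  // "dn1.example.com:50010"
        System.out.println(dn.getInfoAddr());      // "10.0.0.5:50075"

        // equals() compares xferAddr and datanodeUuid; hashCode() uses the UUID.
      }
    }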

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d3a4d51/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
deleted file mode 100644
index 1fd0018..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
+++ /dev/null
@@ -1,512 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.protocol;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.net.NetworkTopology;
-import org.apache.hadoop.net.Node;
-import org.apache.hadoop.net.NodeBase;
-import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.util.Time;
-
-import java.util.Date;
-import java.util.LinkedList;
-import java.util.List;
-
-import static org.apache.hadoop.hdfs.DFSUtil.percent2String;
-
-/** 
- * This class extends the primary identifier of a Datanode with ephemeral
- * state, e.g. usage information, current administrative state, and the
- * network location that is communicated to clients.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public class DatanodeInfo extends DatanodeID implements Node {
-  private long capacity;
-  private long dfsUsed;
-  private long remaining;
-  private long blockPoolUsed;
-  private long cacheCapacity;
-  private long cacheUsed;
-  private long lastUpdate;
-  private long lastUpdateMonotonic;
-  private int xceiverCount;
-  private String location = NetworkTopology.DEFAULT_RACK;
-  private String softwareVersion;
-  private List<String> dependentHostNames = new LinkedList<String>();
-  
-  
-  // Datanode administrative states
-  public enum AdminStates {
-    NORMAL("In Service"), 
-    DECOMMISSION_INPROGRESS("Decommission In Progress"), 
-    DECOMMISSIONED("Decommissioned");
-
-    final String value;
-
-    AdminStates(final String v) {
-      this.value = v;
-    }
-
-    @Override
-    public String toString() {
-      return value;
-    }
-    
-    public static AdminStates fromValue(final String value) {
-      for (AdminStates as : AdminStates.values()) {
-        if (as.value.equals(value)) return as;
-      }
-      return NORMAL;
-    }
-  }
-
-  protected AdminStates adminState;
-
-  public DatanodeInfo(DatanodeInfo from) {
-    super(from);
-    this.capacity = from.getCapacity();
-    this.dfsUsed = from.getDfsUsed();
-    this.remaining = from.getRemaining();
-    this.blockPoolUsed = from.getBlockPoolUsed();
-    this.cacheCapacity = from.getCacheCapacity();
-    this.cacheUsed = from.getCacheUsed();
-    this.lastUpdate = from.getLastUpdate();
-    this.lastUpdateMonotonic = from.getLastUpdateMonotonic();
-    this.xceiverCount = from.getXceiverCount();
-    this.location = from.getNetworkLocation();
-    this.adminState = from.getAdminState();
-  }
-
-  public DatanodeInfo(DatanodeID nodeID) {
-    super(nodeID);
-    this.capacity = 0L;
-    this.dfsUsed = 0L;
-    this.remaining = 0L;
-    this.blockPoolUsed = 0L;
-    this.cacheCapacity = 0L;
-    this.cacheUsed = 0L;
-    this.lastUpdate = 0L;
-    this.lastUpdateMonotonic = 0L;
-    this.xceiverCount = 0;
-    this.adminState = null;    
-  }
-  
-  public DatanodeInfo(DatanodeID nodeID, String location) {
-    this(nodeID);
-    this.location = location;
-  }
-  
-  public DatanodeInfo(DatanodeID nodeID, String location,
-      final long capacity, final long dfsUsed, final long remaining,
-      final long blockPoolUsed, final long cacheCapacity, final long cacheUsed,
-      final long lastUpdate, final long lastUpdateMonotonic,
-      final int xceiverCount, final AdminStates adminState) {
-    this(nodeID.getIpAddr(), nodeID.getHostName(), nodeID.getDatanodeUuid(),
-        nodeID.getXferPort(), nodeID.getInfoPort(), nodeID.getInfoSecurePort(),
-        nodeID.getIpcPort(), capacity, dfsUsed, remaining, blockPoolUsed,
-        cacheCapacity, cacheUsed, lastUpdate, lastUpdateMonotonic,
-        xceiverCount, location, adminState);
-  }
-
-  /** Constructor */
-  public DatanodeInfo(final String ipAddr, final String hostName,
-      final String datanodeUuid, final int xferPort, final int infoPort,
-      final int infoSecurePort, final int ipcPort,
-      final long capacity, final long dfsUsed, final long remaining,
-      final long blockPoolUsed, final long cacheCapacity, final long cacheUsed,
-      final long lastUpdate, final long lastUpdateMonotonic,
-      final int xceiverCount, final String networkLocation,
-      final AdminStates adminState) {
-    super(ipAddr, hostName, datanodeUuid, xferPort, infoPort,
-            infoSecurePort, ipcPort);
-    this.capacity = capacity;
-    this.dfsUsed = dfsUsed;
-    this.remaining = remaining;
-    this.blockPoolUsed = blockPoolUsed;
-    this.cacheCapacity = cacheCapacity;
-    this.cacheUsed = cacheUsed;
-    this.lastUpdate = lastUpdate;
-    this.lastUpdateMonotonic = lastUpdateMonotonic;
-    this.xceiverCount = xceiverCount;
-    this.location = networkLocation;
-    this.adminState = adminState;
-  }
-  
-  /** Network location name */
-  @Override
-  public String getName() {
-    return getXferAddr();
-  }
-  
-  /** The raw capacity. */
-  public long getCapacity() { return capacity; }
-  
-  /** The used space by the data node. */
-  public long getDfsUsed() { return dfsUsed; }
-
-  /** The used space by the block pool on data node. */
-  public long getBlockPoolUsed() { return blockPoolUsed; }
-
-  /** The non-DFS space used on the data node. */
-  public long getNonDfsUsed() { 
-    long nonDFSUsed = capacity - dfsUsed - remaining;
-    return nonDFSUsed < 0 ? 0 : nonDFSUsed;
-  }
-
-  /** The used space by the data node as percentage of present capacity */
-  public float getDfsUsedPercent() { 
-    return DFSUtil.getPercentUsed(dfsUsed, capacity);
-  }
-
-  /** The raw free space. */
-  public long getRemaining() { return remaining; }
-
-  /** Used space by the block pool as percentage of present capacity */
-  public float getBlockPoolUsedPercent() {
-    return DFSUtil.getPercentUsed(blockPoolUsed, capacity);
-  }
-  
-  /** The remaining space as percentage of configured capacity. */
-  public float getRemainingPercent() { 
-    return DFSUtil.getPercentRemaining(remaining, capacity);
-  }
-
-  /**
-   * @return Amount of cache capacity in bytes
-   */
-  public long getCacheCapacity() {
-    return cacheCapacity;
-  }
-
-  /**
-   * @return Amount of cache used in bytes
-   */
-  public long getCacheUsed() {
-    return cacheUsed;
-  }
-
-  /**
-   * @return Cache used as a percentage of the datanode's total cache capacity
-   */
-  public float getCacheUsedPercent() {
-    return DFSUtil.getPercentUsed(cacheUsed, cacheCapacity);
-  }
-
-  /**
-   * @return Amount of cache remaining in bytes
-   */
-  public long getCacheRemaining() {
-    return cacheCapacity - cacheUsed;
-  }
-
-  /**
-   * @return Cache remaining as a percentage of the datanode's total cache
-   * capacity
-   */
-  public float getCacheRemainingPercent() {
-    return DFSUtil.getPercentRemaining(getCacheRemaining(), cacheCapacity);
-  }
-
-  /**
-   * Get the last update timestamp.
-   * Return value is suitable for Date conversion.
-   */
-  public long getLastUpdate() { return lastUpdate; }
-
-  /**
-   * The time when this information was accurate, as a monotonic timestamp.
-   * The return value is suitable for computing time differences and should
-   * not be converted to a Date.
-   */
-  public long getLastUpdateMonotonic() { return lastUpdateMonotonic;}
-
-  /**
-   * Set lastUpdate monotonic time
-   */
-  public void setLastUpdateMonotonic(long lastUpdateMonotonic) {
-    this.lastUpdateMonotonic = lastUpdateMonotonic;
-  }
-
-  /** number of active connections */
-  public int getXceiverCount() { return xceiverCount; }
-
-  /** Sets raw capacity. */
-  public void setCapacity(long capacity) { 
-    this.capacity = capacity; 
-  }
-  
-  /** Sets the used space for the datanode. */
-  public void setDfsUsed(long dfsUsed) {
-    this.dfsUsed = dfsUsed;
-  }
-
-  /** Sets raw free space. */
-  public void setRemaining(long remaining) { 
-    this.remaining = remaining; 
-  }
-
-  /** Sets block pool used space */
-  public void setBlockPoolUsed(long bpUsed) { 
-    this.blockPoolUsed = bpUsed; 
-  }
-
-  /** Sets cache capacity. */
-  public void setCacheCapacity(long cacheCapacity) {
-    this.cacheCapacity = cacheCapacity;
-  }
-
-  /** Sets cache used. */
-  public void setCacheUsed(long cacheUsed) {
-    this.cacheUsed = cacheUsed;
-  }
-
-  /** Sets time when this information was accurate. */
-  public void setLastUpdate(long lastUpdate) { 
-    this.lastUpdate = lastUpdate; 
-  }
-
-  /** Sets number of active connections */
-  public void setXceiverCount(int xceiverCount) { 
-    this.xceiverCount = xceiverCount; 
-  }
-
-  /** network location */
-  public synchronized String getNetworkLocation() {return location;}
-    
-  /** Sets the network location */
-  public synchronized void setNetworkLocation(String location) {
-    this.location = NodeBase.normalize(location);
-  }
-  
-  /** Add a hostname to a list of network dependencies */
-  public void addDependentHostName(String hostname) {
-    dependentHostNames.add(hostname);
-  }
-  
-  /** List of Network dependencies */
-  public List<String> getDependentHostNames() {
-    return dependentHostNames;
-  }
-  
-  /** Sets the network dependencies */
-  public void setDependentHostNames(List<String> dependencyList) {
-    dependentHostNames = dependencyList;
-  }
-    
-  /** A formatted string for reporting the status of the DataNode. */
-  public String getDatanodeReport() {
-    StringBuilder buffer = new StringBuilder();
-    long c = getCapacity();
-    long r = getRemaining();
-    long u = getDfsUsed();
-    long nonDFSUsed = getNonDfsUsed();
-    float usedPercent = getDfsUsedPercent();
-    float remainingPercent = getRemainingPercent();
-    long cc = getCacheCapacity();
-    long cr = getCacheRemaining();
-    long cu = getCacheUsed();
-    float cacheUsedPercent = getCacheUsedPercent();
-    float cacheRemainingPercent = getCacheRemainingPercent();
-    String lookupName = NetUtils.getHostNameOfIP(getName());
-
-    buffer.append("Name: "+ getName());
-    if (lookupName != null) {
-      buffer.append(" (" + lookupName + ")");
-    }
-    buffer.append("\n");
-    buffer.append("Hostname: " + getHostName() + "\n");
-
-    if (!NetworkTopology.DEFAULT_RACK.equals(location)) {
-      buffer.append("Rack: "+location+"\n");
-    }
-    buffer.append("Decommission Status : ");
-    if (isDecommissioned()) {
-      buffer.append("Decommissioned\n");
-    } else if (isDecommissionInProgress()) {
-      buffer.append("Decommission in progress\n");
-    } else {
-      buffer.append("Normal\n");
-    }
-    buffer.append("Configured Capacity: "+c+" ("+StringUtils.byteDesc(c)+")"+"\n");
-    buffer.append("DFS Used: "+u+" ("+StringUtils.byteDesc(u)+")"+"\n");
-    buffer.append("Non DFS Used: "+nonDFSUsed+" ("+StringUtils.byteDesc(nonDFSUsed)+")"+"\n");
-    buffer.append("DFS Remaining: " +r+ " ("+StringUtils.byteDesc(r)+")"+"\n");
-    buffer.append("DFS Used%: "+percent2String(usedPercent) + "\n");
-    buffer.append("DFS Remaining%: "+percent2String(remainingPercent) + "\n");
-    buffer.append("Configured Cache Capacity: "+cc+" ("+StringUtils.byteDesc(cc)+")"+"\n");
-    buffer.append("Cache Used: "+cu+" ("+StringUtils.byteDesc(cu)+")"+"\n");
-    buffer.append("Cache Remaining: " +cr+ " ("+StringUtils.byteDesc(cr)+")"+"\n");
-    buffer.append("Cache Used%: "+percent2String(cacheUsedPercent) + "\n");
-    buffer.append("Cache Remaining%: "+percent2String(cacheRemainingPercent) + "\n");
-    buffer.append("Xceivers: "+getXceiverCount()+"\n");
-    buffer.append("Last contact: "+new Date(lastUpdate)+"\n");
-    return buffer.toString();
-  }
-
-  /** A formatted string for printing the status of the DataNode. */
-  public String dumpDatanode() {
-    StringBuilder buffer = new StringBuilder();
-    long c = getCapacity();
-    long r = getRemaining();
-    long u = getDfsUsed();
-    long cc = getCacheCapacity();
-    long cr = getCacheRemaining();
-    long cu = getCacheUsed();
-    buffer.append(getName());
-    if (!NetworkTopology.DEFAULT_RACK.equals(location)) {
-      buffer.append(" "+location);
-    }
-    if (isDecommissioned()) {
-      buffer.append(" DD");
-    } else if (isDecommissionInProgress()) {
-      buffer.append(" DP");
-    } else {
-      buffer.append(" IN");
-    }
-    buffer.append(" " + c + "(" + StringUtils.byteDesc(c)+")");
-    buffer.append(" " + u + "(" + StringUtils.byteDesc(u)+")");
-    buffer.append(" " + percent2String(u/(double)c));
-    buffer.append(" " + r + "(" + StringUtils.byteDesc(r)+")");
-    buffer.append(" " + cc + "(" + StringUtils.byteDesc(cc)+")");
-    buffer.append(" " + cu + "(" + StringUtils.byteDesc(cu)+")");
-    buffer.append(" " + percent2String(cu/(double)cc));
-    buffer.append(" " + cr + "(" + StringUtils.byteDesc(cr)+")");
-    buffer.append(" " + new Date(lastUpdate));
-    return buffer.toString();
-  }
-
-  /**
-   * Start decommissioning a node.
-   */
-  public void startDecommission() {
-    adminState = AdminStates.DECOMMISSION_INPROGRESS;
-  }
-
-  /**
-   * Stop decommissioning a node.
-   */
-  public void stopDecommission() {
-    adminState = null;
-  }
-
-  /**
-   * Returns true if the node is in the process of being decommissioned
-   */
-  public boolean isDecommissionInProgress() {
-    return adminState == AdminStates.DECOMMISSION_INPROGRESS;
-  }
-
-  /**
-   * Returns true if the node has been decommissioned.
-   */
-  public boolean isDecommissioned() {
-    return adminState == AdminStates.DECOMMISSIONED;
-  }
-
-  /**
-   * Sets the admin state to indicate that decommission is complete.
-   */
-  public void setDecommissioned() {
-    adminState = AdminStates.DECOMMISSIONED;
-  }
-
-  /**
-   * Retrieves the admin state of this node.
-   */
-  public AdminStates getAdminState() {
-    if (adminState == null) {
-      return AdminStates.NORMAL;
-    }
-    return adminState;
-  }
- 
-  /**
-   * Check if the datanode is in a stale state: if the namenode has not
-   * received a heartbeat from the datanode for more than staleInterval
-   * (default {@link DFSConfigKeys#DFS_NAMENODE_STALE_DATANODE_INTERVAL_DEFAULT}),
-   * the datanode is treated as stale.
-   * 
-   * @param staleInterval
-   *          the time interval for marking the node as stale. If the last
-   *          update time is beyond the given time interval, the node will be
-   *          marked as stale.
-   * @return true if the node is stale
-   */
-  public boolean isStale(long staleInterval) {
-    return (Time.monotonicNow() - lastUpdateMonotonic) >= staleInterval;
-  }
-  
-  /**
-   * Sets the admin state of this node.
-   */
-  protected void setAdminState(AdminStates newState) {
-    if (newState == AdminStates.NORMAL) {
-      adminState = null;
-    }
-    else {
-      adminState = newState;
-    }
-  }
-
-  private transient int level; // the level of the tree at which this node resides
-  private transient Node parent; // this node's parent
-
-  /** Return this node's parent */
-  @Override
-  public Node getParent() { return parent; }
-  @Override
-  public void setParent(Node parent) {this.parent = parent;}
-   
-  /** Return this node's level in the tree.
-   * E.g. the root of a tree returns 0 and its children return 1
-   */
-  @Override
-  public int getLevel() { return level; }
-  @Override
-  public void setLevel(int level) {this.level = level;}
-
-  @Override
-  public int hashCode() {
-    // Super implementation is sufficient
-    return super.hashCode();
-  }
-  
-  @Override
-  public boolean equals(Object obj) {
-    // Sufficient to use super equality as datanodes are uniquely identified
-    // by DatanodeID
-    return (this == obj) || super.equals(obj);
-  }
-
-  public String getSoftwareVersion() {
-    return softwareVersion;
-  }
-
-  public void setSoftwareVersion(String softwareVersion) {
-    this.softwareVersion = softwareVersion;
-  }
-}
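
For reference, a sketch of the staleness check and usage accounting above
(not part of this patch; the datanode identity, rack and sizes are
illustrative values):

    import org.apache.hadoop.hdfs.protocol.DatanodeID;
    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
    import org.apache.hadoop.util.Time;

    public class DatanodeInfoExample {
      public static void main(String[] args) {
        DatanodeID id = new DatanodeID("10.0.0.5", "dn1.example.com",
            "11111111-2222-3333-4444-555555555555", 50010, 50075, 50475, 50020);
        DatanodeInfo info = new DatanodeInfo(id, "/rack1");

        // Pretend the last heartbeat arrived 45 seconds ago.
        info.setLastUpdateMonotonic(Time.monotonicNow() - 45_000L);
        System.out.println(info.isStale(30_000L)); // true  (45s >= 30s)
        System.out.println(info.isStale(60_000L)); // false (45s <  60s)

        // Usage accounting feeds the percentage helpers and getDatanodeReport().
        info.setCapacity(100L << 30); // 100 GiB
        info.setDfsUsed(25L << 30);   //  25 GiB
        System.out.println(info.getDfsUsedPercent()); // 25.0
      }
    }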

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d3a4d51/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfoWithStorage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfoWithStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfoWithStorage.java
deleted file mode 100644
index 18c940b..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfoWithStorage.java
+++ /dev/null
@@ -1,67 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.protocol;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.fs.StorageType;
-
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public class DatanodeInfoWithStorage extends DatanodeInfo {
-  private final String storageID;
-  private final StorageType storageType;
-
-  public DatanodeInfoWithStorage(DatanodeInfo from, String storageID,
-                                 StorageType storageType) {
-    super(from);
-    this.storageID = storageID;
-    this.storageType = storageType;
-    setSoftwareVersion(from.getSoftwareVersion());
-    setDependentHostNames(from.getDependentHostNames());
-    setLevel(from.getLevel());
-    setParent(from.getParent());
-  }
-
-  public String getStorageID() {
-    return storageID;
-  }
-
-  public StorageType getStorageType() {
-    return storageType;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    // allows this class to be used interchangeably with DatanodeInfo
-    return super.equals(o);
-  }
-
-  @Override
-  public int hashCode() {
-    // allows this class to be used interchangeably with DatanodeInfo
-    return super.hashCode();
-  }
-
-  @Override
-  public String toString() {
-    return "DatanodeInfoWithStorage[" + super.toString() + "," + storageID +
-        "," + storageType + "]";
-  }
-}
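
For reference, a sketch of how the wrapper above tags a location with its
storage (not part of this patch; the storage ID "DS-1" and datanode identity
are example values):

    import org.apache.hadoop.fs.StorageType;
    import org.apache.hadoop.hdfs.protocol.DatanodeID;
    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
    import org.apache.hadoop.hdfs.protocol.DatanodeInfoWithStorage;

    public class DatanodeInfoWithStorageExample {
      public static void main(String[] args) {
        DatanodeInfo dn = new DatanodeInfo(new DatanodeID("10.0.0.5",
            "dn1.example.com", "11111111-2222-3333-4444-555555555555",
            50010, 50075, 50475, 50020), "/rack1");

        // Tag the datanode with the replica's storage ID and medium.
        DatanodeInfoWithStorage loc =
            new DatanodeInfoWithStorage(dn, "DS-1", StorageType.DISK);

        // equals()/hashCode() delegate to DatanodeInfo, so the wrapper can be
        // used interchangeably with the wrapped datanode in sets and maps.
        System.out.println(loc.equals(dn));       // true
        System.out.println(loc.getStorageType()); // DISK
      }
    }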

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d3a4d51/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ExtendedBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ExtendedBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ExtendedBlock.java
deleted file mode 100644
index 27c1761..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ExtendedBlock.java
+++ /dev/null
@@ -1,123 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.protocol;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-/**
- * Identifies a Block uniquely across the block pools
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public class ExtendedBlock {
-  private String poolId;
-  private Block block;
-
-  public ExtendedBlock() {
-    this(null, 0, 0, 0);
-  }
-
-  public ExtendedBlock(final ExtendedBlock b) {
-    this(b.poolId, new Block(b.block));
-  }
-  
-  public ExtendedBlock(final String poolId, final long blockId) {
-    this(poolId, blockId, 0, 0);
-  }
-  
-  public ExtendedBlock(String poolId, Block b) {
-    this.poolId = poolId;
-    this.block = b;
-  }
-
-  public ExtendedBlock(final String poolId, final long blkid, final long len,
-      final long genstamp) {
-    this.poolId = poolId;
-    block = new Block(blkid, len, genstamp);
-  }
-
-  public String getBlockPoolId() {
-    return poolId;
-  }
-
-  /** Returns the block file name for the block */
-  public String getBlockName() {
-    return block.getBlockName();
-  }
-
-  public long getNumBytes() {
-    return block.getNumBytes();
-  }
-
-  public long getBlockId() {
-    return block.getBlockId();
-  }
-
-  public long getGenerationStamp() {
-    return block.getGenerationStamp();
-  }
-
-  public void setBlockId(final long bid) {
-    block.setBlockId(bid);
-  }
-  
-  public void setGenerationStamp(final long genStamp) {
-    block.setGenerationStamp(genStamp);
-  }
-
-  public void setNumBytes(final long len) {
-    block.setNumBytes(len);
-  }
-  
-  public void set(String poolId, Block blk) {
-    this.poolId = poolId;
-    this.block = blk;
-  }
-
-  public static Block getLocalBlock(final ExtendedBlock b) {
-    return b == null ? null : b.getLocalBlock();
-  }
-  
-  public Block getLocalBlock() {
-    return block;
-  }
-  
-  @Override // Object
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (!(o instanceof ExtendedBlock)) {
-      return false;
-    }
-    ExtendedBlock b = (ExtendedBlock)o;
-    return b.block.equals(block) && b.poolId.equals(poolId);
-  }
-  
-  @Override // Object
-  public int hashCode() {
-    int result = 31 + poolId.hashCode();
-    return (31 * result + block.hashCode());
-  }
-  
-  @Override // Object
-  public String toString() {
-    return poolId + ":" + block;
-  }
-}
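
For reference, a minimal sketch of pool-qualified block identity as defined
above (not part of this patch; the pool IDs "BP-1"/"BP-2" and block values are
illustrative):

    import org.apache.hadoop.hdfs.protocol.Block;
    import org.apache.hadoop.hdfs.protocol.ExtendedBlock;

    public class ExtendedBlockExample {
      public static void main(String[] args) {
        // The same local block placed in two different block pools.
        Block local = new Block(42L, 1024L, 1001L);
        ExtendedBlock a = new ExtendedBlock("BP-1", new Block(local));
        ExtendedBlock b = new ExtendedBlock("BP-2", new Block(local));

        // Identity is (poolId, block), so equal local blocks in different
        // pools are distinct ExtendedBlocks.
        System.out.println(a.equals(b));                              // false
        System.out.println(a.getLocalBlock().equals(b.getLocalBlock())); // true
        System.out.println(a);                                        // "BP-1:blk_42_1001"
      }
    }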

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d3a4d51/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/FsPermissionExtension.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/FsPermissionExtension.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/FsPermissionExtension.java
deleted file mode 100644
index f74472d..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/FsPermissionExtension.java
+++ /dev/null
@@ -1,89 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.protocol;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.fs.permission.FsPermission;
-
-/**
- * HDFS permission subclass used to indicate an ACL is present and/or that the
- * underlying file/dir is encrypted. The ACL/encrypted bits are not visible
- * directly to users of {@link FsPermission} serialization.  This is
- * done for backwards compatibility in case any existing clients assume the
- * value of FsPermission is in a particular range.
- */
-@InterfaceAudience.Private
-public class FsPermissionExtension extends FsPermission {
-  private final static short ACL_BIT = 1 << 12;
-  private final static short ENCRYPTED_BIT = 1 << 13;
-  private final boolean aclBit;
-  private final boolean encryptedBit;
-
-  /**
-   * Constructs a new FsPermissionExtension based on the given FsPermission.
-   *
-   * @param perm FsPermission containing permission bits
-   * @param hasAcl true if the file or directory has an ACL
-   * @param isEncrypted true if the file or directory is encrypted
-   */
-  public FsPermissionExtension(FsPermission perm, boolean hasAcl,
-      boolean isEncrypted) {
-    super(perm.toShort());
-    aclBit = hasAcl;
-    encryptedBit = isEncrypted;
-  }
-
-  /**
-   * Creates a new FsPermissionExtension by calling the base class constructor.
-   *
-   * @param perm short containing permission bits
-   */
-  public FsPermissionExtension(short perm) {
-    super(perm);
-    aclBit = (perm & ACL_BIT) != 0;
-    encryptedBit = (perm & ENCRYPTED_BIT) != 0;
-  }
-
-  @Override
-  public short toExtendedShort() {
-    return (short)(toShort() |
-        (aclBit ? ACL_BIT : 0) | (encryptedBit ? ENCRYPTED_BIT : 0));
-  }
-
-  @Override
-  public boolean getAclBit() {
-    return aclBit;
-  }
-
-  @Override
-  public boolean getEncryptedBit() {
-    return encryptedBit;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    // This intentionally delegates to the base class.  This is only overridden
-    // to suppress a FindBugs warning.
-    return super.equals(o);
-  }
-
-  @Override
-  public int hashCode() {
-    // This intentionally delegates to the base class.  This is only overridden
-    // to suppress a FindBugs warning.
-    return super.hashCode();
-  }
-}
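
For reference, a sketch of the extension-bit encoding above (not part of this
patch; the 0755 permission is an example value):

    import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;

    public class FsPermissionExtensionExample {
      public static void main(String[] args) {
        // 0755 with the ACL bit (1 << 12) set in the extended short.
        short extended = (short) (0755 | (1 << 12));
        FsPermissionExtension perm = new FsPermissionExtension(extended);

        System.out.println(perm.getAclBit());       // true
        System.out.println(perm.getEncryptedBit()); // false

        // toShort() keeps only the classic permission bits, so existing
        // clients see a value in the usual range; toExtendedShort() restores
        // the ACL/encrypted bits.
        System.out.println(Integer.toOctalString(perm.toShort())); // "755"
        System.out.println(perm.toExtendedShort() == extended);    // true
      }
    }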

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d3a4d51/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
deleted file mode 100644
index 94d9a92..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
+++ /dev/null
@@ -1,271 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.protocol;
-
-import java.net.URI;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.fs.FileEncryptionInfo;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DFSUtil;
-
-/** Represents the over-the-wire information for a file.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public class HdfsFileStatus {
-
-  private final byte[] path;  // local name of the inode that's encoded in java UTF8
-  private final byte[] symlink; // symlink target encoded in java UTF8 or null
-  private final long length;
-  private final boolean isdir;
-  private final short block_replication;
-  private final long blocksize;
-  private final long modification_time;
-  private final long access_time;
-  private final FsPermission permission;
-  private final String owner;
-  private final String group;
-  private final long fileId;
-
-  private final FileEncryptionInfo feInfo;
-  
-  // Number of children of a directory, not including "." and "..".
-  // Always zero for a regular file.
-  private final int childrenNum;
-  private final byte storagePolicy;
-  
-  public static final byte[] EMPTY_NAME = new byte[0];
-
-  /**
-   * Constructor
-   * @param length the number of bytes the file has
-   * @param isdir if the path is a directory
-   * @param block_replication the replication factor
-   * @param blocksize the block size
-   * @param modification_time modification time
-   * @param access_time access time
-   * @param permission permission
-   * @param owner the owner of the path
-   * @param group the group of the path
-   * @param symlink symlink target encoded in java UTF8, or null
-   * @param path the local name in java UTF8 encoding, the same as that in memory
-   * @param fileId the file id
-   * @param childrenNum the number of children of a directory; zero for a file
-   * @param feInfo the file's encryption info
-   * @param storagePolicy the storage policy id
-   */
-  public HdfsFileStatus(long length, boolean isdir, int block_replication,
-      long blocksize, long modification_time, long access_time,
-      FsPermission permission, String owner, String group, byte[] symlink,
-      byte[] path, long fileId, int childrenNum, FileEncryptionInfo feInfo,
-      byte storagePolicy) {
-    this.length = length;
-    this.isdir = isdir;
-    this.block_replication = (short)block_replication;
-    this.blocksize = blocksize;
-    this.modification_time = modification_time;
-    this.access_time = access_time;
-    this.permission = (permission == null) ? 
-        ((isdir || symlink!=null) ? 
-            FsPermission.getDefault() : 
-            FsPermission.getFileDefault()) :
-        permission;
-    this.owner = (owner == null) ? "" : owner;
-    this.group = (group == null) ? "" : group;
-    this.symlink = symlink;
-    this.path = path;
-    this.fileId = fileId;
-    this.childrenNum = childrenNum;
-    this.feInfo = feInfo;
-    this.storagePolicy = storagePolicy;
-  }
-
-  /**
-   * Get the length of this file, in bytes.
-   * @return the length of this file, in bytes.
-   */
-  public final long getLen() {
-    return length;
-  }
-
-  /**
-   * Is this a directory?
-   * @return true if this is a directory
-   */
-  public final boolean isDir() {
-    return isdir;
-  }
-
-  /**
-   * Is this a symbolic link?
-   * @return true if this is a symbolic link
-   */
-  public boolean isSymlink() {
-    return symlink != null;
-  }
-  
-  /**
-   * Get the block size of the file.
-   * @return the number of bytes
-   */
-  public final long getBlockSize() {
-    return blocksize;
-  }
-
-  /**
-   * Get the replication factor of a file.
-   * @return the replication factor of a file.
-   */
-  public final short getReplication() {
-    return block_replication;
-  }
-
-  /**
-   * Get the modification time of the file.
-   * @return the modification time of the file in milliseconds since January 1, 1970 UTC.
-   */
-  public final long getModificationTime() {
-    return modification_time;
-  }
-
-  /**
-   * Get the access time of the file.
-   * @return the access time of the file in milliseconds since January 1, 1970 UTC.
-   */
-  public final long getAccessTime() {
-    return access_time;
-  }
-
-  /**
-   * Get the FsPermission associated with the file.
-   * @return permission
-   */
-  public final FsPermission getPermission() {
-    return permission;
-  }
-  
-  /**
-   * Get the owner of the file.
-   * @return owner of the file
-   */
-  public final String getOwner() {
-    return owner;
-  }
-  
-  /**
-   * Get the group associated with the file.
-   * @return group for the file. 
-   */
-  public final String getGroup() {
-    return group;
-  }
-  
-  /**
-   * Check if the local name is empty
-   * @return true if the name is empty
-   */
-  public final boolean isEmptyLocalName() {
-    return path.length == 0;
-  }
-
-  /**
-   * Get the string representation of the local name
-   * @return the local name in string
-   */
-  public final String getLocalName() {
-    return DFSUtil.bytes2String(path);
-  }
-  
-  /**
-   * Get the Java UTF8 representation of the local name
-   * @return the local name in java UTF8
-   */
-  public final byte[] getLocalNameInBytes() {
-    return path;
-  }
-
-  /**
-   * Get the string representation of the full path name
-   * @param parent the parent path
-   * @return the full path in string
-   */
-  public final String getFullName(final String parent) {
-    if (isEmptyLocalName()) {
-      return parent;
-    }
-    
-    StringBuilder fullName = new StringBuilder(parent);
-    if (!parent.endsWith(Path.SEPARATOR)) {
-      fullName.append(Path.SEPARATOR);
-    }
-    fullName.append(getLocalName());
-    return fullName.toString();
-  }
-
-  /**
-   * Get the full path
-   * @param parent the parent path
-   * @return the full path
-   */
-  public final Path getFullPath(final Path parent) {
-    if (isEmptyLocalName()) {
-      return parent;
-    }
-    
-    return new Path(parent, getLocalName());
-  }
-
-  /**
-   * Get the string representation of the symlink.
-   * @return the symlink as a string.
-   */
-  public final String getSymlink() {
-    return DFSUtil.bytes2String(symlink);
-  }
-  
-  public final byte[] getSymlinkInBytes() {
-    return symlink;
-  }
-  
-  public final long getFileId() {
-    return fileId;
-  }
-  
-  public final FileEncryptionInfo getFileEncryptionInfo() {
-    return feInfo;
-  }
-
-  public final int getChildrenNum() {
-    return childrenNum;
-  }
-
-  /** @return the storage policy id */
-  public final byte getStoragePolicy() {
-    return storagePolicy;
-  }
-
-  public final FileStatus makeQualified(URI defaultUri, Path path) {
-    return new FileStatus(getLen(), isDir(), getReplication(),
-        getBlockSize(), getModificationTime(),
-        getAccessTime(),
-        getPermission(), getOwner(), getGroup(),
-        isSymlink() ? new Path(getSymlink()) : null,
-        (getFullPath(path)).makeQualified(
-            defaultUri, null)); // fully-qualify path
-  }
-}
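
For reference, a sketch of how a client turns the wire form above back into a
qualified FileStatus (not part of this patch; the file name, owner, file ID
and namenode URI are example values):

    import java.net.URI;

    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DFSUtil;
    import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

    public class HdfsFileStatusExample {
      public static void main(String[] args) {
        // A regular file "data.txt": 1 MB, replication 3, 128 MB blocks.
        HdfsFileStatus st = new HdfsFileStatus(1L << 20, false, 3, 128L << 20,
            /*mtime*/ 0L, /*atime*/ 0L, /*permission*/ null, "alice", "users",
            /*symlink*/ null, DFSUtil.string2Bytes("data.txt"),
            /*fileId*/ 16386L, /*childrenNum*/ 0, /*feInfo*/ null,
            /*storagePolicy*/ (byte) 0);

        // The wire form carries only the local name; the client re-attaches
        // the parent to recover the full path.
        System.out.println(st.getFullName("/user/alice")); // "/user/alice/data.txt"

        FileStatus fs = st.makeQualified(
            URI.create("hdfs://nn.example.com:8020"), new Path("/user/alice"));
        System.out.println(fs.getPath());
        // hdfs://nn.example.com:8020/user/alice/data.txt
      }
    }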

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d3a4d51/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
deleted file mode 100644
index e729869..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
+++ /dev/null
@@ -1,217 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.protocol;
-
-import java.util.Arrays;
-import java.util.List;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
-import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
-import org.apache.hadoop.security.token.Token;
-
-import com.google.common.collect.Lists;
-
-/**
- * Associates a block with the Datanodes that contain its replicas
- * and other block metadata (e.g. the file offset associated with this
- * block, whether it is corrupt, whether a location is cached in memory,
- * the security token, etc.).
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public class LocatedBlock {
-
-  private final ExtendedBlock b;
-  private long offset;  // offset of the first byte of the block in the file
-  private final DatanodeInfoWithStorage[] locs;
-  /** Cached storage ID for each replica */
-  private final String[] storageIDs;
-  /** Cached storage type for each replica, if reported. */
-  private final StorageType[] storageTypes;
-  // The corrupt flag is true if all replicas of the block are corrupt;
-  // otherwise false. If only some replicas are corrupt, they are filtered
-  // out and their locations are not part of this object.
-  private boolean corrupt;
-  private Token<BlockTokenIdentifier> blockToken = new Token<BlockTokenIdentifier>();
-  /**
-   * List of cached datanode locations
-   */
-  private DatanodeInfo[] cachedLocs;
-
-  // Used when there are no locations
-  private static final DatanodeInfoWithStorage[] EMPTY_LOCS =
-      new DatanodeInfoWithStorage[0];
-
-  public LocatedBlock(ExtendedBlock b, DatanodeInfo[] locs) {
-    // By default, startOffset is unknown (-1) and corrupt is false.
-    this(b, locs, null, null, -1, false, EMPTY_LOCS);
-  }
-
-  public LocatedBlock(ExtendedBlock b, DatanodeInfo[] locs,
-                      String[] storageIDs, StorageType[] storageTypes) {
-    this(b, locs, storageIDs, storageTypes, -1, false, EMPTY_LOCS);
-  }
-
-  public LocatedBlock(ExtendedBlock b, DatanodeStorageInfo[] storages,
-      long startOffset, boolean corrupt) {
-    this(b, DatanodeStorageInfo.toDatanodeInfos(storages),
-        DatanodeStorageInfo.toStorageIDs(storages),
-        DatanodeStorageInfo.toStorageTypes(storages),
-        startOffset, corrupt, EMPTY_LOCS); // startOffset is unknown
-  }
-
-  public LocatedBlock(ExtendedBlock b, DatanodeInfo[] locs, String[] storageIDs,
-                      StorageType[] storageTypes, long startOffset,
-                      boolean corrupt, DatanodeInfo[] cachedLocs) {
-    this.b = b;
-    this.offset = startOffset;
-    this.corrupt = corrupt;
-    if (locs==null) {
-      this.locs = EMPTY_LOCS;
-    } else {
-      this.locs = new DatanodeInfoWithStorage[locs.length];
-      for(int i = 0; i < locs.length; i++) {
-        DatanodeInfo di = locs[i];
-        DatanodeInfoWithStorage storage = new DatanodeInfoWithStorage(di,
-            storageIDs != null ? storageIDs[i] : null,
-            storageTypes != null ? storageTypes[i] : null);
-        this.locs[i] = storage;
-      }
-    }
-    this.storageIDs = storageIDs;
-    this.storageTypes = storageTypes;
-
-    if (cachedLocs == null || cachedLocs.length == 0) {
-      this.cachedLocs = EMPTY_LOCS;
-    } else {
-      this.cachedLocs = cachedLocs;
-    }
-  }
-
-  public Token<BlockTokenIdentifier> getBlockToken() {
-    return blockToken;
-  }
-
-  public void setBlockToken(Token<BlockTokenIdentifier> token) {
-    this.blockToken = token;
-  }
-
-  public ExtendedBlock getBlock() {
-    return b;
-  }
-
-  /**
-   * Returns the locations associated with this block. The returned array is
-   * not expected to be modified. If it is, the caller must immediately invoke
-   * {@link org.apache.hadoop.hdfs.protocol.LocatedBlock#updateCachedStorageInfo}
-   * to update the cached storage ID/type arrays.
-   */
-  public DatanodeInfo[] getLocations() {
-    return locs;
-  }
-
-  public StorageType[] getStorageTypes() {
-    return storageTypes;
-  }
-  
-  public String[] getStorageIDs() {
-    return storageIDs;
-  }
-
-  /**
-   * Updates the cached StorageID and StorageType information. Must be
-   * called when the locations array is modified.
-   */
-  public void updateCachedStorageInfo() {
-    if (storageIDs != null) {
-      for(int i = 0; i < locs.length; i++) {
-        storageIDs[i] = locs[i].getStorageID();
-      }
-    }
-    if (storageTypes != null) {
-      for(int i = 0; i < locs.length; i++) {
-        storageTypes[i] = locs[i].getStorageType();
-      }
-    }
-  }
-
-  public long getStartOffset() {
-    return offset;
-  }
-  
-  public long getBlockSize() {
-    return b.getNumBytes();
-  }
-
-  public void setStartOffset(long value) {
-    this.offset = value;
-  }
-
-  public void setCorrupt(boolean corrupt) {
-    this.corrupt = corrupt;
-  }
-  
-  public boolean isCorrupt() {
-    return this.corrupt;
-  }
-
-  /**
-   * Add the location of a cached replica of the block.
-   *
-   * @param loc the datanode with the cached replica
-   */
-  public void addCachedLoc(DatanodeInfo loc) {
-    List<DatanodeInfo> cachedList = Lists.newArrayList(cachedLocs);
-    if (cachedList.contains(loc)) {
-      return;
-    }
-    // Try to re-use a DatanodeInfo already in locs
-    for (DatanodeInfoWithStorage di : locs) {
-      if (loc.equals(di)) {
-        cachedList.add(di);
-        cachedLocs = cachedList.toArray(cachedLocs);
-        return;
-      }
-    }
-    // Not present in locs; add it and go
-    cachedList.add(loc);
-    cachedLocs = cachedList.toArray(cachedLocs);
-  }
-
-  /**
-   * @return Datanodes with a cached block replica
-   */
-  public DatanodeInfo[] getCachedLocations() {
-    return cachedLocs;
-  }
-
-  @Override
-  public String toString() {
-    return getClass().getSimpleName() + "{" + b
-        + "; getBlockSize()=" + getBlockSize()
-        + "; corrupt=" + corrupt
-        + "; offset=" + offset
-        + "; locs=" + Arrays.asList(locs)
-        + "}";
-  }
-}
-

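For readers tracing the move to hdfs-client, here is a minimal sketch of how the class above is typically driven. It exercises only the constructors and cached-location bookkeeping shown in the deleted file; getReplicaNodes is a hypothetical helper and all values are illustrative.

    ExtendedBlock blk = new ExtendedBlock("BP-1", 42L, 1024L, 100L);
    DatanodeInfo[] nodes = getReplicaNodes();        // hypothetical helper
    LocatedBlock lb = new LocatedBlock(blk, nodes);  // offset -1, corrupt=false
    lb.setStartOffset(0L);
    lb.addCachedLoc(nodes[0]);                       // record an in-memory cached replica
    assert lb.getCachedLocations().length == 1;
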
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d3a4d51/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java
deleted file mode 100644
index e35a431..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java
+++ /dev/null
@@ -1,189 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.protocol;
-
-import java.util.List;
-import java.util.Collections;
-import java.util.Comparator;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.fs.FileEncryptionInfo;
-
-/**
- * Collection of blocks with their locations and the file length.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public class LocatedBlocks {
-  private final long fileLength;
-  private final List<LocatedBlock> blocks; // list of blocks with prioritized locations
-  private final boolean underConstruction;
-  private final LocatedBlock lastLocatedBlock;
-  private final boolean isLastBlockComplete;
-  private final FileEncryptionInfo fileEncryptionInfo;
-
-  public LocatedBlocks() {
-    fileLength = 0;
-    blocks = null;
-    underConstruction = false;
-    lastLocatedBlock = null;
-    isLastBlockComplete = false;
-    fileEncryptionInfo = null;
-  }
-
-  public LocatedBlocks(long flength, boolean isUnderConstruction,
-    List<LocatedBlock> blks, LocatedBlock lastBlock,
-    boolean isLastBlockCompleted, FileEncryptionInfo feInfo) {
-    fileLength = flength;
-    blocks = blks;
-    underConstruction = isUnderConstruction;
-    this.lastLocatedBlock = lastBlock;
-    this.isLastBlockComplete = isLastBlockCompleted;
-    this.fileEncryptionInfo = feInfo;
-  }
-  
-  /**
-   * Get located blocks.
-   */
-  public List<LocatedBlock> getLocatedBlocks() {
-    return blocks;
-  }
-  
-  /** Get the last located block. */
-  public LocatedBlock getLastLocatedBlock() {
-    return lastLocatedBlock;
-  }
-  
-  /** Is the last block completed? */
-  public boolean isLastBlockComplete() {
-    return isLastBlockComplete;
-  }
-
-  /**
-   * Get the located block at the specified index.
-   */
-  public LocatedBlock get(int index) {
-    return blocks.get(index);
-  }
-  
-  /**
-   * Get number of located blocks.
-   */
-  public int locatedBlockCount() {
-    return blocks == null ? 0 : blocks.size();
-  }
-
-  /**
-   * Get the file length.
-   */
-  public long getFileLength() {
-    return this.fileLength;
-  }
-
-  /**
-   * Return true if the file was under construction when this LocatedBlocks
-   * was constructed, false otherwise.
-   */
-  public boolean isUnderConstruction() {
-    return underConstruction;
-  }
-
-  /**
-   * @return the FileEncryptionInfo for the LocatedBlocks
-   */
-  public FileEncryptionInfo getFileEncryptionInfo() {
-    return fileEncryptionInfo;
-  }
-
-  /**
-   * Find the block containing the specified offset.
-   *
-   * @return the index of the block if found; otherwise the encoded insertion point.
-   */
-  public int findBlock(long offset) {
-    // create a fake block of size 1 as the search key
-    LocatedBlock key = new LocatedBlock(
-        new ExtendedBlock(), new DatanodeInfo[0]);
-    key.setStartOffset(offset);
-    key.getBlock().setNumBytes(1);
-    Comparator<LocatedBlock> comp = 
-      new Comparator<LocatedBlock>() {
-        // Returns 0 iff a is inside b or b is inside a
-        @Override
-        public int compare(LocatedBlock a, LocatedBlock b) {
-          long aBeg = a.getStartOffset();
-          long bBeg = b.getStartOffset();
-          long aEnd = aBeg + a.getBlockSize();
-          long bEnd = bBeg + b.getBlockSize();
-          if(aBeg <= bBeg && bEnd <= aEnd 
-              || bBeg <= aBeg && aEnd <= bEnd)
-            return 0; // one of the blocks is inside the other
-          if(aBeg < bBeg)
-            return -1; // a's left bound is to the left of the b's
-          return 1;
-        }
-      };
-    return Collections.binarySearch(blocks, key, comp);
-  }
-  
-  public void insertRange(int blockIdx, List<LocatedBlock> newBlocks) {
-    int oldIdx = blockIdx;
-    int insStart = 0, insEnd = 0;
-    for(int newIdx = 0; newIdx < newBlocks.size() && oldIdx < blocks.size(); 
-                                                        newIdx++) {
-      long newOff = newBlocks.get(newIdx).getStartOffset();
-      long oldOff = blocks.get(oldIdx).getStartOffset();
-      if(newOff < oldOff) {
-        insEnd++;
-      } else if(newOff == oldOff) {
-        // replace old cached block by the new one
-        blocks.set(oldIdx, newBlocks.get(newIdx));
-        if(insStart < insEnd) { // insert new blocks
-          blocks.addAll(oldIdx, newBlocks.subList(insStart, insEnd));
-          oldIdx += insEnd - insStart;
-        }
-        insStart = insEnd = newIdx+1;
-        oldIdx++;
-      } else {  // newOff > oldOff
-        assert false : "List of LocatedBlock must be sorted by startOffset";
-      }
-    }
-    insEnd = newBlocks.size();
-    if(insStart < insEnd) { // insert new blocks
-      blocks.addAll(oldIdx, newBlocks.subList(insStart, insEnd));
-    }
-  }
-  
-  public static int getInsertIndex(int binSearchResult) {
-    return binSearchResult >= 0 ? binSearchResult : -(binSearchResult+1);
-  }
-
-  @Override
-  public String toString() {
-    final StringBuilder b = new StringBuilder(getClass().getSimpleName());
-    b.append("{")
-     .append("\n  fileLength=").append(fileLength)
-     .append("\n  underConstruction=").append(underConstruction)
-     .append("\n  blocks=").append(blocks)
-     .append("\n  lastLocatedBlock=").append(lastLocatedBlock)
-     .append("\n  isLastBlockComplete=").append(isLastBlockComplete)
-     .append("}");
-    return b.toString();
-  }
-}

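A short sketch of the binary-search contract implemented by findBlock and getInsertIndex above, assuming `blocks` is a LocatedBlocks returned by a NameNode getBlockLocations call; the offset is illustrative:

    long offset = 128L * 1024 * 1024;
    int idx = blocks.findBlock(offset);
    if (idx >= 0) {
      LocatedBlock lb = blocks.get(idx);             // block covering the offset
    } else {
      // A negative result encodes where newly fetched blocks belong.
      int insertAt = LocatedBlocks.getInsertIndex(idx);
      // after fetching the missing range: blocks.insertRange(insertAt, fetched);
    }
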
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d3a4d51/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
index 31feb1e..518e91a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
@@ -25,7 +25,6 @@ import java.util.Date;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 
 /**
  * Metadata about a snapshottable directory
@@ -62,7 +61,7 @@ public class SnapshottableDirectoryStatus {
       int snapshotNumber, int snapshotQuota, byte[] parentFullPath) {
     this.dirStatus = new HdfsFileStatus(0, true, 0, 0, modification_time,
         access_time, permission, owner, group, null, localName, inodeId,
-        childrenNum, null, BlockStoragePolicySuite.ID_UNSPECIFIED);
+        childrenNum, null, HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED);
     this.snapshotNumber = snapshotNumber;
     this.snapshotQuota = snapshotQuota;
     this.parentFullPath = parentFullPath;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d3a4d51/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
index 3b603be..689b54d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
@@ -184,7 +184,6 @@ import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
@@ -1443,7 +1442,7 @@ public class PBHelper {
         fs.hasChildrenNum() ? fs.getChildrenNum() : -1,
         fs.hasFileEncryptionInfo() ? convert(fs.getFileEncryptionInfo()) : null,
         fs.hasStoragePolicy() ? (byte) fs.getStoragePolicy()
-            : BlockStoragePolicySuite.ID_UNSPECIFIED);
+            : HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED);
   }
 
   public static SnapshottableDirectoryStatus convert(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d3a4d51/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java
deleted file mode 100644
index e293dcc..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java
+++ /dev/null
@@ -1,189 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfs.security.token.block;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.util.EnumSet;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.WritableUtils;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.security.token.TokenIdentifier;
-
-@InterfaceAudience.Private
-public class BlockTokenIdentifier extends TokenIdentifier {
-  static final Text KIND_NAME = new Text("HDFS_BLOCK_TOKEN");
-
-  public enum AccessMode {
-    READ, WRITE, COPY, REPLACE
-  }
-
-  private long expiryDate;
-  private int keyId;
-  private String userId;
-  private String blockPoolId;
-  private long blockId;
-  private final EnumSet<AccessMode> modes;
-
-  private byte[] cache;
-  
-  public BlockTokenIdentifier() {
-    this(null, null, 0, EnumSet.noneOf(AccessMode.class));
-  }
-
-  public BlockTokenIdentifier(String userId, String bpid, long blockId,
-      EnumSet<AccessMode> modes) {
-    this.cache = null;
-    this.userId = userId;
-    this.blockPoolId = bpid;
-    this.blockId = blockId;
-    this.modes = modes == null ? EnumSet.noneOf(AccessMode.class) : modes;
-  }
-
-  @Override
-  public Text getKind() {
-    return KIND_NAME;
-  }
-
-  @Override
-  public UserGroupInformation getUser() {
-    if (userId == null || "".equals(userId)) {
-      String user = blockPoolId + ":" + Long.toString(blockId);
-      return UserGroupInformation.createRemoteUser(user);
-    }
-    return UserGroupInformation.createRemoteUser(userId);
-  }
-
-  public long getExpiryDate() {
-    return expiryDate;
-  }
-
-  public void setExpiryDate(long expiryDate) {
-    this.cache = null;
-    this.expiryDate = expiryDate;
-  }
-
-  public int getKeyId() {
-    return this.keyId;
-  }
-
-  public void setKeyId(int keyId) {
-    this.cache = null;
-    this.keyId = keyId;
-  }
-
-  public String getUserId() {
-    return userId;
-  }
-
-  public String getBlockPoolId() {
-    return blockPoolId;
-  }
-
-  public long getBlockId() {
-    return blockId;
-  }
-
-  public EnumSet<AccessMode> getAccessModes() {
-    return modes;
-  }
-
-  @Override
-  public String toString() {
-    return "block_token_identifier (expiryDate=" + this.getExpiryDate()
-        + ", keyId=" + this.getKeyId() + ", userId=" + this.getUserId()
-        + ", blockPoolId=" + this.getBlockPoolId()
-        + ", blockId=" + this.getBlockId() + ", access modes="
-        + this.getAccessModes() + ")";
-  }
-
-  static boolean isEqual(Object a, Object b) {
-    return a == null ? b == null : a.equals(b);
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    if (obj == this) {
-      return true;
-    }
-    if (obj instanceof BlockTokenIdentifier) {
-      BlockTokenIdentifier that = (BlockTokenIdentifier) obj;
-      return this.expiryDate == that.expiryDate && this.keyId == that.keyId
-          && isEqual(this.userId, that.userId) 
-          && isEqual(this.blockPoolId, that.blockPoolId)
-          && this.blockId == that.blockId
-          && isEqual(this.modes, that.modes);
-    }
-    return false;
-  }
-
-  @Override
-  public int hashCode() {
-    return (int) expiryDate ^ keyId ^ (int) blockId ^ modes.hashCode()
-        ^ (userId == null ? 0 : userId.hashCode())
-        ^ (blockPoolId == null ? 0 : blockPoolId.hashCode());
-  }
-
-  @Override
-  public void readFields(DataInput in) throws IOException {
-    this.cache = null;
-    expiryDate = WritableUtils.readVLong(in);
-    keyId = WritableUtils.readVInt(in);
-    userId = WritableUtils.readString(in);
-    blockPoolId = WritableUtils.readString(in);
-    blockId = WritableUtils.readVLong(in);
-    int length = WritableUtils.readVIntInRange(in, 0,
-        AccessMode.class.getEnumConstants().length);
-    for (int i = 0; i < length; i++) {
-      modes.add(WritableUtils.readEnum(in, AccessMode.class));
-    }
-  }
-
-  @Override
-  public void write(DataOutput out) throws IOException {
-    WritableUtils.writeVLong(out, expiryDate);
-    WritableUtils.writeVInt(out, keyId);
-    WritableUtils.writeString(out, userId);
-    WritableUtils.writeString(out, blockPoolId);
-    WritableUtils.writeVLong(out, blockId);
-    WritableUtils.writeVInt(out, modes.size());
-    for (AccessMode aMode : modes) {
-      WritableUtils.writeEnum(out, aMode);
-    }
-  }
-  
-  @Override
-  public byte[] getBytes() {
-    if (cache == null) cache = super.getBytes();
-    
-    return cache;
-  }
-
-  @InterfaceAudience.Private
-  public static class Renewer extends Token.TrivialRenewer {
-    @Override
-    protected Text getKind() {
-      return KIND_NAME;
-    }
-  }
-}

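A sketch of the write/readFields round trip defined above, using the DataOutputBuffer and DataInputBuffer helpers from org.apache.hadoop.io; the token field values are illustrative:

    BlockTokenIdentifier id = new BlockTokenIdentifier("hdfs", "BP-1", 42L,
        EnumSet.of(BlockTokenIdentifier.AccessMode.READ));
    DataOutputBuffer out = new DataOutputBuffer();
    id.write(out);                                   // vint/vlong encoding from write()

    BlockTokenIdentifier copy = new BlockTokenIdentifier();
    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    copy.readFields(in);
    assert id.equals(copy) && id.hashCode() == copy.hashCode();
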
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d3a4d51/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenIdentifier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenIdentifier.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenIdentifier.java
deleted file mode 100644
index 38ac687..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenIdentifier.java
+++ /dev/null
@@ -1,102 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfs.security.token.delegation;
-
-import java.io.ByteArrayInputStream;
-import java.io.DataInputStream;
-import java.io.IOException;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem;
-import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
-
-/**
- * A delegation token identifier that is specific to HDFS.
- */
-@InterfaceAudience.Private
-public class DelegationTokenIdentifier 
-    extends AbstractDelegationTokenIdentifier {
-  public static final Text HDFS_DELEGATION_KIND = new Text("HDFS_DELEGATION_TOKEN");
-
-  /**
-   * Create an empty delegation token identifier for reading into.
-   */
-  public DelegationTokenIdentifier() {
-  }
-
-  /**
-   * Create a new delegation token identifier
-   * @param owner the effective username of the token owner
-   * @param renewer the username of the renewer
-   * @param realUser the real username of the token owner
-   */
-  public DelegationTokenIdentifier(Text owner, Text renewer, Text realUser) {
-    super(owner, renewer, realUser);
-  }
-
-  @Override
-  public Text getKind() {
-    return HDFS_DELEGATION_KIND;
-  }
-
-  @Override
-  public String toString() {
-    return getKind() + " token " + getSequenceNumber()
-        + " for " + getUser().getShortUserName();
-  }
-
-  /** @return a string representation of the token */
-  public static String stringifyToken(final Token<?> token) throws IOException {
-    DelegationTokenIdentifier ident = new DelegationTokenIdentifier();
-    ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier());
-    DataInputStream in = new DataInputStream(buf);  
-    ident.readFields(in);
-
-    if (token.getService().getLength() > 0) {
-      return ident + " on " + token.getService();
-    } else {
-      return ident.toString();
-    }
-  }
-  
-  public static class WebHdfsDelegationTokenIdentifier
-      extends DelegationTokenIdentifier {
-    public WebHdfsDelegationTokenIdentifier() {
-      super();
-    }
-    @Override
-    public Text getKind() {
-      return WebHdfsFileSystem.TOKEN_KIND;
-    }
-  }
-  
-  public static class SWebHdfsDelegationTokenIdentifier
-      extends WebHdfsDelegationTokenIdentifier {
-    public SWebHdfsDelegationTokenIdentifier() {
-      super();
-    }
-    @Override
-    public Text getKind() {
-      return SWebHdfsFileSystem.TOKEN_KIND;
-    }
-  }
-}

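A sketch of stringifyToken in use, assuming `fs` is a DistributedFileSystem and the renewer name is illustrative:

    Token<?> token = fs.getDelegationToken("yarn");
    // Decodes the identifier bytes and appends the token's service, as above.
    System.out.println(DelegationTokenIdentifier.stringifyToken(token));
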
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d3a4d51/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index ae13a29..c4357d2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -123,7 +123,7 @@ public class BlockManager {
   private final AtomicLong excessBlocksCount = new AtomicLong(0L);
   private final AtomicLong postponedMisreplicatedBlocksCount = new AtomicLong(0L);
   private final long startupDelayBlockDeletionInMs;
-  
+
   /** Used by metrics */
   public long getPendingReplicationBlocksCount() {
     return pendingReplicationBlocksCount;
@@ -836,7 +836,7 @@ public class BlockManager {
           (BlockInfoContiguousUnderConstruction) blk;
       final DatanodeStorageInfo[] storages = uc.getExpectedStorageLocations();
       final ExtendedBlock eb = new ExtendedBlock(namesystem.getBlockPoolId(), blk);
-      return new LocatedBlock(eb, storages, pos, false);
+      return newLocatedBlock(eb, storages, pos, false);
     }
 
     // get block locations
@@ -868,7 +868,7 @@ public class BlockManager {
       " numCorrupt: " + numCorruptNodes +
       " numCorruptRepls: " + numCorruptReplicas;
     final ExtendedBlock eb = new ExtendedBlock(namesystem.getBlockPoolId(), blk);
-    return new LocatedBlock(eb, machines, pos, isCorrupt);
+    return newLocatedBlock(eb, machines, pos, isCorrupt);
   }
 
   /** Create a LocatedBlocks. */
@@ -3726,7 +3726,18 @@ public class BlockManager {
     postponedMisreplicatedBlocks.clear();
     postponedMisreplicatedBlocksCount.set(0);
   };
-  
+
+  public static LocatedBlock newLocatedBlock(
+      ExtendedBlock b, DatanodeStorageInfo[] storages,
+      long startOffset, boolean corrupt) {
+    // cachedLocs is passed as null; LocatedBlock's constructor normalizes it
+    return new LocatedBlock(
+        b, DatanodeStorageInfo.toDatanodeInfos(storages),
+        DatanodeStorageInfo.toStorageIDs(storages),
+        DatanodeStorageInfo.toStorageTypes(storages),
+        startOffset, corrupt,
+        null);
+  }
 
   private static class ReplicationWork {
 

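The newLocatedBlock factory above stands in for the removed LocatedBlock(ExtendedBlock, DatanodeStorageInfo[], long, boolean) constructor, keeping the server-side DatanodeStorageInfo type out of the client-side class. It passes null for cachedLocs, which the LocatedBlock constructor normalizes to the same empty array the old constructor passed explicitly, so the rewritten call sites behave identically. A sketch, with eb, storages, and pos as in createLocatedBlock:

    LocatedBlock lb = BlockManager.newLocatedBlock(eb, storages, pos, false);
    assert lb.getCachedLocations().length == 0;      // null cachedLocs normalized
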
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d3a4d51/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
index 6fee219..942227e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
@@ -42,7 +42,6 @@ public class BlockStoragePolicySuite {
   public static final XAttr.NameSpace XAttrNS = XAttr.NameSpace.SYSTEM;
 
   public static final int ID_BIT_LENGTH = 4;
-  public static final byte ID_UNSPECIFIED = 0;
 
   @VisibleForTesting
   public static BlockStoragePolicySuite createDefaultSuite() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d3a4d51/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
index be16a87..8c752ac 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
@@ -25,6 +25,9 @@ import com.google.common.annotations.VisibleForTesting;
 
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfoWithStorage;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d3a4d51/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
index b0ab315..9017fe1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
@@ -22,7 +22,7 @@ import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
@@ -115,7 +115,7 @@ class HeartbeatManager implements DatanodeStatistics {
 
   @Override
   public synchronized float getCapacityUsedPercent() {
-    return DFSUtil.getPercentUsed(stats.capacityUsed, stats.capacityTotal);
+    return DFSUtilClient.getPercentUsed(stats.capacityUsed, stats.capacityTotal);
   }
 
   @Override
@@ -125,8 +125,8 @@ class HeartbeatManager implements DatanodeStatistics {
 
   @Override
   public synchronized float getCapacityRemainingPercent() {
-    return DFSUtil.getPercentRemaining(
-        stats.capacityRemaining, stats.capacityTotal);
+    return DFSUtilClient.getPercentRemaining(stats.capacityRemaining,
+                                             stats.capacityTotal);
   }
 
   @Override
@@ -136,7 +136,8 @@ class HeartbeatManager implements DatanodeStatistics {
 
   @Override
   public synchronized float getPercentBlockPoolUsed() {
-    return DFSUtil.getPercentUsed(stats.blockPoolUsed, stats.capacityTotal);
+    return DFSUtilClient.getPercentUsed(stats.blockPoolUsed,
+                                        stats.capacityTotal);
   }
 
   @Override

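The percent helpers now come from DFSUtilClient. A sketch of the assumed contract, matching the old DFSUtil behavior but not verified against the moved implementation:

    // Assumed semantics: guard a non-positive capacity so the metric degrades
    // gracefully instead of dividing by zero.
    static float getPercentUsed(long used, long total) {
      return total <= 0 ? 100.0f : (used * 100.0f) / total;
    }
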
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d3a4d51/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
index 89487aa..f1beb75 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
@@ -53,7 +53,6 @@ import org.apache.hadoop.util.ToolRunner;
 
 import java.io.BufferedReader;
 import java.io.FileInputStream;
-import java.io.FileReader;
 import java.io.IOException;
 import java.io.InputStreamReader;
 import java.net.URI;
@@ -331,7 +330,7 @@ public class Mover {
     private boolean processFile(String fullPath, HdfsLocatedFileStatus status) {
       final byte policyId = status.getStoragePolicy();
       // currently we ignore files with unspecified storage policy
-      if (policyId == BlockStoragePolicySuite.ID_UNSPECIFIED) {
+      if (policyId == HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED) {
         return false;
       }
       final BlockStoragePolicy policy = blockStoragePolicies[policyId];