Posted to hdfs-commits@hadoop.apache.org by to...@apache.org on 2012/04/02 09:28:49 UTC
svn commit: r1308260 [2/4] - in
/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project:
hadoop-hdfs-httpfs/src/main/sbin/ hadoop-hdfs/ hadoop-hdfs/src/main/bin/
hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/
hadoop-hdfs/src/main/java/ hadoop...
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java Mon Apr 2 07:28:42 2012
@@ -238,7 +238,7 @@ public class DatanodeManager {
final DatanodeDescriptor node = getDatanode(nodeID.getStorageID());
if (node == null)
return null;
- if (!node.getName().equals(nodeID.getName())) {
+ if (!node.getXferAddr().equals(nodeID.getXferAddr())) {
final UnregisteredNodeException e = new UnregisteredNodeException(
nodeID, node);
NameNode.stateChangeLog.fatal("BLOCK* NameSystem.getDatanode: "
@@ -270,7 +270,7 @@ public class DatanodeManager {
networktopology.remove(nodeInfo);
if (LOG.isDebugEnabled()) {
- LOG.debug("remove datanode " + nodeInfo.getName());
+ LOG.debug("remove datanode " + nodeInfo);
}
namesystem.checkSafeMode();
}
@@ -288,7 +288,7 @@ public class DatanodeManager {
removeDatanode(descriptor);
} else {
NameNode.stateChangeLog.warn("BLOCK* removeDatanode: "
- + node.getName() + " does not exist");
+ + node + " does not exist");
}
} finally {
namesystem.writeUnlock();
@@ -306,7 +306,7 @@ public class DatanodeManager {
}
if (d != null && isDatanodeDead(d)) {
NameNode.stateChangeLog.info(
- "BLOCK* removeDeadDatanode: lost heartbeat from " + d.getName());
+ "BLOCK* removeDeadDatanode: lost heartbeat from " + d);
removeDatanode(d);
}
}
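
For context on the getName() removals in these hunks: string concatenation such as ("lost heartbeat from " + d) relies on DatanodeID#toString(). A minimal sketch, assuming toString() returns the transfer address (this class is illustrative, not the committed DatanodeID):

    class DatanodeIdSketch {
      private final String ipAddr;   // e.g. "10.0.0.5"
      private final int xferPort;    // data transfer port

      DatanodeIdSketch(String ipAddr, int xferPort) {
        this.ipAddr = ipAddr;
        this.xferPort = xferPort;
      }

      String getXferAddr() {
        return ipAddr + ":" + xferPort;
      }

      // Logging like ("... from " + node) picks this up implicitly.
      @Override
      public String toString() {
        return getXferAddr();
      }
    }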
@@ -332,19 +332,19 @@ public class DatanodeManager {
if (LOG.isDebugEnabled()) {
LOG.debug(getClass().getSimpleName() + ".addDatanode: "
- + "node " + node.getName() + " is added to datanodeMap.");
+ + "node " + node + " is added to datanodeMap.");
}
}
/** Physically remove node from datanodeMap. */
- private void wipeDatanode(final DatanodeID node) throws IOException {
+ private void wipeDatanode(final DatanodeID node) {
final String key = node.getStorageID();
synchronized (datanodeMap) {
host2DatanodeMap.remove(datanodeMap.remove(key));
}
if (LOG.isDebugEnabled()) {
LOG.debug(getClass().getSimpleName() + ".wipeDatanode("
- + node.getName() + "): storage " + key
+ + node + "): storage " + key
+ " is removed from datanodeMap.");
}
}
@@ -354,7 +354,7 @@ public class DatanodeManager {
List<String> names = new ArrayList<String>(1);
if (dnsToSwitchMapping instanceof CachedDNSToSwitchMapping) {
// get the node's IP address
- names.add(node.getHost());
+ names.add(node.getIpAddr());
} else {
// get the node's host name
String hostName = node.getHostName();
@@ -376,12 +376,12 @@ public class DatanodeManager {
node.setNetworkLocation(networkLocation);
}
- private boolean inHostsList(DatanodeID node, String ipAddr) {
- return checkInList(node, ipAddr, hostsReader.getHosts(), false);
+ private boolean inHostsList(DatanodeID node) {
+ return checkInList(node, hostsReader.getHosts(), false);
}
- private boolean inExcludedHostsList(DatanodeID node, String ipAddr) {
- return checkInList(node, ipAddr, hostsReader.getExcludedHosts(), true);
+ private boolean inExcludedHostsList(DatanodeID node) {
+ return checkInList(node, hostsReader.getExcludedHosts(), true);
}
/**
@@ -419,7 +419,7 @@ public class DatanodeManager {
for (Iterator<DatanodeDescriptor> it = nodeList.iterator(); it.hasNext();) {
DatanodeDescriptor node = it.next();
- if ((!inHostsList(node, null)) && (!inExcludedHostsList(node, null))
+ if ((!inHostsList(node)) && (!inExcludedHostsList(node))
&& node.isDecommissioned()) {
// The include list is not empty, an existing datanode does not appear
// in either the include or exclude list, and it has been decommissioned.
@@ -430,37 +430,23 @@ public class DatanodeManager {
}
/**
- * Check if the given node (of DatanodeID or ipAddress) is in the (include or
- * exclude) list. If ipAddress in null, check only based upon the given
- * DatanodeID. If ipAddress is not null, the ipAddress should refers to the
- * same host that given DatanodeID refers to.
+ * Check if the given DatanodeID is in the given (include or exclude) list.
*
- * @param node, the host DatanodeID
- * @param ipAddress, if not null, should refers to the same host
- * that DatanodeID refers to
- * @param hostsList, the list of hosts in the include/exclude file
- * @param isExcludeList, boolean, true if this is the exclude list
- * @return boolean, if in the list
+ * @param node the DatanodeID to check
+ * @param hostsList the list of hosts in the include/exclude file
+ * @param isExcludeList true if this is the exclude list
+ * @return true if the node is in the list, false otherwise
*/
private static boolean checkInList(final DatanodeID node,
- final String ipAddress,
final Set<String> hostsList,
final boolean isExcludeList) {
final InetAddress iaddr;
- if (ipAddress != null) {
- try {
- iaddr = InetAddress.getByName(ipAddress);
- } catch (UnknownHostException e) {
- LOG.warn("Unknown ip address: " + ipAddress, e);
- return isExcludeList;
- }
- } else {
- try {
- iaddr = InetAddress.getByName(node.getHost());
- } catch (UnknownHostException e) {
- LOG.warn("Unknown host: " + node.getHost(), e);
- return isExcludeList;
- }
+
+ try {
+ iaddr = InetAddress.getByName(node.getIpAddr());
+ } catch (UnknownHostException e) {
+ LOG.warn("Unknown IP: " + node.getIpAddr(), e);
+ return isExcludeList;
}
// if include list is empty, host is in include list
@@ -470,10 +456,10 @@ public class DatanodeManager {
return // compare ipaddress(:port)
(hostsList.contains(iaddr.getHostAddress().toString()))
|| (hostsList.contains(iaddr.getHostAddress().toString() + ":"
- + node.getPort()))
+ + node.getXferPort()))
// compare hostname(:port)
|| (hostsList.contains(iaddr.getHostName()))
- || (hostsList.contains(iaddr.getHostName() + ":" + node.getPort()))
+ || (hostsList.contains(iaddr.getHostName() + ":" + node.getXferPort()))
|| ((node instanceof DatanodeInfo) && hostsList
.contains(((DatanodeInfo) node).getHostName()));
}
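
The net effect of the simplified checkInList is easier to see outside the diff. A hedged sketch of the four name forms compared above (HostListMatcher and its method name are hypothetical):

    import java.net.InetAddress;
    import java.net.UnknownHostException;
    import java.util.Set;

    class HostListMatcher {
      /** True if the node's IP or hostname, with or without :xferPort, is listed. */
      static boolean inList(String ipAddr, int xferPort, Set<String> hostsList)
          throws UnknownHostException {
        InetAddress iaddr = InetAddress.getByName(ipAddr);
        return hostsList.contains(iaddr.getHostAddress())
            || hostsList.contains(iaddr.getHostAddress() + ":" + xferPort)
            || hostsList.contains(iaddr.getHostName())
            || hostsList.contains(iaddr.getHostName() + ":" + xferPort);
      }
    }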
@@ -481,10 +467,9 @@ public class DatanodeManager {
/**
* Decommission the node if it is in exclude list.
*/
- private void checkDecommissioning(DatanodeDescriptor nodeReg, String ipAddr)
- throws IOException {
+ private void checkDecommissioning(DatanodeDescriptor nodeReg, String ipAddr) {
// If the registered node is in exclude list, then decommission it
- if (inExcludedHostsList(nodeReg, ipAddr)) {
+ if (inExcludedHostsList(nodeReg)) {
startDecommission(nodeReg);
}
}
@@ -499,16 +484,16 @@ public class DatanodeManager {
if (node.isDecommissionInProgress()) {
if (!blockManager.isReplicationInProgress(node)) {
node.setDecommissioned();
- LOG.info("Decommission complete for node " + node.getName());
+ LOG.info("Decommission complete for node " + node);
}
}
return node.isDecommissioned();
}
/** Start decommissioning the specified datanode. */
- private void startDecommission(DatanodeDescriptor node) throws IOException {
+ private void startDecommission(DatanodeDescriptor node) {
if (!node.isDecommissionInProgress() && !node.isDecommissioned()) {
- LOG.info("Start Decommissioning node " + node.getName() + " with " +
+ LOG.info("Start Decommissioning node " + node + " with " +
node.numBlocks() + " blocks.");
heartbeatManager.startDecommission(node);
node.decommissioningStatus.setStartTime(now());
@@ -519,9 +504,9 @@ public class DatanodeManager {
}
/** Stop decommissioning the specified datanodes. */
- void stopDecommission(DatanodeDescriptor node) throws IOException {
+ void stopDecommission(DatanodeDescriptor node) {
if (node.isDecommissionInProgress() || node.isDecommissioned()) {
- LOG.info("Stop Decommissioning node " + node.getName());
+ LOG.info("Stop Decommissioning node " + node);
heartbeatManager.stopDecommission(node);
blockManager.processOverReplicatedBlocksOnReCommission(node);
}
@@ -545,41 +530,44 @@ public class DatanodeManager {
return newID;
}
- public void registerDatanode(DatanodeRegistration nodeReg
- ) throws IOException {
+ /**
+ * Register the given datanode with the namenode. NB: the given
+ * registration is mutated and given back to the datanode.
+ *
+ * @param nodeReg the datanode registration
+ * @throws DisallowedDatanodeException if the registration request is
+ * denied because the datanode does not match includes/excludes
+ */
+ public void registerDatanode(DatanodeRegistration nodeReg)
+ throws DisallowedDatanodeException {
String dnAddress = Server.getRemoteAddress();
if (dnAddress == null) {
// Mostly called inside an RPC.
// But if not, use address passed by the data-node.
- dnAddress = nodeReg.getHost();
- }
+ dnAddress = nodeReg.getIpAddr();
+ }
+
+ // Update the IP to the address of the RPC request that is
+ // registering this datanode.
+ nodeReg.setIpAddr(dnAddress);
+ nodeReg.setExportedKeys(blockManager.getBlockKeys());
// Check that the node is on the hosts list. If it is not, then
// it will be disallowed from registering.
- if (!inHostsList(nodeReg, dnAddress)) {
+ if (!inHostsList(nodeReg)) {
throw new DisallowedDatanodeException(nodeReg);
}
-
- String hostName = nodeReg.getHost();
-
- // update the datanode's name with ip:port
- DatanodeID dnReg = new DatanodeID(dnAddress + ":" + nodeReg.getPort(),
- nodeReg.getStorageID(),
- nodeReg.getInfoPort(),
- nodeReg.getIpcPort());
- nodeReg.updateRegInfo(dnReg);
- nodeReg.exportedKeys = blockManager.getBlockKeys();
NameNode.stateChangeLog.info("BLOCK* NameSystem.registerDatanode: "
- + "node registration from " + nodeReg.getName()
+ + "node registration from " + nodeReg
+ " storage " + nodeReg.getStorageID());
DatanodeDescriptor nodeS = datanodeMap.get(nodeReg.getStorageID());
- DatanodeDescriptor nodeN = getDatanodeByHost(nodeReg.getName());
+ DatanodeDescriptor nodeN = getDatanodeByHost(nodeReg.getXferAddr());
if (nodeN != null && nodeN != nodeS) {
NameNode.LOG.info("BLOCK* NameSystem.registerDatanode: "
- + "node from name: " + nodeN.getName());
+ + "node from name: " + nodeN);
// nodeN previously served a different data storage,
// which is not served by anybody anymore.
removeDatanode(nodeN);
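
To summarize the new registration flow: the namenode now overwrites the registration's IP with the RPC source address and rejects nodes absent from the include list, instead of building a replacement DatanodeID. A minimal sketch with a stand-in Registration type (not the real DatanodeRegistration; the real include check also matches hostnames):

    import java.util.Set;

    class RegistrationFlowSketch {
      static class Registration {          // stand-in for DatanodeRegistration
        String ipAddr;
      }

      static void register(Registration reg, String rpcSourceIp,
                           Set<String> includeList) {
        // Trust the address of the RPC request when it is available.
        if (rpcSourceIp != null) {
          reg.ipAddr = rpcSourceIp;        // mutated and handed back, per the javadoc
        }
        // An empty include list admits everyone.
        if (!includeList.isEmpty() && !includeList.contains(reg.ipAddr)) {
          throw new IllegalStateException("Datanode " + reg.ipAddr + " is disallowed");
        }
      }
    }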
@@ -608,15 +596,14 @@ public class DatanodeManager {
but this might not work if the VERSION file format has changed
*/
NameNode.stateChangeLog.info( "BLOCK* NameSystem.registerDatanode: "
- + "node " + nodeS.getName()
- + " is replaced by " + nodeReg.getName() +
+ + "node " + nodeS
+ + " is replaced by " + nodeReg +
" with the same storageID " +
nodeReg.getStorageID());
}
// update cluster map
getNetworkTopology().remove(nodeS);
nodeS.updateRegInfo(nodeReg);
- nodeS.setHostName(hostName);
nodeS.setDisallowed(false); // Node is in the include list
// resolve network location
@@ -630,11 +617,11 @@ public class DatanodeManager {
}
// this is a new datanode serving a new data storage
- if (nodeReg.getStorageID().equals("")) {
+ if ("".equals(nodeReg.getStorageID())) {
// this data storage has never been registered
// it is either empty or was created by pre-storageID version of DFS
- nodeReg.storageID = newStorageID();
- if(NameNode.stateChangeLog.isDebugEnabled()) {
+ nodeReg.setStorageID(newStorageID());
+ if (NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug(
"BLOCK* NameSystem.registerDatanode: "
+ "new storageID " + nodeReg.getStorageID() + " assigned.");
@@ -642,7 +629,7 @@ public class DatanodeManager {
}
// register new datanode
DatanodeDescriptor nodeDescr
- = new DatanodeDescriptor(nodeReg, NetworkTopology.DEFAULT_RACK, hostName);
+ = new DatanodeDescriptor(nodeReg, NetworkTopology.DEFAULT_RACK);
resolveNetworkLocation(nodeDescr);
addDatanode(nodeDescr);
checkDecommissioning(nodeDescr, dnAddress);
@@ -690,10 +677,10 @@ public class DatanodeManager {
private void refreshDatanodes() throws IOException {
for(DatanodeDescriptor node : datanodeMap.values()) {
// Check if not include.
- if (!inHostsList(node, null)) {
+ if (!inHostsList(node)) {
node.setDisallowed(true); // case 2.
} else {
- if (inExcludedHostsList(node, null)) {
+ if (inExcludedHostsList(node)) {
startDecommission(node); // case 3.
} else {
stopDecommission(node); // case 4.
@@ -820,16 +807,16 @@ public class DatanodeManager {
}
// Remove any form of this datanode from the include/exclude lists.
try {
- InetAddress inet = InetAddress.getByName(dn.getHost());
+ InetAddress inet = InetAddress.getByName(dn.getIpAddr());
// compare hostname(:port)
mustList.remove(inet.getHostName());
- mustList.remove(inet.getHostName()+":"+dn.getPort());
+ mustList.remove(inet.getHostName()+":"+dn.getXferPort());
// compare ipaddress(:port)
mustList.remove(inet.getHostAddress().toString());
- mustList.remove(inet.getHostAddress().toString()+ ":" +dn.getPort());
+ mustList.remove(inet.getHostAddress().toString()+ ":" +dn.getXferPort());
} catch ( UnknownHostException e ) {
mustList.remove(dn.getName());
- mustList.remove(dn.getHost());
+ mustList.remove(dn.getIpAddr());
LOG.warn(e);
}
}
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/Host2NodesMap.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/Host2NodesMap.java?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/Host2NodesMap.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/Host2NodesMap.java Mon Apr 2 07:28:42 2012
@@ -39,10 +39,10 @@ class Host2NodesMap {
return false;
}
- String host = node.getHost();
+ String ipAddr = node.getIpAddr();
hostmapLock.readLock().lock();
try {
- DatanodeDescriptor[] nodes = map.get(host);
+ DatanodeDescriptor[] nodes = map.get(ipAddr);
if (nodes != null) {
for(DatanodeDescriptor containedNode:nodes) {
if (node==containedNode) {
@@ -66,8 +66,8 @@ class Host2NodesMap {
return false;
}
- String host = node.getHost();
- DatanodeDescriptor[] nodes = map.get(host);
+ String ipAddr = node.getIpAddr();
+ DatanodeDescriptor[] nodes = map.get(ipAddr);
DatanodeDescriptor[] newNodes;
if (nodes==null) {
newNodes = new DatanodeDescriptor[1];
@@ -77,7 +77,7 @@ class Host2NodesMap {
System.arraycopy(nodes, 0, newNodes, 0, nodes.length);
newNodes[nodes.length] = node;
}
- map.put(host, newNodes);
+ map.put(ipAddr, newNodes);
return true;
} finally {
hostmapLock.writeLock().unlock();
@@ -92,17 +92,17 @@ class Host2NodesMap {
return false;
}
- String host = node.getHost();
+ String ipAddr = node.getIpAddr();
hostmapLock.writeLock().lock();
try {
- DatanodeDescriptor[] nodes = map.get(host);
+ DatanodeDescriptor[] nodes = map.get(ipAddr);
if (nodes==null) {
return false;
}
if (nodes.length==1) {
if (nodes[0]==node) {
- map.remove(host);
+ map.remove(ipAddr);
return true;
} else {
return false;
@@ -122,7 +122,7 @@ class Host2NodesMap {
newNodes = new DatanodeDescriptor[nodes.length-1];
System.arraycopy(nodes, 0, newNodes, 0, i);
System.arraycopy(nodes, i+1, newNodes, i, nodes.length-i-1);
- map.put(host, newNodes);
+ map.put(ipAddr, newNodes);
return true;
}
} finally {
@@ -130,17 +130,18 @@ class Host2NodesMap {
}
}
- /** get a data node by its host.
- * @return DatanodeDescriptor if found; otherwise null.
+ /**
+ * Get a data node by its IP address.
+ * @return DatanodeDescriptor if found, null otherwise
*/
- DatanodeDescriptor getDatanodeByHost(String host) {
- if (host==null) {
+ DatanodeDescriptor getDatanodeByHost(String ipAddr) {
+ if (ipAddr == null) {
return null;
}
hostmapLock.readLock().lock();
try {
- DatanodeDescriptor[] nodes = map.get(host);
+ DatanodeDescriptor[] nodes = map.get(ipAddr);
// no entry
if (nodes== null) {
return null;
@@ -155,40 +156,4 @@ class Host2NodesMap {
hostmapLock.readLock().unlock();
}
}
-
- /**
- * Find data node by its name.
- *
- * @return DatanodeDescriptor if found or null otherwise
- */
- public DatanodeDescriptor getDatanodeByName(String name) {
- if (name==null) {
- return null;
- }
-
- int colon = name.indexOf(":");
- String host;
- if (colon < 0) {
- host = name;
- } else {
- host = name.substring(0, colon);
- }
-
- hostmapLock.readLock().lock();
- try {
- DatanodeDescriptor[] nodes = map.get(host);
- // no entry
- if (nodes== null) {
- return null;
- }
- for(DatanodeDescriptor containedNode:nodes) {
- if (name.equals(containedNode.getName())) {
- return containedNode;
- }
- }
- return null;
- } finally {
- hostmapLock.readLock().unlock();
- }
- }
}
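
Host2NodesMap after this change is essentially an IP-keyed multimap guarded by a read-write lock. A simplified sketch (a List instead of the array juggling above; remove() omitted; names are illustrative):

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    class IpToNodesMap<D> {
      private final Map<String, List<D>> map = new HashMap<>();
      private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

      boolean add(String ipAddr, D node) {
        lock.writeLock().lock();
        try {
          return map.computeIfAbsent(ipAddr, k -> new ArrayList<>()).add(node);
        } finally {
          lock.writeLock().unlock();
        }
      }

      D getOne(String ipAddr) {
        if (ipAddr == null) {
          return null;
        }
        lock.readLock().lock();
        try {
          List<D> nodes = map.get(ipAddr);
          return (nodes == null || nodes.isEmpty()) ? null : nodes.get(0);
        } finally {
          lock.readLock().unlock();
        }
      }
    }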
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java Mon Apr 2 07:28:42 2012
@@ -75,7 +75,7 @@ class InvalidateBlocks {
numBlocks++;
if (log) {
NameNode.stateChangeLog.info("BLOCK* " + getClass().getSimpleName()
- + ": add " + block + " to " + datanode.getName());
+ + ": add " + block + " to " + datanode);
}
}
}
@@ -111,7 +111,8 @@ class InvalidateBlocks {
for(Map.Entry<String,LightWeightHashSet<Block>> entry : node2blocks.entrySet()) {
final LightWeightHashSet<Block> blocks = entry.getValue();
if (blocks.size() > 0) {
- out.println(datanodeManager.getDatanode(entry.getKey()).getName() + blocks);
+ out.println(datanodeManager.getDatanode(entry.getKey()));
+ out.println(blocks);
}
}
}
@@ -135,7 +136,7 @@ class InvalidateBlocks {
if (NameNode.stateChangeLog.isInfoEnabled()) {
NameNode.stateChangeLog.info("BLOCK* " + getClass().getSimpleName()
- + ": ask " + dn.getName() + " to delete " + toInvalidate);
+ + ": ask " + dn + " to delete " + toInvalidate);
}
return toInvalidate.size();
}
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java Mon Apr 2 07:28:42 2012
@@ -88,9 +88,6 @@ public class JspHelper {
private static class NodeRecord extends DatanodeInfo {
int frequency;
- public NodeRecord() {
- frequency = -1;
- }
public NodeRecord(DatanodeInfo info, int count) {
super(info);
this.frequency = count;
@@ -172,7 +169,7 @@ public class JspHelper {
//just ping to check whether the node is alive
InetSocketAddress targetAddr = NetUtils.createSocketAddr(
- chosenNode.getHost() + ":" + chosenNode.getInfoPort());
+ chosenNode.getInfoAddr());
try {
s = NetUtils.getDefaultSocketFactory(conf).createSocket();
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java Mon Apr 2 07:28:42 2012
@@ -64,18 +64,12 @@ import org.apache.hadoop.util.VersionInf
public abstract class Storage extends StorageInfo {
public static final Log LOG = LogFactory.getLog(Storage.class.getName());
- // Constants
-
// last layout version that did not support upgrades
public static final int LAST_PRE_UPGRADE_LAYOUT_VERSION = -3;
- // this corresponds to Hadoop-0.14.
- public static final int LAST_UPGRADABLE_LAYOUT_VERSION = -7;
- protected static final String LAST_UPGRADABLE_HADOOP_VERSION = "Hadoop-0.14";
-
- /* this should be removed when LAST_UPGRADABLE_LV goes beyond -13.
- * any upgrade code that uses this constant should also be removed. */
- public static final int PRE_GENERATIONSTAMP_LAYOUT_VERSION = -13;
+ // this corresponds to Hadoop-0.18
+ public static final int LAST_UPGRADABLE_LAYOUT_VERSION = -16;
+ protected static final String LAST_UPGRADABLE_HADOOP_VERSION = "Hadoop-0.18";
/** Layout versions of 0.20.203 release */
public static final int[] LAYOUT_VERSIONS_203 = {-19, -31};
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java Mon Apr 2 07:28:42 2012
@@ -325,10 +325,10 @@ class BPOfferService {
void registrationSucceeded(BPServiceActor bpServiceActor,
DatanodeRegistration reg) throws IOException {
if (bpRegistration != null) {
- checkNSEquality(bpRegistration.storageInfo.getNamespaceID(),
- reg.storageInfo.getNamespaceID(), "namespace ID");
- checkNSEquality(bpRegistration.storageInfo.getClusterID(),
- reg.storageInfo.getClusterID(), "cluster ID");
+ checkNSEquality(bpRegistration.getStorageInfo().getNamespaceID(),
+ reg.getStorageInfo().getNamespaceID(), "namespace ID");
+ checkNSEquality(bpRegistration.getStorageInfo().getClusterID(),
+ reg.getStorageInfo().getClusterID(), "cluster ID");
} else {
bpRegistration = reg;
}
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java Mon Apr 2 07:28:42 2012
@@ -602,7 +602,7 @@ class BPServiceActor implements Runnable
while (shouldRun()) {
try {
- // Use returned registration from namenode with updated machine name.
+ // Use returned registration from namenode with updated fields
bpRegistration = bpNamenode.registerDatanode(bpRegistration);
break;
} catch(SocketTimeoutException e) { // namenode is busy
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Mon Apr 2 07:28:42 2012
@@ -164,9 +164,9 @@ import org.apache.hadoop.util.VersionInf
import org.mortbay.util.ajax.JSON;
import com.google.common.base.Preconditions;
+import com.google.common.annotations.VisibleForTesting;
import com.google.protobuf.BlockingService;
-
/**********************************************************
* DataNode is a class (and program) that stores a set of
* blocks for a DFS deployment. A single deployment can
@@ -244,9 +244,10 @@ public class DataNode extends Configured
private DataStorage storage = null;
private HttpServer infoServer = null;
DataNodeMetrics metrics;
- private InetSocketAddress selfAddr;
+ private InetSocketAddress streamingAddr;
- private volatile String hostName; // Host name of this datanode
+ private String hostName;
+ private DatanodeID id;
boolean isBlockTokenEnabled;
BlockPoolTokenSecretManager blockPoolTokenSecretManager;
@@ -288,6 +289,7 @@ public class DataNode extends Configured
.get(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY);
try {
hostName = getHostName(conf);
+ LOG.info("Configured hostname is " + hostName);
startDataNode(conf, dataDirs, resources);
} catch (IOException ie) {
shutdown();
@@ -305,16 +307,25 @@ public class DataNode extends Configured
clusterId = nsCid;
}
+ /**
+ * Returns the hostname for this datanode. If the hostname is not
+ * explicitly configured in the given config, then it is determined
+ * via the DNS class.
+ *
+ * @param config
+ * @return the hostname (NB: may not be a FQDN)
+ * @throws UnknownHostException if the dfs.datanode.dns.interface
+ * option is used and the hostname cannot be determined
+ */
private static String getHostName(Configuration config)
throws UnknownHostException {
- // use configured nameserver & interface to get local hostname
String name = config.get(DFS_DATANODE_HOST_NAME_KEY);
if (name == null) {
- name = DNS
- .getDefaultHost(config.get(DFS_DATANODE_DNS_INTERFACE_KEY,
- DFS_DATANODE_DNS_INTERFACE_DEFAULT), config.get(
- DFS_DATANODE_DNS_NAMESERVER_KEY,
- DFS_DATANODE_DNS_NAMESERVER_DEFAULT));
+ name = DNS.getDefaultHost(
+ config.get(DFS_DATANODE_DNS_INTERFACE_KEY,
+ DFS_DATANODE_DNS_INTERFACE_DEFAULT),
+ config.get(DFS_DATANODE_DNS_NAMESERVER_KEY,
+ DFS_DATANODE_DNS_NAMESERVER_DEFAULT));
}
return name;
}
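
The hostname lookup order is: the explicit dfs.datanode.hostname key from the config, else reverse DNS. A rough standalone equivalent, assuming a plain JDK lookup in place of Hadoop's DNS.getDefaultHost (which additionally honors the dns.interface/dns.nameserver keys):

    import java.net.InetAddress;
    import java.net.UnknownHostException;
    import java.util.Properties;

    class HostNameSketch {
      static String getHostName(Properties conf) throws UnknownHostException {
        String name = conf.getProperty("dfs.datanode.hostname");
        if (name == null) {
          // Fallback: ask the resolver; NB the result may not be a FQDN.
          name = InetAddress.getLocalHost().getCanonicalHostName();
        }
        return name;
      }
    }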
@@ -485,23 +496,22 @@ public class DataNode extends Configured
}
private void initDataXceiver(Configuration conf) throws IOException {
- InetSocketAddress streamingAddr = DataNode.getStreamingAddr(conf);
-
// find free port or use privileged port provided
ServerSocket ss;
- if(secureResources == null) {
+ if (secureResources == null) {
+ InetSocketAddress addr = DataNode.getStreamingAddr(conf);
ss = (dnConf.socketWriteTimeout > 0) ?
ServerSocketChannel.open().socket() : new ServerSocket();
- Server.bind(ss, streamingAddr, 0);
+ Server.bind(ss, addr, 0);
} else {
ss = secureResources.getStreamingSocket();
}
ss.setReceiveBufferSize(HdfsConstants.DEFAULT_DATA_SOCKET_SIZE);
- // adjust machine name with the actual port
- int tmpPort = ss.getLocalPort();
- selfAddr = new InetSocketAddress(ss.getInetAddress().getHostAddress(),
- tmpPort);
- LOG.info("Opened streaming server at " + selfAddr);
+
+ streamingAddr = new InetSocketAddress(ss.getInetAddress().getHostAddress(),
+ ss.getLocalPort());
+
+ LOG.info("Opened streaming server at " + streamingAddr);
this.threadGroup = new ThreadGroup("dataXceiverServer");
this.dataXceiverServer = new Daemon(threadGroup,
new DataXceiverServer(ss, conf, this));
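
The reworked initDataXceiver records the actually-bound address rather than the configured one, which matters when the configured port is 0 (ephemeral). A standalone sketch of that bind-then-read-back pattern:

    import java.net.InetSocketAddress;
    import java.net.ServerSocket;

    class BindSketch {
      public static void main(String[] args) throws Exception {
        ServerSocket ss = new ServerSocket();
        ss.bind(new InetSocketAddress("0.0.0.0", 0));  // 0 = pick a free port
        InetSocketAddress streamingAddr = new InetSocketAddress(
            ss.getInetAddress().getHostAddress(), ss.getLocalPort());
        System.out.println("Opened streaming server at " + streamingAddr);
        ss.close();
      }
    }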
@@ -646,7 +656,7 @@ public class DataNode extends Configured
this.blockPoolTokenSecretManager = new BlockPoolTokenSecretManager();
initIpcServer(conf);
- metrics = DataNodeMetrics.create(conf, getMachineName());
+ metrics = DataNodeMetrics.create(conf, getDisplayName());
blockPoolManager = new BlockPoolManager(this);
blockPoolManager.refreshNamenodes(conf);
@@ -657,14 +667,18 @@ public class DataNode extends Configured
* @param nsInfo the namespace info from the first part of the NN handshake
*/
DatanodeRegistration createBPRegistration(NamespaceInfo nsInfo) {
- DatanodeRegistration bpRegistration = createUnknownBPRegistration();
- String blockPoolId = nsInfo.getBlockPoolID();
-
+ final String xferIp = streamingAddr.getAddress().getHostAddress();
+ DatanodeRegistration bpRegistration = new DatanodeRegistration(xferIp);
+ bpRegistration.setXferPort(getXferPort());
+ bpRegistration.setInfoPort(getInfoPort());
+ bpRegistration.setIpcPort(getIpcPort());
+ bpRegistration.setHostName(hostName);
bpRegistration.setStorageID(getStorageId());
- StorageInfo storageInfo = storage.getBPStorage(blockPoolId);
+
+ StorageInfo storageInfo = storage.getBPStorage(nsInfo.getBlockPoolID());
if (storageInfo == null) {
// it's null in the case of SimulatedDataSet
- bpRegistration.storageInfo.layoutVersion = HdfsConstants.LAYOUT_VERSION;
+ bpRegistration.getStorageInfo().layoutVersion = HdfsConstants.LAYOUT_VERSION;
bpRegistration.setStorageInfo(nsInfo);
} else {
bpRegistration.setStorageInfo(storageInfo);
@@ -679,17 +693,18 @@ public class DataNode extends Configured
* Also updates the block pool's state in the secret manager.
*/
synchronized void bpRegistrationSucceeded(DatanodeRegistration bpRegistration,
- String blockPoolId)
- throws IOException {
- hostName = bpRegistration.getHost();
+ String blockPoolId) throws IOException {
+ // Set the ID if we haven't already
+ if (null == id) {
+ id = bpRegistration;
+ }
if (storage.getStorageID().equals("")) {
- // This is a fresh datanode -- take the storage ID provided by the
- // NN and persist it.
+ // This is a fresh datanode, persist the NN-provided storage ID
storage.setStorageID(bpRegistration.getStorageID());
storage.writeAll();
LOG.info("New storage id " + bpRegistration.getStorageID()
- + " is assigned to data-node " + bpRegistration.getName());
+ + " is assigned to data-node " + bpRegistration);
} else if(!storage.getStorageID().equals(bpRegistration.getStorageID())) {
throw new IOException("Inconsistent storage IDs. Name-node returned "
+ bpRegistration.getStorageID()
@@ -708,7 +723,7 @@ public class DataNode extends Configured
*/
private void registerBlockPoolWithSecretManager(DatanodeRegistration bpRegistration,
String blockPoolId) throws IOException {
- ExportedBlockKeys keys = bpRegistration.exportedKeys;
+ ExportedBlockKeys keys = bpRegistration.getExportedKeys();
isBlockTokenEnabled = keys.isBlockTokenEnabled();
// TODO should we check that all federated nns are either enabled or
// disabled?
@@ -728,8 +743,8 @@ public class DataNode extends Configured
}
blockPoolTokenSecretManager.setKeys(blockPoolId,
- bpRegistration.exportedKeys);
- bpRegistration.exportedKeys = ExportedBlockKeys.DUMMY_KEYS;
+ bpRegistration.getExportedKeys());
+ bpRegistration.setExportedKeys(ExportedBlockKeys.DUMMY_KEYS);
}
/**
@@ -783,18 +798,6 @@ public class DataNode extends Configured
data.addBlockPool(nsInfo.getBlockPoolID(), conf);
}
- /**
- * Create a DatanodeRegistration object with no valid StorageInfo.
- * This is used when reporting an error during handshake - ie
- * before we can load any specific block pool.
- */
- private DatanodeRegistration createUnknownBPRegistration() {
- DatanodeRegistration reg = new DatanodeRegistration(getMachineName());
- reg.setInfoPort(infoServer.getPort());
- reg.setIpcPort(getIpcPort());
- return reg;
- }
-
BPOfferService[] getAllBpOs() {
return blockPoolManager.getAllNamenodeThreads();
}
@@ -844,23 +847,37 @@ public class DataNode extends Configured
MBeans.register("DataNode", "DataNodeInfo", this);
}
- int getPort() {
- return selfAddr.getPort();
+ int getXferPort() {
+ return streamingAddr.getPort();
}
String getStorageId() {
return storage.getStorageID();
}
-
- /**
- * Get host:port with host set to Datanode host and port set to the
- * port {@link DataXceiver} is serving.
- * @return host:port string
+
+ /**
+ * @return name useful for logging
*/
- public String getMachineName() {
- return hostName + ":" + getPort();
+ public String getDisplayName() {
+ // NB: our DatanodeID may not be set yet
+ return hostName + ":" + getIpcPort();
}
-
+
+ /**
+ * NB: The datanode can perform data transfer on the streaming
+ * address; however, clients are given the IPC IP address for data
+ * transfer, and that may be a different address.
+ *
+ * @return socket address for data transfer
+ */
+ public InetSocketAddress getXferAddress() {
+ return streamingAddr;
+ }
+
+ /**
+ * @return the datanode's IPC port
+ */
+ @VisibleForTesting
public int getIpcPort() {
return ipcServer.getListenerAddress().getPort();
}
@@ -881,25 +898,6 @@ public class DataNode extends Configured
}
/**
- * get BP registration by machine and port name (host:port)
- * @param mName - the name that the NN used
- * @return BP registration
- * @throws IOException
- */
- DatanodeRegistration getDNRegistrationByMachineName(String mName) {
- // TODO: all the BPs should have the same name as each other, they all come
- // from getName() here! and the use cases only are in tests where they just
- // call with getName(). So we could probably just make this method return
- // the first BPOS's registration. See HDFS-2609.
- BPOfferService [] bposArray = blockPoolManager.getAllNamenodeThreads();
- for (BPOfferService bpos : bposArray) {
- if(bpos.bpRegistration.getName().equals(mName))
- return bpos.bpRegistration;
- }
- return null;
- }
-
- /**
* Creates either NIO or regular depending on socketWriteTimeout.
*/
protected Socket newSocket() throws IOException {
@@ -918,8 +916,8 @@ public class DataNode extends Configured
public static InterDatanodeProtocol createInterDataNodeProtocolProxy(
DatanodeID datanodeid, final Configuration conf, final int socketTimeout)
throws IOException {
- final InetSocketAddress addr = NetUtils.createSocketAddr(
- datanodeid.getHost() + ":" + datanodeid.getIpcPort());
+ final InetSocketAddress addr =
+ NetUtils.createSocketAddr(datanodeid.getIpcAddr());
if (InterDatanodeProtocol.LOG.isDebugEnabled()) {
InterDatanodeProtocol.LOG.debug("InterDatanodeProtocol addr=" + addr);
}
@@ -936,10 +934,6 @@ public class DataNode extends Configured
throw new IOException(ie.getMessage());
}
}
-
- public InetSocketAddress getSelfAddr() {
- return selfAddr;
- }
DataNodeMetrics getMetrics() {
return metrics;
@@ -947,7 +941,7 @@ public class DataNode extends Configured
public static void setNewStorageID(DatanodeID dnId) {
LOG.info("Datanode is " + dnId);
- dnId.storageID = createNewStorageId(dnId.getPort());
+ dnId.setStorageID(createNewStorageId(dnId.getXferPort()));
}
static String createNewStorageId(int port) {
@@ -1223,7 +1217,7 @@ public class DataNode extends Configured
if (LOG.isInfoEnabled()) {
StringBuilder xfersBuilder = new StringBuilder();
for (int i = 0; i < numTargets; i++) {
- xfersBuilder.append(xferTargets[i].getName());
+ xfersBuilder.append(xferTargets[i]);
xfersBuilder.append(" ");
}
LOG.info(bpReg + " Starting thread to transfer block " +
@@ -1381,7 +1375,7 @@ public class DataNode extends Configured
try {
InetSocketAddress curTarget =
- NetUtils.createSocketAddr(targets[0].getName());
+ NetUtils.createSocketAddr(targets[0].getXferAddr());
sock = newSocket();
NetUtils.connect(sock, curTarget, dnConf.socketTimeout);
sock.setSoTimeout(targets.length * dnConf.socketTimeout);
@@ -1434,9 +1428,8 @@ public class DataNode extends Configured
}
}
} catch (IOException ie) {
- LOG.warn(
- bpReg + ":Failed to transfer " + b + " to " + targets[0].getName()
- + " got ", ie);
+ LOG.warn(bpReg + ":Failed to transfer " + b + " to " +
+ targets[0] + " got ", ie);
// check if there are any disk problem
checkDiskError();
@@ -1632,7 +1625,7 @@ public class DataNode extends Configured
@Override
public String toString() {
- return "DataNode{data=" + data + ", localName='" + getMachineName()
+ return "DataNode{data=" + data + ", localName='" + getDisplayName()
+ "', storageID='" + getStorageId() + "', xmitsInProgress="
+ xmitsInProgress.get() + "}";
}
@@ -1990,15 +1983,14 @@ public class DataNode extends Configured
private static void logRecoverBlock(String who,
ExtendedBlock block, DatanodeID[] targets) {
- StringBuilder msg = new StringBuilder(targets[0].getName());
+ StringBuilder msg = new StringBuilder(targets[0].toString());
for (int i = 1; i < targets.length; i++) {
- msg.append(", " + targets[i].getName());
+ msg.append(", " + targets[i]);
}
LOG.info(who + " calls recoverBlock(block=" + block
+ ", targets=[" + msg + "])");
}
- // ClientDataNodeProtocol implementation
@Override // ClientDataNodeProtocol
public long getReplicaVisibleLength(final ExtendedBlock block) throws IOException {
checkWriteAccess(block);
@@ -2076,8 +2068,7 @@ public class DataNode extends Configured
storage.finalizeUpgrade(blockPoolId);
}
- // Determine a Datanode's streaming address
- public static InetSocketAddress getStreamingAddr(Configuration conf) {
+ static InetSocketAddress getStreamingAddr(Configuration conf) {
return NetUtils.createSocketAddr(
conf.get(DFS_DATANODE_ADDRESS_KEY, DFS_DATANODE_ADDRESS_DEFAULT));
}
@@ -2099,8 +2090,11 @@ public class DataNode extends Configured
return this.getConf().get("dfs.datanode.info.port");
}
- public int getInfoPort(){
- return this.infoServer.getPort();
+ /**
+ * @return the datanode's http port
+ */
+ public int getInfoPort() {
+ return infoServer.getPort();
}
/**
@@ -2142,7 +2136,7 @@ public class DataNode extends Configured
blockPoolManager.refreshNamenodes(conf);
}
- @Override //ClientDatanodeProtocol
+ @Override // ClientDatanodeProtocol
public void refreshNamenodes() throws IOException {
conf = new Configuration();
refreshNamenodes(conf);
@@ -2204,10 +2198,9 @@ public class DataNode extends Configured
return true;
}
- /** Methods used by fault injection tests */
+ @VisibleForTesting
public DatanodeID getDatanodeId() {
- return new DatanodeID(getMachineName(), getStorageId(),
- infoServer.getPort(), getIpcPort());
+ return id;
}
/**
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java Mon Apr 2 07:28:42 2012
@@ -73,9 +73,6 @@ public class DataStorage extends Storage
public final static String STORAGE_DIR_FINALIZED = "finalized";
public final static String STORAGE_DIR_TMP = "tmp";
- private static final Pattern PRE_GENSTAMP_META_FILE_PATTERN =
- Pattern.compile("(.*blk_[-]*\\d+)\\.meta$");
-
/** Access to this variable is guarded by "this" */
private String storageID;
@@ -197,7 +194,7 @@ public class DataStorage extends Storage
}
// make sure we have storage id set - if not - generate new one
- createStorageID(datanode.getPort());
+ createStorageID(datanode.getXferPort());
// 3. Update all storages. Some of them might have just been formatted.
this.writeAll();
@@ -669,13 +666,6 @@ public class DataStorage extends Storage
in.close();
}
} else {
-
- //check if we are upgrading from pre-generation stamp version.
- if (oldLV >= PRE_GENERATIONSTAMP_LAYOUT_VERSION) {
- // Link to the new file name.
- to = new File(convertMetatadataFileName(to.getAbsolutePath()));
- }
-
HardLink.createHardLink(from, to);
hl.linkStats.countSingleLinks++;
}
@@ -687,50 +677,32 @@ public class DataStorage extends Storage
if (!to.mkdirs())
throw new IOException("Cannot create directory " + to);
- //If upgrading from old stuff, need to munge the filenames. That has to
- //be done one file at a time, so hardlink them one at a time (slow).
- if (oldLV >= PRE_GENERATIONSTAMP_LAYOUT_VERSION) {
- String[] blockNames = from.list(new java.io.FilenameFilter() {
- public boolean accept(File dir, String name) {
- return name.startsWith(BLOCK_SUBDIR_PREFIX)
- || name.startsWith(BLOCK_FILE_PREFIX)
- || name.startsWith(COPY_FILE_PREFIX);
- }
- });
- if (blockNames.length == 0) {
- hl.linkStats.countEmptyDirs++;
+ String[] blockNames = from.list(new java.io.FilenameFilter() {
+ public boolean accept(File dir, String name) {
+ return name.startsWith(BLOCK_FILE_PREFIX);
}
- else for(int i = 0; i < blockNames.length; i++)
- linkBlocks(new File(from, blockNames[i]),
- new File(to, blockNames[i]), oldLV, hl);
- }
- else {
- //If upgrading from a relatively new version, we only need to create
- //links with the same filename. This can be done in bulk (much faster).
- String[] blockNames = from.list(new java.io.FilenameFilter() {
+ });
+
+ // Block files just need hard links with the same file names
+ // but in a different directory
+ if (blockNames.length > 0) {
+ HardLink.createHardLinkMult(from, blockNames, to);
+ hl.linkStats.countMultLinks++;
+ hl.linkStats.countFilesMultLinks += blockNames.length;
+ } else {
+ hl.linkStats.countEmptyDirs++;
+ }
+
+ // Now take care of the rest of the files and subdirectories
+ String[] otherNames = from.list(new java.io.FilenameFilter() {
public boolean accept(File dir, String name) {
- return name.startsWith(BLOCK_FILE_PREFIX);
+ return name.startsWith(BLOCK_SUBDIR_PREFIX)
+ || name.startsWith(COPY_FILE_PREFIX);
}
});
- if (blockNames.length > 0) {
- HardLink.createHardLinkMult(from, blockNames, to);
- hl.linkStats.countMultLinks++;
- hl.linkStats.countFilesMultLinks += blockNames.length;
- } else {
- hl.linkStats.countEmptyDirs++;
- }
-
- //now take care of the rest of the files and subdirectories
- String[] otherNames = from.list(new java.io.FilenameFilter() {
- public boolean accept(File dir, String name) {
- return name.startsWith(BLOCK_SUBDIR_PREFIX)
- || name.startsWith(COPY_FILE_PREFIX);
- }
- });
- for(int i = 0; i < otherNames.length; i++)
- linkBlocks(new File(from, otherNames[i]),
- new File(to, otherNames[i]), oldLV, hl);
- }
+ for(int i = 0; i < otherNames.length; i++)
+ linkBlocks(new File(from, otherNames[i]),
+ new File(to, otherNames[i]), oldLV, hl);
}
private void verifyDistributedUpgradeProgress(UpgradeManagerDatanode um,
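
With the pre-generation-stamp rename path gone, linkBlocks reduces to: bulk hard-link the block files name-for-name, then recurse into subdirectories and copy files. A sketch of the bulk step, using java.nio.file in place of Hadoop's HardLink helper:

    import java.io.IOException;
    import java.nio.file.DirectoryStream;
    import java.nio.file.Files;
    import java.nio.file.Path;

    class LinkBlocksSketch {
      /** Hard-link every "blk_*" file in from/ to the same name under to/. */
      static void linkBlockFiles(Path from, Path to) throws IOException {
        Files.createDirectories(to);
        try (DirectoryStream<Path> blocks =
                 Files.newDirectoryStream(from, "blk_*")) {
          for (Path src : blocks) {
            Files.createLink(to.resolve(src.getFileName()), src);
          }
        }
      }
    }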
@@ -742,22 +714,6 @@ public class DataStorage extends Storage
}
/**
- * This is invoked on target file names when upgrading from pre generation
- * stamp version (version -13) to correct the metatadata file name.
- * @param oldFileName
- * @return the new metadata file name with the default generation stamp.
- */
- private static String convertMetatadataFileName(String oldFileName) {
- Matcher matcher = PRE_GENSTAMP_META_FILE_PATTERN.matcher(oldFileName);
- if (matcher.matches()) {
- //return the current metadata file name
- return DatanodeUtil.getMetaFileName(matcher.group(1),
- GenerationStamp.GRANDFATHER_GENERATION_STAMP);
- }
- return oldFileName;
- }
-
- /**
* Add bpStorage into bpStorageMap
*/
private void addBlockPoolStorage(String bpID, BlockPoolSliceStorage bpStorage
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java Mon Apr 2 07:28:42 2012
@@ -168,13 +168,13 @@ class DataXceiver extends Receiver imple
++opsProcessed;
} while (!s.isClosed() && dnConf.socketKeepaliveTimeout > 0);
} catch (Throwable t) {
- LOG.error(datanode.getMachineName() + ":DataXceiver error processing " +
+ LOG.error(datanode.getDisplayName() + ":DataXceiver error processing " +
((op == null) ? "unknown" : op.name()) + " operation " +
" src: " + remoteAddress +
" dest: " + localAddress, t);
} finally {
if (LOG.isDebugEnabled()) {
- LOG.debug(datanode.getMachineName() + ":Number of active connections is: "
+ LOG.debug(datanode.getDisplayName() + ":Number of active connections is: "
+ datanode.getXceiverCount());
}
updateCurrentThreadName("Cleaning up");
@@ -352,7 +352,7 @@ class DataXceiver extends Receiver imple
if (targets.length > 0) {
InetSocketAddress mirrorTarget = null;
// Connect to backup machine
- mirrorNode = targets[0].getName();
+ mirrorNode = targets[0].getXferAddr();
mirrorTarget = NetUtils.createSocketAddr(mirrorNode);
mirrorSock = datanode.newSocket();
try {
@@ -667,8 +667,8 @@ class DataXceiver extends Receiver imple
try {
// get the output stream to the proxy
- InetSocketAddress proxyAddr = NetUtils.createSocketAddr(
- proxySource.getName());
+ InetSocketAddress proxyAddr =
+ NetUtils.createSocketAddr(proxySource.getXferAddr());
proxySock = datanode.newSocket();
NetUtils.connect(proxySock, proxyAddr, dnConf.socketTimeout);
proxySock.setSoTimeout(dnConf.socketTimeout);
@@ -820,7 +820,7 @@ class DataXceiver extends Receiver imple
if (mode == BlockTokenSecretManager.AccessMode.WRITE) {
DatanodeRegistration dnR =
datanode.getDNRegistrationForBP(blk.getBlockPoolId());
- resp.setFirstBadLink(dnR.getName());
+ resp.setFirstBadLink(dnR.getXferAddr());
}
resp.build().writeDelimitedTo(out);
out.flush();
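
The mirror and proxy connections above now derive the socket address from getXferAddr() rather than getName(). The pattern, sketched standalone (parsing host:port by hand instead of NetUtils.createSocketAddr):

    import java.io.IOException;
    import java.net.InetSocketAddress;
    import java.net.Socket;

    class XferConnectSketch {
      static Socket connect(String xferAddr, int timeoutMs) throws IOException {
        int colon = xferAddr.lastIndexOf(':');
        InetSocketAddress target = new InetSocketAddress(
            xferAddr.substring(0, colon),
            Integer.parseInt(xferAddr.substring(colon + 1)));
        Socket sock = new Socket();
        sock.connect(target, timeoutMs);
        sock.setSoTimeout(timeoutMs);
        return sock;
      }
    }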
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java Mon Apr 2 07:28:42 2012
@@ -152,11 +152,11 @@ class DataXceiverServer implements Runna
// another thread closed our listener socket - that's expected during shutdown,
// but not in other circumstances
if (datanode.shouldRun) {
- LOG.warn(datanode.getMachineName() + ":DataXceiverServer: ", ace);
+ LOG.warn(datanode.getDisplayName() + ":DataXceiverServer: ", ace);
}
} catch (IOException ie) {
IOUtils.closeSocket(s);
- LOG.warn(datanode.getMachineName() + ":DataXceiverServer: ", ie);
+ LOG.warn(datanode.getDisplayName() + ":DataXceiverServer: ", ie);
} catch (OutOfMemoryError ie) {
IOUtils.closeSocket(s);
// DataNode can run out of memory if there are too many transfers.
@@ -169,7 +169,7 @@ class DataXceiverServer implements Runna
// ignore
}
} catch (Throwable te) {
- LOG.error(datanode.getMachineName()
+ LOG.error(datanode.getDisplayName()
+ ":DataXceiverServer: Exiting due to: ", te);
datanode.shouldRun = false;
}
@@ -177,7 +177,7 @@ class DataXceiverServer implements Runna
try {
ss.close();
} catch (IOException ie) {
- LOG.warn(datanode.getMachineName()
+ LOG.warn(datanode.getDisplayName()
+ " :DataXceiverServer: close exception", ie);
}
}
@@ -188,7 +188,7 @@ class DataXceiverServer implements Runna
try {
this.ss.close();
} catch (IOException ie) {
- LOG.warn(datanode.getMachineName() + ":DataXceiverServer.kill(): ", ie);
+ LOG.warn(datanode.getDisplayName() + ":DataXceiverServer.kill(): ", ie);
}
// close all the sockets that were accepted earlier
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java Mon Apr 2 07:28:42 2012
@@ -136,10 +136,8 @@ public class DatanodeJspHelper {
out.print("Empty file");
} else {
DatanodeInfo chosenNode = JspHelper.bestNode(firstBlock, conf);
- String fqdn = canonicalize(chosenNode.getHost());
- String datanodeAddr = chosenNode.getName();
- int datanodePort = Integer.parseInt(datanodeAddr.substring(
- datanodeAddr.indexOf(':') + 1, datanodeAddr.length()));
+ String fqdn = canonicalize(chosenNode.getIpAddr());
+ int datanodePort = chosenNode.getXferPort();
String redirectLocation = "http://" + fqdn + ":"
+ chosenNode.getInfoPort() + "/browseBlock.jsp?blockId="
+ firstBlock.getBlock().getBlockId() + "&blockSize="
@@ -313,7 +311,7 @@ public class DatanodeJspHelper {
dfs.close();
return;
}
- String fqdn = canonicalize(chosenNode.getHost());
+ String fqdn = canonicalize(chosenNode.getIpAddr());
String tailUrl = "http://" + fqdn + ":" + chosenNode.getInfoPort()
+ "/tail.jsp?filename=" + URLEncoder.encode(filename, "UTF-8")
+ "&namenodeInfoPort=" + namenodeInfoPort
@@ -360,10 +358,9 @@ public class DatanodeJspHelper {
out.print("<td>" + blockidstring + ":</td>");
DatanodeInfo[] locs = cur.getLocations();
for (int j = 0; j < locs.length; j++) {
- String datanodeAddr = locs[j].getName();
- datanodePort = Integer.parseInt(datanodeAddr.substring(datanodeAddr
- .indexOf(':') + 1, datanodeAddr.length()));
- fqdn = canonicalize(locs[j].getHost());
+ String datanodeAddr = locs[j].getXferAddr();
+ datanodePort = locs[j].getXferPort();
+ fqdn = canonicalize(locs[j].getIpAddr());
String blockUrl = "http://" + fqdn + ":" + locs[j].getInfoPort()
+ "/browseBlock.jsp?blockId=" + blockidstring
+ "&blockSize=" + blockSize
@@ -519,10 +516,8 @@ public class DatanodeJspHelper {
nextStartOffset = 0;
nextBlockSize = nextBlock.getBlock().getNumBytes();
DatanodeInfo d = JspHelper.bestNode(nextBlock, conf);
- String datanodeAddr = d.getName();
- nextDatanodePort = Integer.parseInt(datanodeAddr.substring(
- datanodeAddr.indexOf(':') + 1, datanodeAddr.length()));
- nextHost = d.getHost();
+ nextDatanodePort = d.getXferPort();
+ nextHost = d.getIpAddr();
nextPort = d.getInfoPort();
}
}
@@ -573,10 +568,8 @@ public class DatanodeJspHelper {
prevStartOffset = 0;
prevBlockSize = prevBlock.getBlock().getNumBytes();
DatanodeInfo d = JspHelper.bestNode(prevBlock, conf);
- String datanodeAddr = d.getName();
- prevDatanodePort = Integer.parseInt(datanodeAddr.substring(
- datanodeAddr.indexOf(':') + 1, datanodeAddr.length()));
- prevHost = d.getHost();
+ prevDatanodePort = d.getXferPort();
+ prevHost = d.getIpAddr();
prevPort = d.getInfoPort();
}
}
@@ -693,7 +686,8 @@ public class DatanodeJspHelper {
dfs.close();
return;
}
- InetSocketAddress addr = NetUtils.createSocketAddr(chosenNode.getName());
+ InetSocketAddress addr =
+ NetUtils.createSocketAddr(chosenNode.getXferAddr());
// view the last chunkSizeToView bytes while Tailing
final long startOffset = blockSize >= chunkSizeToView ? blockSize
- chunkSizeToView : 0;
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeManagerDatanode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeManagerDatanode.java?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeManagerDatanode.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeManagerDatanode.java Mon Apr 2 07:28:42 2012
@@ -55,7 +55,7 @@ class UpgradeManagerDatanode extends Upg
if( ! super.initializeUpgrade())
return; // distr upgrade is not needed
DataNode.LOG.info("\n Distributed upgrade for DataNode "
- + dataNode.getMachineName()
+ + dataNode.getDisplayName()
+ " version " + getUpgradeVersion() + " to current LV "
+ HdfsConstants.LAYOUT_VERSION + " is initialized.");
UpgradeObjectDatanode curUO = (UpgradeObjectDatanode)currentUpgrades.first();
@@ -113,7 +113,7 @@ class UpgradeManagerDatanode extends Upg
upgradeDaemon = new Daemon(curUO);
upgradeDaemon.start();
DataNode.LOG.info("\n Distributed upgrade for DataNode "
- + dataNode.getMachineName()
+ + dataNode.getDisplayName()
+ " version " + getUpgradeVersion() + " to current LV "
+ HdfsConstants.LAYOUT_VERSION + " is started.");
return true;
@@ -128,7 +128,7 @@ class UpgradeManagerDatanode extends Upg
if(startUpgrade()) // upgrade started
return;
throw new IOException(
- "Distributed upgrade for DataNode " + dataNode.getMachineName()
+ "Distributed upgrade for DataNode " + dataNode.getDisplayName()
+ " version " + getUpgradeVersion() + " to current LV "
+ HdfsConstants.LAYOUT_VERSION + " cannot be started. "
+ "The upgrade object is not defined.");
@@ -143,7 +143,7 @@ class UpgradeManagerDatanode extends Upg
currentUpgrades = null;
upgradeDaemon = null;
DataNode.LOG.info("\n Distributed upgrade for DataNode "
- + dataNode.getMachineName()
+ + dataNode.getDisplayName()
+ " version " + getUpgradeVersion() + " to current LV "
+ HdfsConstants.LAYOUT_VERSION + " is complete.");
}
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java Mon Apr 2 07:28:42 2012
@@ -62,7 +62,6 @@ import org.apache.hadoop.hdfs.server.nam
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateMasterKeyOp;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease;
import org.apache.hadoop.hdfs.util.Holder;
-import org.apache.hadoop.io.IOUtils;
import com.google.common.base.Joiner;
@@ -231,37 +230,13 @@ public class FSEditLogLoader {
// get name and replication
final short replication = fsNamesys.getBlockManager(
).adjustReplication(addCloseOp.replication);
- PermissionStatus permissions = fsNamesys.getUpgradePermission();
- if (addCloseOp.permissions != null) {
- permissions = addCloseOp.permissions;
- }
- long blockSize = addCloseOp.blockSize;
-
- // Versions of HDFS prior to 0.17 may log an OP_ADD transaction
- // which includes blocks in it. When we update the minimum
- // upgrade version to something more recent than 0.17, we can
- // simplify this code by asserting that OP_ADD transactions
- // don't have any blocks.
-
- // Older versions of HDFS does not store the block size in inode.
- // If the file has more than one block, use the size of the
- // first block as the blocksize. Otherwise use the default
- // block size.
- if (-8 <= logVersion && blockSize == 0) {
- if (addCloseOp.blocks.length > 1) {
- blockSize = addCloseOp.blocks[0].getNumBytes();
- } else {
- long first = ((addCloseOp.blocks.length == 1)?
- addCloseOp.blocks[0].getNumBytes(): 0);
- blockSize = Math.max(fsNamesys.getDefaultBlockSize(), first);
- }
- }
+ assert addCloseOp.blocks.length == 0;
// add to the file tree
newFile = (INodeFile)fsDir.unprotectedAddFile(
- addCloseOp.path, permissions,
+ addCloseOp.path, addCloseOp.permissions,
replication, addCloseOp.mtime,
- addCloseOp.atime, blockSize,
+ addCloseOp.atime, addCloseOp.blockSize,
true, addCloseOp.clientName, addCloseOp.clientMachine);
fsNamesys.leaseManager.addLease(addCloseOp.clientName, addCloseOp.path);
@@ -373,12 +348,7 @@ public class FSEditLogLoader {
}
case OP_MKDIR: {
MkdirOp mkdirOp = (MkdirOp)op;
- PermissionStatus permissions = fsNamesys.getUpgradePermission();
- if (mkdirOp.permissions != null) {
- permissions = mkdirOp.permissions;
- }
-
- fsDir.unprotectedMkdir(mkdirOp.path, permissions,
+ fsDir.unprotectedMkdir(mkdirOp.path, mkdirOp.permissions,
mkdirOp.timestamp);
break;
}
@@ -493,9 +463,6 @@ public class FSEditLogLoader {
// no data in here currently.
break;
}
- case OP_DATANODE_ADD:
- case OP_DATANODE_REMOVE:
- break;
default:
throw new IOException("Invalid operation read " + op.opCode);
}
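
The branches deleted above only fired for edit logs written before layout version -17. They are unreachable now because storages older than Storage.LAST_UPGRADABLE_LAYOUT_VERSION are rejected before replay (see the comment removed from FSImageFormat below). A hedged sketch of that precondition; the method and message are illustrative, only the constant is from the Hadoop source:

    import java.io.IOException;
    import org.apache.hadoop.hdfs.server.common.Storage;

    // Layout versions are negative and decrease over time, so a
    // numerically larger version denotes an older, unsupported format.
    class LayoutPrecondition {
        static void rejectAncientLayout(int logVersion) throws IOException {
            if (logVersion > Storage.LAST_UPGRADABLE_LAYOUT_VERSION) {
                throw new IOException("Layout version " + logVersion
                    + " predates the last upgradable version "
                    + Storage.LAST_UPGRADABLE_LAYOUT_VERSION);
            }
        }
    }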
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java Mon Apr 2 07:28:42 2012
@@ -30,11 +30,8 @@ import org.apache.hadoop.fs.Options.Rena
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
-import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
-import org.apache.hadoop.hdfs.server.common.GenerationStamp;
import org.apache.hadoop.util.PureJavaCrc32;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.*;
@@ -81,8 +78,6 @@ public abstract class FSEditLogOp {
instances.put(OP_DELETE, new DeleteOp());
instances.put(OP_MKDIR, new MkdirOp());
instances.put(OP_SET_GENSTAMP, new SetGenstampOp());
- instances.put(OP_DATANODE_ADD, new DatanodeAddOp());
- instances.put(OP_DATANODE_REMOVE, new DatanodeRemoveOp());
instances.put(OP_SET_PERMISSIONS, new SetPermissionsOp());
instances.put(OP_SET_OWNER, new SetOwnerOp());
instances.put(OP_SET_NS_QUOTA, new SetNSQuotaOp());
@@ -147,7 +142,6 @@ public abstract class FSEditLogOp {
PermissionStatus permissions;
String clientName;
String clientMachine;
- //final DatanodeDescriptor[] dataNodeDescriptors; UNUSED
private AddCloseOp(FSEditLogOpCodes opCode) {
super(opCode);
@@ -226,13 +220,10 @@ public abstract class FSEditLogOp {
@Override
void readFields(DataInputStream in, int logVersion)
throws IOException {
- // versions > 0 support per file replication
- // get name and replication
if (!LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
this.length = in.readInt();
}
- if (-7 == logVersion && length != 3||
- -17 < logVersion && logVersion < -7 && length != 4 ||
+ if ((-17 < logVersion && length != 4) ||
(logVersion <= -17 && length != 5 && !LayoutVersion.supports(
Feature.EDITLOG_OP_OPTIMIZATION, logVersion))) {
throw new IOException("Incorrect data format." +
@@ -259,49 +250,26 @@ public abstract class FSEditLogOp {
} else {
this.atime = 0;
}
- if (logVersion < -7) {
- if (LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
- this.blockSize = FSImageSerialization.readLong(in);
- } else {
- this.blockSize = readLong(in);
- }
+
+ if (LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
+ this.blockSize = FSImageSerialization.readLong(in);
} else {
- this.blockSize = 0;
+ this.blockSize = readLong(in);
}
- // get blocks
this.blocks = readBlocks(in, logVersion);
-
- if (logVersion <= -11) {
- this.permissions = PermissionStatus.read(in);
- } else {
- this.permissions = null;
- }
+ this.permissions = PermissionStatus.read(in);
// clientname, clientMachine and block locations of last block.
- if (this.opCode == OP_ADD && logVersion <= -12) {
+ if (this.opCode == OP_ADD) {
this.clientName = FSImageSerialization.readString(in);
this.clientMachine = FSImageSerialization.readString(in);
- if (-13 <= logVersion) {
- readDatanodeDescriptorArray(in);
- }
} else {
this.clientName = "";
this.clientMachine = "";
}
}
- /** This method is defined for compatibility reason. */
- private static DatanodeDescriptor[] readDatanodeDescriptorArray(DataInput in)
- throws IOException {
- DatanodeDescriptor[] locations = new DatanodeDescriptor[in.readInt()];
- for (int i = 0; i < locations.length; i++) {
- locations[i] = new DatanodeDescriptor();
- locations[i].readFieldsFromFSEditLog(in);
- }
- return locations;
- }
-
private static Block[] readBlocks(
DataInputStream in,
int logVersion) throws IOException {
@@ -309,14 +277,7 @@ public abstract class FSEditLogOp {
Block[] blocks = new Block[numBlocks];
for (int i = 0; i < numBlocks; i++) {
Block blk = new Block();
- if (logVersion <= -14) {
- blk.readFields(in);
- } else {
- BlockTwo oldblk = new BlockTwo();
- oldblk.readFields(in);
- blk.set(oldblk.blkid, oldblk.len,
- GenerationStamp.GRANDFATHER_GENERATION_STAMP);
- }
+ blk.readFields(in);
blocks[i] = blk;
}
return blocks;
@@ -788,17 +749,14 @@ public abstract class FSEditLogOp {
}
@Override
- void readFields(DataInputStream in, int logVersion)
- throws IOException {
-
+ void readFields(DataInputStream in, int logVersion) throws IOException {
if (!LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
this.length = in.readInt();
}
if (-17 < logVersion && length != 2 ||
logVersion <= -17 && length != 3
&& !LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
- throw new IOException("Incorrect data format. "
- + "Mkdir operation.");
+ throw new IOException("Incorrect data format. Mkdir operation.");
}
this.path = FSImageSerialization.readString(in);
if (LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
@@ -811,7 +769,6 @@ public abstract class FSEditLogOp {
// However, currently this is not being updated/used because of
// performance reasons.
if (LayoutVersion.supports(Feature.FILE_ACCESS_TIME, logVersion)) {
- /* unused this.atime = */
if (LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
FSImageSerialization.readLong(in);
} else {
@@ -819,11 +776,7 @@ public abstract class FSEditLogOp {
}
}
- if (logVersion <= -11) {
- this.permissions = PermissionStatus.read(in);
- } else {
- this.permissions = null;
- }
+ this.permissions = PermissionStatus.read(in);
}
@Override
@@ -888,77 +841,6 @@ public abstract class FSEditLogOp {
}
}
- @SuppressWarnings("deprecation")
- static class DatanodeAddOp extends FSEditLogOp {
- private DatanodeAddOp() {
- super(OP_DATANODE_ADD);
- }
-
- static DatanodeAddOp getInstance() {
- return (DatanodeAddOp)opInstances.get()
- .get(OP_DATANODE_ADD);
- }
-
- @Override
- void writeFields(DataOutputStream out) throws IOException {
- throw new IOException("Deprecated, should not write");
- }
-
- @Override
- void readFields(DataInputStream in, int logVersion)
- throws IOException {
- //Datanodes are not persistent any more.
- FSImageSerialization.DatanodeImage.skipOne(in);
- }
-
- @Override
- public String toString() {
- StringBuilder builder = new StringBuilder();
- builder.append("DatanodeAddOp [opCode=");
- builder.append(opCode);
- builder.append(", txid=");
- builder.append(txid);
- builder.append("]");
- return builder.toString();
- }
- }
-
- @SuppressWarnings("deprecation")
- static class DatanodeRemoveOp extends FSEditLogOp {
- private DatanodeRemoveOp() {
- super(OP_DATANODE_REMOVE);
- }
-
- static DatanodeRemoveOp getInstance() {
- return (DatanodeRemoveOp)opInstances.get()
- .get(OP_DATANODE_REMOVE);
- }
-
- @Override
- void writeFields(DataOutputStream out) throws IOException {
- throw new IOException("Deprecated, should not write");
- }
-
- @Override
- void readFields(DataInputStream in, int logVersion)
- throws IOException {
- DatanodeID nodeID = new DatanodeID();
- nodeID.readFields(in);
- //Datanodes are not persistent any more.
- }
-
- @Override
- public String toString() {
- StringBuilder builder = new StringBuilder();
- builder.append("DatanodeRemoveOp [opCode=");
- builder.append(opCode);
- builder.append(", txid=");
- builder.append(txid);
- builder.append("]");
- return builder.toString();
- }
- }
-
static class SetPermissionsOp extends FSEditLogOp {
String src;
FsPermission permissions;
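
With the -7 era gone, the length validation in AddCloseOp.readFields reduces to two cases. A hedged restatement as a standalone predicate; the per-era field counts are taken from the version gates in readFields, while the comments naming which field accounts for the difference are assumptions:

    import org.apache.hadoop.hdfs.protocol.LayoutVersion;
    import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;

    class AddCloseLengthCheck {
        // Returns true when a serialized OP_ADD/OP_CLOSE length field is
        // acceptable for the given layout version.
        static boolean isValidLength(int logVersion, int length) {
            if (LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
                return true;                 // length field no longer written
            }
            return (-17 < logVersion)
                ? length == 4                // pre-atime logs (assumed)
                : length == 5;               // atime adds one field (assumed)
        }
    }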
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java Mon Apr 2 07:28:42 2012
@@ -36,8 +36,8 @@ public enum FSEditLogOpCodes {
OP_DELETE ((byte) 2),
OP_MKDIR ((byte) 3),
OP_SET_REPLICATION ((byte) 4),
- @Deprecated OP_DATANODE_ADD ((byte) 5),
- @Deprecated OP_DATANODE_REMOVE((byte) 6),
+ @Deprecated OP_DATANODE_ADD ((byte) 5), // obsolete
+ @Deprecated OP_DATANODE_REMOVE((byte) 6), // obsolete
OP_SET_PERMISSIONS ((byte) 7),
OP_SET_OWNER ((byte) 8),
OP_CLOSE ((byte) 9),
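
Even though the two datanode ops are gone, their byte values stay reserved: opcodes are resolved from the raw byte when replaying a log, so reusing 5 or 6 for a new op would silently mis-parse old logs. A sketch of that lookup; the getOpCode() accessor is an assumption about the enum, not quoted from it:

    // Sketch only: resolve a raw opcode byte read from an edit log.
    static FSEditLogOpCodes fromByte(byte opCode) {
        for (FSEditLogOpCodes code : FSEditLogOpCodes.values()) {
            if (code.getOpCode() == opCode) {
                return code;
            }
        }
        return null; // unknown byte: caller treats the log as corrupt
    }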
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java Mon Apr 2 07:28:42 2012
@@ -131,34 +131,22 @@ class FSImageFormat {
DataInputStream in = new DataInputStream(fin);
try {
- /*
- * Note: Remove any checks for version earlier than
- * Storage.LAST_UPGRADABLE_LAYOUT_VERSION since we should never get
- * to here with older images.
- */
-
- /*
- * TODO we need to change format of the image file
- * it should not contain version and namespace fields
- */
// read image version: first appeared in version -1
int imgVersion = in.readInt();
- if(getLayoutVersion() != imgVersion)
+ if (getLayoutVersion() != imgVersion) {
throw new InconsistentFSStateException(curFile,
"imgVersion " + imgVersion +
" expected to be " + getLayoutVersion());
+ }
// read namespaceID: first appeared in version -2
in.readInt();
- // read number of files
- long numFiles = readNumFiles(in);
+ long numFiles = in.readLong();
// read in the last generation stamp.
- if (imgVersion <= -12) {
- long genstamp = in.readLong();
- namesystem.setGenerationStamp(genstamp);
- }
+ long genstamp = in.readLong();
+ namesystem.setGenerationStamp(genstamp);
// read the transaction ID of the last edit represented by
// this image
@@ -167,7 +155,6 @@ class FSImageFormat {
} else {
imgTxId = 0;
}
-
// read compression related info
FSImageCompression compression;
@@ -189,13 +176,9 @@ class FSImageFormat {
loadFullNameINodes(numFiles, in);
}
- // load datanode info
- this.loadDatanodes(in);
+ loadFilesUnderConstruction(in);
- // load Files Under Construction
- this.loadFilesUnderConstruction(in);
-
- this.loadSecretManagerState(in);
+ loadSecretManagerState(in);
// make sure to read to the end of file
int eof = in.read();
@@ -335,89 +318,44 @@ class FSImageFormat {
if (LayoutVersion.supports(Feature.FILE_ACCESS_TIME, imgVersion)) {
atime = in.readLong();
}
- if (imgVersion <= -8) {
- blockSize = in.readLong();
- }
+ blockSize = in.readLong();
int numBlocks = in.readInt();
BlockInfo blocks[] = null;
- // for older versions, a blocklist of size 0
- // indicates a directory.
- if ((-9 <= imgVersion && numBlocks > 0) ||
- (imgVersion < -9 && numBlocks >= 0)) {
+ if (numBlocks >= 0) {
blocks = new BlockInfo[numBlocks];
for (int j = 0; j < numBlocks; j++) {
blocks[j] = new BlockInfo(replication);
- if (-14 < imgVersion) {
- blocks[j].set(in.readLong(), in.readLong(),
- GenerationStamp.GRANDFATHER_GENERATION_STAMP);
- } else {
- blocks[j].readFields(in);
- }
- }
- }
- // Older versions of HDFS does not store the block size in inode.
- // If the file has more than one block, use the size of the
- // first block as the blocksize. Otherwise use the default block size.
- //
- if (-8 <= imgVersion && blockSize == 0) {
- if (numBlocks > 1) {
- blockSize = blocks[0].getNumBytes();
- } else {
- long first = ((numBlocks == 1) ? blocks[0].getNumBytes(): 0);
- blockSize = Math.max(namesystem.getDefaultBlockSize(), first);
+ blocks[j].readFields(in);
}
}
// get quota only when the node is a directory
long nsQuota = -1L;
- if (LayoutVersion.supports(Feature.NAMESPACE_QUOTA, imgVersion)
- && blocks == null && numBlocks == -1) {
- nsQuota = in.readLong();
- }
- long dsQuota = -1L;
- if (LayoutVersion.supports(Feature.DISKSPACE_QUOTA, imgVersion)
- && blocks == null && numBlocks == -1) {
- dsQuota = in.readLong();
- }
-
- // Read the symlink only when the node is a symlink
- String symlink = "";
- if (numBlocks == -2) {
- symlink = Text.readString(in);
- }
-
- PermissionStatus permissions = namesystem.getUpgradePermission();
- if (imgVersion <= -11) {
- permissions = PermissionStatus.read(in);
- }
-
- return INode.newINode(permissions, blocks, symlink, replication,
- modificationTime, atime, nsQuota, dsQuota, blockSize);
+ if (blocks == null && numBlocks == -1) {
+ nsQuota = in.readLong();
+ }
+ long dsQuota = -1L;
+ if (LayoutVersion.supports(Feature.DISKSPACE_QUOTA, imgVersion)
+ && blocks == null && numBlocks == -1) {
+ dsQuota = in.readLong();
}
- private void loadDatanodes(DataInputStream in)
- throws IOException {
- int imgVersion = getLayoutVersion();
-
- if (imgVersion > -3) // pre datanode image version
- return;
- if (imgVersion <= -12) {
- return; // new versions do not store the datanodes any more.
- }
- int size = in.readInt();
- for(int i = 0; i < size; i++) {
- // We don't need to add these descriptors any more.
- FSImageSerialization.DatanodeImage.skipOne(in);
- }
+ // Read the symlink only when the node is a symlink
+ String symlink = "";
+ if (numBlocks == -2) {
+ symlink = Text.readString(in);
}
+
+ PermissionStatus permissions = PermissionStatus.read(in);
+
+ return INode.newINode(permissions, blocks, symlink, replication,
+ modificationTime, atime, nsQuota, dsQuota, blockSize);
+ }
private void loadFilesUnderConstruction(DataInputStream in)
throws IOException {
FSDirectory fsDir = namesystem.dir;
- int imgVersion = getLayoutVersion();
- if (imgVersion > -13) // pre lease image version
- return;
int size = in.readInt();
LOG.info("Number of files under construction = " + size);
@@ -457,17 +395,6 @@ class FSImageFormat {
return namesystem.getFSImage().getStorage().getLayoutVersion();
}
- private long readNumFiles(DataInputStream in)
- throws IOException {
- int imgVersion = getLayoutVersion();
-
- if (LayoutVersion.supports(Feature.NAMESPACE_QUOTA, imgVersion)) {
- return in.readLong();
- } else {
- return in.readInt();
- }
- }
-
private boolean isRoot(byte[][] path) {
return path.length == 1 &&
path[0] == null;
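
After the cleanup, the image header that the loader accepts has a fixed prefix; the reads above occur unconditionally and in order. A hedged outline (variable names are illustrative; the transaction-ID read that follows remains version-gated, per the surviving else branch):

    import java.io.DataInputStream;
    import java.io.IOException;

    class ImageHeaderSketch {
        static void readHeaderPrefix(DataInputStream in) throws IOException {
            int imgVersion  = in.readInt();   // layout version, checked against expected
            int namespaceId = in.readInt();   // namespace ID
            long numFiles   = in.readLong();  // file count, formerly readNumFiles()
            long genStamp   = in.readLong();  // last generation stamp
        }
    }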
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java Mon Apr 2 07:28:42 2012
@@ -17,9 +17,7 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
-import java.io.DataInput;
import java.io.DataInputStream;
-import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;
@@ -31,7 +29,6 @@ import org.apache.hadoop.fs.permission.P
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DeprecatedUTF8;
import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
@@ -39,7 +36,6 @@ import org.apache.hadoop.hdfs.server.com
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.ShortWritable;
import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;
/**
@@ -107,13 +103,10 @@ public class FSImageSerialization {
String clientName = readString(in);
String clientMachine = readString(in);
- // These locations are not used at all
+ // We previously stored locations for the last block; now we
+ // just check that there are none.
int numLocs = in.readInt();
- DatanodeDescriptor[] locations = new DatanodeDescriptor[numLocs];
- for (i = 0; i < numLocs; i++) {
- locations[i] = new DatanodeDescriptor();
- locations[i].readFields(in);
- }
+ assert numLocs == 0 : "Unexpected block locations";
return new INodeFileUnderConstruction(name,
blockReplication,
@@ -320,53 +313,4 @@ public class FSImageSerialization {
}
return ret;
}
-
- /**
- * DatanodeImage is used to store persistent information
- * about datanodes into the fsImage.
- */
- static class DatanodeImage implements Writable {
- DatanodeDescriptor node = new DatanodeDescriptor();
-
- static void skipOne(DataInput in) throws IOException {
- DatanodeImage nodeImage = new DatanodeImage();
- nodeImage.readFields(in);
- }
-
- /////////////////////////////////////////////////
- // Writable
- /////////////////////////////////////////////////
- /**
- * Public method that serializes the information about a
- * Datanode to be stored in the fsImage.
- */
- public void write(DataOutput out) throws IOException {
- new DatanodeID(node).write(out);
- out.writeLong(node.getCapacity());
- out.writeLong(node.getRemaining());
- out.writeLong(node.getLastUpdate());
- out.writeInt(node.getXceiverCount());
- }
-
- /**
- * Public method that reads a serialized Datanode
- * from the fsImage.
- */
- public void readFields(DataInput in) throws IOException {
- DatanodeID id = new DatanodeID();
- id.readFields(in);
- long capacity = in.readLong();
- long remaining = in.readLong();
- long lastUpdate = in.readLong();
- int xceiverCount = in.readInt();
-
- // update the DatanodeDescriptor with the data we read in
- node.updateRegInfo(id);
- node.setStorageID(id.getStorageID());
- node.setCapacity(capacity);
- node.setRemaining(remaining);
- node.setLastUpdate(lastUpdate);
- node.setXceiverCount(xceiverCount);
- }
- }
}
\ No newline at end of file
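
The assert added to the file-under-construction reader replaces a loop that deserialized and discarded DatanodeDescriptors; since images that stored last-block locations can no longer be loaded, a non-zero count now signals corruption. Worth noting that a bare Java assert is a no-op unless the JVM runs with -ea; a hedged sketch of an always-on variant (method name and message are illustrative):

    import java.io.DataInputStream;
    import java.io.IOException;

    class LocationCountCheck {
        static void checkNoLegacyLocations(DataInputStream in) throws IOException {
            int numLocs = in.readInt();
            if (numLocs != 0) {
                throw new IOException("Unexpected " + numLocs
                    + " block locations for a file under construction");
            }
        }
    }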