Posted to hdfs-commits@hadoop.apache.org by vi...@apache.org on 2013/09/03 20:30:16 UTC
svn commit: r1519787 [2/4] - in
/hadoop/common/branches/YARN-321/hadoop-hdfs-project: ./ hadoop-hdfs-nfs/
hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/
hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/ hadoop-hdfs/
hadoop...
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocol/QJournalProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocol/QJournalProtocol.java?rev=1519787&r1=1519786&r2=1519787&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocol/QJournalProtocol.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocol/QJournalProtocol.java Tue Sep 3 18:30:05 2013
@@ -125,10 +125,13 @@ public interface QJournalProtocol {
* @param sinceTxId the first transaction which the client cares about
* @param forReading whether or not the caller intends to read from the edit
* logs
+ * @param inProgressOk whether or not to check the in-progress edit log
+ * segment
* @return a list of edit log segments since the given transaction ID.
*/
- public GetEditLogManifestResponseProto getEditLogManifest(
- String jid, long sinceTxId, boolean forReading) throws IOException;
+ public GetEditLogManifestResponseProto getEditLogManifest(String jid,
+ long sinceTxId, boolean forReading, boolean inProgressOk)
+ throws IOException;
/**
* Begin the recovery process for a given segment. See the HDFS-3077
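For context, callers of the widened signature now state explicitly whether the open, in-progress segment may be returned. A hypothetical caller sketch (the journal id, txid, and flag values are illustrative, not part of this patch):

    import java.io.IOException;
    import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocol;
    import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto;

    class ManifestCallerSketch {
      static GetEditLogManifestResponseProto fetchSinceStart(QJournalProtocol proxy)
          throws IOException {
        // Not for reading edits (forReading=false), so the in-progress
        // segment may be included (inProgressOk=true).
        return proxy.getEditLogManifest("myjournal", 1L, false, true);
      }
    }

Here forReading=false, inProgressOk=true asks the JournalNode to include the open segment, which the Journal change later in this commit caps at its highest written transaction ID.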
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java?rev=1519787&r1=1519786&r2=1519787&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java Tue Sep 3 18:30:05 2013
@@ -203,7 +203,8 @@ public class QJournalProtocolServerSideT
return impl.getEditLogManifest(
request.getJid().getIdentifier(),
request.getSinceTxId(),
- request.getForReading());
+ request.getForReading(),
+ request.getInProgressOk());
} catch (IOException e) {
throw new ServiceException(e);
}
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolTranslatorPB.java?rev=1519787&r1=1519786&r2=1519787&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolTranslatorPB.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolTranslatorPB.java Tue Sep 3 18:30:05 2013
@@ -228,13 +228,15 @@ public class QJournalProtocolTranslatorP
@Override
public GetEditLogManifestResponseProto getEditLogManifest(String jid,
- long sinceTxId, boolean forReading) throws IOException {
+ long sinceTxId, boolean forReading, boolean inProgressOk)
+ throws IOException {
try {
return rpcProxy.getEditLogManifest(NULL_CONTROLLER,
GetEditLogManifestRequestProto.newBuilder()
.setJid(convertJournalId(jid))
.setSinceTxId(sinceTxId)
.setForReading(forReading)
+ .setInProgressOk(inProgressOk)
.build());
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java?rev=1519787&r1=1519786&r2=1519787&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java Tue Sep 3 18:30:05 2013
@@ -25,10 +25,9 @@ import java.io.InputStream;
import java.io.OutputStreamWriter;
import java.net.URL;
import java.security.PrivilegedExceptionAction;
+import java.util.Iterator;
import java.util.List;
import java.util.concurrent.TimeUnit;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -36,8 +35,8 @@ import org.apache.hadoop.conf.Configurat
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.qjournal.protocol.JournalNotFormattedException;
-import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocol;
import org.apache.hadoop.hdfs.qjournal.protocol.JournalOutOfSyncException;
+import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocol;
import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto;
import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData;
import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto;
@@ -50,6 +49,7 @@ import org.apache.hadoop.hdfs.server.nam
import org.apache.hadoop.hdfs.server.namenode.JournalManager;
import org.apache.hadoop.hdfs.server.namenode.TransferFsImage;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
+import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
import org.apache.hadoop.hdfs.util.AtomicFileOutputStream;
import org.apache.hadoop.hdfs.util.BestEffortLongFile;
@@ -630,14 +630,31 @@ class Journal implements Closeable {
* @see QJournalProtocol#getEditLogManifest(String, long, boolean, boolean)
*/
public RemoteEditLogManifest getEditLogManifest(long sinceTxId,
- boolean forReading) throws IOException {
+ boolean forReading, boolean inProgressOk) throws IOException {
// No need to checkRequest() here - anyone may ask for the list
// of segments.
checkFormatted();
- RemoteEditLogManifest manifest = new RemoteEditLogManifest(
- fjm.getRemoteEditLogs(sinceTxId, forReading));
- return manifest;
+ // if this is for reading, ignore the in-progress editlog segment
+ inProgressOk = forReading ? false : inProgressOk;
+ List<RemoteEditLog> logs = fjm.getRemoteEditLogs(sinceTxId, forReading,
+ inProgressOk);
+
+ if (inProgressOk) {
+ RemoteEditLog log = null;
+ for (Iterator<RemoteEditLog> iter = logs.iterator(); iter.hasNext();) {
+ log = iter.next();
+ if (log.isInProgress()) {
+ iter.remove();
+ break;
+ }
+ }
+ if (log != null && log.isInProgress()) {
+ logs.add(new RemoteEditLog(log.getStartTxId(), getHighestWrittenTxId()));
+ }
+ }
+
+ return new RemoteEditLogManifest(logs);
}
/**
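For context, the new in-progress handling swaps the open segment's unknown end for the highest transaction ID the journal has written. A minimal standalone sketch of that list manipulation (Seg is a simplified stand-in for RemoteEditLog; txids illustrative):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Iterator;
    import java.util.List;

    public class ManifestSketch {
      // Simplified stand-in for RemoteEditLog, for illustration only.
      static class Seg {
        final long start; final long end; final boolean inProgress;
        Seg(long start, long end, boolean inProgress) {
          this.start = start; this.end = end; this.inProgress = inProgress;
        }
        @Override public String toString() {
          return "[" + start + "," + end + (inProgress ? ",in-progress]" : "]");
        }
      }

      public static void main(String[] args) {
        long highestWrittenTxId = 157; // what this journal has durably written
        List<Seg> logs = new ArrayList<Seg>(Arrays.asList(
            new Seg(1, 100, false),    // finalized segment
            new Seg(101, -1, true)));  // open segment, end txid unknown
        // Drop the first in-progress segment, remembering it ...
        Seg open = null;
        for (Iterator<Seg> it = logs.iterator(); it.hasNext();) {
          Seg s = it.next();
          if (s.inProgress) { open = s; it.remove(); break; }
        }
        // ... and re-add it with a concrete end txid, as the patch does with
        // getHighestWrittenTxId().
        if (open != null) {
          logs.add(new Seg(open.start, highestWrittenTxId, false));
        }
        System.out.println(logs); // [[1,100], [101,157]]
      }
    }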
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java?rev=1519787&r1=1519786&r2=1519787&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java Tue Sep 3 18:30:05 2013
@@ -18,8 +18,10 @@
package org.apache.hadoop.hdfs.qjournal.server;
import java.io.File;
+import java.io.FileFilter;
import java.io.IOException;
import java.net.InetSocketAddress;
+import java.util.HashMap;
import java.util.Map;
import org.apache.commons.logging.Log;
@@ -34,11 +36,13 @@ import org.apache.hadoop.hdfs.server.com
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.source.JvmMetrics;
+import org.apache.hadoop.metrics2.util.MBeans;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.util.DiskChecker;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
+import org.mortbay.util.ajax.JSON;
import com.google.common.base.Preconditions;
import com.google.common.collect.Maps;
@@ -51,7 +55,7 @@ import com.google.common.collect.Maps;
* in the quorum protocol.
*/
@InterfaceAudience.Private
-public class JournalNode implements Tool, Configurable {
+public class JournalNode implements Tool, Configurable, JournalNodeMXBean {
public static final Log LOG = LogFactory.getLog(JournalNode.class);
private Configuration conf;
private JournalNodeRpcServer rpcServer;
@@ -128,6 +132,8 @@ public class JournalNode implements Tool
SecurityUtil.login(conf, DFSConfigKeys.DFS_JOURNALNODE_KEYTAB_FILE_KEY,
DFSConfigKeys.DFS_JOURNALNODE_USER_NAME_KEY, socAddr.getHostName());
+ registerJNMXBean();
+
httpServer = new JournalNodeHttpServer(conf, this);
httpServer.start();
@@ -208,6 +214,50 @@ public class JournalNode implements Tool
return new File(new File(dir), jid);
}
+ @Override // JournalNodeMXBean
+ public String getJournalsStatus() {
+ // jid:{Formatted:True/False}
+ Map<String, Map<String, String>> status =
+ new HashMap<String, Map<String, String>>();
+ synchronized (this) {
+ for (Map.Entry<String, Journal> entry : journalsById.entrySet()) {
+ Map<String, String> jMap = new HashMap<String, String>();
+ jMap.put("Formatted", Boolean.toString(entry.getValue().isFormatted()));
+ status.put(entry.getKey(), jMap);
+ }
+ }
+
+ // It is possible that some journals have been formatted before, while the
+ // corresponding journals are not in journalsById yet (e.g., because the
+ // JN restarted). For simplicity, let's just assume a journal is formatted if
+ // there is a directory for it. We can also call analyzeStorage method for
+ // these directories if necessary.
+ // Also note that we do not need to check localDir here since
+ // validateAndCreateJournalDir has been called before we register the
+ // MXBean.
+ File[] journalDirs = localDir.listFiles(new FileFilter() {
+ @Override
+ public boolean accept(File file) {
+ return file.isDirectory();
+ }
+ });
+ for (File journalDir : journalDirs) {
+ String jid = journalDir.getName();
+ if (!status.containsKey(jid)) {
+ Map<String, String> jMap = new HashMap<String, String>();
+ jMap.put("Formatted", "true");
+ status.put(jid, jMap);
+ }
+ }
+ return JSON.toString(status);
+ }
+
+ /**
+ * Register JournalNodeMXBean
+ */
+ private void registerJNMXBean() {
+ MBeans.register("JournalNode", "JournalNodeInfo", this);
+ }
private class ErrorReporter implements StorageErrorReporter {
@Override
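Once registered, the bean can be read back over JMX. A hypothetical in-process reader (the ObjectName follows the usual Hadoop:service=...,name=... convention used by MBeans.register; verify against the running version, and note a remote client would go through a JMXConnector instead):

    import java.lang.management.ManagementFactory;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    public class JournalStatusReader {
      public static void main(String[] args) throws Exception {
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        // MBeans.register("JournalNode", "JournalNodeInfo", ...) is expected
        // to register under this name.
        ObjectName name =
            new ObjectName("Hadoop:service=JournalNode,name=JournalNodeInfo");
        String json = (String) mbs.getAttribute(name, "JournalsStatus");
        System.out.println(json); // e.g. {"myjournal":{"Formatted":"true"}}
      }
    }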
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeHttpServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeHttpServer.java?rev=1519787&r1=1519786&r2=1519787&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeHttpServer.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeHttpServer.java Tue Sep 3 18:30:05 2013
@@ -31,12 +31,13 @@ import org.apache.commons.logging.LogFac
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.http.HttpServer;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.authorize.AccessControlList;
-import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.UserGroupInformation;
/**
* Encapsulates the HTTP server started by the Journal Service.
@@ -68,16 +69,15 @@ public class JournalNodeHttpServer {
bindAddr.getHostName()));
int tmpInfoPort = bindAddr.getPort();
- httpServer = new HttpServer("journal", bindAddr.getHostName(),
- tmpInfoPort, tmpInfoPort == 0, conf, new AccessControlList(conf
- .get(DFS_ADMIN, " "))) {
- {
- if (UserGroupInformation.isSecurityEnabled()) {
- initSpnego(conf, DFS_JOURNALNODE_INTERNAL_SPNEGO_USER_NAME_KEY,
- DFS_JOURNALNODE_KEYTAB_FILE_KEY);
- }
- }
- };
+ httpServer = new HttpServer.Builder().setName("journal")
+ .setBindAddress(bindAddr.getHostName()).setPort(tmpInfoPort)
+ .setFindPort(tmpInfoPort == 0).setConf(conf).setACL(
+ new AccessControlList(conf.get(DFS_ADMIN, " ")))
+ .setSecurityEnabled(UserGroupInformation.isSecurityEnabled())
+ .setUsernameConfKey(
+ DFS_JOURNALNODE_INTERNAL_SPNEGO_USER_NAME_KEY)
+ .setKeytabConfKey(DFSUtil.getSpnegoKeytabKey(conf,
+ DFS_JOURNALNODE_KEYTAB_FILE_KEY)).build();
httpServer.setAttribute(JN_ATTRIBUTE_KEY, localJournalNode);
httpServer.setAttribute(JspHelper.CURRENT_CONF, conf);
httpServer.addInternalServlet("getJournal", "/getJournal",
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java?rev=1519787&r1=1519786&r2=1519787&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java Tue Sep 3 18:30:05 2013
@@ -175,10 +175,11 @@ class JournalNodeRpcServer implements QJ
@Override
public GetEditLogManifestResponseProto getEditLogManifest(String jid,
- long sinceTxId, boolean forReading) throws IOException {
+ long sinceTxId, boolean forReading, boolean inProgressOk)
+ throws IOException {
RemoteEditLogManifest manifest = jn.getOrCreateJournal(jid)
- .getEditLogManifest(sinceTxId, forReading);
+ .getEditLogManifest(sinceTxId, forReading, inProgressOk);
return GetEditLogManifestResponseProto.newBuilder()
.setManifest(PBHelper.convert(manifest))
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java?rev=1519787&r1=1519786&r2=1519787&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java Tue Sep 3 18:30:05 2013
@@ -81,17 +81,28 @@ public class DelegationTokenSecretManage
return new DelegationTokenIdentifier();
}
- @Override //SecretManager
- public void checkAvailableForRead() throws StandbyException {
- namesystem.checkOperation(OperationCategory.READ);
- namesystem.readLock();
+ @Override
+ public byte[] retrievePassword(
+ DelegationTokenIdentifier identifier) throws InvalidToken {
try {
+ // This check introduces inconsistency in the authentication to an
+ // HA standby NN. Non-token auths are allowed into the namespace, which
+ // decides whether to throw a StandbyException. Tokens are a bit
+ // different in that a standby may be behind and thus not yet know
+ // of all tokens issued by the active NN. The following check does
+ // not allow ANY token auth; however, it should allow known tokens in.
namesystem.checkOperation(OperationCategory.READ);
- } finally {
- namesystem.readUnlock();
+ } catch (StandbyException se) {
+ // FIXME: this is a hack to get around changing method signatures by
+ // tunneling a non-InvalidToken exception as the cause which the
+ // RPC server will unwrap before returning to the client
+ InvalidToken wrappedStandby = new InvalidToken("StandbyException");
+ wrappedStandby.initCause(se);
+ throw wrappedStandby;
}
+ return super.retrievePassword(identifier);
}
-
+
/**
* Returns expiry time of a token given its identifier.
*
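The FIXME above describes tunneling a StandbyException through an InvalidToken so the method signature need not change. A minimal standalone illustration of that wrap-and-unwrap pattern (stand-in exception classes, not the Hadoop ones):

    public class TunnelSketch {
      static class StandbyException extends Exception {
        StandbyException(String msg) { super(msg); }
      }
      static class InvalidToken extends Exception {
        InvalidToken(String msg) { super(msg); }
      }

      public static void main(String[] args) {
        try {
          try {
            throw new StandbyException("Operation category READ is not supported");
          } catch (StandbyException se) {
            // Wrap the real failure as the cause of the declared exception type.
            InvalidToken wrapped = new InvalidToken("StandbyException");
            wrapped.initCause(se);
            throw wrapped;
          }
        } catch (InvalidToken it) {
          // The RPC server is expected to unwrap the cause before replying,
          // so the client ultimately sees the StandbyException.
          System.out.println("unwrapped cause: " + it.getCause());
        }
      }
    }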
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java?rev=1519787&r1=1519786&r2=1519787&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java Tue Sep 3 18:30:05 2013
@@ -189,15 +189,18 @@ public class BlockPlacementPolicyWithNod
boolean avoidStaleNodes)
throws NotEnoughReplicasException {
int oldNumOfReplicas = results.size();
- // randomly choose one node from remote racks
+
+ final String rackLocation = NetworkTopology.getFirstHalf(
+ localMachine.getNetworkLocation());
try {
- chooseRandom(numOfReplicas, "~"+NetworkTopology.getFirstHalf(
- localMachine.getNetworkLocation()),
- excludedNodes, blocksize, maxReplicasPerRack, results, avoidStaleNodes);
+ // randomly choose from remote racks
+ chooseRandom(numOfReplicas, "~" + rackLocation, excludedNodes, blocksize,
+ maxReplicasPerRack, results, avoidStaleNodes);
} catch (NotEnoughReplicasException e) {
- chooseRandom(numOfReplicas-(results.size()-oldNumOfReplicas),
- localMachine.getNetworkLocation(), excludedNodes, blocksize,
- maxReplicasPerRack, results, avoidStaleNodes);
+ // fall back to the local rack
+ chooseRandom(numOfReplicas - (results.size() - oldNumOfReplicas),
+ rackLocation, excludedNodes, blocksize,
+ maxReplicasPerRack, results, avoidStaleNodes);
}
}
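For intuition about the fallback change: with node-group topology, the placement scope is the rack prefix of the network location, both for excluding remote racks and for the local-rack retry. A sketch of the assumed split (mirroring what NetworkTopology.getFirstHalf is expected to return; verify against the source):

    public class RackSplitSketch {
      // Assumed behavior of NetworkTopology.getFirstHalf: everything before
      // the last path separator of the network location.
      static String firstHalf(String networkLocation) {
        return networkLocation.substring(0, networkLocation.lastIndexOf('/'));
      }

      public static void main(String[] args) {
        String loc = "/rack1/nodegroup1"; // node-group-aware network location
        String rack = firstHalf(loc);     // "/rack1"
        // Remote placement excludes the rack; the fallback now retries within
        // the same rack prefix rather than the full node-group location.
        System.out.println("remote scope: ~" + rack + ", fallback scope: " + rack);
      }
    }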
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java?rev=1519787&r1=1519786&r2=1519787&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java Tue Sep 3 18:30:05 2013
@@ -26,6 +26,7 @@ import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
+import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.NavigableMap;
@@ -55,13 +56,6 @@ import org.apache.hadoop.hdfs.server.nam
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.Namesystem;
import org.apache.hadoop.hdfs.server.protocol.BalancerBandwidthCommand;
-import org.apache.hadoop.hdfs.server.namenode.HostFileManager;
-import org.apache.hadoop.hdfs.server.namenode.HostFileManager.Entry;
-import org.apache.hadoop.hdfs.server.namenode.HostFileManager.EntrySet;
-import org.apache.hadoop.hdfs.server.namenode.HostFileManager.MutableEntrySet;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.hdfs.server.namenode.Namesystem;
-import org.apache.hadoop.hdfs.server.protocol.BalancerBandwidthCommand;
import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
@@ -71,7 +65,6 @@ import org.apache.hadoop.hdfs.server.pro
import org.apache.hadoop.hdfs.server.protocol.DisallowedDatanodeException;
import org.apache.hadoop.hdfs.server.protocol.RegisterCommand;
import org.apache.hadoop.hdfs.util.CyclicIteration;
-import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.net.CachedDNSToSwitchMapping;
import org.apache.hadoop.net.DNSToSwitchMapping;
@@ -172,6 +165,14 @@ public class DatanodeManager {
* according to the NetworkTopology.
*/
private boolean hasClusterEverBeenMultiRack = false;
+
+ /**
+ * The number of datanodes for each software version. This map should change
+ * during rolling upgrades.
+ * Software version -> Number of datanodes with this version
+ */
+ private HashMap<String, Integer> datanodesSoftwareVersions =
+ new HashMap<String, Integer>(4, 0.75f);
DatanodeManager(final BlockManager blockManager, final Namesystem namesystem,
final Configuration conf) throws IOException {
@@ -463,6 +464,7 @@ public class DatanodeManager {
heartbeatManager.removeDatanode(nodeInfo);
blockManager.removeBlocksAssociatedTo(nodeInfo);
networktopology.remove(nodeInfo);
+ decrementVersionCount(nodeInfo.getSoftwareVersion());
if (LOG.isDebugEnabled()) {
LOG.debug("remove datanode " + nodeInfo);
@@ -545,6 +547,61 @@ public class DatanodeManager {
}
}
+ private void incrementVersionCount(String version) {
+ if (version == null) {
+ return;
+ }
+ synchronized(datanodeMap) {
+ Integer count = this.datanodesSoftwareVersions.get(version);
+ count = count == null ? 1 : count + 1;
+ this.datanodesSoftwareVersions.put(version, count);
+ }
+ }
+
+ private void decrementVersionCount(String version) {
+ if (version == null) {
+ return;
+ }
+ synchronized(datanodeMap) {
+ Integer count = this.datanodesSoftwareVersions.get(version);
+ if(count != null) {
+ if(count > 1) {
+ this.datanodesSoftwareVersions.put(version, count-1);
+ } else {
+ this.datanodesSoftwareVersions.remove(version);
+ }
+ }
+ }
+ }
+
+ private boolean shouldCountVersion(DatanodeDescriptor node) {
+ return node.getSoftwareVersion() != null && node.isAlive &&
+ !isDatanodeDead(node);
+ }
+
+ private void countSoftwareVersions() {
+ synchronized(datanodeMap) {
+ HashMap<String, Integer> versionCount = new HashMap<String, Integer>();
+ for(DatanodeDescriptor dn: datanodeMap.values()) {
+ // Check isAlive too because right after removeDatanode(),
+ // isDatanodeDead() is still true
+ if(shouldCountVersion(dn))
+ {
+ Integer num = versionCount.get(dn.getSoftwareVersion());
+ num = num == null ? 1 : num+1;
+ versionCount.put(dn.getSoftwareVersion(), num);
+ }
+ }
+ this.datanodesSoftwareVersions = versionCount;
+ }
+ }
+
+ public HashMap<String, Integer> getDatanodesSoftwareVersions() {
+ synchronized(datanodeMap) {
+ return new HashMap<String, Integer> (this.datanodesSoftwareVersions);
+ }
+ }
+
/* Resolve a node's network location */
private String resolveNetworkLocation (DatanodeID node) {
List<String> names = new ArrayList<String>(1);
@@ -761,21 +818,28 @@ public class DatanodeManager {
try {
// update cluster map
getNetworkTopology().remove(nodeS);
+ if(shouldCountVersion(nodeS)) {
+ decrementVersionCount(nodeS.getSoftwareVersion());
+ }
nodeS.updateRegInfo(nodeReg);
+
+ nodeS.setSoftwareVersion(nodeReg.getSoftwareVersion());
nodeS.setDisallowed(false); // Node is in the include list
-
+
// resolve network location
nodeS.setNetworkLocation(resolveNetworkLocation(nodeS));
getNetworkTopology().add(nodeS);
// also treat the registration message as a heartbeat
heartbeatManager.register(nodeS);
+ incrementVersionCount(nodeS.getSoftwareVersion());
checkDecommissioning(nodeS);
success = true;
} finally {
if (!success) {
removeDatanode(nodeS);
wipeDatanode(nodeS);
+ countSoftwareVersions();
}
}
return;
@@ -799,6 +863,7 @@ public class DatanodeManager {
try {
nodeDescr.setNetworkLocation(resolveNetworkLocation(nodeDescr));
networktopology.add(nodeDescr);
+ nodeDescr.setSoftwareVersion(nodeReg.getSoftwareVersion());
// register new datanode
addDatanode(nodeDescr);
@@ -809,10 +874,12 @@ public class DatanodeManager {
// because its is done when the descriptor is created
heartbeatManager.addDatanode(nodeDescr);
success = true;
+ incrementVersionCount(nodeReg.getSoftwareVersion());
} finally {
if (!success) {
removeDatanode(nodeDescr);
wipeDatanode(nodeDescr);
+ countSoftwareVersions();
}
}
} catch (InvalidTopologyException e) {
@@ -834,6 +901,7 @@ public class DatanodeManager {
namesystem.writeLock();
try {
refreshDatanodes();
+ countSoftwareVersions();
} finally {
namesystem.writeUnlock();
}
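The bookkeeping above is plain reference counting keyed by version string. A minimal standalone sketch of the same pattern (illustrative class, not the DatanodeManager API):

    import java.util.HashMap;
    import java.util.Map;

    public class VersionCountSketch {
      private final Map<String, Integer> counts = new HashMap<String, Integer>();

      void increment(String version) {
        if (version == null) return;
        Integer c = counts.get(version);
        counts.put(version, c == null ? 1 : c + 1);
      }

      void decrement(String version) {
        if (version == null) return;
        Integer c = counts.get(version);
        if (c == null) return;
        // Drop the entry entirely when the last node of a version leaves.
        if (c > 1) counts.put(version, c - 1); else counts.remove(version);
      }

      public static void main(String[] args) {
        VersionCountSketch s = new VersionCountSketch();
        s.increment("2.1.0"); s.increment("2.1.0"); s.increment("2.0.5");
        s.decrement("2.1.0");
        System.out.println(s.counts); // {2.1.0=1, 2.0.5=1}
      }
    }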
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1519787&r1=1519786&r2=1519787&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Tue Sep 3 18:30:05 2013
@@ -390,11 +390,15 @@ public class DataNode extends Configured
String infoHost = infoSocAddr.getHostName();
int tmpInfoPort = infoSocAddr.getPort();
this.infoServer = (secureResources == null)
- ? new HttpServer("datanode", infoHost, tmpInfoPort, tmpInfoPort == 0,
- conf, new AccessControlList(conf.get(DFS_ADMIN, " ")))
- : new HttpServer("datanode", infoHost, tmpInfoPort, tmpInfoPort == 0,
- conf, new AccessControlList(conf.get(DFS_ADMIN, " ")),
- secureResources.getListener());
+ ? new HttpServer.Builder().setName("datanode")
+ .setBindAddress(infoHost).setPort(tmpInfoPort)
+ .setFindPort(tmpInfoPort == 0).setConf(conf)
+ .setACL(new AccessControlList(conf.get(DFS_ADMIN, " "))).build()
+ : new HttpServer.Builder().setName("datanode")
+ .setBindAddress(infoHost).setPort(tmpInfoPort)
+ .setFindPort(tmpInfoPort == 0).setConf(conf)
+ .setACL(new AccessControlList(conf.get(DFS_ADMIN, " ")))
+ .setConnector(secureResources.getListener()).build();
LOG.info("Opened info server at " + infoHost + ":" + tmpInfoPort);
if (conf.getBoolean(DFS_HTTPS_ENABLE_KEY, false)) {
boolean needClientAuth = conf.getBoolean(DFS_CLIENT_HTTPS_NEED_AUTH_KEY,
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java?rev=1519787&r1=1519786&r2=1519787&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java Tue Sep 3 18:30:05 2013
@@ -19,7 +19,9 @@ package org.apache.hadoop.hdfs.server.da
import java.io.File;
import java.io.IOException;
+import java.net.InetAddress;
import java.net.InetSocketAddress;
+import java.net.URI;
import java.net.URL;
import java.net.URLEncoder;
import java.security.PrivilegedExceptionAction;
@@ -27,6 +29,7 @@ import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.List;
+import javax.servlet.ServletContext;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.servlet.jsp.JspWriter;
@@ -36,6 +39,7 @@ import org.apache.hadoop.classification.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -43,6 +47,9 @@ import org.apache.hadoop.hdfs.protocol.L
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
import org.apache.hadoop.hdfs.server.common.JspHelper;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeHttpServer;
import org.apache.hadoop.http.HtmlQuoting;
import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.net.NetUtils;
@@ -50,6 +57,7 @@ import org.apache.hadoop.security.UserGr
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.ServletUtil;
import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.VersionInfo;
@InterfaceAudience.Private
public class DatanodeJspHelper {
@@ -712,4 +720,24 @@ public class DatanodeJspHelper {
final String nnAddr = request.getParameter(JspHelper.NAMENODE_ADDRESS);
return getDFSClient(ugi, nnAddr, conf);
}
+
+ /** Return a table containing version information. */
+ public static String getVersionTable(ServletContext context) {
+ StringBuilder sb = new StringBuilder();
+ final DataNode dataNode = (DataNode) context.getAttribute("datanode");
+ sb.append("<div class='dfstable'><table>");
+ sb.append("<tr><td class='col1'>Version:</td><td>");
+ sb.append(VersionInfo.getVersion() + ", " + VersionInfo.getRevision());
+ sb.append("</td></tr>\n" + "\n <tr><td class='col1'>Compiled:</td><td>"
+ + VersionInfo.getDate());
+ sb.append(" by " + VersionInfo.getUser() + " from "
+ + VersionInfo.getBranch());
+ if (dataNode != null) {
+ sb.append("</td></tr>\n <tr><td class='col1'>Cluster ID:</td><td>"
+ + dataNode.getClusterId());
+ }
+ sb.append("</td></tr>\n</table></div>");
+ return sb.toString();
+ }
+
}
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java?rev=1519787&r1=1519786&r2=1519787&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java Tue Sep 3 18:30:05 2013
@@ -410,8 +410,9 @@ public class DatanodeWebHdfsMethods {
throw ioe;
}
- final long n = length.getValue() != null? length.getValue()
- : in.getVisibleLength() - offset.getValue();
+ final long n = length.getValue() != null ?
+ Math.min(length.getValue(), in.getVisibleLength() - offset.getValue()) :
+ in.getVisibleLength() - offset.getValue();
return Response.ok(new OpenEntity(in, n, dfsclient)).type(
MediaType.APPLICATION_OCTET_STREAM).build();
}
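The fix caps the requested length at what remains of the stream's visible length, instead of trusting the client-supplied value. A tiny worked example with illustrative numbers:

    public class LengthClampSketch {
      public static void main(String[] args) {
        long visibleLength = 100;  // in.getVisibleLength()
        long offset = 90;          // offset.getValue()
        Long requested = 20L;      // length.getValue(), may be null
        long n = requested != null
            ? Math.min(requested, visibleLength - offset)
            : visibleLength - offset;
        System.out.println(n);     // 10, not 20: never read past the visible end
      }
    }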
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ClusterJspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ClusterJspHelper.java?rev=1519787&r1=1519786&r2=1519787&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ClusterJspHelper.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ClusterJspHelper.java Tue Sep 3 18:30:05 2013
@@ -359,6 +359,7 @@ class ClusterJspHelper {
nn.httpAddress = httpAddress;
getLiveNodeCount(getProperty(props, "LiveNodes").getValueAsText(), nn);
getDeadNodeCount(getProperty(props, "DeadNodes").getValueAsText(), nn);
+ nn.softwareVersion = getProperty(props, "SoftwareVersion").getTextValue();
return nn;
}
@@ -596,6 +597,7 @@ class ClusterJspHelper {
toXmlItemBlockWithLink(doc, nn.deadDatanodeCount + " (" +
nn.deadDecomCount + ")", nn.httpAddress+"/dfsnodelist.jsp?whatNodes=DEAD"
, "Dead Datanode (Decommissioned)");
+ toXmlItemBlock(doc, "Software Version", nn.softwareVersion);
doc.endTag(); // node
}
doc.endTag(); // namenodes
@@ -624,6 +626,7 @@ class ClusterJspHelper {
int deadDatanodeCount = 0;
int deadDecomCount = 0;
String httpAddress = null;
+ String softwareVersion = "";
}
/**
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=1519787&r1=1519786&r2=1519787&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Tue Sep 3 18:30:05 2013
@@ -2093,6 +2093,10 @@ public class FSDirectory implements Clos
/** Verify if the snapshot name is legal. */
void verifySnapshotName(String snapshotName, String path)
throws PathComponentTooLongException {
+ if (snapshotName.contains(Path.SEPARATOR)) {
+ throw new HadoopIllegalArgumentException(
+ "Snapshot name cannot contain \"" + Path.SEPARATOR + "\"");
+ }
final byte[] bytes = DFSUtil.string2Bytes(snapshotName);
verifyINodeName(bytes);
verifyMaxComponentLength(bytes, path, 0);
@@ -2727,6 +2731,19 @@ public class FSDirectory implements Clos
throw new FileNotFoundException(
"File for given inode path does not exist: " + src);
}
+
+ // Handle single ".." for NFS lookup support.
+ if ((pathComponents.length > 4)
+ && DFSUtil.bytes2String(pathComponents[4]).equals("..")) {
+ INode parent = inode.getParent();
+ if (parent == null || parent.getId() == INodeId.ROOT_INODE_ID) {
+ // inode is root, or its parent is root.
+ return Path.SEPARATOR;
+ } else {
+ return parent.getFullPathName();
+ }
+ }
+
StringBuilder path = id == INodeId.ROOT_INODE_ID ? new StringBuilder()
: new StringBuilder(inode.getFullPathName());
for (int i = 4; i < pathComponents.length; i++) {
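The magic index 4 follows from how a reserved inode path splits into components. A small sketch with an illustrative inode id (plain String.split stands in for the HDFS byte[][] path components):

    public class ReservedPathSketch {
      public static void main(String[] args) {
        // Component 0 is the empty root component.
        String[] pathComponents = "/.reserved/.inodes/16386/..".split("/", -1);
        // => ["", ".reserved", ".inodes", "16386", ".."]
        if (pathComponents.length > 4 && pathComponents[4].equals("..")) {
          System.out.println("resolve to the parent of inode " + pathComponents[3]);
        }
      }
    }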
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java?rev=1519787&r1=1519786&r2=1519787&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java Tue Sep 3 18:30:05 2013
@@ -1231,6 +1231,7 @@ public class FSEditLog implements LogsPu
}
}
+ @Override
public void selectInputStreams(Collection<EditLogInputStream> streams,
long fromTxId, boolean inProgressOk, boolean forReading) throws IOException {
journalSet.selectInputStreams(streams, fromTxId, inProgressOk, forReading);
@@ -1241,18 +1242,27 @@ public class FSEditLog implements LogsPu
return selectInputStreams(fromTxId, toAtLeastTxId, null, true);
}
+ /** Select a list of input streams to load */
+ public Collection<EditLogInputStream> selectInputStreams(
+ long fromTxId, long toAtLeastTxId, MetaRecoveryContext recovery,
+ boolean inProgressOk) throws IOException {
+ return selectInputStreams(fromTxId, toAtLeastTxId, recovery, inProgressOk,
+ true);
+ }
+
/**
- * Select a list of input streams to load.
+ * Select a list of input streams.
*
* @param fromTxId first transaction in the selected streams
* @param toAtLeastTxId the selected streams must contain this transaction
* @param inProgressOk set to true if in-progress streams are OK
+ * @param forReading whether or not to use the streams to load the edit log
*/
public synchronized Collection<EditLogInputStream> selectInputStreams(
long fromTxId, long toAtLeastTxId, MetaRecoveryContext recovery,
- boolean inProgressOk) throws IOException {
+ boolean inProgressOk, boolean forReading) throws IOException {
List<EditLogInputStream> streams = new ArrayList<EditLogInputStream>();
- selectInputStreams(streams, fromTxId, inProgressOk, true);
+ selectInputStreams(streams, fromTxId, inProgressOk, forReading);
try {
checkForGaps(streams, fromTxId, toAtLeastTxId, inProgressOk);
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1519787&r1=1519786&r2=1519787&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Tue Sep 3 18:30:05 2013
@@ -174,7 +174,6 @@ import org.apache.hadoop.hdfs.server.com
import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
import org.apache.hadoop.hdfs.server.namenode.JournalSet.JournalAndStream;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress;
@@ -352,7 +351,7 @@ public class FSNamesystem implements Nam
final LeaseManager leaseManager = new LeaseManager(this);
- Daemon smmthread = null; // SafeModeMonitor thread
+ volatile Daemon smmthread = null; // SafeModeMonitor thread
Daemon nnrmthread = null; // NamenodeResourceMonitor thread
@@ -2136,10 +2135,15 @@ public class FSNamesystem implements Nam
throw new FileNotFoundException("failed to append to non-existent file "
+ src + " on client " + clientMachine);
}
- final INodeFile myFile = INodeFile.valueOf(inode, src, true);
+ INodeFile myFile = INodeFile.valueOf(inode, src, true);
// Opening an existing file for write - may need to recover lease.
recoverLeaseInternal(myFile, src, holder, clientMachine, false);
-
+
+ // recoverLeaseInternal may create a new InodeFile via
+ // finalizeINodeFileUnderConstruction so we need to refresh
+ // the referenced file.
+ myFile = INodeFile.valueOf(dir.getINode(src), src, true);
+
final DatanodeDescriptor clientNode =
blockManager.getDatanodeManager().getDatanodeByHost(clientMachine);
return prepareFileForWrite(src, myFile, holder, clientMachine, clientNode,
@@ -3393,12 +3397,26 @@ public class FSNamesystem implements Nam
return true;
}
- ContentSummary getContentSummary(String src) throws AccessControlException,
- FileNotFoundException, UnresolvedLinkException, StandbyException {
+ /**
+ * Get the content summary for a specific file/dir.
+ *
+ * @param src The string representation of the path to the file
+ *
+ * @throws AccessControlException if access is denied
+ * @throws UnresolvedLinkException if a symlink is encountered.
+ * @throws FileNotFoundException if no file exists
+ * @throws StandbyException
+ * @throws IOException for issues with writing to the audit log
+ *
+ * @return object containing information regarding the file
+ * or null if file not found
+ */
+ ContentSummary getContentSummary(String src) throws IOException {
FSPermissionChecker pc = getPermissionChecker();
checkOperation(OperationCategory.READ);
byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
readLock();
+ boolean success = true;
try {
checkOperation(OperationCategory.READ);
src = FSDirectory.resolvePath(src, pathComponents, dir);
@@ -3406,8 +3424,13 @@ public class FSNamesystem implements Nam
checkPermission(pc, src, false, null, null, null, FsAction.READ_EXECUTE);
}
return dir.getContentSummary(src);
+
+ } catch (AccessControlException ace) {
+ success = false;
+ throw ace;
} finally {
readUnlock();
+ logAuditEvent(success, "contentSummary", src);
}
}
@@ -3731,24 +3754,32 @@ public class FSNamesystem implements Nam
// find the DatanodeDescriptor objects
// There should be no locations in the blockManager till now because the
// file is underConstruction
- DatanodeDescriptor[] descriptors = null;
+ List<DatanodeDescriptor> targetList =
+ new ArrayList<DatanodeDescriptor>(newtargets.length);
if (newtargets.length > 0) {
- descriptors = new DatanodeDescriptor[newtargets.length];
- for(int i = 0; i < newtargets.length; i++) {
- descriptors[i] = blockManager.getDatanodeManager().getDatanode(
- newtargets[i]);
+ for (DatanodeID newtarget : newtargets) {
+ // try to get targetNode
+ DatanodeDescriptor targetNode =
+ blockManager.getDatanodeManager().getDatanode(newtarget);
+ if (targetNode != null)
+ targetList.add(targetNode);
+ else if (LOG.isDebugEnabled()) {
+ LOG.debug("DatanodeDescriptor (=" + newtarget + ") not found");
+ }
}
}
- if ((closeFile) && (descriptors != null)) {
+ if ((closeFile) && !targetList.isEmpty()) {
// the file is getting closed. Insert block locations into blockManager.
// Otherwise fsck will report these blocks as MISSING, especially if the
// blocksReceived from Datanodes take a long time to arrive.
- for (int i = 0; i < descriptors.length; i++) {
- descriptors[i].addBlock(storedBlock);
+ for (DatanodeDescriptor targetNode : targetList) {
+ targetNode.addBlock(storedBlock);
}
}
// add pipeline locations into the INodeUnderConstruction
- pendingFile.setLastBlock(storedBlock, descriptors);
+ DatanodeDescriptor[] targetArray =
+ new DatanodeDescriptor[targetList.size()];
+ pendingFile.setLastBlock(storedBlock, targetList.toArray(targetArray));
}
if (closeFile) {
@@ -4507,7 +4538,9 @@ public class FSNamesystem implements Nam
// Have to have write-lock since leaving safemode initializes
// repl queues, which requires write lock
assert hasWriteLock();
- if (needEnter()) {
+ // if smmthread is already running, the block threshold must have been
+ // reached before; there is no need to enter safe mode again
+ if (smmthread == null && needEnter()) {
enter();
// check if we are ready to initialize replication queues
if (canInitializeReplQueues() && !isPopulatingReplQueues()) {
@@ -4516,7 +4549,7 @@ public class FSNamesystem implements Nam
reportStatus("STATE* Safe mode ON.", false);
return;
}
- // the threshold is reached
+ // the threshold is reached or was reached before
if (!isOn() || // safe mode is off
extension <= 0 || threshold <= 0) { // don't need to wait
this.leave(); // leave safe mode
@@ -4528,9 +4561,11 @@ public class FSNamesystem implements Nam
}
// start monitor
reached = now();
- smmthread = new Daemon(new SafeModeMonitor());
- smmthread.start();
- reportStatus("STATE* Safe mode extension entered.", true);
+ if (smmthread == null) {
+ smmthread = new Daemon(new SafeModeMonitor());
+ smmthread.start();
+ reportStatus("STATE* Safe mode extension entered.", true);
+ }
// check if we are ready to initialize replication queues
if (canInitializeReplQueues() && !isPopulatingReplQueues()) {
@@ -4757,7 +4792,22 @@ public class FSNamesystem implements Nam
*/
@Override
public void run() {
- while (fsRunning && (safeMode != null && !safeMode.canLeave())) {
+ while (fsRunning) {
+ writeLock();
+ try {
+ if (safeMode == null) { // Not in safe mode.
+ break;
+ }
+ if (safeMode.canLeave()) {
+ // Leave safe mode.
+ safeMode.leave();
+ smmthread = null;
+ break;
+ }
+ } finally {
+ writeUnlock();
+ }
+
try {
Thread.sleep(recheckInterval);
} catch (InterruptedException ie) {
@@ -4766,11 +4816,7 @@ public class FSNamesystem implements Nam
}
if (!fsRunning) {
LOG.info("NameNode is being shutdown, exit SafeModeMonitor thread");
- } else {
- // leave safe mode and stop the monitor
- leaveSafeMode();
}
- smmthread = null;
}
}
@@ -6184,6 +6230,7 @@ public class FSNamesystem implements Nam
innerinfo.put("nonDfsUsedSpace", node.getNonDfsUsed());
innerinfo.put("capacity", node.getCapacity());
innerinfo.put("numBlocks", node.numBlocks());
+ innerinfo.put("version", node.getSoftwareVersion());
info.put(node.getHostName(), innerinfo);
}
return JSON.toString(info);
@@ -6347,6 +6394,16 @@ public class FSNamesystem implements Nam
return JSON.toString(jasList);
}
+ @Override // NameNodeMxBean
+ public String getJournalTransactionInfo() {
+ Map<String, String> txnIdMap = new HashMap<String, String>();
+ txnIdMap.put("LastAppliedOrWrittenTxId",
+ Long.toString(this.getFSImage().getLastAppliedOrWrittenTxId()));
+ txnIdMap.put("MostRecentCheckpointTxId",
+ Long.toString(this.getFSImage().getMostRecentCheckpointTxId()));
+ return JSON.toString(txnIdMap);
+ }
+
@Override // NameNodeMXBean
public String getNNStarted() {
return getStartTime().toString();
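For reference, the new JournalTransactionInfo attribute serializes as a two-entry JSON map of stringified transaction IDs; a hypothetical value (numbers illustrative):

    {"LastAppliedOrWrittenTxId":"12345","MostRecentCheckpointTxId":"12000"}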
@@ -6385,6 +6442,22 @@ public class FSNamesystem implements Nam
return JSON.toString(list);
}
+ @Override //NameNodeMXBean
+ public int getDistinctVersionCount() {
+ return blockManager.getDatanodeManager().getDatanodesSoftwareVersions()
+ .size();
+ }
+
+ @Override //NameNodeMXBean
+ public Map<String, Integer> getDistinctVersions() {
+ return blockManager.getDatanodeManager().getDatanodesSoftwareVersions();
+ }
+
+ @Override //NameNodeMXBean
+ public String getSoftwareVersion() {
+ return VersionInfo.getVersion();
+ }
+
/**
* Verifies that the given identifier and password are valid and match.
* @param identifier Token identifier.
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java?rev=1519787&r1=1519786&r2=1519787&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java Tue Sep 3 18:30:05 2013
@@ -169,18 +169,26 @@ public class FileJournalManager implemen
* @param fromTxId the txnid which to start looking
* @param forReading whether or not the caller intends to read from the edit
* logs
+ * @param inProgressOk whether or not to include the in-progress edit log
+ * segment
* @return a list of remote edit logs
* @throws IOException if edit logs cannot be listed.
*/
public List<RemoteEditLog> getRemoteEditLogs(long firstTxId,
- boolean forReading) throws IOException {
+ boolean forReading, boolean inProgressOk) throws IOException {
+ // Make sure we are not reading an in-progress edit log, i.e., if forReading
+ // is true, inProgressOk must be false.
+ Preconditions.checkArgument(!(forReading && inProgressOk));
+
File currentDir = sd.getCurrentDir();
List<EditLogFile> allLogFiles = matchEditLogs(currentDir);
List<RemoteEditLog> ret = Lists.newArrayListWithCapacity(
allLogFiles.size());
for (EditLogFile elf : allLogFiles) {
- if (elf.hasCorruptHeader() || elf.isInProgress()) continue;
+ if (elf.hasCorruptHeader() || (!inProgressOk && elf.isInProgress())) {
+ continue;
+ }
if (elf.getFirstTxId() >= firstTxId) {
ret.add(new RemoteEditLog(elf.firstTxId, elf.lastTxId));
} else if (elf.getFirstTxId() < firstTxId && firstTxId <= elf.getLastTxId()) {
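The new precondition encodes the contract spelled out in the comment: a manifest fetched for reading must never include the open segment. A small standalone sketch of the allowed combinations, using the same Guava check:

    import com.google.common.base.Preconditions;

    public class ManifestArgsSketch {
      static void check(boolean forReading, boolean inProgressOk) {
        // A reader must never be handed the in-progress segment.
        Preconditions.checkArgument(!(forReading && inProgressOk));
      }

      public static void main(String[] args) {
        check(true, false);  // ok: reading finalized segments only
        check(false, true);  // ok: not for reading, open segment may be listed
        try {
          check(true, true); // rejected: contradictory combination
        } catch (IllegalArgumentException expected) {
          System.out.println("rejected: " + expected);
        }
      }
    }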
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java?rev=1519787&r1=1519786&r2=1519787&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java Tue Sep 3 18:30:05 2013
@@ -310,11 +310,14 @@ public class GetImageServlet extends Htt
static String getParamStringToPutImage(long txid,
InetSocketAddress imageListenAddress, Storage storage) {
-
+ String machine = !imageListenAddress.isUnresolved()
+ && imageListenAddress.getAddress().isAnyLocalAddress() ? null
+ : imageListenAddress.getHostName();
return "putimage=1" +
"&" + TXID_PARAM + "=" + txid +
"&port=" + imageListenAddress.getPort() +
- "&" + STORAGEINFO_PARAM + "=" +
+ (machine != null ? "&machine=" + machine : "")
+ + "&" + STORAGEINFO_PARAM + "=" +
storage.toColonSeparatedString();
}
@@ -341,10 +344,6 @@ public class GetImageServlet extends Htt
Map<String, String[]> pmap = request.getParameterMap();
isGetImage = isGetEdit = isPutImage = fetchLatest = false;
remoteport = 0;
- machineName = request.getRemoteHost();
- if (InetAddresses.isInetAddress(machineName)) {
- machineName = NetUtils.getHostNameOfIP(machineName);
- }
for (Map.Entry<String, String[]> entry : pmap.entrySet()) {
String key = entry.getKey();
@@ -369,11 +368,20 @@ public class GetImageServlet extends Htt
txId = ServletUtil.parseLongParam(request, TXID_PARAM);
} else if (key.equals("port")) {
remoteport = new Integer(val[0]).intValue();
+ } else if (key.equals("machine")) {
+ machineName = val[0];
} else if (key.equals(STORAGEINFO_PARAM)) {
storageInfoString = val[0];
}
}
+ if (machineName == null) {
+ machineName = request.getRemoteHost();
+ if (InetAddresses.isInetAddress(machineName)) {
+ machineName = NetUtils.getHostNameOfIP(machineName);
+ }
+ }
+
int numGets = (isGetImage?1:0) + (isGetEdit?1:0);
if ((numGets > 1) || (numGets == 0) && !isPutImage) {
throw new IOException("Illegal parameters to TransferFsImage");
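The putimage request now carries an explicit machine parameter when the sender knows its own hostname, and the receiver falls back to the remote host only when that parameter is absent. A sketch of the resulting query string (parameter names assume TXID_PARAM == "txid" and STORAGEINFO_PARAM == "storageInfo"; all values illustrative):

    public class PutImageParamSketch {
      public static void main(String[] args) {
        long txid = 42;    // illustrative checkpoint txid
        int port = 50070;  // illustrative image listen port
        // machine is null when the image server is bound to a wildcard
        // address; the receiver then uses request.getRemoteHost().
        String machine = "nn2.example.com";
        String params = "putimage=1"
            + "&txid=" + txid
            + "&port=" + port
            + (machine != null ? "&machine=" + machine : "")
            + "&storageInfo=...";
        System.out.println(params);
      }
    }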
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java?rev=1519787&r1=1519786&r2=1519787&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java Tue Sep 3 18:30:05 2013
@@ -17,6 +17,8 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
+import static org.apache.hadoop.util.ExitUtil.terminate;
+
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
@@ -37,7 +39,6 @@ import org.apache.hadoop.hdfs.server.pro
import static org.apache.hadoop.util.ExitUtil.terminate;
-import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.ComparisonChain;
import com.google.common.collect.ImmutableList;
@@ -255,13 +256,12 @@ public class JournalSet implements Journ
". Skipping.", ioe);
}
}
- chainAndMakeRedundantStreams(streams, allStreams, fromTxId, inProgressOk);
+ chainAndMakeRedundantStreams(streams, allStreams, fromTxId);
}
public static void chainAndMakeRedundantStreams(
Collection<EditLogInputStream> outStreams,
- PriorityQueue<EditLogInputStream> allStreams,
- long fromTxId, boolean inProgressOk) {
+ PriorityQueue<EditLogInputStream> allStreams, long fromTxId) {
// We want to group together all the streams that start on the same start
// transaction ID. To do this, we maintain an accumulator (acc) of all
// the streams we've seen at a given start transaction ID. When we see a
@@ -596,7 +596,7 @@ public class JournalSet implements Journ
if (j.getManager() instanceof FileJournalManager) {
FileJournalManager fjm = (FileJournalManager)j.getManager();
try {
- allLogs.addAll(fjm.getRemoteEditLogs(fromTxId, forReading));
+ allLogs.addAll(fjm.getRemoteEditLogs(fromTxId, forReading, false));
} catch (Throwable t) {
LOG.warn("Cannot list edit logs in " + fjm, t);
}
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=1519787&r1=1519786&r2=1519787&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Tue Sep 3 18:30:05 2013
@@ -168,12 +168,14 @@ public class NameNode implements NameNod
*/
public static final String[] NAMENODE_SPECIFIC_KEYS = {
DFS_NAMENODE_RPC_ADDRESS_KEY,
+ DFS_NAMENODE_RPC_BIND_HOST_KEY,
DFS_NAMENODE_NAME_DIR_KEY,
DFS_NAMENODE_EDITS_DIR_KEY,
DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
DFS_NAMENODE_CHECKPOINT_DIR_KEY,
DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY,
DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
+ DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY,
DFS_NAMENODE_HTTP_ADDRESS_KEY,
DFS_NAMENODE_KEYTAB_FILE_KEY,
DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
@@ -182,6 +184,7 @@ public class NameNode implements NameNod
DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY,
DFS_NAMENODE_BACKUP_SERVICE_RPC_ADDRESS_KEY,
DFS_NAMENODE_USER_NAME_KEY,
+ DFS_NAMENODE_INTERNAL_SPNEGO_USER_NAME_KEY,
DFS_HA_FENCE_METHODS_KEY,
DFS_HA_ZKFC_PORT_KEY,
DFS_HA_FENCE_METHODS_KEY
@@ -389,6 +392,28 @@ public class NameNode implements NameNod
return getAddress(conf);
}
+ /** Given a configuration, get the bind host of the service RPC server.
+ * Returns null if the bind host is not configured.
+ */
+ protected String getServiceRpcServerBindHost(Configuration conf) {
+ String addr = conf.getTrimmed(DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY);
+ if (addr == null || addr.isEmpty()) {
+ return null;
+ }
+ return addr;
+ }
+
+ /** Given a configuration, get the bind host of the client RPC server.
+ * Returns null if the bind host is not configured.
+ */
+ protected String getRpcServerBindHost(Configuration conf) {
+ String addr = conf.getTrimmed(DFS_NAMENODE_RPC_BIND_HOST_KEY);
+ if (addr == null || addr.isEmpty()) {
+ return null;
+ }
+ return addr;
+ }
+
/**
* Modifies the configuration passed to contain the service rpc address setting
*/
@@ -958,41 +983,49 @@ public class NameNode implements NameNod
FSEditLog sourceEditLog = fsns.getFSImage().editLog;
long fromTxId = fsns.getFSImage().getMostRecentCheckpointTxId();
- Collection<EditLogInputStream> streams = sourceEditLog.selectInputStreams(
- fromTxId+1, 0);
-
- // Set the nextTxid to the CheckpointTxId+1
- newSharedEditLog.setNextTxId(fromTxId + 1);
- // Copy all edits after last CheckpointTxId to shared edits dir
- for (EditLogInputStream stream : streams) {
- LOG.debug("Beginning to copy stream " + stream + " to shared edits");
- FSEditLogOp op;
- boolean segmentOpen = false;
- while ((op = stream.readOp()) != null) {
- if (LOG.isTraceEnabled()) {
- LOG.trace("copying op: " + op);
- }
- if (!segmentOpen) {
- newSharedEditLog.startLogSegment(op.txid, false);
- segmentOpen = true;
+ Collection<EditLogInputStream> streams = null;
+ try {
+ streams = sourceEditLog.selectInputStreams(fromTxId + 1, 0);
+
+ // Set the nextTxid to the CheckpointTxId+1
+ newSharedEditLog.setNextTxId(fromTxId + 1);
+
+ // Copy all edits after last CheckpointTxId to shared edits dir
+ for (EditLogInputStream stream : streams) {
+ LOG.debug("Beginning to copy stream " + stream + " to shared edits");
+ FSEditLogOp op;
+ boolean segmentOpen = false;
+ while ((op = stream.readOp()) != null) {
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("copying op: " + op);
+ }
+ if (!segmentOpen) {
+ newSharedEditLog.startLogSegment(op.txid, false);
+ segmentOpen = true;
+ }
+
+ newSharedEditLog.logEdit(op);
+
+ if (op.opCode == FSEditLogOpCodes.OP_END_LOG_SEGMENT) {
+ newSharedEditLog.logSync();
+ newSharedEditLog.endCurrentLogSegment(false);
+ LOG.debug("ending log segment because of END_LOG_SEGMENT op in "
+ + stream);
+ segmentOpen = false;
+ }
}
-
- newSharedEditLog.logEdit(op);
- if (op.opCode == FSEditLogOpCodes.OP_END_LOG_SEGMENT) {
+ if (segmentOpen) {
+ LOG.debug("ending log segment because of end of stream in " + stream);
newSharedEditLog.logSync();
newSharedEditLog.endCurrentLogSegment(false);
- LOG.debug("ending log segment because of END_LOG_SEGMENT op in " + stream);
segmentOpen = false;
}
}
-
- if (segmentOpen) {
- LOG.debug("ending log segment because of end of stream in " + stream);
- newSharedEditLog.logSync();
- newSharedEditLog.endCurrentLogSegment(false);
- segmentOpen = false;
+ } finally {
+ if (streams != null) {
+ FSEditLog.closeAllStreams(streams);
}
}
}
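
Both new accessors read an optional bind-host key with getTrimmed and treat a blank value as unset, so the RPC server can listen on one interface (commonly the wildcard) while clients keep using the advertised address. A hedged illustration of that fallback, with the key string assumed from the constant's name and the host names invented for the example:

    import org.apache.hadoop.conf.Configuration;

    public class BindHostSketch {
      // Returns the configured bind host, or null when the key is absent or blank.
      static String bindHostOrNull(Configuration conf, String key) {
        String addr = conf.getTrimmed(key);
        return (addr == null || addr.isEmpty()) ? null : addr;
      }

      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("dfs.namenode.rpc-bind-host", "0.0.0.0"); // listen on all interfaces
        String bindHost = bindHostOrNull(conf, "dfs.namenode.rpc-bind-host");
        String fallbackHost = "nn1.example.com"; // host taken from the rpc-address key
        System.out.println("binding to " +
            (bindHost != null ? bindHost : fallbackHost));
      }
    }

This is the same null check NameNodeRpcServer performs below: bindHost wins when set, otherwise the hostname from the rpc-address key is used, keeping existing configurations unchanged.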
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java?rev=1519787&r1=1519786&r2=1519787&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java Tue Sep 3 18:30:05 2013
@@ -18,6 +18,7 @@
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ADMIN;
+
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.HashMap;
@@ -70,65 +71,30 @@ public class NameNodeHttpServer {
public void start() throws IOException {
final String infoHost = bindAddress.getHostName();
int infoPort = bindAddress.getPort();
-
- httpServer = new HttpServer("hdfs", infoHost, infoPort,
- infoPort == 0, conf,
- new AccessControlList(conf.get(DFS_ADMIN, " "))) {
- {
- // Add SPNEGO support to NameNode
- if (UserGroupInformation.isSecurityEnabled()) {
- initSpnego(conf,
- DFSConfigKeys.DFS_NAMENODE_INTERNAL_SPNEGO_USER_NAME_KEY,
- DFSUtil.getSpnegoKeytabKey(conf,
- DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY));
- }
- if (WebHdfsFileSystem.isEnabled(conf, LOG)) {
- //add SPNEGO authentication filter for webhdfs
- final String name = "SPNEGO";
- final String classname = AuthFilter.class.getName();
- final String pathSpec = WebHdfsFileSystem.PATH_PREFIX + "/*";
- Map<String, String> params = getAuthFilterParams(conf);
- defineFilter(webAppContext, name, classname, params,
- new String[]{pathSpec});
- LOG.info("Added filter '" + name + "' (class=" + classname + ")");
-
- // add webhdfs packages
- addJerseyResourcePackage(
- NamenodeWebHdfsMethods.class.getPackage().getName()
- + ";" + Param.class.getPackage().getName(), pathSpec);
- }
- }
-
- private Map<String, String> getAuthFilterParams(Configuration conf)
- throws IOException {
- Map<String, String> params = new HashMap<String, String>();
- String principalInConf = conf
- .get(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY);
- if (principalInConf != null && !principalInConf.isEmpty()) {
- params
- .put(
- DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY,
- SecurityUtil.getServerPrincipal(principalInConf,
- bindAddress.getHostName()));
- } else if (UserGroupInformation.isSecurityEnabled()) {
- LOG.error("WebHDFS and security are enabled, but configuration property '" +
- DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY +
- "' is not set.");
- }
- String httpKeytab = conf.get(DFSUtil.getSpnegoKeytabKey(conf,
- DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY));
- if (httpKeytab != null && !httpKeytab.isEmpty()) {
- params.put(
- DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY,
- httpKeytab);
- } else if (UserGroupInformation.isSecurityEnabled()) {
- LOG.error("WebHDFS and security are enabled, but configuration property '" +
- DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY +
- "' is not set.");
- }
- return params;
+ httpServer = new HttpServer.Builder().setName("hdfs")
+ .setBindAddress(infoHost).setPort(infoPort)
+ .setFindPort(infoPort == 0).setConf(conf).setACL(
+ new AccessControlList(conf.get(DFS_ADMIN, " ")))
+ .setSecurityEnabled(UserGroupInformation.isSecurityEnabled())
+ .setUsernameConfKey(
+ DFSConfigKeys.DFS_NAMENODE_INTERNAL_SPNEGO_USER_NAME_KEY)
+ .setKeytabConfKey(DFSUtil.getSpnegoKeytabKey(conf,
+ DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY)).build();
+ if (WebHdfsFileSystem.isEnabled(conf, HttpServer.LOG)) {
+ //add SPNEGO authentication filter for webhdfs
+ final String name = "SPNEGO";
+ final String classname = AuthFilter.class.getName();
+ final String pathSpec = WebHdfsFileSystem.PATH_PREFIX + "/*";
+ Map<String, String> params = getAuthFilterParams(conf);
+ httpServer.defineFilter(httpServer.getWebAppContext(), name, classname, params,
+ new String[]{pathSpec});
+ HttpServer.LOG.info("Added filter '" + name + "' (class=" + classname + ")");
+
+ // add webhdfs packages
+ httpServer.addJerseyResourcePackage(
+ NamenodeWebHdfsMethods.class.getPackage().getName()
+ + ";" + Param.class.getPackage().getName(), pathSpec);
}
- };
boolean certSSL = conf.getBoolean(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY, false);
if (certSSL) {
@@ -153,6 +119,38 @@ public class NameNodeHttpServer {
httpServer.start();
httpAddress = new InetSocketAddress(bindAddress.getAddress(), httpServer.getPort());
}
+
+ private Map<String, String> getAuthFilterParams(Configuration conf)
+ throws IOException {
+ Map<String, String> params = new HashMap<String, String>();
+ String principalInConf = conf
+ .get(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY);
+ if (principalInConf != null && !principalInConf.isEmpty()) {
+ params
+ .put(
+ DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY,
+ SecurityUtil.getServerPrincipal(principalInConf,
+ bindAddress.getHostName()));
+ } else if (UserGroupInformation.isSecurityEnabled()) {
+ HttpServer.LOG.error(
+ "WebHDFS and security are enabled, but configuration property '" +
+ DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY +
+ "' is not set.");
+ }
+ String httpKeytab = conf.get(DFSUtil.getSpnegoKeytabKey(conf,
+ DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY));
+ if (httpKeytab != null && !httpKeytab.isEmpty()) {
+ params.put(
+ DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY,
+ httpKeytab);
+ } else if (UserGroupInformation.isSecurityEnabled()) {
+ HttpServer.LOG.error(
+ "WebHDFS and security are enabled, but configuration property '" +
+ DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY +
+ "' is not set.");
+ }
+ return params;
+ }
public void stop() throws Exception {
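
The start() rewrite swaps an anonymous HttpServer subclass, which did its setup in an instance initializer, for HttpServer.Builder, and hoists getAuthFilterParams out to an ordinary private method. A small sketch of the fluent-builder shape, using a hypothetical Server class rather than the Hadoop one:

    // Hypothetical fluent builder mirroring the shape of HttpServer.Builder.
    public class Server {
      private final String name;
      private final String bindAddress;
      private final int port;
      private final boolean findPort;

      private Server(Builder b) {
        this.name = b.name;
        this.bindAddress = b.bindAddress;
        this.port = b.port;
        this.findPort = b.findPort;
      }

      public static class Builder {
        private String name;
        private String bindAddress = "0.0.0.0";
        private int port;
        private boolean findPort;

        public Builder setName(String name) { this.name = name; return this; }
        public Builder setBindAddress(String a) { this.bindAddress = a; return this; }
        public Builder setPort(int port) { this.port = port; return this; }
        public Builder setFindPort(boolean f) { this.findPort = f; return this; }

        public Server build() {
          // Validation happens once, here, not inside subclass initializer blocks.
          if (name == null) {
            throw new IllegalStateException("name is required");
          }
          return new Server(this);
        }
      }
    }

The gain is mostly readability and construction-time validation: new Server.Builder().setName("hdfs").setPort(0).build() reads top to bottom, and anything that must happen after the server exists, like the WebHDFS SPNEGO filter above, is added through normal method calls instead of initializer blocks.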
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java?rev=1519787&r1=1519786&r2=1519787&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java Tue Sep 3 18:30:05 2013
@@ -17,6 +17,8 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
+import java.util.Map;
+
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@@ -33,7 +35,13 @@ public interface NameNodeMXBean {
* @return the version
*/
public String getVersion();
-
+
+ /**
+ * Get the version of the software running on the NameNode.
+ * @return a string representing the software version
+ */
+ public String getSoftwareVersion();
+
/**
* Gets the used space by data nodes.
*
@@ -188,6 +196,12 @@ public interface NameNodeMXBean {
* @return the name journal status information, as a JSON string.
*/
public String getNameJournalStatus();
+
+ /**
+ * Get information about the transaction IDs, including the last applied
+ * transaction ID and the most recent checkpoint's transaction ID.
+ */
+ public String getJournalTransactionInfo();
/**
* Gets the NN start time
@@ -209,4 +223,19 @@ public interface NameNodeMXBean {
* @return the list of corrupt files, as a JSON string.
*/
public String getCorruptFiles();
+
+ /**
+ * Get the number of distinct versions of live datanodes
+ *
+ * @return the number of distinct versions of live datanodes
+ */
+ public int getDistinctVersionCount();
+
+ /**
+ * Get the number of live datanodes for each distinct version.
+ *
+ * @return the number of live datanodes for each distinct version
+ */
+ public Map<String, Integer> getDistinctVersions();
+
}
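
Like the existing NameNodeMXBean attributes, the new ones surface through JMX. A hedged sketch of reading them from the platform MBean server; the ObjectName below is an assumption based on Hadoop's usual "Hadoop:service=...,name=..." registration and may differ by version, and the code must run inside the NameNode JVM (or be adapted to a remote JMX connector):

    import java.lang.management.ManagementFactory;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    public class MXBeanQuerySketch {
      public static void main(String[] args) throws Exception {
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        // Assumed registration name for the NameNode's info MXBean.
        ObjectName nn = new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
        // MXBean getters getDistinctVersionCount()/getDistinctVersions()
        // appear as the attributes "DistinctVersionCount"/"DistinctVersions".
        Object count = mbs.getAttribute(nn, "DistinctVersionCount");
        Object versions = mbs.getAttribute(nn, "DistinctVersions");
        System.out.println("live datanode versions: " + count + " -> " + versions);
      }
    }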
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java?rev=1519787&r1=1519786&r2=1519787&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java Tue Sep 3 18:30:05 2013
@@ -137,6 +137,7 @@ import org.apache.hadoop.tools.protocolP
import org.apache.hadoop.util.VersionInfo;
import org.apache.hadoop.util.VersionUtil;
+import com.google.common.annotations.VisibleForTesting;
import com.google.protobuf.BlockingService;
/**
@@ -219,6 +220,13 @@ class NameNodeRpcServer implements Namen
InetSocketAddress serviceRpcAddr = nn.getServiceRpcServerAddress(conf);
if (serviceRpcAddr != null) {
+ String bindHost = nn.getServiceRpcServerBindHost(conf);
+ if (bindHost == null) {
+ bindHost = serviceRpcAddr.getHostName();
+ }
+ LOG.info("Service RPC server is binding to " + bindHost + ":" +
+ serviceRpcAddr.getPort());
+
int serviceHandlerCount =
conf.getInt(DFS_NAMENODE_SERVICE_HANDLER_COUNT_KEY,
DFS_NAMENODE_SERVICE_HANDLER_COUNT_DEFAULT);
@@ -226,7 +234,7 @@ class NameNodeRpcServer implements Namen
.setProtocol(
org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB.class)
.setInstance(clientNNPbService)
- .setBindAddress(serviceRpcAddr.getHostName())
+ .setBindAddress(bindHost)
.setPort(serviceRpcAddr.getPort()).setNumHandlers(serviceHandlerCount)
.setVerbose(false)
.setSecretManager(namesystem.getDelegationTokenSecretManager())
@@ -246,17 +254,26 @@ class NameNodeRpcServer implements Namen
DFSUtil.addPBProtocol(conf, GetUserMappingsProtocolPB.class,
getUserMappingService, serviceRpcServer);
- serviceRPCAddress = serviceRpcServer.getListenerAddress();
+ // Update the address with the correct port
+ InetSocketAddress listenAddr = serviceRpcServer.getListenerAddress();
+ serviceRPCAddress = new InetSocketAddress(
+ serviceRpcAddr.getHostName(), listenAddr.getPort());
nn.setRpcServiceServerAddress(conf, serviceRPCAddress);
} else {
serviceRpcServer = null;
serviceRPCAddress = null;
}
InetSocketAddress rpcAddr = nn.getRpcServerAddress(conf);
+ String bindHost = nn.getRpcServerBindHost(conf);
+ if (bindHost == null) {
+ bindHost = rpcAddr.getHostName();
+ }
+ LOG.info("RPC server is binding to " + bindHost + ":" + rpcAddr.getPort());
+
this.clientRpcServer = new RPC.Builder(conf)
.setProtocol(
org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB.class)
- .setInstance(clientNNPbService).setBindAddress(rpcAddr.getHostName())
+ .setInstance(clientNNPbService).setBindAddress(bindHost)
.setPort(rpcAddr.getPort()).setNumHandlers(handlerCount)
.setVerbose(false)
.setSecretManager(namesystem.getDelegationTokenSecretManager()).build();
@@ -286,7 +303,9 @@ class NameNodeRpcServer implements Namen
}
// The rpc-server port can be ephemeral... ensure we have the correct info
- clientRpcAddress = clientRpcServer.getListenerAddress();
+ InetSocketAddress listenAddr = clientRpcServer.getListenerAddress();
+ clientRpcAddress = new InetSocketAddress(
+ rpcAddr.getHostName(), listenAddr.getPort());
nn.setRpcServerAddress(conf, clientRpcAddress);
minimumDataNodeVersion = conf.get(
@@ -310,6 +329,12 @@ class NameNodeRpcServer implements Namen
NSQuotaExceededException.class,
DSQuotaExceededException.class);
}
+
+ /** Allow access to the client RPC server for testing */
+ @VisibleForTesting
+ RPC.Server getClientRpcServer() {
+ return clientRpcServer;
+ }
/**
* Start client and service RPC servers.
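
The two listener-address hunks above follow one pattern: keep the configured hostname but take the actual (possibly ephemeral) port the server bound to. A small standalone sketch of that recombination, using a plain ServerSocket in place of the RPC server and an invented hostname:

    import java.io.IOException;
    import java.net.InetSocketAddress;
    import java.net.ServerSocket;

    public class EphemeralPortSketch {
      public static void main(String[] args) throws IOException {
        String configuredHost = "nn1.example.com"; // host clients are told to use
        try (ServerSocket server = new ServerSocket(0)) { // port 0 = ephemeral
          // The listener reports the address it actually bound; keep only its
          // port and pair it with the advertised hostname.
          InetSocketAddress advertised =
              new InetSocketAddress(configuredHost, server.getLocalPort());
          System.out.println("advertising " + advertised);
        }
      }
    }

Without this, a NameNode bound via the new bind-host keys (say, to 0.0.0.0) would have advertised the wildcard address back into the configuration.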
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java?rev=1519787&r1=1519786&r2=1519787&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java Tue Sep 3 18:30:05 2013
@@ -32,6 +32,7 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
+import java.util.Map;
import javax.servlet.ServletContext;
import javax.servlet.http.HttpServletRequest;
@@ -99,6 +100,20 @@ class NamenodeJspHelper {
}
}
+ static String getRollingUpgradeText(FSNamesystem fsn) {
+ DatanodeManager dm = fsn.getBlockManager().getDatanodeManager();
+ Map<String, Integer> list = dm.getDatanodesSoftwareVersions();
+ if(list.size() > 1) {
+ StringBuffer status = new StringBuffer("Rolling upgrades in progress. " +
+ "There are " + list.size() + " versions of datanodes currently live: ");
+ for(Map.Entry<String, Integer> ver: list.entrySet()) {
+ status.append(ver.getKey() + "(" + ver.getValue() + "), ");
+ }
+ return status.substring(0, status.length()-2);
+ }
+ return "";
+ }
+
static String getInodeLimitText(FSNamesystem fsn) {
if (fsn == null) {
return "";
@@ -802,7 +817,9 @@ class NamenodeJspHelper {
+ "<td align=\"right\" class=\"pcbpused\">"
+ percentBpUsed
+ "<td align=\"right\" class=\"volfails\">"
- + d.getVolumeFailures() + "\n");
+ + d.getVolumeFailures()
+ + "<td align=\"right\" class=\"version\">"
+ + d.getSoftwareVersion() + "\n");
}
void generateNodesList(ServletContext context, JspWriter out,
@@ -900,7 +917,9 @@ class NamenodeJspHelper {
+ nodeHeaderStr("pcbpused")
+ "> Block Pool<br>Used (%)" + " <th "
+ nodeHeaderStr("volfails")
- +"> Failed Volumes\n");
+ +"> Failed Volumes <th "
+ + nodeHeaderStr("versionString")
+ +"> Version\n");
JspHelper.sortNodeList(live, sorterField, sorterOrder);
for (int i = 0; i < live.size(); i++) {
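
getRollingUpgradeText builds a comma-separated summary from the version-to-count map and trims the trailing separator; more than one live version implies a rolling upgrade is in flight. A hedged sketch of the same formatting with a plain map (StringBuilder stands in for the StringBuffer used above, since no shared mutation is involved):

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class VersionSummarySketch {
      static String summarize(Map<String, Integer> versions) {
        if (versions.size() <= 1) {
          return ""; // a single version means no rolling upgrade in flight
        }
        StringBuilder status = new StringBuilder("Rolling upgrades in progress. ")
            .append("There are ").append(versions.size())
            .append(" versions of datanodes currently live: ");
        for (Map.Entry<String, Integer> ver : versions.entrySet()) {
          status.append(ver.getKey()).append("(").append(ver.getValue()).append("), ");
        }
        return status.substring(0, status.length() - 2); // drop trailing ", "
      }

      public static void main(String[] args) {
        Map<String, Integer> live = new LinkedHashMap<String, Integer>();
        live.put("2.1.0", 40);
        live.put("2.1.1", 8);
        System.out.println(summarize(live));
      }
    }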
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java?rev=1519787&r1=1519786&r2=1519787&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java Tue Sep 3 18:30:05 2013
@@ -256,19 +256,15 @@ public class SecondaryNameNode implement
// initialize the webserver for uploading files.
int tmpInfoPort = infoSocAddr.getPort();
- infoServer = new HttpServer("secondary", infoBindAddress, tmpInfoPort,
- tmpInfoPort == 0, conf,
- new AccessControlList(conf.get(DFS_ADMIN, " "))) {
- {
- if (UserGroupInformation.isSecurityEnabled()) {
- initSpnego(
- conf,
- DFSConfigKeys.DFS_SECONDARY_NAMENODE_INTERNAL_SPNEGO_USER_NAME_KEY,
- DFSUtil.getSpnegoKeytabKey(conf,
- DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY));
- }
- }
- };
+ infoServer = new HttpServer.Builder().setName("secondary")
+ .setBindAddress(infoBindAddress).setPort(tmpInfoPort)
+ .setFindPort(tmpInfoPort == 0).setConf(conf).setACL(
+ new AccessControlList(conf.get(DFS_ADMIN, " ")))
+ .setSecurityEnabled(UserGroupInformation.isSecurityEnabled())
+ .setUsernameConfKey(
+ DFSConfigKeys.DFS_SECONDARY_NAMENODE_INTERNAL_SPNEGO_USER_NAME_KEY)
+ .setKeytabConfKey(DFSUtil.getSpnegoKeytabKey(conf,
+ DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY)).build();
infoServer.setAttribute("secondary.name.node", this);
infoServer.setAttribute("name.system.image", checkpointImage);
infoServer.setAttribute(JspHelper.CURRENT_CONF, conf);
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java?rev=1519787&r1=1519786&r2=1519787&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java Tue Sep 3 18:30:05 2013
@@ -226,7 +226,7 @@ public class BootstrapStandby implements
try {
Collection<EditLogInputStream> streams =
image.getEditLog().selectInputStreams(
- firstTxIdInLogs, curTxIdOnOtherNode, null, true);
+ firstTxIdInLogs, curTxIdOnOtherNode, null, true, false);
for (EditLogInputStream stream : streams) {
IOUtils.closeStream(stream);
}
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java?rev=1519787&r1=1519786&r2=1519787&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java Tue Sep 3 18:30:05 2013
@@ -17,9 +17,17 @@
*/
package org.apache.hadoop.hdfs.server.namenode.ha;
+import static org.apache.hadoop.util.Time.now;
+
import java.io.IOException;
import java.net.InetSocketAddress;
import java.security.PrivilegedAction;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.ThreadFactory;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -38,10 +46,10 @@ import org.apache.hadoop.hdfs.util.Cance
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
-import static org.apache.hadoop.util.Time.now;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
/**
* Thread which runs inside the NN when it's in Standby state,
@@ -57,6 +65,7 @@ public class StandbyCheckpointer {
private final FSNamesystem namesystem;
private long lastCheckpointTime;
private final CheckpointerThread thread;
+ private final ThreadFactory uploadThreadFactory;
private String activeNNAddress;
private InetSocketAddress myNNAddress;
@@ -72,6 +81,8 @@ public class StandbyCheckpointer {
this.namesystem = ns;
this.checkpointConf = new CheckpointConf(conf);
this.thread = new CheckpointerThread();
+ this.uploadThreadFactory = new ThreadFactoryBuilder().setDaemon(true)
+ .setNameFormat("TransferFsImageUpload-%d").build();
setNameNodeAddresses(conf);
}
@@ -142,7 +153,7 @@ public class StandbyCheckpointer {
private void doCheckpoint() throws InterruptedException, IOException {
assert canceler != null;
- long txid;
+ final long txid;
namesystem.writeLockInterruptibly();
try {
@@ -171,9 +182,26 @@ public class StandbyCheckpointer {
}
// Upload the saved checkpoint back to the active
- TransferFsImage.uploadImageFromStorage(
- activeNNAddress, myNNAddress,
- namesystem.getFSImage().getStorage(), txid);
+ // Do this in a separate thread to avoid blocking transition to active
+ // See HDFS-4816
+ ExecutorService executor =
+ Executors.newSingleThreadExecutor(uploadThreadFactory);
+ Future<Void> upload = executor.submit(new Callable<Void>() {
+ @Override
+ public Void call() throws IOException {
+ TransferFsImage.uploadImageFromStorage(
+ activeNNAddress, myNNAddress,
+ namesystem.getFSImage().getStorage(), txid);
+ return null;
+ }
+ });
+ executor.shutdown();
+ try {
+ upload.get();
+ } catch (ExecutionException e) {
+ throw new IOException("Exception during image upload: " + e.getMessage(),
+ e.getCause());
+ }
}
/**
@@ -301,6 +329,7 @@ public class StandbyCheckpointer {
LOG.info("Checkpoint was cancelled: " + ce.getMessage());
canceledCount++;
} catch (InterruptedException ie) {
+ LOG.info("Interrupted during checkpointing", ie);
// Probably requested shutdown.
continue;
} catch (Throwable t) {
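
The HDFS-4816 change above moves the image upload onto a single-use daemon thread so the checkpointer stays interruptible during a transition to active; the caller still blocks on the Future and rethrows the cause of any ExecutionException as an IOException. A self-contained sketch of that pattern, where uploadImage is a hypothetical stand-in for TransferFsImage.uploadImageFromStorage and a plain ThreadFactory replaces Guava's ThreadFactoryBuilder used in the patch:

    import java.io.IOException;
    import java.util.concurrent.Callable;
    import java.util.concurrent.ExecutionException;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;
    import java.util.concurrent.ThreadFactory;

    public class UploadInThreadSketch {
      static void uploadImage(long txid) throws IOException {
        // Stand-in for the real image transfer.
        System.out.println("uploading checkpoint at txid " + txid);
      }

      static void uploadCheckpoint(final long txid)
          throws IOException, InterruptedException {
        ThreadFactory daemonFactory = new ThreadFactory() {
          @Override
          public Thread newThread(Runnable r) {
            Thread t = new Thread(r, "TransferFsImageUpload");
            t.setDaemon(true); // do not block JVM shutdown
            return t;
          }
        };
        ExecutorService executor = Executors.newSingleThreadExecutor(daemonFactory);
        Future<Void> upload = executor.submit(new Callable<Void>() {
          @Override
          public Void call() throws IOException {
            uploadImage(txid);
            return null;
          }
        });
        executor.shutdown(); // no further tasks; the thread exits after the upload
        try {
          upload.get(); // wait for completion, but remain interruptible
        } catch (ExecutionException e) {
          // Unwrap so callers see the underlying failure as an IOException.
          throw new IOException("Exception during image upload: " + e.getMessage(),
              e.getCause());
        }
      }

      public static void main(String[] args) throws Exception {
        uploadCheckpoint(42L);
      }
    }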