Posted to hdfs-commits@hadoop.apache.org by gk...@apache.org on 2012/08/03 21:00:59 UTC
svn commit: r1369164 [7/16] - in
/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project: ./
hadoop-hdfs-httpfs/ hadoop-hdfs-httpfs/dev-support/
hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/
hadoop-hdfs-httpfs/src/main/java/or...
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CancelDelegationTokenServlet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CancelDelegationTokenServlet.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CancelDelegationTokenServlet.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CancelDelegationTokenServlet.java Fri Aug 3 19:00:15 2012
@@ -68,6 +68,7 @@ public class CancelDelegationTokenServle
try {
ugi.doAs(new PrivilegedExceptionAction<Void>() {
+ @Override
public Void run() throws Exception {
nn.getRpcServer().cancelDelegationToken(token);
return null;
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CheckpointSignature.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CheckpointSignature.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CheckpointSignature.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CheckpointSignature.java Fri Aug 3 19:00:15 2012
@@ -72,6 +72,7 @@ public class CheckpointSignature extends
* Get the cluster id from CheckpointSignature
* @return the cluster id
*/
+ @Override
public String getClusterID() {
return clusterID;
}
@@ -101,6 +102,7 @@ public class CheckpointSignature extends
this.blockpoolID = blockpoolID;
}
+ @Override
public String toString() {
return String.valueOf(layoutVersion) + FIELD_SEPARATOR
+ String.valueOf(namespaceID) + FIELD_SEPARATOR
@@ -111,12 +113,19 @@ public class CheckpointSignature extends
+ blockpoolID ;
}
+ boolean storageVersionMatches(StorageInfo si) throws IOException {
+ return (layoutVersion == si.layoutVersion) && (cTime == si.cTime);
+ }
+
+ boolean isSameCluster(FSImage si) {
+ return namespaceID == si.getStorage().namespaceID &&
+ clusterID.equals(si.getClusterID()) &&
+ blockpoolID.equals(si.getBlockPoolID());
+ }
+
void validateStorageInfo(FSImage si) throws IOException {
- if(layoutVersion != si.getStorage().layoutVersion
- || namespaceID != si.getStorage().namespaceID
- || cTime != si.getStorage().cTime
- || !clusterID.equals(si.getClusterID())
- || !blockpoolID.equals(si.getBlockPoolID())) {
+ if (!isSameCluster(si)
+ || !storageVersionMatches(si.getStorage())) {
throw new IOException("Inconsistent checkpoint fields.\n"
+ "LV = " + layoutVersion + " namespaceID = " + namespaceID
+ " cTime = " + cTime
@@ -133,6 +142,7 @@ public class CheckpointSignature extends
//
// Comparable interface
//
+ @Override
public int compareTo(CheckpointSignature o) {
return ComparisonChain.start()
.compare(layoutVersion, o.layoutVersion)
@@ -145,6 +155,7 @@ public class CheckpointSignature extends
.result();
}
+ @Override
public boolean equals(Object o) {
if (!(o instanceof CheckpointSignature)) {
return false;
@@ -152,6 +163,7 @@ public class CheckpointSignature extends
return compareTo((CheckpointSignature)o) == 0;
}
+ @Override
public int hashCode() {
return layoutVersion ^ namespaceID ^
(int)(cTime ^ mostRecentCheckpointTxId ^ curSegmentTxId)
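The validateStorageInfo() rewrite above splits one compound comparison into two reusable package-private predicates. A minimal sketch of how a caller in the same namenode package might use them separately, assuming only the two methods added in this hunk (the helper and its messages below are hypothetical, not part of this commit):

    // Hypothetical helper, same package as CheckpointSignature.
    static void checkSignature(CheckpointSignature sig, FSImage image)
        throws IOException {
      if (!sig.isSameCluster(image)) {
        // namespaceID, clusterID or blockpoolID differ: wrong cluster entirely
        throw new IOException("Checkpoint signature is from a different cluster");
      }
      if (!sig.storageVersionMatches(image.getStorage())) {
        // same cluster, but layoutVersion or cTime do not match local storage
        throw new IOException("Checkpoint signature does not match storage version");
      }
    }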
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java Fri Aug 3 19:00:15 2012
@@ -19,7 +19,7 @@ package org.apache.hadoop.hdfs.server.na
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY;
-import static org.apache.hadoop.hdfs.server.common.Util.now;
+import static org.apache.hadoop.util.Time.now;
import java.io.File;
import java.io.IOException;
@@ -118,6 +118,7 @@ class Checkpointer extends Daemon {
//
// The main work loop
//
+ @Override
public void run() {
// Check the size of the edit log once every 5 minutes.
long periodMSec = 5 * 60; // 5 minutes
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ClusterJspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ClusterJspHelper.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ClusterJspHelper.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ClusterJspHelper.java Fri Aug 3 19:00:15 2012
@@ -656,6 +656,7 @@ class ClusterJspHelper {
this.value = v;
}
+ @Override
public String toString() {
return value;
}
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java Fri Aug 3 19:00:15 2012
@@ -194,9 +194,11 @@ public class EditLogFileInputStream exte
//
long skipAmt = log.length() - tracker.getPos();
if (skipAmt > 0) {
- LOG.warn("skipping " + skipAmt + " bytes at the end " +
- "of edit log '" + getName() + "': reached txid " + txId +
- " out of " + lastTxId);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("skipping " + skipAmt + " bytes at the end " +
+ "of edit log '" + getName() + "': reached txid " + txId +
+ " out of " + lastTxId);
+ }
tracker.clearLimit();
IOUtils.skipFully(tracker, skipAmt);
}
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogInputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogInputStream.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogInputStream.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogInputStream.java Fri Aug 3 19:00:15 2012
@@ -57,6 +57,7 @@ public abstract class EditLogInputStream
* Close the stream.
* @throws IOException if an error occurred while closing
*/
+ @Override
public abstract void close() throws IOException;
/**
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java Fri Aug 3 19:00:15 2012
@@ -20,7 +20,7 @@ package org.apache.hadoop.hdfs.server.na
import java.io.IOException;
import java.io.Closeable;
-import static org.apache.hadoop.hdfs.server.common.Util.now;
+import static org.apache.hadoop.util.Time.now;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@@ -74,6 +74,7 @@ public abstract class EditLogOutputStrea
* @throws IOException if the journal can't be closed,
* or if there are unflushed edits
*/
+ @Override
abstract public void close() throws IOException;
/**
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Fri Aug 3 19:00:15 2012
@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
-import static org.apache.hadoop.hdfs.server.common.Util.now;
+import static org.apache.hadoop.util.Time.now;
import java.io.Closeable;
import java.io.FileNotFoundException;
@@ -187,6 +187,7 @@ public class FSDirectory implements Clos
/**
* Shutdown the filestore
*/
+ @Override
public void close() throws IOException {
fsImage.close();
}
@@ -229,8 +230,15 @@ public class FSDirectory implements Clos
// Always do an implicit mkdirs for parent directory tree.
long modTime = now();
- if (!mkdirs(new Path(path).getParent().toString(), permissions, true,
- modTime)) {
+
+ Path parent = new Path(path).getParent();
+ if (parent == null) {
+ // Trying to add "/" as a file - this path has no
+ // parent -- avoids an NPE below.
+ return null;
+ }
+
+ if (!mkdirs(parent.toString(), permissions, true, modTime)) {
return null;
}
INodeFileUnderConstruction newNode = new INodeFileUnderConstruction(
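For context on the new parent-null guard: in org.apache.hadoop.fs.Path, the root path has no parent, so getParent() returns null for "/", and calling toString() on that result is exactly the NPE the hunk avoids. A standalone sketch (assumes only hadoop-common on the classpath):

    import org.apache.hadoop.fs.Path;

    public class ParentCheck {
      public static void main(String[] args) {
        System.out.println(new Path("/").getParent());    // null: root has no parent
        System.out.println(new Path("/a/b").getParent()); // /a
      }
    }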
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java Fri Aug 3 19:00:15 2012
@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
-import static org.apache.hadoop.hdfs.server.common.Util.now;
+import static org.apache.hadoop.util.Time.now;
import java.io.IOException;
import java.lang.reflect.Constructor;
@@ -37,6 +37,9 @@ import org.apache.hadoop.fs.permission.F
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
+
+import static org.apache.hadoop.util.ExitUtil.terminate;
+
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddOp;
@@ -136,10 +139,6 @@ public class FSEditLog {
// is an automatic sync scheduled?
private volatile boolean isAutoSyncScheduled = false;
- // Used to exit in the event of a failure to sync to all journals. It's a
- // member variable so it can be swapped out for testing.
- private Runtime runtime = Runtime.getRuntime();
-
// these are statistics counters.
private long numTransactions; // number of transactions
private long numTransactionsBatchedInSync;
@@ -174,6 +173,7 @@ public class FSEditLog {
// stores the most current transactionId of this thread.
private static final ThreadLocal<TransactionId> myTransactionId = new ThreadLocal<TransactionId>() {
+ @Override
protected synchronized TransactionId initialValue() {
return new TransactionId(Long.MAX_VALUE);
}
@@ -232,9 +232,6 @@ public class FSEditLog {
DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_MINIMUM_DEFAULT);
journalSet = new JournalSet(minimumRedundantJournals);
- // set runtime so we can test starting with a faulty or unavailable
- // shared directory
- this.journalSet.setRuntimeForTesting(runtime);
for (URI u : dirs) {
boolean required = FSNamesystem.getRequiredNamespaceEditsDirs(conf)
@@ -547,10 +544,12 @@ public class FSEditLog {
}
editLogStream.setReadyToFlush();
} catch (IOException e) {
- LOG.fatal("Could not sync enough journals to persistent storage. "
- + "Unsynced transactions: " + (txid - synctxid),
- new Exception());
- runtime.exit(1);
+ final String msg =
+ "Could not sync enough journals to persistent storage " +
+ "due to " + e.getMessage() + ". " +
+ "Unsynced transactions: " + (txid - synctxid);
+ LOG.fatal(msg, new Exception());
+ terminate(1, msg);
}
} finally {
// Prevent RuntimeException from blocking other log edit write
@@ -569,9 +568,11 @@ public class FSEditLog {
}
} catch (IOException ex) {
synchronized (this) {
- LOG.fatal("Could not sync enough journals to persistent storage. "
- + "Unsynced transactions: " + (txid - synctxid), new Exception());
- runtime.exit(1);
+ final String msg =
+ "Could not sync enough journals to persistent storage. "
+ + "Unsynced transactions: " + (txid - synctxid);
+ LOG.fatal(msg, new Exception());
+ terminate(1, msg);
}
}
long elapsed = now() - start;
@@ -844,15 +845,6 @@ public class FSEditLog {
}
/**
- * Used only by unit tests.
- */
- @VisibleForTesting
- synchronized public void setRuntimeForTesting(Runtime runtime) {
- this.runtime = runtime;
- this.journalSet.setRuntimeForTesting(runtime);
- }
-
- /**
* Used only by tests.
*/
@VisibleForTesting
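The runtime.exit(1) calls removed above are replaced with org.apache.hadoop.util.ExitUtil.terminate(), which is why setRuntimeForTesting() can go away: tests can intercept the exit instead of swapping in a mock Runtime. A rough sketch of that test-side pattern, assuming ExitUtil's disableSystemExit() and an ExitException with a public status field (names taken from hadoop-common; the scenario itself is hypothetical):

    import org.apache.hadoop.util.ExitUtil;
    import org.apache.hadoop.util.ExitUtil.ExitException;

    public class TerminateSketch {
      public static void main(String[] args) {
        // With System.exit disabled, terminate() throws ExitException instead of
        // killing the JVM, so a test can assert on the intended exit status.
        ExitUtil.disableSystemExit();
        try {
          ExitUtil.terminate(1, "simulated journal failure");
        } catch (ExitException ee) {
          System.out.println("would have exited with status " + ee.status);
        }
      }
    }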
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java Fri Aug 3 19:00:15 2012
@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
-import static org.apache.hadoop.hdfs.server.common.Util.now;
+import static org.apache.hadoop.util.Time.now;
import java.io.FilterInputStream;
import java.io.IOException;
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java Fri Aug 3 19:00:15 2012
@@ -183,6 +183,7 @@ public abstract class FSEditLogOp {
return (T)this;
}
+ @Override
public String getPath() {
return path;
}
@@ -216,6 +217,7 @@ public abstract class FSEditLogOp {
return (T)this;
}
+ @Override
public Block[] getBlocks() {
return blocks;
}
@@ -409,6 +411,7 @@ public abstract class FSEditLogOp {
return (AddOp)cache.get(OP_ADD);
}
+ @Override
public boolean shouldCompleteLastBlock() {
return false;
}
@@ -431,6 +434,7 @@ public abstract class FSEditLogOp {
return (CloseOp)cache.get(OP_CLOSE);
}
+ @Override
public boolean shouldCompleteLastBlock() {
return true;
}
@@ -462,6 +466,7 @@ public abstract class FSEditLogOp {
return this;
}
+ @Override
public String getPath() {
return path;
}
@@ -471,6 +476,7 @@ public abstract class FSEditLogOp {
return this;
}
+ @Override
public Block[] getBlocks() {
return blocks;
}
@@ -2082,6 +2088,7 @@ public abstract class FSEditLogOp {
return (LogSegmentOp)cache.get(code);
}
+ @Override
public void readFields(DataInputStream in, int logVersion)
throws IOException {
// no data stored in these ops yet
@@ -2174,6 +2181,7 @@ public abstract class FSEditLogOp {
WritableFactories.setFactory
(BlockTwo.class,
new WritableFactory() {
+ @Override
public Writable newInstance() { return new BlockTwo(); }
});
}
@@ -2186,11 +2194,13 @@ public abstract class FSEditLogOp {
/////////////////////////////////////
// Writable
/////////////////////////////////////
+ @Override
public void write(DataOutput out) throws IOException {
out.writeLong(blkid);
out.writeLong(len);
}
+ @Override
public void readFields(DataInput in) throws IOException {
this.blkid = in.readLong();
this.len = in.readLong();
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java Fri Aug 3 19:00:15 2012
@@ -43,7 +43,7 @@ import org.apache.hadoop.hdfs.server.com
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.common.Storage.StorageState;
import org.apache.hadoop.hdfs.server.common.Util;
-import static org.apache.hadoop.hdfs.server.common.Util.now;
+import static org.apache.hadoop.util.Time.now;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
@@ -57,6 +57,7 @@ import org.apache.hadoop.hdfs.server.pro
import org.apache.hadoop.hdfs.util.Canceler;
import org.apache.hadoop.hdfs.util.MD5FileUtils;
import org.apache.hadoop.io.MD5Hash;
+import org.apache.hadoop.util.Time;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HAUtil;
@@ -664,7 +665,7 @@ public class FSImage implements Closeabl
final long checkpointTxnCount = conf.getLong(
DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY,
DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_DEFAULT);
- long checkpointAge = System.currentTimeMillis() - imageFile.lastModified();
+ long checkpointAge = Time.now() - imageFile.lastModified();
return (checkpointAge > checkpointPeriod * 1000) ||
(numEditsLoaded > checkpointTxnCount);
@@ -761,7 +762,7 @@ public class FSImage implements Closeabl
saver.save(newFile, compression);
MD5FileUtils.saveMD5File(dstFile, saver.getSavedDigest());
- storage.setMostRecentCheckpointInfo(txid, Util.now());
+ storage.setMostRecentCheckpointInfo(txid, Time.now());
}
/**
@@ -784,6 +785,7 @@ public class FSImage implements Closeabl
this.sd = sd;
}
+ @Override
public void run() {
try {
saveFSImage(context, sd);
@@ -797,6 +799,7 @@ public class FSImage implements Closeabl
}
}
+ @Override
public String toString() {
return "FSImageSaver for " + sd.getRoot() +
" of type " + sd.getStorageDirType();
@@ -1076,10 +1079,11 @@ public class FSImage implements Closeabl
// advertise it as such to other checkpointers
// from now on
if (txid > storage.getMostRecentCheckpointTxId()) {
- storage.setMostRecentCheckpointInfo(txid, Util.now());
+ storage.setMostRecentCheckpointInfo(txid, Time.now());
}
}
+ @Override
synchronized public void close() throws IOException {
if (editLog != null) { // 2NN doesn't have any edit log
getEditLog().close();
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java Fri Aug 3 19:00:15 2012
@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
-import static org.apache.hadoop.hdfs.server.common.Util.now;
+import static org.apache.hadoop.util.Time.now;
import java.io.DataInputStream;
import java.io.DataOutputStream;
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageTransactionalStorageInspector.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageTransactionalStorageInspector.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageTransactionalStorageInspector.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageTransactionalStorageInspector.java Fri Aug 3 19:00:15 2012
@@ -114,6 +114,7 @@ class FSImageTransactionalStorageInspect
*
* @throws FileNotFoundException if not images are found.
*/
+ @Override
FSImageFile getLatestImage() throws IOException {
if (foundImages.isEmpty()) {
throw new FileNotFoundException("No valid image files found");
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Fri Aug 3 19:00:15 2012
@@ -62,7 +62,7 @@ import static org.apache.hadoop.hdfs.DFS
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SUPPORT_APPEND_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SUPPORT_APPEND_KEY;
-import static org.apache.hadoop.hdfs.server.common.Util.now;
+import static org.apache.hadoop.util.Time.now;
import java.io.BufferedWriter;
import java.io.ByteArrayInputStream;
@@ -190,6 +190,7 @@ import org.apache.hadoop.security.token.
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.delegation.DelegationKey;
import org.apache.hadoop.util.Daemon;
+import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.VersionInfo;
import org.mortbay.util.ajax.JSON;
@@ -232,6 +233,7 @@ public class FSNamesystem implements Nam
private static final ThreadLocal<StringBuilder> auditBuffer =
new ThreadLocal<StringBuilder>() {
+ @Override
protected StringBuilder initialValue() {
return new StringBuilder();
}
@@ -392,12 +394,14 @@ public class FSNamesystem implements Nam
throws IOException {
if (namespaceDirs.size() == 1) {
- LOG.warn("Only one " + DFS_NAMENODE_NAME_DIR_KEY
- + " directory configured , beware data loss!");
+ LOG.warn("Only one image storage directory ("
+ + DFS_NAMENODE_NAME_DIR_KEY + ") configured. Beware of dataloss"
+ + " due to lack of redundant storage directories!");
}
if (namespaceEditsDirs.size() == 1) {
- LOG.warn("Only one " + DFS_NAMENODE_EDITS_DIR_KEY
- + " directory configured , beware data loss!");
+ LOG.warn("Only one namespace edits storage directory ("
+ + DFS_NAMENODE_EDITS_DIR_KEY + ") configured. Beware of dataloss"
+ + " due to lack of redundant storage directories!");
}
FSImage fsImage = new FSImage(conf, namespaceDirs, namespaceEditsDirs);
@@ -619,6 +623,7 @@ public class FSNamesystem implements Nam
LOG.info("Catching up to latest edits from old active before " +
"taking over writer role in edits logs.");
editLogTailer.catchupDuringFailover();
+ blockManager.setPostponeBlocksFromFuture(false);
LOG.info("Reprocessing replication and invalidation queues...");
blockManager.getDatanodeManager().markAllDatanodesStale();
@@ -702,6 +707,9 @@ public class FSNamesystem implements Nam
// During startup, we're already open for read.
dir.fsImage.editLog.initSharedJournalsForRead();
}
+
+ blockManager.setPostponeBlocksFromFuture(true);
+
editLogTailer = new EditLogTailer(this, conf);
editLogTailer.start();
if (standbyShouldCheckpoint) {
@@ -4137,6 +4145,7 @@ public class FSNamesystem implements Nam
/**
*/
+ @Override
public void run() {
while (fsRunning && (safeMode != null && !safeMode.canLeave())) {
try {
@@ -4241,6 +4250,7 @@ public class FSNamesystem implements Nam
* @param deltaSafe the change in number of safe blocks
* @param deltaTotal the change i nnumber of total blocks expected
*/
+ @Override
public void adjustSafeModeBlockTotals(int deltaSafe, int deltaTotal) {
// safeMode is volatile, and may be set to null at any time
SafeModeInfo safeMode = this.safeMode;
@@ -4963,6 +4973,7 @@ public class FSNamesystem implements Nam
block = b;
}
+ @Override
public String toString() {
return block.getBlockName() + "\t" + path;
}
@@ -5411,7 +5422,7 @@ public class FSNamesystem implements Nam
}
private long getLastContact(DatanodeDescriptor alivenode) {
- return (System.currentTimeMillis() - alivenode.getLastUpdate())/1000;
+ return (Time.now() - alivenode.getLastUpdate())/1000;
}
private long getDfsUsed(DatanodeDescriptor alivenode) {
@@ -5468,6 +5479,7 @@ public class FSNamesystem implements Nam
getDelegationTokenSecretManager().verifyToken(identifier, password);
}
+ @Override
public boolean isGenStampInFuture(long genStamp) {
return (genStamp > getGenerationStamp());
}
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java Fri Aug 3 19:00:15 2012
@@ -106,6 +106,7 @@ public class FileDataServlet extends Dfs
* GET http://<nn>:<port>/data[/<path>] HTTP/1.1
* }
*/
+ @Override
public void doGet(final HttpServletRequest request,
final HttpServletResponse response)
throws IOException {
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java Fri Aug 3 19:00:15 2012
@@ -339,6 +339,7 @@ class FileJournalManager implements Jour
final static Comparator<EditLogFile> COMPARE_BY_START_TXID
= new Comparator<EditLogFile>() {
+ @Override
public int compare(EditLogFile a, EditLogFile b) {
return ComparisonChain.start()
.compare(a.getFirstTxId(), b.getFirstTxId())
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java Fri Aug 3 19:00:15 2012
@@ -42,6 +42,7 @@ public class FsckServlet extends DfsServ
private static final long serialVersionUID = 1L;
/** Handle fsck request */
+ @Override
public void doGet(HttpServletRequest request, HttpServletResponse response
) throws IOException {
@SuppressWarnings("unchecked")
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java Fri Aug 3 19:00:15 2012
@@ -79,6 +79,7 @@ public class GetImageServlet extends Htt
private static Set<Long> currentlyDownloadingCheckpoints =
Collections.<Long>synchronizedSet(new HashSet<Long>());
+ @Override
public void doGet(final HttpServletRequest request,
final HttpServletResponse response
) throws ServletException, IOException {
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java Fri Aug 3 19:00:15 2012
@@ -65,6 +65,7 @@ class INodeDirectory extends INode {
/**
* Check whether it's a directory
*/
+ @Override
public boolean isDirectory() {
return true;
}
@@ -422,6 +423,7 @@ class INodeDirectory extends INode {
return children;
}
+ @Override
int collectSubtreeBlocksAndClear(List<Block> v) {
int total = 1;
if (children == null) {
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java Fri Aug 3 19:00:15 2012
@@ -71,6 +71,7 @@ class INodeDirectoryWithQuota extends IN
/** Get this directory's namespace quota
* @return this directory's namespace quota
*/
+ @Override
long getNsQuota() {
return nsQuota;
}
@@ -78,6 +79,7 @@ class INodeDirectoryWithQuota extends IN
/** Get this directory's diskspace quota
* @return this directory's diskspace quota
*/
+ @Override
long getDsQuota() {
return dsQuota;
}
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java Fri Aug 3 19:00:15 2012
@@ -59,10 +59,12 @@ public class INodeFile extends INode imp
* Since this is a file,
* the {@link FsAction#EXECUTE} action, if any, is ignored.
*/
+ @Override
void setPermission(FsPermission permission) {
super.setPermission(permission.applyUMask(UMASK));
}
+ @Override
boolean isDirectory() {
return false;
}
@@ -138,6 +140,7 @@ public class INodeFile extends INode imp
this.blocks[idx] = blk;
}
+ @Override
int collectSubtreeBlocksAndClear(List<Block> v) {
parent = null;
if(blocks != null && v != null) {
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java Fri Aug 3 19:00:15 2012
@@ -147,6 +147,7 @@ class INodeFileUnderConstruction extends
* Convert the last block of the file to an under-construction block.
* Set its locations.
*/
+ @Override
public BlockInfoUnderConstruction setLastBlock(BlockInfo lastBlock,
DatanodeDescriptor[] targets)
throws IOException {
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java Fri Aug 3 19:00:15 2012
@@ -40,6 +40,7 @@ public class INodeSymlink extends INode
setAccessTime(atime);
}
+ @Override
public boolean isLink() {
return true;
}
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java Fri Aug 3 19:00:15 2012
@@ -83,6 +83,7 @@ public interface JournalManager extends
/**
* Close the journal manager, freeing any resources it may hold.
*/
+ @Override
void close() throws IOException;
/**
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java Fri Aug 3 19:00:15 2012
@@ -26,11 +26,13 @@ import java.util.LinkedList;
import java.util.List;
import java.util.PriorityQueue;
import java.util.SortedSet;
-import java.util.TreeSet;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
+
+import static org.apache.hadoop.util.ExitUtil.terminate;
+
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
@@ -165,17 +167,11 @@ public class JournalSet implements Journ
private List<JournalAndStream> journals = Lists.newArrayList();
final int minimumRedundantJournals;
- private volatile Runtime runtime = Runtime.getRuntime();
JournalSet(int minimumRedundantResources) {
this.minimumRedundantJournals = minimumRedundantResources;
}
- @VisibleForTesting
- public void setRuntimeForTesting(Runtime runtime) {
- this.runtime = runtime;
- }
-
@Override
public EditLogOutputStream startLogSegment(final long txId) throws IOException {
mapJournalsAndReportErrors(new JournalClosure() {
@@ -323,7 +319,7 @@ public class JournalSet implements Journ
closure.apply(jas);
} catch (Throwable t) {
if (jas.isRequired()) {
- String msg = "Error: " + status + " failed for required journal ("
+ final String msg = "Error: " + status + " failed for required journal ("
+ jas + ")";
LOG.fatal(msg, t);
// If we fail on *any* of the required journals, then we must not
@@ -335,8 +331,7 @@ public class JournalSet implements Journ
// roll of edits etc. All of them go through this common function
// where the isRequired() check is made. Applying exit policy here
// to catch all code paths.
- runtime.exit(1);
- throw new IOException(msg);
+ terminate(1, msg);
} else {
LOG.error("Error: " + status + " failed for (journal " + jas + ")", t);
badJAS.add(jas);
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java Fri Aug 3 19:00:15 2012
@@ -39,7 +39,7 @@ import org.apache.hadoop.util.Daemon;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
-import static org.apache.hadoop.hdfs.server.common.Util.now;
+import static org.apache.hadoop.util.Time.now;
/**
* LeaseManager does the lease housekeeping for writing on files.
@@ -390,6 +390,7 @@ public class LeaseManager {
final String name = getClass().getSimpleName();
/** Check leases periodically. */
+ @Override
public void run() {
for(; shouldRunMonitor && fsnamesystem.isRunning(); ) {
try {
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ListPathsServlet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ListPathsServlet.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ListPathsServlet.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ListPathsServlet.java Fri Aug 3 19:00:15 2012
@@ -54,6 +54,7 @@ public class ListPathsServlet extends Df
public static final ThreadLocal<SimpleDateFormat> df =
new ThreadLocal<SimpleDateFormat>() {
+ @Override
protected SimpleDateFormat initialValue() {
return HftpFileSystem.getDateFormat();
}
@@ -128,6 +129,7 @@ public class ListPathsServlet extends Df
* </listing>
* }
*/
+ @Override
public void doGet(HttpServletRequest request, HttpServletResponse response)
throws ServletException, IOException {
final PrintWriter out = response.getWriter();
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java Fri Aug 3 19:00:15 2012
@@ -53,6 +53,7 @@ import org.apache.hadoop.hdfs.util.Persi
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.DNS;
+import org.apache.hadoop.util.Time;
import com.google.common.base.Preconditions;
import com.google.common.annotations.VisibleForTesting;
@@ -100,10 +101,12 @@ public class NNStorage extends Storage i
EDITS,
IMAGE_AND_EDITS;
+ @Override
public StorageDirType getStorageDirType() {
return this;
}
+ @Override
public boolean isOfType(StorageDirType type) {
if ((this == IMAGE_AND_EDITS) && (type == IMAGE || type == EDITS))
return true;
@@ -996,7 +999,7 @@ public class NNStorage extends Storage i
}
int rand = DFSUtil.getSecureRandom().nextInt(Integer.MAX_VALUE);
- String bpid = "BP-" + rand + "-"+ ip + "-" + System.currentTimeMillis();
+ String bpid = "BP-" + rand + "-"+ ip + "-" + Time.now();
return bpid;
}
@@ -1076,7 +1079,7 @@ public class NNStorage extends Storage i
}
if (multipleLV) {
throw new IOException(
- "Storage directories containe multiple layout versions: "
+ "Storage directories contain multiple layout versions: "
+ layoutVersions);
}
// If the storage directories are with the new layout version
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Fri Aug 3 19:00:15 2012
@@ -81,6 +81,9 @@ import org.apache.hadoop.security.author
import org.apache.hadoop.tools.GetUserMappingsProtocol;
import org.apache.hadoop.util.ServicePlugin;
import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.ExitUtil.ExitException;
+
+import static org.apache.hadoop.util.ExitUtil.terminate;
import static org.apache.hadoop.util.ToolRunner.confirmPrompt;
import com.google.common.annotations.VisibleForTesting;
@@ -226,7 +229,6 @@ public class NameNode {
private final boolean haEnabled;
private final HAContext haContext;
protected boolean allowStaleStandbyReads;
- private Runtime runtime = Runtime.getRuntime();
/** httpServer */
@@ -1089,29 +1091,29 @@ public class NameNode {
case FORMAT: {
boolean aborted = format(conf, startOpt.getForceFormat(),
startOpt.getInteractiveFormat());
- System.exit(aborted ? 1 : 0);
+ terminate(aborted ? 1 : 0);
return null; // avoid javac warning
}
case GENCLUSTERID: {
System.err.println("Generating new cluster id:");
System.out.println(NNStorage.newClusterID());
- System.exit(0);
+ terminate(0);
return null;
}
case FINALIZE: {
boolean aborted = finalize(conf, true);
- System.exit(aborted ? 1 : 0);
+ terminate(aborted ? 1 : 0);
return null; // avoid javac warning
}
case BOOTSTRAPSTANDBY: {
String toolArgs[] = Arrays.copyOfRange(argv, 1, argv.length);
int rc = BootstrapStandby.run(toolArgs, conf);
- System.exit(rc);
+ terminate(rc);
return null; // avoid warning
}
case INITIALIZESHAREDEDITS: {
boolean aborted = initializeSharedEdits(conf, false, true);
- System.exit(aborted ? 1 : 0);
+ terminate(aborted ? 1 : 0);
return null; // avoid warning
}
case BACKUP:
@@ -1124,9 +1126,10 @@ public class NameNode {
NameNode.doRecovery(startOpt, conf);
return null;
}
- default:
+ default: {
DefaultMetricsSystem.initialize("NameNode");
return new NameNode(conf);
+ }
}
}
@@ -1189,8 +1192,8 @@ public class NameNode {
if (namenode != null)
namenode.join();
} catch (Throwable e) {
- LOG.error("Exception in namenode join", e);
- System.exit(-1);
+ LOG.fatal("Exception in namenode join", e);
+ terminate(1, e);
}
}
@@ -1259,11 +1262,6 @@ public class NameNode {
}
return state.getServiceState();
}
-
- @VisibleForTesting
- public synchronized void setRuntimeForTesting(Runtime runtime) {
- this.runtime = runtime;
- }
/**
* Shutdown the NN immediately in an ungraceful way. Used when it would be
@@ -1272,10 +1270,10 @@ public class NameNode {
*
* @param t exception which warrants the shutdown. Printed to the NN log
* before exit.
- * @throws ServiceFailedException thrown only for testing.
+ * @throws ExitException thrown only for testing.
*/
private synchronized void doImmediateShutdown(Throwable t)
- throws ServiceFailedException {
+ throws ExitException {
String message = "Error encountered requiring NN shutdown. " +
"Shutting down immediately.";
try {
@@ -1283,9 +1281,7 @@ public class NameNode {
} catch (Throwable ignored) {
// This is unlikely to happen, but there's nothing we can do if it does.
}
- runtime.exit(1);
- // This code is only reached during testing, when runtime is stubbed out.
- throw new ServiceFailedException(message, t);
+ terminate(1, t);
}
/**
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeResourceChecker.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeResourceChecker.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeResourceChecker.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeResourceChecker.java Fri Aug 3 19:00:15 2012
@@ -119,6 +119,7 @@ public class NameNodeResourceChecker {
Collection<URI> localEditDirs = Collections2.filter(
FSNamesystem.getNamespaceEditsDirs(conf),
new Predicate<URI>() {
+ @Override
public boolean apply(URI input) {
if (input.getScheme().equals(NNStorage.LOCAL_URI_SCHEME)) {
return true;
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java Fri Aug 3 19:00:15 2012
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
+import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.OutputStream;
import java.io.PrintWriter;
@@ -35,6 +36,7 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.hdfs.BlockReader;
import org.apache.hadoop.hdfs.BlockReaderFactory;
import org.apache.hadoop.hdfs.DFSClient;
@@ -51,7 +53,9 @@ import org.apache.hadoop.hdfs.server.com
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.NodeBase;
+import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.Time;
import com.google.common.annotations.VisibleForTesting;
@@ -103,6 +107,12 @@ public class NamenodeFsck {
private boolean showRacks = false;
private boolean showCorruptFileBlocks = false;
+ /**
+ * True if we encountered an internal error during FSCK, such as not being
+ * able to delete a corrupt file.
+ */
+ private boolean internalError = false;
+
/**
* True if the user specified the -move option.
*
@@ -173,7 +183,7 @@ public class NamenodeFsck {
* Check files on DFS, starting from the indicated path.
*/
public void fsck() {
- final long startTime = System.currentTimeMillis();
+ final long startTime = Time.now();
try {
String msg = "FSCK started by " + UserGroupInformation.getCurrentUser()
+ " from " + remoteAddress + " for path " + path + " at " + new Date();
@@ -198,7 +208,14 @@ public class NamenodeFsck {
out.println(" Number of racks:\t\t" + networktopology.getNumOfRacks());
out.println("FSCK ended at " + new Date() + " in "
- + (System.currentTimeMillis() - startTime + " milliseconds"));
+ + (Time.now() - startTime + " milliseconds"));
+
+ // If there were internal errors during the fsck operation, we want to
+ // return FAILURE_STATUS, even if those errors were not immediately
+ // fatal. Otherwise many unit tests will pass even when there are bugs.
+ if (internalError) {
+ throw new IOException("fsck encountered internal errors!");
+ }
// DFSck client scans for the string HEALTHY/CORRUPT to check the status
// of file system and return appropriate code. Changing the output
@@ -217,7 +234,7 @@ public class NamenodeFsck {
String errMsg = "Fsck on path '" + path + "' " + FAILURE_STATUS;
LOG.warn(errMsg, e);
out.println("FSCK ended at " + new Date() + " in "
- + (System.currentTimeMillis() - startTime + " milliseconds"));
+ + (Time.now() - startTime + " milliseconds"));
out.println(e.getMessage());
out.print("\n\n" + errMsg);
} finally {
@@ -388,20 +405,11 @@ public class NamenodeFsck {
+ " blocks of total size " + missize + " B.");
}
res.corruptFiles++;
- try {
- if (doMove) {
- if (!isOpen) {
- copyBlocksToLostFound(parent, file, blocks);
- }
- }
- if (doDelete) {
- if (!isOpen) {
- LOG.warn("\n - deleting corrupted file " + path);
- namenode.getRpcServer().delete(path, true);
- }
- }
- } catch (IOException e) {
- LOG.error("error processing " + path + ": " + e.toString());
+ if (isOpen) {
+ LOG.info("Fsck: ignoring open file " + path);
+ } else {
+ if (doMove) copyBlocksToLostFound(parent, file, blocks);
+ if (doDelete) deleteCorruptedFile(path);
}
}
if (showFiles) {
@@ -415,29 +423,52 @@ public class NamenodeFsck {
}
}
}
+
+ private void deleteCorruptedFile(String path) {
+ try {
+ namenode.getRpcServer().delete(path, true);
+ LOG.info("Fsck: deleted corrupt file " + path);
+ } catch (Exception e) {
+ LOG.error("Fsck: error deleting corrupted file " + path, e);
+ internalError = true;
+ }
+ }
+
+ boolean hdfsPathExists(String path)
+ throws AccessControlException, UnresolvedLinkException, IOException {
+ try {
+ HdfsFileStatus hfs = namenode.getRpcServer().getFileInfo(path);
+ return (hfs != null);
+ } catch (FileNotFoundException e) {
+ return false;
+ }
+ }
private void copyBlocksToLostFound(String parent, HdfsFileStatus file,
LocatedBlocks blocks) throws IOException {
final DFSClient dfs = new DFSClient(NameNode.getAddress(conf), conf);
+ final String fullName = file.getFullName(parent);
+ OutputStream fos = null;
try {
- if (!lfInited) {
- lostFoundInit(dfs);
- }
- if (!lfInitedOk) {
- return;
- }
- String fullName = file.getFullName(parent);
- String target = lostFound + fullName;
- String errmsg = "Failed to move " + fullName + " to /lost+found";
- try {
+ if (!lfInited) {
+ lostFoundInit(dfs);
+ }
+ if (!lfInitedOk) {
+ throw new IOException("failed to initialize lost+found");
+ }
+ String target = lostFound + fullName;
+ if (hdfsPathExists(target)) {
+ LOG.warn("Fsck: can't copy the remains of " + fullName + " to " +
+ "lost+found, because " + target + " already exists.");
+ return;
+ }
if (!namenode.getRpcServer().mkdirs(
target, file.getPermission(), true)) {
- LOG.warn(errmsg);
- return;
+ throw new IOException("failed to create directory " + target);
}
// create chains
int chain = 0;
- OutputStream fos = null;
+ boolean copyError = false;
for (LocatedBlock lBlk : blocks.getLocatedBlocks()) {
LocatedBlock lblock = lBlk;
DatanodeInfo[] locs = lblock.getLocations();
@@ -451,32 +482,38 @@ public class NamenodeFsck {
}
if (fos == null) {
fos = dfs.create(target + "/" + chain, true);
- if (fos != null)
- chain++;
- else {
- throw new IOException(errmsg + ": could not store chain " + chain);
+ if (fos == null) {
+ throw new IOException("Failed to copy " + fullName +
+ " to /lost+found: could not store chain " + chain);
}
+ chain++;
}
// copy the block. It's a pity it's not abstracted from DFSInputStream ...
try {
copyBlock(dfs, lblock, fos);
} catch (Exception e) {
- e.printStackTrace();
- // something went wrong copying this block...
- LOG.warn(" - could not copy block " + lblock.getBlock() + " to " + target);
+ LOG.error("Fsck: could not copy block " + lblock.getBlock() +
+ " to " + target, e);
fos.flush();
fos.close();
fos = null;
+ internalError = true;
+ copyError = true;
}
}
- if (fos != null) fos.close();
- LOG.warn("\n - copied corrupted file " + fullName + " to /lost+found");
- } catch (Exception e) {
- e.printStackTrace();
- LOG.warn(errmsg + ": " + e.getMessage());
- }
+ if (copyError) {
+ LOG.warn("Fsck: there were errors copying the remains of the " +
+ "corrupted file " + fullName + " to /lost+found");
+ } else {
+ LOG.info("Fsck: copied the remains of the corrupted file " +
+ fullName + " to /lost+found");
+ }
+ } catch (Exception e) {
+ LOG.error("copyBlocksToLostFound: error processing " + fullName, e);
+ internalError = true;
} finally {
+ if (fos != null) fos.close();
dfs.close();
}
}
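
The rewritten copyBlocksToLostFound above follows a salvage pattern: write whatever blocks are still readable into numbered "chain" files under /lost+found, record a per-block failure in a flag rather than aborting the whole copy, and close the open stream in a finally block. A self-contained sketch of the same pattern on local files (plain java.io, nothing HDFS-specific, all names illustrative) could look like this:

    import java.io.*;

    // Illustration of the salvage pattern in the patch above, applied to a local
    // file instead of HDFS blocks. Readable chunks go into numbered "chain" files
    // under lost+found/<name>; a failed write is recorded, not fatal.
    public class LostFoundCopy {
      public static boolean copyToLostFound(File src, File lostFoundDir) throws IOException {
        File target = new File(lostFoundDir, src.getName());
        if (target.exists()) {
          System.err.println("skipping: " + target + " already exists");
          return false;
        }
        if (!target.mkdirs()) {
          throw new IOException("failed to create directory " + target);
        }
        boolean copyError = false;
        byte[] buf = new byte[64 * 1024];
        int chain = 0;
        try (InputStream in = new FileInputStream(src)) {
          OutputStream fos = null;
          try {
            int n;
            while ((n = in.read(buf)) > 0) {
              if (fos == null) {
                fos = new FileOutputStream(new File(target, Integer.toString(chain)));
                chain++;                // start a new chain, as in the patch
              }
              try {
                fos.write(buf, 0, n);   // analogous to copyBlock(dfs, lblock, fos)
              } catch (IOException e) {
                fos.close();            // close the bad chain, resume with a new one
                fos = null;
                copyError = true;       // remember the failure, keep going
              }
            }
          } finally {
            if (fos != null) fos.close();
          }
        }
        return !copyError;
      }
    }
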
@@ -503,7 +540,7 @@ public class NamenodeFsck {
targetAddr = NetUtils.createSocketAddr(chosenNode.getXferAddr());
} catch (IOException ie) {
if (failures >= DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT) {
- throw new IOException("Could not obtain block " + lblock);
+ throw new IOException("Could not obtain block " + lblock, ie);
}
LOG.info("Could not obtain block from any node: " + ie);
try {
@@ -515,7 +552,7 @@ public class NamenodeFsck {
continue;
}
try {
- s = new Socket();
+ s = NetUtils.getDefaultSocketFactory(conf).createSocket();
s.connect(targetAddr, HdfsServerConstants.READ_TIMEOUT);
s.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
@@ -555,7 +592,7 @@ public class NamenodeFsck {
", but datanode returned " +bytesRead+" bytes");
}
} catch (Exception e) {
- e.printStackTrace();
+ LOG.error("Error reading block", e);
success = false;
} finally {
try {s.close(); } catch (Exception e1) {}
@@ -606,6 +643,7 @@ public class NamenodeFsck {
if (lostFound == null) {
LOG.warn("Cannot initialize /lost+found .");
lfInitedOk = false;
+ internalError = true;
}
}
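
The new internalError flag gives fsck a "record now, fail at the end" policy: problems such as a failed delete or a failed copy to /lost+found no longer abort the scan, but they do force the run as a whole to report FAILURE_STATUS, so tests cannot pass silently. A minimal sketch of that policy, with illustrative names rather than the real fsck types:

    // Sketch of the fail-late pattern introduced above; names are illustrative.
    public class FailLateChecker {
      private boolean internalError = false;

      public void runAll(Iterable<Runnable> checks) throws Exception {
        for (Runnable check : checks) {
          try {
            check.run();
          } catch (RuntimeException e) {
            // Log and keep scanning the remaining items instead of aborting.
            System.err.println("non-fatal error: " + e);
            internalError = true;
          }
        }
        // Surface the accumulated failures once the full scan has finished,
        // so callers (and unit tests) still see an overall failure status.
        if (internalError) {
          throw new Exception("run encountered internal errors");
        }
      }
    }
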
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java Fri Aug 3 19:00:15 2012
@@ -58,6 +58,7 @@ import org.apache.hadoop.security.UserGr
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.ServletUtil;
import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.VersionInfo;
import org.znerd.xmlenc.XMLOutputter;
@@ -368,6 +369,7 @@ class NamenodeJspHelper {
final UserGroupInformation ugi) throws IOException, InterruptedException {
Token<DelegationTokenIdentifier> token = ugi
.doAs(new PrivilegedExceptionAction<Token<DelegationTokenIdentifier>>() {
+ @Override
public Token<DelegationTokenIdentifier> run() throws IOException {
return nn.getDelegationToken(new Text(ugi.getUserName()));
}
@@ -487,7 +489,7 @@ class NamenodeJspHelper {
long decommRequestTime = d.decommissioningStatus.getStartTime();
long timestamp = d.getLastUpdate();
- long currentTime = System.currentTimeMillis();
+ long currentTime = Time.now();
long hoursSinceDecommStarted = (currentTime - decommRequestTime)/3600000;
long remainderMinutes = ((currentTime - decommRequestTime)/60000) % 60;
out.print("<td class=\"lastcontact\"> "
@@ -534,7 +536,7 @@ class NamenodeJspHelper {
String adminState = d.getAdminState().toString();
long timestamp = d.getLastUpdate();
- long currentTime = System.currentTimeMillis();
+ long currentTime = Time.now();
long bpUsed = d.getBlockPoolUsed();
String percentBpUsed = StringUtils.limitDecimalTo2(d
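
NamenodeJspHelper now reads the wall clock through Time.now(), which this patch uses as a one-for-one replacement for System.currentTimeMillis(); the elapsed-time arithmetic around it is unchanged. For reference, the hours/minutes breakdown used above works like this (plain Java, illustrative values):

    public class DecommElapsed {
      public static void main(String[] args) {
        // Pretend decommissioning started 5h42m ago; currentTime would be
        // Time.now() in the patched code, assumed to be wall-clock milliseconds.
        long currentTime = System.currentTimeMillis();
        long decommRequestTime = currentTime - (5 * 3600000L + 42 * 60000L);
        long hoursSinceDecommStarted = (currentTime - decommRequestTime) / 3600000;
        long remainderMinutes = ((currentTime - decommRequestTime) / 60000) % 60;
        System.out.println(hoursSinceDecommStarted + " hrs " + remainderMinutes + " mins"); // 5 hrs 42 mins
      }
    }
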
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RenewDelegationTokenServlet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RenewDelegationTokenServlet.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RenewDelegationTokenServlet.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RenewDelegationTokenServlet.java Fri Aug 3 19:00:15 2012
@@ -68,6 +68,7 @@ public class RenewDelegationTokenServlet
try {
long result = ugi.doAs(new PrivilegedExceptionAction<Long>() {
+ @Override
public Long run() throws Exception {
return nn.getRpcServer().renewDelegationToken(token);
}
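
Both delegation-token servlets run their single NameNode call through UserGroupInformation.doAs, and this patch adds @Override to the anonymous PrivilegedExceptionAction. A minimal, self-contained sketch of that pattern; the run() body here is a placeholder, not the real renewDelegationToken call:

    import java.io.IOException;
    import java.security.PrivilegedExceptionAction;
    import org.apache.hadoop.security.UserGroupInformation;

    // Sketch of the doAs pattern used by the servlets above.
    public class DoAsExample {
      public static void main(String[] args) throws IOException, InterruptedException {
        final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
        long result = ugi.doAs(new PrivilegedExceptionAction<Long>() {
          @Override   // the annotation added throughout this patch
          public Long run() throws Exception {
            return 0L;   // placeholder for the single NameNode RPC
          }
        });
        System.out.println("ran as " + ugi.getShortUserName() + ", result=" + result);
      }
    }
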
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java Fri Aug 3 19:00:15 2012
@@ -55,6 +55,9 @@ import org.apache.hadoop.hdfs.server.com
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.common.Storage.StorageState;
+
+import static org.apache.hadoop.util.ExitUtil.terminate;
+
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
@@ -70,6 +73,7 @@ import org.apache.hadoop.security.author
import org.apache.hadoop.util.Daemon;
import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Time;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ImmutableList;
@@ -96,7 +100,7 @@ public class SecondaryNameNode implement
public static final Log LOG =
LogFactory.getLog(SecondaryNameNode.class.getName());
- private final long starttime = System.currentTimeMillis();
+ private final long starttime = Time.now();
private volatile long lastCheckpointTime = 0;
private String fsName;
@@ -278,6 +282,7 @@ public class SecondaryNameNode implement
}
}
+ @Override
public void run() {
SecurityUtil.doAsLoginUserOrFatal(
new PrivilegedAction<Object>() {
@@ -312,7 +317,7 @@ public class SecondaryNameNode implement
if(UserGroupInformation.isSecurityEnabled())
UserGroupInformation.getCurrentUser().reloginFromKeytab();
- long now = System.currentTimeMillis();
+ long now = Time.now();
if (shouldCheckpointBasedOnCount() ||
now >= lastCheckpointTime + 1000 * checkpointConf.getPeriod()) {
@@ -323,9 +328,9 @@ public class SecondaryNameNode implement
LOG.error("Exception in doCheckpoint", e);
e.printStackTrace();
} catch (Throwable e) {
- LOG.error("Throwable Exception in doCheckpoint", e);
+ LOG.fatal("Throwable Exception in doCheckpoint", e);
e.printStackTrace();
- Runtime.getRuntime().exit(-1);
+ terminate(1, e);
}
}
}
@@ -432,18 +437,16 @@ public class SecondaryNameNode implement
// Returns a token that would be used to upload the merged image.
CheckpointSignature sig = namenode.rollEditLog();
- // Make sure we're talking to the same NN!
- if (checkpointImage.getNamespaceID() != 0) {
- // If the image actually has some data, make sure we're talking
- // to the same NN as we did before.
- sig.validateStorageInfo(checkpointImage);
- } else {
- // if we're a fresh 2NN, just take the storage info from the server
- // we first talk to.
+ if ((checkpointImage.getNamespaceID() == 0) ||
+ (sig.isSameCluster(checkpointImage) &&
+ !sig.storageVersionMatches(checkpointImage.getStorage()))) {
+ // if we're a fresh 2NN, or if we're on the same cluster and our storage
+ // needs an upgrade, just take the storage info from the server.
dstStorage.setStorageInfo(sig);
dstStorage.setClusterID(sig.getClusterID());
dstStorage.setBlockPoolID(sig.getBlockpoolID());
}
+ sig.validateStorageInfo(checkpointImage);
// error simulation code for junit test
CheckpointFaultInjector.getInstance().afterSecondaryCallsRollEditLog();
@@ -517,7 +520,7 @@ public class SecondaryNameNode implement
//
// This is an error returned by the hadoop server. Print
// out the first line of the error message, ignore the stack trace.
- exitCode = -1;
+ exitCode = 1;
try {
String[] content;
content = e.getLocalizedMessage().split("\n");
@@ -529,7 +532,7 @@ public class SecondaryNameNode implement
//
// IO exception encountered locally.
//
- exitCode = -1;
+ exitCode = 1;
LOG.error(cmd + ": " + e.getLocalizedMessage());
} finally {
// Does the RPC connection need to be closed?
@@ -557,7 +560,8 @@ public class SecondaryNameNode implement
public static void main(String[] argv) throws Exception {
CommandLineOpts opts = SecondaryNameNode.parseArgs(argv);
if (opts == null) {
- System.exit(-1);
+ LOG.fatal("Failed to parse options");
+ terminate(1);
}
StringUtils.startupShutdownMessage(SecondaryNameNode.class, argv, LOG);
@@ -567,12 +571,12 @@ public class SecondaryNameNode implement
secondary = new SecondaryNameNode(tconf, opts);
} catch (IOException ioe) {
LOG.fatal("Failed to start secondary namenode", ioe);
- System.exit(-1);
+ terminate(1);
}
- if (opts.getCommand() != null) {
+ if (opts != null && opts.getCommand() != null) {
int ret = secondary.processStartupCommand(opts);
- System.exit(ret);
+ terminate(ret);
}
// Create a never-ending daemon
@@ -697,7 +701,7 @@ public class SecondaryNameNode implement
/**
* Analyze checkpoint directories.
* Create directories if they do not exist.
- * Recover from an unsuccessful checkpoint is necessary.
+ * Recover from an unsuccessful checkpoint if necessary.
*
* @throws IOException
*/
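
SecondaryNameNode now exits through ExitUtil.terminate instead of Runtime.exit/System.exit and switches to positive exit codes. The practical benefit, assuming the usual ExitUtil test hooks (disableSystemExit() and the ExitException it throws in that mode), is that tests can observe the exit status without the JVM going away:

    import org.apache.hadoop.util.ExitUtil;

    // Sketch of why terminate(..) is friendlier to tests than System.exit(..).
    public class TerminateExample {
      public static void main(String[] args) {
        ExitUtil.disableSystemExit();          // done in a JUnit test, not in production
        try {
          ExitUtil.terminate(1, "simulated fatal error");
        } catch (ExitUtil.ExitException ee) {
          // The JVM is still alive; the test can assert on the recorded status.
          System.out.println("would have exited with status " + ee.status);
        }
      }
    }
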
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StreamFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StreamFile.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StreamFile.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StreamFile.java Fri Aug 3 19:00:15 2012
@@ -59,6 +59,7 @@ public class StreamFile extends DfsServl
return DatanodeJspHelper.getDFSClient(request, datanode, conf, ugi);
}
+ @Override
@SuppressWarnings("unchecked")
public void doGet(HttpServletRequest request, HttpServletResponse response)
throws ServletException, IOException {
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java Fri Aug 3 19:00:15 2012
@@ -33,6 +33,7 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.util.Time;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.common.StorageErrorReporter;
import org.apache.hadoop.hdfs.server.common.Storage;
@@ -216,7 +217,7 @@ public class TransferFsImage {
public static MD5Hash doGetUrl(URL url, List<File> localPaths,
Storage dstStorage, boolean getChecksum) throws IOException {
- long startTime = Util.monotonicNow();
+ long startTime = Time.monotonicNow();
HttpURLConnection connection = (HttpURLConnection)
SecurityUtil.openSecureHttpConnection(url);
@@ -323,7 +324,7 @@ public class TransferFsImage {
}
}
double xferSec = Math.max(
- ((float)(Util.monotonicNow() - startTime)) / 1000.0, 0.001);
+ ((float)(Time.monotonicNow() - startTime)) / 1000.0, 0.001);
long xferKb = received / 1024;
LOG.info(String.format("Transfer took %.2fs at %.2f KB/s",
xferSec, xferKb / xferSec));
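
The transfer timing switches to Time.monotonicNow(), a clock meant for measuring durations. The sketch below assumes it wraps System.nanoTime, which is immune to NTP steps and manual clock changes, unlike wall-clock time:

    public class MonotonicTiming {
      public static void main(String[] args) throws InterruptedException {
        // A monotonic clock never jumps backwards, so elapsed-time measurements
        // stay sane even if the wall clock is adjusted mid-transfer.
        long start = System.nanoTime();
        Thread.sleep(250);                        // stand-in for the image transfer
        double xferSec = Math.max((System.nanoTime() - start) / 1e9, 0.001);
        System.out.printf("Transfer took %.2fs%n", xferSec);
      }
    }
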
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/UpgradeManagerNamenode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/UpgradeManagerNamenode.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/UpgradeManagerNamenode.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/UpgradeManagerNamenode.java Fri Aug 3 19:00:15 2012
@@ -38,6 +38,7 @@ import org.apache.hadoop.hdfs.server.pro
* and updates its status.
*/
class UpgradeManagerNamenode extends UpgradeManager {
+ @Override
public HdfsServerConstants.NodeType getType() {
return HdfsServerConstants.NodeType.NAME_NODE;
}
@@ -55,6 +56,7 @@ class UpgradeManagerNamenode extends Upg
* @return true if distributed upgrade is required or false otherwise
* @throws IOException
*/
+ @Override
public synchronized boolean startUpgrade() throws IOException {
if(!upgradeState) {
initializeUpgrade();
@@ -108,6 +110,7 @@ class UpgradeManagerNamenode extends Upg
return reply;
}
+ @Override
public synchronized void completeUpgrade() throws IOException {
// set and write new upgrade state into disk
setUpgradeState(false, HdfsConstants.LAYOUT_VERSION);
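
Much of the churn in this patch is adding @Override to methods that implement an inherited declaration, as in getType() and startUpgrade() above. The annotation costs nothing at runtime but turns a signature mismatch into a compile error; the interface and class names below are illustrative only:

    // Why @Override matters: with the annotation, a method that does not in fact
    // override anything is rejected at compile time instead of silently becoming
    // an unrelated overload.
    interface Upgradeable {
      boolean startUpgrade();
    }

    class GoodImpl implements Upgradeable {
      @Override
      public boolean startUpgrade() { return true; }      // compiles: genuine override
    }

    class BadImpl implements Upgradeable {
      // @Override
      // public boolean startupgrade() { return true; }   // with @Override: compile error
      public boolean startUpgrade() { return false; }
    }
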
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/UpgradeObjectNamenode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/UpgradeObjectNamenode.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/UpgradeObjectNamenode.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/UpgradeObjectNamenode.java Fri Aug 3 19:00:15 2012
@@ -44,12 +44,14 @@ public abstract class UpgradeObjectNamen
public abstract UpgradeCommand processUpgradeCommand(UpgradeCommand command
) throws IOException;
+ @Override
public HdfsServerConstants.NodeType getType() {
return HdfsServerConstants.NodeType.NAME_NODE;
}
/**
*/
+ @Override
public UpgradeCommand startUpgrade() throws IOException {
// broadcast that data-nodes must start the upgrade
return new UpgradeCommand(UpgradeCommand.UC_ACTION_START_UPGRADE,
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java Fri Aug 3 19:00:15 2012
@@ -86,6 +86,7 @@ public class BootstrapStandby implements
static final int ERR_CODE_ALREADY_FORMATTED = 5;
static final int ERR_CODE_LOGS_UNAVAILABLE = 6;
+ @Override
public int run(String[] args) throws Exception {
parseArgs(args);
parseConfAndFindOtherNN();
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java Fri Aug 3 19:00:15 2012
@@ -43,7 +43,8 @@ import org.apache.hadoop.hdfs.server.pro
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.security.SecurityUtil;
-import static org.apache.hadoop.hdfs.server.common.Util.now;
+import static org.apache.hadoop.util.Time.now;
+import static org.apache.hadoop.util.ExitUtil.terminate;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
@@ -64,8 +65,6 @@ public class EditLogTailer {
private final Configuration conf;
private final FSNamesystem namesystem;
private FSEditLog editLog;
-
- private volatile Runtime runtime = Runtime.getRuntime();
private InetSocketAddress activeAddr;
private NamenodeProtocol cachedActiveProxy = null;
@@ -169,11 +168,6 @@ public class EditLogTailer {
this.editLog = editLog;
}
- @VisibleForTesting
- synchronized void setRuntime(Runtime runtime) {
- this.runtime = runtime;
- }
-
public void catchupDuringFailover() throws IOException {
Preconditions.checkState(tailerThread == null ||
!tailerThread.isAlive(),
@@ -185,7 +179,8 @@ public class EditLogTailer {
}
}
- private void doTailEdits() throws IOException, InterruptedException {
+ @VisibleForTesting
+ void doTailEdits() throws IOException, InterruptedException {
// Write lock needs to be interruptible here because the
// transitionToActive RPC takes the write lock before calling
// tailer.stop() -- so if we're not interruptible, it will
@@ -320,9 +315,9 @@ public class EditLogTailer {
// interrupter should have already set shouldRun to false
continue;
} catch (Throwable t) {
- LOG.error("Unknown error encountered while tailing edits. " +
+ LOG.fatal("Unknown error encountered while tailing edits. " +
"Shutting down standby NN.", t);
- runtime.exit(1);
+ terminate(1, t);
}
try {
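
EditLogTailer no longer needs an injectable Runtime: the fatal path calls terminate(1, t), and doTailEdits is widened to package-private with @VisibleForTesting so a unit test can drive one iteration directly. A small sketch of that visibility idiom, with illustrative names rather than the real EditLogTailer members:

    import com.google.common.annotations.VisibleForTesting;

    // The worker method stays out of the public API, but a test in the same
    // package can call it directly.
    public class Tailer {
      private long lastTxId = 0;

      public long getLastTxId() {
        return lastTxId;
      }

      @VisibleForTesting
      void doTailEdits() {
        // A unit test can drive a single iteration here without starting the
        // background thread or injecting a fake Runtime.
        lastTxId++;
      }
    }
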
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java Fri Aug 3 19:00:15 2012
@@ -38,7 +38,7 @@ import org.apache.hadoop.hdfs.util.Cance
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
-import static org.apache.hadoop.hdfs.server.common.Util.now;
+import static org.apache.hadoop.util.Time.now;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;