Posted to hdfs-commits@hadoop.apache.org by sz...@apache.org on 2012/06/06 02:18:04 UTC
svn commit: r1346682 [5/9] - in
/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project: ./ hadoop-hdfs-httpfs/
hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/
hadoop-hdfs/ hadoop-hdfs/dev-support/ hadoop-hdfs/src/contrib/bkjournal/
ha...
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java?rev=1346682&r1=1346681&r2=1346682&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java Wed Jun 6 00:17:38 2012
@@ -34,6 +34,7 @@ import org.apache.hadoop.hdfs.server.nam
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.SaveNamespaceCancelledException;
import org.apache.hadoop.hdfs.server.namenode.TransferFsImage;
+import org.apache.hadoop.hdfs.util.Canceler;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
@@ -58,12 +59,16 @@ public class StandbyCheckpointer {
private final CheckpointerThread thread;
private String activeNNAddress;
private InetSocketAddress myNNAddress;
+
+ private Object cancelLock = new Object();
+ private Canceler canceler;
// Keep track of how many checkpoints were canceled.
// This is for use in tests.
private static int canceledCount = 0;
- public StandbyCheckpointer(Configuration conf, FSNamesystem ns) {
+ public StandbyCheckpointer(Configuration conf, FSNamesystem ns)
+ throws IOException {
this.namesystem = ns;
this.checkpointConf = new CheckpointConf(conf);
this.thread = new CheckpointerThread();
@@ -74,8 +79,9 @@ public class StandbyCheckpointer {
/**
* Determine the address of the NN we are checkpointing
* as well as our own HTTP address from the configuration.
+ * @throws IOException
*/
- private void setNameNodeAddresses(Configuration conf) {
+ private void setNameNodeAddresses(Configuration conf) throws IOException {
// Look up our own address.
String myAddrString = getHttpAddress(conf);
@@ -91,7 +97,7 @@ public class StandbyCheckpointer {
myNNAddress = NetUtils.createSocketAddr(myAddrString);
}
- private String getHttpAddress(Configuration conf) {
+ private String getHttpAddress(Configuration conf) throws IOException {
String configuredAddr = DFSUtil.getInfoServer(null, conf, false);
// Use the hostname from the RPC address as a default, in case
@@ -112,7 +118,7 @@ public class StandbyCheckpointer {
*/
private boolean checkAddress(String addrStr) {
InetSocketAddress addr = NetUtils.createSocketAddr(addrStr);
- return addr.getPort() != 0 && !addr.getAddress().isAnyLocalAddress();
+ return addr.getPort() != 0;
}
public void start() {
@@ -123,6 +129,7 @@ public class StandbyCheckpointer {
}
public void stop() throws IOException {
+ cancelAndPreventCheckpoints("Stopping checkpointer");
thread.setShouldRun(false);
thread.interrupt();
try {
@@ -134,6 +141,7 @@ public class StandbyCheckpointer {
}
private void doCheckpoint() throws InterruptedException, IOException {
+ assert canceler != null;
long txid;
namesystem.writeLockInterruptibly();
@@ -153,8 +161,8 @@ public class StandbyCheckpointer {
thisCheckpointTxId + ". Skipping...");
return;
}
-
- img.saveNamespace(namesystem);
+
+ img.saveNamespace(namesystem, canceler);
txid = img.getStorage().getMostRecentCheckpointTxId();
assert txid == thisCheckpointTxId : "expected to save checkpoint at txid=" +
thisCheckpointTxId + " but instead saved at txid=" + txid;
@@ -173,16 +181,18 @@ public class StandbyCheckpointer {
* and prevent any new checkpoints from starting for the next
* minute or so.
*/
- public void cancelAndPreventCheckpoints() throws ServiceFailedException {
- try {
- thread.preventCheckpointsFor(PREVENT_AFTER_CANCEL_MS);
- // TODO(HA): there is a really narrow race here if we are just
- // about to start a checkpoint - this won't cancel it!
- namesystem.getFSImage().cancelSaveNamespace(
- "About to exit standby state");
- } catch (InterruptedException e) {
- throw new ServiceFailedException(
- "Interrupted while trying to cancel checkpoint");
+ public void cancelAndPreventCheckpoints(String msg) throws ServiceFailedException {
+ thread.preventCheckpointsFor(PREVENT_AFTER_CANCEL_MS);
+ synchronized (cancelLock) {
+ // Before beginning a checkpoint, the checkpointer thread
+ // takes this lock, and creates a canceler object.
+ // If the canceler is non-null, then a checkpoint is in
+ // progress and we need to cancel it. If it's null, then
+ // the operation has not started, meaning that the above
+ // time-based prevention will take effect.
+ if (canceler != null) {
+ canceler.cancel(msg);
+ }
}
}
@@ -272,10 +282,18 @@ public class StandbyCheckpointer {
"exceeds the configured interval " + checkpointConf.getPeriod());
needCheckpoint = true;
}
- if (needCheckpoint && now < preventCheckpointsUntil) {
- LOG.info("But skipping this checkpoint since we are about to failover!");
- canceledCount++;
- } else if (needCheckpoint) {
+
+ synchronized (cancelLock) {
+ if (now < preventCheckpointsUntil) {
+ LOG.info("But skipping this checkpoint since we are about to failover!");
+ canceledCount++;
+ continue;
+ }
+ assert canceler == null;
+ canceler = new Canceler();
+ }
+
+ if (needCheckpoint) {
doCheckpoint();
lastCheckpointTime = now;
}
@@ -287,6 +305,10 @@ public class StandbyCheckpointer {
continue;
} catch (Throwable t) {
LOG.error("Exception in doCheckpoint", t);
+ } finally {
+ synchronized (cancelLock) {
+ canceler = null;
+ }
}
}
}
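
Note on the hunk above: the old catch-based cancellation is replaced by a small protocol around cancelLock. The checkpointer thread publishes a Canceler under the lock just before a checkpoint, clears it in a finally block, and cancelAndPreventCheckpoints() either signals the live Canceler or relies on the time-based prevention window, which closes the race flagged in the removed TODO. A minimal sketch of the same lock discipline; the host class and doWork() are invented for illustration, only the Canceler API comes from the patch:

    import org.apache.hadoop.hdfs.util.Canceler;

    class CancellableWorker {                        // hypothetical host class
      private final Object cancelLock = new Object();
      private Canceler canceler;                     // non-null only while a run is in flight

      void runOnce() throws InterruptedException {
        synchronized (cancelLock) {
          canceler = new Canceler();                 // publish before the work begins
        }
        try {
          doWork(canceler);                          // long-running; polls the canceler
        } finally {
          synchronized (cancelLock) {
            canceler = null;                         // nothing left to cancel
          }
        }
      }

      void cancel(String msg) {
        synchronized (cancelLock) {
          if (canceler != null) {                    // a run is in flight: signal it
            canceler.cancel(msg);
          }
          // else: no run has started, so the caller's prevention window applies
        }
      }

      private void doWork(Canceler c) throws InterruptedException { /* ... */ }
    }
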
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksWithLocations.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksWithLocations.java?rev=1346682&r1=1346681&r2=1346682&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksWithLocations.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksWithLocations.java Wed Jun 6 00:17:38 2012
@@ -21,9 +21,8 @@ import org.apache.hadoop.classification.
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.protocol.Block;
-/** A class to implement an array of BlockLocations
- * It provide efficient customized serialization/deserialization methods
- * in stead of using the default array (de)serialization provided by RPC
+/**
+ * Maintains an array of blocks and their corresponding storage IDs.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
@@ -36,12 +35,12 @@ public class BlocksWithLocations {
@InterfaceStability.Evolving
public static class BlockWithLocations {
Block block;
- String datanodeIDs[];
+ String storageIDs[];
/** constructor */
- public BlockWithLocations(Block b, String[] datanodes) {
- block = b;
- datanodeIDs = datanodes;
+ public BlockWithLocations(Block block, String[] storageIDs) {
+ this.block = block;
+ this.storageIDs = storageIDs;
}
/** get the block */
@@ -50,15 +49,15 @@ public class BlocksWithLocations {
}
/** get the block's locations */
- public String[] getDatanodes() {
- return datanodeIDs;
+ public String[] getStorageIDs() {
+ return storageIDs;
}
}
private BlockWithLocations[] blocks;
/** Constructor with one parameter */
- public BlocksWithLocations( BlockWithLocations[] blocks ) {
+ public BlocksWithLocations(BlockWithLocations[] blocks) {
this.blocks = blocks;
}
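
The rename from datanodeIDs/getDatanodes() to storageIDs/getStorageIDs() touches every caller. A sketch of the updated usage; the Block constructor arguments (id, length, generation stamp) are illustrative:

    import org.apache.hadoop.hdfs.protocol.Block;
    import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;

    class StorageIdExample {
      static String[] storageIdsOf() {
        Block b = new Block(1L, 1024L, 100L);        // blockId, numBytes, genStamp (illustrative)
        BlockWithLocations bwl =
            new BlockWithLocations(b, new String[] { "storage-1", "storage-2" });
        return bwl.getStorageIDs();                  // formerly getDatanodes()
      }
    }
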
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java?rev=1346682&r1=1346681&r2=1346682&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java Wed Jun 6 00:17:38 2012
@@ -47,21 +47,6 @@ public class DatanodeRegistration extend
this.softwareVersion = softwareVersion;
}
- public DatanodeRegistration(String ipAddr, int xferPort) {
- this(ipAddr, xferPort, new StorageInfo(), new ExportedBlockKeys());
- }
-
- public DatanodeRegistration(String ipAddr, int xferPort, StorageInfo info,
- ExportedBlockKeys keys) {
- super(ipAddr, xferPort);
- this.storageInfo = info;
- this.exportedKeys = keys;
- }
-
- public void setStorageInfo(StorageInfo storage) {
- this.storageInfo = new StorageInfo(storage);
- }
-
public StorageInfo getStorageInfo() {
return storageInfo;
}
@@ -74,10 +59,6 @@ public class DatanodeRegistration extend
return exportedKeys;
}
- public void setSoftwareVersion(String softwareVersion) {
- this.softwareVersion = softwareVersion;
- }
-
public String getSoftwareVersion() {
return softwareVersion;
}
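
With the convenience constructors and setters removed, a registration is now built fully populated in one call, exactly as the DFSTestUtil hunk later in this commit does. A sketch assuming the remaining (DatanodeID, StorageInfo, ExportedBlockKeys, String) constructor:

    import org.apache.hadoop.hdfs.protocol.DatanodeID;
    import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
    import org.apache.hadoop.hdfs.server.common.StorageInfo;
    import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
    import org.apache.hadoop.util.VersionInfo;

    class RegistrationExample {
      static DatanodeRegistration newRegistration(DatanodeID dnId) {
        return new DatanodeRegistration(
            dnId,                          // identity of the datanode
            new StorageInfo(),             // empty storage info
            new ExportedBlockKeys(),       // no block keys yet
            VersionInfo.getVersion());     // software version, fixed at construction
      }
    }
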
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java?rev=1346682&r1=1346681&r2=1346682&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java Wed Jun 6 00:17:38 2012
@@ -87,12 +87,18 @@ public interface NamenodeProtocol {
/**
* @return The most recent transaction ID that has been synced to
- * persistent storage.
+ * persistent storage, or applied from persistent storage in the
+ * case of a non-active node.
* @throws IOException
*/
public long getTransactionID() throws IOException;
/**
+ * Get the transaction ID of the most recent checkpoint.
+ */
+ public long getMostRecentCheckpointTxId() throws IOException;
+
+ /**
* Closes the current edit log and opens a new one. The
* call fails if the file system is in SafeMode.
* @throws IOException
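
Together with getTransactionID(), the new getMostRecentCheckpointTxId() lets a checkpointing node decide whether any work is pending. The comparison below is an illustrative policy, not part of the patch:

    import java.io.IOException;
    import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;

    class CheckpointPolicyExample {
      static boolean checkpointNeeded(NamenodeProtocol proxy) throws IOException {
        long lastCheckpointTxId = proxy.getMostRecentCheckpointTxId(); // new RPC
        long curTxId = proxy.getTransactionID();  // synced, or applied on a standby
        return curTxId > lastCheckpointTxId;
      }
    }
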
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetGroups.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetGroups.java?rev=1346682&r1=1346681&r2=1346682&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetGroups.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetGroups.java Wed Jun 6 00:17:38 2012
@@ -21,14 +21,16 @@ import java.io.IOException;
import java.io.PrintStream;
import java.net.InetSocketAddress;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.NameNodeProxies;
-import org.apache.hadoop.hdfs.protocolPB.GetUserMappingsProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.tools.GetGroupsBase;
import org.apache.hadoop.tools.GetUserMappingsProtocol;
import org.apache.hadoop.util.ToolRunner;
@@ -39,6 +41,8 @@ import org.apache.hadoop.util.ToolRunner
*/
@InterfaceAudience.Private
public class GetGroups extends GetGroupsBase {
+
+ private static final Log LOG = LogFactory.getLog(GetGroups.class);
static{
HdfsConfiguration.init();
@@ -60,6 +64,22 @@ public class GetGroups extends GetGroups
}
@Override
+ public void setConf(Configuration conf) {
+ conf = new HdfsConfiguration(conf);
+ String nameNodePrincipal = conf.get(
+ DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY, "");
+
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Using NN principal: " + nameNodePrincipal);
+ }
+
+ conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
+ nameNodePrincipal);
+
+ super.setConf(conf);
+ }
+
+ @Override
protected GetUserMappingsProtocol getUgmProtocol() throws IOException {
return NameNodeProxies.createProxy(getConf(), FileSystem.getDefaultUri(getConf()),
GetUserMappingsProtocol.class).getProxy();
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java?rev=1346682&r1=1346681&r2=1346682&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java Wed Jun 6 00:17:38 2012
@@ -21,6 +21,7 @@ import java.net.InetSocketAddress;
import java.util.Map;
import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ha.BadFencingConfigurationException;
import org.apache.hadoop.ha.HAServiceTarget;
import org.apache.hadoop.ha.NodeFencer;
@@ -44,12 +45,14 @@ public class NNHAServiceTarget extends H
private static final String NAMENODE_ID_KEY = "namenodeid";
private final InetSocketAddress addr;
+ private InetSocketAddress zkfcAddr;
private NodeFencer fencer;
private BadFencingConfigurationException fenceConfigError;
private final String nnId;
private final String nsId;
-
- public NNHAServiceTarget(HdfsConfiguration conf,
+ private final boolean autoFailoverEnabled;
+
+ public NNHAServiceTarget(Configuration conf,
String nsId, String nnId) {
Preconditions.checkNotNull(nnId);
@@ -75,12 +78,24 @@ public class NNHAServiceTarget extends H
}
this.addr = NetUtils.createSocketAddr(serviceAddr,
NameNode.DEFAULT_PORT);
+
+ this.autoFailoverEnabled = targetConf.getBoolean(
+ DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY,
+ DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_DEFAULT);
+ if (autoFailoverEnabled) {
+ int port = DFSZKFailoverController.getZkfcPort(targetConf);
+ if (port != 0) {
+ setZkfcPort(port);
+ }
+ }
+
try {
this.fencer = NodeFencer.create(targetConf,
DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY);
} catch (BadFencingConfigurationException e) {
this.fenceConfigError = e;
}
+
this.nnId = nnId;
this.nsId = nsId;
}
@@ -94,10 +109,29 @@ public class NNHAServiceTarget extends H
}
@Override
+ public InetSocketAddress getZKFCAddress() {
+ Preconditions.checkState(autoFailoverEnabled,
+ "ZKFC address not relevant when auto failover is off");
+ assert zkfcAddr != null;
+
+ return zkfcAddr;
+ }
+
+ void setZkfcPort(int port) {
+ assert autoFailoverEnabled;
+
+ this.zkfcAddr = new InetSocketAddress(addr.getAddress(), port);
+ }
+
+ @Override
public void checkFencingConfigured() throws BadFencingConfigurationException {
if (fenceConfigError != null) {
throw fenceConfigError;
}
+ if (fencer == null) {
+ throw new BadFencingConfigurationException(
+ "No fencer configured for " + this);
+ }
}
@Override
@@ -125,4 +159,9 @@ public class NNHAServiceTarget extends H
ret.put(NAMESERVICE_ID_KEY, getNameServiceId());
ret.put(NAMENODE_ID_KEY, getNameNodeId());
}
+
+ @Override
+ public boolean isAutoFailoverEnabled() {
+ return autoFailoverEnabled;
+ }
}
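
The constructor now accepts a plain Configuration and, when dfs.ha.automatic-failover.enabled is set, resolves the ZKFC address; checkFencingConfigured() also rejects a missing fencer outright instead of silently accepting it. A sketch with illustrative nameservice and namenode IDs:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.ha.BadFencingConfigurationException;
    import org.apache.hadoop.hdfs.tools.NNHAServiceTarget;

    class TargetExample {
      static void probe(Configuration conf) throws BadFencingConfigurationException {
        NNHAServiceTarget target = new NNHAServiceTarget(conf, "ns1", "nn1");
        if (target.isAutoFailoverEnabled()) {
          // Only meaningful when auto failover is on; the getter asserts otherwise.
          System.out.println("ZKFC at " + target.getZKFCAddress());
        }
        target.checkFencingConfigured();  // now also fails when no fencer is configured
      }
    }
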
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsLoader.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsLoader.java?rev=1346682&r1=1346681&r2=1346682&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsLoader.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsLoader.java Wed Jun 6 00:17:38 2012
@@ -22,6 +22,7 @@ import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream;
import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream;
@@ -48,7 +49,8 @@ interface OfflineEditsLoader {
OfflineEditsLoader loader = null;
try {
file = new File(inputFileName);
- elis = new EditLogFileInputStream(file, -1, -1, false);
+ elis = new EditLogFileInputStream(file, HdfsConstants.INVALID_TXID,
+ HdfsConstants.INVALID_TXID, false);
loader = new OfflineEditsBinaryLoader(visitor, elis);
} finally {
if ((loader == null) && (elis != null)) {
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java?rev=1346682&r1=1346681&r2=1346682&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java Wed Jun 6 00:17:38 2012
@@ -31,11 +31,13 @@ import org.apache.hadoop.hdfs.protocol.L
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
import org.apache.hadoop.hdfs.tools.offlineImageViewer.ImageVisitor.ImageElement;
+import org.apache.hadoop.hdfs.util.XMLUtils;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;
import org.apache.hadoop.security.token.delegation.DelegationKey;
+import org.xml.sax.helpers.AttributesImpl;
/**
* ImageLoaderCurrent processes Hadoop FSImage files and walks over
@@ -143,6 +145,7 @@ class ImageLoaderCurrent implements Imag
@Override
public void loadImage(DataInputStream in, ImageVisitor v,
boolean skipBlocks) throws IOException {
+ boolean done = false;
try {
v.start();
v.visitEnclosingElement(ImageElement.FS_IMAGE);
@@ -187,11 +190,13 @@ class ImageLoaderCurrent implements Imag
}
v.leaveEnclosingElement(); // FSImage
- v.finish();
- } catch(IOException e) {
- // Tell the visitor to clean up, then re-throw the exception
- v.finishAbnormally();
- throw e;
+ done = true;
+ } finally {
+ if (done) {
+ v.finish();
+ } else {
+ v.finishAbnormally();
+ }
}
}
@@ -220,9 +225,29 @@ class ImageLoaderCurrent implements Imag
for(int i=0; i<numDTokens; i++){
DelegationTokenIdentifier id = new DelegationTokenIdentifier();
id.readFields(in);
- v.visit(ImageElement.DELEGATION_TOKEN_IDENTIFIER, id.toString());
+ long expiryTime = in.readLong();
+ v.visitEnclosingElement(ImageElement.DELEGATION_TOKEN_IDENTIFIER);
+ v.visit(ImageElement.DELEGATION_TOKEN_IDENTIFIER_KIND,
+ id.getKind().toString());
+ v.visit(ImageElement.DELEGATION_TOKEN_IDENTIFIER_SEQNO,
+ id.getSequenceNumber());
+ v.visit(ImageElement.DELEGATION_TOKEN_IDENTIFIER_OWNER,
+ id.getOwner().toString());
+ v.visit(ImageElement.DELEGATION_TOKEN_IDENTIFIER_RENEWER,
+ id.getRenewer().toString());
+ v.visit(ImageElement.DELEGATION_TOKEN_IDENTIFIER_REALUSER,
+ id.getRealUser().toString());
+ v.visit(ImageElement.DELEGATION_TOKEN_IDENTIFIER_ISSUE_DATE,
+ id.getIssueDate());
+ v.visit(ImageElement.DELEGATION_TOKEN_IDENTIFIER_MAX_DATE,
+ id.getMaxDate());
+ v.visit(ImageElement.DELEGATION_TOKEN_IDENTIFIER_EXPIRY_TIME,
+ expiryTime);
+ v.visit(ImageElement.DELEGATION_TOKEN_IDENTIFIER_MASTER_KEY_ID,
+ id.getMasterKeyId());
+ v.leaveEnclosingElement(); // DELEGATION_TOKEN_IDENTIFIER
}
- v.leaveEnclosingElement();
+ v.leaveEnclosingElement(); // DELEGATION_TOKENS
}
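
Switching from catch-and-rethrow to a done flag in a finally block means finishAbnormally() now runs for any Throwable (including RuntimeException and Error), not just IOException, while finish() still runs only on success. The idiom in isolation, with a stand-in interface for the package-private ImageVisitor and the walk itself elided:

    import java.io.IOException;

    interface Visitor {                  // stand-in for the package-private ImageVisitor
      void finish() throws IOException;
      void finishAbnormally() throws IOException;
    }

    class DoneFlagExample {
      static void loadWith(Visitor v) throws IOException {
        boolean done = false;
        try {
          // ... walk the image, emitting visit events on v ...
          done = true;                   // reached only if the walk completed
        } finally {
          if (done) {
            v.finish();                  // normal completion
          } else {
            v.finishAbnormally();        // covers every Throwable, not just IOException
          }
        }
      }
    }
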
/**
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageVisitor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageVisitor.java?rev=1346682&r1=1346681&r2=1346682&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageVisitor.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageVisitor.java Wed Jun 6 00:17:38 2012
@@ -71,7 +71,15 @@ abstract class ImageVisitor {
NUM_DELEGATION_TOKENS,
DELEGATION_TOKENS,
DELEGATION_TOKEN_IDENTIFIER,
- DELEGATION_TOKEN_EXPIRY_TIME,
+ DELEGATION_TOKEN_IDENTIFIER_KIND,
+ DELEGATION_TOKEN_IDENTIFIER_SEQNO,
+ DELEGATION_TOKEN_IDENTIFIER_OWNER,
+ DELEGATION_TOKEN_IDENTIFIER_RENEWER,
+ DELEGATION_TOKEN_IDENTIFIER_REALUSER,
+ DELEGATION_TOKEN_IDENTIFIER_ISSUE_DATE,
+ DELEGATION_TOKEN_IDENTIFIER_MAX_DATE,
+ DELEGATION_TOKEN_IDENTIFIER_EXPIRY_TIME,
+ DELEGATION_TOKEN_IDENTIFIER_MASTER_KEY_ID,
TRANSACTION_ID
}
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/IndentedImageVisitor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/IndentedImageVisitor.java?rev=1346682&r1=1346681&r2=1346682&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/IndentedImageVisitor.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/IndentedImageVisitor.java Wed Jun 6 00:17:38 2012
@@ -18,6 +18,7 @@
package org.apache.hadoop.hdfs.tools.offlineImageViewer;
import java.io.IOException;
+import java.util.Date;
/**
* IndentedImageVisitor walks over an FSImage and displays its structure
@@ -58,6 +59,16 @@ class IndentedImageVisitor extends TextW
write(element + " = " + value + "\n");
}
+ void visit(ImageElement element, long value) throws IOException {
+ if ((element == ImageElement.DELEGATION_TOKEN_IDENTIFIER_EXPIRY_TIME) ||
+ (element == ImageElement.DELEGATION_TOKEN_IDENTIFIER_ISSUE_DATE) ||
+ (element == ImageElement.DELEGATION_TOKEN_IDENTIFIER_MAX_DATE)) {
+ visit(element, new Date(value).toString());
+ } else {
+ visit(element, Long.toString(value));
+ }
+ }
+
@Override
void visitEnclosingElement(ImageElement element) throws IOException {
printIndents();
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewer.java?rev=1346682&r1=1346681&r2=1346682&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewer.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewer.java Wed Jun 6 00:17:38 2012
@@ -30,8 +30,12 @@ import org.apache.commons.cli.OptionBuil
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.commons.cli.PosixParser;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.PositionTrackingInputStream;
/**
* OfflineImageViewer to dump the contents of an Hadoop image file to XML
@@ -40,6 +44,8 @@ import org.apache.hadoop.classification.
*/
@InterfaceAudience.Private
public class OfflineImageViewer {
+ public static final Log LOG = LogFactory.getLog(OfflineImageViewer.class);
+
private final static String usage =
"Usage: bin/hdfs oiv [OPTIONS] -i INPUTFILE -o OUTPUTFILE\n" +
"Offline Image Viewer\n" +
@@ -112,24 +118,28 @@ public class OfflineImageViewer {
*/
public void go() throws IOException {
DataInputStream in = null;
-
+ PositionTrackingInputStream tracker = null;
+ ImageLoader fsip = null;
+ boolean done = false;
try {
- in = new DataInputStream(new BufferedInputStream(
+ tracker = new PositionTrackingInputStream(new BufferedInputStream(
new FileInputStream(new File(inputFile))));
+ in = new DataInputStream(tracker);
int imageVersionFile = findImageVersion(in);
- ImageLoader fsip =
- ImageLoader.LoaderFactory.getLoader(imageVersionFile);
+ fsip = ImageLoader.LoaderFactory.getLoader(imageVersionFile);
if(fsip == null)
throw new IOException("No image processor to read version " +
imageVersionFile + " is available.");
-
fsip.loadImage(in, processor, skipBlocks);
-
+ done = true;
} finally {
- if(in != null) in.close();
+ if (!done) {
+ LOG.error("image loading failed at offset " + tracker.getPos());
+ }
+ IOUtils.cleanup(LOG, in, tracker);
}
}
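
The loader now tracks its byte position through PositionTrackingInputStream so a failure can be reported with an offset, and the old "if (in != null) in.close()" is replaced by IOUtils.cleanup(), which closes each stream and logs (rather than throws) close() failures, so the original exception is never masked. The cleanup idiom on its own, in a self-contained sketch:

    import java.io.BufferedInputStream;
    import java.io.File;
    import java.io.FileInputStream;
    import java.io.IOException;
    import java.io.InputStream;
    import org.apache.commons.logging.Log;
    import org.apache.hadoop.io.IOUtils;

    class CleanupExample {
      static void readAll(File f, Log log) throws IOException {
        InputStream in = null;
        try {
          in = new BufferedInputStream(new FileInputStream(f));
          // ... consume the stream ...
        } finally {
          IOUtils.cleanup(log, in);  // close quietly; never masks a pending exception
        }
      }
    }
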
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightHashSet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightHashSet.java?rev=1346682&r1=1346681&r2=1346682&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightHashSet.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightHashSet.java Wed Jun 6 00:17:38 2012
@@ -173,31 +173,42 @@ public class LightWeightHashSet<T> imple
* @return true if element present, false otherwise.
*/
@SuppressWarnings("unchecked")
+ @Override
public boolean contains(final Object key) {
+ return getElement((T)key) != null;
+ }
+
+ /**
+ * Return the element in this set which is equal to
+ * the given key, if such an element exists.
+ * Otherwise returns null.
+ */
+ public T getElement(final T key) {
// validate key
if (key == null) {
throw new IllegalArgumentException("Null element is not supported.");
}
// find element
- final int hashCode = ((T)key).hashCode();
+ final int hashCode = key.hashCode();
final int index = getIndex(hashCode);
- return containsElem(index, (T) key, hashCode);
+ return getContainedElem(index, key, hashCode);
}
/**
- * Check if the set contains given element at given index.
+ * Check if the set contains given element at given index. If it
+ * does, return that element.
*
- * @return true if element present, false otherwise.
+ * @return the element, or null, if no element matches
*/
- protected boolean containsElem(int index, final T key, int hashCode) {
+ protected T getContainedElem(int index, final T key, int hashCode) {
for (LinkedElement<T> e = entries[index]; e != null; e = e.next) {
// element found
if (hashCode == e.hashCode && e.element.equals(key)) {
- return true;
+ return e.element;
}
}
// element not found
- return false;
+ return null;
}
/**
@@ -240,7 +251,7 @@ public class LightWeightHashSet<T> imple
final int hashCode = element.hashCode();
final int index = getIndex(hashCode);
// return false if already present
- if (containsElem(index, element, hashCode)) {
+ if (getContainedElem(index, element, hashCode) != null) {
return false;
}
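
contains() can only answer yes or no; the new getElement() returns the instance the set actually stores, which makes the set usable for canonicalization: a caller can swap an equal-but-distinct object for the stored copy and let the duplicate be collected. An illustrative helper:

    import org.apache.hadoop.hdfs.util.LightWeightHashSet;

    class InternExample {
      static <T> T intern(LightWeightHashSet<T> set, T candidate) {
        T existing = set.getElement(candidate);  // stored element equal to candidate, or null
        if (existing != null) {
          return existing;                       // reuse the set's copy
        }
        set.add(candidate);
        return candidate;
      }
    }
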
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightLinkedSet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightLinkedSet.java?rev=1346682&r1=1346681&r2=1346682&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightLinkedSet.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightLinkedSet.java Wed Jun 6 00:17:38 2012
@@ -88,7 +88,7 @@ public class LightWeightLinkedSet<T> ext
final int hashCode = element.hashCode();
final int index = getIndex(hashCode);
// return false if already present
- if (containsElem(index, element, hashCode)) {
+ if (getContainedElem(index, element, hashCode) != null) {
return false;
}
Propchange: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:r1337003-1346681
Merged /hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/native:r1306184-1342109
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/NamenodeProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/NamenodeProtocol.proto?rev=1346682&r1=1346681&r2=1346682&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/NamenodeProtocol.proto (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/NamenodeProtocol.proto Wed Jun 6 00:17:38 2012
@@ -85,6 +85,16 @@ message RollEditLogResponseProto {
}
/**
+ * void request
+ */
+message GetMostRecentCheckpointTxIdRequestProto {
+}
+
+message GetMostRecentCheckpointTxIdResponseProto{
+ required uint64 txId = 1;
+}
+
+/**
* registration - Namenode reporting the error
* errorCode - error code indicating the error
* msg - Free text description of the error
@@ -189,12 +199,18 @@ service NamenodeProtocolService {
returns(GetTransactionIdResponseProto);
/**
+ * Get the transaction ID of the most recently persisted editlog record
+ */
+ rpc getMostRecentCheckpointTxId(GetMostRecentCheckpointTxIdRequestProto)
+ returns(GetMostRecentCheckpointTxIdResponseProto);
+
+ /**
* Close the current editlog and open a new one for checkpointing purposes
*/
rpc rollEditLog(RollEditLogRequestProto) returns(RollEditLogResponseProto);
/**
- * Close the current editlog and open a new one for checkpointing purposes
+ * Request info about the version running on this NameNode
*/
rpc versionRequest(VersionRequestProto) returns(VersionResponseProto);
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto?rev=1346682&r1=1346681&r2=1346682&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto Wed Jun 6 00:17:38 2012
@@ -113,6 +113,7 @@ message PacketHeaderProto {
required sfixed64 seqno = 2;
required bool lastPacketInBlock = 3;
required sfixed32 dataLen = 4;
+ optional bool syncBlock = 5 [default = false];
}
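
Because syncBlock is optional with an explicit default, old writers that omit it stay wire-compatible and readers simply see false. On the Java side the protoc-generated builder picks the field up automatically; a sketch assuming the standard generated API, where field 1 (offsetInBlock, not shown in the hunk) is an assumption:

    import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto;

    class PacketHeaderExample {
      static PacketHeaderProto syncedHeader() {
        return PacketHeaderProto.newBuilder()
            .setOffsetInBlock(0L)          // assumed field 1 of the header
            .setSeqno(1L)
            .setLastPacketInBlock(false)
            .setDataLen(4096)
            .setSyncBlock(true)            // new field; absent on the wire reads as false
            .build();
      }
    }
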
enum Status {
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto?rev=1346682&r1=1346681&r2=1346682&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto Wed Jun 6 00:17:38 2012
@@ -274,7 +274,7 @@ message BlockProto {
*/
message BlockWithLocationsProto {
required BlockProto block = 1; // Block
- repeated string datanodeIDs = 2; // Datanodes with replicas of the block
+ repeated string storageIDs = 2; // Datanodes with replicas of the block
}
/**
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml?rev=1346682&r1=1346681&r2=1346682&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml Wed Jun 6 00:17:38 2012
@@ -239,14 +239,6 @@
left empty in a non-HA cluster.
</description>
</property>
-
-<property>
- <name>dfs.web.ugi</name>
- <value>webuser,webgroup</value>
- <description>The user account used by the web interface.
- Syntax: USERNAME,GROUP1,GROUP2, ...
- </description>
-</property>
<property>
<name>dfs.permissions.enabled</name>
@@ -778,7 +770,7 @@
</property>
<property>
- <name>dfs.federation.nameservices</name>
+ <name>dfs.nameservices</name>
<value></value>
<description>
Comma-separated list of nameservices.
@@ -786,12 +778,12 @@
</property>
<property>
- <name>dfs.federation.nameservice.id</name>
+ <name>dfs.nameservice.id</name>
<value></value>
<description>
The ID of this nameservice. If the nameservice ID is not
configured or more than one nameservice is configured for
- dfs.federation.nameservices it is determined automatically by
+ dfs.nameservices it is determined automatically by
matching the local node's address with the configured address.
</description>
</property>
@@ -837,6 +829,16 @@
</property>
<property>
+ <name>dfs.ha.automatic-failover.enabled</name>
+ <value>false</value>
+ <description>
+ Whether automatic failover is enabled. See the HDFS High
+ Availability documentation for details on automatic HA
+ configuration.
+ </description>
+</property>
+
+<property>
<name>dfs.support.append</name>
<value>true</value>
<description>
Propchange: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1337003-1346681
Merged /hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1306184-1342109
Propchange: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
Merged /hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1306184-1342109
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1337003-1346681
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.jsp
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.jsp?rev=1346682&r1=1346681&r2=1346682&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.jsp (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.jsp Wed Jun 6 00:17:38 2012
@@ -39,10 +39,10 @@
<!DOCTYPE html>
<html>
-
+<head>
<link rel="stylesheet" type="text/css" href="/static/hadoop.css">
<title>Hadoop <%=namenodeRole%> <%=namenodeLabel%></title>
-
+</head>
<body>
<h1><%=namenodeRole%> '<%=namenodeLabel%>' (<%=namenodeState%>)</h1>
<%= NamenodeJspHelper.getVersionTable(fsn) %>
Propchange: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1337003-1346681
Merged /hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1306184-1342109
Propchange: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
Merged /hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1306184-1342109
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1337003-1346681
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AppendTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AppendTestUtil.java?rev=1346682&r1=1346681&r2=1346682&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AppendTestUtil.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AppendTestUtil.java Wed Jun 6 00:17:38 2012
@@ -139,7 +139,7 @@ public class AppendTestUtil {
/**
* create a buffer that contains the entire test file data.
*/
- static byte[] initBuffer(int size) {
+ public static byte[] initBuffer(int size) {
if (seed == -1)
seed = nextLong();
return randomBytes(seed, size);
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java?rev=1346682&r1=1346681&r2=1346682&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java Wed Jun 6 00:17:38 2012
@@ -67,19 +67,23 @@ import org.apache.hadoop.hdfs.protocol.L
import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
+import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.TestTransferRbw;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.ShellBasedUnixGroupsMapping;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.util.VersionInfo;
import com.google.common.base.Joiner;
@@ -319,7 +323,7 @@ public class DFSTestUtil {
*/
public static void waitCorruptReplicas(FileSystem fs, FSNamesystem ns,
Path file, ExtendedBlock b, int corruptRepls)
- throws IOException, TimeoutException {
+ throws IOException, TimeoutException, InterruptedException {
int count = 0;
final int ATTEMPTS = 50;
int repls = ns.getBlockManager().numCorruptReplicas(b.getLocalBlock());
@@ -333,6 +337,7 @@ public class DFSTestUtil {
System.out.println("Waiting for "+corruptRepls+" corrupt replicas");
repls = ns.getBlockManager().numCorruptReplicas(b.getLocalBlock());
count++;
+ Thread.sleep(1000);
}
if (count == ATTEMPTS) {
throw new TimeoutException("Timed out waiting for corrupt replicas."
@@ -703,18 +708,19 @@ public class DFSTestUtil {
info.nameserviceId), DFSUtil.createUri(HdfsConstants.HDFS_URI_SCHEME,
info.nameNode.getNameNodeAddress()).toString());
}
- conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, Joiner.on(",")
+ conf.set(DFSConfigKeys.DFS_NAMESERVICES, Joiner.on(",")
.join(nameservices));
}
private static DatanodeID getDatanodeID(String ipAddr) {
- return new DatanodeID(ipAddr, "localhost",
- DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT);
+ return new DatanodeID(ipAddr, "localhost", "",
+ DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT,
+ DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
+ DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT);
}
public static DatanodeID getLocalDatanodeID() {
- return new DatanodeID("127.0.0.1", "localhost",
- DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT);
+ return getDatanodeID("127.0.0.1");
}
public static DatanodeID getLocalDatanodeID(int port) {
@@ -740,12 +746,14 @@ public class DFSTestUtil {
public static DatanodeInfo getDatanodeInfo(String ipAddr,
String host, int port) {
- return new DatanodeInfo(new DatanodeID(ipAddr, host, port));
+ return new DatanodeInfo(new DatanodeID(ipAddr, host, "",
+ port, DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
+ DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT));
}
public static DatanodeInfo getLocalDatanodeInfo(String ipAddr,
String hostname, AdminStates adminState) {
- return new DatanodeInfo(ipAddr, hostname, "storage",
+ return new DatanodeInfo(ipAddr, hostname, "",
DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT,
DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT,
@@ -760,6 +768,14 @@ public class DFSTestUtil {
public static DatanodeDescriptor getDatanodeDescriptor(String ipAddr,
int port, String rackLocation) {
- return new DatanodeDescriptor(new DatanodeID(ipAddr, port), rackLocation);
+ DatanodeID dnId = new DatanodeID(ipAddr, "host", "", port,
+ DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
+ DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT);
+ return new DatanodeDescriptor(dnId, rackLocation);
+ }
+
+ public static DatanodeRegistration getLocalDatanodeRegistration() {
+ return new DatanodeRegistration(getLocalDatanodeID(),
+ new StorageInfo(), new ExportedBlockKeys(), VersionInfo.getVersion());
}
}
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=1346682&r1=1346681&r2=1346682&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java Wed Jun 6 00:17:38 2012
@@ -25,8 +25,8 @@ import static org.apache.hadoop.hdfs.DFS
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_FEDERATION_NAMESERVICES;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_FEDERATION_NAMESERVICE_ID;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICES;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICE_ID;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY;
@@ -67,8 +67,10 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.ha.HAServiceProtocol;
+import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
import org.apache.hadoop.ha.HAServiceProtocolHelper;
import org.apache.hadoop.ha.ServiceFailedException;
+import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
import org.apache.hadoop.ha.protocolPB.HAServiceProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.MiniDFSNNTopology.NNConf;
import org.apache.hadoop.hdfs.protocol.Block;
@@ -131,6 +133,7 @@ public class MiniDFSCluster {
private int numDataNodes = 1;
private boolean format = true;
private boolean manageNameDfsDirs = true;
+ private boolean manageNameDfsSharedDirs = true;
private boolean manageDataDfsDirs = true;
private StartupOption option = null;
private String[] racks = null;
@@ -188,6 +191,14 @@ public class MiniDFSCluster {
/**
* Default: true
*/
+ public Builder manageNameDfsSharedDirs(boolean val) {
+ this.manageNameDfsSharedDirs = val;
+ return this;
+ }
+
+ /**
+ * Default: true
+ */
public Builder manageDataDfsDirs(boolean val) {
this.manageDataDfsDirs = val;
return this;
@@ -286,6 +297,7 @@ public class MiniDFSCluster {
builder.numDataNodes,
builder.format,
builder.manageNameDfsDirs,
+ builder.manageNameDfsSharedDirs,
builder.manageDataDfsDirs,
builder.option,
builder.racks,
@@ -525,7 +537,7 @@ public class MiniDFSCluster {
long[] simulatedCapacities) throws IOException {
this.nameNodes = new NameNodeInfo[1]; // Single namenode in the cluster
initMiniDFSCluster(conf, numDataNodes, format,
- manageNameDfsDirs, manageDataDfsDirs, operation, racks, hosts,
+ manageNameDfsDirs, true, manageDataDfsDirs, operation, racks, hosts,
simulatedCapacities, null, true, false,
MiniDFSNNTopology.simpleSingleNN(nameNodePort, 0));
}
@@ -533,7 +545,8 @@ public class MiniDFSCluster {
private void initMiniDFSCluster(
Configuration conf,
int numDataNodes, boolean format, boolean manageNameDfsDirs,
- boolean manageDataDfsDirs, StartupOption operation, String[] racks,
+ boolean manageNameDfsSharedDirs, boolean manageDataDfsDirs,
+ StartupOption operation, String[] racks,
String[] hosts, long[] simulatedCapacities, String clusterId,
boolean waitSafeMode, boolean setupHostsFile,
MiniDFSNNTopology nnTopology)
@@ -572,7 +585,8 @@ public class MiniDFSCluster {
federation = nnTopology.isFederated();
createNameNodesAndSetConf(
- nnTopology, manageNameDfsDirs, format, operation, clusterId, conf);
+ nnTopology, manageNameDfsDirs, manageNameDfsSharedDirs,
+ format, operation, clusterId, conf);
if (format) {
if (data_dir.exists() && !FileUtil.fullyDelete(data_dir)) {
@@ -593,8 +607,8 @@ public class MiniDFSCluster {
}
private void createNameNodesAndSetConf(MiniDFSNNTopology nnTopology,
- boolean manageNameDfsDirs, boolean format, StartupOption operation,
- String clusterId,
+ boolean manageNameDfsDirs, boolean manageNameDfsSharedDirs,
+ boolean format, StartupOption operation, String clusterId,
Configuration conf) throws IOException {
Preconditions.checkArgument(nnTopology.countNameNodes() > 0,
"empty NN topology: no namenodes specified!");
@@ -612,7 +626,7 @@ public class MiniDFSCluster {
}
}
if (!allNsIds.isEmpty()) {
- conf.set(DFS_FEDERATION_NAMESERVICES, Joiner.on(",").join(allNsIds));
+ conf.set(DFS_NAMESERVICES, Joiner.on(",").join(allNsIds));
}
int nnCounter = 0;
@@ -639,7 +653,7 @@ public class MiniDFSCluster {
if (nnIds.size() > 1) {
conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, nameservice.getId()),
Joiner.on(",").join(nnIds));
- if (manageNameDfsDirs) {
+ if (manageNameDfsSharedDirs) {
URI sharedEditsUri = getSharedEditsDir(nnCounter, nnCounter+nnIds.size()-1);
conf.set(DFS_NAMENODE_SHARED_EDITS_DIR_KEY, sharedEditsUri.toString());
}
@@ -719,7 +733,7 @@ public class MiniDFSCluster {
boolean manageNameDfsDirs, int nnIndex)
throws IOException {
if (nameserviceId != null) {
- conf.set(DFS_FEDERATION_NAMESERVICE_ID, nameserviceId);
+ conf.set(DFS_NAMESERVICE_ID, nameserviceId);
}
if (nnId != null) {
conf.set(DFS_HA_NAMENODE_ID_KEY, nnId);
@@ -1260,6 +1274,13 @@ public class MiniDFSCluster {
public int getNameNodePort(int nnIndex) {
return nameNodes[nnIndex].nameNode.getNameNodeAddress().getPort();
}
+
+ /**
+ * @return the service rpc port used by the NameNode at the given index.
+ */
+ public int getNameNodeServicePort(int nnIndex) {
+ return nameNodes[nnIndex].nameNode.getServiceRpcAddress().getPort();
+ }
/**
* Shutdown all the nodes in the cluster.
@@ -1660,19 +1681,16 @@ public class MiniDFSCluster {
return FSNamesystem.getNamespaceEditsDirs(nameNodes[nnIndex].conf);
}
- private HAServiceProtocol getHaServiceClient(int nnIndex) throws IOException {
- InetSocketAddress addr = nameNodes[nnIndex].nameNode.getServiceRpcAddress();
- return new HAServiceProtocolClientSideTranslatorPB(addr, conf);
- }
-
public void transitionToActive(int nnIndex) throws IOException,
ServiceFailedException {
- HAServiceProtocolHelper.transitionToActive(getHaServiceClient(nnIndex));
+ getNameNode(nnIndex).getRpcServer().transitionToActive(
+ new StateChangeRequestInfo(RequestSource.REQUEST_BY_USER_FORCED));
}
public void transitionToStandby(int nnIndex) throws IOException,
ServiceFailedException {
- HAServiceProtocolHelper.transitionToStandby(getHaServiceClient(nnIndex));
+ getNameNode(nnIndex).getRpcServer().transitionToStandby(
+ new StateChangeRequestInfo(RequestSource.REQUEST_BY_USER_FORCED));
}
@@ -2118,9 +2136,9 @@ public class MiniDFSCluster {
nameNodes = newlist;
String nameserviceId = NAMESERVICE_ID_PREFIX + (nnIndex + 1);
- String nameserviceIds = conf.get(DFS_FEDERATION_NAMESERVICES);
+ String nameserviceIds = conf.get(DFS_NAMESERVICES);
nameserviceIds += "," + nameserviceId;
- conf.set(DFS_FEDERATION_NAMESERVICES, nameserviceIds);
+ conf.set(DFS_NAMESERVICES, nameserviceIds);
String nnId = null;
initNameNodeAddress(conf, nameserviceId,
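
manageNameDfsSharedDirs(false) lets an HA test supply its own shared edits directory while the cluster still manages the per-NN name directories, and transitionToActive/transitionToStandby now call the NameNode's RPC server directly with a forced-user request instead of going through an HA client proxy. A sketch of the new builder knob; the HA topology (the builder's nnTopology(...) call) is assumed and elided:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    class ClusterExample {
      static MiniDFSCluster start(Configuration conf) throws Exception {
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
            // .nnTopology(...) elided; an HA topology is assumed here
            .manageNameDfsSharedDirs(false)   // test manages the shared edits dir itself
            .build();
        cluster.waitActive();
        cluster.transitionToActive(0);        // forced transition via the NN's RPC server
        return cluster;
      }
    }
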
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java?rev=1346682&r1=1346681&r2=1346682&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java Wed Jun 6 00:17:38 2012
@@ -65,10 +65,13 @@ import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.test.GenericTestUtils;
+import org.mockito.Mockito;
import org.mockito.internal.stubbing.answers.ThrowsException;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
+import com.google.common.base.Joiner;
+
/**
* These tests make sure that DFSClient retries fetching data from DFS
* properly in case of errors.
@@ -298,6 +301,100 @@ public class TestDFSClientRetries extend
cluster.shutdown();
}
}
+
+ /**
+ * Test that getAdditionalBlock() and close() are idempotent. This allows
+ * a client to safely retry a call and still produce a correct
+ * file. See HDFS-3031.
+ */
+ public void testIdempotentAllocateBlockAndClose() throws Exception {
+ final String src = "/testIdempotentAllocateBlock";
+ Path file = new Path(src);
+
+ conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096);
+ final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+
+ try {
+ cluster.waitActive();
+ FileSystem fs = cluster.getFileSystem();
+ NamenodeProtocols preSpyNN = cluster.getNameNodeRpc();
+ NamenodeProtocols spyNN = spy(preSpyNN);
+ DFSClient client = new DFSClient(null, spyNN, conf, null);
+
+
+ // Make the call to addBlock() get called twice, as if it were retried
+ // due to an IPC issue.
+ doAnswer(new Answer<LocatedBlock>() {
+ @Override
+ public LocatedBlock answer(InvocationOnMock invocation) throws Throwable {
+ LocatedBlock ret = (LocatedBlock) invocation.callRealMethod();
+ LocatedBlocks lb = cluster.getNameNodeRpc().getBlockLocations(src, 0, Long.MAX_VALUE);
+ int blockCount = lb.getLocatedBlocks().size();
+ assertEquals(lb.getLastLocatedBlock().getBlock(), ret.getBlock());
+
+ // Retrying should result in a new block at the end of the file.
+ // (abandoning the old one)
+ LocatedBlock ret2 = (LocatedBlock) invocation.callRealMethod();
+ lb = cluster.getNameNodeRpc().getBlockLocations(src, 0, Long.MAX_VALUE);
+ int blockCount2 = lb.getLocatedBlocks().size();
+ assertEquals(lb.getLastLocatedBlock().getBlock(), ret2.getBlock());
+
+ // We shouldn't have gained an extra block by the RPC.
+ assertEquals(blockCount, blockCount2);
+ return (LocatedBlock) ret2;
+ }
+ }).when(spyNN).addBlock(Mockito.anyString(), Mockito.anyString(),
+ Mockito.<ExtendedBlock>any(), Mockito.<DatanodeInfo[]>any());
+
+ doAnswer(new Answer<Boolean>() {
+
+ @Override
+ public Boolean answer(InvocationOnMock invocation) throws Throwable {
+ // complete() may return false a few times before it returns
+ // true. We want to wait until it returns true, and then
+ // make it retry one more time after that.
+ LOG.info("Called complete(: " +
+ Joiner.on(",").join(invocation.getArguments()) + ")");
+ if (!(Boolean)invocation.callRealMethod()) {
+ LOG.info("Complete call returned false, not faking a retry RPC");
+ return false;
+ }
+ // We got a successful close. Call it again to check idempotence.
+ try {
+ boolean ret = (Boolean) invocation.callRealMethod();
+ LOG.info("Complete call returned true, faked second RPC. " +
+ "Returned: " + ret);
+ return ret;
+ } catch (Throwable t) {
+ LOG.error("Idempotent retry threw exception", t);
+ throw t;
+ }
+ }
+ }).when(spyNN).complete(Mockito.anyString(), Mockito.anyString(),
+ Mockito.<ExtendedBlock>any());
+
+ OutputStream stm = client.create(file.toString(), true);
+ try {
+ AppendTestUtil.write(stm, 0, 10000);
+ stm.close();
+ stm = null;
+ } finally {
+ IOUtils.cleanup(LOG, stm);
+ }
+
+ // Make sure the mock was actually properly injected.
+ Mockito.verify(spyNN, Mockito.atLeastOnce()).addBlock(
+ Mockito.anyString(), Mockito.anyString(),
+ Mockito.<ExtendedBlock>any(), Mockito.<DatanodeInfo[]>any());
+ Mockito.verify(spyNN, Mockito.atLeastOnce()).complete(
+ Mockito.anyString(), Mockito.anyString(),
+ Mockito.<ExtendedBlock>any());
+
+ AppendTestUtil.check(fs, file, 10000);
+ } finally {
+ cluster.shutdown();
+ }
+ }
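
[Editor's note: the two doAnswer() hooks above simulate an IPC-level retry by
invoking the real method a second time before returning. The same pattern in
isolation, as a minimal sketch against a hypothetical Greeter interface that
is not part of this commit:]

    import static org.mockito.Mockito.anyString;
    import static org.mockito.Mockito.doAnswer;
    import static org.mockito.Mockito.spy;

    import org.mockito.invocation.InvocationOnMock;
    import org.mockito.stubbing.Answer;

    public class RetryFakingSketch {
      interface Greeter { String greet(String name); }

      static Greeter withFakedRetry(Greeter real) {
        Greeter spied = spy(real);
        doAnswer(new Answer<String>() {
          @Override
          public String answer(InvocationOnMock invocation) throws Throwable {
            // First call succeeds, but pretend the reply was lost in transit...
            invocation.callRealMethod();
            // ...so the client retries; idempotent methods must tolerate this.
            return (String) invocation.callRealMethod();
          }
        }).when(spied).greet(anyString());
        return spied;
      }
    }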
/**
* Mock Answer implementation of NN.getBlockLocations that will return
@@ -422,17 +519,20 @@ public class TestDFSClientRetries extend
LOG.info("Test 4 succeeded! Time spent: " + (timestamp2-timestamp)/1000.0 + " sec.");
}
- private boolean busyTest(int xcievers, int threads, int fileLen, int timeWin, int retries)
+ private boolean busyTest(int xcievers, int threads, int fileLen, int timeWin, int retries)
throws IOException {
boolean ret = true;
short replicationFactor = 1;
long blockSize = 128*1024*1024; // DFS block size
int bufferSize = 4096;
-
- conf.setInt(DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_KEY, xcievers);
- conf.setInt(DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY,
- retries);
+ int originalXcievers = conf.getInt(
+ DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_KEY,
+ DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_DEFAULT);
+ conf.setInt(DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_KEY,
+ xcievers);
+ conf.setInt(DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY,
+ retries);
conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, timeWin);
// Disable keepalive
conf.setInt(DFSConfigKeys.DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY, 0);
@@ -508,6 +608,8 @@ public class TestDFSClientRetries extend
e.printStackTrace();
ret = false;
} finally {
+ conf.setInt(DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_KEY,
+ originalXcievers);
fs.delete(file1, false);
cluster.shutdown();
}
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java?rev=1346682&r1=1346681&r2=1346682&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java Wed Jun 6 00:17:38 2012
@@ -202,7 +202,7 @@ public class TestDFSPermission extends T
case CREATE:
FSDataOutputStream out = fs.create(name, permission, true,
conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
- fs.getDefaultReplication(), fs.getDefaultBlockSize(), null);
+ fs.getDefaultReplication(name), fs.getDefaultBlockSize(name), null);
out.close();
break;
case MKDIRS:
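
[Editor's note: the one-line change above moves from the deprecated
no-argument getDefaultReplication()/getDefaultBlockSize() to the per-path
overloads, which resolve the defaults against whichever filesystem actually
serves the path. A minimal sketch of the per-path form, assuming an
already-initialized FileSystem:]

    import java.io.IOException;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsPermission;

    public class CreateWithPathDefaultsSketch {
      static void create(FileSystem fs, Path name, FsPermission perm)
          throws IOException {
        // Per-path overloads: defaults come from the filesystem owning 'name'.
        FSDataOutputStream out = fs.create(name, perm, true, 4096,
            fs.getDefaultReplication(name), fs.getDefaultBlockSize(name), null);
        out.close();
      }
    }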
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java?rev=1346682&r1=1346681&r2=1346682&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java Wed Jun 6 00:17:38 2012
@@ -248,7 +248,7 @@ public class TestDFSRollback extends Tes
baseDirs = UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
deleteMatchingFiles(baseDirs, "edits.*");
startNameNodeShouldFail(StartupOption.ROLLBACK,
- "No non-corrupt logs for txid ");
+ "Gap in transactions");
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
log("NameNode rollback with no image file", numDirs);
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java?rev=1346682&r1=1346681&r2=1346682&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java Wed Jun 6 00:17:38 2012
@@ -100,7 +100,7 @@ public class TestDFSUtil {
private Configuration setupAddress(String key) {
HdfsConfiguration conf = new HdfsConfiguration();
- conf.set(DFS_FEDERATION_NAMESERVICES, "nn1");
+ conf.set(DFS_NAMESERVICES, "nn1");
conf.set(DFSUtil.addKeySuffixes(key, "nn1"), "localhost:9000");
return conf;
}
@@ -112,7 +112,7 @@ public class TestDFSUtil {
@Test
public void getNameServiceId() {
HdfsConfiguration conf = new HdfsConfiguration();
- conf.set(DFS_FEDERATION_NAMESERVICE_ID, "nn1");
+ conf.set(DFS_NAMESERVICE_ID, "nn1");
assertEquals("nn1", DFSUtil.getNamenodeNameServiceId(conf));
}
@@ -157,7 +157,7 @@ public class TestDFSUtil {
@Test(expected = HadoopIllegalArgumentException.class)
public void testGetNameServiceIdException() {
HdfsConfiguration conf = new HdfsConfiguration();
- conf.set(DFS_FEDERATION_NAMESERVICES, "nn1,nn2");
+ conf.set(DFS_NAMESERVICES, "nn1,nn2");
conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn1"),
"localhost:9000");
conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn2"),
@@ -172,7 +172,7 @@ public class TestDFSUtil {
@Test
public void testGetNameServiceIds() {
HdfsConfiguration conf = new HdfsConfiguration();
- conf.set(DFS_FEDERATION_NAMESERVICES, "nn1,nn2");
+ conf.set(DFS_NAMESERVICES, "nn1,nn2");
Collection<String> nameserviceIds = DFSUtil.getNameServiceIds(conf);
Iterator<String> it = nameserviceIds.iterator();
assertEquals(2, nameserviceIds.size());
@@ -183,11 +183,11 @@ public class TestDFSUtil {
@Test
public void testGetOnlyNameServiceIdOrNull() {
HdfsConfiguration conf = new HdfsConfiguration();
- conf.set(DFS_FEDERATION_NAMESERVICES, "ns1,ns2");
+ conf.set(DFS_NAMESERVICES, "ns1,ns2");
assertNull(DFSUtil.getOnlyNameServiceIdOrNull(conf));
- conf.set(DFS_FEDERATION_NAMESERVICES, "");
+ conf.set(DFS_NAMESERVICES, "");
assertNull(DFSUtil.getOnlyNameServiceIdOrNull(conf));
- conf.set(DFS_FEDERATION_NAMESERVICES, "ns1");
+ conf.set(DFS_NAMESERVICES, "ns1");
assertEquals("ns1", DFSUtil.getOnlyNameServiceIdOrNull(conf));
}
@@ -199,7 +199,7 @@ public class TestDFSUtil {
@Test
public void testMultipleNamenodes() throws IOException {
HdfsConfiguration conf = new HdfsConfiguration();
- conf.set(DFS_FEDERATION_NAMESERVICES, "nn1,nn2");
+ conf.set(DFS_NAMESERVICES, "nn1,nn2");
// Test - configured list of namenodes are returned
final String NN1_ADDRESS = "localhost:9000";
final String NN2_ADDRESS = "localhost:9001";
@@ -270,11 +270,11 @@ public class TestDFSUtil {
final HdfsConfiguration conf = new HdfsConfiguration();
String nsId = "ns1";
- conf.set(DFS_FEDERATION_NAMESERVICES, nsId);
- conf.set(DFS_FEDERATION_NAMESERVICE_ID, nsId);
+ conf.set(DFS_NAMESERVICES, nsId);
+ conf.set(DFS_NAMESERVICE_ID, nsId);
// Set the nameservice specific keys with nameserviceId in the config key
- for (String key : NameNode.NAMESERVICE_SPECIFIC_KEYS) {
+ for (String key : NameNode.NAMENODE_SPECIFIC_KEYS) {
// Note: value is same as the key
conf.set(DFSUtil.addKeySuffixes(key, nsId), key);
}
@@ -284,7 +284,7 @@ public class TestDFSUtil {
// Retrieve the keys without nameserviceId and Ensure generic keys are set
// to the correct value
- for (String key : NameNode.NAMESERVICE_SPECIFIC_KEYS) {
+ for (String key : NameNode.NAMENODE_SPECIFIC_KEYS) {
assertEquals(key, conf.get(key));
}
}
@@ -299,12 +299,12 @@ public class TestDFSUtil {
String nsId = "ns1";
String nnId = "nn1";
- conf.set(DFS_FEDERATION_NAMESERVICES, nsId);
- conf.set(DFS_FEDERATION_NAMESERVICE_ID, nsId);
+ conf.set(DFS_NAMESERVICES, nsId);
+ conf.set(DFS_NAMESERVICE_ID, nsId);
conf.set(DFS_HA_NAMENODES_KEY_PREFIX + "." + nsId, nnId);
// Set the nameservice specific keys with nameserviceId in the config key
- for (String key : NameNode.NAMESERVICE_SPECIFIC_KEYS) {
+ for (String key : NameNode.NAMENODE_SPECIFIC_KEYS) {
// Note: value is same as the key
conf.set(DFSUtil.addKeySuffixes(key, nsId, nnId), key);
}
@@ -314,7 +314,7 @@ public class TestDFSUtil {
// Retrieve the keys without nameserviceId and Ensure generic keys are set
// to the correct value
- for (String key : NameNode.NAMESERVICE_SPECIFIC_KEYS) {
+ for (String key : NameNode.NAMENODE_SPECIFIC_KEYS) {
assertEquals(key, conf.get(key));
}
}
@@ -409,14 +409,20 @@ public class TestDFSUtil {
}
@Test
- public void testGetServerInfo() {
+ public void testGetInfoServer() throws IOException {
HdfsConfiguration conf = new HdfsConfiguration();
conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
UserGroupInformation.setConfiguration(conf);
+
String httpsport = DFSUtil.getInfoServer(null, conf, true);
assertEquals("0.0.0.0:"+DFS_NAMENODE_HTTPS_PORT_DEFAULT, httpsport);
+
String httpport = DFSUtil.getInfoServer(null, conf, false);
assertEquals("0.0.0.0:"+DFS_NAMENODE_HTTP_PORT_DEFAULT, httpport);
+
+ String httpAddress = DFSUtil.getInfoServer(new InetSocketAddress(
+ "localhost", 8020), conf, false);
+ assertEquals("localhost:" + DFS_NAMENODE_HTTP_PORT_DEFAULT, httpAddress);
}
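
[Editor's note: the extra assertion exercises the new getInfoServer()
overload that resolves the HTTP address relative to a known RPC address
rather than the local configuration alone. A hedged usage sketch; the
address values are illustrative:]

    import java.io.IOException;
    import java.net.InetSocketAddress;
    import org.apache.hadoop.hdfs.DFSUtil;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class InfoServerSketch {
      static String httpAddressFor(String rpcHost) throws IOException {
        HdfsConfiguration conf = new HdfsConfiguration();
        // The hostname is taken from the RPC address; the port falls back to
        // the configured (or default) HTTP port.
        return DFSUtil.getInfoServer(
            new InetSocketAddress(rpcHost, 8020), conf, false);
      }
    }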
@Test
@@ -430,7 +436,7 @@ public class TestDFSUtil {
conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://ns1");
// Two nameservices, each with two NNs.
- conf.set(DFS_FEDERATION_NAMESERVICES, "ns1,ns2");
+ conf.set(DFS_NAMESERVICES, "ns1,ns2");
conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, "ns1"),
"ns1-nn1,ns1-nn2");
conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, "ns2"),
@@ -491,7 +497,7 @@ public class TestDFSUtil {
final String NS1_NN2_HOST = "ns1-nn1.example.com:8020";
final String NS1_NN2_HOST_SVC = "ns1-nn2.example.com:8021";
- conf.set(DFS_FEDERATION_NAMESERVICES, "ns1");
+ conf.set(DFS_NAMESERVICES, "ns1");
conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, "ns1"),"nn1,nn2");
conf.set(DFSUtil.addKeySuffixes(
@@ -533,30 +539,74 @@ public class TestDFSUtil {
public void testGetNNUris() throws Exception {
HdfsConfiguration conf = new HdfsConfiguration();
- final String NS1_NN1_HOST = "ns1-nn1.example.com:8020";
- final String NS1_NN2_HOST = "ns1-nn1.example.com:8020";
- final String NS2_NN_HOST = "ns2-nn.example.com:8020";
- final String NN_HOST = "nn.example.com:8020";
+ final String NS1_NN1_ADDR = "ns1-nn1.example.com:8020";
+ final String NS1_NN2_ADDR = "ns1-nn2.example.com:8020";
+ final String NS2_NN_ADDR = "ns2-nn.example.com:8020";
+ final String NN1_ADDR = "nn.example.com:8020";
+ final String NN1_SRVC_ADDR = "nn.example.com:8021";
+ final String NN2_ADDR = "nn2.example.com:8020";
- conf.set(DFS_FEDERATION_NAMESERVICES, "ns1,ns2");
+ conf.set(DFS_NAMESERVICES, "ns1,ns2");
conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, "ns1"),"nn1,nn2");
conf.set(DFSUtil.addKeySuffixes(
- DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "nn1"), NS1_NN1_HOST);
+ DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "nn1"), NS1_NN1_ADDR);
conf.set(DFSUtil.addKeySuffixes(
- DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "nn2"), NS1_NN2_HOST);
+ DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "nn2"), NS1_NN2_ADDR);
conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "ns2"),
- NS2_NN_HOST);
+ NS2_NN_ADDR);
+
+ conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, "hdfs://" + NN1_ADDR);
+
+ conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://" + NN2_ADDR);
+
+ Collection<URI> uris = DFSUtil.getNameServiceUris(conf,
+ DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
- conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, "hdfs://" + NN_HOST);
+ assertEquals(4, uris.size());
+ assertTrue(uris.contains(new URI("hdfs://ns1")));
+ assertTrue(uris.contains(new URI("hdfs://" + NS2_NN_ADDR)));
+ assertTrue(uris.contains(new URI("hdfs://" + NN1_ADDR)));
+ assertTrue(uris.contains(new URI("hdfs://" + NN2_ADDR)));
+
+ // Make sure that non-HDFS URIs in fs.defaultFS don't get included.
+ conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY,
+ "viewfs://vfs-name.example.com");
+
+ uris = DFSUtil.getNameServiceUris(conf, DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
+ DFS_NAMENODE_RPC_ADDRESS_KEY);
+
+ assertEquals(3, uris.size());
+ assertTrue(uris.contains(new URI("hdfs://ns1")));
+ assertTrue(uris.contains(new URI("hdfs://" + NS2_NN_ADDR)));
+ assertTrue(uris.contains(new URI("hdfs://" + NN1_ADDR)));
- Collection<URI> uris = DFSUtil.getNameServiceUris(conf, DFS_NAMENODE_RPC_ADDRESS_KEY,
- DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY);
+ // Make sure that an HA URI being the default URI doesn't result in multiple
+ // entries being returned.
+ conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://ns1");
+
+ uris = DFSUtil.getNameServiceUris(conf, DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
+ DFS_NAMENODE_RPC_ADDRESS_KEY);
assertEquals(3, uris.size());
assertTrue(uris.contains(new URI("hdfs://ns1")));
- assertTrue(uris.contains(new URI("hdfs://" + NS2_NN_HOST)));
- assertTrue(uris.contains(new URI("hdfs://" + NN_HOST)));
+ assertTrue(uris.contains(new URI("hdfs://" + NS2_NN_ADDR)));
+ assertTrue(uris.contains(new URI("hdfs://" + NN1_ADDR)));
+
+ // Make sure that when a service RPC address is configured that is
+ // distinct from the client RPC address, and the client RPC address is
+ // also used as the default URI, the client URI does not end up in the
+ // set of URIs returned.
+ conf = new HdfsConfiguration();
+ conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://" + NN1_ADDR);
+ conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, NN1_ADDR);
+ conf.set(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, NN1_SRVC_ADDR);
+
+ uris = DFSUtil.getNameServiceUris(conf, DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
+ DFS_NAMENODE_RPC_ADDRESS_KEY);
+
+ assertEquals(1, uris.size());
+ assertTrue(uris.contains(new URI("hdfs://" + NN1_SRVC_ADDR)));
}
@Test
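
[Editor's note: testGetNNUris now pins down three dedup rules of
getNameServiceUris(): an HA nameservice contributes a single logical
hdfs://ns URI, a non-hdfs fs.defaultFS is skipped, and a client-side default
URI is dropped when a distinct service RPC address already covers that
NameNode. A condensed sketch driving the same API with just a federated pair;
addresses are illustrative, and DFS_NAMESERVICES is the key this commit
renames to:]

    import java.net.URI;
    import java.util.Collection;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.DFSUtil;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class NameServiceUrisSketch {
      static Collection<URI> federatedUris() {
        HdfsConfiguration conf = new HdfsConfiguration();
        conf.set(DFSConfigKeys.DFS_NAMESERVICES, "ns1,ns2");
        conf.set(DFSUtil.addKeySuffixes(
            DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1"),
            "nn1.example.com:8020");
        conf.set(DFSUtil.addKeySuffixes(
            DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, "ns2"),
            "nn2.example.com:8020");
        // Keys are consulted in order: service RPC first, then client RPC.
        return DFSUtil.getNameServiceUris(conf,
            DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
            DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
      }
    }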
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java?rev=1346682&r1=1346681&r2=1346682&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java Wed Jun 6 00:17:38 2012
@@ -159,7 +159,8 @@ public class TestDataTransferProtocol ex
block.getNumBytes(), // OffsetInBlock
100, // sequencenumber
true, // lastPacketInBlock
- 0); // chunk length
+ 0, // chunk length
+ false); // sync block
hdr.write(sendOut);
sendOut.writeInt(0); // zero checksum
@@ -402,7 +403,8 @@ public class TestDataTransferProtocol ex
0, // offset in block,
100, // seqno
false, // last packet
- -1 - random.nextInt(oneMil)); // bad datalen
+ -1 - random.nextInt(oneMil), // bad datalen
+ false); // sync block
hdr.write(sendOut);
sendResponse(Status.SUCCESS, "", null, recvOut);
@@ -424,7 +426,8 @@ public class TestDataTransferProtocol ex
0, // OffsetInBlock
100, // sequencenumber
true, // lastPacketInBlock
- 0); // chunk length
+ 0, // chunk length
+ false); // sync block
hdr.write(sendOut);
sendOut.writeInt(0); // zero checksum
sendOut.flush();
@@ -508,8 +511,8 @@ public class TestDataTransferProtocol ex
1024, // OffsetInBlock
100, // sequencenumber
false, // lastPacketInBlock
- 4096); // chunk length
-
+ 4096, // chunk length
+ false); // sync block
ByteArrayOutputStream baos = new ByteArrayOutputStream();
hdr.write(new DataOutputStream(baos));
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java?rev=1346682&r1=1346681&r2=1346682&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java Wed Jun 6 00:17:38 2012
@@ -39,8 +39,7 @@ import org.apache.hadoop.hdfs.server.nam
import org.apache.log4j.Level;
/**
- * This class tests that a file need not be closed before its
- * data can be read by another client.
+ * This class tests that pipelines survive data node death and recovery.
*/
public class TestDatanodeDeath extends TestCase {
{
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java?rev=1346682&r1=1346681&r2=1346682&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java Wed Jun 6 00:17:38 2012
@@ -38,8 +38,7 @@ import org.apache.hadoop.util.VersionInf
import org.junit.Test;
/**
- * This class tests that a file need not be closed before its
- * data can be read by another client.
+ * This class tests data node registration.
*/
public class TestDatanodeRegistration {
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java?rev=1346682&r1=1346681&r2=1346682&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java Wed Jun 6 00:17:38 2012
@@ -21,10 +21,6 @@ import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
-import junit.extensions.TestSetup;
-import junit.framework.Test;
-import junit.framework.TestSuite;
-
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
@@ -43,9 +39,13 @@ import org.apache.hadoop.hdfs.server.nam
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
import org.apache.log4j.Level;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import static org.junit.Assert.*;
/** This class implements some of tests posted in HADOOP-2658. */
-public class TestFileAppend3 extends junit.framework.TestCase {
+public class TestFileAppend3 {
{
((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
@@ -64,29 +64,28 @@ public class TestFileAppend3 extends jun
private static MiniDFSCluster cluster;
private static DistributedFileSystem fs;
- public static Test suite() {
- return new TestSetup(new TestSuite(TestFileAppend3.class)) {
- protected void setUp() throws java.lang.Exception {
- AppendTestUtil.LOG.info("setUp()");
- conf = new HdfsConfiguration();
- conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 512);
- buffersize = conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096);
- cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_NUM).build();
- fs = (DistributedFileSystem)cluster.getFileSystem();
- }
-
- protected void tearDown() throws Exception {
- AppendTestUtil.LOG.info("tearDown()");
- if(fs != null) fs.close();
- if(cluster != null) cluster.shutdown();
- }
- };
+ @BeforeClass
+ public static void setUp() throws java.lang.Exception {
+ AppendTestUtil.LOG.info("setUp()");
+ conf = new HdfsConfiguration();
+ conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 512);
+ buffersize = conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096);
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_NUM).build();
+ fs = (DistributedFileSystem)cluster.getFileSystem();
+ }
+
+ @AfterClass
+ public static void tearDown() throws Exception {
+ AppendTestUtil.LOG.info("tearDown()");
+ if(fs != null) fs.close();
+ if(cluster != null) cluster.shutdown();
}
/**
* TC1: Append on block boundary.
* @throws IOException an exception might be thrown
*/
+ @Test
public void testTC1() throws Exception {
final Path p = new Path("/TC1/foo");
System.out.println("p=" + p);
@@ -115,6 +114,7 @@ public class TestFileAppend3 extends jun
* TC2: Append on non-block boundary.
* @throws IOException an exception might be thrown
*/
+ @Test
public void testTC2() throws Exception {
final Path p = new Path("/TC2/foo");
System.out.println("p=" + p);
@@ -145,6 +145,7 @@ public class TestFileAppend3 extends jun
* TC5: Only one simultaneous append.
* @throws IOException an exception might be thrown
*/
+ @Test
public void testTC5() throws Exception {
final Path p = new Path("/TC5/foo");
System.out.println("p=" + p);
@@ -175,6 +176,7 @@ public class TestFileAppend3 extends jun
* TC7: Corrupted replicas are present.
* @throws IOException an exception might be thrown
*/
+ @Test
public void testTC7() throws Exception {
final short repl = 2;
final Path p = new Path("/TC7/foo");
@@ -224,6 +226,7 @@ public class TestFileAppend3 extends jun
* TC11: Racing rename
* @throws IOException an exception might be thrown
*/
+ @Test
public void testTC11() throws Exception {
final Path p = new Path("/TC11/foo");
System.out.println("p=" + p);
@@ -282,6 +285,7 @@ public class TestFileAppend3 extends jun
* TC12: Append to partial CRC chunk
* @throws IOException an exception might be thrown
*/
+ @Test
public void testTC12() throws Exception {
final Path p = new Path("/TC12/foo");
System.out.println("p=" + p);
@@ -313,6 +317,7 @@ public class TestFileAppend3 extends jun
* *
* @throws IOException
*/
+ @Test
public void testAppendToPartialChunk() throws IOException {
final Path p = new Path("/partialChunk/foo");
final int fileLen = 513;
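
[Editor's note: the bulk of this hunk is a JUnit 3 to JUnit 4 migration: the
suite()/TestSetup wrapper becomes @BeforeClass/@AfterClass, and each testTC*
method gains @Test. The migration pattern in miniature, with a hypothetical
shared resource standing in for the MiniDFSCluster:]

    import static org.junit.Assert.assertEquals;
    import org.junit.AfterClass;
    import org.junit.BeforeClass;
    import org.junit.Test;

    public class JUnit4FixtureSketch {
      private static StringBuilder shared;  // stands in for cluster/fs

      @BeforeClass
      public static void setUp() {          // replaces TestSetup.setUp()
        shared = new StringBuilder("ready");
      }

      @AfterClass
      public static void tearDown() {       // replaces TestSetup.tearDown()
        shared = null;
      }

      @Test                                 // replaces the test* naming rule
      public void testSharedResource() {
        assertEquals("ready", shared.toString());
      }
    }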
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppendRestart.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppendRestart.java?rev=1346682&r1=1346681&r2=1346682&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppendRestart.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppendRestart.java Wed Jun 6 00:17:38 2012
@@ -176,4 +176,32 @@ public class TestFileAppendRestart {
cluster.shutdown();
}
}
+
+ /**
+ * Test appending to a file when one of the datanodes in the existing pipeline is down.
+ * @throws Exception
+ */
+ @Test
+ public void testAppendWithPipelineRecovery() throws Exception {
+ Configuration conf = new Configuration();
+ MiniDFSCluster cluster = null;
+ try {
+ cluster = new MiniDFSCluster.Builder(conf).manageDataDfsDirs(true)
+ .manageNameDfsDirs(true).numDataNodes(4)
+ .racks(new String[] { "/rack1", "/rack1", "/rack1", "/rack2" })
+ .build();
+ cluster.waitActive();
+
+ DistributedFileSystem fs = cluster.getFileSystem();
+ Path path = new Path("/test1");
+ DFSTestUtil.createFile(fs, path, 1024, (short) 3, 1L);
+
+ cluster.stopDataNode(3);
+ DFSTestUtil.appendFile(fs, path, "hello");
+ } finally {
+ if (null != cluster) {
+ cluster.shutdown();
+ }
+ }
+ }
}
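
[Editor's note: the new test stops short of verifying the appended bytes; a
natural hedged extension, not part of the commit, reads the file back so the
appended suffix can be asserted on:]

    import java.io.IOException;
    import java.io.InputStreamReader;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class AppendVerifySketch {
      // Reads the whole file back as text so the appended suffix is checkable.
      static String readBack(FileSystem fs, Path path) throws IOException {
        InputStreamReader in = new InputStreamReader(fs.open(path));
        try {
          StringBuilder sb = new StringBuilder();
          int c;
          while ((c = in.read()) != -1) {
            sb.append((char) c);
          }
          return sb.toString();
        } finally {
          in.close();
        }
      }
    }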
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationClient.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationClient.java?rev=1346682&r1=1346681&r2=1346682&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationClient.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationClient.java Wed Jun 6 00:17:38 2012
@@ -31,11 +31,13 @@ import org.apache.hadoop.hdfs.server.pro
import org.apache.hadoop.io.IOUtils;
import org.apache.log4j.Level;
+import org.junit.Test;
+import static org.junit.Assert.assertEquals;
+
/**
- * This class tests that a file need not be closed before its
- * data can be read by another client.
+ * This class tests client lease recovery.
*/
-public class TestFileCreationClient extends junit.framework.TestCase {
+public class TestFileCreationClient {
static final String DIR = "/" + TestFileCreationClient.class.getSimpleName() + "/";
{
@@ -46,6 +48,7 @@ public class TestFileCreationClient exte
}
/** Test lease recovery Triggered by DFSClient. */
+ @Test
public void testClientTriggeredLeaseRecovery() throws Exception {
final int REPLICATION = 3;
Configuration conf = new HdfsConfiguration();