Posted to hdfs-commits@hadoop.apache.org by sz...@apache.org on 2012/06/28 09:00:25 UTC
svn commit: r1354832 [4/5] - in
/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project: ./ hadoop-hdfs-httpfs/
hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/
hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/
hadoop-h...
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsBinaryLoader.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsBinaryLoader.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsBinaryLoader.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsBinaryLoader.java Thu Jun 28 06:59:38 2012
@@ -18,9 +18,12 @@
package org.apache.hadoop.hdfs.tools.offlineEditsViewer;
import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
-
+import org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;
import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream;
@@ -33,17 +36,21 @@ import org.apache.hadoop.hdfs.server.nam
class OfflineEditsBinaryLoader implements OfflineEditsLoader {
private OfflineEditsVisitor visitor;
private EditLogInputStream inputStream;
- private boolean fixTxIds;
+ private final boolean fixTxIds;
+ private final boolean recoveryMode;
private long nextTxId;
+ public static final Log LOG =
+ LogFactory.getLog(OfflineEditsBinaryLoader.class.getName());
/**
* Constructor
*/
public OfflineEditsBinaryLoader(OfflineEditsVisitor visitor,
- EditLogInputStream inputStream) {
+ EditLogInputStream inputStream, OfflineEditsViewer.Flags flags) {
this.visitor = visitor;
this.inputStream = inputStream;
- this.fixTxIds = false;
+ this.fixTxIds = flags.getFixTxIds();
+ this.recoveryMode = flags.getRecoveryMode();
this.nextTxId = -1;
}
@@ -51,9 +58,9 @@ class OfflineEditsBinaryLoader implement
* Loads edits file, uses visitor to process all elements
*/
public void loadEdits() throws IOException {
- try {
- visitor.start(inputStream.getVersion());
- while (true) {
+ visitor.start(inputStream.getVersion());
+ while (true) {
+ try {
FSEditLogOp op = inputStream.readOp();
if (op == null)
break;
@@ -68,16 +75,24 @@ class OfflineEditsBinaryLoader implement
nextTxId++;
}
visitor.visitOp(op);
+ } catch (IOException e) {
+ if (!recoveryMode) {
+ // Tell the visitor to clean up, then re-throw the exception
+ visitor.close(e);
+ throw e;
+ }
+ LOG.error("Got IOException while reading stream! Resyncing.", e);
+ inputStream.resync();
+ } catch (RuntimeException e) {
+ if (!recoveryMode) {
+ // Tell the visitor to clean up, then re-throw the exception
+ visitor.close(e);
+ throw e;
+ }
+ LOG.error("Got RuntimeException while reading stream! Resyncing.", e);
+ inputStream.resync();
}
- visitor.close(null);
- } catch(IOException e) {
- // Tell the visitor to clean up, then re-throw the exception
- visitor.close(e);
- throw e;
}
- }
-
- public void setFixTxIds() {
- fixTxIds = true;
+ visitor.close(null);
}
}
\ No newline at end of file
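The loadEdits() rewrite above moves the try/catch inside the read loop so that a failed readOp() no longer aborts the whole run. A condensed sketch of the resulting control flow (not the full method; the RuntimeException branch is identical):

    while (true) {
      try {
        FSEditLogOp op = inputStream.readOp();
        if (op == null)
          break;                      // end of stream
        visitor.visitOp(op);          // txid fix-up elided here
      } catch (IOException e) {
        if (!recoveryMode) {
          visitor.close(e);           // let the visitor clean up, then fail
          throw e;
        }
        LOG.error("Got IOException while reading stream! Resyncing.", e);
        inputStream.resync();         // skip the corrupt region and continue
      }
    }
    visitor.close(null);              // normal completion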
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsLoader.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsLoader.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsLoader.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsLoader.java Thu Jun 28 06:59:38 2012
@@ -22,6 +22,7 @@ import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream;
@@ -36,13 +37,12 @@ interface OfflineEditsLoader {
abstract public void loadEdits() throws IOException;
- public abstract void setFixTxIds();
-
static class OfflineEditsLoaderFactory {
static OfflineEditsLoader createLoader(OfflineEditsVisitor visitor,
- String inputFileName, boolean xmlInput) throws IOException {
+ String inputFileName, boolean xmlInput,
+ OfflineEditsViewer.Flags flags) throws IOException {
if (xmlInput) {
- return new OfflineEditsXmlLoader(visitor, new File(inputFileName));
+ return new OfflineEditsXmlLoader(visitor, new File(inputFileName), flags);
} else {
File file = null;
EditLogInputStream elis = null;
@@ -51,7 +51,7 @@ interface OfflineEditsLoader {
file = new File(inputFileName);
elis = new EditLogFileInputStream(file, HdfsConstants.INVALID_TXID,
HdfsConstants.INVALID_TXID, false);
- loader = new OfflineEditsBinaryLoader(visitor, elis);
+ loader = new OfflineEditsBinaryLoader(visitor, elis, flags);
} finally {
if ((loader == null) && (elis != null)) {
elis.close();
@@ -61,4 +61,4 @@ interface OfflineEditsLoader {
}
}
}
-}
\ No newline at end of file
+}
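With setFixTxIds() removed from the OfflineEditsLoader interface, callers now pass all options through OfflineEditsViewer.Flags at construction time. A hedged usage sketch (file names are illustrative):

    OfflineEditsVisitor visitor =
        OfflineEditsVisitorFactory.getEditsVisitor("edits.xml", "xml", false);
    OfflineEditsViewer.Flags flags = new OfflineEditsViewer.Flags();
    flags.setFixTxIds();       // replaces the removed loader.setFixTxIds()
    flags.setRecoveryMode();   // only consulted by the binary loader
    OfflineEditsLoader loader = OfflineEditsLoaderFactory.createLoader(
        visitor, "edits", false /* xmlInput */, flags);
    loader.loadEdits();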
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsViewer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsViewer.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsViewer.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsViewer.java Thu Jun 28 06:59:38 2012
@@ -17,16 +17,10 @@
*/
package org.apache.hadoop.hdfs.tools.offlineEditsViewer;
-import java.io.EOFException;
-import java.io.File;
-import java.io.IOException;
-
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream;
-import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream;
import org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsLoader.OfflineEditsLoaderFactory;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
@@ -37,7 +31,6 @@ import org.apache.commons.cli.OptionBuil
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.commons.cli.PosixParser;
-import org.xml.sax.SAXParseException;
/**
* This class implements an offline edits viewer, tool that
@@ -78,6 +71,9 @@ public class OfflineEditsViewer extends
"-f,--fix-txids Renumber the transaction IDs in the input,\n" +
" so that there are no gaps or invalid " +
" transaction IDs.\n" +
+ "-r,--recover When reading binary edit logs, use recovery \n" +
+ " mode. This will give you the chance to skip \n" +
+ " corrupt parts of the edit log.\n" +
"-v,--verbose More verbose output, prints the input and\n" +
" output filenames, for processors that write\n" +
" to a file, also output to screen. On large\n" +
@@ -113,6 +109,7 @@ public class OfflineEditsViewer extends
options.addOption("p", "processor", true, "");
options.addOption("v", "verbose", false, "");
options.addOption("f", "fix-txids", false, "");
+ options.addOption("r", "recover", false, "");
options.addOption("h", "help", false, "");
return options;
@@ -128,23 +125,20 @@ public class OfflineEditsViewer extends
* @return 0 on success; error code otherwise
*/
public int go(String inputFileName, String outputFileName, String processor,
- boolean printToScreen, boolean fixTxIds, OfflineEditsVisitor visitor)
+ Flags flags, OfflineEditsVisitor visitor)
{
- if (printToScreen) {
+ if (flags.getPrintToScreen()) {
System.out.println("input [" + inputFileName + "]");
System.out.println("output [" + outputFileName + "]");
}
try {
if (visitor == null) {
visitor = OfflineEditsVisitorFactory.getEditsVisitor(
- outputFileName, processor, printToScreen);
+ outputFileName, processor, flags.getPrintToScreen());
}
boolean xmlInput = inputFileName.endsWith(".xml");
OfflineEditsLoader loader = OfflineEditsLoaderFactory.
- createLoader(visitor, inputFileName, xmlInput);
- if (fixTxIds) {
- loader.setFixTxIds();
- }
+ createLoader(visitor, inputFileName, xmlInput, flags);
loader.loadEdits();
} catch(Exception e) {
System.err.println("Encountered exception. Exiting: " + e.getMessage());
@@ -154,6 +148,39 @@ public class OfflineEditsViewer extends
return 0;
}
+ public static class Flags {
+ private boolean printToScreen = false;
+ private boolean fixTxIds = false;
+ private boolean recoveryMode = false;
+
+ public Flags() {
+ }
+
+ public boolean getPrintToScreen() {
+ return printToScreen;
+ }
+
+ public void setPrintToScreen() {
+ printToScreen = true;
+ }
+
+ public boolean getFixTxIds() {
+ return fixTxIds;
+ }
+
+ public void setFixTxIds() {
+ fixTxIds = true;
+ }
+
+ public boolean getRecoveryMode() {
+ return recoveryMode;
+ }
+
+ public void setRecoveryMode() {
+ recoveryMode = true;
+ }
+ }
+
/**
* Main entry point for ToolRunner (see ToolRunner docs)
*
@@ -177,6 +204,7 @@ public class OfflineEditsViewer extends
printHelp();
return -1;
}
+
if(cmd.hasOption("h")) { // print help and exit
printHelp();
return -1;
@@ -187,10 +215,17 @@ public class OfflineEditsViewer extends
if(processor == null) {
processor = defaultProcessor;
}
- boolean printToScreen = cmd.hasOption("v");
- boolean fixTxIds = cmd.hasOption("f");
- return go(inputFileName, outputFileName, processor,
- printToScreen, fixTxIds, null);
+ Flags flags = new Flags();
+ if (cmd.hasOption("r")) {
+ flags.setRecoveryMode();
+ }
+ if (cmd.hasOption("f")) {
+ flags.setFixTxIds();
+ }
+ if (cmd.hasOption("v")) {
+ flags.setPrintToScreen();
+ }
+ return go(inputFileName, outputFileName, processor, flags, null);
}
/**
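The new -r/--recover switch maps onto Flags.setRecoveryMode() in run(). Invoking the viewer programmatically, the equivalent of something like "hdfs oev -r -f -i edits -o edits.xml" (command-line form assumed) is:

    OfflineEditsViewer.Flags flags = new OfflineEditsViewer.Flags();
    flags.setRecoveryMode();   // -r: resync past corrupt binary edit log regions
    flags.setFixTxIds();       // -f: renumber transaction IDs
    int ret = new OfflineEditsViewer().go(
        "edits", "edits.xml", "xml", flags, null /* default visitor */);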
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsXmlLoader.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsXmlLoader.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsXmlLoader.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsXmlLoader.java Thu Jun 28 06:59:38 2012
@@ -29,7 +29,7 @@ import org.apache.hadoop.hdfs.util.XMLUt
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.OpInstanceCache;
-
+import org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer;
import org.apache.hadoop.hdfs.util.XMLUtils.Stanza;
import org.xml.sax.Attributes;
import org.xml.sax.InputSource;
@@ -46,9 +46,9 @@ import org.xml.sax.helpers.XMLReaderFact
@InterfaceStability.Unstable
class OfflineEditsXmlLoader
extends DefaultHandler implements OfflineEditsLoader {
- private boolean fixTxIds;
- private OfflineEditsVisitor visitor;
- private FileReader fileReader;
+ private final boolean fixTxIds;
+ private final OfflineEditsVisitor visitor;
+ private final FileReader fileReader;
private ParseState state;
private Stanza stanza;
private Stack<Stanza> stanzaStack;
@@ -68,9 +68,10 @@ class OfflineEditsXmlLoader
}
public OfflineEditsXmlLoader(OfflineEditsVisitor visitor,
- File inputFile) throws FileNotFoundException {
+ File inputFile, OfflineEditsViewer.Flags flags) throws FileNotFoundException {
this.visitor = visitor;
this.fileReader = new FileReader(inputFile);
+ this.fixTxIds = flags.getFixTxIds();
}
/**
@@ -250,9 +251,4 @@ class OfflineEditsXmlLoader
public void characters (char ch[], int start, int length) {
cbuf.append(ch, start, length);
}
-
- @Override
- public void setFixTxIds() {
- fixTxIds = true;
- }
}
\ No newline at end of file
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/DataTransferThrottler.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/DataTransferThrottler.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/DataTransferThrottler.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/DataTransferThrottler.java Thu Jun 28 06:59:38 2012
@@ -17,6 +17,8 @@
*/
package org.apache.hadoop.hdfs.util;
+import static org.apache.hadoop.hdfs.server.common.Util.monotonicNow;
+
/**
* a class to throttle the data transfers.
* This class is thread safe. It can be shared by multiple threads.
@@ -26,9 +28,9 @@ package org.apache.hadoop.hdfs.util;
public class DataTransferThrottler {
private long period; // period over which bw is imposed
private long periodExtension; // Max period over which bw accumulates.
- private long bytesPerPeriod; // total number of bytes can be sent in each period
- private long curPeriodStart; // current period starting time
- private long curReserve; // remaining bytes can be sent in the period
+ private long bytesPerPeriod; // total number of bytes can be sent in each period
+ private long curPeriodStart; // current period starting time
+ private long curReserve; // remaining bytes can be sent in the period
private long bytesAlreadyUsed;
/** Constructor
@@ -45,7 +47,7 @@ public class DataTransferThrottler {
* @param bandwidthPerSec bandwidth allowed in bytes per second.
*/
public DataTransferThrottler(long period, long bandwidthPerSec) {
- this.curPeriodStart = System.currentTimeMillis();
+ this.curPeriodStart = monotonicNow();
this.period = period;
this.curReserve = this.bytesPerPeriod = bandwidthPerSec*period/1000;
this.periodExtension = period*3;
@@ -87,7 +89,7 @@ public class DataTransferThrottler {
bytesAlreadyUsed += numOfBytes;
while (curReserve <= 0) {
- long now = System.currentTimeMillis();
+ long now = monotonicNow();
long curPeriodEnd = curPeriodStart + period;
if ( now < curPeriodEnd ) {
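Switching DataTransferThrottler from System.currentTimeMillis() to monotonicNow() matters because the wall clock can jump backwards (NTP corrections, manual resets), which would corrupt the period accounting. A minimal sketch of such a helper, assuming monotonicNow() wraps System.nanoTime() as elsewhere in Hadoop:

    /** Current time in milliseconds from a monotonic source; suitable for
     *  measuring elapsed time, not for dates. */
    public static long monotonicNow() {
      final long NANOS_PER_MILLI = 1000L * 1000L;
      return System.nanoTime() / NANOS_PER_MILLI;
    }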
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java Thu Jun 28 06:59:38 2012
@@ -34,11 +34,14 @@ import java.util.List;
import java.util.Map;
import java.util.StringTokenizer;
+import javax.ws.rs.core.MediaType;
+
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.fs.DelegationTokenRenewer;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileAlreadyExistsException;
@@ -57,7 +60,6 @@ import org.apache.hadoop.hdfs.protocol.H
import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenRenewer;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
@@ -252,9 +254,23 @@ public class WebHdfsFileSystem extends F
return f.isAbsolute()? f: new Path(workingDir, f);
}
- static Map<?, ?> jsonParse(final InputStream in) throws IOException {
+ static Map<?, ?> jsonParse(final HttpURLConnection c, final boolean useErrorStream
+ ) throws IOException {
+ if (c.getContentLength() == 0) {
+ return null;
+ }
+ final InputStream in = useErrorStream? c.getErrorStream(): c.getInputStream();
if (in == null) {
- throw new IOException("The input stream is null.");
+ throw new IOException("The " + (useErrorStream? "error": "input") + " stream is null.");
+ }
+ final String contentType = c.getContentType();
+ if (contentType != null) {
+ final MediaType parsed = MediaType.valueOf(contentType);
+ if (!MediaType.APPLICATION_JSON_TYPE.isCompatible(parsed)) {
+ throw new IOException("Content-Type \"" + contentType
+ + "\" is incompatible with \"" + MediaType.APPLICATION_JSON
+ + "\" (parsed=\"" + parsed + "\")");
+ }
}
return (Map<?, ?>)JSON.parse(new InputStreamReader(in));
}
@@ -265,7 +281,7 @@ public class WebHdfsFileSystem extends F
if (code != op.getExpectedHttpResponseCode()) {
final Map<?, ?> m;
try {
- m = jsonParse(conn.getErrorStream());
+ m = jsonParse(conn, true);
} catch(IOException e) {
throw new IOException("Unexpected HTTP response: code=" + code + " != "
+ op.getExpectedHttpResponseCode() + ", " + op.toQueryString()
@@ -425,7 +441,7 @@ public class WebHdfsFileSystem extends F
final HttpURLConnection conn = httpConnect(op, fspath, parameters);
try {
final Map<?, ?> m = validateResponse(op, conn);
- return m != null? m: jsonParse(conn.getInputStream());
+ return m != null? m: jsonParse(conn, false);
} finally {
conn.disconnect();
}
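jsonParse() now rejects non-JSON responses up front instead of failing later inside the JSON parser. MediaType.isCompatible() is used rather than string equality so that parameters such as a charset still match. A small illustration of that contract (per the JAX-RS MediaType API):

    MediaType json = MediaType.APPLICATION_JSON_TYPE;
    json.isCompatible(MediaType.valueOf("application/json"));                 // true
    json.isCompatible(MediaType.valueOf("application/json; charset=utf-8")); // true (parameters ignored)
    json.isCompatible(MediaType.valueOf("text/html"));                       // false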
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/InetSocketAddressParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/InetSocketAddressParam.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/InetSocketAddressParam.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/InetSocketAddressParam.java Thu Jun 28 06:59:38 2012
@@ -44,6 +44,10 @@ abstract class InetSocketAddressParam
@Override
InetSocketAddress parse(final String str) {
+ if (str == null) {
+ throw new IllegalArgumentException("The input string is null: expect "
+ + getDomain());
+ }
final int i = str.indexOf(':');
if (i < 0) {
throw new IllegalArgumentException("Failed to parse \"" + str
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/LongParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/LongParam.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/LongParam.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/LongParam.java Thu Jun 28 06:59:38 2012
@@ -59,7 +59,7 @@ abstract class LongParam extends Param<L
@Override
public String getDomain() {
- return "<" + NULL + " | short in radix " + radix + ">";
+ return "<" + NULL + " | long in radix " + radix + ">";
}
@Override
@@ -72,7 +72,7 @@ abstract class LongParam extends Param<L
}
}
- /** Convert a Short to a String. */
+ /** Convert a Long to a String. */
String toString(final Long n) {
return n == null? NULL: Long.toString(n, radix);
}
Propchange: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:r1346682-1354801
Propchange: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1346682-1354801
Propchange: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1346682-1354801
Propchange: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1346682-1354801
Propchange: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1346682-1354801
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java Thu Jun 28 06:59:38 2012
@@ -25,8 +25,6 @@ import static org.apache.hadoop.hdfs.DFS
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICES;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICE_ID;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY;
@@ -39,6 +37,8 @@ import static org.apache.hadoop.hdfs.DFS
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICES;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICE_ID;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI;
@@ -66,12 +66,9 @@ import org.apache.hadoop.conf.Configurat
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.ha.HAServiceProtocol;
+import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
-import org.apache.hadoop.ha.HAServiceProtocolHelper;
import org.apache.hadoop.ha.ServiceFailedException;
-import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
-import org.apache.hadoop.ha.protocolPB.HAServiceProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.MiniDFSNNTopology.NNConf;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
@@ -134,6 +131,7 @@ public class MiniDFSCluster {
private boolean format = true;
private boolean manageNameDfsDirs = true;
private boolean manageNameDfsSharedDirs = true;
+ private boolean enableManagedDfsDirsRedundancy = true;
private boolean manageDataDfsDirs = true;
private StartupOption option = null;
private String[] racks = null;
@@ -187,7 +185,7 @@ public class MiniDFSCluster {
this.manageNameDfsDirs = val;
return this;
}
-
+
/**
* Default: true
*/
@@ -199,6 +197,14 @@ public class MiniDFSCluster {
/**
* Default: true
*/
+ public Builder enableManagedDfsDirsRedundancy(boolean val) {
+ this.enableManagedDfsDirsRedundancy = val;
+ return this;
+ }
+
+ /**
+ * Default: true
+ */
public Builder manageDataDfsDirs(boolean val) {
this.manageDataDfsDirs = val;
return this;
@@ -298,6 +304,7 @@ public class MiniDFSCluster {
builder.format,
builder.manageNameDfsDirs,
builder.manageNameDfsSharedDirs,
+ builder.enableManagedDfsDirsRedundancy,
builder.manageDataDfsDirs,
builder.option,
builder.racks,
@@ -385,7 +392,7 @@ public class MiniDFSCluster {
public MiniDFSCluster(Configuration conf,
int numDataNodes,
StartupOption nameNodeOperation) throws IOException {
- this(0, conf, numDataNodes, false, false, false, nameNodeOperation,
+ this(0, conf, numDataNodes, false, false, false, false, nameNodeOperation,
null, null, null);
}
@@ -407,7 +414,8 @@ public class MiniDFSCluster {
int numDataNodes,
boolean format,
String[] racks) throws IOException {
- this(0, conf, numDataNodes, format, true, true, null, racks, null, null);
+ this(0, conf, numDataNodes, format, true, true, true, null,
+ racks, null, null);
}
/**
@@ -429,7 +437,8 @@ public class MiniDFSCluster {
int numDataNodes,
boolean format,
String[] racks, String[] hosts) throws IOException {
- this(0, conf, numDataNodes, format, true, true, null, racks, hosts, null);
+ this(0, conf, numDataNodes, format, true, true, true, null,
+ racks, hosts, null);
}
/**
@@ -462,8 +471,8 @@ public class MiniDFSCluster {
boolean manageDfsDirs,
StartupOption operation,
String[] racks) throws IOException {
- this(nameNodePort, conf, numDataNodes, format, manageDfsDirs, manageDfsDirs,
- operation, racks, null, null);
+ this(nameNodePort, conf, numDataNodes, format, manageDfsDirs,
+ manageDfsDirs, manageDfsDirs, operation, racks, null, null);
}
/**
@@ -497,7 +506,7 @@ public class MiniDFSCluster {
String[] racks,
long[] simulatedCapacities) throws IOException {
this(nameNodePort, conf, numDataNodes, format, manageDfsDirs, manageDfsDirs,
- operation, racks, null, simulatedCapacities);
+ manageDfsDirs, operation, racks, null, simulatedCapacities);
}
/**
@@ -531,13 +540,15 @@ public class MiniDFSCluster {
int numDataNodes,
boolean format,
boolean manageNameDfsDirs,
+ boolean enableManagedDfsDirsRedundancy,
boolean manageDataDfsDirs,
StartupOption operation,
String[] racks, String hosts[],
long[] simulatedCapacities) throws IOException {
this.nameNodes = new NameNodeInfo[1]; // Single namenode in the cluster
initMiniDFSCluster(conf, numDataNodes, format,
- manageNameDfsDirs, true, manageDataDfsDirs, operation, racks, hosts,
+ manageNameDfsDirs, true, enableManagedDfsDirsRedundancy, manageDataDfsDirs,
+ operation, racks, hosts,
simulatedCapacities, null, true, false,
MiniDFSNNTopology.simpleSingleNN(nameNodePort, 0));
}
@@ -545,8 +556,8 @@ public class MiniDFSCluster {
private void initMiniDFSCluster(
Configuration conf,
int numDataNodes, boolean format, boolean manageNameDfsDirs,
- boolean manageNameDfsSharedDirs, boolean manageDataDfsDirs,
- StartupOption operation, String[] racks,
+ boolean manageNameDfsSharedDirs, boolean enableManagedDfsDirsRedundancy,
+ boolean manageDataDfsDirs, StartupOption operation, String[] racks,
String[] hosts, long[] simulatedCapacities, String clusterId,
boolean waitSafeMode, boolean setupHostsFile,
MiniDFSNNTopology nnTopology)
@@ -586,6 +597,7 @@ public class MiniDFSCluster {
federation = nnTopology.isFederated();
createNameNodesAndSetConf(
nnTopology, manageNameDfsDirs, manageNameDfsSharedDirs,
+ enableManagedDfsDirsRedundancy,
format, operation, clusterId, conf);
if (format) {
@@ -608,7 +620,8 @@ public class MiniDFSCluster {
private void createNameNodesAndSetConf(MiniDFSNNTopology nnTopology,
boolean manageNameDfsDirs, boolean manageNameDfsSharedDirs,
- boolean format, StartupOption operation, String clusterId,
+ boolean enableManagedDfsDirsRedundancy, boolean format,
+ StartupOption operation, String clusterId,
Configuration conf) throws IOException {
Preconditions.checkArgument(nnTopology.countNameNodes() > 0,
"empty NN topology: no namenodes specified!");
@@ -664,7 +677,7 @@ public class MiniDFSCluster {
Collection<URI> prevNNDirs = null;
int nnCounterForFormat = nnCounter;
for (NNConf nn : nameservice.getNNs()) {
- initNameNodeConf(conf, nsId, nn.getNnId(), manageNameDfsDirs,
+ initNameNodeConf(conf, nsId, nn.getNnId(), manageNameDfsDirs, manageNameDfsDirs,
nnCounterForFormat);
Collection<URI> namespaceDirs = FSNamesystem.getNamespaceDirs(conf);
if (format) {
@@ -696,7 +709,8 @@ public class MiniDFSCluster {
// Start all Namenodes
for (NNConf nn : nameservice.getNNs()) {
- initNameNodeConf(conf, nsId, nn.getNnId(), manageNameDfsDirs, nnCounter);
+ initNameNodeConf(conf, nsId, nn.getNnId(), manageNameDfsDirs,
+ enableManagedDfsDirsRedundancy, nnCounter);
createNameNode(nnCounter++, conf, numDataNodes, false, operation,
clusterId, nsId, nn.getNnId());
}
@@ -730,8 +744,8 @@ public class MiniDFSCluster {
private void initNameNodeConf(Configuration conf,
String nameserviceId, String nnId,
- boolean manageNameDfsDirs, int nnIndex)
- throws IOException {
+ boolean manageNameDfsDirs, boolean enableManagedDfsDirsRedundancy,
+ int nnIndex) throws IOException {
if (nameserviceId != null) {
conf.set(DFS_NAMESERVICE_ID, nameserviceId);
}
@@ -740,12 +754,21 @@ public class MiniDFSCluster {
}
if (manageNameDfsDirs) {
- conf.set(DFS_NAMENODE_NAME_DIR_KEY,
- fileAsURI(new File(base_dir, "name" + (2*nnIndex + 1)))+","+
- fileAsURI(new File(base_dir, "name" + (2*nnIndex + 2))));
- conf.set(DFS_NAMENODE_CHECKPOINT_DIR_KEY,
- fileAsURI(new File(base_dir, "namesecondary" + (2*nnIndex + 1)))+","+
- fileAsURI(new File(base_dir, "namesecondary" + (2*nnIndex + 2))));
+ if (enableManagedDfsDirsRedundancy) {
+ conf.set(DFS_NAMENODE_NAME_DIR_KEY,
+ fileAsURI(new File(base_dir, "name" + (2*nnIndex + 1)))+","+
+ fileAsURI(new File(base_dir, "name" + (2*nnIndex + 2))));
+ conf.set(DFS_NAMENODE_CHECKPOINT_DIR_KEY,
+ fileAsURI(new File(base_dir, "namesecondary" + (2*nnIndex + 1)))+","+
+ fileAsURI(new File(base_dir, "namesecondary" + (2*nnIndex + 2))));
+ } else {
+ conf.set(DFS_NAMENODE_NAME_DIR_KEY,
+ fileAsURI(new File(base_dir, "name" + (2*nnIndex + 1))).
+ toString());
+ conf.set(DFS_NAMENODE_CHECKPOINT_DIR_KEY,
+ fileAsURI(new File(base_dir, "namesecondary" + (2*nnIndex + 1))).
+ toString());
+ }
}
}
@@ -1384,7 +1407,6 @@ public class MiniDFSCluster {
waitClusterUp();
LOG.info("Restarted the namenode");
waitActive();
- LOG.info("Cluster is active");
}
}
@@ -1760,6 +1782,7 @@ public class MiniDFSCluster {
}
}
}
+ LOG.info("Cluster is active");
}
private synchronized boolean shouldWait(DatanodeInfo[] dnInfo,
@@ -2143,7 +2166,7 @@ public class MiniDFSCluster {
String nnId = null;
initNameNodeAddress(conf, nameserviceId,
new NNConf(nnId).setIpcPort(namenodePort));
- initNameNodeConf(conf, nameserviceId, nnId, true, nnIndex);
+ initNameNodeConf(conf, nameserviceId, nnId, true, true, nnIndex);
createNameNode(nnIndex, conf, numDataNodes, true, null, null,
nameserviceId, nnId);
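The new enableManagedDfsDirsRedundancy builder option controls whether managed NameNode storage gets two name/checkpoint directories (the default) or just one. A hedged test sketch using the new knob:

    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(1)
        .enableManagedDfsDirsRedundancy(false)  // single name dir, no redundancy
        .build();
    try {
      cluster.waitActive();
      // ... exercise behavior that depends on a lone storage directory ...
    } finally {
      cluster.shutdown();
    }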
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java Thu Jun 28 06:59:38 2012
@@ -25,46 +25,55 @@ import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.when;
-import java.net.SocketTimeoutException;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.LongWritable;
+import java.io.FileNotFoundException;
import java.io.IOException;
-import java.net.InetSocketAddress;
import java.io.InputStream;
import java.io.OutputStream;
+import java.net.InetSocketAddress;
+import java.net.SocketTimeoutException;
+import java.net.URI;
import java.security.MessageDigest;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
+import java.util.concurrent.TimeUnit;
import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileChecksum;
+import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnresolvedLinkException;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.client.HdfsUtils;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
-import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.LongWritable;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.retry.RetryPolicies.MultipleLinearRandomRetry;
import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.log4j.Level;
import org.mockito.Mockito;
import org.mockito.internal.stubbing.answers.ThrowsException;
import org.mockito.invocation.InvocationOnMock;
@@ -341,7 +350,7 @@ public class TestDFSClientRetries extend
// We shouldn't have gained an extra block by the RPC.
assertEquals(blockCount, blockCount2);
- return (LocatedBlock) ret2;
+ return ret2;
}
}).when(spyNN).addBlock(Mockito.anyString(), Mockito.anyString(),
Mockito.<ExtendedBlock>any(), Mockito.<DatanodeInfo[]>any());
@@ -798,5 +807,161 @@ public class TestDFSClientRetries extend
cluster.shutdown();
}
}
-}
+ /** Test client retry with namenode restarting. */
+ public void testNamenodeRestart() throws Exception {
+ ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
+
+ final List<Exception> exceptions = new ArrayList<Exception>();
+
+ final Path dir = new Path("/testNamenodeRestart");
+
+ final Configuration conf = new Configuration();
+ conf.setBoolean(DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_KEY, true);
+
+ final short numDatanodes = 3;
+ final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+ .numDataNodes(numDatanodes)
+ .build();
+ try {
+ cluster.waitActive();
+ final DistributedFileSystem dfs = cluster.getFileSystem();
+ final URI uri = dfs.getUri();
+ assertTrue(HdfsUtils.isHealthy(uri));
+
+ //create a file
+ final long length = 1L << 20;
+ final Path file1 = new Path(dir, "foo");
+ DFSTestUtil.createFile(dfs, file1, length, numDatanodes, 20120406L);
+
+ //get file status
+ final FileStatus s1 = dfs.getFileStatus(file1);
+ assertEquals(length, s1.getLen());
+
+ //shutdown namenode
+ assertTrue(HdfsUtils.isHealthy(uri));
+ cluster.shutdownNameNode(0);
+ assertFalse(HdfsUtils.isHealthy(uri));
+
+ //namenode is down, create another file in a thread
+ final Path file3 = new Path(dir, "file");
+ final Thread thread = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ //it should retry till namenode is up.
+ final FileSystem fs = AppendTestUtil.createHdfsWithDifferentUsername(conf);
+ DFSTestUtil.createFile(fs, file3, length, numDatanodes, 20120406L);
+ } catch (Exception e) {
+ exceptions.add(e);
+ }
+ }
+ });
+ thread.start();
+
+ //restart namenode in a new thread
+ new Thread(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ //sleep, restart, and then wait active
+ TimeUnit.SECONDS.sleep(30);
+ assertFalse(HdfsUtils.isHealthy(uri));
+ cluster.restartNameNode(0, false);
+ cluster.waitActive();
+ assertTrue(HdfsUtils.isHealthy(uri));
+ } catch (Exception e) {
+ exceptions.add(e);
+ }
+ }
+ }).start();
+
+ //namenode is down, it should retry until namenode is up again.
+ final FileStatus s2 = dfs.getFileStatus(file1);
+ assertEquals(s1, s2);
+
+ //check file1 and file3
+ thread.join();
+ assertEquals(dfs.getFileChecksum(file1), dfs.getFileChecksum(file3));
+
+ //enter safe mode
+ assertTrue(HdfsUtils.isHealthy(uri));
+ dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+ assertFalse(HdfsUtils.isHealthy(uri));
+
+ //leave safe mode in a new thread
+ new Thread(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ //sleep and then leave safe mode
+ TimeUnit.SECONDS.sleep(30);
+ assertFalse(HdfsUtils.isHealthy(uri));
+ dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+ assertTrue(HdfsUtils.isHealthy(uri));
+ } catch (Exception e) {
+ exceptions.add(e);
+ }
+ }
+ }).start();
+
+ //namenode is in safe mode, create should retry until it leaves safe mode.
+ final Path file2 = new Path(dir, "bar");
+ DFSTestUtil.createFile(dfs, file2, length, numDatanodes, 20120406L);
+ assertEquals(dfs.getFileChecksum(file1), dfs.getFileChecksum(file2));
+
+ assertTrue(HdfsUtils.isHealthy(uri));
+
+ //make sure it won't retry on exceptions like FileNotFoundException
+ final Path nonExisting = new Path(dir, "nonExisting");
+ LOG.info("setPermission: " + nonExisting);
+ try {
+ dfs.setPermission(nonExisting, new FsPermission((short)0));
+ fail();
+ } catch(FileNotFoundException fnfe) {
+ LOG.info("GOOD!", fnfe);
+ }
+
+ if (!exceptions.isEmpty()) {
+ LOG.error("There are " + exceptions.size() + " exception(s):");
+ for(int i = 0; i < exceptions.size(); i++) {
+ LOG.error("Exception " + i, exceptions.get(i));
+ }
+ fail();
+ }
+ } finally {
+ cluster.shutdown();
+ }
+ }
+
+ public void testMultipleLinearRandomRetry() {
+ parseMultipleLinearRandomRetry(null, "");
+ parseMultipleLinearRandomRetry(null, "11");
+ parseMultipleLinearRandomRetry(null, "11,22,33");
+ parseMultipleLinearRandomRetry(null, "11,22,33,44,55");
+ parseMultipleLinearRandomRetry(null, "AA");
+ parseMultipleLinearRandomRetry(null, "11,AA");
+ parseMultipleLinearRandomRetry(null, "11,22,33,FF");
+ parseMultipleLinearRandomRetry(null, "11,-22");
+ parseMultipleLinearRandomRetry(null, "-11,22");
+
+ parseMultipleLinearRandomRetry("[22x11ms]",
+ "11,22");
+ parseMultipleLinearRandomRetry("[22x11ms, 44x33ms]",
+ "11,22,33,44");
+ parseMultipleLinearRandomRetry("[22x11ms, 44x33ms, 66x55ms]",
+ "11,22,33,44,55,66");
+ parseMultipleLinearRandomRetry("[22x11ms, 44x33ms, 66x55ms]",
+ " 11, 22, 33, 44, 55, 66 ");
+ }
+
+ static void parseMultipleLinearRandomRetry(String expected, String s) {
+ final MultipleLinearRandomRetry r = MultipleLinearRandomRetry.parseCommaSeparatedString(s);
+ LOG.info("input=" + s + ", parsed=" + r + ", expected=" + expected);
+ if (r == null) {
+ assertEquals(expected, null);
+ } else {
+ assertEquals("MultipleLinearRandomRetry" + expected, r.toString());
+ }
+ }
+}
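parseMultipleLinearRandomRetry() above documents the retry-spec grammar by example: a comma-separated list of (sleepMillis, numRetries) pairs, so "11,22,33,44" means 22 retries at 11 ms, then 44 retries at 33 ms. A hedged sketch of reading a spec directly:

    // Parses to "[22x11ms, 44x33ms]" per the expectations in the test above.
    MultipleLinearRandomRetry r =
        MultipleLinearRandomRetry.parseCommaSeparatedString("11,22,33,44");
    // Malformed or odd-length specs parse to null:
    assert MultipleLinearRandomRetry.parseCommaSeparatedString("11,22,33") == null;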
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileLengthOnClusterRestart.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileLengthOnClusterRestart.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileLengthOnClusterRestart.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileLengthOnClusterRestart.java Thu Jun 28 06:59:38 2012
@@ -65,8 +65,8 @@ public class TestFileLengthOnClusterRest
in = (HdfsDataInputStream) dfs.open(path);
Assert.fail("Expected IOException");
} catch (IOException e) {
- Assert.assertEquals("Could not obtain the last block locations.", e
- .getLocalizedMessage());
+ Assert.assertTrue(e.getLocalizedMessage().indexOf(
+ "Name node is in safe mode") >= 0);
}
} finally {
if (null != in) {
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java Thu Jun 28 06:59:38 2012
@@ -18,39 +18,34 @@
package org.apache.hadoop.hdfs;
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Random;
+
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
-import org.apache.hadoop.hdfs.server.namenode.FSEditLog;
import org.apache.hadoop.hdfs.server.namenode.FSImage;
-import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
-import org.apache.hadoop.hdfs.server.namenode.INodeFileUnderConstruction;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
-
-import java.io.File;
-import java.io.IOException;
-import java.net.URI;
-import java.util.Collection;
-import java.util.List;
-import java.util.Random;
-import static org.junit.Assert.*;
import org.junit.Test;
-import com.google.common.collect.Lists;
-
/**
* A JUnit test for checking if restarting DFS preserves the
* blocks that are part of an unclosed file.
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java Thu Jun 28 06:59:38 2012
@@ -17,9 +17,7 @@
*/
package org.apache.hadoop.hdfs;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.*;
import java.io.OutputStream;
import java.security.PrivilegedExceptionAction;
@@ -773,7 +771,60 @@ public class TestQuota {
final ContentSummary computed) {
assertEquals(expected.toString(), computed.toString());
}
-
+
+ /**
+ * Test limit cases for setting space quotas.
+ */
+ @Test
+ public void testMaxSpaceQuotas() throws Exception {
+ final Configuration conf = new HdfsConfiguration();
+ final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
+ final FileSystem fs = cluster.getFileSystem();
+ assertTrue("Not a HDFS: "+fs.getUri(),
+ fs instanceof DistributedFileSystem);
+ final DistributedFileSystem dfs = (DistributedFileSystem)fs;
+
+ // create test directory
+ final Path testFolder = new Path("/testFolder");
+ assertTrue(dfs.mkdirs(testFolder));
+
+ // setting namespace quota to Long.MAX_VALUE - 1 should work
+ dfs.setQuota(testFolder, Long.MAX_VALUE - 1, 10);
+ ContentSummary c = dfs.getContentSummary(testFolder);
+ assertTrue("Quota not set properly", c.getQuota() == Long.MAX_VALUE - 1);
+
+ // setting diskspace quota to Long.MAX_VALUE - 1 should work
+ dfs.setQuota(testFolder, 10, Long.MAX_VALUE - 1);
+ c = dfs.getContentSummary(testFolder);
+ assertTrue("Quota not set properly", c.getSpaceQuota() == Long.MAX_VALUE - 1);
+
+ // setting namespace quota to Long.MAX_VALUE should not work + no error
+ dfs.setQuota(testFolder, Long.MAX_VALUE, 10);
+ c = dfs.getContentSummary(testFolder);
+ assertTrue("Quota should not have changed", c.getQuota() == 10);
+
+ // setting diskspace quota to Long.MAX_VALUE should not work + no error
+ dfs.setQuota(testFolder, 10, Long.MAX_VALUE);
+ c = dfs.getContentSummary(testFolder);
+ assertTrue("Quota should not have changed", c.getSpaceQuota() == 10);
+
+ // setting namespace quota to Long.MAX_VALUE + 1 should not work + error
+ try {
+ dfs.setQuota(testFolder, Long.MAX_VALUE + 1, 10);
+ fail("Exception not thrown");
+ } catch (IllegalArgumentException e) {
+ // Expected
+ }
+
+ // setting diskspace quota to Long.MAX_VALUE + 1 should not work + error
+ try {
+ dfs.setQuota(testFolder, 10, Long.MAX_VALUE + 1);
+ fail("Exception not thrown");
+ } catch (IllegalArgumentException e) {
+ // Expected
+ }
+ }
+
/**
* Violate a space quota using files of size < 1 block. Test that block
* allocation conservatively assumes that for quota checking the entire
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java Thu Jun 28 06:59:38 2012
@@ -23,6 +23,7 @@ import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.permission.FsPermission;
@@ -31,9 +32,11 @@ import org.apache.hadoop.hdfs.protocol.H
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
import org.apache.hadoop.test.GenericTestUtils;
import static org.junit.Assert.*;
@@ -372,4 +375,76 @@ public class TestSafeMode {
dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
assertFalse("State was expected to be out of safemode.", dfs.isInSafeMode());
}
+
+ @Test
+ public void testSafeModeWhenZeroBlockLocations() throws IOException {
+
+ try {
+ Path file1 = new Path("/tmp/testManualSafeMode/file1");
+ Path file2 = new Path("/tmp/testManualSafeMode/file2");
+
+ System.out.println("Created file1 and file2.");
+
+ // create two files with one block each.
+ DFSTestUtil.createFile(fs, file1, 1000, (short)1, 0);
+ DFSTestUtil.createFile(fs, file2, 2000, (short)1, 0);
+ checkGetBlockLocationsWorks(fs, file1);
+
+ NameNode namenode = cluster.getNameNode();
+
+ // manually set safemode.
+ dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+ assertTrue("should still be in SafeMode", namenode.isInSafeMode());
+ // getBlock locations should still work since block locations exists
+ checkGetBlockLocationsWorks(fs, file1);
+ dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+ assertFalse("should not be in SafeMode", namenode.isInSafeMode());
+
+
+ // Now 2nd part of the tests where there aren't block locations
+ cluster.shutdownDataNodes();
+ cluster.shutdownNameNode(0);
+
+ // now bring up just the NameNode.
+ cluster.restartNameNode();
+ cluster.waitActive();
+
+ System.out.println("Restarted cluster with just the NameNode");
+
+ namenode = cluster.getNameNode();
+
+ assertTrue("No datanode is started. Should be in SafeMode",
+ namenode.isInSafeMode());
+ FileStatus stat = fs.getFileStatus(file1);
+ try {
+ fs.getFileBlockLocations(stat, 0, 1000);
+ assertTrue("Should have got safemode exception", false);
+ } catch (SafeModeException e) {
+ // as expected
+ } catch (RemoteException re) {
+ if (!re.getClassName().equals(SafeModeException.class.getName()))
+ assertTrue("Should have got safemode exception", false);
+ }
+
+
+ dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+ assertFalse("Should not be in safemode", namenode.isInSafeMode());
+ checkGetBlockLocationsWorks(fs, file1);
+
+ } finally {
+ if(fs != null) fs.close();
+ if(cluster!= null) cluster.shutdown();
+ }
+ }
+
+ void checkGetBlockLocationsWorks(FileSystem fs, Path fileName) throws IOException {
+ FileStatus stat = fs.getFileStatus(fileName);
+ try {
+ fs.getFileBlockLocations(stat, 0, 1000);
+ } catch (SafeModeException e) {
+ assertTrue("Should have not got safemode exception", false);
+ } catch (RemoteException re) {
+ assertTrue("Should have not got safemode exception", false);
+ }
+ }
}
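testSafeModeWhenZeroBlockLocations() has to catch the safemode failure in two shapes, because a server-side SafeModeException crosses the RPC boundary as a RemoteException carrying only the class name. The detection pattern, condensed:

    try {
      fs.getFileBlockLocations(stat, 0, 1000);
      fail("expected a safemode failure");
    } catch (SafeModeException e) {
      // thrown directly when no RPC boundary is crossed
    } catch (RemoteException re) {
      // over RPC, match the original exception by class name
      assertEquals(SafeModeException.class.getName(), re.getClassName());
    }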
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java Thu Jun 28 06:59:38 2012
@@ -17,7 +17,9 @@
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.util.ArrayList;
@@ -29,14 +31,9 @@ import org.apache.hadoop.hdfs.DFSConfigK
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
-import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
-import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.net.NetworkTopology;
import org.junit.Before;
import org.junit.Test;
@@ -381,11 +378,11 @@ public class TestBlockManager {
}
private BlockInfo addBlockOnNodes(long blockId, List<DatanodeDescriptor> nodes) {
- INodeFile iNode = Mockito.mock(INodeFile.class);
- Mockito.doReturn((short)3).when(iNode).getReplication();
+ BlockCollection bc = Mockito.mock(BlockCollection.class);
+ Mockito.doReturn((short)3).when(bc).getReplication();
BlockInfo blockInfo = blockOnNodes(blockId, nodes);
- bm.blocksMap.addBlockCollection(blockInfo, iNode);
+ bm.blocksMap.addBlockCollection(blockInfo, bc);
return blockInfo;
}
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java Thu Jun 28 06:59:38 2012
@@ -24,6 +24,7 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
+import java.util.Map;
import java.util.Random;
import org.apache.hadoop.conf.Configuration;
@@ -34,7 +35,6 @@ import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.net.NetworkTopology;
@@ -61,7 +61,7 @@ public class TestReplicationPolicy {
DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/d1/r2"),
DFSTestUtil.getDatanodeDescriptor("4.4.4.4", "/d1/r2"),
DFSTestUtil.getDatanodeDescriptor("5.5.5.5", "/d2/r3"),
- DFSTestUtil.getDatanodeDescriptor("6.6.6.6", "/d2/r3")
+ DFSTestUtil.getDatanodeDescriptor("6.6.6.6", "/d2/r3")
};
FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
@@ -587,4 +587,50 @@ public class TestReplicationPolicy {
fifthPrioritySize, chosenBlocks.get(
UnderReplicatedBlocks.QUEUE_WITH_CORRUPT_BLOCKS).size());
}
+
+ /**
+ * Test that chooseReplicaToDelete picks replicas based on
+ * block locality and free space
+ */
+ @Test
+ public void testChooseReplicaToDelete() throws Exception {
+ List<DatanodeDescriptor> replicaNodeList = new
+ ArrayList<DatanodeDescriptor>();
+ final Map<String, List<DatanodeDescriptor>> rackMap
+ = new HashMap<String, List<DatanodeDescriptor>>();
+
+ dataNodes[0].setRemaining(4*1024*1024);
+ replicaNodeList.add(dataNodes[0]);
+
+ dataNodes[1].setRemaining(3*1024*1024);
+ replicaNodeList.add(dataNodes[1]);
+
+ dataNodes[2].setRemaining(2*1024*1024);
+ replicaNodeList.add(dataNodes[2]);
+
+ dataNodes[5].setRemaining(1*1024*1024);
+ replicaNodeList.add(dataNodes[5]);
+
+ List<DatanodeDescriptor> first = new ArrayList<DatanodeDescriptor>();
+ List<DatanodeDescriptor> second = new ArrayList<DatanodeDescriptor>();
+ replicator.splitNodesWithRack(
+ replicaNodeList, rackMap, first, second);
+ // dataNodes[0] and dataNodes[1] are in the first set as their rack has two
+ // replica nodes, while dataNodes[2] and dataNodes[5] are in the second set.
+ assertEquals(2, first.size());
+ assertEquals(2, second.size());
+ DatanodeDescriptor chosenNode = replicator.chooseReplicaToDelete(
+ null, null, (short)3, first, second);
+ // Within the first set, dataNodes[1] has the least free space
+ assertEquals(chosenNode, dataNodes[1]);
+
+ replicator.adjustSetsWithChosenReplica(
+ rackMap, first, second, chosenNode);
+ assertEquals(0, first.size());
+ assertEquals(3, second.size());
+ // Within the second set, dataNodes[5] has the least free space
+ chosenNode = replicator.chooseReplicaToDelete(
+ null, null, (short)2, first, second);
+ assertEquals(chosenNode, dataNodes[5]);
+ }
}
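
The new test exercises a two-phase heuristic: splitNodesWithRack first partitions the replicas into nodes whose rack holds more than one replica (the first set) and nodes alone on their rack (the second set); chooseReplicaToDelete then prefers the first set and, within a set, the node with the least free space. A standalone sketch of that policy follows; it is an illustration of the heuristic under those assumptions, not the BlockPlacementPolicyDefault code itself:

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class ReplicaDeleteSketch {
      static class Node {
        final String rack; final long remaining;
        Node(String rack, long remaining) { this.rack = rack; this.remaining = remaining; }
      }

      // Partition replicas: racks with >1 replica go to 'first', singletons to 'second'.
      static void splitByRack(List<Node> replicas, List<Node> first, List<Node> second) {
        Map<String, List<Node>> rackMap = new HashMap<String, List<Node>>();
        for (Node n : replicas) {
          List<Node> l = rackMap.get(n.rack);
          if (l == null) { l = new ArrayList<Node>(); rackMap.put(n.rack, l); }
          l.add(n);
        }
        for (List<Node> l : rackMap.values()) {
          (l.size() > 1 ? first : second).addAll(l);
        }
      }

      // Prefer deleting from multi-replica racks; among candidates pick least free space.
      static Node chooseToDelete(List<Node> first, List<Node> second) {
        List<Node> pool = first.isEmpty() ? second : first;
        Node worst = null;
        for (Node n : pool) {
          if (worst == null || n.remaining < worst.remaining) worst = n;
        }
        return worst;
      }

      public static void main(String[] args) {
        List<Node> replicas = new ArrayList<Node>();
        replicas.add(new Node("/d1/r1", 4L << 20));
        replicas.add(new Node("/d1/r1", 3L << 20));
        replicas.add(new Node("/d1/r2", 2L << 20));
        replicas.add(new Node("/d2/r3", 1L << 20));
        List<Node> first = new ArrayList<Node>(), second = new ArrayList<Node>();
        splitByRack(replicas, first, second);
        // The 3MB replica on the doubled-up rack goes first, mirroring the test.
        System.out.println(chooseToDelete(first, second).remaining); // prints: 3145728
      }
    }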
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java Thu Jun 28 06:59:38 2012
@@ -28,8 +28,6 @@ import java.util.List;
import java.util.Random;
import java.util.concurrent.TimeoutException;
-import junit.framework.TestCase;
-
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
@@ -52,14 +50,21 @@ import org.apache.hadoop.hdfs.server.com
import org.apache.hadoop.hdfs.server.common.Util;
import org.apache.hadoop.hdfs.util.DataTransferThrottler;
import org.apache.hadoop.net.NetUtils;
+
+import org.junit.Test;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
/**
* This class tests if block replacement requests to data nodes work correctly.
*/
-public class TestBlockReplacement extends TestCase {
+public class TestBlockReplacement {
private static final Log LOG = LogFactory.getLog(
"org.apache.hadoop.hdfs.TestBlockReplacement");
MiniDFSCluster cluster;
+ @Test
public void testThrottler() throws IOException {
Configuration conf = new HdfsConfiguration();
FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
@@ -83,6 +88,7 @@ public class TestBlockReplacement extend
assertTrue(totalBytes*1000/(end-start)<=bandwidthPerSec);
}
+ @Test
public void testBlockReplacement() throws IOException, TimeoutException {
final Configuration CONF = new HdfsConfiguration();
final String[] INITIAL_RACKS = {"/RACK0", "/RACK1", "/RACK2"};
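
This hunk is the routine JUnit 3 to JUnit 4 migration: drop the junit.framework.TestCase superclass, statically import the Assert methods, and tag each test with @Test, since JUnit 4 discovers tests by annotation rather than by the "test" method-name prefix. In miniature:

    // JUnit 3 style: discovery by inheritance and method-name prefix.
    // public class TestThrottler extends junit.framework.TestCase {
    //   public void testThrottler() { assertTrue(true); }
    // }

    // JUnit 4 style: annotation-driven, no superclass required.
    import static org.junit.Assert.assertTrue;
    import org.junit.Test;

    public class TestThrottler {
      @Test
      public void testThrottler() {
        assertTrue(true);
      }
    }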
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java Thu Jun 28 06:59:38 2012
@@ -56,7 +56,7 @@ public class NameNodeAdapter {
public static LocatedBlocks getBlockLocations(NameNode namenode,
String src, long offset, long length) throws IOException {
return namenode.getNamesystem().getBlockLocations(
- src, offset, length, false, true);
+ src, offset, length, false, true, true);
}
public static HdfsFileStatus getFileInfo(NameNode namenode, String src,
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java Thu Jun 28 06:59:38 2012
@@ -506,21 +506,29 @@ public class TestEditLog extends TestCas
FSImage fsimage = namesystem.getFSImage();
final FSEditLog editLog = fsimage.getEditLog();
fileSys.mkdirs(new Path("/tmp"));
- StorageDirectory sd = fsimage.getStorage().dirIterator(NameNodeDirType.EDITS).next();
+
+ Iterator<StorageDirectory> iter = fsimage.getStorage().
+ dirIterator(NameNodeDirType.EDITS);
+ LinkedList<StorageDirectory> sds = new LinkedList<StorageDirectory>();
+ while (iter.hasNext()) {
+ sds.add(iter.next());
+ }
editLog.close();
cluster.shutdown();
- File editFile = NNStorage.getFinalizedEditsFile(sd, 1, 3);
- assertTrue(editFile.exists());
-
- long fileLen = editFile.length();
- System.out.println("File name: " + editFile + " len: " + fileLen);
- RandomAccessFile rwf = new RandomAccessFile(editFile, "rw");
- rwf.seek(fileLen-4); // seek to checksum bytes
- int b = rwf.readInt();
- rwf.seek(fileLen-4);
- rwf.writeInt(b+1);
- rwf.close();
+ for (StorageDirectory sd : sds) {
+ File editFile = NNStorage.getFinalizedEditsFile(sd, 1, 3);
+ assertTrue(editFile.exists());
+
+ long fileLen = editFile.length();
+ LOG.debug("Corrupting Log File: " + editFile + " len: " + fileLen);
+ RandomAccessFile rwf = new RandomAccessFile(editFile, "rw");
+ rwf.seek(fileLen-4); // seek to checksum bytes
+ int b = rwf.readInt();
+ rwf.seek(fileLen-4);
+ rwf.writeInt(b+1);
+ rwf.close();
+ }
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).format(false).build();
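
The rewritten block above now corrupts the finalized edits file in every edits directory rather than just the first: with redundant directories, damaging a single copy would no longer make the restart fail, since the namenode could read an intact one. The corruption trick itself, rewriting the trailing 4 checksum bytes with RandomAccessFile, is self-contained enough to extract. A sketch, assuming (as the test's seek does) that the checksum sits in the file's last four bytes:

    import java.io.File;
    import java.io.IOException;
    import java.io.RandomAccessFile;

    public class ChecksumFlipper {
      // Increment the final int of the file, invalidating a trailing checksum.
      static void flipTrailingInt(File f) throws IOException {
        RandomAccessFile rwf = new RandomAccessFile(f, "rw");
        try {
          long len = rwf.length();
          rwf.seek(len - 4);   // seek to checksum bytes
          int b = rwf.readInt();
          rwf.seek(len - 4);
          rwf.writeInt(b + 1); // any change breaks the checksum
        } finally {
          rwf.close();
        }
      }
    }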
@@ -1232,6 +1240,113 @@ public class TestEditLog extends TestCas
}
}
+ private static long readAllEdits(Collection<EditLogInputStream> streams,
+ long startTxId) throws IOException {
+ FSEditLogOp op;
+ long nextTxId = startTxId;
+ long numTx = 0;
+ for (EditLogInputStream s : streams) {
+ while (true) {
+ op = s.readOp();
+ if (op == null)
+ break;
+ if (op.getTransactionId() != nextTxId) {
+ throw new IOException("out of order transaction ID! expected " +
+ nextTxId + " but got " + op.getTransactionId() + " when " +
+ "reading " + s.getName());
+ }
+ numTx++;
+ nextTxId = op.getTransactionId() + 1;
+ }
+ }
+ return numTx;
+ }
+
+ /**
+ * Test edit log failover. If a single edit log is missing, the other
+ * edit logs should be used instead.
+ */
+ @Test
+ public void testEditLogFailOverFromMissing() throws IOException {
+ File f1 = new File(TEST_DIR + "/failover0");
+ File f2 = new File(TEST_DIR + "/failover1");
+ List<URI> editUris = ImmutableList.of(f1.toURI(), f2.toURI());
+
+ NNStorage storage = setupEdits(editUris, 3);
+
+ final long startErrorTxId = 1*TXNS_PER_ROLL + 1;
+ final long endErrorTxId = 2*TXNS_PER_ROLL;
+
+ File[] files = new File(f1, "current").listFiles(new FilenameFilter() {
+ public boolean accept(File dir, String name) {
+ if (name.startsWith(NNStorage.getFinalizedEditsFileName(startErrorTxId,
+ endErrorTxId))) {
+ return true;
+ }
+ return false;
+ }
+ });
+ assertEquals(1, files.length);
+ assertTrue(files[0].delete());
+
+ FSEditLog editlog = getFSEditLog(storage);
+ editlog.initJournalsForWrite();
+ long startTxId = 1;
+ try {
+ readAllEdits(editlog.selectInputStreams(startTxId, 4*TXNS_PER_ROLL),
+ startTxId);
+ } catch (IOException e) {
+ LOG.error("edit log failover didn't work", e);
+ fail("Edit log failover didn't work");
+ }
+ }
+
+ /**
+ * Test edit log failover from a corrupt edit log
+ */
+ @Test
+ public void testEditLogFailOverFromCorrupt() throws IOException {
+ File f1 = new File(TEST_DIR + "/failover0");
+ File f2 = new File(TEST_DIR + "/failover1");
+ List<URI> editUris = ImmutableList.of(f1.toURI(), f2.toURI());
+
+ NNStorage storage = setupEdits(editUris, 3);
+
+ final long startErrorTxId = 1*TXNS_PER_ROLL + 1;
+ final long endErrorTxId = 2*TXNS_PER_ROLL;
+
+ File[] files = new File(f1, "current").listFiles(new FilenameFilter() {
+ public boolean accept(File dir, String name) {
+ if (name.startsWith(NNStorage.getFinalizedEditsFileName(startErrorTxId,
+ endErrorTxId))) {
+ return true;
+ }
+ return false;
+ }
+ });
+ assertEquals(1, files.length);
+
+ long fileLen = files[0].length();
+ LOG.debug("Corrupting Log File: " + files[0] + " len: " + fileLen);
+ RandomAccessFile rwf = new RandomAccessFile(files[0], "rw");
+ rwf.seek(fileLen-4); // seek to checksum bytes
+ int b = rwf.readInt();
+ rwf.seek(fileLen-4);
+ rwf.writeInt(b+1);
+ rwf.close();
+
+ FSEditLog editlog = getFSEditLog(storage);
+ editlog.initJournalsForWrite();
+ long startTxId = 1;
+ try {
+ readAllEdits(editlog.selectInputStreams(startTxId, 4*TXNS_PER_ROLL),
+ startTxId);
+ } catch (IOException e) {
+ LOG.error("edit log failover didn't work", e);
+ fail("Edit log failover didn't work");
+ }
+ }
+
/**
* Test creating a directory with lots and lots of edit log segments
*/
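
The two failover tests added above share one recipe: write the same edits to two directories, damage exactly one copy (delete the file, or flip its checksum), then assert that reading the whole transaction range still succeeds because stream selection falls back to the undamaged copy. Stripped of the HDFS machinery, the underlying read-with-redundancy idea looks roughly like this (names hypothetical):

    import java.io.IOException;
    import java.util.List;

    public class FailoverReadSketch {
      interface Source { byte[] read() throws IOException; }

      // Try each redundant source in order; succeed if any copy is readable.
      static byte[] readWithFailover(List<Source> copies) throws IOException {
        IOException last = null;
        for (Source s : copies) {
          try {
            return s.read();
          } catch (IOException e) {
            last = e; // remember and fall through to the next replica
          }
        }
        throw new IOException("all copies failed", last);
      }
    }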
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java Thu Jun 28 06:59:38 2012
@@ -51,6 +51,16 @@ public class TestEditLogFileOutputStream
}
@Test
+ public void testConstants() {
+ // Each call to FSEditLogOp#Reader#readOp can read at most MAX_OP_SIZE bytes
+ // before getting an exception. So we don't want to preallocate a region longer
+ // than MAX_OP_SIZE, because then we'd get an IOException when reading
+ // through the padding at the end of the file.
+ assertTrue(EditLogFileOutputStream.PREALLOCATION_LENGTH <
+ FSEditLogOp.MAX_OP_SIZE);
+ }
+
+ @Test
public void testPreallocation() throws IOException {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java Thu Jun 28 06:59:38 2012
@@ -77,7 +77,7 @@ public class TestFSEditLogLoader {
MiniDFSCluster cluster = null;
FileSystem fileSys = null;
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES)
- .build();
+ .enableManagedDfsDirsRedundancy(false).build();
cluster.waitActive();
fileSys = cluster.getFileSystem();
final FSNamesystem namesystem = cluster.getNamesystem();
@@ -107,7 +107,7 @@ public class TestFSEditLogLoader {
bld.append("Recent opcode offsets: (\\d+\\s*){4}$");
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES)
- .format(false).build();
+ .enableManagedDfsDirsRedundancy(false).format(false).build();
fail("should not be able to start");
} catch (IOException e) {
assertTrue("error message contains opcodes message",
@@ -327,6 +327,56 @@ public class TestFSEditLogLoader {
}
@Test
+ public void testValidateEditLogWithCorruptBody() throws IOException {
+ File testDir = new File(TEST_DIR, "testValidateEditLogWithCorruptBody");
+ SortedMap<Long, Long> offsetToTxId = Maps.newTreeMap();
+ final int NUM_TXNS = 20;
+ File logFile = prepareUnfinalizedTestEditLog(testDir, NUM_TXNS,
+ offsetToTxId);
+ // Back up the uncorrupted log
+ File logFileBak = new File(testDir, logFile.getName() + ".bak");
+ Files.copy(logFile, logFileBak);
+ EditLogValidation validation =
+ EditLogFileInputStream.validateEditLog(logFile);
+ assertTrue(!validation.hasCorruptHeader());
+ // We expect that there will be an OP_START_LOG_SEGMENT, followed by
+ // NUM_TXNS opcodes, followed by an OP_END_LOG_SEGMENT.
+ assertEquals(NUM_TXNS + 1, validation.getEndTxId());
+ // Corrupt each edit and verify that validation continues to work
+ for (Map.Entry<Long, Long> entry : offsetToTxId.entrySet()) {
+ long txOffset = entry.getKey();
+ long txId = entry.getValue();
+
+ // Restore backup, corrupt the txn opcode
+ Files.copy(logFileBak, logFile);
+ corruptByteInFile(logFile, txOffset);
+ validation = EditLogFileInputStream.validateEditLog(logFile);
+ long expectedEndTxId = (txId == (NUM_TXNS + 1)) ?
+ NUM_TXNS : (NUM_TXNS + 1);
+ assertEquals("Failed when corrupting txn opcode at " + txOffset,
+ expectedEndTxId, validation.getEndTxId());
+ assertTrue(!validation.hasCorruptHeader());
+ }
+
+ // Truncate right before each edit and verify that validation continues
+ // to work
+ for (Map.Entry<Long, Long> entry : offsetToTxId.entrySet()) {
+ long txOffset = entry.getKey();
+ long txId = entry.getValue();
+
+ // Restore backup, truncate the file right before the txn opcode
+ Files.copy(logFileBak, logFile);
+ truncateFile(logFile, txOffset);
+ validation = EditLogFileInputStream.validateEditLog(logFile);
+ long expectedEndTxId = (txId == 0) ?
+ HdfsConstants.INVALID_TXID : (txId - 1);
+ assertEquals("Failed when corrupting txid " + txId + " txn opcode " +
+ "at " + txOffset, expectedEndTxId, validation.getEndTxId());
+ assertTrue(!validation.hasCorruptHeader());
+ }
+ }
+
+ @Test
public void testValidateEmptyEditLog() throws IOException {
File testDir = new File(TEST_DIR, "testValidateEmptyEditLog");
SortedMap<Long, Long> offsetToTxId = Maps.newTreeMap();
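
testValidateEditLogWithCorruptBody sweeps every recorded transaction offset, first corrupting a byte there and then truncating there, checking the end txid the validator reports in each case. The helpers it relies on, corruptByteInFile and truncateFile, are defined elsewhere in the test class; a plausible sketch of their behavior, assuming "corrupt" means flipping one byte in place:

    import java.io.File;
    import java.io.IOException;
    import java.io.RandomAccessFile;

    public class EditLogDamage {
      // Flip one byte at the given offset (assumption: any bit change suffices).
      static void corruptByteInFile(File f, long offset) throws IOException {
        RandomAccessFile raf = new RandomAccessFile(f, "rw");
        try {
          raf.seek(offset);
          int b = raf.read();
          raf.seek(offset);
          raf.write(b ^ 0xff);
        } finally {
          raf.close();
        }
      }

      // Cut the file so the txn starting at 'offset' is incomplete.
      static void truncateFile(File f, long offset) throws IOException {
        RandomAccessFile raf = new RandomAccessFile(f, "rw");
        try {
          raf.setLength(offset);
        } finally {
          raf.close();
        }
      }
    }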
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java Thu Jun 28 06:59:38 2012
@@ -20,10 +20,10 @@ package org.apache.hadoop.hdfs.server.na
import static org.junit.Assert.*;
import java.net.URI;
-import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Iterator;
+import java.util.PriorityQueue;
import java.io.RandomAccessFile;
import java.io.File;
@@ -33,7 +33,6 @@ import org.junit.Test;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.namenode.JournalManager.CorruptionException;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
@@ -45,7 +44,6 @@ import static org.apache.hadoop.hdfs.ser
import static org.apache.hadoop.hdfs.server.namenode.TestEditLog.TXNS_PER_FAIL;
import com.google.common.collect.ImmutableList;
-import com.google.common.collect.TreeMultiset;
import com.google.common.base.Joiner;
public class TestFileJournalManager {
@@ -64,12 +62,13 @@ public class TestFileJournalManager {
static long getNumberOfTransactions(FileJournalManager jm, long fromTxId,
boolean inProgressOk, boolean abortOnGap) throws IOException {
long numTransactions = 0, txId = fromTxId;
- final TreeMultiset<EditLogInputStream> allStreams =
- TreeMultiset.create(JournalSet.EDIT_LOG_INPUT_STREAM_COMPARATOR);
+ final PriorityQueue<EditLogInputStream> allStreams =
+ new PriorityQueue<EditLogInputStream>(64,
+ JournalSet.EDIT_LOG_INPUT_STREAM_COMPARATOR);
jm.selectInputStreams(allStreams, fromTxId, inProgressOk);
-
+ EditLogInputStream elis = null;
try {
- for (EditLogInputStream elis : allStreams) {
+ while ((elis = allStreams.poll()) != null) {
elis.skipUntil(txId);
while (true) {
FSEditLogOp op = elis.readOp();
@@ -87,6 +86,7 @@ public class TestFileJournalManager {
}
} finally {
IOUtils.cleanup(LOG, allStreams.toArray(new EditLogInputStream[0]));
+ IOUtils.cleanup(LOG, elis);
}
return numTransactions;
}
@@ -379,27 +379,28 @@ public class TestFileJournalManager {
private static EditLogInputStream getJournalInputStream(JournalManager jm,
long txId, boolean inProgressOk) throws IOException {
- final TreeMultiset<EditLogInputStream> allStreams =
- TreeMultiset.create(JournalSet.EDIT_LOG_INPUT_STREAM_COMPARATOR);
+ final PriorityQueue<EditLogInputStream> allStreams =
+ new PriorityQueue<EditLogInputStream>(64,
+ JournalSet.EDIT_LOG_INPUT_STREAM_COMPARATOR);
jm.selectInputStreams(allStreams, txId, inProgressOk);
+ EditLogInputStream elis = null, ret;
try {
- for (Iterator<EditLogInputStream> iter = allStreams.iterator();
- iter.hasNext();) {
- EditLogInputStream elis = iter.next();
+ while ((elis = allStreams.poll()) != null) {
if (elis.getFirstTxId() > txId) {
break;
}
if (elis.getLastTxId() < txId) {
- iter.remove();
elis.close();
continue;
}
elis.skipUntil(txId);
- iter.remove();
- return elis;
+ ret = elis;
+ elis = null;
+ return ret;
}
} finally {
IOUtils.cleanup(LOG, allStreams.toArray(new EditLogInputStream[0]));
+ IOUtils.cleanup(LOG, elis);
}
return null;
}
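
Two idioms in this rewrite deserve a note: the TreeMultiset is replaced by a PriorityQueue drained with poll(), which both orders the streams and removes them as they are consumed, and the chosen stream's local reference is nulled before returning so the finally-block cleanup cannot close the stream being handed to the caller. The hand-off pattern, distilled (Closeable stands in for EditLogInputStream; a real implementation would swallow close() errors the way IOUtils.cleanup does):

    import java.io.Closeable;
    import java.io.IOException;
    import java.util.PriorityQueue;

    public class SafeHandoff {
      // Return the first acceptable resource; close everything else, even on error.
      static <T extends Closeable> T pickFirst(PriorityQueue<T> all) throws IOException {
        T current = null;
        try {
          while ((current = all.poll()) != null) {
            if (acceptable(current)) {
              T ret = current;
              current = null;  // ownership transferred: finally must not close it
              return ret;
            }
            current.close();   // rejected: release immediately
          }
          return null;
        } finally {
          if (current != null) current.close(); // close the one in flight on error
          for (T t : all) t.close();            // close everything still queued
        }
      }

      static boolean acceptable(Closeable c) { return true; } // placeholder predicate
    }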
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java Thu Jun 28 06:59:38 2012
@@ -76,8 +76,9 @@ public class TestFsck {
"build/test") + "/audit.log";
// Pattern for:
- // ugi=name ip=/address cmd=FSCK src=/ dst=null perm=null
+ // allowed=true ugi=name ip=/address cmd=FSCK src=/ dst=null perm=null
static final Pattern fsckPattern = Pattern.compile(
+ "allowed=.*?\\s" +
"ugi=.*?\\s" +
"ip=/\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\s" +
"cmd=fsck\\ssrc=\\/\\sdst=null\\s" +
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java Thu Jun 28 06:59:38 2012
@@ -343,7 +343,7 @@ public class TestNameNodeRecovery {
StorageDirectory sd = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
- .build();
+ .enableManagedDfsDirsRedundancy(false).build();
cluster.waitActive();
if (!finalize) {
// Normally, the in-progress edit log would be finalized by
@@ -379,7 +379,7 @@ public class TestNameNodeRecovery {
try {
LOG.debug("trying to start normally (this should fail)...");
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
- .format(false).build();
+ .enableManagedDfsDirsRedundancy(false).format(false).build();
cluster.waitActive();
cluster.shutdown();
if (needRecovery) {
@@ -404,7 +404,8 @@ public class TestNameNodeRecovery {
try {
LOG.debug("running recovery...");
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
- .format(false).startupOption(recoverStartOpt).build();
+ .enableManagedDfsDirsRedundancy(false).format(false)
+ .startupOption(recoverStartOpt).build();
} catch (IOException e) {
fail("caught IOException while trying to recover. " +
"message was " + e.getMessage() +
@@ -420,7 +421,7 @@ public class TestNameNodeRecovery {
try {
LOG.debug("starting cluster normally after recovery...");
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
- .format(false).build();
+ .enableManagedDfsDirsRedundancy(false).format(false).build();
LOG.debug("successfully recovered the " + corruptor.getName() +
" corrupted edit log");
cluster.waitActive();
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java?rev=1354832&r1=1354831&r2=1354832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java Thu Jun 28 06:59:38 2012
@@ -100,6 +100,9 @@ public class TestStartup extends TestCas
fileAsURI(new File(hdfsDir, "name")).toString());
config.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
new File(hdfsDir, "data").getPath());
+ config.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, "0.0.0.0:0");
+ config.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
+ config.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, "0.0.0.0:0");
config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
fileAsURI(new File(hdfsDir, "secondary")).toString());
config.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
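
The three added keys bind the datanode's data-transfer, HTTP, and IPC servers to port 0, which asks the OS for any free ephemeral port and keeps parallel test runs from colliding on fixed defaults. The same trick with a plain ServerSocket, nothing HDFS-specific:

    import java.io.IOException;
    import java.net.InetSocketAddress;
    import java.net.ServerSocket;

    public class EphemeralPortExample {
      public static void main(String[] args) throws IOException {
        ServerSocket ss = new ServerSocket();
        ss.bind(new InetSocketAddress("0.0.0.0", 0)); // port 0 = OS-assigned
        System.out.println("bound to port " + ss.getLocalPort());
        ss.close();
      }
    }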