Posted to hdfs-commits@hadoop.apache.org by su...@apache.org on 2012/10/12 02:15:37 UTC
svn commit: r1397387 [4/5] - in
/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project:
hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/
hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/
hadoop-hdfs/ hadoop-hdfs...
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/exception.h
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/exception.h?rev=1397387&r1=1397386&r2=1397387&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/exception.h (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/exception.h Fri Oct 12 00:15:22 2012
@@ -65,6 +65,21 @@
#define NOPRINT_EXC_ILLEGAL_ARGUMENT 0x10
/**
+ * Get information about an exception.
+ *
+ * @param excName The Exception name.
+ * This is a Java class name in JNI format.
+ * @param noPrintFlags Flags which determine which exceptions we should NOT
+ * print.
+ * @param excErrno (out param) The POSIX error number associated with the
+ * exception.
+ * @param shouldPrint (out param) Nonzero if we should print this exception,
+ * based on the noPrintFlags and its name.
+ */
+void getExceptionInfo(const char *excName, int noPrintFlags,
+ int *excErrno, int *shouldPrint);
+
+/**
* Print out information about an exception and free it.
*
* @param env The JNI environment
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto?rev=1397387&r1=1397386&r2=1397387&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto Fri Oct 12 00:15:22 2012
@@ -302,6 +302,7 @@ message BlocksWithLocationsProto {
message RemoteEditLogProto {
required uint64 startTxId = 1; // Starting available edit log transaction
required uint64 endTxId = 2; // Ending available edit log transaction
+ optional bool isInProgress = 3 [default = false];
}
/**
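For illustration, the new optional field surfaces through the protobuf-generated Java API roughly as follows (a hedged sketch: the generated class is assumed to live in HdfsProtos per the standard protoc output for this file, and the transaction ids are invented):

    import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto;

    // Describe an edit log segment that is still being written.
    RemoteEditLogProto log = RemoteEditLogProto.newBuilder()
        .setStartTxId(101L)       // first transaction in the segment
        .setEndTxId(150L)         // last transaction seen so far
        .setIsInProgress(true)    // new optional field; defaults to false
        .build();

    // Messages serialized before this change still parse: readers of old
    // data simply see the default value, false.
    boolean finalized = !log.getIsInProgress();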
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml?rev=1397387&r1=1397386&r2=1397387&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml Fri Oct 12 00:15:22 2012
@@ -41,11 +41,34 @@
</property>
<property>
+ <name>dfs.namenode.rpc-address</name>
+ <value></value>
+ <description>
+    RPC address that handles all client requests. In the case of HA/Federation where multiple namenodes exist,
+    the name service id is added to the name, e.g. dfs.namenode.rpc-address.ns1 or
+    dfs.namenode.rpc-address.EXAMPLENAMESERVICE.
+    The value of this property will take the form of nn-host1:rpc-port.
+ </description>
+</property>
+
+<property>
+ <name>dfs.namenode.servicerpc-address</name>
+ <value></value>
+ <description>
+    RPC address for HDFS services communication. The BackupNode, datanodes, and all other services should
+    connect to this address if it is configured. In the case of HA/Federation where multiple namenodes exist,
+    the name service id is added to the name, e.g. dfs.namenode.servicerpc-address.ns1 or
+    dfs.namenode.servicerpc-address.EXAMPLENAMESERVICE.
+    The value of this property will take the form of nn-host1:rpc-port.
+    If the value of this property is unset, the value of dfs.namenode.rpc-address will be used as the default.
+ </description>
+</property>
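As a usage sketch of the two keys just described (the hostnames, ports, and nameservice id below are invented), the per-nameservice form simply suffixes the key with the nameservice id:

    import org.apache.hadoop.conf.Configuration;

    Configuration conf = new Configuration();
    // Single-namenode setup: plain key, value is host:port.
    conf.set("dfs.namenode.rpc-address", "nn-host1:8020");
    // HA/Federation: suffix the key with the nameservice id.
    conf.set("dfs.namenode.rpc-address.ns1", "nn-host1:8020");
    // Separate service endpoint for datanodes and other services;
    // falls back to dfs.namenode.rpc-address when unset.
    conf.set("dfs.namenode.servicerpc-address.ns1", "nn-host1:8021");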
+
+<property>
<name>dfs.namenode.secondary.http-address</name>
<value>0.0.0.0:50090</value>
<description>
The secondary namenode http server address and port.
- If the port is 0 then the server will start on a free port.
</description>
</property>
@@ -54,7 +77,6 @@
<value>0.0.0.0:50010</value>
<description>
The datanode server address and port for data transfer.
- If the port is 0 then the server will start on a free port.
</description>
</property>
@@ -63,7 +85,6 @@
<value>0.0.0.0:50075</value>
<description>
The datanode http server address and port.
- If the port is 0 then the server will start on a free port.
</description>
</property>
@@ -72,7 +93,6 @@
<value>0.0.0.0:50020</value>
<description>
The datanode ipc server address and port.
- If the port is 0 then the server will start on a free port.
</description>
</property>
@@ -87,7 +107,6 @@
<value>0.0.0.0:50070</value>
<description>
The address and the base port where the dfs namenode web ui will listen on.
- If the port is 0 then the server will start on a free port.
</description>
</property>
@@ -241,6 +260,11 @@
</property>
<property>
+ <name>dfs.namenode.edits.journal-plugin.qjournal</name>
+ <value>org.apache.hadoop.hdfs.qjournal.client.QuorumJournalManager</value>
+</property>
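This key follows the pattern dfs.namenode.edits.journal-plugin.&lt;scheme&gt;: when an edits directory URI uses that scheme, the named class is loaded as its journal manager. A hedged sketch of registering a hypothetical custom plugin the same way (the scheme and class below are invented):

    import org.apache.hadoop.conf.Configuration;

    Configuration conf = new Configuration();
    // URIs such as myjournal://... would then be handled by this class.
    conf.set("dfs.namenode.edits.journal-plugin.myjournal",
        "com.example.MyJournalManager");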
+
+<property>
<name>dfs.permissions.enabled</name>
<value>true</value>
<description>
@@ -964,12 +988,28 @@
<name>dfs.namenode.check.stale.datanode</name>
<value>false</value>
<description>
- Indicate whether or not to check "stale" datanodes whose
- heartbeat messages have not been received by the namenode
- for more than a specified time interval. If this configuration
- parameter is set as true, the stale datanodes will be moved to
- the end of the target node list for reading. The writing will
- also try to avoid stale nodes.
+ Indicate whether or not to check "stale" datanodes whose
+ heartbeat messages have not been received by the namenode
+ for more than a specified time interval. If this configuration
+     parameter is set to true, the system will keep track
+ of the number of stale datanodes. The stale datanodes will be
+ moved to the end of the node list returned for reading. See
+ dfs.namenode.avoid.write.stale.datanode for details on how this
+ affects writes.
+ </description>
+</property>
+
+<property>
+ <name>dfs.namenode.avoid.write.stale.datanode</name>
+ <value>false</value>
+ <description>
+ Indicate whether or not to avoid writing to "stale" datanodes whose
+ heartbeat messages have not been received by the namenode
+ for more than a specified time interval. If this configuration
+     parameter and dfs.namenode.check.stale.datanode are both set to true,
+     writes will avoid using stale datanodes unless a high fraction
+     of datanodes is marked as stale. See
+ dfs.namenode.write.stale.datanode.ratio for details.
</description>
</property>
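A minimal sketch of enabling both stale-datanode settings together (the key names are exactly those documented above; the Configuration usage is standard Hadoop):

    import org.apache.hadoop.conf.Configuration;

    Configuration conf = new Configuration();
    // Track stale datanodes and move them to the end of read pipelines.
    conf.setBoolean("dfs.namenode.check.stale.datanode", true);
    // Additionally avoid stale datanodes when choosing write targets,
    // subject to dfs.namenode.write.stale.datanode.ratio below.
    conf.setBoolean("dfs.namenode.avoid.write.stale.datanode", true);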
@@ -977,10 +1017,24 @@
<name>dfs.namenode.stale.datanode.interval</name>
<value>30000</value>
<description>
- Default time interval for marking a datanode as "stale", i.e., if
- the namenode has not received heartbeat msg from a datanode for
- more than this time interval, the datanode will be marked and treated
- as "stale" by default.
+ Default time interval for marking a datanode as "stale", i.e., if
+     the namenode has not received a heartbeat message from a datanode for
+     more than this time interval, the datanode will be marked and treated
+     as "stale" by default. The stale interval should not be too small, since
+     otherwise datanodes would change stale state too frequently.
+     We thus enforce a minimum stale interval (by default, 3 times
+     the heartbeat interval) and guarantee that the configured stale interval
+     is never less than that minimum.
+ </description>
+</property>
+
+<property>
+ <name>dfs.namenode.write.stale.datanode.ratio</name>
+ <value>0.5f</value>
+ <description>
+     When the ratio of stale datanodes to total datanodes
+     exceeds this value, stop avoiding writing to stale nodes so
+     as to prevent causing hotspots.
</description>
</property>
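To make the ratio concrete, a worked example with invented numbers: at the default ratio of 0.5, a 10-node cluster keeps avoiding stale nodes for writes while at most 5 are stale; once 6 nodes are stale (0.6 > 0.5), avoidance is switched off so the few remaining healthy nodes do not become write hotspots. A sketch of that check:

    int totalDatanodes = 10;    // hypothetical cluster size
    int staleDatanodes = 6;     // hypothetical stale count
    float ratio = 0.5f;         // dfs.namenode.write.stale.datanode.ratio

    // Avoid stale nodes for writes only while they are a small enough
    // fraction of the cluster; here 6/10 = 0.6 exceeds 0.5.
    boolean avoidStaleForWrite =
        ((float) staleDatanodes / totalDatanodes) <= ratio;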
@@ -1099,4 +1153,21 @@
</description>
</property>
+<property>
+ <name>dfs.journalnode.rpc-address</name>
+ <value>0.0.0.0:8485</value>
+ <description>
+ The JournalNode RPC server address and port.
+ </description>
+</property>
+
+<property>
+ <name>dfs.journalnode.http-address</name>
+ <value>0.0.0.0:8480</value>
+ <description>
+ The address and port the JournalNode web UI listens on.
+ If the port is 0 then the server will start on a free port.
+ </description>
+</property>
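Together with the qjournal plugin key registered earlier in this file, these endpoints are what a namenode's shared edits directory typically points at. A hedged illustration (the JournalNode hostnames and journal id are invented; the qjournal:// scheme is what selects the QuorumJournalManager plugin):

    import org.apache.hadoop.conf.Configuration;

    Configuration conf = new Configuration();
    // Three hypothetical JournalNodes on the default RPC port 8485.
    conf.set("dfs.namenode.shared.edits.dir",
        "qjournal://jn1:8485;jn2:8485;jn3:8485/mycluster");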
+
</configuration>
Propchange: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
Merged /hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1363593-1396941
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1390763-1397380
Propchange: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1390763-1397380
Merged /hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1363593-1396941
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.jsp
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.jsp?rev=1397387&r1=1397386&r2=1397387&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.jsp (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.jsp Fri Oct 12 00:15:22 2012
@@ -60,8 +60,10 @@
<%= NamenodeJspHelper.getCorruptFilesWarning(fsn)%>
<% healthjsp.generateHealthReport(out, nn, request); %>
-<hr>
+<% healthjsp.generateJournalReport(out, nn, request); %>
+<hr/>
<% healthjsp.generateConfReport(out, nn, request); %>
+<hr>
<%
out.println(ServletUtil.htmlFooter());
%>
Propchange: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
Merged /hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1363593-1396941
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1390763-1397380
Propchange: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
Merged /hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1363593-1396941
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1390763-1397380
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java?rev=1397387&r1=1397386&r2=1397387&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java Fri Oct 12 00:15:22 2012
@@ -85,6 +85,7 @@ import org.apache.hadoop.security.UserGr
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.VersionInfo;
+import com.google.common.base.Charsets;
import com.google.common.base.Joiner;
/** Utilities for HDFS tests */
@@ -210,27 +211,40 @@ public class DFSTestUtil {
public static void createFile(FileSystem fs, Path fileName, long fileLen,
short replFactor, long seed) throws IOException {
+ createFile(fs, fileName, 1024, fileLen, fs.getDefaultBlockSize(fileName),
+ replFactor, seed);
+ }
+
+ public static void createFile(FileSystem fs, Path fileName, int bufferLen,
+ long fileLen, long blockSize, short replFactor, long seed)
+ throws IOException {
+ assert bufferLen > 0;
if (!fs.mkdirs(fileName.getParent())) {
throw new IOException("Mkdirs failed to create " +
fileName.getParent().toString());
}
FSDataOutputStream out = null;
try {
- out = fs.create(fileName, replFactor);
- byte[] toWrite = new byte[1024];
- Random rb = new Random(seed);
- long bytesToWrite = fileLen;
- while (bytesToWrite>0) {
- rb.nextBytes(toWrite);
- int bytesToWriteNext = (1024<bytesToWrite)?1024:(int)bytesToWrite;
-
- out.write(toWrite, 0, bytesToWriteNext);
- bytesToWrite -= bytesToWriteNext;
+ out = fs.create(fileName, true, fs.getConf()
+ .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
+ replFactor, blockSize);
+ if (fileLen > 0) {
+ byte[] toWrite = new byte[bufferLen];
+ Random rb = new Random(seed);
+ long bytesToWrite = fileLen;
+ while (bytesToWrite>0) {
+ rb.nextBytes(toWrite);
+ int bytesToWriteNext = (bufferLen < bytesToWrite) ? bufferLen
+ : (int) bytesToWrite;
+
+ out.write(toWrite, 0, bytesToWriteNext);
+ bytesToWrite -= bytesToWriteNext;
+ }
}
- out.close();
- out = null;
} finally {
- IOUtils.closeStream(out);
+ if (out != null) {
+ out.close();
+ }
}
}
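For example, a test can now control the write buffer and block size explicitly through the new overload (the path and sizes below are arbitrary):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DFSTestUtil;

    FileSystem fs = FileSystem.get(new Configuration());
    DFSTestUtil.createFile(fs, new Path("/test/file1"),
        4096,           // bufferLen: bytes written per random chunk
        1024 * 1024,    // fileLen: total file length
        512 * 1024,     // blockSize
        (short) 3,      // replFactor
        0xDEADBEEFL);   // seed for reproducible contents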
@@ -594,12 +608,21 @@ public class DFSTestUtil {
IOUtils.copyBytes(is, os, s.length(), true);
}
- // Returns url content as string.
+ /**
+ * @return url content as string (UTF-8 encoding assumed)
+ */
public static String urlGet(URL url) throws IOException {
+ return new String(urlGetBytes(url), Charsets.UTF_8);
+ }
+
+ /**
+ * @return URL contents as a byte array
+ */
+ public static byte[] urlGetBytes(URL url) throws IOException {
URLConnection conn = url.openConnection();
ByteArrayOutputStream out = new ByteArrayOutputStream();
IOUtils.copyBytes(conn.getInputStream(), out, 4096, true);
- return out.toString();
+ return out.toByteArray();
}
/**
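A small usage sketch (the URL is invented): urlGetBytes serves callers that need the raw response, while urlGet layers UTF-8 decoding on top of it:

    import java.net.URL;
    import org.apache.hadoop.hdfs.DFSTestUtil;

    byte[] raw  = DFSTestUtil.urlGetBytes(new URL("http://nn-host1:50070/jmx"));
    String text = DFSTestUtil.urlGet(new URL("http://nn-host1:50070/jmx"));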
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=1397387&r1=1397386&r2=1397387&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java Fri Oct 12 00:15:22 2012
@@ -624,14 +624,20 @@ public class MiniDFSCluster {
}
federation = nnTopology.isFederated();
- createNameNodesAndSetConf(
- nnTopology, manageNameDfsDirs, manageNameDfsSharedDirs,
- enableManagedDfsDirsRedundancy,
- format, operation, clusterId, conf);
-
+ try {
+ createNameNodesAndSetConf(
+ nnTopology, manageNameDfsDirs, manageNameDfsSharedDirs,
+ enableManagedDfsDirsRedundancy,
+ format, operation, clusterId, conf);
+ } catch (IOException ioe) {
+ LOG.error("IOE creating namenodes. Permissions dump:\n" +
+ createPermissionsDiagnosisString(data_dir));
+ throw ioe;
+ }
if (format) {
if (data_dir.exists() && !FileUtil.fullyDelete(data_dir)) {
- throw new IOException("Cannot remove data directory: " + data_dir);
+ throw new IOException("Cannot remove data directory: " + data_dir +
+ createPermissionsDiagnosisString(data_dir));
}
}
@@ -647,6 +653,27 @@ public class MiniDFSCluster {
ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
}
+ /**
+   * @return a debug string that can help diagnose why a given
+   * directory might have a permissions error in the context
+   * of a test case
+ */
+ private String createPermissionsDiagnosisString(File path) {
+ StringBuilder sb = new StringBuilder();
+ while (path != null) {
+ sb.append("path '" + path + "': ").append("\n");
+ sb.append("\tabsolute:").append(path.getAbsolutePath()).append("\n");
+ sb.append("\tpermissions: ");
+ sb.append(path.isDirectory() ? "d": "-");
+ sb.append(path.canRead() ? "r" : "-");
+ sb.append(path.canWrite() ? "w" : "-");
+ sb.append(path.canExecute() ? "x" : "-");
+ sb.append("\n");
+ path = path.getParentFile();
+ }
+ return sb.toString();
+ }
+
private void createNameNodesAndSetConf(MiniDFSNNTopology nnTopology,
boolean manageNameDfsDirs, boolean manageNameDfsSharedDirs,
boolean enableManagedDfsDirsRedundancy, boolean format,
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java?rev=1397387&r1=1397386&r2=1397387&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java Fri Oct 12 00:15:22 2012
@@ -37,7 +37,6 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
@@ -141,13 +140,6 @@ public class TestDataTransferProtocol {
}
}
- void createFile(FileSystem fs, Path path, int fileLen) throws IOException {
- byte [] arr = new byte[fileLen];
- FSDataOutputStream out = fs.create(path);
- out.write(arr);
- out.close();
- }
-
void readFile(FileSystem fs, Path path, int fileLen) throws IOException {
byte [] arr = new byte[fileLen];
FSDataInputStream in = fs.open(path);
@@ -357,7 +349,9 @@ public class TestDataTransferProtocol {
int fileLen = Math.min(conf.getInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096), 4096);
- createFile(fileSys, file, fileLen);
+ DFSTestUtil.createFile(fileSys, file, fileLen, fileLen,
+ fileSys.getDefaultBlockSize(file),
+ fileSys.getDefaultReplication(file), 0L);
// get the first blockid for the file
final ExtendedBlock firstBlock = DFSTestUtil.getFirstBlock(fileSys, file);
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java?rev=1397387&r1=1397386&r2=1397387&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java Fri Oct 12 00:15:22 2012
@@ -27,6 +27,7 @@ import java.net.InetSocketAddress;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
@@ -92,6 +93,58 @@ public class TestDatanodeRegistration {
}
@Test
+ public void testChangeStorageID() throws Exception {
+ final String DN_IP_ADDR = "127.0.0.1";
+ final String DN_HOSTNAME = "localhost";
+ final int DN_XFER_PORT = 12345;
+ final int DN_INFO_PORT = 12346;
+ final int DN_IPC_PORT = 12347;
+ Configuration conf = new HdfsConfiguration();
+ MiniDFSCluster cluster = null;
+ try {
+ cluster = new MiniDFSCluster.Builder(conf)
+ .numDataNodes(0)
+ .build();
+ InetSocketAddress addr = new InetSocketAddress(
+ "localhost",
+ cluster.getNameNodePort());
+ DFSClient client = new DFSClient(addr, conf);
+ NamenodeProtocols rpcServer = cluster.getNameNodeRpc();
+
+ // register a datanode
+ DatanodeID dnId = new DatanodeID(DN_IP_ADDR, DN_HOSTNAME,
+ "fake-storage-id", DN_XFER_PORT, DN_INFO_PORT, DN_IPC_PORT);
+ long nnCTime = cluster.getNamesystem().getFSImage().getStorage()
+ .getCTime();
+ StorageInfo mockStorageInfo = mock(StorageInfo.class);
+ doReturn(nnCTime).when(mockStorageInfo).getCTime();
+ doReturn(HdfsConstants.LAYOUT_VERSION).when(mockStorageInfo)
+ .getLayoutVersion();
+ DatanodeRegistration dnReg = new DatanodeRegistration(dnId,
+ mockStorageInfo, null, VersionInfo.getVersion());
+ rpcServer.registerDatanode(dnReg);
+
+ DatanodeInfo[] report = client.datanodeReport(DatanodeReportType.ALL);
+ assertEquals("Expected a registered datanode", 1, report.length);
+
+ // register the same datanode again with a different storage ID
+ dnId = new DatanodeID(DN_IP_ADDR, DN_HOSTNAME,
+ "changed-fake-storage-id", DN_XFER_PORT, DN_INFO_PORT, DN_IPC_PORT);
+ dnReg = new DatanodeRegistration(dnId,
+ mockStorageInfo, null, VersionInfo.getVersion());
+ rpcServer.registerDatanode(dnReg);
+
+ report = client.datanodeReport(DatanodeReportType.ALL);
+ assertEquals("Datanode with changed storage ID not recognized",
+ 1, report.length);
+ } finally {
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
+ }
+
+ @Test
public void testRegistrationWithDifferentSoftwareVersions() throws Exception {
Configuration conf = new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_KEY, "3.0.0");
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java?rev=1397387&r1=1397386&r2=1397387&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java Fri Oct 12 00:15:22 2012
@@ -79,7 +79,8 @@ public class TestFileStatus {
hftpfs = cluster.getHftpFileSystem(0);
dfsClient = new DFSClient(NameNode.getAddress(conf), conf);
file1 = new Path("filestatus.dat");
- writeFile(fs, file1, 1, fileSize, blockSize);
+ DFSTestUtil.createFile(fs, file1, fileSize, fileSize, blockSize, (short) 1,
+ seed);
}
@AfterClass
@@ -87,18 +88,6 @@ public class TestFileStatus {
fs.close();
cluster.shutdown();
}
-
- private static void writeFile(FileSystem fileSys, Path name, int repl,
- int fileSize, int blockSize) throws IOException {
- // Create and write a file that contains three blocks of data
- FSDataOutputStream stm = fileSys.create(name, true,
- HdfsConstants.IO_FILE_BUFFER_SIZE, (short)repl, (long)blockSize);
- byte[] buffer = new byte[fileSize];
- Random rand = new Random(seed);
- rand.nextBytes(buffer);
- stm.write(buffer);
- stm.close();
- }
private void checkFile(FileSystem fileSys, Path name, int repl)
throws IOException, InterruptedException, TimeoutException {
@@ -218,7 +207,8 @@ public class TestFileStatus {
// create another file that is smaller than a block.
Path file2 = new Path(dir, "filestatus2.dat");
- writeFile(fs, file2, 1, blockSize/4, blockSize);
+ DFSTestUtil.createFile(fs, file2, blockSize/4, blockSize/4, blockSize,
+ (short) 1, seed);
checkFile(fs, file2, 1);
// verify file attributes
@@ -230,7 +220,8 @@ public class TestFileStatus {
// Create another file in the same directory
Path file3 = new Path(dir, "filestatus3.dat");
- writeFile(fs, file3, 1, blockSize/4, blockSize);
+ DFSTestUtil.createFile(fs, file3, blockSize/4, blockSize/4, blockSize,
+ (short) 1, seed);
checkFile(fs, file3, 1);
file3 = fs.makeQualified(file3);
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java?rev=1397387&r1=1397386&r2=1397387&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java Fri Oct 12 00:15:22 2012
@@ -110,9 +110,7 @@ public class TestGetBlocks {
// do the writing but do not close the FSDataOutputStream
// in order to mimic the ongoing writing
final Path fileName = new Path("/file1");
- stm = fileSys.create(
- fileName,
- true,
+ stm = fileSys.create(fileName, true,
fileSys.getConf().getInt(
CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
(short) 3, blockSize);
@@ -180,29 +178,15 @@ public class TestGetBlocks {
final short REPLICATION_FACTOR = (short) 2;
final int DEFAULT_BLOCK_SIZE = 1024;
- final Random r = new Random();
CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(
REPLICATION_FACTOR).build();
try {
cluster.waitActive();
-
- // create a file with two blocks
- FileSystem fs = cluster.getFileSystem();
- FSDataOutputStream out = fs.create(new Path("/tmp.txt"),
- REPLICATION_FACTOR);
- byte[] data = new byte[1024];
long fileLen = 2 * DEFAULT_BLOCK_SIZE;
- long bytesToWrite = fileLen;
- while (bytesToWrite > 0) {
- r.nextBytes(data);
- int bytesToWriteNext = (1024 < bytesToWrite) ? 1024
- : (int) bytesToWrite;
- out.write(data, 0, bytesToWriteNext);
- bytesToWrite -= bytesToWriteNext;
- }
- out.close();
+ DFSTestUtil.createFile(cluster.getFileSystem(), new Path("/tmp.txt"),
+ fileLen, REPLICATION_FACTOR, 0L);
// get blocks & data nodes
List<LocatedBlock> locatedBlocks;
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpDelegationToken.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpDelegationToken.java?rev=1397387&r1=1397386&r2=1397387&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpDelegationToken.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpDelegationToken.java Fri Oct 12 00:15:22 2012
@@ -41,6 +41,8 @@ public class TestHftpDelegationToken {
@Test
public void testHdfsDelegationToken() throws Exception {
+ SecurityUtilTestHelper.setTokenServiceUseIp(true);
+
final Configuration conf = new Configuration();
conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
UserGroupInformation.setConfiguration(conf);
@@ -265,4 +267,4 @@ public class TestHftpDelegationToken {
@Override
protected void initDelegationToken() throws IOException {}
}
-}
\ No newline at end of file
+}
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java?rev=1397387&r1=1397386&r2=1397387&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java Fri Oct 12 00:15:22 2012
@@ -102,9 +102,15 @@ public class TestHftpFileSystem {
@AfterClass
public static void tearDown() throws IOException {
- hdfs.close();
- hftpFs.close();
- cluster.shutdown();
+ if (hdfs != null) {
+ hdfs.close();
+ }
+ if (hftpFs != null) {
+ hftpFs.close();
+ }
+ if (cluster != null) {
+ cluster.shutdown();
+ }
}
/**
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpURLTimeouts.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpURLTimeouts.java?rev=1397387&r1=1397386&r2=1397387&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpURLTimeouts.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpURLTimeouts.java Fri Oct 12 00:15:22 2012
@@ -53,19 +53,23 @@ public class TestHftpURLTimeouts {
boolean timedout = false;
HftpFileSystem fs = (HftpFileSystem)FileSystem.get(uri, conf);
- HttpURLConnection conn = fs.openConnection("/", "");
- timedout = false;
try {
- // this will consume the only slot in the backlog
- conn.getInputStream();
- } catch (SocketTimeoutException ste) {
- timedout = true;
- assertEquals("Read timed out", ste.getMessage());
+ HttpURLConnection conn = fs.openConnection("/", "");
+ timedout = false;
+ try {
+ // this will consume the only slot in the backlog
+ conn.getInputStream();
+ } catch (SocketTimeoutException ste) {
+ timedout = true;
+ assertEquals("Read timed out", ste.getMessage());
+ } finally {
+ if (conn != null) conn.disconnect();
+ }
+ assertTrue("read timedout", timedout);
+ assertTrue("connect timedout", checkConnectTimeout(fs, false));
} finally {
- if (conn != null) conn.disconnect();
+ fs.close();
}
- assertTrue("read timedout", timedout);
- assertTrue("connect timedout", checkConnectTimeout(fs, false));
}
@Test
@@ -79,20 +83,24 @@ public class TestHftpURLTimeouts {
boolean timedout = false;
HsftpFileSystem fs = (HsftpFileSystem)FileSystem.get(uri, conf);
- HttpURLConnection conn = null;
- timedout = false;
try {
- // this will consume the only slot in the backlog
- conn = fs.openConnection("/", "");
- } catch (SocketTimeoutException ste) {
- // SSL expects a negotiation, so it will timeout on read, unlike hftp
- timedout = true;
- assertEquals("Read timed out", ste.getMessage());
+ HttpURLConnection conn = null;
+ timedout = false;
+ try {
+ // this will consume the only slot in the backlog
+ conn = fs.openConnection("/", "");
+ } catch (SocketTimeoutException ste) {
+ // SSL expects a negotiation, so it will timeout on read, unlike hftp
+ timedout = true;
+ assertEquals("Read timed out", ste.getMessage());
+ } finally {
+ if (conn != null) conn.disconnect();
+ }
+ assertTrue("ssl read connect timedout", timedout);
+ assertTrue("connect timedout", checkConnectTimeout(fs, true));
} finally {
- if (conn != null) conn.disconnect();
+ fs.close();
}
- assertTrue("ssl read connect timedout", timedout);
- assertTrue("connect timedout", checkConnectTimeout(fs, true));
}
private boolean checkConnectTimeout(HftpFileSystem fs, boolean ignoreReadTimeout)
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java?rev=1397387&r1=1397386&r2=1397387&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java Fri Oct 12 00:15:22 2012
@@ -52,22 +52,6 @@ public class TestInjectionForSimulatedSt
private static final Log LOG = LogFactory.getLog(
"org.apache.hadoop.hdfs.TestInjectionForSimulatedStorage");
-
- private void writeFile(FileSystem fileSys, Path name, int repl)
- throws IOException {
- // create and write a file that contains three blocks of data
- FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
- .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
- (short) repl, blockSize);
- byte[] buffer = new byte[filesize];
- for (int i=0; i<buffer.length; i++) {
- buffer[i] = '1';
- }
- stm.write(buffer);
- stm.close();
- }
-
- // Waits for all of the blocks to have expected replication
// Waits for all of the blocks to have expected replication
private void waitForBlockReplication(String filename,
@@ -149,7 +133,8 @@ public class TestInjectionForSimulatedSt
cluster.getNameNodePort()),
conf);
- writeFile(cluster.getFileSystem(), testPath, numDataNodes);
+ DFSTestUtil.createFile(cluster.getFileSystem(), testPath, filesize,
+ filesize, blockSize, (short) numDataNodes, 0L);
waitForBlockReplication(testFile, dfsClient.getNamenode(), numDataNodes, 20);
Iterable<Block>[] blocksList = cluster.getAllBlockReports(bpid);
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestModTime.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestModTime.java?rev=1397387&r1=1397386&r2=1397387&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestModTime.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestModTime.java Fri Oct 12 00:15:22 2012
@@ -50,19 +50,6 @@ public class TestModTime {
Random myrand = new Random();
Path hostsFile;
Path excludeFile;
-
- private void writeFile(FileSystem fileSys, Path name, int repl)
- throws IOException {
- // create and write a file that contains three blocks of data
- FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
- .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
- (short) repl, blockSize);
- byte[] buffer = new byte[fileSize];
- Random rand = new Random(seed);
- rand.nextBytes(buffer);
- stm.write(buffer);
- stm.close();
- }
private void cleanupFile(FileSystem fileSys, Path name) throws IOException {
assertTrue(fileSys.exists(name));
@@ -105,7 +92,8 @@ public class TestModTime {
System.out.println("Creating testdir1 and testdir1/test1.dat.");
Path dir1 = new Path("testdir1");
Path file1 = new Path(dir1, "test1.dat");
- writeFile(fileSys, file1, replicas);
+ DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize,
+ (short) replicas, seed);
FileStatus stat = fileSys.getFileStatus(file1);
long mtime1 = stat.getModificationTime();
assertTrue(mtime1 != 0);
@@ -120,7 +108,8 @@ public class TestModTime {
//
System.out.println("Creating testdir1/test2.dat.");
Path file2 = new Path(dir1, "test2.dat");
- writeFile(fileSys, file2, replicas);
+ DFSTestUtil.createFile(fileSys, file2, fileSize, fileSize, blockSize,
+ (short) replicas, seed);
stat = fileSys.getFileStatus(file2);
//
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java?rev=1397387&r1=1397386&r2=1397387&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java Fri Oct 12 00:15:22 2012
@@ -83,7 +83,7 @@ public class TestParallelReadUtil {
static class DirectReadWorkerHelper implements ReadWorkerHelper {
@Override
public int read(DFSInputStream dis, byte[] target, int startOff, int len) throws IOException {
- ByteBuffer bb = ByteBuffer.wrap(target);
+ ByteBuffer bb = ByteBuffer.allocateDirect(target.length);
int cnt = 0;
synchronized(dis) {
dis.seek(startOff);
@@ -95,6 +95,8 @@ public class TestParallelReadUtil {
cnt += read;
}
}
+ bb.clear();
+ bb.get(target);
return cnt;
}
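The switch to allocateDirect exercises the direct-buffer read path; since a direct ByteBuffer has no backing array, the bytes are copied out explicitly afterwards. A standalone sketch of that copy-out pattern:

    import java.nio.ByteBuffer;

    ByteBuffer bb = ByteBuffer.allocateDirect(16);
    bb.put(new byte[16]);   // filled here for illustration; the test fills
                            // it via DFSInputStream.read(ByteBuffer)
    byte[] target = new byte[16];
    bb.clear();             // position back to 0, limit to capacity
    bb.get(target);         // copy the buffer contents to a heap array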
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java?rev=1397387&r1=1397386&r2=1397387&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java Fri Oct 12 00:15:22 2012
@@ -41,11 +41,9 @@ public class TestPread {
boolean simulatedStorage = false;
private void writeFile(FileSystem fileSys, Path name) throws IOException {
- // create and write a file that contains three blocks of data
- DataOutputStream stm = fileSys.create(name, true, 4096, (short)1,
- blockSize);
// test empty file open and read
- stm.close();
+ DFSTestUtil.createFile(fileSys, name, 12 * blockSize, 0,
+ blockSize, (short) 1, seed);
FSDataInputStream in = fileSys.open(name);
byte[] buffer = new byte[12 * blockSize];
in.readFully(0, buffer, 0, 0);
@@ -62,11 +60,8 @@ public class TestPread {
assertTrue("Cannot delete file", false);
// now create the real file
- stm = fileSys.create(name, true, 4096, (short)1, blockSize);
- Random rand = new Random(seed);
- rand.nextBytes(buffer);
- stm.write(buffer);
- stm.close();
+ DFSTestUtil.createFile(fileSys, name, 12 * blockSize, 12 * blockSize,
+ blockSize, (short) 1, seed);
}
private void checkAndEraseData(byte[] actual, int from, byte[] expected, String message) {
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java?rev=1397387&r1=1397386&r2=1397387&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java Fri Oct 12 00:15:22 2012
@@ -26,15 +26,12 @@ import java.io.OutputStream;
import java.io.RandomAccessFile;
import java.net.InetSocketAddress;
import java.util.Iterator;
-import java.util.Random;
import java.util.concurrent.TimeoutException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -61,19 +58,6 @@ public class TestReplication {
private static final int numDatanodes = racks.length;
private static final Log LOG = LogFactory.getLog(
"org.apache.hadoop.hdfs.TestReplication");
-
- private void writeFile(FileSystem fileSys, Path name, int repl)
- throws IOException {
- // create and write a file that contains three blocks of data
- FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
- .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
- (short) repl, blockSize);
- byte[] buffer = new byte[fileSize];
- Random rand = new Random(seed);
- rand.nextBytes(buffer);
- stm.write(buffer);
- stm.close();
- }
/* check if there are at least two nodes are on the same rack */
private void checkFile(FileSystem fileSys, Path name, int repl)
@@ -222,19 +206,25 @@ public class TestReplication {
FileSystem fileSys = cluster.getFileSystem();
try {
Path file1 = new Path("/smallblocktest.dat");
- writeFile(fileSys, file1, 3);
+ DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize,
+ (short) 3, seed);
checkFile(fileSys, file1, 3);
cleanupFile(fileSys, file1);
- writeFile(fileSys, file1, 10);
+ DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize,
+ (short) 10, seed);
checkFile(fileSys, file1, 10);
cleanupFile(fileSys, file1);
- writeFile(fileSys, file1, 4);
+ DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize,
+ (short) 4, seed);
checkFile(fileSys, file1, 4);
cleanupFile(fileSys, file1);
- writeFile(fileSys, file1, 1);
+ DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize,
+ (short) 1, seed);
checkFile(fileSys, file1, 1);
cleanupFile(fileSys, file1);
- writeFile(fileSys, file1, 2);
+ DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize,
+ (short) 2, seed);
checkFile(fileSys, file1, 2);
cleanupFile(fileSys, file1);
} finally {
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSeekBug.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSeekBug.java?rev=1397387&r1=1397386&r2=1397387&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSeekBug.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSeekBug.java Fri Oct 12 00:15:22 2012
@@ -40,16 +40,6 @@ public class TestSeekBug {
static final long seed = 0xDEADBEEFL;
static final int ONEMB = 1 << 20;
- private void writeFile(FileSystem fileSys, Path name) throws IOException {
- // create and write a file that contains 1MB
- DataOutputStream stm = fileSys.create(name);
- byte[] buffer = new byte[ONEMB];
- Random rand = new Random(seed);
- rand.nextBytes(buffer);
- stm.write(buffer);
- stm.close();
- }
-
private void checkAndEraseData(byte[] actual, int from, byte[] expected, String message) {
for (int idx = 0; idx < actual.length; idx++) {
assertEquals(message+" byte "+(from+idx)+" differs. expected "+
@@ -132,7 +122,9 @@ public class TestSeekBug {
FileSystem fileSys = cluster.getFileSystem();
try {
Path file1 = new Path("seektest.dat");
- writeFile(fileSys, file1);
+ DFSTestUtil.createFile(fileSys, file1, ONEMB, ONEMB,
+ fileSys.getDefaultBlockSize(file1),
+ fileSys.getDefaultReplication(file1), seed);
seekReadFile(fileSys, file1);
smallReadSeek(fileSys, file1);
cleanupFile(fileSys, file1);
@@ -151,7 +143,9 @@ public class TestSeekBug {
FileSystem fileSys = FileSystem.getLocal(conf);
try {
Path file1 = new Path("build/test/data", "seektest.dat");
- writeFile(fileSys, file1);
+ DFSTestUtil.createFile(fileSys, file1, ONEMB, ONEMB,
+ fileSys.getDefaultBlockSize(file1),
+ fileSys.getDefaultReplication(file1), seed);
seekReadFile(fileSys, file1);
cleanupFile(fileSys, file1);
} finally {
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java?rev=1397387&r1=1397386&r2=1397387&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java Fri Oct 12 00:15:22 2012
@@ -115,6 +115,14 @@ public class TestShortCircuitLocalRead {
stm.close();
}
+ private static byte [] arrayFromByteBuffer(ByteBuffer buf) {
+ ByteBuffer alt = buf.duplicate();
+ alt.clear();
+ byte[] arr = new byte[alt.remaining()];
+ alt.get(arr);
+ return arr;
+ }
+
/**
* Verifies that reading a file with the direct read(ByteBuffer) api gives the expected set of bytes.
*/
@@ -122,7 +130,7 @@ public class TestShortCircuitLocalRead {
int readOffset) throws IOException {
HdfsDataInputStream stm = (HdfsDataInputStream)fs.open(name);
- ByteBuffer actual = ByteBuffer.allocate(expected.length - readOffset);
+ ByteBuffer actual = ByteBuffer.allocateDirect(expected.length - readOffset);
IOUtils.skipFully(stm, readOffset);
@@ -136,7 +144,8 @@ public class TestShortCircuitLocalRead {
// Read across chunk boundary
actual.limit(Math.min(actual.capacity(), nread + 517));
nread += stm.read(actual);
- checkData(actual.array(), readOffset, expected, nread, "A few bytes");
+ checkData(arrayFromByteBuffer(actual), readOffset, expected, nread,
+ "A few bytes");
//Now read rest of it
actual.limit(actual.capacity());
while (actual.hasRemaining()) {
@@ -147,7 +156,7 @@ public class TestShortCircuitLocalRead {
}
nread += nbytes;
}
- checkData(actual.array(), readOffset, expected, "Read 3");
+ checkData(arrayFromByteBuffer(actual), readOffset, expected, "Read 3");
stm.close();
}
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSmallBlock.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSmallBlock.java?rev=1397387&r1=1397386&r2=1397387&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSmallBlock.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSmallBlock.java Fri Oct 12 00:15:22 2012
@@ -42,18 +42,6 @@ public class TestSmallBlock {
static final int blockSize = 1;
static final int fileSize = 20;
boolean simulatedStorage = false;
-
- private void writeFile(FileSystem fileSys, Path name) throws IOException {
- // create and write a file that contains three blocks of data
- FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
- .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
- (short) 1, blockSize);
- byte[] buffer = new byte[fileSize];
- Random rand = new Random(seed);
- rand.nextBytes(buffer);
- stm.write(buffer);
- stm.close();
- }
private void checkAndEraseData(byte[] actual, int from, byte[] expected, String message) {
for (int idx = 0; idx < actual.length; idx++) {
@@ -105,7 +93,8 @@ public class TestSmallBlock {
FileSystem fileSys = cluster.getFileSystem();
try {
Path file1 = new Path("smallblocktest.dat");
- writeFile(fileSys, file1);
+ DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize,
+ (short) 1, seed);
checkFile(fileSys, file1);
cleanupFile(fileSys, file1);
} finally {
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java?rev=1397387&r1=1397386&r2=1397387&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java Fri Oct 12 00:15:22 2012
@@ -74,6 +74,7 @@ import org.apache.hadoop.util.Time;
import org.apache.log4j.Level;
import org.junit.Assert;
import org.junit.Assume;
+import org.junit.Before;
import org.junit.Test;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
@@ -87,14 +88,6 @@ public class TestBlockToken {
public static final Log LOG = LogFactory.getLog(TestBlockToken.class);
private static final String ADDRESS = "0.0.0.0";
- static final String SERVER_PRINCIPAL_KEY = "test.ipc.server.principal";
- private static Configuration conf;
- static {
- conf = new Configuration();
- conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
- UserGroupInformation.setConfiguration(conf);
- }
-
static {
((Log4JLogger) Client.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger) Server.LOG).getLogger().setLevel(Level.ALL);
@@ -111,6 +104,13 @@ public class TestBlockToken {
ExtendedBlock block1 = new ExtendedBlock("0", 0L);
ExtendedBlock block2 = new ExtendedBlock("10", 10L);
ExtendedBlock block3 = new ExtendedBlock("-10", -108L);
+
+ @Before
+ public void disableKerberos() {
+ Configuration conf = new Configuration();
+ conf.set(HADOOP_SECURITY_AUTHENTICATION, "simple");
+ UserGroupInformation.setConfiguration(conf);
+ }
private static class GetLengthAnswer implements
Answer<GetReplicaVisibleLengthResponseProto> {
@@ -215,8 +215,9 @@ public class TestBlockToken {
tokenGenerationAndVerification(masterHandler, slaveHandler);
}
- private Server createMockDatanode(BlockTokenSecretManager sm,
- Token<BlockTokenIdentifier> token) throws IOException, ServiceException {
+ private static Server createMockDatanode(BlockTokenSecretManager sm,
+ Token<BlockTokenIdentifier> token, Configuration conf)
+ throws IOException, ServiceException {
ClientDatanodeProtocolPB mockDN = mock(ClientDatanodeProtocolPB.class);
BlockTokenIdentifier id = sm.createIdentifier();
@@ -238,12 +239,16 @@ public class TestBlockToken {
@Test
public void testBlockTokenRpc() throws Exception {
+ Configuration conf = new Configuration();
+ conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
+ UserGroupInformation.setConfiguration(conf);
+
BlockTokenSecretManager sm = new BlockTokenSecretManager(
blockKeyUpdateInterval, blockTokenLifetime, 0, "fake-pool", null);
Token<BlockTokenIdentifier> token = sm.generateToken(block3,
EnumSet.allOf(BlockTokenSecretManager.AccessMode.class));
- final Server server = createMockDatanode(sm, token);
+ final Server server = createMockDatanode(sm, token, conf);
server.start();
@@ -272,13 +277,17 @@ public class TestBlockToken {
*/
@Test
public void testBlockTokenRpcLeak() throws Exception {
+ Configuration conf = new Configuration();
+ conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
+ UserGroupInformation.setConfiguration(conf);
+
Assume.assumeTrue(FD_DIR.exists());
BlockTokenSecretManager sm = new BlockTokenSecretManager(
blockKeyUpdateInterval, blockTokenLifetime, 0, "fake-pool", null);
Token<BlockTokenIdentifier> token = sm.generateToken(block3,
EnumSet.allOf(BlockTokenSecretManager.AccessMode.class));
- final Server server = createMockDatanode(sm, token);
+ final Server server = createMockDatanode(sm, token, conf);
server.start();
final InetSocketAddress addr = NetUtils.getConnectAddress(server);
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithEncryptedTransfer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithEncryptedTransfer.java?rev=1397387&r1=1397386&r2=1397387&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithEncryptedTransfer.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithEncryptedTransfer.java Fri Oct 12 00:15:22 2012
@@ -33,17 +33,17 @@ public class TestBalancerWithEncryptedTr
conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
}
- @Test
+ @Test(timeout=60000)
public void testEncryptedBalancer0() throws Exception {
new TestBalancer().testBalancer0Internal(conf);
}
- @Test
+ @Test(timeout=60000)
public void testEncryptedBalancer1() throws Exception {
new TestBalancer().testBalancer1Internal(conf);
}
- @Test
+ @Test(timeout=60000)
public void testEncryptedBalancer2() throws Exception {
new TestBalancer().testBalancer2Internal(conf);
}
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java?rev=1397387&r1=1397386&r2=1397387&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java Fri Oct 12 00:15:22 2012
@@ -53,7 +53,7 @@ public class TestRBWBlockInvalidation {
* datanode, namenode should ask to invalidate that corrupted block and
* schedule replication for one more replica for that under replicated block.
*/
- @Test
+ @Test(timeout=60000)
public void testBlockInvalidationWhenRBWReplicaMissedInDN()
throws IOException, InterruptedException {
Configuration conf = new HdfsConfiguration();
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java?rev=1397387&r1=1397386&r2=1397387&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java Fri Oct 12 00:15:22 2012
@@ -38,9 +38,12 @@ import org.apache.hadoop.hdfs.HdfsConfig
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;
+import org.apache.hadoop.util.Time;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.Test;
@@ -55,6 +58,9 @@ public class TestReplicationPolicy {
private static BlockPlacementPolicy replicator;
private static final String filename = "/dummyfile.txt";
private static DatanodeDescriptor dataNodes[];
+ // The interval after which a datanode is marked as stale.
+ private static long staleInterval =
+ DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_DEFAULT;
@Rule
public ExpectedException exception = ExpectedException.none();
@@ -77,6 +83,8 @@ public class TestReplicationPolicy {
"test.build.data", "build/test/data"), "dfs/");
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
new File(baseDir, "name").getPath());
+ // Enable stale datanode checking from the start
+ conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_CHECK_STALE_DATANODE_KEY, true);
DFSTestUtil.formatNameNode(conf);
namenode = new NameNode(conf);
@@ -229,7 +237,7 @@ public class TestReplicationPolicy {
assertEquals(2, targets.length);
//make sure that the chosen node is in the target.
int i = 0;
- for(; i < targets.length && !dataNodes[2].equals(targets[i]); i++);
+ for (; i < targets.length && !dataNodes[2].equals(targets[i]); i++);
assertTrue(i < targets.length);
}
@@ -369,6 +377,202 @@ public class TestReplicationPolicy {
assertTrue(cluster.isOnSameRack(targets[1], targets[2]));
assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
}
+
+ private boolean containsWithinRange(DatanodeDescriptor target,
+ DatanodeDescriptor[] nodes, int startIndex, int endIndex) {
+ assert startIndex >= 0 && startIndex < nodes.length;
+ assert endIndex >= startIndex && endIndex < nodes.length;
+ for (int i = startIndex; i <= endIndex; i++) {
+ if (nodes[i].equals(target)) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ @Test
+ public void testChooseTargetWithStaleNodes() throws Exception {
+ // Enable avoiding writes to stale datanodes
+ namenode.getNamesystem().getBlockManager().getDatanodeManager()
+ .setAvoidStaleDataNodesForWrite(true);
+ // Set dataNodes[0] as stale
+ dataNodes[0].setLastUpdate(Time.now() - staleInterval - 1);
+
+ DatanodeDescriptor[] targets;
+ // Since dataNodes[0] is stale, the policy should choose dataNodes[1],
+ // which is on the same rack as dataNodes[0] (the writer)
+ targets = replicator.chooseTarget(filename, 1, dataNodes[0],
+ new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
+ assertEquals(targets.length, 1);
+ assertEquals(targets[0], dataNodes[1]);
+
+ HashMap<Node, Node> excludedNodes = new HashMap<Node, Node>();
+ excludedNodes.put(dataNodes[1], dataNodes[1]);
+ List<DatanodeDescriptor> chosenNodes = new ArrayList<DatanodeDescriptor>();
+ BlockPlacementPolicyDefault repl = (BlockPlacementPolicyDefault)replicator;
+ targets = chooseTarget(repl, 1, dataNodes[0], chosenNodes, excludedNodes,
+ BLOCK_SIZE);
+ assertEquals(targets.length, 1);
+ assertFalse(cluster.isOnSameRack(targets[0], dataNodes[0]));
+
+ // reset
+ namenode.getNamesystem().getBlockManager().getDatanodeManager()
+ .setAvoidStaleDataNodesForWrite(false);
+ dataNodes[0].setLastUpdate(Time.now());
+ }
+
+ /**
+ * In this test case, we mark three nodes (dataNodes[0] through dataNodes[2])
+ * as stale. When the number of replicas is less than or equal to 3, only
+ * healthy datanodes should be returned by chooseTarget; when the number of
+ * replicas is 4, a stale node must be included.
+ *
+ * @throws Exception
+ */
+ @Test
+ public void testChooseTargetWithHalfStaleNodes() throws Exception {
+ // Enable stale datanode checking
+ namenode.getNamesystem().getBlockManager().getDatanodeManager()
+ .setAvoidStaleDataNodesForWrite(true);
+ // Set dataNodes[0], dataNodes[1], and dataNodes[2] as stale
+ for (int i = 0; i < 3; i++) {
+ dataNodes[i].setLastUpdate(Time.now() - staleInterval - 1);
+ }
+
+ DatanodeDescriptor[] targets;
+ targets = replicator.chooseTarget(filename, 0, dataNodes[0],
+ new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
+ assertEquals(targets.length, 0);
+
+ // With the first three datanodes stale, the chosen target must be healthy
+ targets = replicator.chooseTarget(filename, 1, dataNodes[0],
+ new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
+ assertEquals(targets.length, 1);
+ assertFalse(containsWithinRange(targets[0], dataNodes, 0, 2));
+
+ targets = replicator.chooseTarget(filename, 2, dataNodes[0],
+ new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
+ assertEquals(targets.length, 2);
+ assertFalse(containsWithinRange(targets[0], dataNodes, 0, 2));
+ assertFalse(containsWithinRange(targets[1], dataNodes, 0, 2));
+
+ targets = replicator.chooseTarget(filename, 3, dataNodes[0],
+ new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
+ assertEquals(targets.length, 3);
+ assertTrue(containsWithinRange(targets[0], dataNodes, 3, 5));
+ assertTrue(containsWithinRange(targets[1], dataNodes, 3, 5));
+ assertTrue(containsWithinRange(targets[2], dataNodes, 3, 5));
+
+ targets = replicator.chooseTarget(filename, 4, dataNodes[0],
+ new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
+ assertEquals(targets.length, 4);
+ assertTrue(containsWithinRange(dataNodes[3], targets, 0, 3));
+ assertTrue(containsWithinRange(dataNodes[4], targets, 0, 3));
+ assertTrue(containsWithinRange(dataNodes[5], targets, 0, 3));
+
+ // reset
+ namenode.getNamesystem().getBlockManager().getDatanodeManager()
+ .setAvoidStaleDataNodesForWrite(false);
+ for (int i = 0; i < dataNodes.length; i++) {
+ dataNodes[i].setLastUpdate(Time.now());
+ }
+ }
+
+ @Test
+ public void testChooseTargetWithMoreThanHalfStaleNodes() throws Exception {
+ HdfsConfiguration conf = new HdfsConfiguration();
+ conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_CHECK_STALE_DATANODE_KEY, true);
+ conf.setBoolean(
+ DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY, true);
+ String[] hosts = new String[]{"host1", "host2", "host3",
+ "host4", "host5", "host6"};
+ String[] racks = new String[]{"/d1/r1", "/d1/r1", "/d1/r2",
+ "/d1/r2", "/d2/r3", "/d2/r3"};
+ MiniDFSCluster miniCluster = new MiniDFSCluster.Builder(conf).racks(racks)
+ .hosts(hosts).numDataNodes(hosts.length).build();
+ miniCluster.waitActive();
+
+ try {
+ // Step 1. Mark two datanodes as stale and check that the
+ // avoidStaleDataNodesForWrite calculation is correct.
+ // First stop the heartbeats of host1 and host2
+ for (int i = 0; i < 2; i++) {
+ DataNode dn = miniCluster.getDataNodes().get(i);
+ DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
+ miniCluster.getNameNode().getNamesystem().getBlockManager()
+ .getDatanodeManager().getDatanode(dn.getDatanodeId())
+ .setLastUpdate(Time.now() - staleInterval - 1);
+ }
+ // Instead of waiting, explicitly call heartbeatCheck to
+ // let the heartbeat manager detect the stale nodes
+ miniCluster.getNameNode().getNamesystem().getBlockManager()
+ .getDatanodeManager().getHeartbeatManager().heartbeatCheck();
+ int numStaleNodes = miniCluster.getNameNode().getNamesystem()
+ .getBlockManager().getDatanodeManager().getNumStaleNodes();
+ assertEquals(numStaleNodes, 2);
+ assertTrue(miniCluster.getNameNode().getNamesystem().getBlockManager()
+ .getDatanodeManager().isAvoidingStaleDataNodesForWrite());
+ // Call chooseTarget
+ DatanodeDescriptor staleNodeInfo = miniCluster.getNameNode()
+ .getNamesystem().getBlockManager().getDatanodeManager()
+ .getDatanode(miniCluster.getDataNodes().get(0).getDatanodeId());
+ BlockPlacementPolicy replicator = miniCluster.getNameNode()
+ .getNamesystem().getBlockManager().getBlockPlacementPolicy();
+ DatanodeDescriptor[] targets = replicator.chooseTarget(filename, 3,
+ staleNodeInfo, new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
+ assertEquals(targets.length, 3);
+ assertFalse(cluster.isOnSameRack(targets[0], staleNodeInfo));
+
+ // Step 2. Mark more than half of the datanodes as stale
+ for (int i = 0; i < 4; i++) {
+ DataNode dn = miniCluster.getDataNodes().get(i);
+ DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
+ miniCluster.getNameNode().getNamesystem().getBlockManager()
+ .getDatanodeManager().getDatanode(dn.getDatanodeId())
+ .setLastUpdate(Time.now() - staleInterval - 1);
+ }
+ // Explicitly call heartbeatCheck
+ miniCluster.getNameNode().getNamesystem().getBlockManager()
+ .getDatanodeManager().getHeartbeatManager().heartbeatCheck();
+ numStaleNodes = miniCluster.getNameNode().getNamesystem()
+ .getBlockManager().getDatanodeManager().getNumStaleNodes();
+ assertEquals(numStaleNodes, 4);
+ // Per the policy, once more than half of the datanodes are stale, stale
+ // nodes are again eligible for writes to avoid hotspotting the healthy few
+ assertFalse(miniCluster.getNameNode().getNamesystem().getBlockManager()
+ .getDatanodeManager().isAvoidingStaleDataNodesForWrite());
+ // Call chooseTarget
+ targets = replicator.chooseTarget(filename, 3,
+ staleNodeInfo, new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
+ assertEquals(targets.length, 3);
+ assertTrue(cluster.isOnSameRack(targets[0], staleNodeInfo));
+
+ // Step 3. Return two of the stale datanodes to a healthy state;
+ // two stale nodes remain
+ for (int i = 2; i < 4; i++) {
+ DataNode dn = miniCluster.getDataNodes().get(i);
+ DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, false);
+ miniCluster.getNameNode().getNamesystem().getBlockManager()
+ .getDatanodeManager().getDatanode(dn.getDatanodeId())
+ .setLastUpdate(Time.now());
+ }
+ // Explicitly call heartbeatCheck
+ miniCluster.getNameNode().getNamesystem().getBlockManager()
+ .getDatanodeManager().getHeartbeatManager().heartbeatCheck();
+ numStaleNodes = miniCluster.getNameNode().getNamesystem()
+ .getBlockManager().getDatanodeManager().getNumStaleNodes();
+ assertEquals(numStaleNodes, 2);
+ assertTrue(miniCluster.getNameNode().getNamesystem().getBlockManager()
+ .getDatanodeManager().isAvoidingStaleDataNodesForWrite());
+ // Call chooseTarget
+ targets = replicator.chooseTarget(filename, 3,
+ staleNodeInfo, new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
+ assertEquals(targets.length, 3);
+ assertFalse(cluster.isOnSameRack(targets[0], staleNodeInfo));
+ } finally {
+ miniCluster.shutdown();
+ }
+ }
/**
* This testcase tests re-replication, when dataNodes[0] is already chosen.
@@ -490,8 +694,8 @@ public class TestReplicationPolicy {
.format(true).build();
try {
cluster.waitActive();
- final UnderReplicatedBlocks neededReplications = (UnderReplicatedBlocks) cluster
- .getNameNode().getNamesystem().getBlockManager().neededReplications;
+ final UnderReplicatedBlocks neededReplications = cluster.getNameNode()
+ .getNamesystem().getBlockManager().neededReplications;
for (int i = 0; i < 100; i++) {
// Adding the blocks directly to normal priority
neededReplications.add(new Block(random.nextLong()), 2, 0, 3);
@@ -529,10 +733,10 @@ public class TestReplicationPolicy {
// Adding QUEUE_VERY_UNDER_REPLICATED block
underReplicatedBlocks.add(new Block(random.nextLong()), 2, 0, 7);
- // Adding QUEUE_UNDER_REPLICATED block
+ // Adding QUEUE_REPLICAS_BADLY_DISTRIBUTED block
underReplicatedBlocks.add(new Block(random.nextLong()), 6, 0, 6);
- // Adding QUEUE_REPLICAS_BADLY_DISTRIBUTED block
+ // Adding QUEUE_UNDER_REPLICATED block
underReplicatedBlocks.add(new Block(random.nextLong()), 5, 0, 6);
// Adding QUEUE_WITH_CORRUPT_BLOCKS block
@@ -618,6 +822,11 @@ public class TestReplicationPolicy {
dataNodes[5].setRemaining(1*1024*1024);
replicaNodeList.add(dataNodes[5]);
+ // Refresh the last update time for all the datanodes
+ for (int i = 0; i < dataNodes.length; i++) {
+ dataNodes[i].setLastUpdate(Time.now());
+ }
+
List<DatanodeDescriptor> first = new ArrayList<DatanodeDescriptor>();
List<DatanodeDescriptor> second = new ArrayList<DatanodeDescriptor>();
replicator.splitNodesWithRack(
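
A note on the heuristic these new tests exercise: stale datanodes are avoided as write targets only while they are a minority; once more than half of the cluster is stale, avoidance is switched off so that writes do not all land on the few remaining healthy nodes. A standalone sketch of that ratio check follows; the class, field, and threshold names here are illustrative stand-ins, not the actual DatanodeManager code:

    /** Illustrative sketch of the stale-node write-avoidance ratio check. */
    public class StaleNodePolicySketch {
      // Hypothetical threshold: avoid stale nodes only while they are at most
      // half of the cluster.
      private static final float STALE_RATIO_LIMIT = 0.5f;

      /** @return true if writes should still avoid stale datanodes. */
      static boolean avoidStaleDataNodesForWrite(int numStaleNodes, int numTotalNodes) {
        return numTotalNodes > 0
            && (float) numStaleNodes / numTotalNodes <= STALE_RATIO_LIMIT;
      }

      public static void main(String[] args) {
        // Mirrors the three steps of testChooseTargetWithMoreThanHalfStaleNodes:
        System.out.println(avoidStaleDataNodesForWrite(2, 6)); // true:  2 of 6 stale
        System.out.println(avoidStaleDataNodesForWrite(4, 6)); // false: 4 of 6 stale
        System.out.println(avoidStaleDataNodesForWrite(2, 6)); // true:  back to 2 of 6
      }
    }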
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java?rev=1397387&r1=1397386&r2=1397387&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java Fri Oct 12 00:15:22 2012
@@ -22,6 +22,7 @@ import static org.mockito.Mockito.when;
import java.io.IOException;
import java.net.InetSocketAddress;
+import java.util.ArrayList;
import javax.servlet.ServletContext;
import javax.servlet.http.HttpServletRequest;
@@ -29,7 +30,9 @@ import javax.servlet.http.HttpServletReq
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.namenode.NameNodeHttpServer;
import org.apache.hadoop.hdfs.web.resources.DoAsParam;
import org.apache.hadoop.hdfs.web.resources.UserParam;
@@ -399,4 +402,43 @@ public class TestJspHelper {
ugi.getAuthenticationMethod());
}
}
+
+ @Test
+ public void testSortNodeByFields() throws Exception {
+ DatanodeID dnId1 = new DatanodeID("127.0.0.1", "localhost1", "storage1",
+ 1234, 2345, 3456);
+ DatanodeID dnId2 = new DatanodeID("127.0.0.2", "localhost2", "storage2",
+ 1235, 2346, 3457);
+ DatanodeDescriptor dnDesc1 = new DatanodeDescriptor(dnId1, "rack1", 1024,
+ 100, 924, 100, 10, 2);
+ DatanodeDescriptor dnDesc2 = new DatanodeDescriptor(dnId2, "rack2", 2500,
+ 200, 1848, 200, 20, 1);
+ ArrayList<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
+ live.add(dnDesc1);
+ live.add(dnDesc2);
+
+ // Test sorting by failed volumes
+ JspHelper.sortNodeList(live, "volfails", "ASC");
+ Assert.assertEquals(dnDesc2, live.get(0));
+ Assert.assertEquals(dnDesc1, live.get(1));
+ JspHelper.sortNodeList(live, "volfails", "DSC");
+ Assert.assertEquals(dnDesc1, live.get(0));
+ Assert.assertEquals(dnDesc2, live.get(1));
+
+ // Test sorting by Blockpool used
+ JspHelper.sortNodeList(live, "bpused", "ASC");
+ Assert.assertEquals(dnDesc1, live.get(0));
+ Assert.assertEquals(dnDesc2, live.get(1));
+ JspHelper.sortNodeList(live, "bpused", "DSC");
+ Assert.assertEquals(dnDesc2, live.get(0));
+ Assert.assertEquals(dnDesc1, live.get(1));
+
+ // Test sorting by Percentage Blockpool used
+ JspHelper.sortNodeList(live, "pcbpused", "ASC");
+ Assert.assertEquals(dnDesc2, live.get(0));
+ Assert.assertEquals(dnDesc1, live.get(1));
+ JspHelper.sortNodeList(live, "pcbpused", "DSC");
+ Assert.assertEquals(dnDesc1, live.get(0));
+ Assert.assertEquals(dnDesc2, live.get(1));
+ }
}
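
The sortNodeList calls above sort the live-node list in place by a named field ("volfails", "bpused", "pcbpused") in ascending ("ASC") or descending ("DSC") order. A minimal comparator-based sketch of that idea, with a hypothetical node type rather than the real DatanodeDescriptor API:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Comparator;
    import java.util.List;

    class NodeSortSketch {
      // Hypothetical stand-in for a datanode's sortable metrics.
      static class NodeInfo {
        final int volumeFailures;
        final long blockPoolUsed;
        NodeInfo(int volumeFailures, long blockPoolUsed) {
          this.volumeFailures = volumeFailures;
          this.blockPoolUsed = blockPoolUsed;
        }
      }

      static void sortNodeList(List<NodeInfo> nodes, String field, String order) {
        Comparator<NodeInfo> cmp;
        if ("volfails".equals(field)) {
          cmp = Comparator.comparingInt(n -> n.volumeFailures);
        } else if ("bpused".equals(field)) {
          cmp = Comparator.comparingLong(n -> n.blockPoolUsed);
        } else {
          throw new IllegalArgumentException("unknown sort field: " + field);
        }
        // Note the test uses "DSC", not "DESC", as the descending marker.
        nodes.sort("DSC".equals(order) ? cmp.reversed() : cmp);
      }

      public static void main(String[] args) {
        List<NodeInfo> live = new ArrayList<NodeInfo>(Arrays.asList(
            new NodeInfo(2, 100L), new NodeInfo(1, 200L)));
        sortNodeList(live, "volfails", "ASC");
        System.out.println(live.get(0).volumeFailures); // prints 1
      }
    }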
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java?rev=1397387&r1=1397386&r2=1397387&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java Fri Oct 12 00:15:22 2012
@@ -36,6 +36,7 @@ import org.apache.hadoop.hdfs.MiniDFSNNT
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.util.StringUtils;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
@@ -216,48 +217,62 @@ public class TestDataNodeMultipleRegistr
LOG.info("dn bpos len (still should be 3):" + bposs.length);
Assert.assertEquals("should've registered with three namenodes", 3, bposs.length);
} finally {
- if(cluster != null)
- cluster.shutdown();
+ cluster.shutdown();
}
}
@Test
public void testMiniDFSClusterWithMultipleNN() throws IOException {
-
Configuration conf = new HdfsConfiguration();
// start Federated cluster and add a node.
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
.build();
- Assert.assertNotNull(cluster);
- Assert.assertEquals("(1)Should be 2 namenodes", 2, cluster.getNumNameNodes());
// add a node
- cluster.addNameNode(conf, 0);
- Assert.assertEquals("(1)Should be 3 namenodes", 3, cluster.getNumNameNodes());
- cluster.shutdown();
+ try {
+ Assert.assertNotNull(cluster);
+ cluster.waitActive();
+ Assert.assertEquals("(1)Should be 2 namenodes", 2, cluster.getNumNameNodes());
+
+ cluster.addNameNode(conf, 0);
+ Assert.assertEquals("(1)Should be 3 namenodes", 3, cluster.getNumNameNodes());
+ } catch (IOException ioe) {
+ Assert.fail("Failed to add NN to cluster:" + StringUtils.stringifyException(ioe));
+ } finally {
+ cluster.shutdown();
+ }
// 2. start with Federation flag set
conf = new HdfsConfiguration();
cluster = new MiniDFSCluster.Builder(conf)
.nnTopology(MiniDFSNNTopology.simpleFederatedTopology(1))
.build();
- Assert.assertNotNull(cluster);
- Assert.assertEquals("(2)Should be 1 namenodes", 1, cluster.getNumNameNodes());
- // add a node
- cluster.addNameNode(conf, 0);
- Assert.assertEquals("(2)Should be 2 namenodes", 2, cluster.getNumNameNodes());
- cluster.shutdown();
+ try {
+ Assert.assertNotNull(cluster);
+ cluster.waitActive();
+ Assert.assertEquals("(2)Should be 1 namenodes", 1, cluster.getNumNameNodes());
+
+ // add a node
+ cluster.addNameNode(conf, 0);
+ Assert.assertEquals("(2)Should be 2 namenodes", 2, cluster.getNumNameNodes());
+ } catch (IOException ioe) {
+ Assert.fail("Failed to add NN to cluster:" + StringUtils.stringifyException(ioe));
+ } finally {
+ cluster.shutdown();
+ }
// 3. start non-federated
conf = new HdfsConfiguration();
cluster = new MiniDFSCluster.Builder(conf).build();
- Assert.assertNotNull(cluster);
- Assert.assertEquals("(2)Should be 1 namenodes", 1, cluster.getNumNameNodes());
// add a node
try {
+ cluster.waitActive();
+ Assert.assertNotNull(cluster);
+ Assert.assertEquals("(2)Should be 1 namenodes", 1, cluster.getNumNameNodes());
+
cluster.addNameNode(conf, 9929);
Assert.fail("shouldn't be able to add another NN to non federated cluster");
} catch (IOException e) {
@@ -268,6 +283,4 @@ public class TestDataNodeMultipleRegistr
cluster.shutdown();
}
}
-
-
}
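
The refactoring above wraps each MiniDFSCluster use in try/finally so the cluster is shut down even when an assertion fails partway through; a leaked cluster otherwise holds ports, threads, and storage directories that can break later tests. A minimal fragment of the pattern, using only the builder calls that appear in this diff:

    HdfsConfiguration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
      cluster.waitActive();   // block until the NN and DNs are up
      // ... assertions against the running cluster go here ...
    } finally {
      cluster.shutdown();     // always release ports, threads, and dirs
    }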
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java?rev=1397387&r1=1397386&r2=1397387&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java Fri Oct 12 00:15:22 2012
@@ -17,11 +17,16 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
+import static org.mockito.Matchers.anyInt;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.spy;
+
import java.io.File;
import java.io.IOException;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.hadoop.fs.UnresolvedLinkException;
+import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -182,6 +187,15 @@ public class NameNodeAdapter {
}
}
+ public static FSEditLogOp createMkdirOp(String path) {
+ MkdirOp op = MkdirOp.getInstance(new FSEditLogOp.OpInstanceCache())
+ .setPath(path)
+ .setTimestamp(0)
+ .setPermissionStatus(new PermissionStatus(
+ "testuser", "testgroup", FsPermission.getDefault()));
+ return op;
+ }
+
/**
* @return the number of blocks marked safe by safemode, or -1
* if safemode is not running.
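
createMkdirOp above fabricates an edit-log op through a fluent setter chain drawn from an OpInstanceCache, letting tests build ops without touching a live namesystem. A miniature sketch of that cached fluent-builder shape, with hypothetical classes rather than the FSEditLogOp API:

    /** Illustrative miniature of a cached, fluent op builder. */
    class OpSketch {
      private String path;
      private long timestamp;

      // Trivial stand-in for OpInstanceCache: hands out a reusable instance.
      static class Cache {
        private final OpSketch cached = new OpSketch();
        OpSketch get() { return cached; }
      }

      OpSketch setPath(String path) { this.path = path; return this; }
      OpSketch setTimestamp(long ts) { this.timestamp = ts; return this; }

      public static void main(String[] args) {
        OpSketch op = new Cache().get()
            .setPath("/testdir")
            .setTimestamp(0L);
        System.out.println(op.path + " @ " + op.timestamp);
      }
    }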
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java?rev=1397387&r1=1397386&r2=1397387&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java Fri Oct 12 00:15:22 2012
@@ -36,6 +36,7 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -64,6 +65,10 @@ public class TestBackupNode {
}
static final String BASE_DIR = MiniDFSCluster.getBaseDirectory();
+
+ static final long seed = 0xDEADBEEFL;
+ static final int blockSize = 4096;
+ static final int fileSize = 8192;
@Before
public void setUp() throws Exception {
@@ -350,14 +355,17 @@ public class TestBackupNode {
+ NetUtils.getHostPortString(add)).toUri(), conf);
boolean canWrite = true;
try {
- TestCheckpoint.writeFile(bnFS, file3, replication);
+ DFSTestUtil.createFile(bnFS, file3, fileSize, fileSize, blockSize,
+ replication, seed);
} catch (IOException eio) {
LOG.info("Write to BN failed as expected: ", eio);
canWrite = false;
}
assertFalse("Write to BackupNode must be prohibited.", canWrite);
- TestCheckpoint.writeFile(fileSys, file3, replication);
+ DFSTestUtil.createFile(fileSys, file3, fileSize, fileSize, blockSize,
+ replication, seed);
+
TestCheckpoint.checkFile(fileSys, file3, replication);
// should also be on BN right away
assertTrue("file3 does not exist on BackupNode",