Posted to common-commits@hadoop.apache.org by cu...@apache.org on 2007/04/16 23:44:46 UTC
svn commit: r529410 [24/27] - in /lucene/hadoop/trunk: ./
src/contrib/abacus/src/examples/org/apache/hadoop/abacus/examples/
src/contrib/abacus/src/java/org/apache/hadoop/abacus/
src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/ src/...
Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestCheckpoint.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestCheckpoint.java?view=diff&rev=529410&r1=529409&r2=529410
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestCheckpoint.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestCheckpoint.java Mon Apr 16 14:44:35 2007
@@ -37,10 +37,10 @@
static final int numDatanodes = 1;
private void writeFile(FileSystem fileSys, Path name, int repl)
- throws IOException {
+ throws IOException {
FSDataOutputStream stm = fileSys.create(name, true,
- fileSys.getConf().getInt("io.file.buffer.size", 4096),
- (short)repl, (long)blockSize);
+ fileSys.getConf().getInt("io.file.buffer.size", 4096),
+ (short)repl, (long)blockSize);
byte[] buffer = new byte[fileSize];
Random rand = new Random(seed);
rand.nextBytes(buffer);
@@ -50,17 +50,17 @@
private void checkFile(FileSystem fileSys, Path name, int repl)
- throws IOException {
+ throws IOException {
assertTrue(fileSys.exists(name));
String[][] locations = fileSys.getFileCacheHints(name, 0, fileSize);
for (int idx = 0; idx < locations.length; idx++) {
assertEquals("Number of replicas for block" + idx,
- Math.min(numDatanodes, repl), locations[idx].length);
+ Math.min(numDatanodes, repl), locations[idx].length);
}
}
private void cleanupFile(FileSystem fileSys, Path name)
- throws IOException {
+ throws IOException {
assertTrue(fileSys.exists(name));
fileSys.delete(name);
assertTrue(!fileSys.exists(name));
@@ -70,7 +70,7 @@
* put back the old namedir
*/
private void resurrectNameDir(File namedir)
- throws IOException {
+ throws IOException {
String parentdir = namedir.getParent();
String name = namedir.getName();
File oldname = new File(parentdir, name + ".old");
@@ -83,7 +83,7 @@
* remove one namedir
*/
private void removeOneNameDir(File namedir)
- throws IOException {
+ throws IOException {
String parentdir = namedir.getParent();
String name = namedir.getName();
File newname = new File(parentdir, name + ".old");
@@ -96,7 +96,7 @@
* Verify that namenode does not startup if one namedir is bad.
*/
private void testNamedirError(Configuration conf, Collection<File> namedirs)
- throws IOException {
+ throws IOException {
System.out.println("Starting testNamedirError");
MiniDFSCluster cluster = null;
@@ -123,7 +123,7 @@
* Simulate namenode crashing after rolling edit log.
*/
private void testSecondaryNamenodeError1(Configuration conf)
- throws IOException {
+ throws IOException {
System.out.println("Starting testSecondaryNamenodeError 1");
Path file1 = new Path("checkpointxx.dat");
MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes,
@@ -182,7 +182,7 @@
* Simulate a namenode crash after uploading new image
*/
private void testSecondaryNamenodeError2(Configuration conf)
- throws IOException {
+ throws IOException {
System.out.println("Starting testSecondaryNamenodeError 21");
Path file1 = new Path("checkpointyy.dat");
MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes,
@@ -237,7 +237,7 @@
}
}
- /**
+ /**
* Tests checkpoint in DFS.
*/
public void testCheckpoint() throws IOException {
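[The hunks above interleave old and new indentation, which makes the helper hard to read straight through. Reconstructed from the + lines, the write-side pattern these DFS tests share is roughly the sketch below; the trailing write/close calls and the blockSize/fileSize/seed constants are assumptions, since the excerpt ends before them.]

    import java.io.IOException;
    import java.util.Random;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    class WriteFileSketch {
      static final long blockSize = 8192;   // assumed test constant
      static final int fileSize = 16384;    // assumed test constant
      static final long seed = 0xDEADBEEFL; // assumed test constant

      // Create a file with an explicit buffer size, replication factor and
      // block size, then fill it with deterministic pseudo-random data.
      static void writeFile(FileSystem fileSys, Path name, int repl)
          throws IOException {
        FSDataOutputStream stm = fileSys.create(name, true,
            fileSys.getConf().getInt("io.file.buffer.size", 4096),
            (short)repl, blockSize);
        byte[] buffer = new byte[fileSize];
        new Random(seed).nextBytes(buffer);  // same seed => reproducible data
        stm.write(buffer);
        stm.close();
      }
    }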
Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSFinalize.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSFinalize.java?view=diff&rev=529410&r1=529409&r2=529410
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSFinalize.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSFinalize.java Mon Apr 16 14:44:35 2007
@@ -36,7 +36,7 @@
public class TestDFSFinalize extends TestCase {
private static final Log LOG = LogFactory.getLog(
- "org.apache.hadoop.dfs.TestDFSFinalize");
+ "org.apache.hadoop.dfs.TestDFSFinalize");
private Configuration conf;
private int testCounter = 0;
private MiniDFSCluster cluster = null;
@@ -47,8 +47,8 @@
void log(String label, int numDirs) {
LOG.info("============================================================");
LOG.info("***TEST " + (testCounter++) + "*** "
- + label + ":"
- + " numDirs="+numDirs);
+ + label + ":"
+ + " numDirs="+numDirs);
}
/**
@@ -69,9 +69,9 @@
}
for (int i = 0; i < dataNodeDirs.length; i++) {
assertEquals(
- UpgradeUtilities.checksumContents(
- DATA_NODE, new File(dataNodeDirs[i],"current")),
- UpgradeUtilities.checksumMasterContents(DATA_NODE));
+ UpgradeUtilities.checksumContents(
+ DATA_NODE, new File(dataNodeDirs[i],"current")),
+ UpgradeUtilities.checksumMasterContents(DATA_NODE));
}
for (int i = 0; i < nameNodeDirs.length; i++) {
assertFalse(new File(nameNodeDirs[i],"previous").isDirectory());
Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSMkdirs.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSMkdirs.java?view=diff&rev=529410&r1=529409&r2=529410
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSMkdirs.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSMkdirs.java Mon Apr 16 14:44:35 2007
@@ -46,28 +46,28 @@
MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
FileSystem fileSys = cluster.getFileSystem();
try {
- // First create a new directory with mkdirs
- Path myPath = new Path("/test/mkdirs");
- assertTrue(fileSys.mkdirs(myPath));
- assertTrue(fileSys.exists(myPath));
- assertTrue(fileSys.mkdirs(myPath));
+ // First create a new directory with mkdirs
+ Path myPath = new Path("/test/mkdirs");
+ assertTrue(fileSys.mkdirs(myPath));
+ assertTrue(fileSys.exists(myPath));
+ assertTrue(fileSys.mkdirs(myPath));
- // Second, create a file in that directory.
- Path myFile = new Path("/test/mkdirs/myFile");
- writeFile(fileSys, myFile);
+ // Second, create a file in that directory.
+ Path myFile = new Path("/test/mkdirs/myFile");
+ writeFile(fileSys, myFile);
- // Third, use mkdir to create a subdirectory off of that file,
- // and check that it fails.
- Path myIllegalPath = new Path("/test/mkdirs/myFile/subdir");
- Boolean exist = true;
- try {
- fileSys.mkdirs(myIllegalPath);
- } catch (IOException e) {
- exist = false;
- }
- assertFalse(exist);
- assertFalse(fileSys.exists(myIllegalPath));
- fileSys.delete(myFile);
+ // Third, use mkdir to create a subdirectory off of that file,
+ // and check that it fails.
+ Path myIllegalPath = new Path("/test/mkdirs/myFile/subdir");
+ Boolean exist = true;
+ try {
+ fileSys.mkdirs(myIllegalPath);
+ } catch (IOException e) {
+ exist = false;
+ }
+ assertFalse(exist);
+ assertFalse(fileSys.exists(myIllegalPath));
+ fileSys.delete(myFile);
} finally {
fileSys.close();
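[Consolidated from the + lines above, the negative-path check reads as the sketch below. The assertions are rewritten as a plain throw so the fragment stands alone; in the real test the surrounding JUnit TestCase supplies assertFalse.]

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    class MkdirsUnderFileSketch {
      // mkdirs of a path whose parent is a regular file must fail.
      static void checkMkdirsUnderFile(FileSystem fileSys) throws IOException {
        Path myIllegalPath = new Path("/test/mkdirs/myFile/subdir");
        boolean created = true;
        try {
          fileSys.mkdirs(myIllegalPath);  // expected to throw IOException
        } catch (IOException e) {
          created = false;
        }
        if (created || fileSys.exists(myIllegalPath)) {
          throw new AssertionError("subdir was created under a file");
        }
      }
    }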
Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSRollback.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSRollback.java?view=diff&rev=529410&r1=529409&r2=529410
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSRollback.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSRollback.java Mon Apr 16 14:44:35 2007
@@ -37,7 +37,7 @@
public class TestDFSRollback extends TestCase {
private static final Log LOG = LogFactory.getLog(
- "org.apache.hadoop.dfs.TestDFSRollback");
+ "org.apache.hadoop.dfs.TestDFSRollback");
private Configuration conf;
private int testCounter = 0;
private MiniDFSCluster cluster = null;
@@ -48,8 +48,8 @@
void log(String label, int numDirs) {
LOG.info("============================================================");
LOG.info("***TEST " + (testCounter++) + "*** "
- + label + ":"
- + " numDirs="+numDirs);
+ + label + ":"
+ + " numDirs="+numDirs);
}
/**
@@ -58,23 +58,23 @@
*/
void checkResult(NodeType nodeType, String[] baseDirs) throws IOException {
switch (nodeType) {
- case NAME_NODE:
- for (int i = 0; i < baseDirs.length; i++) {
- assertTrue(new File(baseDirs[i],"current").isDirectory());
- assertTrue(new File(baseDirs[i],"current/VERSION").isFile());
- assertTrue(new File(baseDirs[i],"current/edits").isFile());
- assertTrue(new File(baseDirs[i],"current/fsimage").isFile());
- assertTrue(new File(baseDirs[i],"current/fstime").isFile());
- }
- break;
- case DATA_NODE:
- for (int i = 0; i < baseDirs.length; i++) {
- assertEquals(
- UpgradeUtilities.checksumContents(
- nodeType, new File(baseDirs[i],"current")),
- UpgradeUtilities.checksumMasterContents(nodeType));
- }
- break;
+ case NAME_NODE:
+ for (int i = 0; i < baseDirs.length; i++) {
+ assertTrue(new File(baseDirs[i],"current").isDirectory());
+ assertTrue(new File(baseDirs[i],"current/VERSION").isFile());
+ assertTrue(new File(baseDirs[i],"current/edits").isFile());
+ assertTrue(new File(baseDirs[i],"current/fsimage").isFile());
+ assertTrue(new File(baseDirs[i],"current/fstime").isFile());
+ }
+ break;
+ case DATA_NODE:
+ for (int i = 0; i < baseDirs.length; i++) {
+ assertEquals(
+ UpgradeUtilities.checksumContents(
+ nodeType, new File(baseDirs[i],"current")),
+ UpgradeUtilities.checksumMasterContents(nodeType));
+ }
+ break;
}
for (int i = 0; i < baseDirs.length; i++) {
assertFalse(new File(baseDirs[i],"previous").isDirectory());
@@ -162,9 +162,9 @@
UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
baseDirs = UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "previous");
UpgradeUtilities.createVersionFile(DATA_NODE,baseDirs,
- new StorageInfo(Integer.MIN_VALUE,
- UpgradeUtilities.getCurrentNamespaceID(cluster),
- UpgradeUtilities.getCurrentFsscTime(cluster)));
+ new StorageInfo(Integer.MIN_VALUE,
+ UpgradeUtilities.getCurrentNamespaceID(cluster),
+ UpgradeUtilities.getCurrentFsscTime(cluster)));
startDataNodeShouldFail(StartupOption.ROLLBACK);
cluster.shutdown();
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
@@ -177,9 +177,9 @@
UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
baseDirs = UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "previous");
UpgradeUtilities.createVersionFile(DATA_NODE,baseDirs,
- new StorageInfo(UpgradeUtilities.getCurrentLayoutVersion(),
- UpgradeUtilities.getCurrentNamespaceID(cluster),
- Long.MAX_VALUE));
+ new StorageInfo(UpgradeUtilities.getCurrentLayoutVersion(),
+ UpgradeUtilities.getCurrentNamespaceID(cluster),
+ Long.MAX_VALUE));
startDataNodeShouldFail(StartupOption.ROLLBACK);
cluster.shutdown();
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
@@ -216,9 +216,9 @@
UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
baseDirs = UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous");
UpgradeUtilities.createVersionFile(NAME_NODE,baseDirs,
- new StorageInfo(1,
- UpgradeUtilities.getCurrentNamespaceID(null),
- UpgradeUtilities.getCurrentFsscTime(null)));
+ new StorageInfo(1,
+ UpgradeUtilities.getCurrentNamespaceID(null),
+ UpgradeUtilities.getCurrentFsscTime(null)));
startNameNodeShouldFail(StartupOption.UPGRADE);
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
} // end numDir loop
Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSShell.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSShell.java?view=diff&rev=529410&r1=529409&r2=529410
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSShell.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSShell.java Mon Apr 16 14:44:35 2007
@@ -49,132 +49,132 @@
MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
FileSystem fs = cluster.getFileSystem();
assertTrue("Not a HDFS: "+fs.getUri(),
- fs instanceof DistributedFileSystem);
+ fs instanceof DistributedFileSystem);
DistributedFileSystem fileSys = (DistributedFileSystem)fs;
FsShell shell = new FsShell();
shell.setConf(conf);
try {
- // First create a new directory with mkdirs
- Path myPath = new Path("/test/mkdirs");
- assertTrue(fileSys.mkdirs(myPath));
- assertTrue(fileSys.exists(myPath));
- assertTrue(fileSys.mkdirs(myPath));
-
- // Second, create a file in that directory.
- Path myFile = new Path("/test/mkdirs/myFile");
- writeFile(fileSys, myFile);
- assertTrue(fileSys.exists(myFile));
-
- // Verify that we can read the file
- {
- String[] args = new String[2];
- args[0] = "-cat";
- args[1] = "/test/mkdirs/myFile";
- int val = -1;
- try {
- val = shell.run(args);
- } catch (Exception e) {
- System.err.println("Exception raised from DFSShell.run: " +
- StringUtils.stringifyException(e));
- }
- assertTrue(val == 0);
- }
-
- // Verify that we can get with and without crc
- {
- File testFile = new File(TEST_ROOT_DIR, "myFile");
- File checksumFile = new File(fileSys.getChecksumFile(
- new Path(testFile.getAbsolutePath())).toString());
- testFile.delete();
- checksumFile.delete();
+ // First create a new directory with mkdirs
+ Path myPath = new Path("/test/mkdirs");
+ assertTrue(fileSys.mkdirs(myPath));
+ assertTrue(fileSys.exists(myPath));
+ assertTrue(fileSys.mkdirs(myPath));
+
+ // Second, create a file in that directory.
+ Path myFile = new Path("/test/mkdirs/myFile");
+ writeFile(fileSys, myFile);
+ assertTrue(fileSys.exists(myFile));
+
+ // Verify that we can read the file
+ {
+ String[] args = new String[2];
+ args[0] = "-cat";
+ args[1] = "/test/mkdirs/myFile";
+ int val = -1;
+ try {
+ val = shell.run(args);
+ } catch (Exception e) {
+ System.err.println("Exception raised from DFSShell.run: " +
+ StringUtils.stringifyException(e));
+ }
+ assertTrue(val == 0);
+ }
+
+ // Verify that we can get with and without crc
+ {
+ File testFile = new File(TEST_ROOT_DIR, "myFile");
+ File checksumFile = new File(fileSys.getChecksumFile(
+ new Path(testFile.getAbsolutePath())).toString());
+ testFile.delete();
+ checksumFile.delete();
- String[] args = new String[3];
- args[0] = "-get";
- args[1] = "/test/mkdirs";
- args[2] = TEST_ROOT_DIR;
- int val = -1;
- try {
- val = shell.run(args);
- } catch (Exception e) {
- System.err.println("Exception raised from DFSShell.run " +
- e.getLocalizedMessage());
- }
- assertTrue(val == 0);
- assertTrue("Copying failed.", testFile.exists());
- assertTrue("Checksum file " + checksumFile+" is copied.", !checksumFile.exists());
- testFile.delete();
- }
- {
- File testFile = new File(TEST_ROOT_DIR, "myFile");
- File checksumFile = new File(fileSys.getChecksumFile(
- new Path(testFile.getAbsolutePath())).toString());
- testFile.delete();
- checksumFile.delete();
+ String[] args = new String[3];
+ args[0] = "-get";
+ args[1] = "/test/mkdirs";
+ args[2] = TEST_ROOT_DIR;
+ int val = -1;
+ try {
+ val = shell.run(args);
+ } catch (Exception e) {
+ System.err.println("Exception raised from DFSShell.run " +
+ e.getLocalizedMessage());
+ }
+ assertTrue(val == 0);
+ assertTrue("Copying failed.", testFile.exists());
+ assertTrue("Checksum file " + checksumFile+" is copied.", !checksumFile.exists());
+ testFile.delete();
+ }
+ {
+ File testFile = new File(TEST_ROOT_DIR, "myFile");
+ File checksumFile = new File(fileSys.getChecksumFile(
+ new Path(testFile.getAbsolutePath())).toString());
+ testFile.delete();
+ checksumFile.delete();
- String[] args = new String[4];
- args[0] = "-get";
- args[1] = "-crc";
- args[2] = "/test/mkdirs";
- args[3] = TEST_ROOT_DIR;
- int val = -1;
- try {
- val = shell.run(args);
- } catch (Exception e) {
- System.err.println("Exception raised from DFSShell.run " +
- e.getLocalizedMessage());
- }
- assertTrue(val == 0);
+ String[] args = new String[4];
+ args[0] = "-get";
+ args[1] = "-crc";
+ args[2] = "/test/mkdirs";
+ args[3] = TEST_ROOT_DIR;
+ int val = -1;
+ try {
+ val = shell.run(args);
+ } catch (Exception e) {
+ System.err.println("Exception raised from DFSShell.run " +
+ e.getLocalizedMessage());
+ }
+ assertTrue(val == 0);
- assertTrue("Copying data file failed.", testFile.exists());
- assertTrue("Checksum file " + checksumFile+" not copied.", checksumFile.exists());
- testFile.delete();
- checksumFile.delete();
- }
- // Verify that we get an error while trying to read an nonexistent file
- {
- String[] args = new String[2];
- args[0] = "-cat";
- args[1] = "/test/mkdirs/myFile1";
- int val = -1;
- try {
- val = shell.run(args);
- } catch (Exception e) {
- System.err.println("Exception raised from DFSShell.run " +
- e.getLocalizedMessage());
- }
- assertTrue(val != 0);
- }
-
- // Verify that we get an error while trying to delete an nonexistent file
- {
- String[] args = new String[2];
- args[0] = "-rm";
- args[1] = "/test/mkdirs/myFile1";
- int val = -1;
- try {
- val = shell.run(args);
- } catch (Exception e) {
- System.err.println("Exception raised from DFSShell.run " +
- e.getLocalizedMessage());
- }
- assertTrue(val != 0);
- }
-
- // Verify that we succeed in removing the file we created
- {
- String[] args = new String[2];
- args[0] = "-rm";
- args[1] = "/test/mkdirs/myFile";
- int val = -1;
- try {
- val = shell.run(args);
- } catch (Exception e) {
- System.err.println("Exception raised from DFSShell.run " +
- e.getLocalizedMessage());
- }
- assertTrue(val == 0);
+ assertTrue("Copying data file failed.", testFile.exists());
+ assertTrue("Checksum file " + checksumFile+" not copied.", checksumFile.exists());
+ testFile.delete();
+ checksumFile.delete();
+ }
+ // Verify that we get an error while trying to read an nonexistent file
+ {
+ String[] args = new String[2];
+ args[0] = "-cat";
+ args[1] = "/test/mkdirs/myFile1";
+ int val = -1;
+ try {
+ val = shell.run(args);
+ } catch (Exception e) {
+ System.err.println("Exception raised from DFSShell.run " +
+ e.getLocalizedMessage());
+ }
+ assertTrue(val != 0);
+ }
+
+ // Verify that we get an error while trying to delete an nonexistent file
+ {
+ String[] args = new String[2];
+ args[0] = "-rm";
+ args[1] = "/test/mkdirs/myFile1";
+ int val = -1;
+ try {
+ val = shell.run(args);
+ } catch (Exception e) {
+ System.err.println("Exception raised from DFSShell.run " +
+ e.getLocalizedMessage());
+ }
+ assertTrue(val != 0);
+ }
+
+ // Verify that we succeed in removing the file we created
+ {
+ String[] args = new String[2];
+ args[0] = "-rm";
+ args[1] = "/test/mkdirs/myFile";
+ int val = -1;
+ try {
+ val = shell.run(args);
+ } catch (Exception e) {
+ System.err.println("Exception raised from DFSShell.run " +
+ e.getLocalizedMessage());
}
+ assertTrue(val == 0);
+ }
} finally {
try {
Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSShellGenericOptions.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSShellGenericOptions.java?view=diff&rev=529410&r1=529409&r2=529410
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSShellGenericOptions.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSShellGenericOptions.java Mon Apr 16 14:44:35 2007
@@ -33,90 +33,90 @@
public class TestDFSShellGenericOptions extends TestCase {
- public void testDFSCommand() throws IOException {
- String namenode = null;
- MiniDFSCluster cluster = null;
- try {
- Configuration conf = new Configuration();
- cluster = new MiniDFSCluster(conf, 1, true, null);
- namenode = conf.get("fs.default.name", "local");
- String [] args = new String[4];
- args[2] = "-mkdir";
- args[3] = "/data";
- testFsOption(args, namenode);
- testConfOption(args, namenode);
- testPropertyOption(args, namenode);
- } finally {
- if (cluster != null) { cluster.shutdown(); }
- }
- }
-
- private void testFsOption(String [] args, String namenode) {
- // prepare arguments to create a directory /data
- args[0] = "-fs";
- args[1] = namenode;
- execute(args, namenode);
+ public void testDFSCommand() throws IOException {
+ String namenode = null;
+ MiniDFSCluster cluster = null;
+ try {
+ Configuration conf = new Configuration();
+ cluster = new MiniDFSCluster(conf, 1, true, null);
+ namenode = conf.get("fs.default.name", "local");
+ String [] args = new String[4];
+ args[2] = "-mkdir";
+ args[3] = "/data";
+ testFsOption(args, namenode);
+ testConfOption(args, namenode);
+ testPropertyOption(args, namenode);
+ } finally {
+ if (cluster != null) { cluster.shutdown(); }
}
+ }
+
+ private void testFsOption(String [] args, String namenode) {
+ // prepare arguments to create a directory /data
+ args[0] = "-fs";
+ args[1] = namenode;
+ execute(args, namenode);
+ }
- private void testConfOption(String[] args, String namenode) {
- // prepare configuration hadoop-site.xml
- File configDir = new File(new File("build", "test"), "minidfs");
- assertTrue(configDir.mkdirs());
- File siteFile = new File(configDir, "hadoop-site.xml");
- PrintWriter pw;
- try {
- pw = new PrintWriter(siteFile);
- pw.print("<?xml version=\"1.0\"?>\n"+
- "<?xml-stylesheet type=\"text/xsl\" href=\"configuration.xsl\"?>\n"+
- "<configuration>\n"+
- " <property>\n"+
- " <name>fs.default.name</name>\n"+
- " <value>"+namenode+"</value>\n"+
- " </property>\n"+
- "</configuration>\n");
- pw.close();
+ private void testConfOption(String[] args, String namenode) {
+ // prepare configuration hadoop-site.xml
+ File configDir = new File(new File("build", "test"), "minidfs");
+ assertTrue(configDir.mkdirs());
+ File siteFile = new File(configDir, "hadoop-site.xml");
+ PrintWriter pw;
+ try {
+ pw = new PrintWriter(siteFile);
+ pw.print("<?xml version=\"1.0\"?>\n"+
+ "<?xml-stylesheet type=\"text/xsl\" href=\"configuration.xsl\"?>\n"+
+ "<configuration>\n"+
+ " <property>\n"+
+ " <name>fs.default.name</name>\n"+
+ " <value>"+namenode+"</value>\n"+
+ " </property>\n"+
+ "</configuration>\n");
+ pw.close();
- // prepare arguments to create a directory /data
- args[0] = "-conf";
- args[1] = siteFile.getPath();
- execute(args, namenode);
- } catch (FileNotFoundException e) {
- e.printStackTrace();
- } finally {
- siteFile.delete();
- configDir.delete();
- }
+ // prepare arguments to create a directory /data
+ args[0] = "-conf";
+ args[1] = siteFile.getPath();
+ execute(args, namenode);
+ } catch (FileNotFoundException e) {
+ e.printStackTrace();
+ } finally {
+ siteFile.delete();
+ configDir.delete();
}
+ }
- private void testPropertyOption(String[] args, String namenode) {
- // prepare arguments to create a directory /data
- args[0] = "-D";
- args[1] = "fs.default.name="+namenode;
- execute(args, namenode);
- }
+ private void testPropertyOption(String[] args, String namenode) {
+ // prepare arguments to create a directory /data
+ args[0] = "-D";
+ args[1] = "fs.default.name="+namenode;
+ execute(args, namenode);
+ }
- private void execute( String [] args, String namenode ) {
- FsShell shell=new FsShell();
- FileSystem fs=null;
+ private void execute( String [] args, String namenode ) {
+ FsShell shell=new FsShell();
+ FileSystem fs=null;
+ try {
+ shell.doMain(new Configuration(), args);
+ fs = new DistributedFileSystem(
+ DataNode.createSocketAddr(namenode),
+ shell.getConf());
+ assertTrue( "Directory does not get created",
+ fs.isDirectory(new Path("/data")) );
+ fs.delete(new Path("/data"));
+ } catch (Exception e) {
+ System.err.println(e.getMessage());
+ e.printStackTrace();
+ } finally {
+ if( fs!=null ) {
try {
- shell.doMain(new Configuration(), args);
- fs = new DistributedFileSystem(
- DataNode.createSocketAddr(namenode),
- shell.getConf());
- assertTrue( "Directory does not get created",
- fs.isDirectory(new Path("/data")) );
- fs.delete(new Path("/data"));
- } catch (Exception e) {
- System.err.println(e.getMessage());
- e.printStackTrace();
- } finally {
- if( fs!=null ) {
- try {
- fs.close();
- } catch (IOException ignored) {
- }
- }
+ fs.close();
+ } catch (IOException ignored) {
}
+ }
}
+ }
}
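[The reindented test above routes the same -mkdir command to a namenode through three generic options. Consolidated from the + lines, the dispatch pattern is roughly the sketch below; the namenode address is a placeholder, and in the test it comes from the MiniDFSCluster's fs.default.name.]

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FsShell;

    class GenericOptionsSketch {
      public static void main(String[] argv) throws Exception {
        String namenode = "localhost:8020";  // placeholder address
        FsShell shell = new FsShell();
        // 1) route by -fs <namenode>
        shell.doMain(new Configuration(),
            new String[] {"-fs", namenode, "-mkdir", "/data"});
        // 2) route by -conf <site file that sets fs.default.name>
        //    (the test writes a temporary hadoop-site.xml for this case)
        // 3) route by -D fs.default.name=<namenode>
        shell.doMain(new Configuration(),
            new String[] {"-D", "fs.default.name=" + namenode,
                          "-mkdir", "/data"});
      }
    }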
Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSStartupVersions.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSStartupVersions.java?view=diff&rev=529410&r1=529409&r2=529410
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSStartupVersions.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSStartupVersions.java Mon Apr 16 14:44:35 2007
@@ -35,9 +35,9 @@
public class TestDFSStartupVersions extends TestCase {
private static final Log LOG = LogFactory.getLog(
- "org.apache.hadoop.dfs.TestDFSStartupVersions");
+ "org.apache.hadoop.dfs.TestDFSStartupVersions");
private static Path TEST_ROOT_DIR = new Path(
- System.getProperty("test.build.data","/tmp").toString().replace(' ', '+'));
+ System.getProperty("test.build.data","/tmp").toString().replace(' ', '+'));
private MiniDFSCluster cluster = null;
/**
@@ -50,11 +50,11 @@
}
LOG.info("============================================================");
LOG.info("***TEST*** " + label + ":"
- + testCaseLine
- + " nodeType="+nodeType
- + " layoutVersion="+version.getLayoutVersion()
- + " namespaceID="+version.getNamespaceID()
- + " fsscTime="+version.getCTime());
+ + testCaseLine
+ + " nodeType="+nodeType
+ + " layoutVersion="+version.getLayoutVersion()
+ + " namespaceID="+version.getNamespaceID()
+ + " fsscTime="+version.getCTime());
}
/**
@@ -130,20 +130,20 @@
int storedLV = datanodeVer.getLayoutVersion();
if (softwareLV == storedLV &&
datanodeVer.getCTime() == namenodeVer.getCTime())
- {
- LOG.info("layoutVersions and cTimes are equal: isVersionCompatible=true");
- return true;
- }
+ {
+ LOG.info("layoutVersions and cTimes are equal: isVersionCompatible=true");
+ return true;
+ }
// check #2
long absSoftwareLV = Math.abs((long)softwareLV);
long absStoredLV = Math.abs((long)storedLV);
if (absSoftwareLV > absStoredLV ||
(softwareLV == storedLV &&
datanodeVer.getCTime() < namenodeVer.getCTime()))
- {
- LOG.info("softwareLayoutVersion is newer OR namenode cTime is newer: isVersionCompatible=true");
- return true;
- }
+ {
+ LOG.info("softwareLayoutVersion is newer OR namenode cTime is newer: isVersionCompatible=true");
+ return true;
+ }
// check #4
LOG.info("default case: isVersionCompatible=false");
return false;
@@ -168,16 +168,16 @@
Configuration conf = UpgradeUtilities.initializeStorageStateConf(1);
StorageInfo[] versions = initializeVersions();
UpgradeUtilities.createStorageDirs(
- NAME_NODE, conf.getStrings("dfs.name.dir"), "current");
+ NAME_NODE, conf.getStrings("dfs.name.dir"), "current");
cluster = new MiniDFSCluster(conf,0,StartupOption.REGULAR);
StorageInfo nameNodeVersion = new StorageInfo(
- UpgradeUtilities.getCurrentLayoutVersion(),
- UpgradeUtilities.getCurrentNamespaceID(cluster),
- UpgradeUtilities.getCurrentFsscTime(cluster));
+ UpgradeUtilities.getCurrentLayoutVersion(),
+ UpgradeUtilities.getCurrentNamespaceID(cluster),
+ UpgradeUtilities.getCurrentFsscTime(cluster));
log("NameNode version info",NAME_NODE,null,nameNodeVersion);
for (int i = 0; i < versions.length; i++) {
File[] storage = UpgradeUtilities.createStorageDirs(
- DATA_NODE, conf.getStrings("dfs.data.dir"), "current");
+ DATA_NODE, conf.getStrings("dfs.data.dir"), "current");
log("DataNode version info",DATA_NODE,i,versions[i]);
UpgradeUtilities.createVersionFile(DATA_NODE, storage, versions[i]);
try {
@@ -188,7 +188,7 @@
}
assertTrue(cluster.getNameNode() != null);
assertEquals(isVersionCompatible(nameNodeVersion, versions[i]),
- cluster.isDataNodeUp());
+ cluster.isDataNodeUp());
cluster.shutdownDataNodes();
}
}
Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSStorageStateRecovery.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSStorageStateRecovery.java?view=diff&rev=529410&r1=529409&r2=529410
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSStorageStateRecovery.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSStorageStateRecovery.java Mon Apr 16 14:44:35 2007
@@ -37,7 +37,7 @@
public class TestDFSStorageStateRecovery extends TestCase {
private static final Log LOG = LogFactory.getLog(
- "org.apache.hadoop.dfs.TestDFSStorageStateRecovery");
+ "org.apache.hadoop.dfs.TestDFSStorageStateRecovery");
private Configuration conf;
private int testCounter = 0;
private MiniDFSCluster cluster = null;
@@ -84,13 +84,13 @@
void log(String label, int numDirs, int testCaseNum, boolean[] state) {
LOG.info("============================================================");
LOG.info("***TEST " + (testCounter++) + "*** "
- + label + ":"
- + " numDirs="+numDirs
- + " testCase="+testCaseNum
- + " current="+state[0]
- + " previous="+state[1]
- + " previous.tmp="+state[2]
- + " removed.tmp="+state[3]);
+ + label + ":"
+ + " numDirs="+numDirs
+ + " testCase="+testCaseNum
+ + " current="+state[0]
+ + " previous="+state[1]
+ + " previous.tmp="+state[2]
+ + " removed.tmp="+state[3]);
}
/**
@@ -111,8 +111,8 @@
*/
String[] createStorageState(NodeType nodeType, boolean[] state) throws Exception {
String[] baseDirs = (nodeType == NAME_NODE ?
- conf.getStrings("dfs.name.dir") :
- conf.getStrings("dfs.data.dir"));
+ conf.getStrings("dfs.name.dir") :
+ conf.getStrings("dfs.data.dir"));
UpgradeUtilities.createEmptyDirs(baseDirs);
if (state[0]) // current
UpgradeUtilities.createStorageDirs(nodeType, baseDirs, "current");
@@ -134,38 +134,38 @@
*/
void checkResult(NodeType nodeType, String[] baseDirs,
boolean currentShouldExist, boolean previousShouldExist)
- throws IOException
+ throws IOException
{
switch (nodeType) {
- case NAME_NODE:
- if (currentShouldExist) {
- for (int i = 0; i < baseDirs.length; i++) {
- assertTrue(new File(baseDirs[i],"current").isDirectory());
- assertTrue(new File(baseDirs[i],"current/VERSION").isFile());
- assertTrue(new File(baseDirs[i],"current/edits").isFile());
- assertTrue(new File(baseDirs[i],"current/fsimage").isFile());
- assertTrue(new File(baseDirs[i],"current/fstime").isFile());
- }
+ case NAME_NODE:
+ if (currentShouldExist) {
+ for (int i = 0; i < baseDirs.length; i++) {
+ assertTrue(new File(baseDirs[i],"current").isDirectory());
+ assertTrue(new File(baseDirs[i],"current/VERSION").isFile());
+ assertTrue(new File(baseDirs[i],"current/edits").isFile());
+ assertTrue(new File(baseDirs[i],"current/fsimage").isFile());
+ assertTrue(new File(baseDirs[i],"current/fstime").isFile());
}
- break;
- case DATA_NODE:
- if (currentShouldExist) {
- for (int i = 0; i < baseDirs.length; i++) {
- assertEquals(
- UpgradeUtilities.checksumContents(
- nodeType, new File(baseDirs[i],"current")),
- UpgradeUtilities.checksumMasterContents(nodeType));
- }
+ }
+ break;
+ case DATA_NODE:
+ if (currentShouldExist) {
+ for (int i = 0; i < baseDirs.length; i++) {
+ assertEquals(
+ UpgradeUtilities.checksumContents(
+ nodeType, new File(baseDirs[i],"current")),
+ UpgradeUtilities.checksumMasterContents(nodeType));
}
- break;
+ }
+ break;
}
if (previousShouldExist) {
for (int i = 0; i < baseDirs.length; i++) {
assertTrue(new File(baseDirs[i],"previous").isDirectory());
assertEquals(
- UpgradeUtilities.checksumContents(
- nodeType, new File(baseDirs[i],"previous")),
- UpgradeUtilities.checksumMasterContents(nodeType));
+ UpgradeUtilities.checksumContents(
+ nodeType, new File(baseDirs[i],"previous")),
+ UpgradeUtilities.checksumMasterContents(nodeType));
}
}
}
Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSUpgrade.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSUpgrade.java?view=diff&rev=529410&r1=529409&r2=529410
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSUpgrade.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSUpgrade.java Mon Apr 16 14:44:35 2007
@@ -37,7 +37,7 @@
public class TestDFSUpgrade extends TestCase {
private static final Log LOG = LogFactory.getLog(
- "org.apache.hadoop.dfs.TestDFSUpgrade");
+ "org.apache.hadoop.dfs.TestDFSUpgrade");
private Configuration conf;
private int testCounter = 0;
private MiniDFSCluster cluster = null;
@@ -48,8 +48,8 @@
void log(String label, int numDirs) {
LOG.info("============================================================");
LOG.info("***TEST " + (testCounter++) + "*** "
- + label + ":"
- + " numDirs="+numDirs);
+ + label + ":"
+ + " numDirs="+numDirs);
}
/**
@@ -60,30 +60,30 @@
*/
void checkResult(NodeType nodeType, String[] baseDirs) throws IOException {
switch (nodeType) {
- case NAME_NODE:
- for (int i = 0; i < baseDirs.length; i++) {
- assertTrue(new File(baseDirs[i],"current").isDirectory());
- assertTrue(new File(baseDirs[i],"current/VERSION").isFile());
- assertTrue(new File(baseDirs[i],"current/edits").isFile());
- assertTrue(new File(baseDirs[i],"current/fsimage").isFile());
- assertTrue(new File(baseDirs[i],"current/fstime").isFile());
- }
- break;
- case DATA_NODE:
- for (int i = 0; i < baseDirs.length; i++) {
- assertEquals(
- UpgradeUtilities.checksumContents(
- nodeType, new File(baseDirs[i],"current")),
- UpgradeUtilities.checksumMasterContents(nodeType));
- }
- break;
+ case NAME_NODE:
+ for (int i = 0; i < baseDirs.length; i++) {
+ assertTrue(new File(baseDirs[i],"current").isDirectory());
+ assertTrue(new File(baseDirs[i],"current/VERSION").isFile());
+ assertTrue(new File(baseDirs[i],"current/edits").isFile());
+ assertTrue(new File(baseDirs[i],"current/fsimage").isFile());
+ assertTrue(new File(baseDirs[i],"current/fstime").isFile());
+ }
+ break;
+ case DATA_NODE:
+ for (int i = 0; i < baseDirs.length; i++) {
+ assertEquals(
+ UpgradeUtilities.checksumContents(
+ nodeType, new File(baseDirs[i],"current")),
+ UpgradeUtilities.checksumMasterContents(nodeType));
+ }
+ break;
}
for (int i = 0; i < baseDirs.length; i++) {
assertTrue(new File(baseDirs[i],"previous").isDirectory());
assertEquals(
- UpgradeUtilities.checksumContents(
- nodeType, new File(baseDirs[i],"previous")),
- UpgradeUtilities.checksumMasterContents(nodeType));
+ UpgradeUtilities.checksumContents(
+ nodeType, new File(baseDirs[i],"previous")),
+ UpgradeUtilities.checksumMasterContents(nodeType));
}
}
@@ -166,9 +166,9 @@
cluster = new MiniDFSCluster(conf,0,StartupOption.UPGRADE);
baseDirs = UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
UpgradeUtilities.createVersionFile(DATA_NODE,baseDirs,
- new StorageInfo(Integer.MIN_VALUE,
- UpgradeUtilities.getCurrentNamespaceID(cluster),
- UpgradeUtilities.getCurrentFsscTime(cluster)));
+ new StorageInfo(Integer.MIN_VALUE,
+ UpgradeUtilities.getCurrentNamespaceID(cluster),
+ UpgradeUtilities.getCurrentFsscTime(cluster)));
startDataNodeShouldFail(StartupOption.REGULAR);
cluster.shutdown();
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
@@ -179,9 +179,9 @@
cluster = new MiniDFSCluster(conf,0,StartupOption.UPGRADE);
baseDirs = UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
UpgradeUtilities.createVersionFile(DATA_NODE,baseDirs,
- new StorageInfo(UpgradeUtilities.getCurrentLayoutVersion(),
- UpgradeUtilities.getCurrentNamespaceID(cluster),
- Long.MAX_VALUE));
+ new StorageInfo(UpgradeUtilities.getCurrentLayoutVersion(),
+ UpgradeUtilities.getCurrentNamespaceID(cluster),
+ Long.MAX_VALUE));
startDataNodeShouldFail(StartupOption.REGULAR);
cluster.shutdown();
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
@@ -214,9 +214,9 @@
log("NameNode upgrade with future layout version in current",numDirs);
baseDirs = UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
UpgradeUtilities.createVersionFile(NAME_NODE,baseDirs,
- new StorageInfo(Integer.MIN_VALUE,
- UpgradeUtilities.getCurrentNamespaceID(null),
- UpgradeUtilities.getCurrentFsscTime(null)));
+ new StorageInfo(Integer.MIN_VALUE,
+ UpgradeUtilities.getCurrentNamespaceID(null),
+ UpgradeUtilities.getCurrentFsscTime(null)));
startNameNodeShouldFail(StartupOption.UPGRADE);
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
} // end numDir loop
Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDecommission.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDecommission.java?view=diff&rev=529410&r1=529409&r2=529410
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDecommission.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDecommission.java Mon Apr 16 14:44:35 2007
@@ -49,7 +49,7 @@
private enum NodeState {NORMAL, DECOMMISSION_INPROGRESS, DECOMMISSIONED; }
private void writeConfigFile(FileSystem fs, Path name, ArrayList<String> nodes)
- throws IOException {
+ throws IOException {
// delete if it already exists
if (fs.exists(name)) {
@@ -69,11 +69,11 @@
}
private void writeFile(FileSystem fileSys, Path name, int repl)
- throws IOException {
+ throws IOException {
// create and write a file that contains three blocks of data
FSDataOutputStream stm = fileSys.create(name, true,
- fileSys.getConf().getInt("io.file.buffer.size", 4096),
- (short)repl, (long)blockSize);
+ fileSys.getConf().getInt("io.file.buffer.size", 4096),
+ (short)repl, (long)blockSize);
byte[] buffer = new byte[fileSize];
Random rand = new Random(seed);
rand.nextBytes(buffer);
@@ -83,11 +83,11 @@
private void checkFile(FileSystem fileSys, Path name, int repl)
- throws IOException {
+ throws IOException {
String[][] locations = fileSys.getFileCacheHints(name, 0, fileSize);
for (int idx = 0; idx < locations.length; idx++) {
assertEquals("Number of replicas for block" + idx,
- Math.min(numDatanodes, repl), locations[idx].length);
+ Math.min(numDatanodes, repl), locations[idx].length);
}
}
@@ -105,7 +105,7 @@
assertTrue("Not HDFS:"+fileSys.getUri(), fileSys instanceof DistributedFileSystem);
DFSClient.DFSDataInputStream dis = (DFSClient.DFSDataInputStream)
- ((DistributedFileSystem)fileSys).getRawFileSystem().open(name);
+ ((DistributedFileSystem)fileSys).getRawFileSystem().open(name);
DatanodeInfo[][] dinfo = dis.getDataNodes();
for (int blk = 0; blk < dinfo.length; blk++) { // for each block
@@ -119,9 +119,9 @@
}
}
System.out.println("Block " + blk + " has " + hasdown +
- " decommissioned replica.");
+ " decommissioned replica.");
assertEquals("Number of replicas for block" + blk,
- Math.min(numDatanodes, repl+hasdown), nodes.length);
+ Math.min(numDatanodes, repl+hasdown), nodes.length);
}
}
@@ -145,7 +145,7 @@
private String decommissionNode(DFSClient client,
FileSystem filesys,
FileSystem localFileSys)
- throws IOException {
+ throws IOException {
DistributedFileSystem dfs = (DistributedFileSystem) filesys;
DatanodeInfo[] info = client.datanodeReport();
@@ -252,7 +252,7 @@
MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes, true, null);
cluster.waitActive();
InetSocketAddress addr = new InetSocketAddress("localhost",
- cluster.getNameNodePort());
+ cluster.getNameNodePort());
DFSClient client = new DFSClient(addr, conf);
DatanodeInfo[] info = client.datanodeReport();
assertEquals("Number of Datanodes ", numDatanodes, info.length);
Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestFileCorruption.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestFileCorruption.java?view=diff&rev=529410&r1=529409&r2=529410
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestFileCorruption.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestFileCorruption.java Mon Apr 16 14:44:35 2007
@@ -52,7 +52,7 @@
util.createFiles(fs, "/srcdat");
// Now deliberately remove the blocks
File data_dir = new File(System.getProperty("test.build.data"),
- "dfs/data/data5/current");
+ "dfs/data/data5/current");
assertTrue("data directory does not exist", data_dir.exists());
File[] blocks = data_dir.listFiles();
assertTrue("Blocks do not exist in data-dir", (blocks != null) && (blocks.length > 0));
@@ -64,7 +64,7 @@
assertTrue("Cannot remove file.", blocks[idx].delete());
}
assertTrue("Corrupted replicas not handled properly.",
- util.checkFiles(fs, "/srcdat"));
+ util.checkFiles(fs, "/srcdat"));
util.cleanup(fs, "/srcdat");
} finally {
if (cluster != null) { cluster.shutdown(); }
Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestLocalDFS.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestLocalDFS.java?view=diff&rev=529410&r1=529409&r2=529410
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestLocalDFS.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestLocalDFS.java Mon Apr 16 14:44:35 2007
@@ -73,7 +73,7 @@
writeFile(fileSys, file1);
readFile(fileSys, file1);
cleanupFile(fileSys, new Path(new Path(subdir1, subdir2.toString()),
- file1.toString()));
+ file1.toString()));
} finally {
fileSys.close();
cluster.shutdown();
Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestPendingReplication.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestPendingReplication.java?view=diff&rev=529410&r1=529409&r2=529410
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestPendingReplication.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestPendingReplication.java Mon Apr 16 14:44:35 2007
@@ -97,7 +97,7 @@
loop++;
}
System.out.println("Had to wait for " + loop +
- " seconds for the lot to timeout");
+ " seconds for the lot to timeout");
//
// Verify that everything has timed out.
Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestPread.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestPread.java?view=diff&rev=529410&r1=529409&r2=529410
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestPread.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestPread.java Mon Apr 16 14:44:35 2007
@@ -39,7 +39,7 @@
private void writeFile(FileSystem fileSys, Path name) throws IOException {
// create and write a file that contains three blocks of data
DataOutputStream stm = fileSys.create(name, true, 4096, (short)1,
- (long)blockSize);
+ (long)blockSize);
byte[] buffer = new byte[(int)(3*blockSize)];
Random rand = new Random(seed);
rand.nextBytes(buffer);
@@ -50,14 +50,14 @@
private void checkAndEraseData(byte[] actual, int from, byte[] expected, String message) {
for (int idx = 0; idx < actual.length; idx++) {
this.assertEquals(message+" byte "+(from+idx)+" differs. expected "+
- expected[from+idx]+" actual "+actual[idx],
- actual[idx], expected[from+idx]);
+ expected[from+idx]+" actual "+actual[idx],
+ actual[idx], expected[from+idx]);
actual[idx] = 0;
}
}
private void doPread(FSDataInputStream stm, long position, byte[] buffer,
- int offset, int length) throws IOException {
+ int offset, int length) throws IOException {
int nread = 0;
while (nread < length) {
int nbytes = stm.read(position+nread, buffer, offset+nread, length-nread);
Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestReplication.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestReplication.java?view=diff&rev=529410&r1=529409&r2=529410
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestReplication.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestReplication.java Mon Apr 16 14:44:35 2007
@@ -44,14 +44,14 @@
};
private static final int numDatanodes = racks.length;
private static final Log LOG = LogFactory.getLog(
- "org.apache.hadoop.dfs.TestReplication");
+ "org.apache.hadoop.dfs.TestReplication");
private void writeFile(FileSystem fileSys, Path name, int repl)
- throws IOException {
+ throws IOException {
// create and write a file that contains three blocks of data
FSDataOutputStream stm = fileSys.create(name, true,
- fileSys.getConf().getInt("io.file.buffer.size", 4096),
- (short)repl, (long)blockSize);
+ fileSys.getConf().getInt("io.file.buffer.size", 4096),
+ (short)repl, (long)blockSize);
byte[] buffer = new byte[fileSize];
Random rand = new Random(seed);
rand.nextBytes(buffer);
@@ -61,62 +61,62 @@
/* check if there are at least two nodes are on the same rack */
private void checkFile(FileSystem fileSys, Path name, int repl)
- throws IOException {
- Configuration conf = fileSys.getConf();
- ClientProtocol namenode = (ClientProtocol) RPC.getProxy(
- ClientProtocol.class,
- ClientProtocol.versionID,
- DataNode.createSocketAddr(conf.get("fs.default.name")),
- conf);
+ throws IOException {
+ Configuration conf = fileSys.getConf();
+ ClientProtocol namenode = (ClientProtocol) RPC.getProxy(
+ ClientProtocol.class,
+ ClientProtocol.versionID,
+ DataNode.createSocketAddr(conf.get("fs.default.name")),
+ conf);
- LocatedBlock[] locations;
- boolean isReplicationDone;
- do {
- locations = namenode.open(name.toString());
- isReplicationDone = true;
- for (int idx = 0; idx < locations.length; idx++) {
- DatanodeInfo[] datanodes = locations[idx].getLocations();
- if(Math.min(numDatanodes, repl) != datanodes.length) {
- isReplicationDone=false;
- LOG.warn("File has "+datanodes.length+" replicas, expecting "
- +Math.min(numDatanodes, repl));
- try {
- Thread.sleep(15000L);
- } catch (InterruptedException e) {
- // nothing
- }
- break;
- }
- }
- } while(!isReplicationDone);
-
- boolean isOnSameRack = true, isNotOnSameRack = true;
+ LocatedBlock[] locations;
+ boolean isReplicationDone;
+ do {
+ locations = namenode.open(name.toString());
+ isReplicationDone = true;
for (int idx = 0; idx < locations.length; idx++) {
- DatanodeInfo[] datanodes = locations[idx].getLocations();
- if(datanodes.length <= 1) break;
- if(datanodes.length == 2) {
- isNotOnSameRack = !( datanodes[0].getNetworkLocation().equals(
- datanodes[1].getNetworkLocation() ) );
- break;
- }
- isOnSameRack = false;
- isNotOnSameRack = false;
- for (int idy = 0; idy < datanodes.length-1; idy++) {
- LOG.info("datanode "+ idy + ": "+ datanodes[idy].getName());
- boolean onRack = datanodes[idy].getNetworkLocation().equals(
- datanodes[idy+1].getNetworkLocation() );
- if( onRack ) {
- isOnSameRack = true;
- }
- if( !onRack ) {
- isNotOnSameRack = true;
- }
- if( isOnSameRack && isNotOnSameRack ) break;
+ DatanodeInfo[] datanodes = locations[idx].getLocations();
+ if(Math.min(numDatanodes, repl) != datanodes.length) {
+ isReplicationDone=false;
+ LOG.warn("File has "+datanodes.length+" replicas, expecting "
+ +Math.min(numDatanodes, repl));
+ try {
+ Thread.sleep(15000L);
+ } catch (InterruptedException e) {
+ // nothing
}
- if( !isOnSameRack || !isNotOnSameRack ) break;
+ break;
+ }
+ }
+ } while(!isReplicationDone);
+
+ boolean isOnSameRack = true, isNotOnSameRack = true;
+ for (int idx = 0; idx < locations.length; idx++) {
+ DatanodeInfo[] datanodes = locations[idx].getLocations();
+ if(datanodes.length <= 1) break;
+ if(datanodes.length == 2) {
+ isNotOnSameRack = !( datanodes[0].getNetworkLocation().equals(
+ datanodes[1].getNetworkLocation() ) );
+ break;
+ }
+ isOnSameRack = false;
+ isNotOnSameRack = false;
+ for (int idy = 0; idy < datanodes.length-1; idy++) {
+ LOG.info("datanode "+ idy + ": "+ datanodes[idy].getName());
+ boolean onRack = datanodes[idy].getNetworkLocation().equals(
+ datanodes[idy+1].getNetworkLocation() );
+ if( onRack ) {
+ isOnSameRack = true;
+ }
+ if( !onRack ) {
+ isNotOnSameRack = true;
+ }
+ if( isOnSameRack && isNotOnSameRack ) break;
}
- assertTrue(isOnSameRack);
- assertTrue(isNotOnSameRack);
+ if( !isOnSameRack || !isNotOnSameRack ) break;
+ }
+ assertTrue(isOnSameRack);
+ assertTrue(isNotOnSameRack);
}
private void cleanupFile(FileSystem fileSys, Path name) throws IOException {
@@ -135,7 +135,7 @@
cluster.waitActive();
InetSocketAddress addr = new InetSocketAddress("localhost",
- cluster.getNameNodePort());
+ cluster.getNameNodePort());
DFSClient client = new DFSClient(addr, conf);
DatanodeInfo[] info = client.datanodeReport();
Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestRestartDFS.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestRestartDFS.java?view=diff&rev=529410&r1=529409&r2=529410
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestRestartDFS.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestRestartDFS.java Mon Apr 16 14:44:35 2007
@@ -62,7 +62,7 @@
cluster = new MiniDFSCluster(conf, 4, false, null);
FileSystem fs = cluster.getFileSystem();
assertTrue("Filesystem corrupted after restart.",
- files.checkFiles(fs, "/srcdat"));
+ files.checkFiles(fs, "/srcdat"));
files.cleanup(fs, "/srcdat");
} finally {
if (cluster != null) { cluster.shutdown(); }
Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestSeekBug.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestSeekBug.java?view=diff&rev=529410&r1=529409&r2=529410
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestSeekBug.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestSeekBug.java Mon Apr 16 14:44:35 2007
@@ -49,8 +49,8 @@
private void checkAndEraseData(byte[] actual, int from, byte[] expected, String message) {
for (int idx = 0; idx < actual.length; idx++) {
this.assertEquals(message+" byte "+(from+idx)+" differs. expected "+
- expected[from+idx]+" actual "+actual[idx],
- actual[idx], expected[from+idx]);
+ expected[from+idx]+" actual "+actual[idx],
+ actual[idx], expected[from+idx]);
actual[idx] = 0;
}
}
@@ -82,7 +82,7 @@
*/
private void smallReadSeek(FileSystem fileSys, Path name) throws IOException {
if (fileSys instanceof ChecksumFileSystem) {
- fileSys = ((ChecksumFileSystem)fileSys).getRawFileSystem();
+ fileSys = ((ChecksumFileSystem)fileSys).getRawFileSystem();
}
// Make the buffer size small to trigger code for HADOOP-922
FSDataInputStream stmRaw = fileSys.open(name, 1);
@@ -91,7 +91,7 @@
rand.nextBytes(expected);
// Issue a simple read first.
- byte[] actual = new byte[128];
+ byte[] actual = new byte[128];
stmRaw.seek(100000);
stmRaw.read(actual, 0, actual.length);
checkAndEraseData(actual, 100000, expected, "First Small Read Test");
Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestSmallBlock.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestSmallBlock.java?view=diff&rev=529410&r1=529409&r2=529410
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestSmallBlock.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestSmallBlock.java Mon Apr 16 14:44:35 2007
@@ -39,8 +39,8 @@
private void writeFile(FileSystem fileSys, Path name) throws IOException {
// create and write a file that contains three blocks of data
FSDataOutputStream stm = fileSys.create(name, true,
- fileSys.getConf().getInt("io.file.buffer.size", 4096),
- (short)1, (long)blockSize);
+ fileSys.getConf().getInt("io.file.buffer.size", 4096),
+ (short)1, (long)blockSize);
byte[] buffer = new byte[fileSize];
Random rand = new Random(seed);
rand.nextBytes(buffer);
@@ -51,8 +51,8 @@
private void checkAndEraseData(byte[] actual, int from, byte[] expected, String message) {
for (int idx = 0; idx < actual.length; idx++) {
this.assertEquals(message+" byte "+(from+idx)+" differs. expected "+
- expected[from+idx]+" actual "+actual[idx],
- actual[idx], expected[from+idx]);
+ expected[from+idx]+" actual "+actual[idx],
+ actual[idx], expected[from+idx]);
actual[idx] = 0;
}
}
Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/UpgradeUtilities.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/UpgradeUtilities.java?view=diff&rev=529410&r1=529409&r2=529410
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/UpgradeUtilities.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/UpgradeUtilities.java Mon Apr 16 14:44:35 2007
@@ -52,7 +52,7 @@
// Root scratch directory on local filesystem
private static File TEST_ROOT_DIR = new File(
- System.getProperty("test.build.data","/tmp").toString().replace(' ', '+'));
+ System.getProperty("test.build.data","/tmp").toString().replace(' ', '+'));
// The singleton master storage directory for Namenode
private static File namenodeStorage = new File(TEST_ROOT_DIR, "namenodeMaster");
// A checksum of the contents in namenodeStorage directory
@@ -120,14 +120,14 @@
FileUtil.fullyDelete(new File(datanodeStorage,"in_use.lock"));
}
namenodeStorageChecksum = checksumContents(
- NAME_NODE, new File(namenodeStorage,"current"));
+ NAME_NODE, new File(namenodeStorage,"current"));
datanodeStorageChecksum = checksumContents(
- DATA_NODE, new File(datanodeStorage,"current"));
+ DATA_NODE, new File(datanodeStorage,"current"));
}
// Private helper method that writes a file to the given file system.
private static void writeFile(FileSystem fs, Path path, byte[] buffer,
- int bufferSize ) throws IOException
+ int bufferSize ) throws IOException
{
OutputStream out;
out = fs.create(path, true, bufferSize, (short) 1, 1024);
@@ -198,7 +198,7 @@
public static long checksumContents(NodeType nodeType, File dir) throws IOException {
if (!dir.isDirectory()) {
throw new IllegalArgumentException(
- "Given argument is not a directory:" + dir);
+ "Given argument is not a directory:" + dir);
}
File[] list = dir.listFiles();
CRC32 checksum = new CRC32();
@@ -206,10 +206,10 @@
if (list[i].isFile()) {
// skip VERSION file for DataNodes
if (nodeType == DATA_NODE &&
- list[i].getName().equals("VERSION"))
- {
- continue;
- }
+ list[i].getName().equals("VERSION"))
+ {
+ continue;
+ }
FileInputStream fis = new FileInputStream(list[i]);
byte[] buffer = new byte[1024];
int bytesRead;
@@ -249,18 +249,18 @@
createEmptyDirs(new String[] {newDir.toString()});
LocalFileSystem localFS = FileSystem.getLocal(new Configuration());
switch (nodeType) {
- case NAME_NODE:
- localFS.copyToLocalFile(
- new Path(namenodeStorage.toString(), "current"),
- new Path(newDir.toString()),
- false);
- break;
- case DATA_NODE:
- localFS.copyToLocalFile(
- new Path(datanodeStorage.toString(), "current"),
- new Path(newDir.toString()),
- false);
- break;
+ case NAME_NODE:
+ localFS.copyToLocalFile(
+ new Path(namenodeStorage.toString(), "current"),
+ new Path(newDir.toString()),
+ false);
+ break;
+ case DATA_NODE:
+ localFS.copyToLocalFile(
+ new Path(datanodeStorage.toString(), "current"),
+ new Path(newDir.toString()),
+ false);
+ break;
}
retVal[i] = newDir;
}
@@ -278,7 +278,7 @@
* @return the created version file
*/
public static File[] createVersionFile(NodeType nodeType, File[] parent,
- StorageInfo version) throws IOException
+ StorageInfo version) throws IOException
{
Storage storage = null;
File[] versionFiles = new File[parent.length];
@@ -286,12 +286,12 @@
File versionFile = new File(parent[i], "VERSION");
FileUtil.fullyDelete(versionFile);
switch (nodeType) {
- case NAME_NODE:
- storage = new FSImage( version );
- break;
- case DATA_NODE:
- storage = new DataStorage( version, "doNotCare" );
- break;
+ case NAME_NODE:
+ storage = new FSImage( version );
+ break;
+ case DATA_NODE:
+ storage = new DataStorage( version, "doNotCare" );
+ break;
}
StorageDirectory sd = storage.new StorageDirectory(parent[i].getParentFile());
sd.write(versionFile);
@@ -310,7 +310,7 @@
public static void corruptFile(File file) throws IOException {
if (!file.isFile()) {
throw new IllegalArgumentException(
- "Given argument is not a file:" + file);
+ "Given argument is not a file:" + file);
}
RandomAccessFile raf = new RandomAccessFile(file,"rws");
Random random = new Random();
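[UpgradeUtilities.checksumContents, reindented above, folds every regular file in a storage directory into a single CRC32 (the DataNode case skips the VERSION file, per the comment in the hunk). The hunk ends before the read loop, so the sketch below fills it in under that assumption.]

    import java.io.File;
    import java.io.FileInputStream;
    import java.io.IOException;
    import java.util.zip.CRC32;

    class ChecksumSketch {
      static long checksumContents(File dir, boolean skipVersionFile)
          throws IOException {
        CRC32 checksum = new CRC32();
        for (File f : dir.listFiles()) {
          if (!f.isFile()) continue;
          if (skipVersionFile && f.getName().equals("VERSION")) continue;
          FileInputStream fis = new FileInputStream(f);
          try {
            byte[] buffer = new byte[1024];
            int bytesRead;
            while ((bytesRead = fis.read(buffer)) != -1) {
              checksum.update(buffer, 0, bytesRead);  // fold file bytes in
            }
          } finally {
            fis.close();
          }
        }
        return checksum.getValue();
      }
    }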
Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/DFSCIOTest.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/DFSCIOTest.java?view=diff&rev=529410&r1=529409&r2=529410
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/DFSCIOTest.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/DFSCIOTest.java Mon Apr 16 14:44:35 2007
@@ -126,8 +126,8 @@
SequenceFile.Writer writer = null;
try {
writer = SequenceFile.createWriter(fs, fsConfig, controlFile,
- UTF8.class, LongWritable.class,
- CompressionType.NONE);
+ UTF8.class, LongWritable.class,
+ CompressionType.NONE);
writer.append(new UTF8(name), new LongWritable(fileSize));
} catch(Exception e) {
throw new IOException(e.getLocalizedMessage());
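The control file written here carries a single (file name, file size) record; the map side later reads such files back with the matching reader API, along these lines (a sketch of the consuming side, not code from this hunk):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.UTF8;

public class ControlFileSketch {
  // Dump the (name, size) records a benchmark control file holds.
  static void dump(FileSystem fs, Configuration conf, Path controlFile)
      throws IOException {
    SequenceFile.Reader in = new SequenceFile.Reader(fs, controlFile, conf);
    try {
      UTF8 name = new UTF8();
      LongWritable size = new LongWritable();
      while (in.next(name, size)) {   // one record per benchmark file
        System.out.println(name + "\t" + size.get());
      }
    } finally {
      in.close();
    }
  }
}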
@@ -194,47 +194,47 @@
String name,
long totalSize
) throws IOException {
- // create file
+ // create file
totalSize *= MEGA;
// create instance of local filesystem
FileSystem localFS = FileSystem.getLocal(fsConfig);
try {
- // native runtime
- Runtime runTime = Runtime.getRuntime();
+ // native runtime
+ Runtime runTime = Runtime.getRuntime();
// copy the dso and executable from dfs and chmod them
synchronized (this) {
- localFS.delete(HDFS_TEST_DIR);
- if (!(localFS.mkdirs(HDFS_TEST_DIR))) {
- throw new IOException("Failed to create " + HDFS_TEST_DIR + " on local filesystem");
- }
- }
+ localFS.delete(HDFS_TEST_DIR);
+ if (!(localFS.mkdirs(HDFS_TEST_DIR))) {
+ throw new IOException("Failed to create " + HDFS_TEST_DIR + " on local filesystem");
+ }
+ }
synchronized (this) {
if (!localFS.exists(HDFS_SHLIB)) {
- FileUtil.copy(fs, HDFS_SHLIB, localFS, HDFS_SHLIB, false, fsConfig);
+ FileUtil.copy(fs, HDFS_SHLIB, localFS, HDFS_SHLIB, false, fsConfig);

- String chmodCmd = new String(CHMOD + " a+x " + HDFS_SHLIB);
- Process process = runTime.exec(chmodCmd);
- int exitStatus = process.waitFor();
+ String chmodCmd = new String(CHMOD + " a+x " + HDFS_SHLIB);
+ Process process = runTime.exec(chmodCmd);
+ int exitStatus = process.waitFor();
if (exitStatus != 0) {
- throw new IOException(chmodCmd + ": Failed with exitStatus: " + exitStatus);
+ throw new IOException(chmodCmd + ": Failed with exitStatus: " + exitStatus);
}
}
}
synchronized (this) {
if (!localFS.exists(HDFS_WRITE)) {
- FileUtil.copy(fs, HDFS_WRITE, localFS, HDFS_WRITE, false, fsConfig);
+ FileUtil.copy(fs, HDFS_WRITE, localFS, HDFS_WRITE, false, fsConfig);

- String chmodCmd = new String(CHMOD + " a+x " + HDFS_WRITE);
+ String chmodCmd = new String(CHMOD + " a+x " + HDFS_WRITE);
Process process = runTime.exec(chmodCmd);
int exitStatus = process.waitFor();
if (exitStatus != 0) {
- throw new IOException(chmodCmd + ": Failed with exitStatus: " + exitStatus);
- }
+ throw new IOException(chmodCmd + ": Failed with exitStatus: " + exitStatus);
+ }
}
}
@@ -247,7 +247,7 @@
throw new IOException(writeCmd + ": Failed with exitStatus: " + exitStatus);
}
} catch (InterruptedException interruptedException) {
- reporter.setStatus(interruptedException.toString());
+ reporter.setStatus(interruptedException.toString());
} finally {
localFS.close();
}
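The write mapper above and the read mapper that follows repeat the same exec-and-check idiom for each native binary; factored out, the shared step is just the following (an illustrative refactoring, not part of this commit):

import java.io.File;
import java.io.IOException;

public class ExecSketch {
  // Run a shell command, wait for it, and turn a non-zero exit status
  // into an IOException, exactly as the mappers do inline.
  static void execChecked(String cmd, File workDir)
      throws IOException, InterruptedException {
    Process process = Runtime.getRuntime().exec(cmd, null, workDir);
    int exitStatus = process.waitFor();
    if (exitStatus != 0) {
      throw new IOException(cmd + ": Failed with exitStatus: " + exitStatus);
    }
  }
}

Calling it as execChecked(CHMOD + " a+x " + HDFS_SHLIB, null) would replace the first chmod block above.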
@@ -297,71 +297,71 @@
String name,
long totalSize
) throws IOException {
- totalSize *= MEGA;
+ totalSize *= MEGA;
// create instance of local filesystem
FileSystem localFS = FileSystem.getLocal(fsConfig);
try {
- // native runtime
- Runtime runTime = Runtime.getRuntime();
+ // native runtime
+ Runtime runTime = Runtime.getRuntime();
// copy the dso and executable from dfs
synchronized (this) {
- localFS.delete(HDFS_TEST_DIR);
- if (!(localFS.mkdirs(HDFS_TEST_DIR))) {
- throw new IOException("Failed to create " + HDFS_TEST_DIR + " on local filesystem");
- }
- }
+ localFS.delete(HDFS_TEST_DIR);
+ if (!(localFS.mkdirs(HDFS_TEST_DIR))) {
+ throw new IOException("Failed to create " + HDFS_TEST_DIR + " on local filesystem");
+ }
+ }
synchronized (this) {
if (!localFS.exists(HDFS_SHLIB)) {
- if (!FileUtil.copy(fs, HDFS_SHLIB, localFS, HDFS_SHLIB, false, fsConfig)) {
- throw new IOException("Failed to copy " + HDFS_SHLIB + " to local filesystem");
- }
-
- String chmodCmd = new String(CHMOD + " a+x " + HDFS_SHLIB);
- Process process = runTime.exec(chmodCmd);
- int exitStatus = process.waitFor();
- if (exitStatus != 0) {
- throw new IOException( chmodCmd + ": Failed with exitStatus: " + exitStatus );
- }
- }
- }
+ if (!FileUtil.copy(fs, HDFS_SHLIB, localFS, HDFS_SHLIB, false, fsConfig)) {
+ throw new IOException("Failed to copy " + HDFS_SHLIB + " to local filesystem");
+ }
+
+ String chmodCmd = new String(CHMOD + " a+x " + HDFS_SHLIB);
+ Process process = runTime.exec(chmodCmd);
+ int exitStatus = process.waitFor();
+ if (exitStatus != 0) {
+ throw new IOException( chmodCmd + ": Failed with exitStatus: " + exitStatus );
+ }
+ }
+ }

- synchronized (this) {
- if (!localFS.exists(HDFS_READ)) {
- if (!FileUtil.copy(fs, HDFS_READ, localFS, HDFS_READ, false, fsConfig)) {
- throw new IOException("Failed to copy " + HDFS_READ + " to local filesystem");
- }
-
- String chmodCmd = new String(CHMOD + " a+x " + HDFS_READ);
- Process process = runTime.exec(chmodCmd);
- int exitStatus = process.waitFor();
+ synchronized (this) {
+ if (!localFS.exists(HDFS_READ)) {
+ if (!FileUtil.copy(fs, HDFS_READ, localFS, HDFS_READ, false, fsConfig)) {
+ throw new IOException("Failed to copy " + HDFS_READ + " to local filesystem");
+ }
+
+ String chmodCmd = new String(CHMOD + " a+x " + HDFS_READ);
+ Process process = runTime.exec(chmodCmd);
+ int exitStatus = process.waitFor();

- if (exitStatus != 0) {
- throw new IOException(chmodCmd + ": Failed with exitStatus: " + exitStatus);
- }
- }
- }
+ if (exitStatus != 0) {
+ throw new IOException(chmodCmd + ": Failed with exitStatus: " + exitStatus);
+ }
+ }
+ }

- // exec the C program
- Path inFile = new Path(DATA_DIR, name);
- String readCmd = new String(HDFS_READ + " " + inFile + " " + totalSize + " " +
- bufferSize);
- Process process = runTime.exec(readCmd, null, new File(HDFS_TEST_DIR.toString()));
- int exitStatus = process.waitFor();
+ // exec the C program
+ Path inFile = new Path(DATA_DIR, name);
+ String readCmd = new String(HDFS_READ + " " + inFile + " " + totalSize + " " +
+ bufferSize);
+ Process process = runTime.exec(readCmd, null, new File(HDFS_TEST_DIR.toString()));
+ int exitStatus = process.waitFor();

- if (exitStatus != 0) {
- throw new IOException(HDFS_READ + ": Failed with exitStatus: " + exitStatus);
- }
+ if (exitStatus != 0) {
+ throw new IOException(HDFS_READ + ": Failed with exitStatus: " + exitStatus);
+ }
} catch (InterruptedException interruptedException) {
- reporter.setStatus(interruptedException.toString());
+ reporter.setStatus(interruptedException.toString());
} finally {
localFS.close();
}
return new Long(totalSize);
- }
+ }
}
private static void readTest(FileSystem fs) throws IOException {
@@ -433,18 +433,18 @@
FileSystem fs = FileSystem.get(fsConfig);
if (testType != TEST_TYPE_CLEANUP) {
- fs.delete(HDFS_TEST_DIR);
- if (!fs.mkdirs(HDFS_TEST_DIR)) {
- throw new IOException("Mkdirs failed to create " +
- HDFS_TEST_DIR.toString());
- }
-
- //Copy the executables over to the remote filesystem
- String hadoopHome = System.getenv("HADOOP_HOME");
- fs.copyFromLocalFile(new Path(hadoopHome + "/libhdfs/libhdfs.so." + HDFS_LIB_VERSION),
- HDFS_SHLIB);
- fs.copyFromLocalFile(new Path(hadoopHome + "/libhdfs/hdfs_read"), HDFS_READ);
- fs.copyFromLocalFile(new Path(hadoopHome + "/libhdfs/hdfs_write"), HDFS_WRITE);
+ fs.delete(HDFS_TEST_DIR);
+ if (!fs.mkdirs(HDFS_TEST_DIR)) {
+ throw new IOException("Mkdirs failed to create " +
+ HDFS_TEST_DIR.toString());
+ }
+
+ //Copy the executables over to the remote filesystem
+ String hadoopHome = System.getenv("HADOOP_HOME");
+ fs.copyFromLocalFile(new Path(hadoopHome + "/libhdfs/libhdfs.so." + HDFS_LIB_VERSION),
+ HDFS_SHLIB);
+ fs.copyFromLocalFile(new Path(hadoopHome + "/libhdfs/hdfs_read"), HDFS_READ);
+ fs.copyFromLocalFile(new Path(hadoopHome + "/libhdfs/hdfs_write"), HDFS_WRITE);
}
if( isSequential ) {
@@ -514,8 +514,8 @@
double stdDev = Math.sqrt( Math.abs(sqrate / 1000 / tasks - med*med ));
String resultLines[] = {
"----- DFSCIOTest ----- : " + ((testType == TEST_TYPE_WRITE) ? "write" :
- (testType == TEST_TYPE_READ) ? "read" :
- "unknown"),
+ (testType == TEST_TYPE_READ) ? "read" :
+ "unknown"),
" Date & time: " + new Date(System.currentTimeMillis()),
" Number of files: " + tasks,
"Total MBytes processed: " + size/MEGA,
Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/DistributedFSCheck.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/DistributedFSCheck.java?view=diff&rev=529410&r1=529409&r2=529410
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/DistributedFSCheck.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/DistributedFSCheck.java Mon Apr 16 14:44:35 2007
@@ -93,7 +93,7 @@
Path inputFile = new Path(MAP_INPUT_DIR, "in_file");
SequenceFile.Writer writer =
SequenceFile.createWriter(fs, fsConfig, inputFile,
- UTF8.class, LongWritable.class, CompressionType.NONE);
+ UTF8.class, LongWritable.class, CompressionType.NONE);
try {
nrFiles = 0;
@@ -149,7 +149,7 @@
try {
long blockSize = fs.getDefaultBlockSize();
reporter.setStatus( "reading " + name + "@" +
- offset + "/" + blockSize );
+ offset + "/" + blockSize );
for( int curSize = bufferSize;
curSize == bufferSize && actualSize < blockSize;
actualSize += curSize) {
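The loop header above drives a chunked read of one block: it continues only while every read fills the whole buffer and the block is not yet exhausted. The body the hunk elides is essentially a single read call; a self-contained sketch under that assumption:

import java.io.IOException;
import java.io.InputStream;

public class BlockReadSketch {
  // Read up to blockSize bytes from `in` in bufferSize chunks and
  // return how many bytes actually arrived; a short read ends the loop.
  static long readBlock(InputStream in, int bufferSize, long blockSize)
      throws IOException {
    byte[] buffer = new byte[bufferSize];
    long actualSize = 0;
    for (int curSize = bufferSize;
         curSize == bufferSize && actualSize < blockSize;
         actualSize += curSize) {
      curSize = in.read(buffer, 0, bufferSize);
      if (curSize < 0) {
        break;   // EOF before the block was exhausted
      }
    }
    return actualSize;
  }
}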
Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/TestCopyFiles.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/TestCopyFiles.java?view=diff&rev=529410&r1=529409&r2=529410
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/TestCopyFiles.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/TestCopyFiles.java Mon Apr 16 14:44:35 2007
@@ -94,7 +94,7 @@
* with random (but reproducible) data in them.
*/
private static MyFile[] createFiles(String fsname, String topdir)
- throws IOException {
+ throws IOException {
MyFile[] files = new MyFile[NFILES];
for (int idx = 0; idx < NFILES; idx++) {
@@ -125,7 +125,7 @@
/** check if the files have been copied correctly. */
private static boolean checkFiles(String fsname, String topdir, MyFile[] files)
- throws IOException {
+ throws IOException {
Configuration conf = new Configuration();
FileSystem fs = FileSystem.getNamed(fsname, conf);
@@ -154,7 +154,7 @@
/** delete directory and everything underneath it.*/
private static void deldir(String fsname, String topdir)
- throws IOException {
+ throws IOException {
Configuration conf = new Configuration();
FileSystem fs = FileSystem.getNamed(fsname, conf);
Path root = new Path(topdir);
@@ -165,10 +165,10 @@
public void testCopyFromLocalToLocal() throws Exception {
MyFile[] files = createFiles("local", TEST_ROOT_DIR+"/srcdat");
new CopyFiles().doMain(new Configuration(),
- new String[] {"file://"+TEST_ROOT_DIR+"/srcdat",
- "file://"+TEST_ROOT_DIR+"/destdat"});
+ new String[] {"file://"+TEST_ROOT_DIR+"/srcdat",
+ "file://"+TEST_ROOT_DIR+"/destdat"});
assertTrue("Source and destination directories do not match.",
- checkFiles("local", TEST_ROOT_DIR+"/destdat", files));
+ checkFiles("local", TEST_ROOT_DIR+"/destdat", files));
deldir("local", TEST_ROOT_DIR+"/destdat");
deldir("local", TEST_ROOT_DIR+"/srcdat");
}
@@ -184,9 +184,9 @@
if (!"local".equals(namenode)) {
MyFile[] files = createFiles(namenode, "/srcdat");
new CopyFiles().doMain(conf, new String[] {"hdfs://"+namenode+"/srcdat",
- "hdfs://"+namenode+"/destdat"});
+ "hdfs://"+namenode+"/destdat"});
assertTrue("Source and destination directories do not match.",
- checkFiles(namenode, "/destdat", files));
+ checkFiles(namenode, "/destdat", files));
deldir(namenode, "/destdat");
deldir(namenode, "/srcdat");
}
@@ -206,9 +206,9 @@
if (!"local".equals(namenode)) {
MyFile[] files = createFiles("local", TEST_ROOT_DIR+"/srcdat");
new CopyFiles().doMain(conf, new String[] {"file://"+TEST_ROOT_DIR+"/srcdat",
- "hdfs://"+namenode+"/destdat"});
+ "hdfs://"+namenode+"/destdat"});
assertTrue("Source and destination directories do not match.",
- checkFiles(namenode, "/destdat", files));
+ checkFiles(namenode, "/destdat", files));
deldir(namenode, "/destdat");
deldir("local", TEST_ROOT_DIR+"/srcdat");
}
@@ -228,15 +228,15 @@
if (!"local".equals(namenode)) {
MyFile[] files = createFiles(namenode, "/srcdat");
new CopyFiles().doMain(conf, new String[] {"hdfs://"+namenode+"/srcdat",
- "file://"+TEST_ROOT_DIR+"/destdat"});
+ "file://"+TEST_ROOT_DIR+"/destdat"});
assertTrue("Source and destination directories do not match.",
- checkFiles("local", TEST_ROOT_DIR+"/destdat", files));
+ checkFiles("local", TEST_ROOT_DIR+"/destdat", files));
deldir("local", TEST_ROOT_DIR+"/destdat");
deldir(namenode, "/srcdat");
}
} finally {
if (cluster != null) { cluster.shutdown(); }
}
- }
+ }
}
Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/TestDFSIO.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/TestDFSIO.java?view=diff&rev=529410&r1=529409&r2=529410
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/TestDFSIO.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/TestDFSIO.java Mon Apr 16 14:44:35 2007
@@ -120,8 +120,8 @@
SequenceFile.Writer writer = null;
try {
writer = SequenceFile.createWriter(fs, fsConfig, controlFile,
- UTF8.class, LongWritable.class,
- CompressionType.NONE);
+ UTF8.class, LongWritable.class,
+ CompressionType.NONE);
writer.append(new UTF8(name), new LongWritable(fileSize));
} catch(Exception e) {
throw new IOException(e.getLocalizedMessage());
Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/TestFileSystem.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/TestFileSystem.java?view=diff&rev=529410&r1=529409&r2=529410
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/TestFileSystem.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/TestFileSystem.java Mon Apr 16 14:44:35 2007
@@ -81,7 +81,7 @@
SequenceFile.Writer writer =
SequenceFile.createWriter(fs, conf, controlFile,
- UTF8.class, LongWritable.class, CompressionType.NONE);
+ UTF8.class, LongWritable.class, CompressionType.NONE);
long totalSize = 0;
long maxSize = ((megaBytes / numFiles) * 2) + 1;
@@ -404,8 +404,8 @@
String usage = "Usage: TestFileSystem -files N -megaBytes M [-noread] [-nowrite] [-noseek] [-fastcheck]";
if (args.length == 0) {
- System.err.println(usage);
- System.exit(-1);
+ System.err.println(usage);
+ System.exit(-1);
}
for (int i = 0; i < args.length; i++) { // parse command line
if (args[i].equals("-files")) {
Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/TestGlobPaths.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/TestGlobPaths.java?view=diff&rev=529410&r1=529409&r2=529410
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/TestGlobPaths.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/TestGlobPaths.java Mon Apr 16 14:44:35 2007
@@ -232,7 +232,7 @@
}
private Path[] prepareTesting( String pattern, String[] files)
- throws IOException {
+ throws IOException {
for(int i=0; i<Math.min(NUM_OF_PATHS, files.length); i++) {
path[i] = new Path( files[i] );
if (!fs.mkdirs( path[i] )) {