Posted to common-commits@hadoop.apache.org by cu...@apache.org on 2006/06/09 23:11:30 UTC
svn commit: r413169 [2/2] - in /lucene/hadoop/branches/branch-0.3: ./ bin/
conf/ site/ src/contrib/streaming/src/java/org/apache/hadoop/streaming/
src/java/org/apache/hadoop/dfs/ src/java/org/apache/hadoop/fs/
src/site/src/documentation/content/xdocs/ ...
Modified: lucene/hadoop/branches/branch-0.3/src/java/org/apache/hadoop/dfs/FSDirectory.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/branches/branch-0.3/src/java/org/apache/hadoop/dfs/FSDirectory.java?rev=413169&r1=413168&r2=413169&view=diff
==============================================================================
--- lucene/hadoop/branches/branch-0.3/src/java/org/apache/hadoop/dfs/FSDirectory.java (original)
+++ lucene/hadoop/branches/branch-0.3/src/java/org/apache/hadoop/dfs/FSDirectory.java Fri Jun 9 14:11:29 2006
@@ -143,18 +143,22 @@
* @param path file path
* @param newNode INode to be added
* @return null if the node already exists; inserted INode, otherwise
+ * @throws FileNotFoundException
* @author shv
*/
- INode addNode(String path, INode newNode) {
+ INode addNode(String path, INode newNode) throws FileNotFoundException {
File target = new File( path );
// find parent
Path parent = new Path(path).getParent();
- if (parent == null)
- return null;
+ if (parent == null) { // add root
+ return null;
+ }
INode parentNode = getNode(parent.toString());
- if (parentNode == null)
- return null;
- // check whether the parent already has a node with that name
+ if (parentNode == null) {
+ throw new FileNotFoundException(
+ "Parent path does not exist: "+path);
+ }
+ // check whether the parent already has a node with that name
String name = newNode.name = target.getName();
if( parentNode.getChild( name ) != null )
return null;
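
The hunk above changes the addNode contract: a missing parent now surfaces
as a FileNotFoundException, while a name collision still returns null. A
minimal caller sketch (hypothetical path and variable names, not part of
this commit):

    // Distinguishing the two failure modes of the new addNode contract.
    try {
        INode added = rootDir.addNode("/data/xx", newNode);
        if (added == null) {
            // a child named "xx" already exists under /data
        }
    } catch (FileNotFoundException e) {
        // the parent directory /data itself does not exist
    }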
@@ -688,11 +692,19 @@
*/
boolean unprotectedAddFile(UTF8 path, INode newNode) {
synchronized (rootDir) {
- int nrBlocks = (newNode.blocks == null) ? 0 : newNode.blocks.length;
- // Add file->block mapping
- for (int i = 0; i < nrBlocks; i++)
- activeBlocks.put(newNode.blocks[i], newNode);
- return (rootDir.addNode(path.toString(), newNode) != null);
+ try {
+ if( rootDir.addNode(path.toString(), newNode ) != null ) {
+ int nrBlocks = (newNode.blocks == null) ? 0 : newNode.blocks.length;
+ // Add file->block mapping
+ for (int i = 0; i < nrBlocks; i++)
+ activeBlocks.put(newNode.blocks[i], newNode);
+ return true;
+ } else {
+ return false;
+ }
+ } catch (FileNotFoundException e ) {
+ return false;
+ }
}
}
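
The reordering in unprotectedAddFile also fixes a subtle leak: the old code
put every block into activeBlocks before attempting the insert, so a failed
addNode left stale block-to-file mappings behind. A condensed restatement of
the invariant the new code enforces (sketch only, mirroring the hunk):

    // Block mappings are registered only after a successful namespace insert.
    try {
        if (rootDir.addNode(path.toString(), newNode) != null) {
            if (newNode.blocks != null) {
                for (int i = 0; i < newNode.blocks.length; i++)
                    activeBlocks.put(newNode.blocks[i], newNode);
            }
            return true;
        }
        return false;                  // name collision: no blocks registered
    } catch (FileNotFoundException e) {
        return false;                  // missing parent: no blocks registered
    }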
@@ -720,23 +732,36 @@
INode renamedNode = rootDir.getNode(srcStr);
if (renamedNode == null) {
NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
- +"failed to rename "+src+" to "+dst+ " because "+ src+" does not exist" );
+ +"failed to rename "+src+" to "+dst+ " because source does not exist" );
return false;
}
- renamedNode.removeNode();
if (isDir(dst)) {
dstStr += "/" + new File(srcStr).getName();
}
+ if( rootDir.getNode(dstStr.toString()) != null ) {
+ NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
+ +"failed to rename "+src+" to "+dstStr+ " because destination exists" );
+ return false;
+ }
+ renamedNode.removeNode();
+
// the renamed node can be reused now
- if( rootDir.addNode(dstStr, renamedNode ) == null ) {
+ try {
+ if( rootDir.addNode(dstStr, renamedNode ) != null ) {
+ NameNode.stateChangeLog.debug("DIR* FSDirectory.unprotectedRenameTo: "
+ +src+" is renamed to "+dst );
+ return true;
+ }
+ } catch (FileNotFoundException e ) {
NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
+"failed to rename "+src+" to "+dst );
- rootDir.addNode(srcStr, renamedNode); // put it back
- return false;
+ try {
+ rootDir.addNode(srcStr, renamedNode); // put it back
+ }catch(FileNotFoundException e2) {
+ }
}
- NameNode.stateChangeLog.debug("DIR* FSDirectory.unprotectedRenameTo: "
- +src+" is renamed to "+dst );
- return true;
+
+ return false;
}
}
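
Two ordering fixes land in this rename hunk: the destination is checked
before the source node is unlinked, so renaming onto an existing path no
longer destroys the source, and a FileNotFoundException from the add
triggers a best-effort re-insert of the source. Observable behavior,
sketched with hypothetical paths:

    // After this hunk, a failed rename leaves the source intact.
    boolean ok = unprotectedRenameTo(new UTF8("/a/x"), new UTF8("/a/y"));
    if (!ok) {
        // /a/x was missing, /a/y already existed, or the add failed;
        // in all three cases /a/x (if it existed) is still reachable
    }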
@@ -977,29 +1002,28 @@
// Now go backwards through list of dirs, creating along
// the way
- boolean lastSuccess = false;
int numElts = v.size();
for (int i = numElts - 1; i >= 0; i--) {
String cur = (String) v.elementAt(i);
- INode inserted = unprotectedMkdir(cur);
- if (inserted != null) {
- NameNode.stateChangeLog.debug("DIR* FSDirectory.mkdirs: "
+ try {
+ INode inserted = unprotectedMkdir(cur);
+ if (inserted != null) {
+ NameNode.stateChangeLog.debug("DIR* FSDirectory.mkdirs: "
+"created directory "+cur );
- logEdit(OP_MKDIR, new UTF8(inserted.computeName()), null);
- lastSuccess = true;
- } else {
- lastSuccess = false;
+ logEdit(OP_MKDIR, new UTF8(inserted.computeName()), null);
+ } // otherwise cur exists, continue
+ } catch (FileNotFoundException e ) {
+ NameNode.stateChangeLog.debug("DIR* FSDirectory.mkdirs: "
+ +"failed to create directory "+src);
+ return false;
}
}
-/* if( !lastSuccess )
- NameNode.stateChangeLog.warn("DIR* FSDirectory.mkdirs: "
- +"failed to create directory "+src );*/
- return lastSuccess;
+ return true;
}
/**
*/
- INode unprotectedMkdir(String src) {
+ INode unprotectedMkdir(String src) throws FileNotFoundException {
synchronized (rootDir) {
return rootDir.addNode(src, new INode(new File(src).getName()));
}
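
Taken together, these mkdirs hunks switch FSDirectory to 'mkdir -p'
semantics: a component that already exists is skipped (addNode returning
null), and only a FileNotFoundException aborts the walk. Under the old
lastSuccess logic, calling mkdirs on a path whose final component already
existed returned false. The effect, sketched with a hypothetical call
sequence:

    // Hypothetical before/after of the semantics change:
    mkdirs("/data/a/b");              // creates /data, /data/a, /data/a/b
    boolean again = mkdirs("/data/a/b");
    // old behavior: again == false  (last component already existed)
    // new behavior: again == true   (existing hierarchy is not an error)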
Modified: lucene/hadoop/branches/branch-0.3/src/java/org/apache/hadoop/dfs/FSNamesystem.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/branches/branch-0.3/src/java/org/apache/hadoop/dfs/FSNamesystem.java?rev=413169&r1=413168&r2=413169&view=diff
==============================================================================
--- lucene/hadoop/branches/branch-0.3/src/java/org/apache/hadoop/dfs/FSNamesystem.java (original)
+++ lucene/hadoop/branches/branch-0.3/src/java/org/apache/hadoop/dfs/FSNamesystem.java Fri Jun 9 14:11:29 2006
@@ -1266,7 +1266,7 @@
if (! dir.isValidBlock(b) && ! pendingCreateBlocks.contains(b)) {
obsolete.add(b);
- NameNode.stateChangeLog.info("BLOCK* NameSystem.processReport: "
+ NameNode.stateChangeLog.debug("BLOCK* NameSystem.processReport: "
+"ask "+nodeID.getName()+" to delete "+b.getBlockName() );
}
}
@@ -1329,6 +1329,8 @@
*/
private void proccessOverReplicatedBlock( Block block, short replication ) {
TreeSet containingNodes = (TreeSet) blocksMap.get(block);
+ if( containingNodes == null )
+ return;
Vector nonExcess = new Vector();
for (Iterator it = containingNodes.iterator(); it.hasNext(); ) {
DatanodeInfo cur = (DatanodeInfo) it.next();
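
The guard added above closes a NullPointerException: blocksMap.get(block)
returns null once a block is no longer mapped, and the old code iterated
over that result unconditionally. Reduced to its essentials (types as in
the hunk):

    TreeSet containingNodes = (TreeSet) blocksMap.get(block);
    if (containingNodes == null)
        return;                       // block already unmapped; nothing to trim
    // only now is it safe to iterate over containingNodes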
@@ -1509,7 +1511,7 @@
blockList.append(' ');
blockList.append(((Block)invalidateSet.elementAt(i)).getBlockName());
}
- NameNode.stateChangeLog.info("BLOCK* NameSystem.blockToInvalidate: "
+ NameNode.stateChangeLog.debug("BLOCK* NameSystem.blockToInvalidate: "
+"ask "+nodeID.getName()+" to delete " + blockList );
}
return (Block[]) invalidateSet.toArray(new Block[invalidateSet.size()]);
Modified: lucene/hadoop/branches/branch-0.3/src/java/org/apache/hadoop/fs/FileSystem.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/branches/branch-0.3/src/java/org/apache/hadoop/fs/FileSystem.java?rev=413169&r1=413168&r2=413169&view=diff
==============================================================================
--- lucene/hadoop/branches/branch-0.3/src/java/org/apache/hadoop/fs/FileSystem.java (original)
+++ lucene/hadoop/branches/branch-0.3/src/java/org/apache/hadoop/fs/FileSystem.java Fri Jun 9 14:11:29 2006
@@ -459,7 +459,8 @@
/**
* Make the given file and all non-existent parents into
- * directories.
+ * directories. Has the semantics of Unix 'mkdir -p'.
+ * Existence of the directory hierarchy is not an error.
*/
public abstract boolean mkdirs(Path f) throws IOException;
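
The sharpened contract in prose: mkdirs(f) succeeds if f and all missing
parents could be created, and also if they already exist. A hedged usage
sketch (the fs handle and path are hypothetical):

    // Safe to call repeatedly; an existing hierarchy is not an error.
    Path dir = new Path("/user/hadoop/output");
    if (!fs.mkdirs(dir)) {
        throw new IOException("could not create " + dir);
    }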
Modified: lucene/hadoop/branches/branch-0.3/src/java/org/apache/hadoop/fs/LocalFileSystem.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/branches/branch-0.3/src/java/org/apache/hadoop/fs/LocalFileSystem.java?rev=413169&r1=413168&r2=413169&view=diff
==============================================================================
--- lucene/hadoop/branches/branch-0.3/src/java/org/apache/hadoop/fs/LocalFileSystem.java (original)
+++ lucene/hadoop/branches/branch-0.3/src/java/org/apache/hadoop/fs/LocalFileSystem.java Fri Jun 9 14:11:29 2006
@@ -223,11 +223,18 @@
}
return results;
}
-
+
+ /**
+ * Creates the specified directory hierarchy. Does not
+ * treat existence as an error.
+ */
public boolean mkdirs(Path f) throws IOException {
- return pathToFile(f).mkdirs();
+ Path parent = f.getParent();
+ File p2f = pathToFile(f);
+ return (parent == null || mkdirs(parent)) &&
+ (p2f.mkdir() || p2f.isDirectory());
}
-
+
/**
* Set the working directory to the given directory.
*/
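
The LocalFileSystem rewrite works around a java.io.File quirk: File.mkdirs()
returns false when the directory already exists, which violated the contract
just documented in FileSystem. The new recursion treats "mkdir succeeded or
it is already a directory" as success at every level. The same pattern,
standalone and runnable (the demo path is hypothetical):

    import java.io.File;

    public class MkdirsPDemo {
        // Same recursion as the hunk above: 'mkdir -p' over java.io.File.
        static boolean mkdirsP(File f) {
            File parent = f.getParentFile();
            return (parent == null || mkdirsP(parent)) &&
                   (f.mkdir() || f.isDirectory());
        }

        public static void main(String[] args) {
            File dir = new File("/tmp/mkdirsp-demo/a/b");
            System.out.println(mkdirsP(dir));  // true: hierarchy created
            System.out.println(mkdirsP(dir));  // true: exists, not an error
            System.out.println(dir.mkdirs());  // false: File.mkdirs flags existence
        }
    }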
Modified: lucene/hadoop/branches/branch-0.3/src/site/src/documentation/content/xdocs/index.xml
URL: http://svn.apache.org/viewvc/lucene/hadoop/branches/branch-0.3/src/site/src/documentation/content/xdocs/index.xml?rev=413169&r1=413168&r2=413169&view=diff
==============================================================================
--- lucene/hadoop/branches/branch-0.3/src/site/src/documentation/content/xdocs/index.xml (original)
+++ lucene/hadoop/branches/branch-0.3/src/site/src/documentation/content/xdocs/index.xml Fri Jun 9 14:11:29 2006
@@ -15,6 +15,22 @@
<title>News</title>
<section>
+ <title>9 June, 2006: release 0.3.2 available</title>
+ <p>This is a bugfix release. For details see the <a
+ href="http://tinyurl.com/k9g5c">change log</a>. The release can
+ be obtained from <a
+ href="http://www.apache.org/dyn/closer.cgi/lucene/hadoop/"> a
+ nearby mirror</a>.
+ </p> </section>
+
+ <section>
+ <title>8 June, 2006: FAQ added to Wiki</title>
+ <p>Hadoop now has a <a
+ href="http://wiki.apache.org/lucene-hadoop/FAQ">FAQ</a>. Please
+ help make this more complete!
+ </p> </section>
+
+ <section>
<title>5 June, 2006: release 0.3.1 available</title>
<p>This is a bugfix release. For details see the <a
href="http://tinyurl.com/l6on4">change log</a>. The release can
Modified: lucene/hadoop/branches/branch-0.3/src/test/org/apache/hadoop/dfs/ClusterTestDFSNamespaceLogging.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/branches/branch-0.3/src/test/org/apache/hadoop/dfs/ClusterTestDFSNamespaceLogging.java?rev=413169&r1=413168&r2=413169&view=diff
==============================================================================
--- lucene/hadoop/branches/branch-0.3/src/test/org/apache/hadoop/dfs/ClusterTestDFSNamespaceLogging.java (original)
+++ lucene/hadoop/branches/branch-0.3/src/test/org/apache/hadoop/dfs/ClusterTestDFSNamespaceLogging.java Fri Jun 9 14:11:29 2006
@@ -73,7 +73,7 @@
private static final int BLOCK_LOG_HEADER_LEN = 32;
/** DFS block size
*/
- private static final int BLOCK_SIZE = 32*1000*1000;
+ private static final int BLOCK_SIZE = 32*1024*1024;
/** Buffer size
*/
@@ -158,7 +158,7 @@
// create a file with 2 data blocks
try {
- createFile("/data/yy",BLOCK_SIZE+1);
+ createFile("/data/yy", BLOCK_SIZE+1);
assertCreate( "/data/yy", BLOCK_SIZE+1, false );
} catch( IOException ioe ) {
assertCreate( "/data/yy", BLOCK_SIZE+1, true );
@@ -326,9 +326,9 @@
//
private void configureDFS() throws IOException {
// set given config param to override other config settings
- conf.setInt("test.dfs.block_size", BLOCK_SIZE);
+ conf.setInt("dfs.block.size", BLOCK_SIZE);
// verify that config changed
- assertTrue(BLOCK_SIZE == conf.getInt("test.dfs.block_size", 2)); // 2 is an intentional obviously-wrong block size
+ assertTrue(BLOCK_SIZE == conf.getInt("dfs.block.size", 2)); // 2 is an intentional obviously-wrong block size
// downsize for testing (just to save resources)
conf.setInt("dfs.namenode.handler.count", 3);
conf.setLong("dfs.blockreport.intervalMsec", 50*1000L);
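
Two distinct fixes hide in this test hunk. First, 32*1000*1000 is not a
power-of-two block size; 32*1024*1024 is the intended 32 MiB (33,554,432
bytes versus 32,000,000). Second, the test was setting the key
"test.dfs.block_size", which the DFS code does not appear to consult;
"dfs.block.size" is the property the namenode and client actually honor, so
the override now takes effect. The arithmetic, checked standalone:

    public class BlockSizeCheck {
        public static void main(String[] args) {
            System.out.println(32 * 1000 * 1000);   // 32000000  (old value)
            System.out.println(32 * 1024 * 1024);   // 33554432  (32 MiB)
        }
    }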
Modified: lucene/hadoop/branches/branch-0.3/src/test/org/apache/hadoop/test/AllTestDriver.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/branches/branch-0.3/src/test/org/apache/hadoop/test/AllTestDriver.java?rev=413169&r1=413168&r2=413169&view=diff
==============================================================================
--- lucene/hadoop/branches/branch-0.3/src/test/org/apache/hadoop/test/AllTestDriver.java (original)
+++ lucene/hadoop/branches/branch-0.3/src/test/org/apache/hadoop/test/AllTestDriver.java Fri Jun 9 14:11:29 2006
@@ -27,6 +27,7 @@
import org.apache.hadoop.io.TestSequenceFile;
import org.apache.hadoop.ipc.TestIPC;
import org.apache.hadoop.ipc.TestRPC;
+import org.apache.hadoop.fs.DistributedFSCheck;
import org.apache.hadoop.fs.TestDFSIO;
import org.apache.hadoop.fs.DFSCIOTest;
@@ -52,7 +53,7 @@
pgd.addClass("testtextinputformat", TestTextInputFormat.class, "A test for text input format.");
pgd.addClass("TestDFSIO", TestDFSIO.class, "Distributed i/o benchmark.");
pgd.addClass("DFSCIOTest", DFSCIOTest.class, "Distributed i/o benchmark of libhdfs.");
- pgd.addClass("DistributedFSCheck", TestDFSIO.class, "Distributed checkup of the file system consistency.");
+ pgd.addClass("DistributedFSCheck", DistributedFSCheck.class, "Distributed checkup of the file system consistency.");
pgd.driver(argv);
}
catch(Throwable e){