You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by sz...@apache.org on 2008/10/13 20:14:28 UTC
svn commit: r704194 - in /hadoop/core/branches/branch-0.18: CHANGES.txt
src/hdfs/org/apache/hadoop/dfs/FSEditLog.java
src/hdfs/org/apache/hadoop/dfs/FSImage.java
src/test/org/apache/hadoop/dfs/TestRestartDFS.java
Author: szetszwo
Date: Mon Oct 13 11:14:27 2008
New Revision: 704194
URL: http://svn.apache.org/viewvc?rev=704194&view=rev
Log:
HADOOP-4395. The FSEditLog loading is incorrect for the case OP_SET_OWNER. (szetszwo)
Modified:
hadoop/core/branches/branch-0.18/CHANGES.txt
hadoop/core/branches/branch-0.18/src/hdfs/org/apache/hadoop/dfs/FSEditLog.java
hadoop/core/branches/branch-0.18/src/hdfs/org/apache/hadoop/dfs/FSImage.java
hadoop/core/branches/branch-0.18/src/test/org/apache/hadoop/dfs/TestRestartDFS.java
Modified: hadoop/core/branches/branch-0.18/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.18/CHANGES.txt?rev=704194&r1=704193&r2=704194&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.18/CHANGES.txt (original)
+++ hadoop/core/branches/branch-0.18/CHANGES.txt Mon Oct 13 11:14:27 2008
@@ -9,17 +9,20 @@
HADOOP-3614. Fix a bug that Datanode may use an old GenerationStamp to get
meta file. (szetszwo)
+ HADOOP-4314. Simulated datanodes should not include blocks that are still
+ being written in their block report. (Raghu Angadi)
+
HADOOP-4228. dfs datanode metrics, bytes_read and bytes_written, overflow
due to incorrect type used. (hairong)
+ HADOOP-4395. The FSEditLog loading is incorrect for the case OP_SET_OWNER.
+ (szetszwo)
+
NEW FEATURES
HADOOP-2421. Add jdiff output to documentation, listing all API
changes from the prior release. (cutting)
- HADOOP-4314. Simulated datanodes should not include blocks that are still
- being written in their block report. (Raghu Angadi)
-
Release 0.18.1 - 2008-09-17
IMPROVEMENTS
Modified: hadoop/core/branches/branch-0.18/src/hdfs/org/apache/hadoop/dfs/FSEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.18/src/hdfs/org/apache/hadoop/dfs/FSEditLog.java?rev=704194&r1=704193&r2=704194&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.18/src/hdfs/org/apache/hadoop/dfs/FSEditLog.java (original)
+++ hadoop/core/branches/branch-0.18/src/hdfs/org/apache/hadoop/dfs/FSEditLog.java Mon Oct 13 11:14:27 2008
@@ -631,7 +631,8 @@
throw new IOException("Unexpected opcode " + opcode
+ " for version " + logVersion);
fsDir.unprotectedSetOwner(FSImage.readString(in),
- FSImage.readString(in), FSImage.readString(in));
+ FSImage.readString_EmptyAsNull(in),
+ FSImage.readString_EmptyAsNull(in));
break;
}
case OP_SET_QUOTA: {
Modified: hadoop/core/branches/branch-0.18/src/hdfs/org/apache/hadoop/dfs/FSImage.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.18/src/hdfs/org/apache/hadoop/dfs/FSImage.java?rev=704194&r1=704193&r2=704194&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.18/src/hdfs/org/apache/hadoop/dfs/FSImage.java (original)
+++ hadoop/core/branches/branch-0.18/src/hdfs/org/apache/hadoop/dfs/FSImage.java Mon Oct 13 11:14:27 2008
@@ -1369,6 +1369,11 @@
return U_STR.toString();
}
+ static String readString_EmptyAsNull(DataInputStream in) throws IOException {
+ final String s = readString(in);
+ return s.isEmpty()? null: s;
+ }
+
static byte[] readBytes(DataInputStream in) throws IOException {
U_STR.readFields(in);
int len = U_STR.getLength();
Modified: hadoop/core/branches/branch-0.18/src/test/org/apache/hadoop/dfs/TestRestartDFS.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.18/src/test/org/apache/hadoop/dfs/TestRestartDFS.java?rev=704194&r1=704193&r2=704194&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.18/src/test/org/apache/hadoop/dfs/TestRestartDFS.java (original)
+++ hadoop/core/branches/branch-0.18/src/test/org/apache/hadoop/dfs/TestRestartDFS.java Mon Oct 13 11:14:27 2008
@@ -18,12 +18,10 @@
package org.apache.hadoop.dfs;
-import java.io.IOException;
-import java.util.Random;
-import junit.framework.*;
+import junit.framework.TestCase;
+
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -31,30 +29,31 @@
* A JUnit test for checking if restarting DFS preserves integrity.
*/
public class TestRestartDFS extends TestCase {
-
- private static Configuration conf = new Configuration();
-
- public TestRestartDFS(String testName) {
- super(testName);
- }
-
- protected void setUp() throws Exception {
- }
-
- protected void tearDown() throws Exception {
- }
-
/** check if DFS remains in proper condition after a restart */
public void testRestartDFS() throws Exception {
+ final Configuration conf = new Configuration();
MiniDFSCluster cluster = null;
DFSTestUtil files = new DFSTestUtil("TestRestartDFS", 20, 3, 8*1024);
- Path root = new Path("/");
- long modificationTime;
+
+ final String dir = "/srcdat";
+ final Path rootpath = new Path("/");
+ final Path dirpath = new Path(dir);
+
+ long rootmtime;
+ FileStatus rootstatus;
+ FileStatus dirstatus;
+
try {
cluster = new MiniDFSCluster(conf, 4, true, null);
FileSystem fs = cluster.getFileSystem();
- files.createFiles(fs, "/srcdat");
- modificationTime = fs.getFileStatus(root).getModificationTime();
+ files.createFiles(fs, dir);
+
+ rootmtime = fs.getFileStatus(rootpath).getModificationTime();
+ rootstatus = fs.getFileStatus(rootpath);
+ dirstatus = fs.getFileStatus(dirpath);
+
+ fs.setOwner(rootpath, rootstatus.getOwner() + "_XXX", null);
+ fs.setOwner(dirpath, null, dirstatus.getGroup() + "_XXX");
} finally {
if (cluster != null) { cluster.shutdown(); }
}
@@ -62,11 +61,19 @@
// Here we restart the MiniDFScluster without formatting namenode
cluster = new MiniDFSCluster(conf, 4, false, null);
FileSystem fs = cluster.getFileSystem();
- assertEquals(modificationTime,
- fs.getFileStatus(root).getModificationTime());
assertTrue("Filesystem corrupted after restart.",
- files.checkFiles(fs, "/srcdat"));
- files.cleanup(fs, "/srcdat");
+ files.checkFiles(fs, dir));
+
+ final FileStatus newrootstatus = fs.getFileStatus(rootpath);
+ assertEquals(rootmtime, newrootstatus.getModificationTime());
+ assertEquals(rootstatus.getOwner() + "_XXX", newrootstatus.getOwner());
+ assertEquals(rootstatus.getGroup(), newrootstatus.getGroup());
+
+ final FileStatus newdirstatus = fs.getFileStatus(dirpath);
+ assertEquals(dirstatus.getOwner(), newdirstatus.getOwner());
+ assertEquals(dirstatus.getGroup() + "_XXX", newdirstatus.getGroup());
+
+ files.cleanup(fs, dir);
} finally {
if (cluster != null) { cluster.shutdown(); }
}