You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by sz...@apache.org on 2008/10/13 19:58:53 UTC
svn commit: r704188 - in /hadoop/core/branches/branch-0.19: CHANGES.txt
src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSImage.java
src/test/org/apache/hadoop/hdfs/TestRestartDFS.java
Author: szetszwo
Date: Mon Oct 13 10:58:53 2008
New Revision: 704188
URL: http://svn.apache.org/viewvc?rev=704188&view=rev
Log:
HADOOP-4395. The FSEditLog loading is incorrect for the case OP_SET_OWNER. (szetszwo)
Modified:
hadoop/core/branches/branch-0.19/CHANGES.txt
hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSImage.java
hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/TestRestartDFS.java
Modified: hadoop/core/branches/branch-0.19/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.19/CHANGES.txt?rev=704188&r1=704187&r2=704188&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.19/CHANGES.txt (original)
+++ hadoop/core/branches/branch-0.19/CHANGES.txt Mon Oct 13 10:58:53 2008
@@ -848,6 +848,9 @@
HADOOP-4228. dfs datanode metrics, bytes_read and bytes_written, overflow
due to incorrect type used. (hairong)
+ HADOOP-4395. The FSEditLog loading is incorrect for the case OP_SET_OWNER.
+ (szetszwo)
+
Release 0.18.1 - 2008-09-17
IMPROVEMENTS
Modified: hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java?rev=704188&r1=704187&r2=704188&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java (original)
+++ hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java Mon Oct 13 10:58:53 2008
@@ -732,7 +732,8 @@
throw new IOException("Unexpected opcode " + opcode
+ " for version " + logVersion);
fsDir.unprotectedSetOwner(FSImage.readString(in),
- FSImage.readString(in), FSImage.readString(in));
+ FSImage.readString_EmptyAsNull(in),
+ FSImage.readString_EmptyAsNull(in));
break;
}
case OP_SET_NS_QUOTA: {
Modified: hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSImage.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSImage.java?rev=704188&r1=704187&r2=704188&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSImage.java (original)
+++ hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSImage.java Mon Oct 13 10:58:53 2008
@@ -1540,6 +1540,11 @@
return U_STR.toString();
}
+ static String readString_EmptyAsNull(DataInputStream in) throws IOException {
+ final String s = readString(in);
+ return s.isEmpty()? null: s;
+ }
+
static byte[] readBytes(DataInputStream in) throws IOException {
U_STR.readFields(in);
int len = U_STR.getLength();
Modified: hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/TestRestartDFS.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/TestRestartDFS.java?rev=704188&r1=704187&r2=704188&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/TestRestartDFS.java (original)
+++ hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/TestRestartDFS.java Mon Oct 13 10:58:53 2008
@@ -18,12 +18,10 @@
package org.apache.hadoop.hdfs;
-import java.io.IOException;
-import java.util.Random;
-import junit.framework.*;
+import junit.framework.TestCase;
+
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -31,30 +29,31 @@
* A JUnit test for checking if restarting DFS preserves integrity.
*/
public class TestRestartDFS extends TestCase {
-
- private static Configuration conf = new Configuration();
-
- public TestRestartDFS(String testName) {
- super(testName);
- }
-
- protected void setUp() throws Exception {
- }
-
- protected void tearDown() throws Exception {
- }
-
/** check if DFS remains in proper condition after a restart */
public void testRestartDFS() throws Exception {
+ final Configuration conf = new Configuration();
MiniDFSCluster cluster = null;
DFSTestUtil files = new DFSTestUtil("TestRestartDFS", 20, 3, 8*1024);
- Path root = new Path("/");
- long modificationTime;
+
+ final String dir = "/srcdat";
+ final Path rootpath = new Path("/");
+ final Path dirpath = new Path(dir);
+
+ long rootmtime;
+ FileStatus rootstatus;
+ FileStatus dirstatus;
+
try {
cluster = new MiniDFSCluster(conf, 4, true, null);
FileSystem fs = cluster.getFileSystem();
- files.createFiles(fs, "/srcdat");
- modificationTime = fs.getFileStatus(root).getModificationTime();
+ files.createFiles(fs, dir);
+
+ rootmtime = fs.getFileStatus(rootpath).getModificationTime();
+ rootstatus = fs.getFileStatus(rootpath);
+ dirstatus = fs.getFileStatus(dirpath);
+
+ fs.setOwner(rootpath, rootstatus.getOwner() + "_XXX", null);
+ fs.setOwner(dirpath, null, dirstatus.getGroup() + "_XXX");
} finally {
if (cluster != null) { cluster.shutdown(); }
}
@@ -62,11 +61,19 @@
// Here we restart the MiniDFScluster without formatting namenode
cluster = new MiniDFSCluster(conf, 4, false, null);
FileSystem fs = cluster.getFileSystem();
- assertEquals(modificationTime,
- fs.getFileStatus(root).getModificationTime());
assertTrue("Filesystem corrupted after restart.",
- files.checkFiles(fs, "/srcdat"));
- files.cleanup(fs, "/srcdat");
+ files.checkFiles(fs, dir));
+
+ final FileStatus newrootstatus = fs.getFileStatus(rootpath);
+ assertEquals(rootmtime, newrootstatus.getModificationTime());
+ assertEquals(rootstatus.getOwner() + "_XXX", newrootstatus.getOwner());
+ assertEquals(rootstatus.getGroup(), newrootstatus.getGroup());
+
+ final FileStatus newdirstatus = fs.getFileStatus(dirpath);
+ assertEquals(dirstatus.getOwner(), newdirstatus.getOwner());
+ assertEquals(dirstatus.getGroup() + "_XXX", newdirstatus.getGroup());
+
+ files.cleanup(fs, dir);
} finally {
if (cluster != null) { cluster.shutdown(); }
}