Posted to hdfs-commits@hadoop.apache.org by su...@apache.org on 2013/04/30 00:40:03 UTC
svn commit: r1477396 - in
/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: ./
src/main/java/org/apache/hadoop/hdfs/server/common/
src/main/java/org/apache/hadoop/hdfs/server/namenode/
src/test/java/org/apache/hadoop/hdfs/ src/test/java/org/apache/...
Author: suresh
Date: Mon Apr 29 22:40:03 2013
New Revision: 1477396
URL: http://svn.apache.org/r1477396
Log:
HDFS-4610. Reverting the patch since the Jenkins build was not run.
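The reverted patch had replaced direct java.io.File permission calls with the static org.apache.hadoop.fs.FileUtil wrappers named in the CHANGES.txt entry below; this commit restores the direct calls throughout. A minimal sketch of the two call styles the hunks toggle between, with the FileUtil signatures inferred from the replaced lines (centralizing these checks in FileUtil appears to have been the motivation for HDFS-4610):

    import java.io.File;

    import org.apache.hadoop.fs.FileUtil;

    public class PermStyles {
      public static void main(String[] args) {
        File f = new File(args[0]);

        // Style restored by this revert: instance methods on java.io.File.
        boolean direct = f.canWrite();
        f.setExecutable(true);

        // Style being removed (from HDFS-4610): static wrappers that take
        // the File as an argument and return the same boolean.
        boolean wrapped = FileUtil.canWrite(f);
        FileUtil.setExecutable(f, true);

        System.out.println(direct + " " + wrapped);
      }
    }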
Modified:
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImagePreTransactionalStorageInspector.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1477396&r1=1477395&r2=1477396&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Mon Apr 29 22:40:03 2013
@@ -332,9 +332,6 @@ Trunk (Unreleased)
HDFS-4734. HDFS Tests that use ShellCommandFencer are broken on Windows.
(Arpit Agarwal via suresh)
- HDFS-4610. Use common utils FileUtil#setReadable/Writable/Executable &
- FileUtil#canRead/Write/Execute. (Ivan Mitic via suresh)
-
BREAKDOWN OF HDFS-347 SUBTASKS AND RELATED JIRAS
HDFS-4353. Encapsulate connections to peers in Peer and PeerServer classes.
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java?rev=1477396&r1=1477395&r2=1477396&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java Mon Apr 29 22:40:03 2013
@@ -448,7 +448,7 @@ public abstract class Storage extends St
LOG.warn(rootPath + "is not a directory");
return StorageState.NON_EXISTENT;
}
- if (!FileUtil.canWrite(root)) {
+ if (!root.canWrite()) {
LOG.warn("Cannot access storage directory " + rootPath);
return StorageState.NON_EXISTENT;
}
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImagePreTransactionalStorageInspector.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImagePreTransactionalStorageInspector.java?rev=1477396&r1=1477395&r2=1477396&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImagePreTransactionalStorageInspector.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImagePreTransactionalStorageInspector.java Mon Apr 29 22:40:03 2013
@@ -33,7 +33,6 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
@@ -129,7 +128,7 @@ class FSImagePreTransactionalStorageInsp
static long readCheckpointTime(StorageDirectory sd) throws IOException {
File timeFile = NNStorage.getStorageFile(sd, NameNodeFile.TIME);
long timeStamp = 0L;
- if (timeFile.exists() && FileUtil.canRead(timeFile)) {
+ if (timeFile.exists() && timeFile.canRead()) {
DataInputStream in = new DataInputStream(new FileInputStream(timeFile));
try {
timeStamp = in.readLong();
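The restored readCheckpointTime above falls back to a timestamp of 0L whenever the TIME file is missing or unreadable, rather than failing the inspection. A self-contained sketch of that read-with-default pattern; the close-in-finally tail is an assumption, since the hunk ends inside the try block:

    import java.io.DataInputStream;
    import java.io.File;
    import java.io.FileInputStream;
    import java.io.IOException;

    class CheckpointTimeReader {
      // Return 0L unless the file exists, is readable, and yields a long.
      static long readTime(File timeFile) throws IOException {
        long timeStamp = 0L;
        if (timeFile.exists() && timeFile.canRead()) {
          DataInputStream in = new DataInputStream(new FileInputStream(timeFile));
          try {
            timeStamp = in.readLong();
          } finally {
            in.close();
          }
        }
        return timeStamp;
      }
    }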
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java?rev=1477396&r1=1477395&r2=1477396&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java Mon Apr 29 22:40:03 2013
@@ -34,7 +34,6 @@ import java.util.concurrent.CopyOnWriteA
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
@@ -231,8 +230,8 @@ public class NNStorage extends Storage i
File root = sd.getRoot();
LOG.info("currently disabled dir " + root.getAbsolutePath() +
"; type="+sd.getStorageDirType()
- + ";canwrite="+FileUtil.canWrite(root));
- if(root.exists() && FileUtil.canWrite(root)) {
+ + ";canwrite="+root.canWrite());
+ if(root.exists() && root.canWrite()) {
LOG.info("restoring dir " + sd.getRoot().getAbsolutePath());
this.addStorageDir(sd); // restore
this.removedStorageDirs.remove(sd);
@@ -506,7 +505,7 @@ public class NNStorage extends Storage i
dirIterator(NameNodeDirType.IMAGE); it.hasNext();) {
sd = it.next();
File fsImage = getStorageFile(sd, NameNodeFile.IMAGE, txid);
- if(FileUtil.canRead(sd.getRoot()) && fsImage.exists())
+ if(sd.getRoot().canRead() && fsImage.exists())
return fsImage;
}
return null;
@@ -723,7 +722,7 @@ public class NNStorage extends Storage i
private File findFile(NameNodeDirType dirType, String name) {
for (StorageDirectory sd : dirIterable(dirType)) {
File candidate = new File(sd.getCurrentDir(), name);
- if (FileUtil.canRead(sd.getCurrentDir()) &&
+ if (sd.getCurrentDir().canRead() &&
candidate.exists()) {
return candidate;
}
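The NNStorage hunks above all restore one shape of lookup: iterate candidate storage directories, skip any whose directory is unreadable, and return the first candidate file that exists. A standalone rendering of that scan, with a plain List<File> standing in for the real StorageDirectory iterables:

    import java.io.File;
    import java.util.List;

    class FirstReadable {
      // Same shape as NNStorage#findFile: probe each parent directory and
      // return the first readable one that contains the named file.
      static File findFile(List<File> currentDirs, String name) {
        for (File dir : currentDirs) {
          File candidate = new File(dir, name);
          if (dir.canRead() && candidate.exists()) {
            return candidate;
          }
        }
        return null;
      }
    }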
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java?rev=1477396&r1=1477395&r2=1477396&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java Mon Apr 29 22:40:03 2013
@@ -33,7 +33,6 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.util.Time;
@@ -103,7 +102,7 @@ public class TransferFsImage {
assert !dstFiles.isEmpty() : "No checkpoint targets.";
for (File f : dstFiles) {
- if (f.exists() && FileUtil.canRead(f)) {
+ if (f.exists() && f.canRead()) {
LOG.info("Skipping download of remote edit log " +
log + " since it already is stored locally at " + f);
return;
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=1477396&r1=1477395&r2=1477396&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java Mon Apr 29 22:40:03 2013
@@ -681,9 +681,9 @@ public class MiniDFSCluster {
sb.append("\tabsolute:").append(path.getAbsolutePath()).append("\n");
sb.append("\tpermissions: ");
sb.append(path.isDirectory() ? "d": "-");
- sb.append(FileUtil.canRead(path) ? "r" : "-");
- sb.append(FileUtil.canWrite(path) ? "w" : "-");
- sb.append(FileUtil.canExecute(path) ? "x" : "-");
+ sb.append(path.canRead() ? "r" : "-");
+ sb.append(path.canWrite() ? "w" : "-");
+ sb.append(path.canExecute() ? "x" : "-");
sb.append("\n");
path = path.getParentFile();
}
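The MiniDFSCluster hunk above sits inside a debug helper that prints an ls-style permission line for a path and every ancestor up to the filesystem root. A sketch of the whole loop; the while framing around the quoted lines is assumed:

    import java.io.File;

    class PermString {
      // Renders "drwx"-style flags for a path and each of its ancestors.
      static String describe(File path) {
        StringBuilder sb = new StringBuilder();
        while (path != null) {
          sb.append("\tabsolute:").append(path.getAbsolutePath()).append("\n");
          sb.append("\tpermissions: ");
          sb.append(path.isDirectory() ? "d" : "-");
          sb.append(path.canRead() ? "r" : "-");
          sb.append(path.canWrite() ? "w" : "-");
          sb.append(path.canExecute() ? "x" : "-");
          sb.append("\n");
          path = path.getParentFile();
        }
        return sb.toString();
      }
    }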
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java?rev=1477396&r1=1477395&r2=1477396&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java Mon Apr 29 22:40:03 2013
@@ -31,7 +31,6 @@ import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.BlockReader;
import org.apache.hadoop.hdfs.BlockReaderFactory;
@@ -92,10 +91,10 @@ public class TestDataNodeVolumeFailure {
@After
public void tearDown() throws Exception {
if(data_fail != null) {
- FileUtil.setWritable(data_fail, true);
+ data_fail.setWritable(true);
}
if(failedDir != null) {
- FileUtil.setWritable(failedDir, true);
+ failedDir.setWritable(true);
}
if(cluster != null) {
cluster.shutdown();
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java?rev=1477396&r1=1477395&r2=1477396&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java Mon Apr 29 22:40:03 2013
@@ -31,7 +31,6 @@ import org.apache.commons.logging.LogFac
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
@@ -89,8 +88,8 @@ public class TestDataNodeVolumeFailureRe
@After
public void tearDown() throws Exception {
for (int i = 0; i < 3; i++) {
- FileUtil.setExecutable(new File(dataDir, "data"+(2*i+1)), true);
- FileUtil.setExecutable(new File(dataDir, "data"+(2*i+2)), true);
+ new File(dataDir, "data"+(2*i+1)).setExecutable(true);
+ new File(dataDir, "data"+(2*i+2)).setExecutable(true);
}
cluster.shutdown();
}
@@ -132,8 +131,8 @@ public class TestDataNodeVolumeFailureRe
* fail. The client does not retry failed nodes even though
* perhaps they could succeed because just a single volume failed.
*/
- assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn1Vol1, false));
- assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn2Vol1, false));
+ assertTrue("Couldn't chmod local vol", dn1Vol1.setExecutable(false));
+ assertTrue("Couldn't chmod local vol", dn2Vol1.setExecutable(false));
/*
* Create file1 and wait for 3 replicas (ie all DNs can still
@@ -169,7 +168,7 @@ public class TestDataNodeVolumeFailureRe
* Now fail a volume on the third datanode. We should be able to get
* three replicas since we've already identified the other failures.
*/
- assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn3Vol1, false));
+ assertTrue("Couldn't chmod local vol", dn3Vol1.setExecutable(false));
Path file2 = new Path("/test2");
DFSTestUtil.createFile(fs, file2, 1024, (short)3, 1L);
DFSTestUtil.waitReplication(fs, file2, (short)3);
@@ -201,7 +200,7 @@ public class TestDataNodeVolumeFailureRe
* and that it's no longer up. Only wait for two replicas since
* we'll never get a third.
*/
- assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn3Vol2, false));
+ assertTrue("Couldn't chmod local vol", dn3Vol2.setExecutable(false));
Path file3 = new Path("/test3");
DFSTestUtil.createFile(fs, file3, 1024, (short)3, 1L);
DFSTestUtil.waitReplication(fs, file3, (short)2);
@@ -223,10 +222,10 @@ public class TestDataNodeVolumeFailureRe
* restart, so file creation should be able to succeed after
* restoring the data directories and restarting the datanodes.
*/
- assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn1Vol1, true));
- assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn2Vol1, true));
- assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn3Vol1, true));
- assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn3Vol2, true));
+ assertTrue("Couldn't chmod local vol", dn1Vol1.setExecutable(true));
+ assertTrue("Couldn't chmod local vol", dn2Vol1.setExecutable(true));
+ assertTrue("Couldn't chmod local vol", dn3Vol1.setExecutable(true));
+ assertTrue("Couldn't chmod local vol", dn3Vol2.setExecutable(true));
cluster.restartDataNodes();
cluster.waitActive();
Path file4 = new Path("/test4");
@@ -262,8 +261,8 @@ public class TestDataNodeVolumeFailureRe
// third healthy so one node in the pipeline will not fail).
File dn1Vol1 = new File(dataDir, "data"+(2*0+1));
File dn2Vol1 = new File(dataDir, "data"+(2*1+1));
- assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn1Vol1, false));
- assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn2Vol1, false));
+ assertTrue("Couldn't chmod local vol", dn1Vol1.setExecutable(false));
+ assertTrue("Couldn't chmod local vol", dn2Vol1.setExecutable(false));
Path file1 = new Path("/test1");
DFSTestUtil.createFile(fs, file1, 1024, (short)2, 1L);
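The volume-failure tests above simulate a dead disk by dropping the execute bit on a data directory (an untraversable directory reads as a failed volume to the datanode) and restore the bit in tearDown so later tests see healthy volumes again. That simulate/restore discipline, pulled out of the JUnit wiring:

    import java.io.File;

    import static org.junit.Assert.assertTrue;

    class VolumeFailureSim {
      // Removing the owner's execute bit makes the directory untraversable.
      static void failVolume(File vol) {
        assertTrue("Couldn't chmod local vol", vol.setExecutable(false));
      }

      // Always restore, in tearDown or finally, or every later test against
      // the same dataDir inherits the failure.
      static void restoreVolume(File vol) {
        assertTrue("Couldn't chmod local vol", vol.setExecutable(true));
      }
    }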
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java?rev=1477396&r1=1477395&r2=1477396&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java Mon Apr 29 22:40:03 2013
@@ -77,8 +77,8 @@ public class TestDataNodeVolumeFailureTo
@After
public void tearDown() throws Exception {
for (int i = 0; i < 3; i++) {
- FileUtil.setExecutable(new File(dataDir, "data"+(2*i+1)), true);
- FileUtil.setExecutable(new File(dataDir, "data"+(2*i+2)), true);
+ new File(dataDir, "data"+(2*i+1)).setExecutable(true);
+ new File(dataDir, "data"+(2*i+2)).setExecutable(true);
}
cluster.shutdown();
}
@@ -152,7 +152,7 @@ public class TestDataNodeVolumeFailureTo
// Fail a volume on the 2nd DN
File dn2Vol1 = new File(dataDir, "data"+(2*1+1));
- assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn2Vol1, false));
+ assertTrue("Couldn't chmod local vol", dn2Vol1.setExecutable(false));
// Should only get two replicas (the first DN and the 3rd)
Path file1 = new Path("/test1");
@@ -165,7 +165,7 @@ public class TestDataNodeVolumeFailureTo
// If we restore the volume we should still only be able to get
// two replicas since the DN is still considered dead.
- assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn2Vol1, true));
+ assertTrue("Couldn't chmod local vol", dn2Vol1.setExecutable(true));
Path file2 = new Path("/test2");
DFSTestUtil.createFile(fs, file2, 1024, (short)3, 1L);
DFSTestUtil.waitReplication(fs, file2, (short)2);
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java?rev=1477396&r1=1477395&r2=1477396&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java Mon Apr 29 22:40:03 2013
@@ -27,7 +27,6 @@ import java.net.Socket;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -107,8 +106,8 @@ public class TestDiskError {
}
} finally {
// restore its old permission
- FileUtil.setWritable(dir1, true);
- FileUtil.setWritable(dir2, true);
+ dir1.setWritable(true);
+ dir2.setWritable(true);
}
}
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java?rev=1477396&r1=1477395&r2=1477396&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java Mon Apr 29 22:40:03 2013
@@ -157,7 +157,7 @@ public class TestCheckpoint {
try {
// Simulate the mount going read-only
- FileUtil.setWritable(dir, false);
+ dir.setWritable(false);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
.format(false).build();
fail("NN should have failed to start with " + dir + " set unreadable");
@@ -167,7 +167,7 @@ public class TestCheckpoint {
} finally {
cleanup(cluster);
cluster = null;
- FileUtil.setWritable(dir, true);
+ dir.setWritable(true);
}
}
}
@@ -1825,7 +1825,7 @@ public class TestCheckpoint {
StorageDirectory sd1 = storage.getStorageDir(1);
currentDir = sd0.getCurrentDir();
- FileUtil.setExecutable(currentDir, false);
+ currentDir.setExecutable(false);
// Upload checkpoint when NN has a bad storage dir. This should
// succeed and create the checkpoint in the good dir.
@@ -1835,7 +1835,7 @@ public class TestCheckpoint {
new File(sd1.getCurrentDir(), NNStorage.getImageFileName(2)));
// Restore the good dir
- FileUtil.setExecutable(currentDir, true);
+ currentDir.setExecutable(true);
nn.restoreFailedStorage("true");
nn.rollEditLog();
@@ -1846,7 +1846,7 @@ public class TestCheckpoint {
assertParallelFilesInvariant(cluster, ImmutableList.of(secondary));
} finally {
if (currentDir != null) {
- FileUtil.setExecutable(currentDir, true);
+ currentDir.setExecutable(true);
}
cleanup(secondary);
secondary = null;
@@ -1896,7 +1896,7 @@ public class TestCheckpoint {
StorageDirectory sd0 = storage.getStorageDir(0);
assertEquals(NameNodeDirType.IMAGE, sd0.getStorageDirType());
currentDir = sd0.getCurrentDir();
- FileUtil.setExecutable(currentDir, false);
+ currentDir.setExecutable(false);
// Try to upload checkpoint -- this should fail since there are no
// valid storage dirs
@@ -1909,7 +1909,7 @@ public class TestCheckpoint {
}
// Restore the good dir
- FileUtil.setExecutable(currentDir, true);
+ currentDir.setExecutable(true);
nn.restoreFailedStorage("true");
nn.rollEditLog();
@@ -1920,7 +1920,7 @@ public class TestCheckpoint {
assertParallelFilesInvariant(cluster, ImmutableList.of(secondary));
} finally {
if (currentDir != null) {
- FileUtil.setExecutable(currentDir, true);
+ currentDir.setExecutable(true);
}
cleanup(secondary);
secondary = null;
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java?rev=1477396&r1=1477395&r2=1477396&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java Mon Apr 29 22:40:03 2013
@@ -881,14 +881,14 @@ public class TestEditLog {
logDir.mkdirs();
FSEditLog log = FSImageTestUtil.createStandaloneEditLog(logDir);
try {
- FileUtil.setWritable(logDir, false);
+ logDir.setWritable(false);
log.openForWrite();
fail("Did no throw exception on only having a bad dir");
} catch (IOException ioe) {
GenericTestUtils.assertExceptionContains(
"too few journals successfully started", ioe);
} finally {
- FileUtil.setWritable(logDir, true);
+ logDir.setWritable(true);
log.close();
}
}
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java?rev=1477396&r1=1477395&r2=1477396&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java Mon Apr 29 22:40:03 2013
@@ -28,7 +28,6 @@ import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -107,10 +106,10 @@ public class TestNNStorageRetentionFunct
getInProgressEditsFileName(5));
LOG.info("Failing first storage dir by chmodding it");
- FileUtil.setExecutable(sd0, false);
+ sd0.setExecutable(false);
doSaveNamespace(nn);
LOG.info("Restoring accessibility of first storage dir");
- FileUtil.setExecutable(sd0, true);
+ sd0.setExecutable(true);
LOG.info("nothing should have been purged in first storage dir");
assertGlobEquals(cd0, "fsimage_\\d*",
@@ -139,7 +138,7 @@ public class TestNNStorageRetentionFunct
assertGlobEquals(cd0, "edits_.*",
getInProgressEditsFileName(9));
} finally {
- FileUtil.setExecutable(sd0, true);
+ sd0.setExecutable(true);
LOG.info("Shutting down...");
if (cluster != null) {