You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by dh...@apache.org on 2007/08/07 19:15:18 UTC
svn commit: r563565 - in /lucene/hadoop/trunk: CHANGES.txt
src/java/org/apache/hadoop/fs/FsShell.java
src/java/org/apache/hadoop/fs/Trash.java
src/test/org/apache/hadoop/dfs/TestTrash.java
Author: dhruba
Date: Tue Aug 7 10:15:17 2007
New Revision: 563565
URL: http://svn.apache.org/viewvc?view=rev&rev=563565
Log:
HADOOP-1665. When HDFS Trash is enabled and the same file is created
and deleted more than once, the succeeding deletions create Trash item
names suffixed with an integer. (Dhruba Borthakur via dhruba)
Added:
lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestTrash.java (with props)
Modified:
lucene/hadoop/trunk/CHANGES.txt
lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/FsShell.java
lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/Trash.java
Modified: lucene/hadoop/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/CHANGES.txt?view=diff&rev=563565&r1=563564&r2=563565
==============================================================================
--- lucene/hadoop/trunk/CHANGES.txt (original)
+++ lucene/hadoop/trunk/CHANGES.txt Tue Aug 7 10:15:17 2007
@@ -484,6 +484,10 @@
144. HADOOP-1659. Fix a job id/job name mixup. (Arun C. Murthy via omalley)
+145. HADOOP-1665. When HDFS Trash is enabled and the same file is created
+ and deleted more than once, the succeeding deletions create Trash item
+ names suffixed with an integer. (Dhruba Borthakur via dhruba)
+
Release 0.13.0 - 2007-06-08
1. HADOOP-1047. Fix TestReplication to succeed more reliably.
Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/FsShell.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/FsShell.java?view=diff&rev=563565&r1=563564&r2=563565
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/FsShell.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/FsShell.java Tue Aug 7 10:15:17 2007
@@ -56,7 +56,6 @@
if (this.trash == null) {
this.trash = new Trash(conf);
}
- System.out.println("XXX FsShell init done");
}
/**
@@ -797,6 +796,13 @@
private void expunge() throws IOException {
trash.expunge();
trash.checkpoint();
+ }
+
+ /**
+ * Returns the path of the current trash directory used by this shell.
+ */
+ public Path getCurrentTrashDir() {
+ return trash.getCurrentTrashDir();
}
/**
Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/Trash.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/Trash.java?view=diff&rev=563565&r1=563564&r2=563565
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/Trash.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/Trash.java Tue Aug 7 10:15:17 2007
@@ -98,6 +98,14 @@
throw new IOException("Failed to create trash directory: "+trashDir);
}
try {
+ //
+ // if the target path in Trash already exists, then append with
+ // a number. Start from 1.
+ //
+ String orig = trashPath.toString();
+ for (int j = 1; fs.exists(trashPath); j++) {
+ trashPath = new Path(orig + "." + j);
+ }
if (fs.rename(path, trashPath)) // move to current trash
return true;
} catch (IOException e) {
@@ -153,6 +161,13 @@
}
}
}
+ }
+
+ //
+ // get the current trash directory
+ //
+ Path getCurrentTrashDir() {
+ return current;
}
/** Return a {@link Runnable} that periodically empties the trash.
Added: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestTrash.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestTrash.java?view=auto&rev=563565
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestTrash.java (added)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestTrash.java Tue Aug 7 10:15:17 2007
@@ -0,0 +1,221 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.dfs;
+
+import junit.framework.TestCase;
+import java.io.*;
+import java.util.*;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.*;
+import org.apache.hadoop.util.StringUtils;
+
+/**
+ * This class tests commands from Trash.
+ */
+public class TestTrash extends TestCase {
+
+ static private Path writeFile(FileSystem fs, Path f) throws IOException {
+ DataOutputStream out = fs.create(f);
+ out.writeBytes("dhruba: " + f);
+ out.close();
+ assertTrue(fs.exists(f));
+ return f;
+ }
+
+ static private Path mkdir(FileSystem fs, Path p) throws IOException {
+ assertTrue(fs.mkdirs(p));
+ assertTrue(fs.exists(p));
+ assertTrue(fs.getFileStatus(p).isDir());
+ return p;
+ }
+
+ // check that the specified file is in Trash
+ static void checkTrash(FileSystem fs, Path trashRoot, String pathname)
+ throws IOException {
+ Path p = new Path(trashRoot + pathname);
+ assertTrue(fs.exists(p));
+ }
+
+ // check that the specified file is not in Trash
+ static void checkNotInTrash(FileSystem fs, Path trashRoot, String pathname)
+ throws IOException {
+ Path p = new Path(trashRoot + pathname);
+ assertTrue(!fs.exists(p));
+ }
+
+ static void show(String s) {
+ System.out.println(Thread.currentThread().getStackTrace()[2] + " " + s);
+ }
+
+ /**
+ * Tests Trash
+ */
+ public void testTrash() throws IOException {
+ Configuration conf = new Configuration();
+ conf.set("fs.trash.interval", 10); // 10 minutes
+ conf.set("fs.trash.root",
+ conf.get("hadoop.tmp.dir") + "/Trash");
+ MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
+ FileSystem fs = cluster.getFileSystem();
+ DistributedFileSystem fileSys = (DistributedFileSystem) fs;
+ FsShell shell = new FsShell();
+ shell.setConf(conf);
+ Path trashRoot = null;
+
+ try {
+ // First create a new directory with mkdirs
+ Path myPath = new Path("/test/mkdirs");
+ mkdir(fs, myPath);
+
+ // Second, create a file in that directory.
+ Path myFile = new Path("/test/mkdirs/myFile");
+ writeFile(fs, myFile);
+
+ // Verify that we succeed in removing the file we created.
+ // This should go into Trash.
+ {
+ String[] args = new String[2];
+ args[0] = "-rm";
+ args[1] = "/test/mkdirs/myFile";
+ int val = -1;
+ try {
+ val = shell.run(args);
+ } catch (Exception e) {
+ System.err.println("Exception raised from Trash.run " +
+ e.getLocalizedMessage());
+ }
+ assertTrue(val == 0);
+
+ trashRoot = shell.getCurrentTrashDir();
+ checkTrash(fs, trashRoot, args[1]);
+ }
+
+ // Verify that we can recreate the file
+ writeFile(fs, myFile);
+
+ // Verify that we succeed in removing the file we re-created
+ {
+ String[] args = new String[2];
+ args[0] = "-rm";
+ args[1] = "/test/mkdirs/myFile";
+ int val = -1;
+ try {
+ val = shell.run(args);
+ } catch (Exception e) {
+ System.err.println("Exception raised from Trash.run " +
+ e.getLocalizedMessage());
+ }
+ assertTrue(val == 0);
+ }
+
+ // Verify that we can recreate the file
+ writeFile(fs, myFile);
+
+ // Verify that we succeed in removing the whole directory
+ // along with the file inside it.
+ {
+ String[] args = new String[2];
+ args[0] = "-rmr";
+ args[1] = "/test/mkdirs";
+ int val = -1;
+ try {
+ val = shell.run(args);
+ } catch (Exception e) {
+ System.err.println("Exception raised from Trash.run " +
+ e.getLocalizedMessage());
+ }
+ assertTrue(val == 0);
+ }
+
+ // recreate directory
+ mkdir(fs, myPath);
+
+ // Verify that we succeed in removing the whole directory
+ {
+ String[] args = new String[2];
+ args[0] = "-rmr";
+ args[1] = "/test/mkdirs";
+ int val = -1;
+ try {
+ val = shell.run(args);
+ } catch (Exception e) {
+ System.err.println("Exception raised from Trash.run " +
+ e.getLocalizedMessage());
+ }
+ assertTrue(val == 0);
+ }
+
+ // simulate Trash removal
+ {
+ String[] args = new String[1];
+ args[0] = "-expunge";
+ int val = -1;
+ try {
+ val = shell.run(args);
+ } catch (Exception e) {
+ System.err.println("Exception raised from Trash.run " +
+ e.getLocalizedMessage());
+ }
+ assertTrue(val == 0);
+ }
+
+ // verify that after expunging the Trash, it really goes away
+ checkNotInTrash(fs, trashRoot, "/test/mkdirs/myFile");
+
+ // recreate directory and file
+ mkdir(fs, myPath);
+ writeFile(fs, myFile);
+
+ // remove file first, then remove directory
+ {
+ String[] args = new String[2];
+ args[0] = "-rm";
+ args[1] = "/test/mkdirs/myFile";
+ int val = -1;
+ try {
+ val = shell.run(args);
+ } catch (Exception e) {
+ System.err.println("Exception raised from Trash.run " +
+ e.getLocalizedMessage());
+ }
+ assertTrue(val == 0);
+ checkTrash(fs, trashRoot, args[1]);
+
+ args = new String[2];
+ args[0] = "-rmr";
+ args[1] = "/test/mkdirs";
+ val = -1;
+ try {
+ val = shell.run(args);
+ } catch (Exception e) {
+ System.err.println("Exception raised from Trash.run " +
+ e.getLocalizedMessage());
+ }
+ assertTrue(val == 0);
+ checkTrash(fs, trashRoot, args[1]);
+ }
+ } finally {
+ try {
+ fs.close();
+ } catch (Exception e) {
+ }
+ cluster.shutdown();
+ }
+ }
+}
Propchange: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestTrash.java
------------------------------------------------------------------------------
svn:eol-style = native
Propchange: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestTrash.java
------------------------------------------------------------------------------
svn:keywords = Id Revision HeadURL