You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by dh...@apache.org on 2009/03/11 17:50:10 UTC
svn commit: r752514 - in /hadoop/core/trunk: ./ src/c++/libhdfs/tests/conf/
src/hdfs/ src/hdfs/org/apache/hadoop/hdfs/protocol/
src/hdfs/org/apache/hadoop/hdfs/server/datanode/
src/hdfs/org/apache/hadoop/hdfs/server/namenode/
src/test/org/apache/hadoop...
Author: dhruba
Date: Wed Mar 11 16:50:09 2009
New Revision: 752514
URL: http://svn.apache.org/viewvc?rev=752514&view=rev
Log:
HADOOP-5332. Appending to files is not allowed (by default) unless
dfs.support.append is set to true. (dhruba)
Modified:
hadoop/core/trunk/CHANGES.txt
hadoop/core/trunk/src/c++/libhdfs/tests/conf/hdfs-site.xml
hadoop/core/trunk/src/hdfs/hdfs-default.xml
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
hadoop/core/trunk/src/test/org/apache/hadoop/fs/permission/TestStickyBit.java
hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestFileAppend2.java
hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestFileAppend3.java
hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestFileCreationDelete.java
hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestLeaseRecovery.java
hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestQuota.java
hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestRenameWhileOpen.java
Modified: hadoop/core/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/CHANGES.txt?rev=752514&r1=752513&r2=752514&view=diff
==============================================================================
--- hadoop/core/trunk/CHANGES.txt (original)
+++ hadoop/core/trunk/CHANGES.txt Wed Mar 11 16:50:09 2009
@@ -995,6 +995,9 @@
HADOOP-5307. Fix null value handling in StringUtils#arrayToString() and
#getStrings(). (enis)
+
+ HADOOP-5332. Appending to files is not allowed (by default) unless
+ dfs.support.append is set to true. (dhruba)
Release 0.19.1 - Unreleased
Modified: hadoop/core/trunk/src/c++/libhdfs/tests/conf/hdfs-site.xml
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/c%2B%2B/libhdfs/tests/conf/hdfs-site.xml?rev=752514&r1=752513&r2=752514&view=diff
==============================================================================
--- hadoop/core/trunk/src/c++/libhdfs/tests/conf/hdfs-site.xml (original)
+++ hadoop/core/trunk/src/c++/libhdfs/tests/conf/hdfs-site.xml Wed Mar 11 16:50:09 2009
@@ -14,4 +14,11 @@
</description>
</property>
+<property>
+ <name>dfs.support.append</name>
+ <value>true</value>
+ <description>Allow appends to files.
+ </description>
+</property>
+
</configuration>
Modified: hadoop/core/trunk/src/hdfs/hdfs-default.xml
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/hdfs-default.xml?rev=752514&r1=752513&r2=752514&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/hdfs-default.xml (original)
+++ hadoop/core/trunk/src/hdfs/hdfs-default.xml Wed Mar 11 16:50:09 2009
@@ -353,4 +353,13 @@
</description>
</property>
-</configuration>
\ No newline at end of file
+<property>
+ <name>dfs.support.append</name>
+ <value>false</value>
+  <description>Does HDFS allow appends to files?
+  This is currently set to false because there are bugs in the
+  "append code" and it is not supported in any production cluster.
+  </description>
+</property>
+
+</configuration>
Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientProtocol.java?rev=752514&r1=752513&r2=752514&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientProtocol.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientProtocol.java Wed Mar 11 16:50:09 2009
@@ -117,6 +117,9 @@
* @throws AccessControlException if permission to append file is
* denied by the system. As usually on the client side the exception will
* be wrapped into {@link org.apache.hadoop.ipc.RemoteException}.
+ * Allows appending to an existing file if the server is
+ * configured with the parameter dfs.support.append set to true, otherwise
+ * throws an IOException.
* @throws IOException if other errors occur.
*/
public LocatedBlock append(String src, String clientName) throws IOException;
Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java?rev=752514&r1=752513&r2=752514&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java Wed Mar 11 16:50:09 2009
@@ -297,6 +297,7 @@
FSVolume(File currentDir, Configuration conf) throws IOException {
this.reserved = conf.getLong("dfs.datanode.du.reserved", 0);
+ boolean supportAppends = conf.getBoolean("dfs.support.append", false);
File parent = currentDir.getParentFile();
this.detachDir = new File(parent, "detach");
@@ -311,7 +312,11 @@
//
this.tmpDir = new File(parent, "tmp");
if (tmpDir.exists()) {
- recoverDetachedBlocks(currentDir, tmpDir);
+ if (supportAppends) {
+ recoverDetachedBlocks(currentDir, tmpDir);
+ } else {
+ FileUtil.fullyDelete(tmpDir);
+ }
}
this.dataDir = new FSDir(currentDir);
if (!tmpDir.mkdirs()) {
Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=752514&r1=752513&r2=752514&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Wed Mar 11 16:50:09 2009
@@ -422,7 +422,7 @@
this.blockInvalidateLimit = Math.max(this.blockInvalidateLimit,
20*(int)(heartbeatInterval/1000));
this.accessTimePrecision = conf.getLong("dfs.access.time.precision", 0);
- this.supportAppends = conf.getBoolean("dfs.support.append", true);
+ this.supportAppends = conf.getBoolean("dfs.support.append", false);
}
/**
Modified: hadoop/core/trunk/src/test/org/apache/hadoop/fs/permission/TestStickyBit.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/fs/permission/TestStickyBit.java?rev=752514&r1=752513&r2=752514&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/fs/permission/TestStickyBit.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/fs/permission/TestStickyBit.java Wed Mar 11 16:50:09 2009
@@ -162,6 +162,7 @@
try {
Configuration conf = new Configuration();
conf.setBoolean("dfs.permissions", true);
+ conf.setBoolean("dfs.support.append", true);
cluster = new MiniDFSCluster(conf, 4, true, null);
FileSystem hdfs = cluster.getFileSystem();
Modified: hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestFileAppend2.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestFileAppend2.java?rev=752514&r1=752513&r2=752514&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestFileAppend2.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestFileAppend2.java Wed Mar 11 16:50:09 2009
@@ -128,6 +128,7 @@
conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
}
conf.setInt("dfs.datanode.handler.count", 50);
+ conf.setBoolean("dfs.support.append", true);
initBuffer(fileSize);
MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
FileSystem fs = cluster.getFileSystem();
@@ -377,6 +378,7 @@
conf.setInt("dfs.socket.timeout", 30000);
conf.setInt("dfs.datanode.socket.write.timeout", 30000);
conf.setInt("dfs.datanode.handler.count", 50);
+ conf.setBoolean("dfs.support.append", true);
MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes,
true, null);
Modified: hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestFileAppend3.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestFileAppend3.java?rev=752514&r1=752513&r2=752514&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestFileAppend3.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestFileAppend3.java Wed Mar 11 16:50:09 2009
@@ -52,6 +52,7 @@
AppendTestUtil.LOG.info("setUp()");
conf = new Configuration();
conf.setInt("io.bytes.per.checksum", 512);
+ conf.setBoolean("dfs.support.append", true);
buffersize = conf.getInt("io.file.buffer.size", 4096);
cluster = new MiniDFSCluster(conf, DATANODE_NUM, true, null);
fs = (DistributedFileSystem)cluster.getFileSystem();
@@ -266,4 +267,4 @@
//c. Reopen file and read 25687+5877 bytes of data from file. Close file.
AppendTestUtil.check(fs, p, len1 + len2);
}
-}
\ No newline at end of file
+}
Modified: hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestFileCreationDelete.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestFileCreationDelete.java?rev=752514&r1=752513&r2=752514&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestFileCreationDelete.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestFileCreationDelete.java Wed Mar 11 16:50:09 2009
@@ -42,6 +42,7 @@
conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
conf.setInt("heartbeat.recheck.interval", 1000);
conf.setInt("dfs.heartbeat.interval", 1);
+ conf.setBoolean("dfs.support.append", true);
// create cluster
MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
Modified: hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestLeaseRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestLeaseRecovery.java?rev=752514&r1=752513&r2=752514&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestLeaseRecovery.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestLeaseRecovery.java Wed Mar 11 16:50:09 2009
@@ -58,6 +58,7 @@
final int ORG_FILE_SIZE = 3000;
Configuration conf = new Configuration();
conf.setLong("dfs.block.size", BLOCK_SIZE);
+ conf.setBoolean("dfs.support.append", true);
MiniDFSCluster cluster = null;
try {
Modified: hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestQuota.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestQuota.java?rev=752514&r1=752513&r2=752514&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestQuota.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestQuota.java Wed Mar 11 16:50:09 2009
@@ -57,6 +57,7 @@
// set a smaller block size so that we can test with smaller
// Space quotas
conf.set("dfs.block.size", "512");
+ conf.setBoolean("dfs.support.append", true);
final MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
final FileSystem fs = cluster.getFileSystem();
assertTrue("Not a HDFS: "+fs.getUri(),
@@ -429,6 +430,7 @@
// set a smaller block size so that we can test with smaller
// diskspace quotas
conf.set("dfs.block.size", "512");
+ conf.setBoolean("dfs.support.append", true);
final MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
final FileSystem fs = cluster.getFileSystem();
assertTrue("Not a HDFS: "+fs.getUri(),
Modified: hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestRenameWhileOpen.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestRenameWhileOpen.java?rev=752514&r1=752513&r2=752514&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestRenameWhileOpen.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestRenameWhileOpen.java Wed Mar 11 16:50:09 2009
@@ -48,6 +48,7 @@
conf.setInt("heartbeat.recheck.interval", 1000);
conf.setInt("dfs.heartbeat.interval", 1);
conf.setInt("dfs.safemode.threshold.pct", 1);
+ conf.setBoolean("dfs.support.append", true);
// create cluster
System.out.println("Test 1*****************************");
@@ -120,6 +121,7 @@
conf.setInt("heartbeat.recheck.interval", 1000);
conf.setInt("dfs.heartbeat.interval", 1);
conf.setInt("dfs.safemode.threshold.pct", 1);
+ conf.setBoolean("dfs.support.append", true);
System.out.println("Test 2************************************");
// create cluster
@@ -192,6 +194,7 @@
conf.setInt("heartbeat.recheck.interval", 1000);
conf.setInt("dfs.heartbeat.interval", 1);
conf.setInt("dfs.safemode.threshold.pct", 1);
+ conf.setBoolean("dfs.support.append", true);
System.out.println("Test 3************************************");
// create cluster
@@ -254,6 +257,7 @@
conf.setInt("heartbeat.recheck.interval", 1000);
conf.setInt("dfs.heartbeat.interval", 1);
conf.setInt("dfs.safemode.threshold.pct", 1);
+ conf.setBoolean("dfs.support.append", true);
System.out.println("Test 4************************************");
// create cluster