You are viewing a plain text version of this content. The canonical link for it is here.
Posted to hdfs-commits@hadoop.apache.org by sz...@apache.org on 2009/09/09 22:40:32 UTC
svn commit: r813107 - in /hadoop/hdfs/branches/HDFS-265: ./ lib/
src/contrib/hdfsproxy/ src/java/ src/java/org/apache/hadoop/hdfs/
src/java/org/apache/hadoop/hdfs/protocol/
src/java/org/apache/hadoop/hdfs/server/common/
src/java/org/apache/hadoop/hdfs/...
Author: szetszwo
Date: Wed Sep 9 20:40:31 2009
New Revision: 813107
URL: http://svn.apache.org/viewvc?rev=813107&view=rev
Log:
Merge -r 811494:813103 from trunk.
Added:
hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/ByteRangeInputStream.java
- copied unchanged from r813103, hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/ByteRangeInputStream.java
hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestByteRangeInputStream.java
- copied unchanged from r813103, hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestByteRangeInputStream.java
hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStreamFile.java
- copied unchanged from r813103, hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStreamFile.java
Modified:
hadoop/hdfs/branches/HDFS-265/ (props changed)
hadoop/hdfs/branches/HDFS-265/CHANGES.txt
hadoop/hdfs/branches/HDFS-265/build.xml (contents, props changed)
hadoop/hdfs/branches/HDFS-265/lib/hadoop-core-0.21.0-dev.jar
hadoop/hdfs/branches/HDFS-265/lib/hadoop-core-test-0.21.0-dev.jar
hadoop/hdfs/branches/HDFS-265/lib/hadoop-mapred-0.21.0-dev.jar
hadoop/hdfs/branches/HDFS-265/lib/hadoop-mapred-examples-0.21.0-dev.jar
hadoop/hdfs/branches/HDFS-265/lib/hadoop-mapred-test-0.21.0-dev.jar
hadoop/hdfs/branches/HDFS-265/src/contrib/hdfsproxy/ (props changed)
hadoop/hdfs/branches/HDFS-265/src/java/ (props changed)
hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/DFSClient.java
hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/HftpFileSystem.java
hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/protocol/Block.java
hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/protocol/FSConstants.java
hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/common/GenerationStamp.java
hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java (props changed)
hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeActivityMBean.java
hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/StreamFile.java
hadoop/hdfs/branches/HDFS-265/src/test/hdfs/ (props changed)
hadoop/hdfs/branches/HDFS-265/src/test/hdfs-with-mr/ (props changed)
hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java
hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSPermission.java
hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java
hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/security/TestPermission.java
hadoop/hdfs/branches/HDFS-265/src/webapps/datanode/ (props changed)
hadoop/hdfs/branches/HDFS-265/src/webapps/hdfs/ (props changed)
hadoop/hdfs/branches/HDFS-265/src/webapps/secondary/ (props changed)
Propchange: hadoop/hdfs/branches/HDFS-265/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Wed Sep 9 20:40:31 2009
@@ -1,2 +1,2 @@
/hadoop/core/branches/branch-0.19/hdfs:713112
-/hadoop/hdfs/trunk:796829-800617,800619-803337,804756-805652,808672-809439
+/hadoop/hdfs/trunk:796829-800617,800619-803337,804756-805652,808672-809439,811495-813103
Modified: hadoop/hdfs/branches/HDFS-265/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/CHANGES.txt?rev=813107&r1=813106&r2=813107&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/CHANGES.txt (original)
+++ hadoop/hdfs/branches/HDFS-265/CHANGES.txt Wed Sep 9 20:40:31 2009
@@ -62,6 +62,18 @@
HDFS-492. Add two JSON JSP pages to the Namenode for providing corrupt
blocks/replicas information. (Bill Zeller via szetszwo)
+ HDFS-578. Add support for new FileSystem method for clients to get server
+ defaults. (Kan Zhang via suresh)
+
+ HDFS-595. umask settings in configuration may now use octal or symbolic
+ instead of decimal. (Jakob Homan via suresh)
+
+ HADOOP-6234. Updated hadoop-core and test jars to propagate new option
+ dfs.umaskmode in configuration. (Jakob Homan via suresh)
+
+ HDFS-235. Add support for byte ranges in HftpFileSystem to serve
+ range of bytes from a file. (Bill Zeller via suresh)
+
IMPROVEMENTS
HDFS-381. Remove blocks from DataNode maps when corresponding file
@@ -168,6 +180,12 @@
HDFS-551. Create new functional test for a block report. (Konstantin
Boudnik via hairong)
+ HDFS-288. Redundant computation in hashCode() implementation.
+ (szetszwo via tomwhite)
+
+ HDFS-412. Hadoop JMX usage makes Nagios monitoring impossible.
+ (Brian Bockelman via tomwhite)
+
BUG FIXES
HDFS-76. Better error message to users when commands fail because of
@@ -239,6 +257,12 @@
HDFS-586. TestBlocksWithNotEnoughRacks sometimes fails.
(Jitendra Nath Pandey via hairong)
+ HADOOP-6243. Fixed a NullPointerException in handling deprecated keys.
+ (Sreekanth Ramakrishnan via yhemanth)
+
+ HDFS-605. Do not run fault injection tests in the run-test-hdfs-with-mr
+ target. (Konstantin Boudnik via szetszwo)
+
Release 0.20.1 - Unreleased
IMPROVEMENTS
Modified: hadoop/hdfs/branches/HDFS-265/build.xml
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/build.xml?rev=813107&r1=813106&r2=813107&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/build.xml (original)
+++ hadoop/hdfs/branches/HDFS-265/build.xml Wed Sep 9 20:40:31 2009
@@ -378,15 +378,6 @@
</subant>
</target>
- <target name="run-test-hdfs-with-mr-fault-inject" depends="injectfaults"
- description="Run hdfs Fault Injection related unit tests that require mapred">
- <subant buildpath="build.xml" target="run-test-hdfs-with-mr">
- <property name="build.dir" value="${build-fi.dir}"/>
- <property name="test.fault.inject" value="yes"/>
- <property name="test.include" value="TestFi*"/>
- </subant>
- </target>
-
<!-- ================================================================== -->
<!-- Make hadoop-fi.jar including all Fault Injected artifacts -->
<!-- ================================================================== -->
@@ -702,17 +693,9 @@
includes="**/${test.include}.java"
excludes="**/${test.exclude}.java" />
</batchtest>
- <batchtest todir="${test.build.dir}" if="tests.notestcase.fi">
- <fileset dir="${test.src.dir}/aop"
- includes="**/${test.include}.java"
- excludes="**/${test.exclude}.java" />
- </batchtest>
<batchtest todir="${test.build.dir}" if="tests.testcase">
<fileset dir="${test.src.dir}/hdfs-with-mr" includes="**/${testcase}.java"/>
</batchtest>
- <batchtest todir="${test.build.dir}" if="tests.testcase.fi">
- <fileset dir="${test.src.dir}/aop" includes="**/${testcase}.java"/>
- </batchtest>
</junit>
<antcall target="checkfailure"/>
</target>
@@ -737,7 +720,6 @@
<antcall target="run-test-hdfs"/>
<antcall target="run-test-hdfs-with-mr"/>
<antcall target="run-test-hdfs-fault-inject"/>
- <antcall target="run-test-hdfs-with-mr-fault-inject"/>
<available file="${test.build.dir}/testsfailed" property="testsfailed"/>
<fail if="testsfailed">Tests failed!</fail>
</target>
Propchange: hadoop/hdfs/branches/HDFS-265/build.xml
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Wed Sep 9 20:40:31 2009
@@ -1,3 +1,3 @@
/hadoop/core/branches/branch-0.19/hdfs/build.xml:713112
/hadoop/core/trunk/build.xml:779102
-/hadoop/hdfs/trunk/build.xml:796829-800617,800619-803337,804756-805652,808672-809439
+/hadoop/hdfs/trunk/build.xml:796829-800617,800619-803337,804756-805652,808672-809439,811495-813103
Modified: hadoop/hdfs/branches/HDFS-265/lib/hadoop-core-0.21.0-dev.jar
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/lib/hadoop-core-0.21.0-dev.jar?rev=813107&r1=813106&r2=813107&view=diff
==============================================================================
Binary files - no diff available.
Modified: hadoop/hdfs/branches/HDFS-265/lib/hadoop-core-test-0.21.0-dev.jar
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/lib/hadoop-core-test-0.21.0-dev.jar?rev=813107&r1=813106&r2=813107&view=diff
==============================================================================
Binary files - no diff available.
Modified: hadoop/hdfs/branches/HDFS-265/lib/hadoop-mapred-0.21.0-dev.jar
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/lib/hadoop-mapred-0.21.0-dev.jar?rev=813107&r1=813106&r2=813107&view=diff
==============================================================================
Binary files - no diff available.
Modified: hadoop/hdfs/branches/HDFS-265/lib/hadoop-mapred-examples-0.21.0-dev.jar
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/lib/hadoop-mapred-examples-0.21.0-dev.jar?rev=813107&r1=813106&r2=813107&view=diff
==============================================================================
Binary files - no diff available.
Modified: hadoop/hdfs/branches/HDFS-265/lib/hadoop-mapred-test-0.21.0-dev.jar
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/lib/hadoop-mapred-test-0.21.0-dev.jar?rev=813107&r1=813106&r2=813107&view=diff
==============================================================================
Binary files - no diff available.
Propchange: hadoop/hdfs/branches/HDFS-265/src/contrib/hdfsproxy/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Wed Sep 9 20:40:31 2009
@@ -1,3 +1,3 @@
/hadoop/core/branches/branch-0.19/hdfs/src/contrib/hdfsproxy:713112
/hadoop/core/trunk/src/contrib/hdfsproxy:776175-784663
-/hadoop/hdfs/trunk/src/contrib/hdfsproxy:796829-800617,800619-803337,804756-805652,808672-809439
+/hadoop/hdfs/trunk/src/contrib/hdfsproxy:796829-800617,800619-803337,804756-805652,808672-809439,811495-813103
Propchange: hadoop/hdfs/branches/HDFS-265/src/java/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Wed Sep 9 20:40:31 2009
@@ -1,3 +1,3 @@
/hadoop/core/branches/branch-0.19/hdfs/src/java:713112
/hadoop/core/trunk/src/hdfs:776175-785643,785929-786278
-/hadoop/hdfs/trunk/src/java:796829-800617,800619-803337,804756-805652,808672-809439
+/hadoop/hdfs/trunk/src/java:796829-800617,800619-803337,804756-805652,808672-809439,811495-813103
Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/DFSClient.java?rev=813107&r1=813106&r2=813107&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/DFSClient.java Wed Sep 9 20:40:31 2009
@@ -67,6 +67,7 @@
import org.apache.hadoop.fs.FSOutputSummer;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.FsStatus;
import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
import org.apache.hadoop.fs.Path;
@@ -125,12 +126,15 @@
********************************************************/
public class DFSClient implements FSConstants, java.io.Closeable {
public static final Log LOG = LogFactory.getLog(DFSClient.class);
+ public static final long SERVER_DEFAULTS_VALIDITY_PERIOD = 60 * 60 * 1000L; // 1 hour
public static final int MAX_BLOCK_ACQUIRE_FAILURES = 3;
private static final int TCP_WINDOW_SIZE = 128 * 1024; // 128 KB
private final ClientProtocol namenode;
private final ClientProtocol rpcNamenode;
final UnixUserGroupInformation ugi;
volatile boolean clientRunning = true;
+ private volatile FsServerDefaults serverDefaults;
+ private volatile long serverDefaultsLastUpdate;
Random r = new Random();
final String clientName;
final LeaseChecker leasechecker = new LeaseChecker();
@@ -329,6 +333,18 @@
}
/**
+ * Get server default values for a number of configuration params.
+ */
+ public FsServerDefaults getServerDefaults() throws IOException {
+ long now = System.currentTimeMillis();
+ if (now - serverDefaultsLastUpdate > SERVER_DEFAULTS_VALIDITY_PERIOD) {
+ serverDefaults = namenode.getServerDefaults();
+ serverDefaultsLastUpdate = now;
+ }
+ return serverDefaults;
+ }
+
+ /**
* Report corrupt blocks that were discovered by the client.
*/
public void reportBadBlocks(LocatedBlock[] blocks) throws IOException {
Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java?rev=813107&r1=813106&r2=813107&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java Wed Sep 9 20:40:31 2009
@@ -434,6 +434,12 @@
dfs.metaSave(pathname);
}
+ /** {@inheritDoc} */
+ @Override
+ public FsServerDefaults getServerDefaults() throws IOException {
+ return dfs.getServerDefaults();
+ }
+
/**
* We need to find the blocks that didn't match. Likely only one
* is corrupt but we will report both to the namenode. In the future,
Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/HftpFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/HftpFileSystem.java?rev=813107&r1=813106&r2=813107&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/HftpFileSystem.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/HftpFileSystem.java Wed Sep 9 20:40:31 2009
@@ -59,6 +59,9 @@
import org.xml.sax.XMLReader;
import org.xml.sax.helpers.DefaultHandler;
import org.xml.sax.helpers.XMLReaderFactory;
+import org.apache.hadoop.hdfs.ByteRangeInputStream;
+
+
/** An implementation of a protocol for accessing filesystems over HTTP.
* The following implementation provides a limited, read-only interface
@@ -115,55 +118,48 @@
}
}
- /**
- * Open an HTTP connection to the namenode to read file data and metadata.
- * @param path The path component of the URL
- * @param query The query component of the URL
- */
- protected HttpURLConnection openConnection(String path, String query)
- throws IOException {
+
+ /*
+ Construct URL pointing to file on namenode
+ */
+ URL getNamenodeFileURL(Path f) throws IOException {
+ return getNamenodeURL("/data" + f.toUri().getPath(), "ugi=" + ugi);
+ }
+
+ /*
+ Construct URL pointing to namenode.
+ */
+ URL getNamenodeURL(String path, String query) throws IOException {
try {
final URL url = new URI("http", null, nnAddr.getHostName(),
nnAddr.getPort(), path, query, null).toURL();
if (LOG.isTraceEnabled()) {
LOG.trace("url=" + url);
}
- HttpURLConnection connection = (HttpURLConnection)url.openConnection();
- connection.setRequestMethod("GET");
- connection.connect();
- return connection;
+ return url;
} catch (URISyntaxException e) {
- throw (IOException)new IOException().initCause(e);
+ throw new IOException(e);
}
}
+ /**
+ * Open an HTTP connection to the namenode to read file data and metadata.
+ * @param path The path component of the URL
+ * @param query The query component of the URL
+ */
+ protected HttpURLConnection openConnection(String path, String query)
+ throws IOException {
+ final URL url = getNamenodeURL(path, query);
+ HttpURLConnection connection = (HttpURLConnection)url.openConnection();
+ connection.setRequestMethod("GET");
+ connection.connect();
+ return connection;
+ }
+
@Override
public FSDataInputStream open(Path f, int buffersize) throws IOException {
- HttpURLConnection connection = null;
- connection = openConnection("/data" + f.toUri().getPath(), "ugi=" + ugi);
- final InputStream in = connection.getInputStream();
- return new FSDataInputStream(new FSInputStream() {
- public int read() throws IOException {
- return in.read();
- }
- public int read(byte[] b, int off, int len) throws IOException {
- return in.read(b, off, len);
- }
-
- public void close() throws IOException {
- in.close();
- }
-
- public void seek(long pos) throws IOException {
- throw new IOException("Can't seek!");
- }
- public long getPos() throws IOException {
- throw new IOException("Position unknown!");
- }
- public boolean seekToNewSource(long targetPos) throws IOException {
- return false;
- }
- });
+ URL u = getNamenodeURL("/data" + f.toUri().getPath(), "ugi=" + ugi);
+ return new FSDataInputStream(new ByteRangeInputStream(u));
}
/** Class to parse and store a listing reply from the server. */
Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/protocol/Block.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/protocol/Block.java?rev=813107&r1=813106&r2=813107&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/protocol/Block.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/protocol/Block.java Wed Sep 9 20:40:31 2009
@@ -207,6 +207,6 @@
/** {@inheritDoc} */
public int hashCode() {
//GenerationStamp is IRRELEVANT and should not be used here
- return 37 * 17 + (int) (blockId^(blockId>>>32));
+ return (int)(blockId^(blockId>>>32));
}
}
Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java?rev=813107&r1=813106&r2=813107&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java Wed Sep 9 20:40:31 2009
@@ -23,6 +23,7 @@
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.CreateFlag;
+import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction;
import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
@@ -43,9 +44,9 @@
* Compared to the previous version the following changes have been introduced:
* (Only the latest change is reflected.
* The log of historical changes can be retrieved from the svn).
- * 46: add Block parameter to addBlock() and complete()
+ * 47: added a new method getServerDefaults(), see HDFS-578
*/
- public static final long versionID = 46L;
+ public static final long versionID = 47L;
///////////////////////////////////////
// File contents
@@ -74,6 +75,13 @@
long length) throws IOException;
/**
+ * Get server default values for a number of configuration params.
+ * @return a set of server default configuration values
+ * @throws IOException
+ */
+ public FsServerDefaults getServerDefaults() throws IOException;
+
+ /**
* Create a new file entry in the namespace.
* <p>
* This will create an empty file specified by the source path.
Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/protocol/FSConstants.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/protocol/FSConstants.java?rev=813107&r1=813106&r2=813107&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/protocol/FSConstants.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/protocol/FSConstants.java Wed Sep 9 20:40:31 2009
@@ -53,6 +53,10 @@
public static final int SMALL_BUFFER_SIZE = Math.min(BUFFER_SIZE/2, 512);
//TODO mb@media-style.com: should be conf injected?
public static final long DEFAULT_BLOCK_SIZE = 64 * 1024 * 1024;
+ public static final int DEFAULT_BYTES_PER_CHECKSUM = 512;
+ public static final int DEFAULT_WRITE_PACKET_SIZE = 64 * 1024;
+ public static final short DEFAULT_REPLICATION_FACTOR = 3;
+ public static final int DEFAULT_FILE_BUFFER_SIZE = 4096;
public static final int DEFAULT_DATA_SOCKET_SIZE = 128 * 1024;
public static final int SIZE_OF_INTEGER = Integer.SIZE / Byte.SIZE;
Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/common/GenerationStamp.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/common/GenerationStamp.java?rev=813107&r1=813106&r2=813107&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/common/GenerationStamp.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/common/GenerationStamp.java Wed Sep 9 20:40:31 2009
@@ -109,6 +109,6 @@
/** {@inheritDoc} */
public int hashCode() {
- return 37 * 17 + (int) (genstamp^(genstamp>>>32));
+ return (int) (genstamp^(genstamp>>>32));
}
}
Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=813107&r1=813106&r2=813107&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Wed Sep 9 20:40:31 2009
@@ -380,7 +380,7 @@
this.infoServer.start();
// adjust info port
this.dnRegistration.setInfoPort(this.infoServer.getPort());
- myMetrics = new DataNodeMetrics(conf, dnRegistration.getStorageID());
+ myMetrics = new DataNodeMetrics(conf, dnRegistration.getName());
// set service-level authorization security policy
if (conf.getBoolean(
Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java?rev=813107&r1=813106&r2=813107&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java Wed Sep 9 20:40:31 2009
@@ -113,7 +113,7 @@
@Override // Object
public int hashCode() {
- return 37 * 17 + (int) (blockId^(blockId>>>32));
+ return (int)(blockId^(blockId>>>32));
}
public long getGenStamp() {
Propchange: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Wed Sep 9 20:40:31 2009
@@ -1,3 +1,3 @@
/hadoop/core/branches/branch-0.19/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/DatanodeBlockInfo.java:713112
/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DatanodeBlockInfo.java:776175-785643,785929-786278
-/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java:800619-803337,804756-805652,808672-809439
+/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java:800619-803337,804756-805652,808672-809439,811495-813103
Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeActivityMBean.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeActivityMBean.java?rev=813107&r1=813106&r2=813107&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeActivityMBean.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeActivityMBean.java Wed Sep 9 20:40:31 2009
@@ -28,7 +28,7 @@
*
* This is the JMX MBean for reporting the DataNode Activity.
* The MBean is register using the name
- * "hadoop:service=DataNode,name=DataNodeActivity-<storageid>"
+ * "hadoop:service=DataNode,name=DataNodeActivity-<hostname>-<portNumber>"
*
* Many of the activity metrics are sampled and averaged on an interval
* which can be specified in the metrics config file.
@@ -57,15 +57,17 @@
final private ObjectName mbeanName;
private Random rand = new Random();
- public DataNodeActivityMBean(final MetricsRegistry mr, final String storageId) {
+ public DataNodeActivityMBean(final MetricsRegistry mr,
+ final String datanodeName) {
super(mr, "Activity statistics at the DataNode");
- String storageName;
- if (storageId.equals("")) {// Temp fix for the uninitialized storage
- storageName = "UndefinedStorageId" + rand.nextInt();
+ String name;
+ if (datanodeName.equals("")) {// Temp fix for the uninitialized name
+ name = "UndefinedDataNodeName" + rand.nextInt();
} else {
- storageName = storageId;
+ name = datanodeName.replace(":", "-");
}
- mbeanName = MBeanUtil.registerMBean("DataNode", "DataNodeActivity-" + storageName, this);
+ mbeanName = MBeanUtil.registerMBean("DataNode",
+ "DataNodeActivity-" + name, this);
}
Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java?rev=813107&r1=813106&r2=813107&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java Wed Sep 9 20:40:31 2009
@@ -90,14 +90,14 @@
new MetricsTimeVaryingRate("blockReports", registry);
- public DataNodeMetrics(Configuration conf, String storageId) {
+ public DataNodeMetrics(Configuration conf, String datanodeName) {
String sessionId = conf.get("session.id");
// Initiate reporting of Java VM metrics
JvmMetrics.init("DataNode", sessionId);
// Now the MBean for the data node
- datanodeActivityMBean = new DataNodeActivityMBean(registry, storageId);
+ datanodeActivityMBean = new DataNodeActivityMBean(registry, datanodeName);
// Create record for DataNode metrics
MetricsContext context = MetricsUtil.getContext("dfs");
Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=813107&r1=813106&r2=813107&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Wed Sep 9 20:40:31 2009
@@ -55,6 +55,7 @@
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.*;
import org.apache.hadoop.ipc.Server;
@@ -200,8 +201,7 @@
private long heartbeatExpireInterval;
//replicationRecheckInterval is how often namenode checks for new replication work
private long replicationRecheckInterval;
- // default block size of a file
- private long defaultBlockSize = 0;
+ private FsServerDefaults serverDefaults;
// allow appending to hdfs files
private boolean supportAppends = true;
@@ -413,7 +413,12 @@
10 * heartbeatInterval;
this.replicationRecheckInterval =
conf.getInt("dfs.replication.interval", 3) * 1000L;
- this.defaultBlockSize = conf.getLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
+ this.serverDefaults = new FsServerDefaults(
+ conf.getLong("dfs.block.size", DEFAULT_BLOCK_SIZE),
+ conf.getInt("io.bytes.per.checksum", DEFAULT_BYTES_PER_CHECKSUM),
+ conf.getInt("dfs.write.packet.size", DEFAULT_WRITE_PACKET_SIZE),
+ (short) conf.getInt("dfs.replication", DEFAULT_REPLICATION_FACTOR),
+ conf.getInt("io.file.buffer.size", DEFAULT_FILE_BUFFER_SIZE));
this.maxFsObjects = conf.getLong("dfs.max.objects", 0);
this.blockInvalidateLimit = Math.max(this.blockInvalidateLimit,
20*(int)(heartbeatInterval/1000));
@@ -505,7 +510,11 @@
}
long getDefaultBlockSize() {
- return defaultBlockSize;
+ return serverDefaults.getBlockSize();
+ }
+
+ FsServerDefaults getServerDefaults() {
+ return serverDefaults;
}
long getAccessTimePrecision() {
Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=813107&r1=813106&r2=813107&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Wed Sep 9 20:40:31 2009
@@ -32,6 +32,7 @@
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.Trash;
import org.apache.hadoop.fs.permission.FsPermission;
@@ -546,6 +547,11 @@
}
/** {@inheritDoc} */
+ public FsServerDefaults getServerDefaults() throws IOException {
+ return namesystem.getServerDefaults();
+ }
+
+ /** {@inheritDoc} */
public void create(String src,
FsPermission masked,
String clientName,
Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/StreamFile.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/StreamFile.java?rev=813107&r1=813106&r2=813107&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/StreamFile.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/StreamFile.java Wed Sep 9 20:40:31 2009
@@ -21,17 +21,18 @@
import java.io.OutputStream;
import java.io.PrintWriter;
import java.net.InetSocketAddress;
-
+import java.util.Enumeration;
+import java.util.List;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSInputStream;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.mortbay.jetty.InclusiveByteRange;
public class StreamFile extends DfsServlet {
/** for java.io.Serializable */
@@ -65,22 +66,104 @@
out.print("Invalid input");
return;
}
- DFSClient dfs = getDFSClient(request);
+
+ Enumeration reqRanges = request.getHeaders("Range");
+ if (reqRanges != null && !reqRanges.hasMoreElements())
+ reqRanges = null;
+
+ DFSClient dfs = getDFSClient(request);
+ long fileLen = dfs.getFileInfo(filename).getLen();
FSInputStream in = dfs.open(filename);
OutputStream os = response.getOutputStream();
- response.setHeader("Content-Disposition", "attachment; filename=\"" +
- filename + "\"");
- response.setContentType("application/octet-stream");
- byte buf[] = new byte[4096];
+
try {
- int bytesRead;
- while ((bytesRead = in.read(buf)) != -1) {
- os.write(buf, 0, bytesRead);
+ if (reqRanges != null) {
+ List ranges = InclusiveByteRange.satisfiableRanges(reqRanges,
+ fileLen);
+ StreamFile.sendPartialData(in, os, response, fileLen, ranges);
+ } else {
+ // No ranges, so send entire file
+ response.setHeader("Content-Disposition", "attachment; filename=\"" +
+ filename + "\"");
+ response.setContentType("application/octet-stream");
+ StreamFile.writeTo(in, os, 0L, fileLen);
}
} finally {
in.close();
os.close();
dfs.close();
+ }
+ }
+
+ static void sendPartialData(FSInputStream in,
+ OutputStream os,
+ HttpServletResponse response,
+ long contentLength,
+ List ranges)
+ throws IOException {
+
+ if (ranges == null || ranges.size() != 1) {
+ // if there are no satisfiable ranges, or if multiple ranges are
+ // requested (we don't support multiple range requests), send 416 response
+ response.setContentLength(0);
+ int status = HttpServletResponse.SC_REQUESTED_RANGE_NOT_SATISFIABLE;
+ response.setStatus(status);
+ response.setHeader("Content-Range",
+ InclusiveByteRange.to416HeaderRangeString(contentLength));
+ } else {
+ // if there is only a single valid range (must be satisfiable
+ // since were here now), send that range with a 206 response
+ InclusiveByteRange singleSatisfiableRange =
+ (InclusiveByteRange)ranges.get(0);
+ long singleLength = singleSatisfiableRange.getSize(contentLength);
+ response.setStatus(HttpServletResponse.SC_PARTIAL_CONTENT);
+ response.setHeader("Content-Range",
+ singleSatisfiableRange.toHeaderRangeString(contentLength));
+ System.out.println("first: "+singleSatisfiableRange.getFirst(contentLength));
+ System.out.println("singleLength: "+singleLength);
+
+ StreamFile.writeTo(in,
+ os,
+ singleSatisfiableRange.getFirst(contentLength),
+ singleLength);
}
}
+
+ static void writeTo(FSInputStream in,
+ OutputStream os,
+ long start,
+ long count)
+ throws IOException {
+ byte buf[] = new byte[4096];
+ long bytesRemaining = count;
+ int bytesRead;
+ int bytesToRead;
+
+ in.seek(start);
+
+ while (true) {
+ // number of bytes to read this iteration
+ bytesToRead = (int)(bytesRemaining<buf.length ?
+ bytesRemaining:
+ buf.length);
+
+ // number of bytes actually read this iteration
+ bytesRead = in.read(buf, 0, bytesToRead);
+
+ // if we can't read anymore, break
+ if (bytesRead == -1) {
+ break;
+ }
+
+ os.write(buf, 0, bytesRead);
+
+ bytesRemaining -= bytesRead;
+
+ // if we don't need to read anymore, break
+ if (bytesRemaining <= 0) {
+ break;
+ }
+
+ }
+ }
}
Propchange: hadoop/hdfs/branches/HDFS-265/src/test/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Wed Sep 9 20:40:31 2009
@@ -1,3 +1,3 @@
/hadoop/core/branches/branch-0.19/hdfs/src/test/hdfs:713112
/hadoop/core/trunk/src/test/hdfs:776175-785643
-/hadoop/hdfs/trunk/src/test/hdfs:796829-800617,800619-803337,804756-805652,808672-809439
+/hadoop/hdfs/trunk/src/test/hdfs:796829-800617,800619-803337,804756-805652,808672-809439,811495-813103
Propchange: hadoop/hdfs/branches/HDFS-265/src/test/hdfs-with-mr/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Wed Sep 9 20:40:31 2009
@@ -1,3 +1,3 @@
/hadoop/core/branches/branch-0.19/hdfs/src/test/hdfs-with-mr:713112
/hadoop/core/trunk/src/test/hdfs-with-mr:776175-784663
-/hadoop/hdfs/trunk/src/test/hdfs-with-mr:796829-800617,800619-803337,804756-805652,808672-809439
+/hadoop/hdfs/trunk/src/test/hdfs-with-mr:796829-800617,800619-803337,804756-805652,808672-809439,811495-813103
Modified: hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java?rev=813107&r1=813106&r2=813107&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java Wed Sep 9 20:40:31 2009
@@ -155,6 +155,8 @@
public LocatedBlocks getBlockLocations(String src, long offset, long length) throws IOException { return null; }
+ public FsServerDefaults getServerDefaults() throws IOException { return null; }
+
public void create(String src, FsPermission masked, String clientName, EnumSetWritable<CreateFlag> flag, short replication, long blockSize) throws IOException {}
public LocatedBlock append(String src, String clientName) throws IOException { return null; }
Modified: hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSPermission.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSPermission.java?rev=813107&r1=813106&r2=813107&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSPermission.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSPermission.java Wed Sep 9 20:40:31 2009
@@ -154,8 +154,8 @@
/* create a file/directory with the given umask and permission */
private void create(OpType op, Path name, short umask,
FsPermission permission) throws IOException {
- // set umask in configuration
- conf.setInt(FsPermission.UMASK_LABEL, umask);
+ // set umask in configuration, converting to padded octal
+ conf.set(FsPermission.UMASK_LABEL, String.format("%1$03o", umask));
// create the file/directory
switch (op) {
Modified: hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java?rev=813107&r1=813106&r2=813107&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java Wed Sep 9 20:40:31 2009
@@ -32,6 +32,7 @@
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.protocol.Block;
@@ -45,6 +46,7 @@
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.hdfs.protocol.FSConstants;
import org.apache.log4j.Level;
@@ -171,6 +173,33 @@
}
/**
+ * Test that server default values can be retrieved on the client side
+ */
+ public void testServerDefaults() throws IOException {
+ Configuration conf = new Configuration();
+ conf.setLong("dfs.block.size", FSConstants.DEFAULT_BLOCK_SIZE);
+ conf.setInt("io.bytes.per.checksum", FSConstants.DEFAULT_BYTES_PER_CHECKSUM);
+ conf.setInt("dfs.write.packet.size", FSConstants.DEFAULT_WRITE_PACKET_SIZE);
+ conf.setInt("dfs.replication", FSConstants.DEFAULT_REPLICATION_FACTOR + 1);
+ conf.setInt("io.file.buffer.size", FSConstants.DEFAULT_FILE_BUFFER_SIZE);
+ MiniDFSCluster cluster = new MiniDFSCluster(conf,
+ FSConstants.DEFAULT_REPLICATION_FACTOR + 1, true, null);
+ cluster.waitActive();
+ FileSystem fs = cluster.getFileSystem();
+ try {
+ FsServerDefaults serverDefaults = fs.getServerDefaults();
+ assertEquals(FSConstants.DEFAULT_BLOCK_SIZE, serverDefaults.getBlockSize());
+ assertEquals(FSConstants.DEFAULT_BYTES_PER_CHECKSUM, serverDefaults.getBytesPerChecksum());
+ assertEquals(FSConstants.DEFAULT_WRITE_PACKET_SIZE, serverDefaults.getWritePacketSize());
+ assertEquals(FSConstants.DEFAULT_REPLICATION_FACTOR + 1, serverDefaults.getReplication());
+ assertEquals(FSConstants.DEFAULT_FILE_BUFFER_SIZE, serverDefaults.getFileBufferSize());
+ } finally {
+ fs.close();
+ cluster.shutdown();
+ }
+ }
+
+ /**
* Test that file data becomes available before file is closed.
*/
public void testFileCreation() throws IOException {
Modified: hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/security/TestPermission.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/security/TestPermission.java?rev=813107&r1=813106&r2=813107&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/security/TestPermission.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/security/TestPermission.java Wed Sep 9 20:40:31 2009
@@ -67,7 +67,7 @@
public void testCreate() throws Exception {
Configuration conf = new Configuration();
conf.setBoolean("dfs.permissions", true);
- conf.setInt(FsPermission.UMASK_LABEL, 0);
+ conf.set(FsPermission.UMASK_LABEL, "000");
MiniDFSCluster cluster = null;
FileSystem fs = null;
@@ -95,7 +95,7 @@
checkPermission(fs, "/b1/b2", inheritPerm);
checkPermission(fs, "/b1/b2/b3.txt", filePerm);
- conf.setInt(FsPermission.UMASK_LABEL, 0022);
+ conf.set(FsPermission.UMASK_LABEL, "022");
FsPermission permission =
FsPermission.createImmutable((short)0666);
FileSystem.mkdirs(fs, new Path("/c1"), new FsPermission(permission));
Propchange: hadoop/hdfs/branches/HDFS-265/src/webapps/datanode/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Wed Sep 9 20:40:31 2009
@@ -1,3 +1,3 @@
/hadoop/core/branches/branch-0.19/hdfs/src/webapps/datanode:713112
/hadoop/core/trunk/src/webapps/datanode:776175-784663
-/hadoop/hdfs/trunk/src/webapps/datanode:796829-800617,800619-803337,804756-805652,808672-809439
+/hadoop/hdfs/trunk/src/webapps/datanode:796829-800617,800619-803337,804756-805652,808672-809439,811495-813103
Propchange: hadoop/hdfs/branches/HDFS-265/src/webapps/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Wed Sep 9 20:40:31 2009
@@ -1,3 +1,3 @@
/hadoop/core/branches/branch-0.19/hdfs/src/webapps/hdfs:713112
/hadoop/core/trunk/src/webapps/hdfs:776175-784663
-/hadoop/hdfs/trunk/src/webapps/hdfs:796829-800617,800619-803337,804756-805652,808672-809439
+/hadoop/hdfs/trunk/src/webapps/hdfs:796829-800617,800619-803337,804756-805652,808672-809439,811495-813103
Propchange: hadoop/hdfs/branches/HDFS-265/src/webapps/secondary/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Wed Sep 9 20:40:31 2009
@@ -1,3 +1,3 @@
/hadoop/core/branches/branch-0.19/hdfs/src/webapps/secondary:713112
/hadoop/core/trunk/src/webapps/secondary:776175-784663
-/hadoop/hdfs/trunk/src/webapps/secondary:796829-800617,800619-803337,804756-805652,808672-809439
+/hadoop/hdfs/trunk/src/webapps/secondary:796829-800617,800619-803337,804756-805652,808672-809439,811495-813103