You are viewing a plain text version of this content. The canonical link for it is here.
Posted to hdfs-commits@hadoop.apache.org by sz...@apache.org on 2012/08/19 07:51:40 UTC
svn commit: r1374697 - in
/hadoop/common/branches/branch-2/hadoop-hdfs-project: ./ hadoop-hdfs/
hadoop-hdfs/src/main/java/ hadoop-hdfs/src/main/java/org/apache/hadoop/fs/
hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/
hadoop-hdfs/src/main/java/org/a...
Author: szetszwo
Date: Sun Aug 19 05:51:37 2012
New Revision: 1374697
URL: http://svn.apache.org/viewvc?rev=1374697&view=rev
Log:
svn merge -c 1374696 from trunk for HADOOP-8240. Add a new API to allow users to specify a checksum type on FileSystem.create(..).
Modified:
hadoop/common/branches/branch-2/hadoop-hdfs-project/ (props changed)
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/ (props changed)
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/ (props changed)
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/native/ (props changed)
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/ (props changed)
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/ (props changed)
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/ (props changed)
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/ (props changed)
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
Propchange: hadoop/common/branches/branch-2/hadoop-hdfs-project/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project:r1374696
Propchange: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1374696
Propchange: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1374696
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java?rev=1374697&r1=1374696&r2=1374697&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java Sun Aug 19 05:51:37 2012
@@ -31,6 +31,7 @@ import org.apache.hadoop.classification.
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.hdfs.CorruptFileBlockIterator;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSUtil;
@@ -93,10 +94,10 @@ public class Hdfs extends AbstractFileSy
public HdfsDataOutputStream createInternal(Path f,
EnumSet<CreateFlag> createFlag, FsPermission absolutePermission,
int bufferSize, short replication, long blockSize, Progressable progress,
- int bytesPerChecksum, boolean createParent) throws IOException {
+ ChecksumOpt checksumOpt, boolean createParent) throws IOException {
return new HdfsDataOutputStream(dfs.primitiveCreate(getUriPath(f),
absolutePermission, createFlag, createParent, replication, blockSize,
- progress, bufferSize, bytesPerChecksum), getStatistics());
+ progress, bufferSize, checksumOpt), getStatistics());
}
@Override
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java?rev=1374697&r1=1374696&r2=1374697&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java Sun Aug 19 05:51:37 2012
@@ -93,6 +93,7 @@ import org.apache.hadoop.fs.HdfsBlockLoc
import org.apache.hadoop.fs.InvalidPathException;
import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
import org.apache.hadoop.fs.Options;
+import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnresolvedLinkException;
@@ -205,8 +206,7 @@ public class DFSClient implements java.i
final int maxBlockAcquireFailures;
final int confTime;
final int ioBufferSize;
- final DataChecksum.Type checksumType;
- final int bytesPerChecksum;
+ final ChecksumOpt defaultChecksumOpt;
final int writePacketSize;
final int socketTimeout;
final int socketCacheCapacity;
@@ -245,9 +245,7 @@ public class DFSClient implements java.i
ioBufferSize = conf.getInt(
CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY,
CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT);
- checksumType = getChecksumType(conf);
- bytesPerChecksum = conf.getInt(DFS_BYTES_PER_CHECKSUM_KEY,
- DFS_BYTES_PER_CHECKSUM_DEFAULT);
+ defaultChecksumOpt = getChecksumOptFromConf(conf);
socketTimeout = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY,
HdfsServerConstants.READ_TIMEOUT);
/** dfs.write.packet.size is an internal config variable */
@@ -302,9 +300,32 @@ public class DFSClient implements java.i
}
}
- private DataChecksum createChecksum() {
- return DataChecksum.newDataChecksum(
- checksumType, bytesPerChecksum);
+ // Construct a checksum option from conf
+ private ChecksumOpt getChecksumOptFromConf(Configuration conf) {
+ DataChecksum.Type type = getChecksumType(conf);
+ int bytesPerChecksum = conf.getInt(DFS_BYTES_PER_CHECKSUM_KEY,
+ DFS_BYTES_PER_CHECKSUM_DEFAULT);
+ return new ChecksumOpt(type, bytesPerChecksum);
+ }
+
+ // create a DataChecksum with the default option.
+ private DataChecksum createChecksum() throws IOException {
+ return createChecksum(null);
+ }
+
+ private DataChecksum createChecksum(ChecksumOpt userOpt)
+ throws IOException {
+ // Fill in any missing field with the default.
+ ChecksumOpt myOpt = ChecksumOpt.processChecksumOpt(
+ defaultChecksumOpt, userOpt);
+ DataChecksum dataChecksum = DataChecksum.newDataChecksum(
+ myOpt.getChecksumType(),
+ myOpt.getBytesPerChecksum());
+ if (dataChecksum == null) {
+ throw new IOException("Invalid checksum type specified: "
+ + myOpt.getChecksumType().name());
+ }
+ return dataChecksum;
}
}
@@ -1181,12 +1202,13 @@ public class DFSClient implements java.i
return create(src, FsPermission.getDefault(),
overwrite ? EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)
: EnumSet.of(CreateFlag.CREATE), replication, blockSize, progress,
- buffersize);
+ buffersize, null);
}
/**
* Call {@link #create(String, FsPermission, EnumSet, boolean, short,
- * long, Progressable, int)} with <code>createParent</code> set to true.
+ * long, Progressable, int, ChecksumOpt)} with <code>createParent</code>
+ * set to true.
*/
public DFSOutputStream create(String src,
FsPermission permission,
@@ -1194,10 +1216,11 @@ public class DFSClient implements java.i
short replication,
long blockSize,
Progressable progress,
- int buffersize)
+ int buffersize,
+ ChecksumOpt checksumOpt)
throws IOException {
return create(src, permission, flag, true,
- replication, blockSize, progress, buffersize);
+ replication, blockSize, progress, buffersize, checksumOpt);
}
/**
@@ -1215,6 +1238,7 @@ public class DFSClient implements java.i
* @param blockSize maximum block size
* @param progress interface for reporting client progress
* @param buffersize underlying buffer size
+ * @param checksumOpt checksum options
*
* @return output stream
*
@@ -1228,8 +1252,8 @@ public class DFSClient implements java.i
short replication,
long blockSize,
Progressable progress,
- int buffersize)
- throws IOException {
+ int buffersize,
+ ChecksumOpt checksumOpt) throws IOException {
checkOpen();
if (permission == null) {
permission = FsPermission.getDefault();
@@ -1240,7 +1264,7 @@ public class DFSClient implements java.i
}
final DFSOutputStream result = DFSOutputStream.newStreamForCreate(this,
src, masked, flag, createParent, replication, blockSize, progress,
- buffersize, dfsClientConf.createChecksum());
+ buffersize, dfsClientConf.createChecksum(checksumOpt));
beginFileLease(src, result);
return result;
}
@@ -1278,15 +1302,13 @@ public class DFSClient implements java.i
long blockSize,
Progressable progress,
int buffersize,
- int bytesPerChecksum)
+ ChecksumOpt checksumOpt)
throws IOException, UnresolvedLinkException {
checkOpen();
CreateFlag.validate(flag);
DFSOutputStream result = primitiveAppend(src, flag, buffersize, progress);
if (result == null) {
- DataChecksum checksum = DataChecksum.newDataChecksum(
- dfsClientConf.checksumType,
- bytesPerChecksum);
+ DataChecksum checksum = dfsClientConf.createChecksum(checksumOpt);
result = DFSOutputStream.newStreamForCreate(this, src, absPermission,
flag, createParent, replication, blockSize, progress, buffersize,
checksum);
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java?rev=1374697&r1=1374696&r2=1374697&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java Sun Aug 19 05:51:37 2012
@@ -42,6 +42,7 @@ import org.apache.hadoop.fs.FsStatus;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
import org.apache.hadoop.fs.Options;
+import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.RemoteIterator;
@@ -258,19 +259,19 @@ public class DistributedFileSystem exten
public HdfsDataOutputStream create(Path f, FsPermission permission,
boolean overwrite, int bufferSize, short replication, long blockSize,
Progressable progress) throws IOException {
- return create(f, permission,
+ return this.create(f, permission,
overwrite ? EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)
: EnumSet.of(CreateFlag.CREATE), bufferSize, replication,
- blockSize, progress);
+ blockSize, progress, null);
}
@Override
public HdfsDataOutputStream create(Path f, FsPermission permission,
EnumSet<CreateFlag> cflags, int bufferSize, short replication, long blockSize,
- Progressable progress) throws IOException {
+ Progressable progress, ChecksumOpt checksumOpt) throws IOException {
statistics.incrementWriteOps(1);
final DFSOutputStream out = dfs.create(getPathName(f), permission, cflags,
- replication, blockSize, progress, bufferSize);
+ replication, blockSize, progress, bufferSize, checksumOpt);
return new HdfsDataOutputStream(out, statistics);
}
@@ -279,11 +280,11 @@ public class DistributedFileSystem exten
protected HdfsDataOutputStream primitiveCreate(Path f,
FsPermission absolutePermission, EnumSet<CreateFlag> flag, int bufferSize,
short replication, long blockSize, Progressable progress,
- int bytesPerChecksum) throws IOException {
+ ChecksumOpt checksumOpt) throws IOException {
statistics.incrementWriteOps(1);
return new HdfsDataOutputStream(dfs.primitiveCreate(getPathName(f),
absolutePermission, flag, true, replication, blockSize,
- progress, bufferSize, bytesPerChecksum),statistics);
+ progress, bufferSize, checksumOpt),statistics);
}
/**
@@ -298,7 +299,8 @@ public class DistributedFileSystem exten
flag.add(CreateFlag.CREATE);
}
return new HdfsDataOutputStream(dfs.create(getPathName(f), permission, flag,
- false, replication, blockSize, progress, bufferSize), statistics);
+ false, replication, blockSize, progress,
+ bufferSize, null), statistics);
}
@Override
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java?rev=1374697&r1=1374696&r2=1374697&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java Sun Aug 19 05:51:37 2012
@@ -70,6 +70,7 @@ import org.apache.hadoop.hdfs.protocol.p
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
@@ -134,6 +135,7 @@ import org.apache.hadoop.hdfs.server.pro
import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
import org.apache.hadoop.io.EnumSetWritable;
import org.apache.hadoop.io.Text;
+import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.security.token.Token;
import com.google.protobuf.ByteString;
@@ -1003,7 +1005,8 @@ public class PBHelper {
fs.getWritePacketSize(), (short) fs.getReplication(),
fs.getFileBufferSize(),
fs.getEncryptDataTransfer(),
- fs.getTrashInterval());
+ fs.getTrashInterval(),
+ DataChecksum.Type.valueOf(fs.getChecksumType().name()));
}
public static FsServerDefaultsProto convert(FsServerDefaults fs) {
@@ -1015,7 +1018,9 @@ public class PBHelper {
.setReplication(fs.getReplication())
.setFileBufferSize(fs.getFileBufferSize())
.setEncryptDataTransfer(fs.getEncryptDataTransfer())
- .setTrashInterval(fs.getTrashInterval()).build();
+ .setTrashInterval(fs.getTrashInterval())
+ .setChecksumType(ChecksumTypeProto.valueOf(fs.getChecksumType().name()))
+ .build();
}
public static FsPermissionProto convert(FsPermission p) {
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java?rev=1374697&r1=1374696&r2=1374697&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java Sun Aug 19 05:51:37 2012
@@ -215,7 +215,7 @@ public class DatanodeWebHdfsMethods {
fullpath, permission.getFsPermission(),
overwrite.getValue() ? EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)
: EnumSet.of(CreateFlag.CREATE),
- replication.getValue(conf), blockSize.getValue(conf), null, b), null);
+ replication.getValue(conf), blockSize.getValue(conf), null, b, null), null);
IOUtils.copyBytes(in, out, b);
out.close();
out = null;
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1374697&r1=1374696&r2=1374697&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Sun Aug 19 05:51:37 2012
@@ -25,6 +25,8 @@ import static org.apache.hadoop.hdfs.DFS
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY;
@@ -195,6 +197,7 @@ import org.apache.hadoop.security.token.
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.delegation.DelegationKey;
import org.apache.hadoop.util.Daemon;
+import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.VersionInfo;
import org.mortbay.util.ajax.JSON;
@@ -461,6 +464,16 @@ public class FSNamesystem implements Nam
"must not be specified if HA is not enabled.");
}
+ // Get the checksum type from config
+ String checksumTypeStr = conf.get(DFS_CHECKSUM_TYPE_KEY, DFS_CHECKSUM_TYPE_DEFAULT);
+ DataChecksum.Type checksumType;
+ try {
+ checksumType = DataChecksum.Type.valueOf(checksumTypeStr);
+ } catch (IllegalArgumentException iae) {
+ throw new IOException("Invalid checksum type in "
+ + DFS_CHECKSUM_TYPE_KEY + ": " + checksumTypeStr);
+ }
+
this.serverDefaults = new FsServerDefaults(
conf.getLongBytes(DFS_BLOCK_SIZE_KEY, DFS_BLOCK_SIZE_DEFAULT),
conf.getInt(DFS_BYTES_PER_CHECKSUM_KEY, DFS_BYTES_PER_CHECKSUM_DEFAULT),
@@ -468,7 +481,8 @@ public class FSNamesystem implements Nam
(short) conf.getInt(DFS_REPLICATION_KEY, DFS_REPLICATION_DEFAULT),
conf.getInt(IO_FILE_BUFFER_SIZE_KEY, IO_FILE_BUFFER_SIZE_DEFAULT),
conf.getBoolean(DFS_ENCRYPT_DATA_TRANSFER_KEY, DFS_ENCRYPT_DATA_TRANSFER_DEFAULT),
- conf.getLong(FS_TRASH_INTERVAL_KEY, FS_TRASH_INTERVAL_DEFAULT));
+ conf.getLong(FS_TRASH_INTERVAL_KEY, FS_TRASH_INTERVAL_DEFAULT),
+ checksumType);
this.maxFsObjects = conf.getLong(DFS_NAMENODE_MAX_OBJECTS_KEY,
DFS_NAMENODE_MAX_OBJECTS_DEFAULT);
Propchange: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:r1374696
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto?rev=1374697&r1=1374696&r2=1374697&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto Sun Aug 19 05:51:37 2012
@@ -179,6 +179,15 @@ message HdfsFileStatusProto {
}
/**
+ * Checksum algorithms/types used in HDFS
+ */
+enum ChecksumTypeProto {
+ NULL = 0;
+ CRC32 = 1;
+ CRC32C = 2;
+}
+
+/**
* HDFS Server Defaults
*/
message FsServerDefaultsProto {
@@ -189,6 +198,7 @@ message FsServerDefaultsProto {
required uint32 fileBufferSize = 5;
optional bool encryptDataTransfer = 6 [default = false];
optional uint64 trashInterval = 7 [default = 0];
+ optional ChecksumTypeProto checksumType = 8 [default = CRC32];
}
Propchange: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1374696
Propchange: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1374696
Propchange: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1374696
Propchange: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1374696
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java?rev=1374697&r1=1374696&r2=1374697&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java Sun Aug 19 05:51:37 2012
@@ -28,6 +28,7 @@ import java.io.IOException;
import java.net.URI;
import java.security.PrivilegedExceptionAction;
import java.util.Arrays;
+import java.util.EnumSet;
import java.util.Random;
import org.apache.commons.lang.ArrayUtils;
@@ -36,16 +37,19 @@ import org.apache.hadoop.conf.Configurat
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.BlockStorageLocation;
import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.VolumeId;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.Time;
import org.apache.log4j.Level;
import org.junit.Test;
@@ -664,4 +668,54 @@ public class TestDistributedFileSystem {
(l.getVolumeIds()[0].isValid()) ^ (l.getVolumeIds()[1].isValid()));
}
}
+
+ @Test
+ public void testCreateWithCustomChecksum() throws Exception {
+ Configuration conf = getTestConfiguration();
+ final long grace = 1000L;
+ MiniDFSCluster cluster = null;
+ Path testBasePath = new Path("/test/csum");
+ // create args
+ Path path1 = new Path(testBasePath, "file_with_crc1");
+ Path path2 = new Path(testBasePath, "file_with_crc2");
+ ChecksumOpt opt1 = new ChecksumOpt(DataChecksum.Type.CRC32C, 512);
+ ChecksumOpt opt2 = new ChecksumOpt(DataChecksum.Type.CRC32, 512);
+
+ // common args
+ FsPermission perm = FsPermission.getDefault().applyUMask(
+ FsPermission.getUMask(conf));
+ EnumSet<CreateFlag> flags = EnumSet.of(CreateFlag.OVERWRITE,
+ CreateFlag.CREATE);
+ short repl = 1;
+
+ try {
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+ FileSystem dfs = cluster.getFileSystem();
+
+ dfs.mkdirs(testBasePath);
+
+ // create two files with different checksum types
+ FSDataOutputStream out1 = dfs.create(path1, perm, flags, 4096, repl,
+ 131072L, null, opt1);
+ FSDataOutputStream out2 = dfs.create(path2, perm, flags, 4096, repl,
+ 131072L, null, opt2);
+
+ for (int i = 0; i < 1024; i++) {
+ out1.write(i);
+ out2.write(i);
+ }
+ out1.close();
+ out2.close();
+
+ // the two checksums must be different.
+ FileChecksum sum1 = dfs.getFileChecksum(path1);
+ FileChecksum sum2 = dfs.getFileChecksum(path2);
+ assertFalse(sum1.equals(sum2));
+ } finally {
+ if (cluster != null) {
+ cluster.getFileSystem().delete(testBasePath, true);
+ cluster.shutdown();
+ }
+ }
+ }
}