Posted to hdfs-commits@hadoop.apache.org by su...@apache.org on 2009/10/01 07:31:40 UTC
svn commit: r820536 [1/4] - in /hadoop/hdfs/branches/branch-0.21: ./
src/ant/org/apache/hadoop/ant/ src/contrib/fuse-dfs/src/test/
src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/
src/contrib/hdfsproxy/src/test/org/apache/hadoop/hdfsproxy/ s...
Author: suresh
Date: Thu Oct 1 05:31:37 2009
New Revision: 820536
URL: http://svn.apache.org/viewvc?rev=820536&view=rev
Log:
HDFS-631. merge -r 820532:820533 from trunk to release 21. Contributed by Jitendra Nath Pandey.
Added:
hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/HdfsConfiguration.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDeprecatedKeys.java
Modified:
hadoop/hdfs/branches/branch-0.21/CHANGES.txt
hadoop/hdfs/branches/branch-0.21/src/ant/org/apache/hadoop/ant/DfsTask.java
hadoop/hdfs/branches/branch-0.21/src/contrib/fuse-dfs/src/test/TestFuseDFS.java
hadoop/hdfs/branches/branch-0.21/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/HdfsProxy.java
hadoop/hdfs/branches/branch-0.21/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/LdapIpDirFilter.java
hadoop/hdfs/branches/branch-0.21/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyFileDataServlet.java
hadoop/hdfs/branches/branch-0.21/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyFilter.java
hadoop/hdfs/branches/branch-0.21/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyListPathsServlet.java
hadoop/hdfs/branches/branch-0.21/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyStreamFile.java
hadoop/hdfs/branches/branch-0.21/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyUgiManager.java
hadoop/hdfs/branches/branch-0.21/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyUtil.java
hadoop/hdfs/branches/branch-0.21/src/contrib/hdfsproxy/src/test/org/apache/hadoop/hdfsproxy/TestHdfsProxy.java
hadoop/hdfs/branches/branch-0.21/src/contrib/hdfsproxy/src/test/org/apache/hadoop/hdfsproxy/TestProxyUtil.java
hadoop/hdfs/branches/branch-0.21/src/contrib/thriftfs/src/java/org/apache/hadoop/thriftfs/HadoopThriftServer.java
hadoop/hdfs/branches/branch-0.21/src/contrib/thriftfs/test/org/apache/hadoop/thriftfs/TestThriftfs.java
hadoop/hdfs/branches/branch-0.21/src/java/hdfs-default.xml
hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/DFSClient.java
hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/HsftpFileSystem.java
hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/protocol/FSConstants.java
hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java
hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicyDefault.java
hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java
hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/DfsServlet.java
hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java
hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java
hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java
hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/StreamFile.java
hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMetrics.java
hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java
hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/tools/DFSck.java
hadoop/hdfs/branches/branch-0.21/src/test/aop/org/apache/hadoop/fi/FiConfig.java
hadoop/hdfs/branches/branch-0.21/src/test/aop/org/apache/hadoop/hdfs/TestFiHFlush.java
hadoop/hdfs/branches/branch-0.21/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/cli/TestHDFSCLI.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/fs/TestGlobPaths.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/fs/TestUrlStreamHandler.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/fs/loadGenerator/TestLoadGenerator.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/fs/permission/TestStickyBit.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/AppendTestUtil.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/BenchmarkThroughput.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/DataNodeCluster.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestAbandonBlock.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestBlockMissingException.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestBlockReport.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestCrcCorruption.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSFinalize.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSMkdirs.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSPermission.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRename.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRollback.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSShell.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSShellGenericOptions.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSStartupVersions.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgrade.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeDeath.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeReport.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDecommission.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDefaultNameNodePort.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestFSInputChecker.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestFSOutputSummer.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend2.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend3.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCorruption.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreationClient.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreationDelete.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreationEmpty.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestFileStatus.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestGetBlocks.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSTrash.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestHFlush.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestLease.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestLocalDFS.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestModTime.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestPread.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestQuota.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestReadWhileWriting.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestRenameWhileOpen.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestReplication.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestRestartDFS.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestSafeMode.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestSeekBug.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestSetTimes.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestSetrepIncreasing.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestSmallBlock.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/common/TestDistributedUpgrade.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRestart.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestWriteToReplica.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestAccessTokenWithDFS.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlocksWithNotEnoughRacks.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestComputeInvalidateWork.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFileLimit.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestHeartbeatHandling.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNodeCount.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestOverReplicatedBlocks.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestReplicationPolicy.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestUnderReplicatedBlocks.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNNMetricFilesInGetListingOps.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/security/TestPermission.java
hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/tools/TestJMXGet.java
Modified: hadoop/hdfs/branches/branch-0.21/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/CHANGES.txt?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/CHANGES.txt (original)
+++ hadoop/hdfs/branches/branch-0.21/CHANGES.txt Thu Oct 1 05:31:37 2009
@@ -60,7 +60,7 @@
HDFS-567. Add block forensics contrib tool to print history of corrupt and
missing blocks from the HDFS logs.
- (Bill Zeller, Jithendra Pandey via suresh).
+ (Bill Zeller, Jitendra Nath Pandey via suresh).
HDFS-610. Support o.a.h.fs.FileContext. (Sanjay Radia via szetszwo)
@@ -95,6 +95,9 @@
HDFS-642. Support pipeline close and close error recovery. (hairong)
+ HDFS-631. Rename configuration keys towards API standardization and
+ backward compatibility. (Jitendra Nath Pandey via suresh)
+
IMPROVEMENTS
HDFS-381. Remove blocks from DataNode maps when corresponding file
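For readers tracing the change: the backward compatibility promised by HDFS-631 comes from the new HdfsConfiguration class added in this commit, which registers each legacy key as a deprecated alias of its renamed counterpart and installs the HDFS resource files as defaults. Below is a minimal sketch of that idea, assuming Hadoop common's Configuration.addDeprecation and Configuration.addDefaultResource APIs; the mappings shown are a small illustrative subset taken from the hdfs-default.xml renames in this diff, and the full table lives in the real HdfsConfiguration.java, exercised by the new TestDeprecatedKeys.

    package org.apache.hadoop.hdfs;

    import org.apache.hadoop.conf.Configuration;

    // Sketch only -- not the committed file. Illustrates the deprecation
    // shim that keeps pre-rename configurations working.
    public class HdfsConfiguration extends Configuration {
      static {
        // Old name -> new name; lookups through either key resolve to
        // the same value once the alias is registered.
        Configuration.addDeprecation("dfs.name.dir",
            new String[] { "dfs.namenode.name.dir" });
        Configuration.addDeprecation("dfs.permissions",
            new String[] { "dfs.permissions.enabled" });
        Configuration.addDeprecation("dfs.block.size",
            new String[] { "dfs.blocksize" });
        // Make the HDFS resource files default resources so every
        // HdfsConfiguration instance picks them up.
        Configuration.addDefaultResource("hdfs-default.xml");
        Configuration.addDefaultResource("hdfs-site.xml");
      }

      public HdfsConfiguration() {
        super();
      }

      public HdfsConfiguration(boolean loadDefaults) {
        super(loadDefaults);
      }

      public HdfsConfiguration(Configuration conf) {
        super(conf);
      }
    }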
Modified: hadoop/hdfs/branches/branch-0.21/src/ant/org/apache/hadoop/ant/DfsTask.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/ant/org/apache/hadoop/ant/DfsTask.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/ant/org/apache/hadoop/ant/DfsTask.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/ant/org/apache/hadoop/ant/DfsTask.java Thu Oct 1 05:31:37 2009
@@ -34,6 +34,8 @@
import org.apache.tools.ant.types.Path;
import org.apache.hadoop.util.ToolRunner;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+
/**
* {@link org.apache.hadoop.fs.FsShell FsShell} wrapper for ant Task.
*/
@@ -180,7 +182,7 @@
try {
pushContext();
- Configuration conf = new Configuration();
+ Configuration conf = new HdfsConfiguration();
conf.setClassLoader(confloader);
exit_code = ToolRunner.run(conf, shell,
argv.toArray(new String[argv.size()]));
Modified: hadoop/hdfs/branches/branch-0.21/src/contrib/fuse-dfs/src/test/TestFuseDFS.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/contrib/fuse-dfs/src/test/TestFuseDFS.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/contrib/fuse-dfs/src/test/TestFuseDFS.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/contrib/fuse-dfs/src/test/TestFuseDFS.java Thu Oct 1 05:31:37 2009
@@ -113,8 +113,8 @@
static public void startStuff() {
try {
- Configuration conf = new Configuration();
- conf.setBoolean("dfs.permissions",false);
+ Configuration conf = new HdfsConfiguration();
+ conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY,false);
cluster = new MiniDFSCluster(conf, 1, true, null);
fileSys = (DistributedFileSystem)cluster.getFileSystem();
assertTrue(fileSys.getFileStatus(new Path("/")).isDir());
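The TestFuseDFS hunk above is the template repeated across most of the files in this commit: a plain Configuration becomes an HdfsConfiguration, and raw string keys become DFSConfigKeys constants. A self-contained before/after sketch (the demo class name is hypothetical):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class ConfStyleDemo {
      public static void main(String[] args) {
        // Before: raw string key on a plain Configuration. A typo in the
        // literal fails silently at runtime.
        Configuration oldStyle = new Configuration();
        oldStyle.setBoolean("dfs.permissions", false);

        // After: HdfsConfiguration carries the deprecation mappings and
        // HDFS defaults; the constant names the renamed key
        // ("dfs.permissions.enabled"), so a typo fails at compile time.
        Configuration newStyle = new HdfsConfiguration();
        newStyle.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);
      }
    }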
Modified: hadoop/hdfs/branches/branch-0.21/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/HdfsProxy.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/HdfsProxy.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/HdfsProxy.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/HdfsProxy.java Thu Oct 1 05:31:37 2009
@@ -29,6 +29,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
/**
* An HTTPS/SSL proxy to HDFS, implementing certificate-based access control.
@@ -57,7 +58,7 @@
InetSocketAddress nnAddr = NetUtils.createSocketAddr(nn);
LOG.info("HDFS NameNode is at: " + nnAddr.getHostName() + ":" + nnAddr.getPort());
- Configuration sslConf = new Configuration(false);
+ Configuration sslConf = new HdfsConfiguration(false);
sslConf.addResource(conf.get("hdfsproxy.https.server.keystore.resource",
"ssl-server.xml"));
// unit testing
@@ -67,7 +68,7 @@
this.server = new ProxyHttpServer(sslAddr, sslConf);
this.server.setAttribute("proxy.https.port", server.getPort());
this.server.setAttribute("name.node.address", nnAddr);
- this.server.setAttribute("name.conf", new Configuration());
+ this.server.setAttribute("name.conf", new HdfsConfiguration());
this.server.addGlobalFilter("ProxyFilter", ProxyFilter.class.getName(), null);
this.server.addServlet("listPaths", "/listPaths/*", ProxyListPathsServlet.class);
this.server.addServlet("data", "/data/*", ProxyFileDataServlet.class);
@@ -129,7 +130,7 @@
return null;
}
if (conf == null) {
- conf = new Configuration(false);
+ conf = new HdfsConfiguration(false);
conf.addResource("hdfsproxy-default.xml");
}
Modified: hadoop/hdfs/branches/branch-0.21/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/LdapIpDirFilter.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/LdapIpDirFilter.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/LdapIpDirFilter.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/LdapIpDirFilter.java Thu Oct 1 05:31:37 2009
@@ -48,6 +48,8 @@
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+
public class LdapIpDirFilter implements Filter {
public static final Log LOG = LogFactory.getLog(LdapIpDirFilter.class);
@@ -89,7 +91,7 @@
/** {@inheritDoc} */
public void init(FilterConfig filterConfig) throws ServletException {
ServletContext context = filterConfig.getServletContext();
- Configuration conf = new Configuration(false);
+ Configuration conf = new HdfsConfiguration(false);
conf.addResource("hdfsproxy-default.xml");
conf.addResource("hdfsproxy-site.xml");
// extract namenode from source conf.
Modified: hadoop/hdfs/branches/branch-0.21/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyFileDataServlet.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyFileDataServlet.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyFileDataServlet.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyFileDataServlet.java Thu Oct 1 05:31:37 2009
@@ -31,6 +31,8 @@
import org.apache.hadoop.hdfs.server.namenode.FileDataServlet;
import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+
/** {@inheritDoc} */
public class ProxyFileDataServlet extends FileDataServlet {
/** For java.io.Serializable */
@@ -41,7 +43,7 @@
public void init() throws ServletException {
ServletContext context = getServletContext();
if (context.getAttribute("name.conf") == null) {
- context.setAttribute("name.conf", new Configuration());
+ context.setAttribute("name.conf", new HdfsConfiguration());
}
}
Modified: hadoop/hdfs/branches/branch-0.21/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyFilter.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyFilter.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyFilter.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyFilter.java Thu Oct 1 05:31:37 2009
@@ -50,6 +50,8 @@
import org.apache.hadoop.security.UnixUserGroupInformation;
import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+
public class ProxyFilter implements Filter {
public static final Log LOG = LogFactory.getLog(ProxyFilter.class);
@@ -73,7 +75,7 @@
private static volatile Map<String, Set<Path>> permsMap;
private static volatile Map<String, Set<BigInteger>> certsMap;
static {
- Configuration conf = new Configuration(false);
+ Configuration conf = new HdfsConfiguration(false);
conf.addResource("hdfsproxy-default.xml");
Map<String, Set<Path>> pMap = getPermMap(conf);
permsMap = pMap != null ? pMap : new HashMap<String, Set<Path>>();
@@ -85,7 +87,7 @@
/** {@inheritDoc} */
public void init(FilterConfig filterConfig) throws ServletException {
ServletContext context = filterConfig.getServletContext();
- Configuration conf = new Configuration(false);
+ Configuration conf = new HdfsConfiguration(false);
conf.addResource("hdfsproxy-default.xml");
conf.addResource("ssl-server.xml");
conf.addResource("hdfsproxy-site.xml");
@@ -95,7 +97,7 @@
}
InetSocketAddress nAddr = NetUtils.createSocketAddr(nn);
context.setAttribute("name.node.address", nAddr);
- context.setAttribute("name.conf", new Configuration());
+ context.setAttribute("name.conf", new HdfsConfiguration());
context.setAttribute("org.apache.hadoop.hdfsproxy.conf", conf);
LOG.info("proxyFilter initialization success: " + nn);
@@ -108,7 +110,7 @@
LOG.warn("HdfsProxy user permissions file not found");
return null;
}
- Configuration permConf = new Configuration(false);
+ Configuration permConf = new HdfsConfiguration(false);
permConf.addResource(permLoc);
Map<String, Set<Path>> map = new HashMap<String, Set<Path>>();
for (Map.Entry<String, String> e : permConf) {
@@ -135,7 +137,7 @@
LOG.warn("HdfsProxy user certs file not found");
return null;
}
- Configuration certsConf = new Configuration(false);
+ Configuration certsConf = new HdfsConfiguration(false);
certsConf.addResource(certsLoc);
Map<String, Set<BigInteger>> map = new HashMap<String, Set<BigInteger>>();
for (Map.Entry<String, String> e : certsConf) {
@@ -284,7 +286,7 @@
}
} else if (RELOAD_PATTERN.matcher(servletPath).matches()
&& checkUser("Admin", certs[0])) {
- Configuration conf = new Configuration(false);
+ Configuration conf = new HdfsConfiguration(false);
conf.addResource("hdfsproxy-default.xml");
Map<String, Set<Path>> permsMap = getPermMap(conf);
Map<String, Set<BigInteger>> certsMap = getCertsMap(conf);
Modified: hadoop/hdfs/branches/branch-0.21/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyListPathsServlet.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyListPathsServlet.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyListPathsServlet.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyListPathsServlet.java Thu Oct 1 05:31:37 2009
@@ -22,6 +22,7 @@
import javax.servlet.http.HttpServletRequest;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.namenode.ListPathsServlet;
import org.apache.hadoop.security.UnixUserGroupInformation;
@@ -35,7 +36,7 @@
public void init() throws ServletException {
ServletContext context = getServletContext();
if (context.getAttribute("name.conf") == null) {
- context.setAttribute("name.conf", new Configuration());
+ context.setAttribute("name.conf", new HdfsConfiguration());
}
}
Modified: hadoop/hdfs/branches/branch-0.21/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyStreamFile.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyStreamFile.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyStreamFile.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyStreamFile.java Thu Oct 1 05:31:37 2009
@@ -26,6 +26,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.namenode.StreamFile;
import org.apache.hadoop.security.UnixUserGroupInformation;
@@ -39,7 +40,7 @@
public void init() throws ServletException {
ServletContext context = getServletContext();
if (context.getAttribute("name.conf") == null) {
- context.setAttribute("name.conf", new Configuration());
+ context.setAttribute("name.conf", new HdfsConfiguration());
}
}
@@ -48,7 +49,7 @@
protected DFSClient getDFSClient(HttpServletRequest request)
throws IOException {
ServletContext context = getServletContext();
- Configuration conf = new Configuration((Configuration) context
+ Configuration conf = new HdfsConfiguration((Configuration) context
.getAttribute("name.conf"));
UnixUserGroupInformation.saveToConf(conf,
UnixUserGroupInformation.UGI_PROPERTY_NAME, getUGI(request));
Modified: hadoop/hdfs/branches/branch-0.21/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyUgiManager.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyUgiManager.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyUgiManager.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyUgiManager.java Thu Oct 1 05:31:37 2009
@@ -25,6 +25,7 @@
import java.util.regex.Pattern;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.security.UnixUserGroupInformation;
import org.apache.hadoop.util.Shell;
@@ -37,7 +38,7 @@
static final int CLEANUP_THRESHOLD = 1000;
static {
- Configuration conf = new Configuration(false);
+ Configuration conf = new HdfsConfiguration(false);
conf.addResource("hdfsproxy-default.xml");
ugiLifetime = conf.getLong("hdfsproxy.ugi.cache.ugi.lifetime", 15) * 60 * 1000L;
}
Modified: hadoop/hdfs/branches/branch-0.21/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyUtil.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyUtil.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyUtil.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyUtil.java Thu Oct 1 05:31:37 2009
@@ -51,6 +51,8 @@
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.util.HostsFileReader;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+
/**
* Proxy Utility.
*/
@@ -312,7 +314,7 @@
+ UtilityOption.CHECKCERTS.getName() + " <hostname> <#port> ]");
System.exit(0);
}
- Configuration conf = new Configuration(false);
+ Configuration conf = new HdfsConfiguration(false);
conf.addResource("ssl-client.xml");
conf.addResource("hdfsproxy-default.xml");
Modified: hadoop/hdfs/branches/branch-0.21/src/contrib/hdfsproxy/src/test/org/apache/hadoop/hdfsproxy/TestHdfsProxy.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/contrib/hdfsproxy/src/test/org/apache/hadoop/hdfsproxy/TestHdfsProxy.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/contrib/hdfsproxy/src/test/org/apache/hadoop/hdfsproxy/TestHdfsProxy.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/contrib/hdfsproxy/src/test/org/apache/hadoop/hdfsproxy/TestHdfsProxy.java Thu Oct 1 05:31:37 2009
@@ -39,6 +39,7 @@
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.net.NetUtils;
/**
@@ -121,7 +122,7 @@
private static MyFile[] createFiles(URI fsname, String topdir)
throws IOException {
- return createFiles(FileSystem.get(fsname, new Configuration()), topdir);
+ return createFiles(FileSystem.get(fsname, new HdfsConfiguration()), topdir);
}
/**
@@ -203,13 +204,13 @@
HdfsProxy proxy = null;
try {
- final Configuration dfsConf = new Configuration();
+ final Configuration dfsConf = new HdfsConfiguration();
cluster = new MiniDFSCluster(dfsConf, 2, true, null);
cluster.waitActive();
final FileSystem localfs = FileSystem.get(LOCAL_FS, dfsConf);
final FileSystem hdfs = cluster.getFileSystem();
- final Configuration proxyConf = new Configuration(false);
+ final Configuration proxyConf = new HdfsConfiguration(false);
proxyConf.set("hdfsproxy.dfs.namenode.address", hdfs.getUri().getHost() + ":"
+ hdfs.getUri().getPort());
proxyConf.set("hdfsproxy.https.address", "localhost:0");
Modified: hadoop/hdfs/branches/branch-0.21/src/contrib/hdfsproxy/src/test/org/apache/hadoop/hdfsproxy/TestProxyUtil.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/contrib/hdfsproxy/src/test/org/apache/hadoop/hdfsproxy/TestProxyUtil.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/contrib/hdfsproxy/src/test/org/apache/hadoop/hdfsproxy/TestProxyUtil.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/contrib/hdfsproxy/src/test/org/apache/hadoop/hdfsproxy/TestProxyUtil.java Thu Oct 1 05:31:37 2009
@@ -21,6 +21,7 @@
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
/** Unit tests for ProxyUtil */
public class TestProxyUtil extends TestCase {
@@ -30,7 +31,7 @@
public void testSendCommand() throws Exception {
- Configuration conf = new Configuration(false);
+ Configuration conf = new HdfsConfiguration(false);
conf.addResource("ssl-client.xml");
conf.addResource("hdfsproxy-default.xml");
String address = "localhost:" + TEST_PROXY_HTTPS_PORT;
Modified: hadoop/hdfs/branches/branch-0.21/src/contrib/thriftfs/src/java/org/apache/hadoop/thriftfs/HadoopThriftServer.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/contrib/thriftfs/src/java/org/apache/hadoop/thriftfs/HadoopThriftServer.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/contrib/thriftfs/src/java/org/apache/hadoop/thriftfs/HadoopThriftServer.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/contrib/thriftfs/src/java/org/apache/hadoop/thriftfs/HadoopThriftServer.java Thu Oct 1 05:31:37 2009
@@ -26,6 +26,8 @@
import org.apache.hadoop.util.Daemon;
import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+
/**
* ThriftHadoopFileSystem
* A thrift wrapper around the Hadoop File System
@@ -122,7 +124,7 @@
* @param name - the name of this handler
*/
public HadoopThriftHandler(String name) {
- conf = new Configuration();
+ conf = new HdfsConfiguration();
now = now();
try {
inactivityThread = new Daemon(new InactivityMonitor());
Modified: hadoop/hdfs/branches/branch-0.21/src/contrib/thriftfs/test/org/apache/hadoop/thriftfs/TestThriftfs.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/contrib/thriftfs/test/org/apache/hadoop/thriftfs/TestThriftfs.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/contrib/thriftfs/test/org/apache/hadoop/thriftfs/TestThriftfs.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/contrib/thriftfs/test/org/apache/hadoop/thriftfs/TestThriftfs.java Thu Oct 1 05:31:37 2009
@@ -35,7 +35,7 @@
public void testServer() throws IOException
{
- Configuration conf = new Configuration();
+ Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes, true, null);
cluster.waitActive();
DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
Modified: hadoop/hdfs/branches/branch-0.21/src/java/hdfs-default.xml
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/java/hdfs-default.xml?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/java/hdfs-default.xml (original)
+++ hadoop/hdfs/branches/branch-0.21/src/java/hdfs-default.xml Thu Oct 1 05:31:37 2009
@@ -8,6 +8,12 @@
<configuration>
<property>
+ <name>hadoop.hdfs.configuration.version</name>
+ <value>1</value>
+ <description>version of this configuration file</description>
+</property>
+
+<property>
<name>dfs.namenode.logging.level</name>
<value>info</value>
<description>The logging level for dfs namenode. Other values are "dir"(trac
@@ -16,7 +22,7 @@
</property>
<property>
- <name>dfs.secondary.http.address</name>
+ <name>dfs.namenode.secondary.http-address</name>
<value>0.0.0.0:50090</value>
<description>
The secondary namenode http server address and port.
@@ -58,7 +64,7 @@
</property>
<property>
- <name>dfs.http.address</name>
+ <name>dfs.namenode.http-address</name>
<value>0.0.0.0:50070</value>
<description>
The address and the base port where the dfs namenode web ui will listen on.
@@ -74,7 +80,7 @@
</property>
<property>
- <name>dfs.https.need.client.auth</name>
+ <name>dfs.client.https.need-auth</name>
<value>false</value>
<description>Whether SSL client certificate authentication is required
</description>
@@ -89,7 +95,7 @@
</property>
<property>
- <name>dfs.https.client.keystore.resource</name>
+ <name>dfs.client.https.keystore.resource</name>
<value>ssl-client.xml</value>
<description>Resource file from which ssl client keystore
information will be extracted
@@ -102,7 +108,7 @@
</property>
<property>
- <name>dfs.https.address</name>
+ <name>dfs.namenode.https-address</name>
<value>0.0.0.0:50470</value>
</property>
@@ -124,7 +130,7 @@
</property>
<property>
- <name>dfs.backup.address</name>
+ <name>dfs.namenode.backup.address</name>
<value>0.0.0.0:50100</value>
<description>
The backup node server address and port.
@@ -133,7 +139,7 @@
</property>
<property>
- <name>dfs.backup.http.address</name>
+ <name>dfs.namenode.backup.http-address</name>
<value>0.0.0.0:50105</value>
<description>
The backup node http server address and port.
@@ -142,7 +148,7 @@
</property>
<property>
- <name>dfs.replication.considerLoad</name>
+ <name>dfs.namenode.replication.considerLoad</name>
<value>true</value>
<description>Decide if chooseTarget considers the target's load or not
</description>
@@ -162,7 +168,7 @@
</property>
<property>
- <name>dfs.name.dir</name>
+ <name>dfs.namenode.name.dir</name>
<value>${hadoop.tmp.dir}/dfs/name</value>
<description>Determines where on the local filesystem the DFS name node
should store the name table(fsimage). If this is a comma-delimited list
@@ -171,8 +177,8 @@
</property>
<property>
- <name>dfs.name.edits.dir</name>
- <value>${dfs.name.dir}</value>
+ <name>dfs.namenode.edits.dir</name>
+ <value>${dfs.namenode.name.dir}</value>
<description>Determines where on the local filesystem the DFS name node
should store the transaction (edits) file. If this is a comma-delimited list
of directories then the transaction file is replicated in all of the
@@ -188,7 +194,7 @@
</property>
<property>
- <name>dfs.permissions</name>
+ <name>dfs.permissions.enabled</name>
<value>true</value>
<description>
If "true", enable permission checking in HDFS.
@@ -200,36 +206,13 @@
</property>
<property>
- <name>dfs.permissions.supergroup</name>
+ <name>dfs.permissions.superusergroup</name>
<value>supergroup</value>
<description>The name of the group of super-users.</description>
</property>
<property>
- <name>dfs.access.token.enable</name>
- <value>false</value>
- <description>
- If "true", access tokens are used as capabilities for accessing datanodes.
- If "false", no access tokens are checked on accessing datanodes.
- </description>
-</property>
-
-<property>
- <name>dfs.access.key.update.interval</name>
- <value>600</value>
- <description>
- Interval in minutes at which namenode updates its access keys.
- </description>
-</property>
-
-<property>
- <name>dfs.access.token.lifetime</name>
- <value>600</value>
- <description>The lifetime of access tokens in minutes.</description>
-</property>
-
-<property>
- <name>dfs.data.dir</name>
+ <name>dfs.datanode.data.dir</name>
<value>${hadoop.tmp.dir}/dfs/data</value>
<description>Determines where on the local filesystem a DFS data node
should store its blocks. If this is a comma-delimited
@@ -256,25 +239,19 @@
</property>
<property>
- <name>dfs.replication.min</name>
+ <name>dfs.namenode.replication.min</name>
<value>1</value>
<description>Minimal block replication.
</description>
</property>
<property>
- <name>dfs.block.size</name>
+ <name>dfs.blocksize</name>
<value>67108864</value>
<description>The default block size for new files.</description>
</property>
<property>
- <name>dfs.df.interval</name>
- <value>60000</value>
- <description>Disk usage statistics refresh interval in msec.</description>
-</property>
-
-<property>
<name>dfs.client.block.write.retries</name>
<value>3</value>
<description>The number of retries for writing blocks to the data nodes,
@@ -314,18 +291,18 @@
</property>
<property>
- <name>dfs.safemode.threshold.pct</name>
+ <name>dfs.namenode.safemode.threshold-pct</name>
<value>0.999f</value>
<description>
Specifies the percentage of blocks that should satisfy
- the minimal replication requirement defined by dfs.replication.min.
+ the minimal replication requirement defined by dfs.namenode.replication.min.
Values less than or equal to 0 mean not to start in safe mode.
Values greater than 1 will make safe mode permanent.
</description>
</property>
<property>
- <name>dfs.safemode.extension</name>
+ <name>dfs.namenode.safemode.extension</name>
<value>30000</value>
<description>
Determines extension of safe mode in milliseconds
@@ -334,7 +311,7 @@
</property>
<property>
- <name>dfs.balance.bandwidthPerSec</name>
+ <name>dfs.datanode.balance.bandwidthPerSec</name>
<value>1048576</value>
<description>
Specifies the maximum amount of bandwidth that each datanode
@@ -362,7 +339,7 @@
</property>
<property>
- <name>dfs.max.objects</name>
+ <name>dfs.namenode.max.objects</name>
<value>0</value>
<description>The maximum number of files, directories and blocks
dfs supports. A value of zero indicates no limit to the number
@@ -385,14 +362,14 @@
</property>
<property>
- <name>dfs.replication.interval</name>
+ <name>dfs.namenode.replication.interval</name>
<value>3</value>
<description>The periodicity in seconds with which the namenode computes
replication work for datanodes. </description>
</property>
<property>
- <name>dfs.access.time.precision</name>
+ <name>dfs.namenode.accesstime.precision</name>
<value>3600000</value>
<description>The access time for an HDFS file is precise up to this value.
The default value is 1 hour. Setting a value of 0 disables
@@ -423,4 +400,62 @@
</description>
</property>
+<property>
+ <name>dfs.stream-buffer-size</name>
+ <value>4096</value>
+ <description>The size of the buffer used to stream files.
+ The size of this buffer should probably be a multiple of hardware
+ page size (4096 on Intel x86), and it determines how much data is
+ buffered during read and write operations.</description>
+</property>
+
+<property>
+ <name>dfs.bytes-per-checksum</name>
+ <value>512</value>
+ <description>The number of bytes per checksum. Must not be larger than
+ dfs.stream-buffer-size</description>
+</property>
+
+<property>
+ <name>dfs.client-write-packet-size</name>
+ <value>65536</value>
+ <description>Packet size for clients to write</description>
+</property>
+
+<property>
+ <name>dfs.namenode.checkpoint.dir</name>
+ <value>${hadoop.tmp.dir}/dfs/namesecondary</value>
+ <description>Determines where on the local filesystem the DFS secondary
+ name node should store the temporary images to merge.
+ If this is a comma-delimited list of directories then the image is
+ replicated in all of the directories for redundancy.
+ </description>
+</property>
+
+<property>
+ <name>dfs.namenode.checkpoint.edits.dir</name>
+ <value>${dfs.namenode.checkpoint.dir}</value>
+ <description>Determines where on the local filesystem the DFS secondary
+ name node should store the temporary edits to merge.
+ If this is a comma-delimited list of directories then the edits are
+ replicated in all of the directories for redundancy.
+ The default value is the same as dfs.namenode.checkpoint.dir
+ </description>
+</property>
+
+<property>
+ <name>dfs.namenode.checkpoint.period</name>
+ <value>3600</value>
+ <description>The number of seconds between two periodic checkpoints.
+ </description>
+</property>
+
+<property>
+ <name>dfs.namenode.checkpoint.size</name>
+ <value>67108864</value>
+ <description>The size of the current edit log (in bytes) that triggers
+ a periodic checkpoint even if the dfs.namenode.checkpoint.period hasn't expired.
+ </description>
+</property>
+
</configuration>
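Since every rename in hdfs-default.xml above is paired with a deprecation mapping in HdfsConfiguration, a site file or program that still sets a pre-rename key keeps working. A short sketch of the expected behavior (class name hypothetical; assumes the old-to-new alias resolves as the new TestDeprecatedKeys exercises):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class DeprecatedKeyDemo {
      public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration();
        // An existing deployment may still set the pre-rename key...
        conf.set("dfs.name.dir", "/data/dfs/name");
        // ...and code reading through the renamed key still sees the
        // value, because the old name is a registered deprecated alias.
        System.out.println(conf.get(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY));
        // Expected output, if the alias behaves as assumed: /data/dfs/name
      }
    }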
Modified: hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/DFSClient.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/DFSClient.java Thu Oct 1 05:31:37 2009
@@ -248,13 +248,14 @@
throws IOException {
this.conf = conf;
this.stats = stats;
- this.socketTimeout = conf.getInt("dfs.socket.timeout",
+ this.socketTimeout = conf.getInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY,
HdfsConstants.READ_TIMEOUT);
this.datanodeWriteTimeout = conf.getInt("dfs.datanode.socket.write.timeout",
HdfsConstants.WRITE_TIMEOUT);
this.socketFactory = NetUtils.getSocketFactory(conf, ClientProtocol.class);
// dfs.write.packet.size is an internal config variable
- this.writePacketSize = conf.getInt("dfs.write.packet.size", 64*1024);
+ this.writePacketSize = conf.getInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY,
+ DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT);
this.maxBlockAcquireFailures =
conf.getInt("dfs.client.max.block.acquire.failures",
MAX_BLOCK_ACQUIRE_FAILURES);
@@ -273,7 +274,7 @@
} else {
this.clientName = "DFSClient_" + r.nextInt();
}
- defaultBlockSize = conf.getLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
+ defaultBlockSize = conf.getLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
defaultReplication = (short) conf.getInt("dfs.replication", 3);
if (nameNodeAddr != null && rpcNamenode == null) {
@@ -569,7 +570,8 @@
LOG.debug(src + ": masked=" + masked);
OutputStream result = new DFSOutputStream(src, masked,
flag, createParent, replication, blockSize, progress, buffersize,
- conf.getInt("io.bytes.per.checksum", 512));
+ conf.getInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY,
+ DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT));
leasechecker.put(src, result);
return result;
}
@@ -628,7 +630,8 @@
DSQuotaExceededException.class);
}
OutputStream result = new DFSOutputStream(src, buffersize, progress,
- lastBlock, stat, conf.getInt("io.bytes.per.checksum", 512));
+ lastBlock, stat, conf.getInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY,
+ DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT));
leasechecker.put(src, result);
return result;
}
@@ -1635,7 +1638,7 @@
this.verifyChecksum = verifyChecksum;
this.buffersize = buffersize;
this.src = src;
- prefetchSize = conf.getLong("dfs.read.prefetch.size", prefetchSize);
+ prefetchSize = conf.getLong(DFSConfigKeys.DFS_CLIENT_READ_PREFETCH_SIZE_KEY, prefetchSize);
openInfo();
}
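Every DFSClient hunk above follows one pattern: a string-literal key plus an inline default is replaced by a KEY/DEFAULT constant pair. Condensed from the write-packet-size hunk (demo class name hypothetical; the key name and default come from the DFSConfigKeys file added below):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class PacketSizeDemo {
      public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration();

        // Before: conf.getInt("dfs.write.packet.size", 64 * 1024);
        // After: the key ("dfs.client-write-packet-size") and its default
        // (64 * 1024) are each defined exactly once, in DFSConfigKeys.
        int writePacketSize = conf.getInt(
            DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY,
            DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT);
        System.out.println(writePacketSize);
      }
    }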
Added: hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java?rev=820536&view=auto
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java (added)
+++ hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java Thu Oct 1 05:31:37 2009
@@ -0,0 +1,184 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+
+/**
+ * This class contains constants for configuration keys used
+ * in hdfs.
+ *
+ */
+
+public class DFSConfigKeys extends CommonConfigurationKeys {
+
+ public static final String DFS_BLOCK_SIZE_KEY = "dfs.blocksize";
+ public static final long DFS_BLOCK_SIZE_DEFAULT = 64*1024*1024;
+ public static final String DFS_REPLICATION_KEY = "dfs.replication";
+ public static final short DFS_REPLICATION_DEFAULT = 3;
+ public static final String DFS_STREAM_BUFFER_SIZE_KEY = "dfs.stream-buffer-size";
+ public static final int DFS_STREAM_BUFFER_SIZE_DEFAULT = 4096;
+ public static final String DFS_BYTES_PER_CHECKSUM_KEY = "dfs.bytes-per-checksum";
+ public static final int DFS_BYTES_PER_CHECKSUM_DEFAULT = 512;
+ public static final String DFS_CLIENT_WRITE_PACKET_SIZE_KEY = "dfs.client-write-packet-size";
+ public static final int DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT = 64*1024;
+
+ public static final String DFS_NAMENODE_BACKUP_ADDRESS_KEY = "dfs.namenode.backup.address";
+ public static final String DFS_NAMENODE_BACKUP_ADDRESS_DEFAULT = "localhost:50100";
+ public static final String DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY = "dfs.namenode.backup.http-address";
+ public static final String DFS_NAMENODE_BACKUP_HTTP_ADDRESS_DEFAULT = "0.0.0.0:50105";
+ public static final String DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY = "dfs.datanode.balance.bandwidthPerSec";
+ public static final long DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_DEFAULT = 1024*1024;
+ public static final String DFS_NAMENODE_HTTP_ADDRESS_KEY = "dfs.namenode.http-address";
+ public static final String DFS_NAMENODE_HTTP_ADDRESS_DEFAULT = "0.0.0.0:50070";
+ public static final String DFS_NAMENODE_MAX_OBJECTS_KEY = "dfs.namenode.max.objects";
+ public static final long DFS_NAMENODE_MAX_OBJECTS_DEFAULT = 0;
+ public static final String DFS_NAMENODE_SAFEMODE_EXTENSION_KEY = "dfs.namenode.safemode.extension";
+ public static final int DFS_NAMENODE_SAFEMODE_EXTENSION_DEFAULT = 30000;
+ public static final String DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY = "dfs.namenode.safemode.threshold-pct";
+ public static final float DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_DEFAULT = 0.999f;
+ public static final String DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY = "dfs.namenode.secondary.http-address";
+ public static final String DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_DEFAULT = "0.0.0.0:50090";
+ public static final String DFS_NAMENODE_CHECKPOINT_PERIOD_KEY = "dfs.namenode.checkpoint.period";
+ public static final long DFS_NAMENODE_CHECKPOINT_PERIOD_DEFAULT = 3600;
+ public static final String DFS_NAMENODE_CHECKPOINT_SIZE_KEY = "dfs.namenode.checkpoint.size";
+ public static final long DFS_NAMENODE_CHECKPOINT_SIZE_DEFAULT = 4194304;
+ public static final String DFS_NAMENODE_UPGRADE_PERMISSION_KEY = "dfs.namenode.upgrade.permission";
+ public static final int DFS_NAMENODE_UPGRADE_PERMISSION_DEFAULT = 00777;
+ public static final String DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY = "dfs.namenode.heartbeat.recheck-interval";
+ public static final int DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT = 5*60*1000;
+ public static final String DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY = "dfs.client.https.keystore.resource";
+ public static final String DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_DEFAULT = "ssl-client.xml";
+ public static final String DFS_CLIENT_HTTPS_NEED_AUTH_KEY = "dfs.client.https.need-auth";
+ public static final boolean DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT = false;
+ public static final String DFS_NAMENODE_ACCESSTIME_PRECISION_KEY = "dfs.namenode.accesstime.precision";
+ public static final long DFS_NAMENODE_ACCESSTIME_PRECISION_DEFAULT = 3600000;
+ public static final String DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY = "dfs.namenode.replication.considerLoad";
+ public static final boolean DFS_NAMENODE_REPLICATION_CONSIDERLOAD_DEFAULT = true;
+ public static final String DFS_NAMENODE_REPLICATION_INTERVAL_KEY = "dfs.namenode.replication.interval";
+ public static final int DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT = 3;
+ public static final String DFS_NAMENODE_REPLICATION_MIN_KEY = "dfs.namenode.replication.min";
+ public static final int DFS_NAMENODE_REPLICATION_MIN_DEFAULT = 1;
+ public static final String DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY = "dfs.namenode.replication.pending.timeout-sec";
+ public static final int DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_DEFAULT = -1;
+ public static final String DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY = "dfs.namenode.replication.max-streams";
+ public static final int DFS_NAMENODE_REPLICATION_MAX_STREAMS_DEFAULT = 2;
+ public static final String DFS_PERMISSIONS_ENABLED_KEY = "dfs.permissions.enabled";
+ public static final boolean DFS_PERMISSIONS_ENABLED_DEFAULT = true;
+ public static final String DFS_PERMISSIONS_SUPERUSERGROUP_KEY = "dfs.permissions.superusergroup";
+ public static final String DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT = "supergroup";
+ public static final String DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY = "dfs.https.server.keystore.resource";
+ public static final String DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT = "ssl-server.xml";
+ public static final String DFS_NAMENODE_NAME_DIR_RESTORE_KEY = "dfs.namenode.name.dir.restore";
+ public static final boolean DFS_NAMENODE_NAME_DIR_RESTORE_DEFAULT = false;
+
+ //Following keys have no defaults
+ public static final String DFS_DATANODE_DATA_DIR_KEY = "dfs.datanode.data.dir";
+ public static final String DFS_NAMENODE_HTTPS_ADDRESS_KEY = "dfs.namenode.https-address";
+ public static final String DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT = "0.0.0.0:50470";
+ public static final String DFS_NAMENODE_NAME_DIR_KEY = "dfs.namenode.name.dir";
+ public static final String DFS_NAMENODE_EDITS_DIR_KEY = "dfs.namenode.edits.dir";
+ public static final String DFS_CLIENT_READ_PREFETCH_SIZE_KEY = "dfs.client.read.prefetch.size";
+ public static final String DFS_METRICS_SESSION_ID_KEY = "dfs.metrics.session-id";
+ public static final String DFS_DATANODE_HOST_NAME_KEY = "dfs.datanode.hostname";
+ public static final String DFS_DATANODE_STORAGEID_KEY = "dfs.datanode.StorageId";
+ public static final String DFS_NAMENODE_HOSTS_KEY = "dfs.namenode.hosts";
+ public static final String DFS_NAMENODE_HOSTS_EXCLUDE_KEY = "dfs.namenode.hosts.exclude";
+ public static final String DFS_CLIENT_SOCKET_TIMEOUT_KEY = "dfs.client.socket-timeout";
+ public static final String DFS_NAMENODE_CHECKPOINT_DIR_KEY = "dfs.namenode.checkpoint.dir";
+ public static final String DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY = "dfs.namenode.checkpoint.edits.dir";
+
+ //Code in hdfs is not updated to use these keys.
+ public static final String DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY = "dfs.client.block.write.locateFollowingBlock.retries";
+ public static final int DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_DEFAULT = 5;
+ public static final String DFS_CLIENT_BLOCK_WRITE_RETRIES_KEY = "dfs.client.block.write.retries";
+ public static final int DFS_CLIENT_BLOCK_WRITE_RETRIES_DEFAULT = 3;
+ public static final String DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY = "dfs.client.max.block.acquire.failures";
+ public static final int DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT = 3;
+ public static final String DFS_BALANCER_MOVEDWINWIDTH_KEY = "dfs.balancer.movedWinWidth";
+ public static final int DFS_BALANCER_MOVEDWINWIDTH_DEFAULT = 5400*1000;
+ public static final String DFS_DATANODE_ADDRESS_KEY = "dfs.datanode.address";
+ public static final String DFS_DATANODE_ADDRESS_DEFAULT = "0.0.0.0:50010";
+ public static final String DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY = "dfs.datanode.directoryscan.interval";
+ public static final int DFS_DATANODE_DIRECTORYSCAN_INTERVAL_DEFAULT = 21600;
+ public static final String DFS_DATANODE_DNS_INTERFACE_KEY = "dfs.datanode.dns.interface";
+ public static final String DFS_DATANODE_DNS_INTERFACE_DEFAULT = "default";
+ public static final String DFS_DATANODE_DNS_NAMESERVER_KEY = "dfs.datanode.dns.nameserver";
+ public static final String DFS_DATANODE_DNS_NAMESERVER_DEFAULT = "default";
+ public static final String DFS_DATANODE_DU_RESERVED_KEY = "dfs.datanode.du.reserved";
+ public static final long DFS_DATANODE_DU_RESERVED_DEFAULT = 0;
+ public static final String DFS_DATANODE_HANDLER_COUNT_KEY = "dfs.datanode.handler.count";
+ public static final int DFS_DATANODE_HANDLER_COUNT_DEFAULT = 3;
+ public static final String DFS_DATANODE_HTTP_ADDRESS_KEY = "dfs.datanode.http.address";
+ public static final String DFS_DATANODE_HTTP_ADDRESS_DEFAULT = "0.0.0.0:50075";
+ public static final String DFS_DATANODE_MAX_XCIEVERS_KEY = "dfs.datanode.max.xcievers";
+ public static final int DFS_DATANODE_MAX_XCIEVERS_DEFAULT = 256;
+ public static final String DFS_DATANODE_NUMBLOCKS_KEY = "dfs.datanode.numblocks";
+ public static final int DFS_DATANODE_NUMBLOCKS_DEFAULT = 64;
+ public static final String DFS_DATANODE_SCAN_PERIOD_HOURS_KEY = "dfs.datanode.scan.period.hours";
+ public static final int DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT = 0;
+ public static final String DFS_DATANODE_SIMULATEDDATASTORAGE_KEY = "dfs.datanode.simulateddatastorage";
+ public static final boolean DFS_DATANODE_SIMULATEDDATASTORAGE_DEFAULT = false;
+ public static final String DFS_DATANODE_SIMULATEDDATASTORAGE_CAPACITY_KEY = "dfs.datanode.simulateddatastorage.capacity";
+ public static final long DFS_DATANODE_SIMULATEDDATASTORAGE_CAPACITY_DEFAULT = 2L<<40;
+ public static final String DFS_DATANODE_TRANSFERTO_ALLOWED_KEY = "dfs.datanode.transferTo.allowed";
+ public static final boolean DFS_DATANODE_TRANSFERTO_ALLOWED_DEFAULT = true;
+ public static final String DFS_HEARTBEAT_INTERVAL_KEY = "dfs.heartbeat.interval";
+ public static final long DFS_HEARTBEAT_INTERVAL_DEFAULT = 3;
+ public static final String DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY = "dfs.namenode.decommission.interval";
+ public static final int DFS_NAMENODE_DECOMMISSION_INTERVAL_DEFAULT = 30;
+ public static final String DFS_NAMENODE_DECOMMISSION_NODES_PER_INTERVAL_KEY = "dfs.namenode.decommission.nodes.per.interval";
+ public static final int DFS_NAMENODE_DECOMMISSION_NODES_PER_INTERVAL_DEFAULT = 5;
+ public static final String DFS_NAMENODE_HANDLER_COUNT_KEY = "dfs.namenode.handler.count";
+ public static final int DFS_NAMENODE_HANDLER_COUNT_DEFAULT = 10;
+ public static final String DFS_SUPPORT_APPEND_KEY = "dfs.support.append";
+ public static final boolean DFS_SUPPORT_APPEND_DEFAULT = false;
+ public static final String DFS_HTTPS_ENABLE_KEY = "dfs.https.enable";
+ public static final boolean DFS_HTTPS_ENABLE_DEFAULT = false;
+ public static final String DFS_DEFAULT_CHUNK_VIEW_SIZE_KEY = "dfs.default.chunk.view.size";
+ public static final int DFS_DEFAULT_CHUNK_VIEW_SIZE_DEFAULT = 32*1024;
+ public static final String DFS_DATANODE_HTTPS_ADDRESS_KEY = "dfs.datanode.https.address";
+ public static final String DFS_DATANODE_HTTPS_ADDRESS_DEFAULT = "0.0.0.0:50475";
+ public static final String DFS_DATANODE_IPC_ADDRESS_KEY = "dfs.datanode.ipc.address";
+ public static final String DFS_DATANODE_IPC_ADDRESS_DEFAULT = "0.0.0.0:50020";
+
+ public static final String DFS_ACCESS_TOKEN_ENABLE_KEY = "dfs.access.token.enable";
+ public static final boolean DFS_ACCESS_TOKEN_ENABLE_DEFAULT = false;
+ public static final String DFS_ACCESS_KEY_UPDATE_INTERVAL_KEY = "dfs.access.key.update.interval";
+ public static final int DFS_ACCESS_KEY_UPDATE_INTERVAL_DEFAULT = 600;
+ public static final String DFS_ACCESS_TOKEN_LIFETIME_KEY = "dfs.access.token.lifetime";
+ public static final int DFS_ACCESS_TOKEN_LIFETIME_DEFAULT = 600;
+
+ public static final String DFS_REPLICATION_MAX_KEY = "dfs.replication.max";
+ public static final int DFS_REPLICATION_MAX_DEFAULT = 512;
+ public static final String DFS_DF_INTERVAL_KEY = "dfs.df.interval";
+ public static final int DFS_DF_INTERVAL_DEFAULT = 60000;
+ public static final String DFS_BLOCKREPORT_INTERVAL_MSEC_KEY = "dfs.blockreport.intervalMsec";
+ public static final long DFS_BLOCKREPORT_INTERVAL_MSEC_DEFAULT = 21600000;
+ public static final String DFS_BLOCKREPORT_INITIAL_DELAY_KEY = "dfs.blockreport.initialDelay";
+ public static final int DFS_BLOCKREPORT_INITIAL_DELAY_DEFAULT = 0;
+
+ //The following keys have no default values.
+ public static final String DFS_DATANODE_PLUGINS_KEY = "dfs.datanode.plugins";
+ public static final String DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY = "dfs.datanode.socket.write.timeout";
+ public static final String DFS_DATANODE_STARTUP_KEY = "dfs.datanode.startup";
+ public static final String DFS_NAMENODE_PLUGINS_KEY = "dfs.namenode.plugins";
+ public static final String DFS_WEB_UGI_KEY = "dfs.web.ugi";
+ public static final String DFS_NAMENODE_STARTUP_KEY = "dfs.namenode.startup";
+}
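
For illustration, a minimal sketch (not part of this commit) of how the paired KEY/DEFAULT constants above are meant to replace hard-coded string literals when reading configuration; the HandlerCountExample class is hypothetical:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;

    // Hypothetical example class, shown only to illustrate the KEY/DEFAULT pairing.
    class HandlerCountExample {
      static int handlerCount(Configuration conf) {
        // Falls back to 10 when dfs.namenode.handler.count is unset.
        return conf.getInt(DFSConfigKeys.DFS_NAMENODE_HANDLER_COUNT_KEY,
                           DFSConfigKeys.DFS_NAMENODE_HANDLER_COUNT_DEFAULT);
      }
    }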
Added: hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/HdfsConfiguration.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/HdfsConfiguration.java?rev=820536&view=auto
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/HdfsConfiguration.java (added)
+++ hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/HdfsConfiguration.java Thu Oct 1 05:31:37 2009
@@ -0,0 +1,85 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.conf.Configuration;
+
+/**
+ * Registers deprecated HDFS configuration keys with their replacements.
+ */
+public class HdfsConfiguration extends Configuration {
+ static {
+ addDeprecatedKeys();
+ }
+
+ public HdfsConfiguration() {
+ super();
+ }
+
+ public HdfsConfiguration(boolean loadDefaults) {
+ super(loadDefaults);
+ }
+
+ public HdfsConfiguration(Configuration conf) {
+ super(conf);
+ }
+
+ private static void deprecate(String oldKey, String newKey) {
+ Configuration.addDeprecation(oldKey, new String[]{newKey});
+ }
+
+ private static void addDeprecatedKeys() {
+ deprecate("dfs.backup.address", DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY);
+ deprecate("dfs.backup.http.address", DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY);
+ deprecate("dfs.balance.bandwidthPerSec", DFSConfigKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY);
+ deprecate("dfs.data.dir", DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
+ deprecate("dfs.http.address", DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
+ deprecate("dfs.https.address", DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY);
+ deprecate("dfs.max.objects", DFSConfigKeys.DFS_NAMENODE_MAX_OBJECTS_KEY);
+ deprecate("dfs.name.dir", DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
+ deprecate("dfs.name.dir.restore", DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_KEY);
+ deprecate("dfs.name.edits.dir", DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY);
+ deprecate("dfs.read.prefetch.size", DFSConfigKeys.DFS_CLIENT_READ_PREFETCH_SIZE_KEY);
+ deprecate("dfs.safemode.extension", DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY);
+ deprecate("dfs.safemode.threshold.pct", DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY);
+ deprecate("dfs.secondary.http.address", DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY);
+ deprecate("dfs.socket.timeout", DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY);
+ deprecate("fs.checkpoint.dir", DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY);
+ deprecate("fs.checkpoint.edits.dir", DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY);
+ deprecate("fs.checkpoint.period", DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY);
+ deprecate("fs.checkpoint.size", DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_SIZE_KEY);
+ deprecate("dfs.upgrade.permission", DFSConfigKeys.DFS_NAMENODE_UPGRADE_PERMISSION_KEY);
+ deprecate("heartbeat.recheck.interval", DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY);
+ deprecate("StorageId", DFSConfigKeys.DFS_DATANODE_STORAGEID_KEY);
+ deprecate("dfs.https.client.keystore.resource", DFSConfigKeys.DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY);
+ deprecate("dfs.https.need.client.auth", DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY);
+ deprecate("slave.host.name", DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY);
+ deprecate("session.id", DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY);
+ deprecate("dfs.access.time.precision", DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY);
+ deprecate("dfs.replication.considerLoad", DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY);
+ deprecate("dfs.replication.interval", DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY);
+ deprecate("dfs.replication.min", DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY);
+ deprecate("dfs.replication.pending.timeout.sec", DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY);
+ deprecate("dfs.max-repl-streams", DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY);
+ deprecate("dfs.permissions", DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY);
+ deprecate("dfs.permissions.supergroup", DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY);
+ deprecate("dfs.write.packet.size", DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY);
+ deprecate("dfs.block.size", DFSConfigKeys.DFS_BLOCK_SIZE_KEY);
+ }
+}
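
A minimal sketch (not part of this commit) of the intended effect, assuming Configuration.addDeprecation makes a value set under an old key readable through its replacement key; the DeprecationExample class is hypothetical:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    // Hypothetical example class; constructing HdfsConfiguration runs the static
    // initializer above, which registers the deprecation mappings.
    class DeprecationExample {
      public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration();
        conf.set("dfs.name.dir", "/data/dfs/name");  // deprecated key
        // Expected to print /data/dfs/name, resolved via dfs.namenode.name.dir.
        System.out.println(conf.get(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY));
      }
    }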
Modified: hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/HsftpFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/HsftpFileSystem.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/HsftpFileSystem.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/HsftpFileSystem.java Thu Oct 1 05:31:37 2009
@@ -65,9 +65,9 @@
* @throws IOException
*/
private static void setupSsl(Configuration conf) throws IOException {
- Configuration sslConf = new Configuration(false);
- sslConf.addResource(conf.get("dfs.https.client.keystore.resource",
- "ssl-client.xml"));
+ Configuration sslConf = new HdfsConfiguration(false);
+ sslConf.addResource(conf.get(DFSConfigKeys.DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY,
+ DFSConfigKeys.DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_DEFAULT));
FileInputStream fis = null;
try {
SSLContext sc = SSLContext.getInstance("SSL");
Modified: hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java Thu Oct 1 05:31:37 2009
@@ -366,7 +366,7 @@
* percentage, called the threshold, of blocks that satisfy the minimal
* replication condition.
* The minimal replication condition is that each block must have at least
- * <tt>dfs.replication.min</tt> replicas.
+ * <tt>dfs.namenode.replication.min</tt> replicas.
* When the threshold is reached the name node extends safe mode
* for a configurable amount of time
* to let the remaining data nodes check in before it
@@ -382,7 +382,7 @@
* <h4>Configuration parameters:</h4>
* <tt>dfs.namenode.safemode.threshold-pct</tt> is the threshold parameter.<br>
* <tt>dfs.namenode.safemode.extension</tt> is the safe mode extension parameter.<br>
- * <tt>dfs.replication.min</tt> is the minimal replication parameter.
+ * <tt>dfs.namenode.replication.min</tt> is the minimal replication parameter.
*
* <h4>Special cases:</h4>
* The name node does not enter safe mode at startup if the threshold is
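
A minimal sketch (not part of this commit) of querying the safe-mode state described above from a client, assuming the DistributedFileSystem#setSafeMode(SafeModeAction) API of this branch; the SafeModeProbe class is hypothetical:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.protocol.FSConstants;

    // Hypothetical example class: SAFEMODE_GET only queries the state; it does
    // not enter or leave safe mode.
    class SafeModeProbe {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        FileSystem fs = FileSystem.get(conf);
        if (fs instanceof DistributedFileSystem) {
          boolean inSafeMode = ((DistributedFileSystem) fs)
              .setSafeMode(FSConstants.SafeModeAction.SAFEMODE_GET);
          System.out.println("NameNode in safe mode: " + inSafeMode);
        }
      }
    }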
Modified: hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/protocol/FSConstants.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/protocol/FSConstants.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/protocol/FSConstants.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/protocol/FSConstants.java Thu Oct 1 05:31:37 2009
@@ -18,6 +18,7 @@
package org.apache.hadoop.hdfs.protocol;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
/************************************
* Some handy constants
@@ -48,7 +49,7 @@
public static int MAX_PATH_LENGTH = 8000;
public static int MAX_PATH_DEPTH = 1000;
- public static final int BUFFER_SIZE = new Configuration().getInt("io.file.buffer.size", 4096);
+ public static final int BUFFER_SIZE = new HdfsConfiguration().getInt("io.file.buffer.size", 4096);
//Used for writing header etc.
public static final int SMALL_BUFFER_SIZE = Math.min(BUFFER_SIZE/2, 512);
//TODO mb@media-style.com: should be conf injected?
Modified: hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/common/JspHelper.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/common/JspHelper.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/common/JspHelper.java Thu Oct 1 05:31:37 2009
@@ -36,6 +36,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor;
@@ -47,7 +48,7 @@
public class JspHelper {
final static public String WEB_UGI_PROPERTY_NAME = "dfs.web.ugi";
- public static final Configuration conf = new Configuration();
+ public static final Configuration conf = new HdfsConfiguration();
public static final UnixUserGroupInformation webUGI
= UnixUserGroupInformation.createImmutable(
conf.getStrings(WEB_UGI_PROPERTY_NAME));
Modified: hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Thu Oct 1 05:31:37 2009
@@ -80,6 +80,8 @@
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.http.HttpServer;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ipc.RPC;
@@ -251,8 +253,8 @@
AbstractList<File> dataDirs
) throws IOException {
// use configured nameserver & interface to get local hostname
- if (conf.get("slave.host.name") != null) {
- machineName = conf.get("slave.host.name");
+ if (conf.get(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY) != null) {
+ machineName = conf.get(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY);
}
if (machineName == null) {
machineName = DNS.getDefaultHost(
@@ -261,7 +263,7 @@
}
this.nameNodeAddr = NameNode.getAddress(conf);
- this.socketTimeout = conf.getInt("dfs.socket.timeout",
+ this.socketTimeout = conf.getInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY,
HdfsConstants.READ_TIMEOUT);
this.socketWriteTimeout = conf.getInt("dfs.datanode.socket.write.timeout",
HdfsConstants.WRITE_TIMEOUT);
@@ -269,7 +271,8 @@
* to false on some of them. */
this.transferToAllowed = conf.getBoolean("dfs.datanode.transferTo.allowed",
true);
- this.writePacketSize = conf.getInt("dfs.write.packet.size", 64*1024);
+ this.writePacketSize = conf.getInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY,
+ DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT);
InetSocketAddress socAddr = NetUtils.createSocketAddr(
conf.get("dfs.datanode.address", "0.0.0.0:50010"));
int tmpPort = socAddr.getPort();
@@ -296,7 +299,7 @@
dnRegistration.storageInfo.namespaceID = nsInfo.namespaceID;
// it would have been better to pass storage as a parameter to
// constructor below - need to augment ReflectionUtils used below.
- conf.set("StorageId", dnRegistration.getStorageID());
+ conf.set(DFSConfigKeys.DFS_DATANODE_STORAGEID_KEY, dnRegistration.getStorageID());
try {
//Equivalent of following (can't do because Simulated is in test dir)
// this.data = new SimulatedFSDataset(conf);
@@ -365,10 +368,11 @@
this.infoServer = new HttpServer("datanode", infoHost, tmpInfoPort,
tmpInfoPort == 0, conf);
if (conf.getBoolean("dfs.https.enable", false)) {
- boolean needClientAuth = conf.getBoolean("dfs.https.need.client.auth", false);
+ boolean needClientAuth = conf.getBoolean(DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY,
+ DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT);
InetSocketAddress secInfoSocAddr = NetUtils.createSocketAddr(conf.get(
"dfs.datanode.https.address", infoHost + ":" + 0));
- Configuration sslConf = new Configuration(false);
+ Configuration sslConf = new HdfsConfiguration(false);
sslConf.addResource(conf.get("dfs.https.server.keystore.resource",
"ssl-server.xml"));
this.infoServer.addSslListener(secInfoSocAddr, sslConf, needClientAuth);
@@ -1345,7 +1349,7 @@
public static DataNode instantiateDataNode(String args[],
Configuration conf) throws IOException {
if (conf == null)
- conf = new Configuration();
+ conf = new HdfsConfiguration();
if (args != null) {
// parse generic hadoop options
@@ -1362,7 +1366,7 @@
" anymore. RackID resolution is handled by the NameNode.");
System.exit(-1);
}
- String[] dataDirs = conf.getStrings("dfs.data.dir");
+ String[] dataDirs = conf.getStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
dnThreadName = "DataNode: [" +
StringUtils.arrayToString(dataDirs) + "]";
return makeInstance(dataDirs, conf);
Modified: hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java Thu Oct 1 05:31:37 2009
@@ -32,6 +32,8 @@
import org.apache.hadoop.hdfs.server.balancer.Balancer;
import org.apache.hadoop.util.Daemon;
import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+
/**
* Server used for receiving/sending a block of data.
@@ -115,11 +117,12 @@
this.maxXceiverCount = conf.getInt("dfs.datanode.max.xcievers",
MAX_XCEIVER_COUNT);
- this.estimateBlockSize = conf.getLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
+ this.estimateBlockSize = conf.getLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
//set up parameter for cluster balancing
this.balanceThrottler = new BlockBalanceThrottler(
- conf.getLong("dfs.balance.bandwidthPerSec", 1024L*1024));
+ conf.getLong(DFSConfigKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY,
+ DFSConfigKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_DEFAULT));
}
/**
Modified: hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java Thu Oct 1 05:31:37 2009
@@ -28,6 +28,7 @@
import org.apache.hadoop.metrics.util.MetricsTimeVaryingInt;
import org.apache.hadoop.metrics.util.MetricsTimeVaryingLong;
import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
/**
@@ -91,7 +92,7 @@
public DataNodeMetrics(Configuration conf, String datanodeName) {
- String sessionId = conf.get("session.id");
+ String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY);
// Initiate reporting of Java VM metrics
JvmMetrics.init("DataNode", sessionId);
Modified: hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java Thu Oct 1 05:31:37 2009
@@ -32,6 +32,7 @@
import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
import org.apache.hadoop.hdfs.server.namenode.FSImage.CheckpointStates;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.net.DNS;
@@ -52,10 +53,10 @@
* </ol>
*/
public class BackupNode extends NameNode {
- private static final String BN_ADDRESS_NAME_KEY = "dfs.backup.address";
- private static final String BN_ADDRESS_DEFAULT = "localhost:50100";
- private static final String BN_HTTP_ADDRESS_NAME_KEY = "dfs.backup.http.address";
- private static final String BN_HTTP_ADDRESS_DEFAULT = "0.0.0.0:50105";
+ private static final String BN_ADDRESS_NAME_KEY = DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY;
+ private static final String BN_ADDRESS_DEFAULT = DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_DEFAULT;
+ private static final String BN_HTTP_ADDRESS_NAME_KEY = DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY;
+ private static final String BN_HTTP_ADDRESS_DEFAULT = DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_DEFAULT;
/** Name-node proxy */
NamenodeProtocol namenode;
Modified: hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java Thu Oct 1 05:31:37 2009
@@ -41,6 +41,7 @@
import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.NumberReplicas;
import org.apache.hadoop.hdfs.server.namenode.UnderReplicatedBlocks.BlockIterator;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
/**
* Keeps information related to the blocks stored in the Hadoop cluster.
@@ -128,8 +129,8 @@
throws IOException {
namesystem = fsn;
pendingReplications = new PendingReplicationBlocks(
- conf.getInt("dfs.replication.pending.timeout.sec",
- -1) * 1000L);
+ conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY,
+ DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_DEFAULT) * 1000L);
setConfigurationParameters(conf);
blocksMap = new BlocksMap(capacity, DEFAULT_MAP_LOAD_FACTOR);
}
@@ -142,10 +143,11 @@
this.defaultReplication = conf.getInt("dfs.replication", 3);
this.maxReplication = conf.getInt("dfs.replication.max", 512);
- this.minReplication = conf.getInt("dfs.replication.min", 1);
+ this.minReplication = conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY,
+ DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_DEFAULT);
if (minReplication <= 0)
throw new IOException(
- "Unexpected configuration parameters: dfs.replication.min = "
+ "Unexpected configuration parameters: dfs.namenode.replication.min = "
+ minReplication
+ " must be greater than 0");
if (maxReplication >= (int)Short.MAX_VALUE)
@@ -154,12 +156,13 @@
+ maxReplication + " must be less than " + (Short.MAX_VALUE));
if (maxReplication < minReplication)
throw new IOException(
- "Unexpected configuration parameters: dfs.replication.min = "
+ "Unexpected configuration parameters: dfs.namenode.replication.min = "
+ minReplication
+ " must be less than dfs.replication.max = "
+ maxReplication);
- this.maxReplicationStreams = conf.getInt("dfs.max-repl-streams", 2);
- this.shouldCheckForEnoughRacks = conf.get("topology.script.file.name") == null ? false
+ this.maxReplicationStreams = conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY,
+ DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_DEFAULT);
+ this.shouldCheckForEnoughRacks = conf.get(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY) == null ? false
: true;
FSNamesystem.LOG.info("defaultReplication = " + defaultReplication);
FSNamesystem.LOG.info("maxReplication = " + maxReplication);