Posted to server-dev@james.apache.org by fe...@apache.org on 2011/10/12 15:20:52 UTC

svn commit: r1182352 - in /james/mailbox/trunk: hbase/src/test/resources/hdfs-default.xml hbase/src/test/resources/hdfs-site.xml spring/src/test/resources/hdfs-default.xml spring/src/test/resources/hdfs-site.xml

Author: felixk
Date: Wed Oct 12 13:20:51 2011
New Revision: 1182352

URL: http://svn.apache.org/viewvc?rev=1182352&view=rev
Log:
Thanks for the hint, Eric

"In hadoop/hbase world, you set the conf properties by defining a *-site.xml where you override the default properties defined in *-default.xml which is shipped in the hadoop/hbase jar.
So rather than having hdfs-default.xml in our source tree, hdfs-site.xml with only dfs.datanode.data.dir.perm property defined will be better."
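A minimal sketch of the hdfs-site.xml Eric describes: a file that pins down only the one overridden property and lets every other setting fall back to the hdfs-default.xml bundled in the Hadoop jar (the 775 value is taken from the diff below):

    <?xml version="1.0"?>
    <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

    <configuration>

      <!-- The only override Eric suggests keeping; everything else
           inherits from the hdfs-default.xml inside the hadoop jar. -->
      <property>
        <name>dfs.datanode.data.dir.perm</name>
        <value>775</value>
      </property>

    </configuration>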

Added:
    james/mailbox/trunk/hbase/src/test/resources/hdfs-site.xml
      - copied, changed from r1182282, james/mailbox/trunk/hbase/src/test/resources/hdfs-default.xml
    james/mailbox/trunk/spring/src/test/resources/hdfs-site.xml
      - copied, changed from r1182284, james/mailbox/trunk/spring/src/test/resources/hdfs-default.xml
Removed:
    james/mailbox/trunk/hbase/src/test/resources/hdfs-default.xml
    james/mailbox/trunk/spring/src/test/resources/hdfs-default.xml
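As the hunks below show, the committed hdfs-site.xml files keep slightly more than Eric's one-property minimum. Reconstructed from the diff (descriptions omitted for brevity), each file now reduces to roughly:

    <?xml version="1.0"?>
    <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

    <configuration>

      <property>
        <name>dfs.permissions</name>
        <!-- flipped from the default "true": permission checking in
             HDFS is turned off for the tests -->
        <value>false</value>
      </property>

      <property>
        <name>dfs.datanode.data.dir.perm</name>
        <value>775</value>
      </property>

      <!-- dfs.support.append is also retained, but its value is cut
           off in this diff excerpt, so it is not reproduced here. -->

    </configuration>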

Copied: james/mailbox/trunk/hbase/src/test/resources/hdfs-site.xml (from r1182282, james/mailbox/trunk/hbase/src/test/resources/hdfs-default.xml)
URL: http://svn.apache.org/viewvc/james/mailbox/trunk/hbase/src/test/resources/hdfs-site.xml?p2=james/mailbox/trunk/hbase/src/test/resources/hdfs-site.xml&p1=james/mailbox/trunk/hbase/src/test/resources/hdfs-default.xml&r1=1182282&r2=1182352&rev=1182352&view=diff
==============================================================================
--- james/mailbox/trunk/hbase/src/test/resources/hdfs-default.xml (original)
+++ james/mailbox/trunk/hbase/src/test/resources/hdfs-site.xml Wed Oct 12 13:20:51 2011
@@ -1,179 +1,11 @@
 <?xml version="1.0"?>
 <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
 
-<!-- Do not modify this file directly.  Instead, copy entries that you -->
-<!-- wish to modify from this file into hdfs-site.xml and change them -->
-<!-- there.  If hdfs-site.xml does not already exist, create it.      -->
-
 <configuration>
 
 <property>
-  <name>dfs.namenode.logging.level</name>
-  <value>info</value>
-  <description>The logging level for dfs namenode. Other values are "dir"(trac
-e namespace mutations), "block"(trace block under/over replications and block
-creations/deletions), or "all".</description>
-</property>
-
-<property>
-  <name>dfs.secondary.http.address</name>
-  <value>0.0.0.0:50090</value>
-  <description>
-    The secondary namenode http server address and port.
-    If the port is 0 then the server will start on a free port.
-  </description>
-</property>
-
-<property>
-  <name>dfs.datanode.address</name>
-  <value>0.0.0.0:50010</value>
-  <description>
-    The address where the datanode server will listen to.
-    If the port is 0 then the server will start on a free port.
-  </description>
-</property>
-
-<property>
-  <name>dfs.datanode.http.address</name>
-  <value>0.0.0.0:50075</value>
-  <description>
-    The datanode http server address and port.
-    If the port is 0 then the server will start on a free port.
-  </description>
-</property>
-
-<property>
-  <name>dfs.datanode.ipc.address</name>
-  <value>0.0.0.0:50020</value>
-  <description>
-    The datanode ipc server address and port.
-    If the port is 0 then the server will start on a free port.
-  </description>
-</property>
-
-<property>
-  <name>dfs.datanode.handler.count</name>
-  <value>3</value>
-  <description>The number of server threads for the datanode.</description>
-</property>
-
-<property>
-  <name>dfs.http.address</name>
-  <value>0.0.0.0:50070</value>
-  <description>
-    The address and the base port where the dfs namenode web ui will listen on.
-    If the port is 0 then the server will start on a free port.
-  </description>
-</property>
-
-<property>
-  <name>dfs.https.enable</name>
-  <value>false</value>
-  <description>Decide if HTTPS(SSL) is supported on HDFS
-  </description>
-</property>
-
-<property>
-  <name>dfs.https.need.client.auth</name>
-  <value>false</value>
-  <description>Whether SSL client certificate authentication is required
-  </description>
-</property>
-
-<property>
-  <name>dfs.https.server.keystore.resource</name>
-  <value>ssl-server.xml</value>
-  <description>Resource file from which ssl server keystore
-  information will be extracted
-  </description>
-</property>
-
-<property>
-  <name>dfs.https.client.keystore.resource</name>
-  <value>ssl-client.xml</value>
-  <description>Resource file from which ssl client keystore
-  information will be extracted
-  </description>
-</property>
-
-<property>
-  <name>dfs.datanode.https.address</name>
-  <value>0.0.0.0:50475</value>
-</property>
-
-<property>
-  <name>dfs.https.address</name>
-  <value>0.0.0.0:50470</value>
-</property>
-
- <property>
-  <name>dfs.datanode.dns.interface</name>
-  <value>default</value>
-  <description>The name of the Network Interface from which a data node should 
-  report its IP address.
-  </description>
- </property>
- 
-<property>
-  <name>dfs.datanode.dns.nameserver</name>
-  <value>default</value>
-  <description>The host name or IP address of the name server (DNS)
-  which a DataNode should use to determine the host name used by the
-  NameNode for communication and display purposes.
-  </description>
- </property>
- 
- 
- 
-<property>
-  <name>dfs.replication.considerLoad</name>
-  <value>true</value>
-  <description>Decide if chooseTarget considers the target's load or not
-  </description>
-</property>
-<property>
-  <name>dfs.default.chunk.view.size</name>
-  <value>32768</value>
-  <description>The number of bytes to view for a file on the browser.
-  </description>
-</property>
-
-<property>
-  <name>dfs.datanode.du.reserved</name>
-  <value>0</value>
-  <description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
-  </description>
-</property>
-
-<property>
-  <name>dfs.name.dir</name>
-  <value>${hadoop.tmp.dir}/dfs/name</value>
-  <description>Determines where on the local filesystem the DFS name node
-      should store the name table(fsimage).  If this is a comma-delimited list
-      of directories then the name table is replicated in all of the
-      directories, for redundancy. </description>
-</property>
-
-<property>
-  <name>dfs.name.edits.dir</name>
-  <value>${dfs.name.dir}</value>
-  <description>Determines where on the local filesystem the DFS name node
-      should store the transaction (edits) file. If this is a comma-delimited list
-      of directories then the transaction file is replicated in all of the 
-      directories, for redundancy. Default value is same as dfs.name.dir
-  </description>
-</property>
-<property>
-  <name>dfs.web.ugi</name>
-  <value>webuser,webgroup</value>
-  <description>The user account used by the web interface.
-    Syntax: USERNAME,GROUP1,GROUP2, ...
-  </description>
-</property>
-
-<property>
   <name>dfs.permissions</name>
-  <value>true</value>
+  <value>false</value>
   <description>
     If "true", enable permission checking in HDFS.
     If "false", permission checking is turned off,
@@ -184,181 +16,12 @@ creations/deletions), or "all".</descrip
 </property>
 
 <property>
-  <name>dfs.permissions.supergroup</name>
-  <value>supergroup</value>
-  <description>The name of the group of super-users.</description>
-</property>
-
-<property>
-  <name>dfs.data.dir</name>
-  <value>${hadoop.tmp.dir}/dfs/data</value>
-  <description>Determines where on the local filesystem an DFS data node
-  should store its blocks.  If this is a comma-delimited
-  list of directories, then data will be stored in all named
-  directories, typically on different devices.
-  Directories that do not exist are ignored.
-  </description>
-</property>
-
-<property>
   <name>dfs.datanode.data.dir.perm</name>
   <value>775</value>
   <description>Permissions for the directories on on the local filesystem where
   the DFS data node store its blocks. The permissions can either be octal or symbolic.
   </description>
 </property>
-<property>
-  <name>dfs.replication</name>
-  <value>3</value>
-  <description>Default block replication. 
-  The actual number of replications can be specified when the file is created.
-  The default is used if replication is not specified in create time.
-  </description>
-</property>
-
-<property>
-  <name>dfs.replication.max</name>
-  <value>512</value>
-  <description>Maximal block replication. 
-  </description>
-</property>
-
-<property>
-  <name>dfs.replication.min</name>
-  <value>1</value>
-  <description>Minimal block replication. 
-  </description>
-</property>
-
-<property>
-  <name>dfs.block.size</name>
-  <value>67108864</value>
-  <description>The default block size for new files.</description>
-</property>
-
-<property>
-  <name>dfs.df.interval</name>
-  <value>60000</value>
-  <description>Disk usage statistics refresh interval in msec.</description>
-</property>
-
-<property>
-  <name>dfs.client.block.write.retries</name>
-  <value>3</value>
-  <description>The number of retries for writing blocks to the data nodes, 
-  before we signal failure to the application.
-  </description>
-</property>
-
-<property>
-  <name>dfs.blockreport.intervalMsec</name>
-  <value>3600000</value>
-  <description>Determines block reporting interval in milliseconds.</description>
-</property>
-
-<property>
-  <name>dfs.blockreport.initialDelay</name>  <value>0</value>
-  <description>Delay for first block report in seconds.</description>
-</property>
-
-<property>
-  <name>dfs.heartbeat.interval</name>
-  <value>3</value>
-  <description>Determines datanode heartbeat interval in seconds.</description>
-</property>
-
-<property>
-  <name>dfs.namenode.handler.count</name>
-  <value>10</value>
-  <description>The number of server threads for the namenode.</description>
-</property>
-
-<property>
-  <name>dfs.safemode.threshold.pct</name>
-  <value>0.999f</value>
-  <description>
-    Specifies the percentage of blocks that should satisfy 
-    the minimal replication requirement defined by dfs.replication.min.
-    Values less than or equal to 0 mean not to start in safe mode.
-    Values greater than 1 will make safe mode permanent.
-  </description>
-</property>
-
-<property>
-  <name>dfs.safemode.extension</name>
-  <value>30000</value>
-  <description>
-    Determines extension of safe mode in milliseconds 
-    after the threshold level is reached.
-  </description>
-</property>
-
-<property>
-  <name>dfs.balance.bandwidthPerSec</name>
-  <value>1048576</value>
-  <description>
-        Specifies the maximum amount of bandwidth that each datanode
-        can utilize for the balancing purpose in term of
-        the number of bytes per second.
-  </description>
-</property>
-
-<property>
-  <name>dfs.hosts</name>
-  <value></value>
-  <description>Names a file that contains a list of hosts that are
-  permitted to connect to the namenode. The full pathname of the file
-  must be specified.  If the value is empty, all hosts are
-  permitted.</description>
-</property>
-
-<property>
-  <name>dfs.hosts.exclude</name>
-  <value></value>
-  <description>Names a file that contains a list of hosts that are
-  not permitted to connect to the namenode.  The full pathname of the
-  file must be specified.  If the value is empty, no hosts are
-  excluded.</description>
-</property> 
-
-<property>
-  <name>dfs.max.objects</name>
-  <value>0</value>
-  <description>The maximum number of files, directories and blocks
-  dfs supports. A value of zero indicates no limit to the number
-  of objects that dfs supports.
-  </description>
-</property>
-
-<property>
-  <name>dfs.namenode.decommission.interval</name>
-  <value>30</value>
-  <description>Namenode periodicity in seconds to check if decommission is 
-  complete.</description>
-</property>
-
-<property>
-  <name>dfs.namenode.decommission.nodes.per.interval</name>
-  <value>5</value>
-  <description>The number of nodes namenode checks if decommission is complete
-  in each dfs.namenode.decommission.interval.</description>
-</property>
-
-<property>
-  <name>dfs.replication.interval</name>
-  <value>3</value>
-  <description>The periodicity in seconds with which the namenode computes 
-  repliaction work for datanodes. </description>
-</property>
-
-<property>
-  <name>dfs.access.time.precision</name>
-  <value>3600000</value>
-  <description>The access time for HDFS file is precise upto this value. 
-               The default value is 1 hour. Setting a value of 0 disables
-               access times for HDFS.
-  </description>
-</property>
 
 <property>
   <name>dfs.support.append</name>

Copied: james/mailbox/trunk/spring/src/test/resources/hdfs-site.xml (from r1182284, james/mailbox/trunk/spring/src/test/resources/hdfs-default.xml)
URL: http://svn.apache.org/viewvc/james/mailbox/trunk/spring/src/test/resources/hdfs-site.xml?p2=james/mailbox/trunk/spring/src/test/resources/hdfs-site.xml&p1=james/mailbox/trunk/spring/src/test/resources/hdfs-default.xml&r1=1182284&r2=1182352&rev=1182352&view=diff
==============================================================================
--- james/mailbox/trunk/spring/src/test/resources/hdfs-default.xml (original)
+++ james/mailbox/trunk/spring/src/test/resources/hdfs-site.xml Wed Oct 12 13:20:51 2011
@@ -1,179 +1,11 @@
 <?xml version="1.0"?>
 <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
 
-<!-- Do not modify this file directly.  Instead, copy entries that you -->
-<!-- wish to modify from this file into hdfs-site.xml and change them -->
-<!-- there.  If hdfs-site.xml does not already exist, create it.      -->
-
 <configuration>
 
 <property>
-  <name>dfs.namenode.logging.level</name>
-  <value>info</value>
-  <description>The logging level for dfs namenode. Other values are "dir"(trac
-e namespace mutations), "block"(trace block under/over replications and block
-creations/deletions), or "all".</description>
-</property>
-
-<property>
-  <name>dfs.secondary.http.address</name>
-  <value>0.0.0.0:50090</value>
-  <description>
-    The secondary namenode http server address and port.
-    If the port is 0 then the server will start on a free port.
-  </description>
-</property>
-
-<property>
-  <name>dfs.datanode.address</name>
-  <value>0.0.0.0:50010</value>
-  <description>
-    The address where the datanode server will listen to.
-    If the port is 0 then the server will start on a free port.
-  </description>
-</property>
-
-<property>
-  <name>dfs.datanode.http.address</name>
-  <value>0.0.0.0:50075</value>
-  <description>
-    The datanode http server address and port.
-    If the port is 0 then the server will start on a free port.
-  </description>
-</property>
-
-<property>
-  <name>dfs.datanode.ipc.address</name>
-  <value>0.0.0.0:50020</value>
-  <description>
-    The datanode ipc server address and port.
-    If the port is 0 then the server will start on a free port.
-  </description>
-</property>
-
-<property>
-  <name>dfs.datanode.handler.count</name>
-  <value>3</value>
-  <description>The number of server threads for the datanode.</description>
-</property>
-
-<property>
-  <name>dfs.http.address</name>
-  <value>0.0.0.0:50070</value>
-  <description>
-    The address and the base port where the dfs namenode web ui will listen on.
-    If the port is 0 then the server will start on a free port.
-  </description>
-</property>
-
-<property>
-  <name>dfs.https.enable</name>
-  <value>false</value>
-  <description>Decide if HTTPS(SSL) is supported on HDFS
-  </description>
-</property>
-
-<property>
-  <name>dfs.https.need.client.auth</name>
-  <value>false</value>
-  <description>Whether SSL client certificate authentication is required
-  </description>
-</property>
-
-<property>
-  <name>dfs.https.server.keystore.resource</name>
-  <value>ssl-server.xml</value>
-  <description>Resource file from which ssl server keystore
-  information will be extracted
-  </description>
-</property>
-
-<property>
-  <name>dfs.https.client.keystore.resource</name>
-  <value>ssl-client.xml</value>
-  <description>Resource file from which ssl client keystore
-  information will be extracted
-  </description>
-</property>
-
-<property>
-  <name>dfs.datanode.https.address</name>
-  <value>0.0.0.0:50475</value>
-</property>
-
-<property>
-  <name>dfs.https.address</name>
-  <value>0.0.0.0:50470</value>
-</property>
-
- <property>
-  <name>dfs.datanode.dns.interface</name>
-  <value>default</value>
-  <description>The name of the Network Interface from which a data node should 
-  report its IP address.
-  </description>
- </property>
- 
-<property>
-  <name>dfs.datanode.dns.nameserver</name>
-  <value>default</value>
-  <description>The host name or IP address of the name server (DNS)
-  which a DataNode should use to determine the host name used by the
-  NameNode for communication and display purposes.
-  </description>
- </property>
- 
- 
- 
-<property>
-  <name>dfs.replication.considerLoad</name>
-  <value>true</value>
-  <description>Decide if chooseTarget considers the target's load or not
-  </description>
-</property>
-<property>
-  <name>dfs.default.chunk.view.size</name>
-  <value>32768</value>
-  <description>The number of bytes to view for a file on the browser.
-  </description>
-</property>
-
-<property>
-  <name>dfs.datanode.du.reserved</name>
-  <value>0</value>
-  <description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
-  </description>
-</property>
-
-<property>
-  <name>dfs.name.dir</name>
-  <value>${hadoop.tmp.dir}/dfs/name</value>
-  <description>Determines where on the local filesystem the DFS name node
-      should store the name table(fsimage).  If this is a comma-delimited list
-      of directories then the name table is replicated in all of the
-      directories, for redundancy. </description>
-</property>
-
-<property>
-  <name>dfs.name.edits.dir</name>
-  <value>${dfs.name.dir}</value>
-  <description>Determines where on the local filesystem the DFS name node
-      should store the transaction (edits) file. If this is a comma-delimited list
-      of directories then the transaction file is replicated in all of the 
-      directories, for redundancy. Default value is same as dfs.name.dir
-  </description>
-</property>
-<property>
-  <name>dfs.web.ugi</name>
-  <value>webuser,webgroup</value>
-  <description>The user account used by the web interface.
-    Syntax: USERNAME,GROUP1,GROUP2, ...
-  </description>
-</property>
-
-<property>
   <name>dfs.permissions</name>
-  <value>true</value>
+  <value>false</value>
   <description>
     If "true", enable permission checking in HDFS.
     If "false", permission checking is turned off,
@@ -184,181 +16,12 @@ creations/deletions), or "all".</descrip
 </property>
 
 <property>
-  <name>dfs.permissions.supergroup</name>
-  <value>supergroup</value>
-  <description>The name of the group of super-users.</description>
-</property>
-
-<property>
-  <name>dfs.data.dir</name>
-  <value>${hadoop.tmp.dir}/dfs/data</value>
-  <description>Determines where on the local filesystem an DFS data node
-  should store its blocks.  If this is a comma-delimited
-  list of directories, then data will be stored in all named
-  directories, typically on different devices.
-  Directories that do not exist are ignored.
-  </description>
-</property>
-
-<property>
   <name>dfs.datanode.data.dir.perm</name>
   <value>775</value>
   <description>Permissions for the directories on on the local filesystem where
   the DFS data node store its blocks. The permissions can either be octal or symbolic.
   </description>
 </property>
-<property>
-  <name>dfs.replication</name>
-  <value>3</value>
-  <description>Default block replication. 
-  The actual number of replications can be specified when the file is created.
-  The default is used if replication is not specified in create time.
-  </description>
-</property>
-
-<property>
-  <name>dfs.replication.max</name>
-  <value>512</value>
-  <description>Maximal block replication. 
-  </description>
-</property>
-
-<property>
-  <name>dfs.replication.min</name>
-  <value>1</value>
-  <description>Minimal block replication. 
-  </description>
-</property>
-
-<property>
-  <name>dfs.block.size</name>
-  <value>67108864</value>
-  <description>The default block size for new files.</description>
-</property>
-
-<property>
-  <name>dfs.df.interval</name>
-  <value>60000</value>
-  <description>Disk usage statistics refresh interval in msec.</description>
-</property>
-
-<property>
-  <name>dfs.client.block.write.retries</name>
-  <value>3</value>
-  <description>The number of retries for writing blocks to the data nodes, 
-  before we signal failure to the application.
-  </description>
-</property>
-
-<property>
-  <name>dfs.blockreport.intervalMsec</name>
-  <value>3600000</value>
-  <description>Determines block reporting interval in milliseconds.</description>
-</property>
-
-<property>
-  <name>dfs.blockreport.initialDelay</name>  <value>0</value>
-  <description>Delay for first block report in seconds.</description>
-</property>
-
-<property>
-  <name>dfs.heartbeat.interval</name>
-  <value>3</value>
-  <description>Determines datanode heartbeat interval in seconds.</description>
-</property>
-
-<property>
-  <name>dfs.namenode.handler.count</name>
-  <value>10</value>
-  <description>The number of server threads for the namenode.</description>
-</property>
-
-<property>
-  <name>dfs.safemode.threshold.pct</name>
-  <value>0.999f</value>
-  <description>
-    Specifies the percentage of blocks that should satisfy 
-    the minimal replication requirement defined by dfs.replication.min.
-    Values less than or equal to 0 mean not to start in safe mode.
-    Values greater than 1 will make safe mode permanent.
-  </description>
-</property>
-
-<property>
-  <name>dfs.safemode.extension</name>
-  <value>30000</value>
-  <description>
-    Determines extension of safe mode in milliseconds 
-    after the threshold level is reached.
-  </description>
-</property>
-
-<property>
-  <name>dfs.balance.bandwidthPerSec</name>
-  <value>1048576</value>
-  <description>
-        Specifies the maximum amount of bandwidth that each datanode
-        can utilize for the balancing purpose in term of
-        the number of bytes per second.
-  </description>
-</property>
-
-<property>
-  <name>dfs.hosts</name>
-  <value></value>
-  <description>Names a file that contains a list of hosts that are
-  permitted to connect to the namenode. The full pathname of the file
-  must be specified.  If the value is empty, all hosts are
-  permitted.</description>
-</property>
-
-<property>
-  <name>dfs.hosts.exclude</name>
-  <value></value>
-  <description>Names a file that contains a list of hosts that are
-  not permitted to connect to the namenode.  The full pathname of the
-  file must be specified.  If the value is empty, no hosts are
-  excluded.</description>
-</property> 
-
-<property>
-  <name>dfs.max.objects</name>
-  <value>0</value>
-  <description>The maximum number of files, directories and blocks
-  dfs supports. A value of zero indicates no limit to the number
-  of objects that dfs supports.
-  </description>
-</property>
-
-<property>
-  <name>dfs.namenode.decommission.interval</name>
-  <value>30</value>
-  <description>Namenode periodicity in seconds to check if decommission is 
-  complete.</description>
-</property>
-
-<property>
-  <name>dfs.namenode.decommission.nodes.per.interval</name>
-  <value>5</value>
-  <description>The number of nodes namenode checks if decommission is complete
-  in each dfs.namenode.decommission.interval.</description>
-</property>
-
-<property>
-  <name>dfs.replication.interval</name>
-  <value>3</value>
-  <description>The periodicity in seconds with which the namenode computes 
-  repliaction work for datanodes. </description>
-</property>
-
-<property>
-  <name>dfs.access.time.precision</name>
-  <value>3600000</value>
-  <description>The access time for HDFS file is precise upto this value. 
-               The default value is 1 hour. Setting a value of 0 disables
-               access times for HDFS.
-  </description>
-</property>
 
 <property>
   <name>dfs.support.append</name>


