Posted to mapreduce-commits@hadoop.apache.org by st...@apache.org on 2009/11/28 21:26:22 UTC

svn commit: r885145 [14/34] - in /hadoop/mapreduce/branches/MAPREDUCE-233: ./ .eclipse.templates/ .eclipse.templates/.launches/ conf/ ivy/ lib/ src/benchmarks/gridmix/ src/benchmarks/gridmix/pipesort/ src/benchmarks/gridmix2/ src/benchmarks/gridmix2/sr...

Modified: hadoop/mapreduce/branches/MAPREDUCE-233/src/java/mapred-default.xml
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/branches/MAPREDUCE-233/src/java/mapred-default.xml?rev=885145&r1=885144&r2=885145&view=diff
==============================================================================
--- hadoop/mapreduce/branches/MAPREDUCE-233/src/java/mapred-default.xml (original)
+++ hadoop/mapreduce/branches/MAPREDUCE-233/src/java/mapred-default.xml Sat Nov 28 20:26:01 2009
@@ -8,7 +8,7 @@
 <configuration>
 
 <property>
-  <name>hadoop.job.history.location</name>
+  <name>mapreduce.jobtracker.jobhistory.location</name>
   <value></value>
   <description> If job tracker is static the history files are stored 
   in this single well known place. If no value is set here, by default,
@@ -17,7 +17,7 @@
 </property>
 
 <property>
-  <name>hadoop.job.history.user.location</name>
+  <name>mapreduce.job.userhistorylocation</name>
   <value></value>
   <description> User can specify a location to store the history files of 
   a particular job. If nothing is specified, the logs are stored in 
@@ -27,16 +27,16 @@
 </property>
 
 <property>
-  <name>mapred.job.tracker.history.completed.location</name>
+  <name>mapreduce.jobtracker.jobhistory.completed.location</name>
   <value></value>
   <description> The completed job history files are stored at this single well 
   known location. If nothing is specified, the files are stored at 
-  ${hadoop.job.history.location}/done.
+  ${mapreduce.jobtracker.jobhistory.location}/done.
   </description>
 </property>
 
 <property>
-  <name>mapred.committer.job.setup.cleanup.needed</name>
+  <name>mapreduce.job.committer.setup.cleanup.needed</name>
   <value>true</value>
   <description> true, if job needs job-setup and job-cleanup.
                 false, otherwise  
@@ -45,14 +45,14 @@
 <!-- i/o properties -->
 
 <property>
-  <name>io.sort.factor</name>
+  <name>mapreduce.task.io.sort.factor</name>
   <value>10</value>
   <description>The number of streams to merge at once while sorting
   files.  This determines the number of open file handles.</description>
 </property>
 
 <property>
-  <name>io.sort.mb</name>
+  <name>mapreduce.task.io.sort.mb</name>
   <value>100</value>
   <description>The total amount of buffer memory to use while sorting 
   files, in megabytes.  By default, gives each merge stream 1MB, which
@@ -60,16 +60,17 @@
 </property>
 
 <property>
-  <name>io.sort.record.percent</name>
+  <name>mapreduce.map.sort.record.percent</name>
   <value>0.05</value>
-  <description>The percentage of io.sort.mb dedicated to tracking record
-  boundaries. Let this value be r, io.sort.mb be x. The maximum number
+  <description>The percentage of mapreduce.task.io.sort.mb dedicated to 
+  tracking record boundaries. Let this value be r, 
+  mapreduce.task.io.sort.mb be x. The maximum number
   of records collected before the collection thread must block is equal
   to (r * x) / 4</description>
 </property>
 
 <property>
-  <name>io.sort.spill.percent</name>
+  <name>mapreduce.map.sort.spill.percent</name>
   <value>0.80</value>
   <description>The soft limit in either the buffer or record collection
   buffers. Once reached, a thread will begin to spill the contents to disk
@@ -78,15 +79,7 @@
 </property>
 
 <property>
-  <name>io.map.index.skip</name>
-  <value>0</value>
-  <description>Number of index entries to skip between each entry.
-  Zero by default. Setting this to values larger than zero can
-  facilitate opening large map files using less memory.</description>
-</property>
-
-<property>
-  <name>mapred.job.tracker</name>
+  <name>mapreduce.jobtracker.address</name>
   <value>local</value>
   <description>The host and port that the MapReduce job tracker runs
   at.  If "local", then jobs are run in-process as a single map
@@ -95,7 +88,7 @@
 </property>
 
 <property>
-  <name>mapred.job.tracker.http.address</name>
+  <name>mapreduce.jobtracker.http.address</name>
   <value>0.0.0.0:50030</value>
   <description>
     The job tracker http server address and port the server will listen on.
@@ -104,7 +97,7 @@
 </property>
 
 <property>
-  <name>mapred.job.tracker.handler.count</name>
+  <name>mapreduce.jobtracker.handler.count</name>
   <value>10</value>
   <description>
     The number of server threads for the JobTracker. This should be roughly
@@ -113,7 +106,7 @@
 </property>
 
 <property>
-  <name>mapred.task.tracker.report.address</name>
+  <name>mapreduce.tasktracker.report.address</name>
   <value>127.0.0.1:0</value>
   <description>The interface and port that task tracker server listens on. 
   Since it is only connected to by the tasks, it uses the local interface.
@@ -122,7 +115,7 @@
 </property>
 
 <property>
-  <name>mapred.local.dir</name>
+  <name>mapreduce.cluster.local.dir</name>
   <value>${hadoop.tmp.dir}/mapred/local</value>
   <description>The local directory where MapReduce stores intermediate
   data files.  May be a comma-separated list of
@@ -132,32 +125,32 @@
 </property>
 
 <property>
-  <name>mapred.system.dir</name>
+  <name>mapreduce.jobtracker.system.dir</name>
   <value>${hadoop.tmp.dir}/mapred/system</value>
   <description>The shared directory where MapReduce stores control files.
   </description>
 </property>
 
 <property>
-  <name>mapred.temp.dir</name>
+  <name>mapreduce.cluster.temp.dir</name>
   <value>${hadoop.tmp.dir}/mapred/temp</value>
   <description>A shared directory for temporary files.
   </description>
 </property>
 
 <property>
-  <name>mapred.local.dir.minspacestart</name>
+  <name>mapreduce.tasktracker.local.dir.minspacestart</name>
   <value>0</value>
-  <description>If the space in mapred.local.dir drops under this, 
+  <description>If the space in mapreduce.cluster.local.dir drops under this, 
   do not ask for more tasks.
   Value in bytes.
   </description>
 </property>
 
 <property>
-  <name>mapred.local.dir.minspacekill</name>
+  <name>mapreduce.tasktracker.local.dir.minspacekill</name>
   <value>0</value>
-  <description>If the space in mapred.local.dir drops under this, 
+  <description>If the space in mapreduce.cluster.local.dir drops under this, 
    do not ask for more tasks until all the current ones have finished and 
     cleaned up. Also, to save the rest of the tasks we have running, 
     kill one of them, to clean up some space. Start with the reduce tasks,
@@ -167,7 +160,7 @@
 </property>
 
 <property>
-  <name>mapred.tasktracker.expiry.interval</name>
+  <name>mapreduce.jobtracker.expire.trackers.interval</name>
   <value>600000</value>
   <description>Expert: The time-interval, in milliseconds, after which
   a tasktracker is declared 'lost' if it doesn't send heartbeats.
@@ -175,14 +168,14 @@
 </property>
 
 <property>
-  <name>mapred.tasktracker.instrumentation</name>
+  <name>mapreduce.tasktracker.instrumentation</name>
   <value>org.apache.hadoop.mapred.TaskTrackerMetricsInst</value>
   <description>Expert: The instrumentation class to associate with each TaskTracker.
   </description>
 </property>
 
 <property>
-  <name>mapred.tasktracker.memory_calculator_plugin</name>
+  <name>mapreduce.tasktracker.memorycalculatorplugin</name>
   <value></value>
   <description>
    Name of the class whose instance will be used to query memory information
@@ -196,7 +189,7 @@
 </property>
 
 <property>
-  <name>mapred.tasktracker.taskmemorymanager.monitoring-interval</name>
+  <name>mapreduce.tasktracker.taskmemorymanager.monitoringinterval</name>
   <value>5000</value>
   <description>The interval, in milliseconds, for which the tasktracker waits
    between two cycles of monitoring its tasks' memory usage. Used only if
@@ -205,7 +198,7 @@
 </property>
 
 <property>
-  <name>mapred.tasktracker.tasks.sleeptime-before-sigkill</name>
+  <name>mapreduce.tasktracker.tasks.sleeptimebeforesigkill</name>
   <value>5000</value>
   <description>The time, in milliseconds, the tasktracker waits before sending a
   SIGKILL to a task, after it has been sent a SIGTERM. This is currently
@@ -214,25 +207,25 @@
 </property>
 
 <property>
-  <name>mapred.map.tasks</name>
+  <name>mapreduce.job.maps</name>
   <value>2</value>
   <description>The default number of map tasks per job.
-  Ignored when mapred.job.tracker is "local".  
+  Ignored when mapreduce.jobtracker.address is "local".  
   </description>
 </property>
 
 <property>
-  <name>mapred.reduce.tasks</name>
+  <name>mapreduce.job.reduces</name>
   <value>1</value>
   <description>The default number of reduce tasks per job. Typically set to 99%
   of the cluster's reduce capacity, so that if a node fails the reduces can 
   still be executed in a single wave.
-  Ignored when mapred.job.tracker is "local".
+  Ignored when mapreduce.jobtracker.address is "local".
   </description>
 </property>
 
 <property>
-  <name>mapred.jobtracker.restart.recover</name>
+  <name>mapreduce.jobtracker.restart.recover</name>
   <value>false</value>
   <description>"true" to enable (job) recovery upon restart,
                "false" to start afresh
@@ -240,7 +233,7 @@
 </property>
 
 <property>
-  <name>mapred.jobtracker.job.history.block.size</name>
+  <name>mapreduce.jobtracker.jobhistory.block.size</name>
   <value>3145728</value>
   <description>The block size of the job history file. Since the job recovery
                uses job history, it's important to dump job history to disk as 
@@ -250,13 +243,13 @@
 </property>
 
 <property>
-  <name>mapred.jobtracker.taskScheduler</name>
+  <name>mapreduce.jobtracker.taskscheduler</name>
   <value>org.apache.hadoop.mapred.JobQueueTaskScheduler</value>
   <description>The class responsible for scheduling the tasks.</description>
 </property>
 
 <property>
-  <name>mapred.jobtracker.taskScheduler.maxRunningTasksPerJob</name>
+  <name>mapreduce.jobtracker.taskscheduler.maxrunningtasks.perjob</name>
   <value></value>
   <description>The maximum number of running tasks for a job before
   it gets preempted. No limits if undefined.
@@ -264,7 +257,7 @@
 </property>
 
 <property>
-  <name>mapred.map.max.attempts</name>
+  <name>mapreduce.map.maxattempts</name>
   <value>4</value>
   <description>Expert: The maximum number of attempts per map task.
   In other words, framework will try to execute a map task these many number
@@ -273,7 +266,7 @@
 </property>
 
 <property>
-  <name>mapred.reduce.max.attempts</name>
+  <name>mapreduce.reduce.maxattempts</name>
   <value>4</value>
   <description>Expert: The maximum number of attempts per reduce task.
   In other words, framework will try to execute a reduce task these many number
@@ -282,7 +275,7 @@
 </property>
 
 <property>
-  <name>mapred.reduce.parallel.copies</name>
+  <name>mapreduce.reduce.shuffle.parallelcopies</name>
   <value>5</value>
   <description>The default number of parallel transfers run by reduce
   during the copy(shuffle) phase.
@@ -290,15 +283,7 @@
 </property>
 
 <property>
-  <name>mapred.reduce.copy.backoff</name>
-  <value>300</value>
-  <description>The maximum amount of time (in seconds) a reducer spends on 
-  fetching one map output before declaring it as failed.
-  </description>
-</property>
-
-<property>
-  <name>mapred.shuffle.connect.timeout</name>
+  <name>mapreduce.reduce.shuffle.connect.timeout</name>
   <value>180000</value>
   <description>Expert: Cluster-wide configuration. The maximum amount of
   time (in milliseconds) a reduce task spends in trying to connect to a
@@ -307,7 +292,7 @@
 </property>
 
 <property>
-  <name>mapred.shuffle.read.timeout</name>
+  <name>mapreduce.reduce.shuffle.read.timeout</name>
   <value>30000</value>
   <description>Expert: Cluster-wide configuration. The maximum amount of time
   (in milliseconds) a reduce task waits for map output data to be available
@@ -316,7 +301,7 @@
 </property>
 
 <property>
-  <name>mapred.task.timeout</name>
+  <name>mapreduce.task.timeout</name>
   <value>600000</value>
   <description>The number of milliseconds before a task will be
   terminated if it neither reads an input, writes an output, nor
@@ -325,7 +310,7 @@
 </property>
 
 <property>
-  <name>mapred.tasktracker.map.tasks.maximum</name>
+  <name>mapreduce.tasktracker.map.tasks.maximum</name>
   <value>2</value>
   <description>The maximum number of map tasks that will be run
   simultaneously by a task tracker.
@@ -333,7 +318,7 @@
 </property>
 
 <property>
-  <name>mapred.tasktracker.reduce.tasks.maximum</name>
+  <name>mapreduce.tasktracker.reduce.tasks.maximum</name>
   <value>2</value>
   <description>The maximum number of reduce tasks that will be run
   simultaneously by a task tracker.
@@ -341,14 +326,22 @@
 </property>
 
 <property>
-  <name>mapred.job.tracker.retiredjobs.cache.size</name>
+  <name>mapreduce.jobtracker.retiredjobs.cache.size</name>
   <value>1000</value>
   <description>The number of retired job statuses to keep in the cache.
   </description>
 </property>
 
 <property>
-  <name>mapred.job.tracker.jobhistory.lru.cache.size</name>
+  <name>mapreduce.tasktracker.outofband.heartbeat</name>
+  <value>false</value>
+  <description>Expert: Set this to true to let the tasktracker send an 
+  out-of-band heartbeat on task-completion for better latency.
+  </description>
+</property>
+
+<property>
+  <name>mapreduce.jobtracker.jobhistory.lru.cache.size</name>
   <value>5</value>
   <description>The number of job history files loaded in memory. The jobs are 
   loaded when they are first accessed. The cache is cleared based on LRU.
@@ -356,7 +349,7 @@
 </property>
 
 <property>
-  <name>mapred.jobtracker.instrumentation</name>
+  <name>mapreduce.jobtracker.instrumentation</name>
   <value>org.apache.hadoop.mapred.JobTrackerMetricsInst</value>
   <description>Expert: The instrumentation class to associate with each JobTracker.
   </description>
@@ -402,7 +395,7 @@
 </property>
 
 <property>
-  <name>mapred.child.tmp</name>
+  <name>mapreduce.task.tmp.dir</name>
   <value>./tmp</value>
   <description> To set the value of tmp directory for map and reduce tasks.
   If the value is an absolute path, it is directly assigned. Otherwise, it is
@@ -414,7 +407,7 @@
 </property>
 
 <property>
-  <name>mapred.map.child.log.level</name>
+  <name>mapreduce.map.log.level</name>
   <value>INFO</value>
   <description>The logging level for the map task. The allowed levels are:
   OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
@@ -422,7 +415,7 @@
 </property>
 
 <property>
-  <name>mapred.reduce.child.log.level</name>
+  <name>mapreduce.reduce.log.level</name>
   <value>INFO</value>
   <description>The logging level for the reduce task. The allowed levels are:
   OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
@@ -430,7 +423,7 @@
 </property>
 
 <property>
-  <name>mapred.inmem.merge.threshold</name>
+  <name>mapreduce.reduce.merge.inmem.threshold</name>
   <value>1000</value>
   <description>The threshold, in terms of the number of files 
   for the in-memory merge process. When we accumulate threshold number of files
@@ -441,17 +434,17 @@
 </property>
 
 <property>
-  <name>mapred.job.shuffle.merge.percent</name>
+  <name>mapreduce.reduce.shuffle.merge.percent</name>
   <value>0.66</value>
   <description>The usage threshold at which an in-memory merge will be
   initiated, expressed as a percentage of the total memory allocated to
   storing in-memory map outputs, as defined by
-  mapred.job.shuffle.input.buffer.percent.
+  mapreduce.reduce.shuffle.input.buffer.percent.
   </description>
 </property>
 
 <property>
-  <name>mapred.job.shuffle.input.buffer.percent</name>
+  <name>mapreduce.reduce.shuffle.input.buffer.percent</name>
   <value>0.70</value>
   <description>The percentage of memory to be allocated from the maximum heap
   size to storing map outputs during the shuffle.
@@ -459,7 +452,7 @@
 </property>
 
 <property>
-  <name>mapred.job.reduce.input.buffer.percent</name>
+  <name>mapreduce.reduce.input.buffer.percent</name>
   <value>0.0</value>
   <description>The percentage of memory, relative to the maximum heap size, to
   retain map outputs during the reduce. When the shuffle is concluded, any
@@ -469,7 +462,7 @@
 </property>
 
 <property>
-  <name>mapred.job.reduce.markreset.buffer.percent</name>
+  <name>mapreduce.reduce.markreset.buffer.percent</name>
   <value>0.0</value>
   <description>The percentage of memory, relative to the maximum heap size, to
   be used for caching values when using the mark-reset functionality.
@@ -477,27 +470,27 @@
 </property>
 
 <property>
-  <name>mapred.map.tasks.speculative.execution</name>
+  <name>mapreduce.map.speculative</name>
   <value>true</value>
   <description>If true, then multiple instances of some map tasks 
                may be executed in parallel.</description>
 </property>
 
 <property>
-  <name>mapred.reduce.tasks.speculative.execution</name>
+  <name>mapreduce.reduce.speculative</name>
   <value>true</value>
   <description>If true, then multiple instances of some reduce tasks 
                may be executed in parallel.</description>
 </property>
 <property>
-  <name>mapred.speculative.execution.speculativeCap</name>
+  <name>mapreduce.job.speculative.speculativecap</name>
   <value>0.1</value>
   <description>The max percent (0-1) of running tasks that
   can be speculatively re-executed at any time.</description>
 </property>
  
 <property>
-  <name>mapred.speculative.execution.slowTaskThreshold</name>
+  <name>mapreduce.job.speculative.slowtaskthreshold</name>
   <value>1.0</value>
   <description>The number of standard deviations by which a task's 
   average progress rate must be lower than the average of all running tasks'
   for the task to be considered too slow.
@@ -506,7 +499,7 @@
 </property>
 
 <property>
-  <name>mapred.speculative.execution.slowNodeThreshold</name>
+  <name>mapreduce.job.speculative.slownodethreshold</name>
   <value>1.0</value>
   <description>The number of standard deviations by which a Task 
   Tracker's average map and reduce progress rates (finishTime-dispatchTime)
@@ -516,7 +509,7 @@
 </property>
 
 <property>
-  <name>mapred.job.reuse.jvm.num.tasks</name>
+  <name>mapreduce.job.jvm.numtasks</name>
   <value>1</value>
   <description>How many tasks to run per jvm. If set to -1, there is
   no limit. 
@@ -524,7 +517,7 @@
 </property>
 
 <property>
-  <name>mapred.min.split.size</name>
+  <name>mapreduce.input.fileinputformat.split.minsize</name>
   <value>0</value>
   <description>The minimum size chunk that map input should be split
   into.  Note that some file formats may have minimum split sizes that
@@ -532,14 +525,14 @@
 </property>
 
 <property>
-  <name>mapred.jobtracker.maxtasks.per.job</name>
+  <name>mapreduce.jobtracker.maxtasks.perjob</name>
   <value>-1</value>
   <description>The maximum number of tasks for a single job.
   A value of -1 indicates that there is no maximum.  </description>
 </property>
 
 <property>
-  <name>mapred.submit.replication</name>
+  <name>mapreduce.client.submit.file.replication</name>
   <value>10</value>
   <description>The replication level for submitted job files.  This
   should be around the square root of the number of nodes.
@@ -548,7 +541,7 @@
 
 
 <property>
-  <name>mapred.tasktracker.dns.interface</name>
+  <name>mapreduce.tasktracker.dns.interface</name>
   <value>default</value>
   <description>The name of the Network Interface from which a task
   tracker should report its IP address.
@@ -556,7 +549,7 @@
  </property>
  
 <property>
-  <name>mapred.tasktracker.dns.nameserver</name>
+  <name>mapreduce.tasktracker.dns.nameserver</name>
   <value>default</value>
   <description>The host name or IP address of the name server (DNS)
   which a TaskTracker should use to determine the host name used by
@@ -565,7 +558,7 @@
  </property>
  
 <property>
-  <name>tasktracker.http.threads</name>
+  <name>mapreduce.tasktracker.http.threads</name>
   <value>40</value>
   <description>The number of worker threads for the http server. This is
                used for map output fetching
@@ -573,7 +566,7 @@
 </property>
 
 <property>
-  <name>mapred.task.tracker.http.address</name>
+  <name>mapreduce.tasktracker.http.address</name>
   <value>0.0.0.0:50060</value>
   <description>
     The task tracker http server address and port.
@@ -582,7 +575,7 @@
 </property>
 
 <property>
-  <name>keep.failed.task.files</name>
+  <name>mapreduce.task.files.preserve.failedtasks</name>
   <value>false</value>
   <description>Should the files for failed tasks be kept. This should only be 
                used on jobs that are failing, because the storage is never
@@ -593,7 +586,7 @@
 
 <!-- 
   <property>
-  <name>keep.task.files.pattern</name>
+  <name>mapreduce.task.files.preserve.filepattern</name>
   <value>.*_m_123456_0</value>
   <description>Keep all files from tasks whose task names match the given
                regular expression. Defaults to none.</description>
@@ -601,14 +594,14 @@
 -->
 
 <property>
-  <name>mapred.output.compress</name>
+  <name>mapreduce.output.fileoutputformat.compress</name>
   <value>false</value>
   <description>Should the job outputs be compressed?
   </description>
 </property>
 
 <property>
-  <name>mapred.output.compression.type</name>
+  <name>mapreduce.output.fileoutputformat.compression.type</name>
   <value>RECORD</value>
   <description>If the job outputs are to be compressed as SequenceFiles, how should
                they be compressed? Should be one of NONE, RECORD or BLOCK.
@@ -616,14 +609,14 @@
 </property>
 
 <property>
-  <name>mapred.output.compression.codec</name>
+  <name>mapreduce.output.fileoutputformat.compression.codec</name>
   <value>org.apache.hadoop.io.compress.DefaultCodec</value>
   <description>If the job outputs are compressed, how should they be compressed?
   </description>
 </property>
 
 <property>
-  <name>mapred.compress.map.output</name>
+  <name>mapreduce.map.output.compress</name>
   <value>false</value>
   <description>Should the outputs of the maps be compressed before being
                sent across the network. Uses SequenceFile compression.
@@ -631,7 +624,7 @@
 </property>
 
 <property>
-  <name>mapred.map.output.compression.codec</name>
+  <name>mapreduce.map.output.compress.codec</name>
   <value>org.apache.hadoop.io.compress.DefaultCodec</value>
   <description>If the map outputs are compressed, how should they be 
                compressed?
@@ -646,14 +639,14 @@
 </property>
 
 <property>
-  <name>mapred.userlog.limit.kb</name>
+  <name>mapreduce.task.userlog.limit.kb</name>
   <value>0</value>
   <description>The maximum size of user-logs of each task in KB. 0 disables the cap.
   </description>
 </property>
 
 <property>
-  <name>mapred.userlog.retain.hours</name>
+  <name>mapreduce.task.userlog.retain.hours</name>
   <value>24</value>
   <description>The maximum time, in hours, for which the user-logs are to be 
           retained.
@@ -661,7 +654,7 @@
 </property>
 
 <property>
-  <name>mapred.hosts</name>
+  <name>mapreduce.jobtracker.hosts.filename</name>
   <value></value>
   <description>Names a file that contains the list of nodes that may
   connect to the jobtracker.  If the value is empty, all hosts are
@@ -669,7 +662,7 @@
 </property>
 
 <property>
-  <name>mapred.hosts.exclude</name>
+  <name>mapreduce.jobtracker.hosts.exclude.filename</name>
   <value></value>
   <description>Names a file that contains the list of hosts that
   should be excluded by the jobtracker.  If the value is empty, no
@@ -677,16 +670,16 @@
 </property>
 
 <property>
-  <name>mapred.heartbeats.in.second</name>
+  <name>mapreduce.jobtracker.heartbeats.in.second</name>
   <value>100</value>
   <description>Expert: Approximate number of heart-beats that could arrive 
-               JobTracker in a second. Assuming each RPC can be processed 
+               at JobTracker in a second. Assuming each RPC can be processed 
                in 10msec, the default value is made 100 RPCs in a second.
   </description>
 </property> 
 
 <property>
-  <name>mapred.max.tracker.blacklists</name>
+  <name>mapreduce.jobtracker.tasktracker.maxblacklists</name>
   <value>4</value>
   <description>The number of blacklists for a taskTracker by various jobs
                after which the task tracker could be blacklisted across
@@ -697,7 +690,7 @@
 </property> 
 
 <property>
-  <name>mapred.max.tracker.failures</name>
+  <name>mapreduce.job.maxtaskfailures.per.tracker</name>
   <value>4</value>
   <description>The number of task-failures on a tasktracker of a given job 
                after which new tasks of that job aren't assigned to it.
@@ -705,7 +698,7 @@
 </property>
 
 <property>
-  <name>jobclient.output.filter</name>
+  <name>mapreduce.client.output.filter</name>
   <value>FAILED</value>
   <description>The filter for controlling the output of the task's userlogs sent
                to the console of the JobClient. 
@@ -715,7 +708,27 @@
 </property>
 
   <property>
-    <name>mapred.job.tracker.persist.jobstatus.active</name>
+    <name>mapreduce.client.completion.pollinterval</name>
+    <value>5000</value>
+    <description>The interval (in milliseconds) between which the JobClient
+    polls the JobTracker for updates about job status. You may want to set this
+    to a lower value to make tests run faster on a single node system. Adjusting
+    this value in production may lead to unwanted client-server traffic.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.client.progerssmonitor.pollinterval</name>
+    <value>1000</value>
+    <description>The interval (in milliseconds) between which the JobClient
+    reports status to the console and checks for job completion. You may want to set this
+    to a lower value to make tests run faster on a single node system. Adjusting
+    this value in production may lead to unwanted client-server traffic.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.jobtracker.persist.jobstatus.active</name>
     <value>false</value>
     <description>Indicates if persistency of job status information is
       active or not.
@@ -723,7 +736,7 @@
   </property>
 
   <property>
-  <name>mapred.job.tracker.persist.jobstatus.hours</name>
+  <name>mapreduce.jobtracker.persist.jobstatus.hours</name>
   <value>0</value>
   <description>The number of hours job status information is persisted in DFS.
    The job status information will be available after it drops out of the memory
@@ -733,7 +746,7 @@
 </property>
 
   <property>
-    <name>mapred.job.tracker.persist.jobstatus.dir</name>
+    <name>mapreduce.jobtracker.persist.jobstatus.dir</name>
     <value>/jobtracker/jobsInfo</value>
     <description>The directory where the job status information is persisted
      in a file system to be available after it drops out of the memory queue and
@@ -742,7 +755,7 @@
   </property>
 
   <property>
-    <name>mapred.task.profile</name>
+    <name>mapreduce.task.profile</name>
     <value>false</value>
     <description>To set whether the system should collect profiler
     information for some of the tasks in this job. The information is stored
@@ -751,30 +764,23 @@
   </property>
 
   <property>
-    <name>mapred.task.profile.maps</name>
+    <name>mapreduce.task.profile.maps</name>
     <value>0-2</value>
     <description> To set the ranges of map tasks to profile.
-    mapred.task.profile has to be set to true for the value to be accounted.
+    mapreduce.task.profile has to be set to true for the value to be accounted.
     </description>
   </property>
 
   <property>
-    <name>mapred.task.profile.reduces</name>
+    <name>mapreduce.task.profile.reduces</name>
     <value>0-2</value>
     <description> To set the ranges of reduce tasks to profile.
-    mapred.task.profile has to be set to true for the value to be accounted.
+    mapreduce.task.profile has to be set to true for the value to be accounted.
     </description>
   </property>
 
   <property>
-    <name>mapred.line.input.format.linespermap</name>
-    <value>1</value>
-    <description> Number of lines per split in NLineInputFormat.
-    </description>
-  </property>
-  
-  <property>
-    <name>mapred.skip.attempts.to.start.skipping</name>
+    <name>mapreduce.task.skip.start.attempts</name>
     <value>2</value>
     <description> The number of Task attempts AFTER which skip mode 
     will be kicked off. When skip mode is kicked off, the 
@@ -786,7 +792,7 @@
   </property>
   
   <property>
-    <name>mapred.skip.map.auto.incr.proc.count</name>
+    <name>mapreduce.map.skip.proc.count.autoincr</name>
     <value>true</value>
     <description> The flag which if set to true, 
     SkipBadRecords.COUNTER_MAP_PROCESSED_RECORDS is incremented 
@@ -798,7 +804,7 @@
   </property>
   
   <property>
-    <name>mapred.skip.reduce.auto.incr.proc.count</name>
+    <name>mapreduce.reduce.skip.proc.count.autoincr</name>
     <value>true</value>
     <description> The flag which if set to true, 
     SkipBadRecords.COUNTER_REDUCE_PROCESSED_GROUPS is incremented 
@@ -810,7 +816,7 @@
   </property>
   
   <property>
-    <name>mapred.skip.out.dir</name>
+    <name>mapreduce.job.skip.outdir</name>
     <value></value>
     <description> If no value is specified here, the skipped records are 
     written to the output directory at _logs/skip.
@@ -819,7 +825,7 @@
   </property>
 
   <property>
-    <name>mapred.skip.map.max.skip.records</name>
+    <name>mapreduce.map.skip.maxrecords</name>
     <value>0</value>
     <description> The number of acceptable skip records surrounding the bad 
     record PER bad record in mapper. The number includes the bad record as well.
@@ -834,7 +840,7 @@
   </property>
   
   <property>
-    <name>mapred.skip.reduce.max.skip.groups</name>
+    <name>mapreduce.reduce.skip.maxgroups</name>
     <value>0</value>
     <description> The number of acceptable skip groups surrounding the bad 
     group PER bad group in reducer. The number includes the bad group as well.
@@ -852,7 +858,7 @@
 
 <!--
 <property>
- <name>job.end.notification.url</name>
+ <name>mapreduce.job.end-notification.url</name>
  <value>http://localhost:8080/jobstatus.php?jobId=$jobId&amp;jobStatus=$jobStatus</value>
  <description>Indicates url which will be called on completion of job to inform
               end status of job.
@@ -864,14 +870,14 @@
 -->
 
 <property>
-  <name>job.end.retry.attempts</name>
+  <name>mapreduce.job.end-notification.retry.attempts</name>
   <value>0</value>
   <description>Indicates how many times hadoop should attempt to contact the
                notification URL </description>
 </property>
 
 <property>
-  <name>job.end.retry.interval</name>
+  <name>mapreduce.job.end-notification.retry.interval</name>
    <value>30000</value>
    <description>Indicates time in milliseconds between notification URL retry
                 calls</description>
@@ -879,15 +885,7 @@
   
 <!-- Proxy Configuration -->
 <property>
-  <name>hadoop.rpc.socket.factory.class.JobSubmissionProtocol</name>
-  <value></value>
-  <description> SocketFactory to use to connect to a Map/Reduce master
-    (JobTracker). If null or empty, then use hadoop.rpc.socket.class.default.
-  </description>
-</property>
-
-<property>
-  <name>mapred.task.cache.levels</name>
+  <name>mapreduce.jobtracker.taskcache.levels</name>
   <value>2</value>
   <description> This is the max level of the task cache. For example, if
     the level is 2, the tasks cached are at the host level and at the rack
@@ -896,7 +894,7 @@
 </property>
 
 <property>
-  <name>mapred.job.queue.name</name>
+  <name>mapreduce.job.queuename</name>
   <value>default</value>
   <description> Queue to which a job is submitted. This must match one of the
     queues defined in mapred.queue.names for the system. Also, the ACL setup
@@ -907,7 +905,7 @@
 </property>
 
 <property>
-  <name>mapred.tasktracker.indexcache.mb</name>
+  <name>mapreduce.tasktracker.indexcache.mb</name>
   <value>10</value>
   <description> The maximum memory that a task tracker allows for the 
     index cache that is used when serving map outputs to reducers.
@@ -915,7 +913,7 @@
 </property>
 
 <property>
-  <name>mapred.merge.recordsBeforeProgress</name>
+  <name>mapreduce.task.merge.progress.records</name>
   <value>10000</value>
   <description> The number of records to process during merge before
    sending a progress notification to the TaskTracker.
@@ -923,7 +921,7 @@
 </property>
 
 <property>
-  <name>mapred.reduce.slowstart.completed.maps</name>
+  <name>mapreduce.job.reduce.slowstart.completedmaps</name>
   <value>0.05</value>
   <description>Fraction of the number of maps in the job which should be 
   complete before reduces are scheduled for the job. 
@@ -931,7 +929,7 @@
 </property>
 
 <property>
-  <name>mapred.task.tracker.task-controller</name>
+  <name>mapreduce.tasktracker.taskcontroller</name>
   <value>org.apache.hadoop.mapred.DefaultTaskController</value>
   <description>TaskController which is used to launch and manage task execution 
   </description>
@@ -940,7 +938,7 @@
 <!--  Node health script variables -->
 
 <property>
-  <name>mapred.healthChecker.script.path</name>
+  <name>mapreduce.tasktracker.healthchecker.script.path</name>
   <value></value>
   <description>Absolute path to the script which is
   periodically run by the node health monitoring service to determine if
@@ -950,21 +948,21 @@
 </property>
 
 <property>
-  <name>mapred.healthChecker.interval</name>
+  <name>mapreduce.tasktracker.healthchecker.interval</name>
   <value>60000</value>
   <description>Frequency of the node health script to be run,
   in milliseconds</description>
 </property>
 
 <property>
-  <name>mapred.healthChecker.script.timeout</name>
+  <name>mapreduce.tasktracker.healthchecker.script.timeout</name>
   <value>600000</value>
   <description>Time after which the node health script will be killed if 
   unresponsive and the script considered to have failed.</description>
 </property>
 
 <property>
-  <name>mapred.healthChecker.script.args</name>
+  <name>mapreduce.tasktracker.healthchecker.script.args</name>
   <value></value>
   <description>List of arguments which are to be passed to 
   the node health script when it is being launched, comma separated.

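For reference when mapping old keys to new ones in the hunk above: the commit renames the legacy mapred.* and io.sort.* keys into the mapreduce.* namespace without changing their values or defaults. The sketch below (not part of the commit; the numeric values are illustrative) shows how client code would set a few of the renamed keys on a JobConf:

    import org.apache.hadoop.mapred.JobConf;

    public class RenamedKeysExample {
      public static void main(String[] args) {
        JobConf conf = new JobConf();
        conf.setInt("mapreduce.task.io.sort.mb", 200);           // was io.sort.mb
        conf.setInt("mapreduce.job.reduces", 4);                 // was mapred.reduce.tasks
        conf.setBoolean("mapreduce.map.output.compress", true);  // was mapred.compress.map.output
        System.out.println("reduces = " + conf.get("mapreduce.job.reduces"));
      }
    }
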
Modified: hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/BackupStore.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/BackupStore.java?rev=885145&r1=885144&r2=885145&view=diff
==============================================================================
--- hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/BackupStore.java (original)
+++ hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/BackupStore.java Sat Nov 28 20:26:01 2009
@@ -35,10 +35,10 @@
 import org.apache.hadoop.io.DataInputBuffer;
 import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.io.WritableUtils;
-import org.apache.hadoop.mapred.IFile.InMemoryReader;
 import org.apache.hadoop.mapred.IFile.Reader;
 import org.apache.hadoop.mapred.IFile.Writer;
 import org.apache.hadoop.mapred.Merger.Segment;
+import org.apache.hadoop.mapreduce.MRConfig;
 import org.apache.hadoop.mapreduce.TaskAttemptID;
 
 /**
@@ -81,10 +81,10 @@
   throws IOException {
     
     final float bufferPercent =
-      conf.getFloat("mapred.job.reduce.markreset.buffer.percent", 0f);
+      conf.getFloat(JobContext.REDUCE_MARKRESET_BUFFER_PERCENT, 0f);
 
     if (bufferPercent > 1.0 || bufferPercent < 0.0) {
-      throw new IOException("mapred.job.reduce.markreset.buffer.percent" +
+      throw new IOException(JobContext.REDUCE_MARKRESET_BUFFER_PERCENT +
           bufferPercent);
     }
 
@@ -92,7 +92,7 @@
         Runtime.getRuntime().maxMemory() * bufferPercent, Integer.MAX_VALUE);
 
     // Support an absolute size also.
-    int tmp = conf.getInt("mapred.job.reduce.markreset.buffer.size", 0);
+    int tmp = conf.getInt(JobContext.REDUCE_MARKRESET_BUFFER_SIZE, 0);
     if (tmp >  0) {
       maxSize = tmp;
     }
@@ -355,7 +355,11 @@
   
   private void clearSegmentList() throws IOException {
     for (Segment<K,V> segment: segmentList) {
+      long len = segment.getLength();
       segment.close();
+      if (segment.inMemory()) {
+       memCache.unreserve(len);
+      }
     }
     segmentList.clear();
   }
@@ -376,6 +380,10 @@
       }
     }
 
+    public void unreserve(long len) {
+      ramManager.unreserve((int)len);
+    }
+
     /**
      * Re-initialize the memory cache.
      * 
@@ -485,7 +493,7 @@
       ramManager.unreserve(blockSize - usedSize);
 
       Reader<K, V> reader = 
-        new InMemoryReader<K, V>(ramManager, 
+        new org.apache.hadoop.mapreduce.task.reduce.InMemoryReader<K, V>(null, 
             (org.apache.hadoop.mapred.TaskAttemptID) tid, 
             dataOut.getData(), 0, usedSize);
       Segment<K, V> segment = new Segment<K, V>(reader, false);
@@ -509,7 +517,7 @@
     throws IOException {
       this.conf = conf;
       this.fs = FileSystem.getLocal(conf);
-      this.lDirAlloc = new LocalDirAllocator("mapred.local.dir");
+      this.lDirAlloc = new LocalDirAllocator(MRConfig.LOCAL_DIR);
     }
 
     void write(DataInputBuffer key, DataInputBuffer value)
@@ -568,7 +576,6 @@
       availableSize = maxSize = size;
     }
 
-    @Override
     public boolean reserve(int requestedSize, InputStream in) {
       // Not used
       LOG.warn("Reserve(int, InputStream) not supported by BackupRamManager");
@@ -595,7 +602,6 @@
       }
     }
 
-    @Override
     public void unreserve(int requestedSize) {
       availableSize += requestedSize;
       LOG.debug("Unreserving: " + requestedSize +

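In the BackupStore hunk above, clearSegmentList() now returns the memory held by in-memory segments to the RAM manager through the new unreserve(long) call. A minimal standalone sketch of that reserve/unreserve bookkeeping (simplified; not the actual BackupRamManager API):

    class SimpleRamAccounting {
      private long availableSize;

      SimpleRamAccounting(long maxSize) {
        this.availableSize = maxSize;
      }

      // Grant a reservation only if it fits within the remaining budget.
      synchronized boolean reserve(long requestedSize) {
        if (requestedSize > availableSize) {
          return false;
        }
        availableSize -= requestedSize;
        return true;
      }

      // Return memory to the budget, e.g. after an in-memory segment is closed.
      synchronized void unreserve(long requestedSize) {
        availableSize += requestedSize;
      }

      synchronized long available() {
        return availableSize;
      }
    }
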
Modified: hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/BasicTypeSorterBase.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/BasicTypeSorterBase.java?rev=885145&r1=885144&r2=885145&view=diff
==============================================================================
--- hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/BasicTypeSorterBase.java (original)
+++ hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/BasicTypeSorterBase.java Sat Nov 28 20:26:01 2009
@@ -1,4 +1,4 @@
-/*
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information

Modified: hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/BufferSorter.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/BufferSorter.java?rev=885145&r1=885144&r2=885145&view=diff
==============================================================================
--- hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/BufferSorter.java (original)
+++ hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/BufferSorter.java Sat Nov 28 20:26:01 2009
@@ -1,4 +1,4 @@
-/*
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information

Modified: hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/Child.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/Child.java?rev=885145&r1=885144&r2=885145&view=diff
==============================================================================
--- hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/Child.java (original)
+++ hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/Child.java Sat Nov 28 20:26:01 2009
@@ -26,11 +26,15 @@
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSError;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.mapred.JvmTask;
+import org.apache.hadoop.mapreduce.JobContext;
 import org.apache.hadoop.mapreduce.TaskType;
+import org.apache.hadoop.mapreduce.security.JobTokens;
 import org.apache.hadoop.metrics.MetricsContext;
 import org.apache.hadoop.metrics.MetricsUtil;
 import org.apache.hadoop.metrics.jvm.JvmMetrics;
@@ -54,6 +58,9 @@
     LOG.debug("Child starting");
 
     JobConf defaultConf = new JobConf();
+    // set tcp nodelay
+    defaultConf.setBoolean("ipc.client.tcpnodelay", true);
+    
     String host = args[0];
     int port = Integer.parseInt(args[1]);
     InetSocketAddress address = new InetSocketAddress(host, port);
@@ -62,6 +69,13 @@
     int jvmIdInt = Integer.parseInt(args[3]);
     JVMId jvmId = new JVMId(firstTaskid.getJobID(),
         firstTaskid.getTaskType() == TaskType.MAP,jvmIdInt);
+    
+    // file name is passed thru env
+    String jobTokenFile = System.getenv().get("JOB_TOKEN_FILE");
+    FileSystem localFs = FileSystem.getLocal(defaultConf);
+    JobTokens jt = loadJobTokens(jobTokenFile, localFs);
+    LOG.debug("Child: got jobTokenfile=" + jobTokenFile);
+    
     TaskUmbilicalProtocol umbilical =
       (TaskUmbilicalProtocol)RPC.getProxy(TaskUmbilicalProtocol.class,
           TaskUmbilicalProtocol.versionID,
@@ -138,8 +152,11 @@
         //are viewable immediately
         TaskLog.syncLogs(firstTaskid, taskid, isCleanup);
         JobConf job = new JobConf(task.getJobFile());
-
-        // setup the child's mapred-local-dir. The child is now sandboxed and
+        
+        // set the jobTokenFile into task
+        task.setJobTokens(jt);
+        
+        // setup the child's Configs.LOCAL_DIR. The child is now sandboxed and
         // can only see files under the attempt directory.
         TaskRunner.setupChildMapredLocalDirs(task, job);
 
@@ -150,7 +167,7 @@
 
         numTasksToExecute = job.getNumTasksToExecutePerJvm();
         assert(numTasksToExecute != 0);
-        TaskLog.cleanup(job.getInt("mapred.userlog.retain.hours", 24));
+        TaskLog.cleanup(job.getInt(JobContext.TASK_LOG_RETAIN_HOURS, 24));
 
         task.setConf(job);
 
@@ -170,23 +187,33 @@
     } catch (FSError e) {
       LOG.fatal("FSError from child", e);
       umbilical.fsError(taskid, e.getMessage());
-    } catch (Throwable throwable) {
-      LOG.warn("Error running child : "
-          + StringUtils.stringifyException(throwable));
+    } catch (Exception exception) {
+      LOG.warn("Exception running child : "
+          + StringUtils.stringifyException(exception));
       try {
         if (task != null) {
           // do cleanup for the task
           task.taskCleanup(umbilical);
         }
-      } catch (Throwable th) {
-        LOG.info("Error cleaning up : " + StringUtils.stringifyException(th));
+      } catch (Exception e) {
+        LOG.info("Exception cleaning up : " + StringUtils.stringifyException(e));
       }
       // Report back any failures, for diagnostic purposes
       ByteArrayOutputStream baos = new ByteArrayOutputStream();
-      throwable.printStackTrace(new PrintStream(baos));
+      exception.printStackTrace(new PrintStream(baos));
       if (taskid != null) {
         umbilical.reportDiagnosticInfo(taskid, baos.toString());
       }
+    } catch (Throwable throwable) {
+      LOG.fatal("Error running child : "
+    	        + StringUtils.stringifyException(throwable));
+      if (taskid != null) {
+        Throwable tCause = throwable.getCause();
+        String cause = tCause == null 
+                                 ? throwable.getMessage() 
+                                 : StringUtils.stringifyException(tCause);
+        umbilical.fatalError(taskid, cause);
+      }
     } finally {
       RPC.stopProxy(umbilical);
       MetricsContext metricsContext = MetricsUtil.getContext("mapred");
@@ -197,4 +224,22 @@
       LogManager.shutdown();
     }
   }
+  
+  /**
+   * load secret keys from a file
+   * @param jobTokenFile
+   * @param conf
+   * @throws IOException
+   */
+  private static JobTokens loadJobTokens(String jobTokenFile, FileSystem localFS) 
+  throws IOException {
+    Path localJobTokenFile = new Path (jobTokenFile);
+    FSDataInputStream in = localFS.open(localJobTokenFile);
+    JobTokens jt = new JobTokens();
+    jt.readFields(in);
+        
+    LOG.debug("Loaded jobTokenFile from: "+localJobTokenFile.toUri().getPath());
+    in.close();
+    return jt;
+  }
 }

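The new loadJobTokens() helper in Child.java reads a Writable (JobTokens) from a local file whose path arrives through the JOB_TOKEN_FILE environment variable. Below is a self-contained sketch of the same Writable round-trip, with org.apache.hadoop.io.Text standing in for JobTokens and an illustrative path:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.Text;

    public class WritableRoundTrip {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem localFs = FileSystem.getLocal(conf);
        Path p = new Path("/tmp/jobToken.example");

        // Serialize with write(), the counterpart of readFields().
        FSDataOutputStream out = localFs.create(p, true);
        new Text("secret-bytes").write(out);
        out.close();

        // Deserialize the same way loadJobTokens() does.
        FSDataInputStream in = localFs.open(p);
        Text token = new Text();
        token.readFields(in);
        in.close();
        System.out.println("loaded: " + token);
      }
    }
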
Modified: hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/CleanupQueue.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/CleanupQueue.java?rev=885145&r1=885144&r2=885145&view=diff
==============================================================================
--- hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/CleanupQueue.java (original)
+++ hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/CleanupQueue.java Sat Nov 28 20:26:01 2009
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.mapred;
 
-import java.io.IOException;
 import java.util.concurrent.LinkedBlockingQueue;
 
 import org.apache.commons.logging.Log;

Modified: hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/ClusterStatus.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/ClusterStatus.java?rev=885145&r1=885144&r2=885145&view=diff
==============================================================================
--- hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/ClusterStatus.java (original)
+++ hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/ClusterStatus.java Sat Nov 28 20:26:01 2009
@@ -27,6 +27,8 @@
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableUtils;
+import org.apache.hadoop.mapreduce.ClusterMetrics;
+import org.apache.hadoop.mapreduce.TaskTrackerInfo;
 
 /**
  * Status information on the current state of the Map-Reduce cluster.
@@ -57,7 +59,9 @@
  * {@link JobClient#getClusterStatus()}.</p>
  * 
  * @see JobClient
+ * @deprecated  Use {@link ClusterMetrics} or {@link TaskTrackerInfo} instead
  */
+@Deprecated
 public class ClusterStatus implements Writable {
   /**
    * Class which encapsulates information about a blacklisted tasktracker.

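ClusterStatus is deprecated above in favour of ClusterMetrics and TaskTrackerInfo. A hedged migration sketch, assuming the new org.apache.hadoop.mapreduce.Cluster client API is available on this branch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.Cluster;
    import org.apache.hadoop.mapreduce.ClusterMetrics;

    public class ClusterMetricsExample {
      public static void main(String[] args) throws Exception {
        Cluster cluster = new Cluster(new Configuration());
        ClusterMetrics metrics = cluster.getClusterStatus();
        System.out.println("trackers:     " + metrics.getTaskTrackerCount());
        System.out.println("map slots:    " + metrics.getMapSlotCapacity());
        System.out.println("reduce slots: " + metrics.getReduceSlotCapacity());
      }
    }
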
Modified: hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/CommitTaskAction.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/CommitTaskAction.java?rev=885145&r1=885144&r2=885145&view=diff
==============================================================================
--- hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/CommitTaskAction.java (original)
+++ hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/CommitTaskAction.java Sat Nov 28 20:26:01 2009
@@ -1,4 +1,5 @@
-/* Licensed to the Apache Software Foundation (ASF) under one
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
  * regarding copyright ownership.  The ASF licenses this file

Modified: hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/CompletedJobStatusStore.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/CompletedJobStatusStore.java?rev=885145&r1=885144&r2=885145&view=diff
==============================================================================
--- hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/CompletedJobStatusStore.java (original)
+++ hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/CompletedJobStatusStore.java Sat Nov 28 20:26:01 2009
@@ -14,7 +14,7 @@
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  * See the License for the specific language governing permissions and
  * limitations under the License.
- */  
+ */
 package org.apache.hadoop.mapred;
 
 import java.io.IOException;
@@ -27,6 +27,7 @@
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
 
 /**
  * Persists and retrieves the Job info of a job into/from DFS.
@@ -53,14 +54,14 @@
 
   CompletedJobStatusStore(Configuration conf) throws IOException {
     active =
-      conf.getBoolean("mapred.job.tracker.persist.jobstatus.active", false);
+      conf.getBoolean(JTConfig.JT_PERSIST_JOBSTATUS, false);
 
     if (active) {
       retainTime =
-        conf.getInt("mapred.job.tracker.persist.jobstatus.hours", 0) * HOUR;
+        conf.getInt(JTConfig.JT_PERSIST_JOBSTATUS_HOURS, 0) * HOUR;
 
       jobInfoDir =
-        conf.get("mapred.job.tracker.persist.jobstatus.dir", JOB_INFO_STORE_DIR);
+        conf.get(JTConfig.JT_PERSIST_JOBSTATUS_DIR, JOB_INFO_STORE_DIR);
 
       Path path = new Path(jobInfoDir);
       

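The CompletedJobStatusStore change replaces raw key strings with JTConfig constants. The sketch below shows the same pattern in isolation; the interface and class names are illustrative, while the key strings are the renamed ones from the mapred-default.xml hunk above:

    import org.apache.hadoop.conf.Configuration;

    interface ExampleJobStatusKeys {
      String PERSIST_JOBSTATUS = "mapreduce.jobtracker.persist.jobstatus.active";
      String PERSIST_JOBSTATUS_HOURS = "mapreduce.jobtracker.persist.jobstatus.hours";
    }

    public class ConfigKeysExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Typed getters with defaults, as in the constructor above.
        boolean active = conf.getBoolean(ExampleJobStatusKeys.PERSIST_JOBSTATUS, false);
        int hours = conf.getInt(ExampleJobStatusKeys.PERSIST_JOBSTATUS_HOURS, 0);
        System.out.println("persist=" + active + ", retain=" + hours + "h");
      }
    }
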
Modified: hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/Counters.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/Counters.java?rev=885145&r1=885144&r2=885145&view=diff
==============================================================================
--- hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/Counters.java (original)
+++ hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/Counters.java Sat Nov 28 20:26:01 2009
@@ -66,6 +66,25 @@
   //private static Log log = LogFactory.getLog("Counters.class");
   
   /**
+   * Downgrade new {@link org.apache.hadoop.mapreduce.Counters} to old Counters
+   * @param newCounters new Counters
+   * @return old Counters instance corresponding to newCounters
+   */
+  static Counters downgrade(org.apache.hadoop.mapreduce.Counters newCounters) {
+    Counters oldCounters = new Counters();
+    for (org.apache.hadoop.mapreduce.CounterGroup newGroup: newCounters) {
+      String groupName = newGroup.getName();
+      Group oldGroup = oldCounters.getGroup(groupName);
+      for (org.apache.hadoop.mapreduce.Counter newCounter: newGroup) {
+        Counter oldCounter = oldGroup.getCounterForName(newCounter.getName());
+        oldCounter.setDisplayName(newCounter.getDisplayName());
+        oldCounter.increment(newCounter.getValue());
+      }
+    }
+    return oldCounters;
+  }
+
+  /**
    * A counter record, comprising its name and value. 
    */
   public static class Counter extends org.apache.hadoop.mapreduce.Counter {

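The new Counters.downgrade() walks each new-style group and copies counter names and values into old-style Counters via getGroup(), getCounterForName() and increment(). A small usage sketch of those old-API calls (group and counter names are illustrative):

    import org.apache.hadoop.mapred.Counters;

    public class CountersCopyExample {
      public static void main(String[] args) {
        Counters counters = new Counters();
        // getGroup()/getCounterForName() create entries on demand;
        // increment() accumulates values -- the calls downgrade() relies on.
        Counters.Counter c =
            counters.getGroup("example-group").getCounterForName("RECORDS_SEEN");
        c.increment(42L);
        System.out.println("RECORDS_SEEN = " + c.getValue());
      }
    }
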
Modified: hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/DefaultTaskController.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/DefaultTaskController.java?rev=885145&r1=885144&r2=885145&view=diff
==============================================================================
--- hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/DefaultTaskController.java (original)
+++ hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/DefaultTaskController.java Sat Nov 28 20:26:01 2009
@@ -14,7 +14,7 @@
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  * See the License for the specific language governing permissions and
  * limitations under the License.
-*/
+ */
 
 package org.apache.hadoop.mapred;
 
@@ -22,12 +22,13 @@
 import java.util.List;
 
 import org.apache.hadoop.mapred.JvmManager.JvmEnv;
-import org.apache.hadoop.util.ProcessTree;
+import org.apache.hadoop.mapreduce.util.ProcessTree;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.Shell.ShellCommandExecutor;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
 
 /**
  * The default implementation for controlling tasks.
@@ -35,8 +36,12 @@
  * This class provides an implementation for launching and killing 
  * tasks that need to be run as the tasktracker itself. Hence,
  * many of the initializing or cleanup methods are not required here.
+ * 
+ * <br/>
+ * 
  */
-class DefaultTaskController extends TaskController {
+@InterfaceAudience.Private
+public class DefaultTaskController extends TaskController {
 
   private static final Log LOG = 
       LogFactory.getLog(DefaultTaskController.class);
@@ -127,5 +132,29 @@
       }
     }
   }
+
+  @Override
+  public void initializeDistributedCache(InitializationContext context) {
+    // Do nothing.
+  }
+
+  @Override
+  public void initializeUser(InitializationContext context) {
+    // Do nothing.
+  }
   
+  @Override
+  void runDebugScript(DebugScriptContext context) throws IOException {
+    List<String>  wrappedCommand = TaskLog.captureDebugOut(context.args, 
+        context.stdout);
+    // run the script.
+    ShellCommandExecutor shexec = 
+      new ShellCommandExecutor(wrappedCommand.toArray(new String[0]), context.workDir);
+    shexec.execute();
+    int exitCode = shexec.getExitCode();
+    if (exitCode != 0) {
+      throw new IOException("Task debug script exit with nonzero status of " 
+          + exitCode + ".");
+    }
+  }
 }

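The runDebugScript() added above wraps the debug command with TaskLog.captureDebugOut() and runs it through Shell.ShellCommandExecutor, failing on a nonzero exit code. A minimal sketch of that executor usage (command and working directory are illustrative):

    import java.io.File;
    import java.io.IOException;
    import org.apache.hadoop.util.Shell.ShellCommandExecutor;

    public class ShellExecExample {
      public static void main(String[] args) throws IOException {
        ShellCommandExecutor shexec =
            new ShellCommandExecutor(new String[] {"echo", "debug script ran"},
                                     new File("/tmp"));
        shexec.execute();                     // runs the command, may throw IOException
        if (shexec.getExitCode() != 0) {      // mirror the nonzero-status check above
          throw new IOException("script failed with status " + shexec.getExitCode());
        }
        System.out.print(shexec.getOutput()); // captured stdout
      }
    }
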
Modified: hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/EagerTaskInitializationListener.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/EagerTaskInitializationListener.java?rev=885145&r1=885144&r2=885145&view=diff
==============================================================================
--- hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/EagerTaskInitializationListener.java (original)
+++ hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/EagerTaskInitializationListener.java Sat Nov 28 20:26:01 2009
@@ -29,6 +29,7 @@
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.mapred.JobStatusChangeEvent.EventType;
+import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
 
 /**
  * A {@link JobInProgressListener} which initializes the tasks for a job as soon
@@ -84,10 +85,11 @@
   private List<JobInProgress> jobInitQueue = new ArrayList<JobInProgress>();
   private ExecutorService threadPool;
   private int numThreads;
-  private TaskTrackerManager ttm;
+  TaskTrackerManager ttm;
   
   public EagerTaskInitializationListener(Configuration conf) {
-    numThreads = conf.getInt("mapred.jobinit.threads", DEFAULT_NUM_THREADS);
+    numThreads = 
+      conf.getInt(JTConfig.JT_JOBINIT_THREADS, DEFAULT_NUM_THREADS);
     threadPool = Executors.newFixedThreadPool(numThreads);
   }
   

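The listener above now sizes its job-initialization pool from JTConfig.JT_JOBINIT_THREADS and submits work to a fixed thread pool. A generic sketch of that pattern (the task body is illustrative):

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    public class InitThreadPoolExample {
      public static void main(String[] args) {
        int numThreads = 4;  // normally read from configuration
        ExecutorService threadPool = Executors.newFixedThreadPool(numThreads);
        for (int i = 0; i < 5; i++) {
          final int job = i;
          threadPool.execute(new Runnable() {
            public void run() {
              System.out.println("initializing job " + job);
            }
          });
        }
        threadPool.shutdown();
      }
    }
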
Modified: hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/FileAlreadyExistsException.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/FileAlreadyExistsException.java?rev=885145&r1=885144&r2=885145&view=diff
==============================================================================
--- hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/FileAlreadyExistsException.java (original)
+++ hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/FileAlreadyExistsException.java Sat Nov 28 20:26:01 2009
@@ -24,6 +24,7 @@
  * Used when target file already exists for any operation and 
  * is not configured to be overwritten.  
  */
+@Deprecated // may be removed after 0.23
 public class FileAlreadyExistsException
     extends IOException {
 

Modified: hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/FileInputFormat.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/FileInputFormat.java?rev=885145&r1=885144&r2=885145&view=diff
==============================================================================
--- hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/FileInputFormat.java (original)
+++ hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/FileInputFormat.java Sat Nov 28 20:26:01 2009
@@ -123,7 +123,8 @@
    */
   public static void setInputPathFilter(JobConf conf,
                                         Class<? extends PathFilter> filter) {
-    conf.setClass("mapred.input.pathFilter.class", filter, PathFilter.class);
+    conf.setClass(org.apache.hadoop.mapreduce.lib.input.
+      FileInputFormat.PATHFILTER_CLASS, filter, PathFilter.class);
   }
 
   /**
@@ -133,7 +134,8 @@
    */
   public static PathFilter getInputPathFilter(JobConf conf) {
     Class<? extends PathFilter> filterClass = conf.getClass(
-	"mapred.input.pathFilter.class", null, PathFilter.class);
+	  org.apache.hadoop.mapreduce.lib.input.FileInputFormat.PATHFILTER_CLASS,
+	  null, PathFilter.class);
     return (filterClass != null) ?
         ReflectionUtils.newInstance(filterClass, conf) : null;
   }
@@ -193,6 +195,15 @@
     return result.toArray(new FileStatus[result.size()]);
   }
 
+  /**
+   * A factory that makes the split for this class. It can be overridden
+   * by sub-classes to return split sub-types.
+   */
+  protected FileSplit makeSplit(Path file, long start, long length, 
+                                String[] hosts) {
+    return new FileSplit(file, start, length, hosts);
+  }
+
   /** Splits files returned by {@link #listStatus(JobConf)} when
    * they're too big.*/ 
   @SuppressWarnings("deprecation")
@@ -209,8 +220,8 @@
     }
 
     long goalSize = totalSize / (numSplits == 0 ? 1 : numSplits);
-    long minSize = Math.max(job.getLong("mapred.min.split.size", 1),
-                            minSplitSize);
+    long minSize = Math.max(job.getLong(org.apache.hadoop.mapreduce.lib.input.
+      FileInputFormat.SPLIT_MINSIZE, 1), minSplitSize);
 
     // generate splits
     ArrayList<FileSplit> splits = new ArrayList<FileSplit>(numSplits);
@@ -228,21 +239,21 @@
         while (((double) bytesRemaining)/splitSize > SPLIT_SLOP) {
           String[] splitHosts = getSplitHosts(blkLocations, 
               length-bytesRemaining, splitSize, clusterMap);
-          splits.add(new FileSplit(path, length-bytesRemaining, splitSize, 
-              splitHosts));
+          splits.add(makeSplit(path, length-bytesRemaining, splitSize, 
+                               splitHosts));
           bytesRemaining -= splitSize;
         }
         
         if (bytesRemaining != 0) {
-          splits.add(new FileSplit(path, length-bytesRemaining, bytesRemaining, 
+          splits.add(makeSplit(path, length-bytesRemaining, bytesRemaining, 
                      blkLocations[blkLocations.length-1].getHosts()));
         }
       } else if (length != 0) {
         String[] splitHosts = getSplitHosts(blkLocations,0,length,clusterMap);
-        splits.add(new FileSplit(path, 0, length, splitHosts));
+        splits.add(makeSplit(path, 0, length, splitHosts));
       } else { 
         //Create empty hosts array for zero length files
-        splits.add(new FileSplit(path, 0, length, new String[0]));
+        splits.add(makeSplit(path, 0, length, new String[0]));
       }
     }
     LOG.debug("Total # of splits: " + splits.size());
@@ -313,7 +324,8 @@
       path = new Path(conf.getWorkingDirectory(), inputPaths[i]);
       str.append(StringUtils.escapeString(path.toString()));
     }
-    conf.set("mapred.input.dir", str.toString());
+    conf.set(org.apache.hadoop.mapreduce.lib.input.
+      FileInputFormat.INPUT_DIR, str.toString());
   }
 
   /**
@@ -326,8 +338,10 @@
   public static void addInputPath(JobConf conf, Path path ) {
     path = new Path(conf.getWorkingDirectory(), path);
     String dirStr = StringUtils.escapeString(path.toString());
-    String dirs = conf.get("mapred.input.dir");
-    conf.set("mapred.input.dir", dirs == null ? dirStr :
+    String dirs = conf.get(org.apache.hadoop.mapreduce.lib.input.
+      FileInputFormat.INPUT_DIR);
+    conf.set(org.apache.hadoop.mapreduce.lib.input.
+      FileInputFormat.INPUT_DIR, dirs == null ? dirStr :
       dirs + StringUtils.COMMA_STR + dirStr);
   }
          
@@ -377,7 +391,8 @@
    * @return the list of input {@link Path}s for the map-reduce job.
    */
   public static Path[] getInputPaths(JobConf conf) {
-    String dirs = conf.get("mapred.input.dir", "");
+    String dirs = conf.get(org.apache.hadoop.mapreduce.lib.input.
+      FileInputFormat.INPUT_DIR, "");
     String [] list = StringUtils.split(dirs);
     Path[] result = new Path[list.length];
     for (int i = 0; i < list.length; i++) {

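The new makeSplit() factory above, together with the protected FileSplit() constructor further down in this commit, lets an input format return its own split subtype without re-implementing the whole getSplits() loop. A minimal sketch (TaggedFileSplit and TaggedTextInputFormat are hypothetical names, not part of this patch):

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.mapred.FileSplit;
    import org.apache.hadoop.mapred.TextInputFormat;

    // Hypothetical split subtype; the no-arg constructor delegates to the
    // now-protected FileSplit() so the framework can re-instantiate it.
    class TaggedFileSplit extends FileSplit {
      public TaggedFileSplit() {
        super();
      }
      public TaggedFileSplit(Path file, long start, long length, String[] hosts) {
        super(file, start, length, hosts);
      }
    }

    // Overriding makeSplit() is all that is needed for getSplits() to emit
    // TaggedFileSplit instances instead of plain FileSplits.
    class TaggedTextInputFormat extends TextInputFormat {
      @Override
      protected FileSplit makeSplit(Path file, long start, long length,
                                    String[] hosts) {
        return new TaggedFileSplit(file, start, length, hosts);
      }
    }
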
Modified: hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/FileOutputCommitter.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/FileOutputCommitter.java?rev=885145&r1=885144&r2=885145&view=diff
==============================================================================
--- hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/FileOutputCommitter.java (original)
+++ hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/FileOutputCommitter.java Sat Nov 28 20:26:01 2009
@@ -26,10 +26,11 @@
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapreduce.JobStatus;
 import org.apache.hadoop.util.StringUtils;
 
 /** An {@link OutputCommitter} that commits files specified 
- * in job output directory i.e. ${mapred.output.dir}. 
+ * in the job output directory, i.e. ${mapreduce.output.fileoutputformat.outputdir}. 
  **/
 public class FileOutputCommitter extends OutputCommitter {
 
@@ -39,6 +40,9 @@
    * Temporary directory name 
    */
   public static final String TEMP_DIR_NAME = "_temporary";
+  public static final String SUCCEEDED_FILE_NAME = "_SUCCESS";
+  static final String SUCCESSFUL_JOB_OUTPUT_DIR_MARKER = 
+    "mapreduce.fileoutputcommitter.marksuccessfuljobs";
 
   public void setupJob(JobContext context) throws IOException {
     JobConf conf = context.getJobConf();
@@ -52,6 +56,38 @@
     }
   }
 
+  // True if the job requires the output directory to be marked on
+  // successful completion of the job. Note that this defaults to true.
+  private boolean shouldMarkOutputDir(JobConf conf) {
+    return conf.getBoolean(SUCCESSFUL_JOB_OUTPUT_DIR_MARKER, true);
+  }
+  
+  public void commitJob(JobContext context) throws IOException {
+    // delete the _temporary folder in the output folder
+    cleanupJob(context);
+    // check if the output-dir marking is required
+    if (shouldMarkOutputDir(context.getJobConf())) {
+      // create a _SUCCESS file in the output folder
+      markOutputDirSuccessful(context);
+    }
+  }
+  
+  // Create a _SUCCESS file in the job's output folder
+  private void markOutputDirSuccessful(JobContext context) throws IOException {
+    JobConf conf = context.getJobConf();
+    // get the o/p path
+    Path outputPath = FileOutputFormat.getOutputPath(conf);
+    if (outputPath != null) {
+      // get the filesys
+      FileSystem fileSys = outputPath.getFileSystem(conf);
+      // create a file in the output folder to mark the job completion
+      Path filePath = new Path(outputPath, SUCCEEDED_FILE_NAME);
+      fileSys.create(filePath).close();
+    }
+  }
+
+  @Override
+  @Deprecated
   public void cleanupJob(JobContext context) throws IOException {
     JobConf conf = context.getJobConf();
     // do the clean up of temporary directory
@@ -62,10 +98,19 @@
       context.getProgressible().progress();
       if (fileSys.exists(tmpDir)) {
         fileSys.delete(tmpDir, true);
+      } else {
+        LOG.warn("Output Path is Null in cleanup");
       }
     }
   }
 
+  @Override
+  public void abortJob(JobContext context, int runState) 
+  throws IOException {
+    // simply delete the _temporary dir from the o/p folder of the job
+    cleanupJob(context);
+  }
+  
   public void setupTask(TaskAttemptContext context) throws IOException {
     // FileOutputCommitter's setupTask doesn't do anything. Because the
     // temporary task directory is created on demand when the 

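The commitJob() path above gives downstream consumers a simple completeness signal: once _SUCCESS exists, the output directory has been fully committed. A minimal sketch of both sides of that contract (the output path comes from the command line; mapreduce.fileoutputcommitter.marksuccessfuljobs is the property introduced above and defaults to true):

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.mapred.FileOutputCommitter;
    import org.apache.hadoop.mapred.JobConf;

    public class SuccessMarkerCheck {
      public static void main(String[] args) throws Exception {
        JobConf conf = new JobConf();

        // Producers can opt out of the marker; it is written by default.
        conf.setBoolean("mapreduce.fileoutputcommitter.marksuccessfuljobs", true);

        // Consumers poll for _SUCCESS before reading the directory.
        Path outputDir = new Path(args[0]);
        FileSystem fs = outputDir.getFileSystem(conf);
        boolean committed =
            fs.exists(new Path(outputDir, FileOutputCommitter.SUCCEEDED_FILE_NAME));
        System.out.println(outputDir + " committed: " + committed);
      }
    }
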
Modified: hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/FileOutputFormat.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/FileOutputFormat.java?rev=885145&r1=885144&r2=885145&view=diff
==============================================================================
--- hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/FileOutputFormat.java (original)
+++ hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/FileOutputFormat.java Sat Nov 28 20:26:01 2009
@@ -25,6 +25,7 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.compress.CompressionCodec;
 import org.apache.hadoop.util.Progressable;
+import org.apache.hadoop.fs.FileAlreadyExistsException;
 
 /** A base class for {@link OutputFormat}. */
 public abstract class FileOutputFormat<K, V> implements OutputFormat<K, V> {
@@ -35,7 +36,8 @@
    * @param compress should the output of the job be compressed?
    */
   public static void setCompressOutput(JobConf conf, boolean compress) {
-    conf.setBoolean("mapred.output.compress", compress);
+    conf.setBoolean(org.apache.hadoop.mapreduce.lib.output.
+      FileOutputFormat.COMPRESS, compress);
   }
   
   /**
@@ -45,7 +47,8 @@
    *         <code>false</code> otherwise
    */
   public static boolean getCompressOutput(JobConf conf) {
-    return conf.getBoolean("mapred.output.compress", false);
+    return conf.getBoolean(org.apache.hadoop.mapreduce.lib.output.
+      FileOutputFormat.COMPRESS, false);
   }
   
   /**
@@ -58,7 +61,8 @@
   setOutputCompressorClass(JobConf conf, 
                            Class<? extends CompressionCodec> codecClass) {
     setCompressOutput(conf, true);
-    conf.setClass("mapred.output.compression.codec", codecClass, 
+    conf.setClass(org.apache.hadoop.mapreduce.lib.output.
+      FileOutputFormat.COMPRESS_CODEC, codecClass, 
                   CompressionCodec.class);
   }
   
@@ -75,7 +79,8 @@
 		                       Class<? extends CompressionCodec> defaultValue) {
     Class<? extends CompressionCodec> codecClass = defaultValue;
     
-    String name = conf.get("mapred.output.compression.codec");
+    String name = conf.get(org.apache.hadoop.mapreduce.lib.output.
+      FileOutputFormat.COMPRESS_CODEC);
     if (name != null) {
       try {
         codecClass = 
@@ -123,7 +128,8 @@
    */
   public static void setOutputPath(JobConf conf, Path outputDir) {
     outputDir = new Path(conf.getWorkingDirectory(), outputDir);
-    conf.set("mapred.output.dir", outputDir.toString());
+    conf.set(org.apache.hadoop.mapreduce.lib.output.
+      FileOutputFormat.OUTDIR, outputDir.toString());
   }
 
   /**
@@ -139,7 +145,7 @@
   
   static void setWorkOutputPath(JobConf conf, Path outputDir) {
     outputDir = new Path(conf.getWorkingDirectory(), outputDir);
-    conf.set("mapred.work.output.dir", outputDir.toString());
+    conf.set(JobContext.TASK_OUTPUT_DIR, outputDir.toString());
   }
   
   /**
@@ -149,7 +155,8 @@
    * @see FileOutputFormat#getWorkOutputPath(JobConf)
    */
   public static Path getOutputPath(JobConf conf) {
-    String name = conf.get("mapred.output.dir");
+    String name = conf.get(org.apache.hadoop.mapreduce.lib.output.
+      FileOutputFormat.OUTDIR);
     return name == null ? null: new Path(name);
   }
   
@@ -163,7 +170,7 @@
    *  is {@link FileOutputCommitter}. If <code>OutputCommitter</code> is not 
    *  a <code>FileOutputCommitter</code>, the task's temporary output
    *  directory is same as {@link #getOutputPath(JobConf)} i.e.
-   *  <tt>${mapred.output.dir}$</tt></p>
+   *  <tt>${mapreduce.output.fileoutputformat.outputdir}$</tt></p>
    *  
    * <p>Some applications need to create/write-to side-files, which differ from
    * the actual job-outputs.
@@ -176,23 +183,23 @@
    * 
    * <p>To get around this the Map-Reduce framework helps the application-writer 
    * out by maintaining a special 
-   * <tt>${mapred.output.dir}/_temporary/_${taskid}</tt> 
+   * <tt>${mapreduce.output.fileoutputformat.outputdir}/_temporary/_${taskid}</tt> 
    * sub-directory for each task-attempt on HDFS where the output of the 
    * task-attempt goes. On successful completion of the task-attempt the files 
-   * in the <tt>${mapred.output.dir}/_temporary/_${taskid}</tt> (only) 
-   * are <i>promoted</i> to <tt>${mapred.output.dir}</tt>. Of course, the 
+   * in the <tt>${mapreduce.output.fileoutputformat.outputdir}/_temporary/_${taskid}</tt> (only) 
+   * are <i>promoted</i> to <tt>${mapreduce.output.fileoutputformat.outputdir}</tt>. Of course, the 
    * framework discards the sub-directory of unsuccessful task-attempts. This 
    * is completely transparent to the application.</p>
    * 
    * <p>The application-writer can take advantage of this by creating any 
-   * side-files required in <tt>${mapred.work.output.dir}</tt> during execution 
+   * side-files required in <tt>${mapreduce.task.output.dir}</tt> during execution 
    * of his reduce-task i.e. via {@link #getWorkOutputPath(JobConf)}, and the 
    * framework will move them out similarly - thus she doesn't have to pick 
    * unique paths per task-attempt.</p>
    * 
-   * <p><i>Note</i>: the value of <tt>${mapred.work.output.dir}</tt> during 
+   * <p><i>Note</i>: the value of <tt>${mapreduce.task.output.dir}</tt> during 
    * execution of a particular task-attempt is actually 
-   * <tt>${mapred.output.dir}/_temporary/_{$taskid}</tt>, and this value is 
+   * <tt>${mapreduce.output.fileoutputformat.outputdir}/_temporary/_{$taskid}</tt>, and this value is 
    * set by the map-reduce framework. So, just create any side-files in the 
    * path  returned by {@link #getWorkOutputPath(JobConf)} from map/reduce 
    * task to take advantage of this feature.</p>
@@ -205,7 +212,7 @@
    * for the map-reduce job.
    */
   public static Path getWorkOutputPath(JobConf conf) {
-    String name = conf.get("mapred.work.output.dir");
+    String name = conf.get(JobContext.TASK_OUTPUT_DIR);
     return name == null ? null: new Path(name);
   }
 
@@ -228,8 +235,10 @@
 
     OutputCommitter committer = conf.getOutputCommitter();
     Path workPath = outputPath;
-    TaskAttemptContext context = new TaskAttemptContext(conf,
-                TaskAttemptID.forName(conf.get("mapred.task.id")));
+    TaskAttemptContext context = 
+      new TaskAttemptContextImpl(conf,
+                                 TaskAttemptID.forName(conf.get(
+                                     JobContext.TASK_ATTEMPT_ID)));
     if (committer instanceof FileOutputCommitter) {
       workPath = ((FileOutputCommitter)committer).getWorkPath(context,
                                                               outputPath);
@@ -256,13 +265,13 @@
   * @return a unique name across all tasks of the job.
    */
   public static String getUniqueName(JobConf conf, String name) {
-    int partition = conf.getInt("mapred.task.partition", -1);
+    int partition = conf.getInt(JobContext.TASK_PARTITION, -1);
     if (partition == -1) {
       throw new IllegalArgumentException(
         "This method can only be called from within a Job");
     }
 
-    String taskType = (conf.getBoolean("mapred.task.is.map", true)) ? "m" : "r";
+    String taskType = (conf.getBoolean(JobContext.TASK_ISMAP, true)) ? "m" : "r";
 
     NumberFormat numberFormat = NumberFormat.getInstance();
     numberFormat.setMinimumIntegerDigits(5);

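The side-file contract described in the javadoc above boils down to: write extra per-task files under getWorkOutputPath() and let the committer promote them along with the regular output. A minimal sketch under the old API ("side-data" is an arbitrary base name; error handling is kept crude for brevity):

    import java.io.IOException;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapred.FileOutputFormat;
    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapred.MapReduceBase;
    import org.apache.hadoop.mapred.Mapper;
    import org.apache.hadoop.mapred.OutputCollector;
    import org.apache.hadoop.mapred.Reporter;

    public class SideFileMapper extends MapReduceBase
        implements Mapper<LongWritable, Text, LongWritable, Text> {

      private FSDataOutputStream sideFile;

      @Override
      public void configure(JobConf job) {
        try {
          // ${mapreduce.task.output.dir} is private to the task-attempt; files
          // created here are promoted to the job output dir on successful commit.
          Path side = new Path(FileOutputFormat.getWorkOutputPath(job),
                               FileOutputFormat.getUniqueName(job, "side-data"));
          sideFile = side.getFileSystem(job).create(side);
        } catch (IOException e) {
          throw new RuntimeException(e);
        }
      }

      public void map(LongWritable key, Text value,
                      OutputCollector<LongWritable, Text> output,
                      Reporter reporter) throws IOException {
        output.collect(key, value);                   // normal job output
        sideFile.writeBytes(value.toString() + "\n"); // extra side output
      }

      @Override
      public void close() throws IOException {
        sideFile.close();
      }
    }
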
Modified: hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/FileSplit.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/FileSplit.java?rev=885145&r1=885144&r2=885145&view=diff
==============================================================================
--- hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/FileSplit.java (original)
+++ hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/FileSplit.java Sat Nov 28 20:26:01 2009
@@ -34,7 +34,7 @@
 public class FileSplit extends org.apache.hadoop.mapreduce.InputSplit 
                        implements InputSplit {
   org.apache.hadoop.mapreduce.lib.input.FileSplit fs; 
-  FileSplit() {
+  protected FileSplit() {
     fs = new org.apache.hadoop.mapreduce.lib.input.FileSplit();
   }
 

Modified: hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/HeartbeatResponse.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/HeartbeatResponse.java?rev=885145&r1=885144&r2=885145&view=diff
==============================================================================
--- hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/HeartbeatResponse.java (original)
+++ hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/HeartbeatResponse.java Sat Nov 28 20:26:01 2009
@@ -21,10 +21,6 @@
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.HashMap;
-import java.util.Set;
 
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
@@ -41,7 +37,6 @@
   short responseId;
   int heartbeatInterval;
   TaskTrackerAction[] actions;
-  Set<JobID> recoveredJobs = new HashSet<JobID>();
 
   HeartbeatResponse() {}
   
@@ -58,15 +53,7 @@
   public short getResponseId() {
     return responseId;
   }
-  
-  public void setRecoveredJobs(Set<JobID> ids) {
-    recoveredJobs = ids; 
-  }
-  
-  public Set<JobID> getRecoveredJobs() {
-    return recoveredJobs;
-  }
-  
+
   public void setActions(TaskTrackerAction[] actions) {
     this.actions = actions;
   }
@@ -103,11 +90,6 @@
         action.write(out);
       }
     }
-    // Write the job ids of the jobs that were recovered
-    out.writeInt(recoveredJobs.size());
-    for (JobID id : recoveredJobs) {
-      id.write(out);
-    }
   }
   
   public void readFields(DataInput in) throws IOException {
@@ -125,12 +107,5 @@
     } else {
       actions = null;
     }
-    // Read the job ids of the jobs that were recovered
-    int size = in.readInt();
-    for (int i = 0; i < size; ++i) {
-      JobID id = new JobID();
-      id.readFields(in);
-      recoveredJobs.add(id);
-    }
   }
 }

Modified: hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/IFile.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/IFile.java?rev=885145&r1=885144&r2=885145&view=diff
==============================================================================
--- hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/IFile.java (original)
+++ hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/IFile.java Sat Nov 28 20:26:01 2009
@@ -21,8 +21,6 @@
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
 import java.io.EOFException;
-import java.io.File;
-import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
 
@@ -48,10 +46,15 @@
  * 
  * There is a <code>Writer</code> to write out map-outputs in this format and 
  * a <code>Reader</code> to read files of this format.
+ *
+ * <FRAMEWORK-USE-ONLY>
+ * This class is intended only for use by the Map/Reduce framework and not
+ * for external users.
+ *
  */
-class IFile {
+public class IFile {
 
-  static final int EOF_MARKER = -1;
+  public static final int EOF_MARKER = -1; // End of File Marker
   
   /**
    * <code>IFile.Writer</code> to write out intermediate map-outputs. 
@@ -91,6 +94,10 @@
       ownOutputStream = true;
     }
     
+    protected Writer(Counters.Counter writesCounter) {
+      writtenRecordsCounter = writesCounter;
+    }
+
     public Writer(Configuration conf, FSDataOutputStream out, 
         Class<K> keyClass, Class<V> valueClass,
         CompressionCodec codec, Counters.Counter writesCounter)
@@ -273,18 +280,18 @@
 
     final InputStream in;        // Possibly decompressed stream that we read
     Decompressor decompressor;
-    long bytesRead = 0;
-    final long fileLength;
-    boolean eof = false;
+    public long bytesRead = 0;
+    protected final long fileLength;
+    protected boolean eof = false;
     final IFileInputStream checksumIn;
-    DataInputStream dataIn;
     
-    byte[] buffer = null;
-    int bufferSize = DEFAULT_BUFFER_SIZE;
-
-    int recNo = 1;
-    int currentKeyLength;
-    int currentValueLength;
+    protected byte[] buffer = null;
+    protected int bufferSize = DEFAULT_BUFFER_SIZE;
+    protected DataInputStream dataIn;
+
+    protected int recNo = 1;
+    protected int currentKeyLength;
+    protected int currentValueLength;
     byte keyBytes[] = new byte[0];
     
     
@@ -458,119 +465,4 @@
     }
 
   }    
-  
-  /**
-   * <code>IFile.InMemoryReader</code> to read map-outputs present in-memory.
-   */
-  public static class InMemoryReader<K, V> extends Reader<K, V> {
-    RamManager ramManager;
-    TaskAttemptID taskAttemptId;
-    DataInputBuffer memDataIn = new DataInputBuffer();
-    private int start;
-    private int length;
-    public InMemoryReader(RamManager ramManager, TaskAttemptID taskAttemptId,
-                          byte[] data, int start, int length)
-                          throws IOException {
-      super(null, null, length - start, null, null);
-      this.ramManager = ramManager;
-      this.taskAttemptId = taskAttemptId;
-      
-      buffer = data;
-      bufferSize = (int)fileLength;
-      memDataIn.reset(buffer, start, length);
-      this.start = start;
-      this.length = length;
-    }
-
-    @Override
-    public void reset(int offset) {
-      memDataIn.reset(buffer, start + offset, length);
-      bytesRead = offset;
-      eof = false;
-    }
-    
-    @Override
-    public long getPosition() throws IOException {
-      // InMemoryReader does not initialize streams like Reader, so in.getPos()
-      // would not work. Instead, return the number of uncompressed bytes read,
-      // which will be correct since in-memory data is not compressed.
-      return bytesRead;
-    }
-    
-    @Override
-    public long getLength() { 
-      return fileLength;
-    }
-    
-    private void dumpOnError() {
-      File dumpFile = new File("../output/" + taskAttemptId + ".dump");
-      System.err.println("Dumping corrupt map-output of " + taskAttemptId + 
-                         " to " + dumpFile.getAbsolutePath());
-      try {
-        FileOutputStream fos = new FileOutputStream(dumpFile);
-        fos.write(buffer, 0, bufferSize);
-        fos.close();
-      } catch (IOException ioe) {
-        System.err.println("Failed to dump map-output of " + taskAttemptId);
-      }
-    }
-    
-    public boolean nextRawKey(DataInputBuffer key) throws IOException {
-      try {
-        if (!positionToNextRecord(memDataIn)) {
-          return false;
-        }
-        // Setup the key
-        int pos = memDataIn.getPosition();
-        byte[] data = memDataIn.getData();
-        key.reset(data, pos, currentKeyLength);
-        // Position for the next value
-        long skipped = memDataIn.skip(currentKeyLength);
-        if (skipped != currentKeyLength) {
-          throw new IOException("Rec# " + recNo + 
-              ": Failed to skip past key of length: " + 
-              currentKeyLength);
-        }
-
-        // Record the byte
-        bytesRead += currentKeyLength;
-        return true;
-      } catch (IOException ioe) {
-        dumpOnError();
-        throw ioe;
-      }
-    }
-    
-    public void nextRawValue(DataInputBuffer value) throws IOException {
-      try {
-        int pos = memDataIn.getPosition();
-        byte[] data = memDataIn.getData();
-        value.reset(data, pos, currentValueLength);
-
-        // Position for the next record
-        long skipped = memDataIn.skip(currentValueLength);
-        if (skipped != currentValueLength) {
-          throw new IOException("Rec# " + recNo + 
-              ": Failed to skip past value of length: " + 
-              currentValueLength);
-        }
-        // Record the byte
-        bytesRead += currentValueLength;
-
-        ++recNo;
-      } catch (IOException ioe) {
-        dumpOnError();
-        throw ioe;
-      }
-    }
-      
-    public void close() {
-      // Release
-      memDataIn = null;
-      buffer = null;
-      
-      // Inform the RamManager
-      ramManager.unreserve(bufferSize);
-    }
-  }
 }

Modified: hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/IFileInputStream.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/IFileInputStream.java?rev=885145&r1=885144&r2=885145&view=diff
==============================================================================
--- hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/IFileInputStream.java (original)
+++ hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/IFileInputStream.java Sat Nov 28 20:26:01 2009
@@ -1,4 +1,4 @@
-/*
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -28,9 +28,14 @@
 /**
  * A checksum input stream, used for IFiles.
  * Used to validate the checksum of files created by {@link IFileOutputStream}. 
- */
+ * 
+ * <FRAMEWORK-USE-ONLY>
+ * This class is intended only for use by the Map/Reduce framework and not
+ * for external users.
+ *
+*/
 
-class IFileInputStream extends InputStream {
+public class IFileInputStream extends InputStream {
   
   private final InputStream in; //The input stream to be verified for checksum. 
   private final long length; //The total length of the input file

Modified: hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/IFileOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/IFileOutputStream.java?rev=885145&r1=885144&r2=885145&view=diff
==============================================================================
--- hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/IFileOutputStream.java (original)
+++ hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/IFileOutputStream.java Sat Nov 28 20:26:01 2009
@@ -1,4 +1,4 @@
-/*
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -28,8 +28,13 @@
  * Checksum for the contents of the file is calculated and
  * appended to the end of the file on close of the stream.
  * Used for IFiles
+ *
+ * <FRAMEWORK-USE-ONLY>
+ * This class is intended only for use by the Map/Reduce framework and not
+ * for external users.
+ *
  */
-class IFileOutputStream extends FilterOutputStream {
+public class IFileOutputStream extends FilterOutputStream {
   /**
    * The output stream to be checksummed. 
    */

Modified: hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/IndexCache.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/IndexCache.java?rev=885145&r1=885144&r2=885145&view=diff
==============================================================================
--- hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/IndexCache.java (original)
+++ hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/IndexCache.java Sat Nov 28 20:26:01 2009
@@ -25,6 +25,7 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig;
 
 class IndexCache {
 
@@ -42,7 +43,7 @@
   public IndexCache(JobConf conf) {
     this.conf = conf;
     totalMemoryAllowed =
-      conf.getInt("mapred.tasktracker.indexcache.mb", 10) * 1024 * 1024;
+      conf.getInt(TTConfig.TT_INDEX_CACHE, 10) * 1024 * 1024;
     LOG.info("IndexCache created with max memory = " + totalMemoryAllowed);
   }
 

Modified: hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/InputFormat.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/InputFormat.java?rev=885145&r1=885144&r2=885145&view=diff
==============================================================================
--- hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/InputFormat.java (original)
+++ hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/InputFormat.java Sat Nov 28 20:26:01 2009
@@ -48,8 +48,8 @@
  * bytes, of the input files. However, the {@link FileSystem} blocksize of  
  * the input files is treated as an upper bound for input splits. A lower bound 
  * on the split size can be set via 
- * <a href="{@docRoot}/../mapred-default.html#mapred.min.split.size">
- * mapred.min.split.size</a>.</p>
+ * <a href="{@docRoot}/../mapred-default.html#mapreduce.input.fileinputformat.split.minsize">
+ * mapreduce.input.fileinputformat.split.minsize</a>.</p>
  * 
  * <p>Clearly, logical splits based on input-size is insufficient for many 
  * applications since record boundaries are to respected. In such cases, the

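A minimal sketch of setting the renamed lower bound on a job (the 128 MB figure is arbitrary; the block size of the input files remains the upper bound):

    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapred.TextInputFormat;

    public class SplitMinSizeExample {
      public static void main(String[] args) {
        JobConf conf = new JobConf();
        conf.setInputFormat(TextInputFormat.class);
        // No split smaller than 128 MB will be generated for splittable input.
        conf.setLong("mapreduce.input.fileinputformat.split.minsize",
                     128L * 1024 * 1024);
      }
    }
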
Modified: hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/InputSplit.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/InputSplit.java?rev=885145&r1=885144&r2=885145&view=diff
==============================================================================
--- hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/InputSplit.java (original)
+++ hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/InputSplit.java Sat Nov 28 20:26:01 2009
@@ -1,4 +1,4 @@
-/*
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information

Modified: hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/InterTrackerProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/InterTrackerProtocol.java?rev=885145&r1=885144&r2=885145&view=diff
==============================================================================
--- hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/InterTrackerProtocol.java (original)
+++ hadoop/mapreduce/branches/MAPREDUCE-233/src/java/org/apache/hadoop/mapred/InterTrackerProtocol.java Sat Nov 28 20:26:01 2009
@@ -64,8 +64,9 @@
    * Version 26: Modified TaskID to be aware of the new TaskTypes
    * Version 27: Added numRequiredSlots to TaskStatus for MAPREDUCE-516
    * Version 28: Adding node health status to TaskStatus for MAPREDUCE-211
+   * Version 29: Adding user name to the serialized Task for use by TT.
    */
-  public static final long versionID = 28L;
+  public static final long versionID = 29L;
   
   public final static int TRACKERS_OK = 0;
   public final static int UNKNOWN_TASKTRACKER = 1;