Posted to common-commits@hadoop.apache.org by dd...@apache.org on 2008/03/17 06:19:57 UTC

svn commit: r637723 [1/2] - in /hadoop/core/trunk: ./ conf/ docs/ src/docs/src/documentation/content/xdocs/ src/java/org/apache/hadoop/mapred/ src/webapps/history/ src/webapps/job/

Author: ddas
Date: Sun Mar 16 22:19:52 2008
New Revision: 637723

URL: http://svn.apache.org/viewvc?rev=637723&view=rev
Log:
HADOOP-2901. Fixes the creation of info servers in the JobClient and JobTracker. Removes the creation from JobClient and removes additional info server from the JobTracker. Also adds the command line utility to view the history files (HADOOP-2896), and fixes bugs in JSPs to do with analysis - HADOOP-2742, HADOOP-2792. Contributed by Amareshwari Sri Ramadasu.
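
In short, the new command line utility (HADOOP-2896) is used as documented in the updated cluster_setup and mapred_tutorial sources in this commit:

  $ bin/hadoop job -history output-dir
      prints job details, plus failed and killed tip details
  $ bin/hadoop job -history all output-dir
      additionally prints successful tasks and the task attempts made for each task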

Added:
    hadoop/core/trunk/src/java/org/apache/hadoop/mapred/HistoryViewer.java
    hadoop/core/trunk/src/webapps/job/analysejobhistory.jsp
    hadoop/core/trunk/src/webapps/job/jobconf_history.jsp
    hadoop/core/trunk/src/webapps/job/jobdetailshistory.jsp
    hadoop/core/trunk/src/webapps/job/jobhistory.jsp
    hadoop/core/trunk/src/webapps/job/jobtaskshistory.jsp
    hadoop/core/trunk/src/webapps/job/loadhistory.jsp
    hadoop/core/trunk/src/webapps/job/taskdetailshistory.jsp
Removed:
    hadoop/core/trunk/src/webapps/history/
Modified:
    hadoop/core/trunk/CHANGES.txt
    hadoop/core/trunk/build.xml
    hadoop/core/trunk/conf/hadoop-default.xml
    hadoop/core/trunk/docs/changes.html
    hadoop/core/trunk/docs/cluster_setup.html
    hadoop/core/trunk/docs/cluster_setup.pdf
    hadoop/core/trunk/docs/hadoop-default.html
    hadoop/core/trunk/docs/mapred_tutorial.html
    hadoop/core/trunk/docs/mapred_tutorial.pdf
    hadoop/core/trunk/src/docs/src/documentation/content/xdocs/cluster_setup.xml
    hadoop/core/trunk/src/docs/src/documentation/content/xdocs/mapred_tutorial.xml
    hadoop/core/trunk/src/java/org/apache/hadoop/mapred/DefaultJobHistoryParser.java
    hadoop/core/trunk/src/java/org/apache/hadoop/mapred/JobClient.java
    hadoop/core/trunk/src/java/org/apache/hadoop/mapred/JobHistory.java
    hadoop/core/trunk/src/java/org/apache/hadoop/mapred/JobTracker.java
    hadoop/core/trunk/src/webapps/job/jobtracker.jsp

Modified: hadoop/core/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/CHANGES.txt?rev=637723&r1=637722&r2=637723&view=diff
==============================================================================
--- hadoop/core/trunk/CHANGES.txt (original)
+++ hadoop/core/trunk/CHANGES.txt Sun Mar 16 22:19:52 2008
@@ -230,6 +230,14 @@
     HADOOP-2976. When a lease expires, the Namenode ensures that 
     blocks of the file are adequately replicated. (dhruba)
 
+    HADOOP-2901. Fixes the creation of info servers in the JobClient
+    and JobTracker. Removes the creation from JobClient and removes
+    additional info server from the JobTracker. Also adds the command
+    line utility to view the history files (HADOOP-2896), and fixes
+    bugs in JSPs to do with analysis - HADOOP-2742, HADOOP-2792.
+    (Amareshwari Sri Ramadasu via ddas)
+
+
 Release 0.16.1 - 2008-03-13
 
   INCOMPATIBLE CHANGES

Modified: hadoop/core/trunk/build.xml
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/build.xml?rev=637723&r1=637722&r2=637723&view=diff
==============================================================================
--- hadoop/core/trunk/build.xml (original)
+++ hadoop/core/trunk/build.xml Sun Mar 16 22:19:52 2008
@@ -179,7 +179,6 @@
     <mkdir dir="${build.webapps}/job/WEB-INF"/>
     <mkdir dir="${build.webapps}/dfs/WEB-INF"/>
     <mkdir dir="${build.webapps}/datanode/WEB-INF"/>
-    <mkdir dir="${build.webapps}/history/WEB-INF"/>
     <mkdir dir="${build.examples}"/>
     <mkdir dir="${build.anttasks}"/>
     <mkdir dir="${build.dir}/c++"/>
@@ -272,13 +271,6 @@
      outputdir="${build.src}"
      package="org.apache.hadoop.dfs"
      webxml="${build.webapps}/dfs/WEB-INF/web.xml">
-    </jsp-compile>
-
-    <jsp-compile
-     uriroot="${src.webapps}/history"
-     outputdir="${build.src}"
-     package="org.apache.hadoop.mapred"
-     webxml="${build.webapps}/history/WEB-INF/web.xml">
     </jsp-compile>
 
     <jsp-compile

Modified: hadoop/core/trunk/conf/hadoop-default.xml
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/conf/hadoop-default.xml?rev=637723&r1=637722&r2=637723&view=diff
==============================================================================
--- hadoop/core/trunk/conf/hadoop-default.xml (original)
+++ hadoop/core/trunk/conf/hadoop-default.xml Sun Mar 16 22:19:52 2008
@@ -524,15 +524,6 @@
 </property>
 
 <property>
-  <name>mapred.job.history.http.bindAddress</name>
-  <value>0.0.0.0:0</value>
-  <description>
-    The job history http server bind address and port.
-    If the port is 0 then the server will start on a free port.
-  </description>
-</property>
-
-<property>
   <name>mapred.job.tracker.handler.count</name>
   <value>10</value>
   <description>

Modified: hadoop/core/trunk/docs/changes.html
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/docs/changes.html?rev=637723&r1=637722&r2=637723&view=diff
==============================================================================
--- hadoop/core/trunk/docs/changes.html (original)
+++ hadoop/core/trunk/docs/changes.html Sun Mar 16 22:19:52 2008
@@ -97,7 +97,7 @@
     </ol>
   </li>
   <li><a href="javascript:toggleList('trunk_(unreleased_changes)_._improvements_')">  IMPROVEMENTS
-</a>&nbsp;&nbsp;&nbsp;(13)
+</a>&nbsp;&nbsp;&nbsp;(14)
     <ol id="trunk_(unreleased_changes)_._improvements_">
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-2655">HADOOP-2655</a>. Copy on write for data and metadata files in the
 presence of snapshots. Needed for supporting appends to HDFS
@@ -125,6 +125,8 @@
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-2775">HADOOP-2775</a>.  Adds unit test framework for HOD.
 (Vinod Kumar Vavilapalli via ddas).
 </li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2804">HADOOP-2804</a>.  Add support to publish CHANGES.txt as HTML when running
+the Ant 'docs' target.<br />(nigel)</li>
     </ol>
   </li>
   <li><a href="javascript:toggleList('trunk_(unreleased_changes)_._optimizations_')">  OPTIMIZATIONS
@@ -144,7 +146,7 @@
     </ol>
   </li>
   <li><a href="javascript:toggleList('trunk_(unreleased_changes)_._bug_fixes_')">  BUG FIXES
-</a>&nbsp;&nbsp;&nbsp;(34)
+</a>&nbsp;&nbsp;&nbsp;(36)
     <ol id="trunk_(unreleased_changes)_._bug_fixes_">
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-2195">HADOOP-2195</a>. '-mkdir' behaviour is now closer to Linux shell in case of
 errors.<br />(Mahadev Konar via rangadi)</li>
@@ -208,6 +210,11 @@
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-2972">HADOOP-2972</a>. Fix for a NPE in FSDataset.invalidate.<br />(Mahadev Konar via dhruba)</li>
       <li><a href="http://issues.apache.org/jira/browse/HADOOP-2994">HADOOP-2994</a>. Code cleanup for DFSClient: remove redundant
 conversions from string to string.<br />(Dave Brosius via dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-3009">HADOOP-3009</a>. TestFileCreation sometimes fails because restarting
+minidfscluster sometimes creates datanodes with ports that are
+different from their original instance.<br />(dhruba)</li>
+      <li><a href="http://issues.apache.org/jira/browse/HADOOP-2992">HADOOP-2992</a>. Distributed Upgrade framework works correctly with
+more than one upgrade object.<br />(Konstantin Shvachko via dhruba)</li>
     </ol>
   </li>
 </ul>

Modified: hadoop/core/trunk/docs/cluster_setup.html
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/docs/cluster_setup.html?rev=637723&r1=637722&r2=637723&view=diff
==============================================================================
--- hadoop/core/trunk/docs/cluster_setup.html (original)
+++ hadoop/core/trunk/docs/cluster_setup.html Sun Mar 16 22:19:52 2008
@@ -637,8 +637,7 @@
 <p> The job history files are stored in central location 
             <span class="codefrag"> hadoop.job.history.location </span> which can be on DFS also,
             whose default value is <span class="codefrag">${HADOOP_LOG_DIR}/history</span>. 
-            Job history server is started on job tracker. The history 
-            web UI is accessible from job tracker web UI.</p>
+            The history web UI is accessible from job tracker web UI.</p>
 <p> The history files are also logged to user specified directory
             <span class="codefrag">hadoop.job.history.user.location</span> 
             which defaults to job output directory. The files are stored in
@@ -647,18 +646,19 @@
             logging by giving the value <span class="codefrag">none</span> for 
             <span class="codefrag">hadoop.job.history.user.location</span> 
 </p>
-<p> User can view logs in specified directory using 
-            the following command <br>
+<p> User can view the history logs summary in specified directory 
+            using the following command <br>
             
 <span class="codefrag">$ bin/hadoop job -history output-dir</span>
+<br> 
+            This command will print job details, failed and killed tip
+            details. <br>
+            More details about the job such as successful tasks and 
+            task attempts made for each task can be viewed using the  
+            following command <br>
+            
+<span class="codefrag">$ bin/hadoop job -history all output-dir</span>
 <br>
-            This will start a stand alone jetty on the client and 
-            load history jsp's. 
-            It will display the port where the server is up at. The server will
-            be up for 30 minutes. User has to use 
-            <span class="codefrag"> http://hostname:port </span> to view the history. User can 
-            also provide http bind address using 
-            <span class="codefrag">mapred.job.history.http.bindAddress</span>
 </p>
 <p>Once all the necessary configuration is complete, distribute the files
       to the <span class="codefrag">HADOOP_CONF_DIR</span> directory on all the machines, 
@@ -666,7 +666,7 @@
 </div>
     
     
-<a name="N10343"></a><a name="Hadoop+Rack+Awareness"></a>
+<a name="N10345"></a><a name="Hadoop+Rack+Awareness"></a>
 <h2 class="h3">Hadoop Rack Awareness</h2>
 <div class="section">
 <p>The HDFS and the Map-Reduce components are rack-aware.</p>
@@ -689,7 +689,7 @@
 </div>
     
     
-<a name="N10369"></a><a name="Hadoop+Startup"></a>
+<a name="N1036B"></a><a name="Hadoop+Startup"></a>
 <h2 class="h3">Hadoop Startup</h2>
 <div class="section">
 <p>To start a Hadoop cluster you will need to start both the HDFS and 
@@ -724,7 +724,7 @@
 </div>
     
     
-<a name="N103AF"></a><a name="Hadoop+Shutdown"></a>
+<a name="N103B1"></a><a name="Hadoop+Shutdown"></a>
 <h2 class="h3">Hadoop Shutdown</h2>
 <div class="section">
 <p>
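
As a usage illustration of the commands described above: "bin/hadoop job -history" is backed by the HistoryViewer class added in this commit. A minimal sketch, assuming only the constructor and print() signatures visible in the added file below (HistoryViewer is package-private, so the demo class here is hypothetical and must live in org.apache.hadoop.mapred; real users go through the shell command):

  package org.apache.hadoop.mapred;  // HistoryViewer is package-private

  import org.apache.hadoop.conf.Configuration;

  class HistoryViewerDemo {  // hypothetical name, for illustration only
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      // printAll = false corresponds to "-history";
      // printAll = true corresponds to "-history all".
      HistoryViewer viewer = new HistoryViewer("output-dir", conf, false);
      viewer.print();  // job details, task summary, analysis, failed/killed tips
    }
  }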

Modified: hadoop/core/trunk/docs/cluster_setup.pdf
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/docs/cluster_setup.pdf?rev=637723&r1=637722&r2=637723&view=diff
==============================================================================
--- hadoop/core/trunk/docs/cluster_setup.pdf (original)
+++ hadoop/core/trunk/docs/cluster_setup.pdf Sun Mar 16 22:19:52 2008
@@ -266,10 +266,10 @@
 >>
 endobj
 42 0 obj
-<< /Length 2722 /Filter [ /ASCII85Decode /FlateDecode ]
+<< /Length 2677 /Filter [ /ASCII85Decode /FlateDecode ]
  >>
 stream
-Gatm=D3*F0')oV[63moD!P5X+;/cEA9sWG(g2&CR_0rg5!j,/N9Op_[AGc(Xrq?oq3g<1;...@V>BpJ]X/l0*l6JL(='-C+O@)k5iT]`%QEnkf+Jl`B[iK'J/[s*$<^]&QRrp/=n:OfO%__Jai3>t13h0tK!Xf2fqOC/@h1^,@8*dO`r18[<Aei3nRpls!SCe2;3&fIHTiCrQoUWoELp$1Q_iDh0F0rAd0I(sL87#<635TdX&*hRAYfZM']\CuQ2>;[\48jtER9oP.
 K\<,fP_%X'cpuSn3H(lS*4O[K_U#drXQdg+POh&i:QmurNAR/;8HB4;saU`N6W&<F*1IcF"^qYY@s'Gfm`E*=oe^rI=L&N[#,;#Ge`d"($*$NA_dk!bA!aGjP71b>b8n?XHY[B$C(]1]BG=``V!iE(H4sD49/(pbl,J79[.G?U,SNC%q<1YPtl4D@1S'5MuYu#Qeb;6koJh6I!SaL4ed5*'K!9sG(8tZp]\0D\="S*u98I8q(:U9*9*q@#cl:mkIq+5k'!(Uq*bVOgTmBN+0JF1),T!HF<]:ORrhmN.3/M9QGlb7*)mLi3C/q@j'Ns5Hc_[*GH5gsUIE@Xg=:Q*[KD[*#rFcA_SRjpHj"n390pIk)pX_'!]XX[XXW1n0FP8A"3'l!SVYbB.7Mt>YG"*[H/duKbAU:f!Z&#bZOg=2(Z'$N%_jXF\rCQG+^,!4;e18<#G2Dg4W[=f?5;ulg96E]Y6Vpu-A=e3CYoFhS.G!KIGTrJ6s>qOZ3e)J]k:US0Zk]@MS/.7D+_%fQ\YLb'3*,R_j`'6:60[Ff86u8?lS%A<D@8GTDU^:+[@WUL+Imf2VBQMhle.B9O9@=U9g+c933-,,8MNl`&O[:)=V0I[K1;TX+"n:J'cbHT'nEk`u*>a3HpYf;=nYN=mS`s_qHKWsAU"[Q/:1V"(:=i9q879INbB"n`\9=eh1!C:iGJCigF!X2f1eCN2_58E@BpWO#7RKd]i3mQNQig';pJ4&ma48]HBNd=VgO/@Q[oBg'p1(;DV"QUa?4+[=gOY*Uhl+F5XubYMIZ'--:OIT+\Lhk7qFakYLo-[u/n=.2G'8t6IS@"]TlOS9o@4j'!8B]ulQoj_DbTD+gY>;o<pJ=jH;r]2_rF*K5V67;Gk7L%@i^J$HXEjK?Nu<+10$fsAk^5s`-Q,MM-LZfpZEJo]1h602__KC3B0tq7et(PmGS*ndU&uqNqmmQLZ-Ntp$m4ig)L<B8j18mTXcudNYI
 5c2eV$;2c>`/JEqK,m,P;)7GC\;6Ld<ZH...@Tg-qdK6>PF_HE>4*]>"=Z#l_U~>
+Gau0E=c_>r&q6H[+W(5h!u\O8;:eG@a7aa)Lco(j"T,D5%(2P[c&<nt3k@,ZaFE*q?'Hb^GRkY"Rk:ltIe?\Uqks=LGP;o'>ZeuHrk?,Q9O\pClb^E\WBXbDbTj9+)`]jc(Os'll0/YC9rP@+F6+8TKkL/W$a\N!_B6s7pG,3Ch`'s:iu]olTnJP0`[,J4^&1;?%O+'K14D%IJ(?'W4^q=(=&")S&6Og)B2G_1V)E=1g8_grG>`%Jg8a)M\13OZ)pepH5/0$//<:F*iDQlur_Er;qBU%YiLAIRM=5]/fskhbkr.]0@k_O,JdcsrL!cXGW=i$C=mKg8+sn6oK^BD=/PG<0*Hr]K@sMIsGLkaUVKU;n7IQg9pmf]/O%h146*k@;h\=gBb&`1sePb^El[%_<fMmN>K,IGiGMJ&!I3XqDVT"_3Wp!WQ];fX!^f:Ol',$n>J8)6rhhd:l',G)/=_9#s]tVh1KV)iT0LB5%o5PAOi^bFmc!V+(H9q"CU=@"oEtupihm1(])AH:=,h)^h_(j&PRIRp(]GQ=G8Yi4%Ik"t^@ZIOOj6*?j8stKbnqiM-@`gsJOOo9er@AYo82u/j.!@uuW]Vhd9ZQO-C&sY4ga"E_[tX'a>SP7eR=9EM#607h/IdOtY#"tG@d^Z9HX@/"3e)N@SNEuC*8;Fr*/Mmi%[fE&-Pj$l477)nJ!\o0ELb`]T,.u,Ncs!WEYKLEJf4*cbek8>:=R70&Tou@)Z2+o*h-Xe?mY&r,;W1i52/>>]q"[k6q:cpLlC/1I&c5i[o[dposg)JN6Cfi(22H_n&$VE$$H[9$9]3ERcCj,,eRUUGQ57'rV#sYe(7rEhg^'$oD,T7E&`bn4-i>IK%p6cokL.u7D'f%;[AmR!m%Q(W_Z!(U50V>f])8Sk;$2U).4\8i;')[/Y+o!8K9=:^Bq@A6)p`36&[6!RN,jIOmu!/"b(_EEZQZB1-aR6FQRqg?@d4#krH8^TenH
 'g'$%K&.?0Aas"j+bc4S$c-qJWcnu4\qO_fHMm!]iU5/F(L&N[##csQtVH'3W*$sqEE>BZc!Cj\W&Skfe&?.Q#XuWB5M^6N^?.)3.J8(L0O3#CL'm(mX">=C;7%9X<F6...@atk>K*=4<,PIZW+k,V7Q1?C"tA:[D;FALAK4HT\!-$fNC'PN1tGGKhU5b8*C0>qJB)>-:h,H-bN`/cW"f;W/RO6)SYb;bf#j`pu7Ct!2_&P!ZZLZJIF[_$,Z$B]4dI?2m+f;<o(\/[["^1h#%$)Tp)DqKTo#L**ug%l-"Efm),;[@'Wb>?+\9%6#*EE![h%j4uJC6?bOBa]^`]-C9>aILSb+cT]/:&AJ'nO8.G2cQI!'Z5?Q_GH!+_bsE95=:LSOs_+on0Hd<a?a4s+Z;Y%e0KVm(nNnQ+m-6p=)`Ea5Oc'd<5eeupkB7'^)>*PN8-c%h*RgNX[0VXE6@Jk<=!#AN0rZq(@r0*U)eK@MeuHHbrLu&MQ?LJd+15_`&K#Wf0I'\rHM=dODD1kBh^O%#S"DV*3',q.8l2T\pV^i"?Y32,Wi5dD>L7E6PFiqqr-@<Rl]VGm4X10@4FS]d?l7k1iN`lm.P9$G7g$qo/$$?j6H:T)BEe'=IdniZt\$sqcV0a@/6<qNC&BrVPi["YLSIt+VFY/LG6@<b]afAG)\qhltO#[Wmr]sAQX;U,uNr(i[Rt\?Q[d-Lo]t=(af0UgoY]7oedh^Cn(=3W0LO5J\C@JTB5d\T/BYe.\-<0\V81fC+hTTn7BCN5#Z%R/Rgbu'"bQs5-RU4h(SLqI;g0t0M<rTbY_U[-#SdPO4qJT:1V%=Oe1Wd_X4k0HN!)?mB.8&:MTcWH`t!HKu30Q_`,oUltrO@PB(!Npsd>k\Ni$):5.\YhY(oTLpSMeYf'PKB6%8U'"P&P5+R@k+:O+U)E3A-E`V'6g9:ZVdGF)r+&QpRO=oSYb@G;"q1O1MJH<`V=8J?tWX4\.ZU<
 %:r>:i!ht?OiI-\%^MkZ-.Y+O'o"PR(d&'7_+KaqgVIXI%V!EDS)6m\PEIj?J)&1Po!N/e%\[B.))'i:dZ7NO'&c;`^71ktgHm[Fpo,I,i]Gli@UEGdf"'hIC#jsG2m7?i_3gs"LCPp9FM.4j\Sh,tS!25KY@8.4![HM(lZ;"/A,^WY&Jm8YVJBFA/GYRn>on=T=i$LkhWS*iNp>:IbGX7f?FMJ^U`=lh(B^=()X.I+7FYUK"1RWMn'+$:;rT((:r;AMuBL'Fm/X:P6)CFZVD0@dC3a\%W(N2\SEqi[oW\V\hr$]4\9a(mmie3E-#WLVWf(t?3oDF0]Dq't(WW[RUFpR*u&HZ.QXabrSHje_g39oj'`dQ#Pls7)N6iN$R5iqkO5M$#mV8<B4/L=X#8m]*O''^*tKC85=7G'+_"\X2?&q5Ol8$UqXa8t6'OGS"!rLl\hiPs_Rd5VddVmO6P)<snF'.J+JZfc2_Wn<FeII);,*!PQ57#)3?t_kP!>2g!@jJ`fqkjdWM<)nrodVT@7&ld-+UQK]"-l/mi6fr_0/I6UG.;^7h/VT?6+J.^])D"n\c\>%5::0;+tC=&bRO#a/^Z5eW&Y%.58gp-]_?*hRV*0*a<4hh:^Ff]RO+a:q+7oSfLs+.^fhoU:lm\Z-?Rk%X<]8!Qrg=?>&>r9,#[,qP<:OG[Pf_laZQL]2~>
 endstream
 endobj
 43 0 obj
@@ -313,7 +313,7 @@
 47 0 obj
 << /Type /Annot
 /Subtype /Link
-/Rect [ 172.332 234.403 206.988 222.403 ]
+/Rect [ 172.332 247.603 206.988 235.603 ]
 /C [ 0 0 0 ]
 /Border [ 0 0 0 ]
 /A << /URI (api/org/apache/hadoop/net/DNSToSwitchMapping.html#resolve(java.util.List))
@@ -322,10 +322,10 @@
 >>
 endobj
 48 0 obj
-<< /Length 1470 /Filter [ /ASCII85Decode /FlateDecode ]
+<< /Length 1388 /Filter [ /ASCII85Decode /FlateDecode ]
  >>
 stream
-Gb!#\gMZ%0&:O:Sk[>R=5g>*D;<gN"h5"r"5a1ThDb[K5/D:k?(0sLl&JY3<N\34]fs9^EJA?scd[D'Ebfo%.5#2Bt:'U_,YVV'LhZeXi>fZFuph%1O*`<e/-^2ini6T7&al%3X*DGCHa_]m97P9(m;]B`fGqO.b9BWL:6pAa=s6'k_jlf%Uo^t&2/u8$l-;G1:HB?.;.a<lf4Y:3?-\mH.e@9e$1-Ja<]J)\-<tg=L?hAIQk!/KO)6R"[3kt]nVnt8G#HCnIX;1GIBC_mQ#,?ukdE'2h%r4&a`*'-,9oR8XE%n=K@)mo4#jTt14JZ9<KTT._4WYDJ<ifA^^B=:U).K2HXT7@,SL%YqJ-Qq%U5hH83aQ.41[s^:fecjpKaI.1>'Et<.a=,)]&Pq=\r7RE7`!kbO2`2CrZbYW^WC.PQ$^\ALU,1<!;Xn%7k._tLNf92N?1JBe9:bo[YfDI-Sg1'36Rq<)%Sb.0Dhl.q#NU&DS>!g]j,B9D<B8c.8psQ4YrTcX))6e-'`MhRM]NRfHX41i_I^1.S8+o9%n+PjV#B/8^Cr5eEl]8=-:_eGoB`QFX"f/5_77_NSFRR[hTgPRqI<thD.0`&4-pgS<0a!LX@YCq;N[^Y0fcJGpFlZ(dY'g#%n&`"DZMrcDOgX1.sRJQ'nR-?RI2&(.X]])+Hg!,CK:\k0s"Zh:M*sGIO@9f=uh4mkR\Ua7;2LNXGBsY/AC/3Dq!UGd.T/doDH4+>0J?[^*E;K"Eri`:L%P1>Q5k7<i8R'U"/SAl0tCWhWKZP:WiE8UttX`i7o]aD,Z&lklVMrJ([L4gdG,9G?=;_&)bu.F2:f*Pn/UFO0Gej"jGS/9.oai>BmOe>)aUeJ-H4pJ$;3iXq40S@9KNDYr$)_SM.VT*X0Ao%`B*KN3O?Y'O(N%>)%E$ei?"`T=]YSq,ZZ]a-/39:"C<1k2F5Ui7:T%G$Z\0laa+(QmFISa)A)?Nl[1eKos"INN]
 (TuHe-hMqt"k.B.b:$#JMn"W%U.*ijUlm)a3js%CMH]Ng/Wi,Q`:uurfVAB89(B(!?cq80@=&pCWqOP)^4GSMJ8B<qa2%/dnR1rg]f\U_)19'2>[X!p;_*3QVC"]oX8ZhiI'BZ%i:-Zp-+*HQILpud.>b2M!kso'L'AeL,Gs6B#q\*'f5d.L$H_bM&OuWgBO#':EnNs06P<:CSZN:cZ8s6P$]J=uPj#,et&C5fQ+b].+GiA7Pjf;Hg&FAd%^47MWJj<&`meZ/bl7jE=QKd-p)<1FImH$WAMD,A%^fnDWJHFdBkMf+KKA+S0Aa=bIq<G<WL0]?5Do@SBj\e^=fiH8RK.@3EBXhXT?c0F]64%/tfWDC\KgZ((`p6[LJS%(5!<6JZCKFSPop!Ge790B)0+k`"M6BFfFbLl2C+/DaVcEO^<fb:9<c#XsQl2lG)MLV];Il0F\Z#g6m+`H$>\9&D`YuAe!UU:T_u~>
+Gb!#\?#SIU'Rf_ZcpJlf?tR>1;<l&L2OW/3B[YFWCPG0\BNpiTC6DgH)"uiWM6]^qQ7_8%#_F<KXa:TIiOtS5qA>6A$V15UTX0`p^Q/F4\;o`tnh6^0k:pKI3<aUJ7XpF^L81dUFWciSK_(>\(qK2B3m*P1^iP>GY+4qs75XnR]QD/lnT/^-9^)$A=8f[e9&lC@PikWH\A&05c<j?A=^j,k&qt&[+US;;.6J,S(S@F(Lg6"=<E)r`]_-Ik@Mu#$H5eY\4[==F[MRj7J=+1aKuPWY4?\Z"=#N_'A]5$?nL2t7YY!8BO##!jK=$*?#>m(4#FF5]^R7cTKk6"V02Xq4`oH`Nmk&Ii'3=VPa1uPET86qk*KE,>)a0"@Z7;#mF_+#t:JtL_8t=H/h2V_q$)_)0r"empo\d80i$>p<DOGA)Z=q35&T^)kT!tb:0mYaC&g)?M`mc@/#aLmTQh.aY;mDH6D=B#U.nT^nHDXeWH6BL7="u@l72@cQoeJsH2?uFN!kFS4gbs!@##?'>Zt"9*6SJ])n:ibk3KE8K.$l;ALINF=q%6R3Te'3ei!_;mAH(64jfD!n1Nh/EQlD<Ula/-'qpE#<9H`i(NUZAn:EOKBm_8\1^:Ts6\G>1>Iog2"rT*'3<a</8Ye!Ac,Y1.,7.Fiu)NY?J70)"A<j-3#*_Zq3I$Iih]:5aV:+QIic%6tB38uI9;^mj^/?Q3LA_Sq$NhZP]=!LC_Qqm;rC63A5m'5>[jBE4D((Xi%ma`aZ0@/.eH\NmUT1G+&*@lM_N#+St;#>q0(,Di^9nW',&;>Ye%UQUA<BqBaX7d.C*A:2?IZ-fbR,MdV7c5EQg-)5/'e(ih'lbYphW[Ho-IFEWf-pW)EJ])c&/Qd+G"c-nf2X@[=g,ET^'c&Q4%1b$q*WXXQ6jPZnoe3)Qu?#P[$bDUI>ITiX?LPRZ]csH;[hm`l+J,`nq#Ksb:L\<2hN$1AMG/6+gbn\_Wh6HHD_d
 >VmIRM?\l:R<#=7Q19iB-\:*QLA^\pkD"@\<!p.)tFmV6W8`[e.M3PW'4T-gT6e$r3!8=i-Fa8Q?VQ[XU:O!iLjQWd6`TIoj!=#)pcpM*NPFVe1GeA)+(,'f\k;<dni[d>_8DPOC#;=jsW!<n$5Wnl=JhYZ=n]^lgj_I\!+R&215,1dGZ4S9i],RVWYX`6!ka9)EPX1>Z/:790+9iA\$<X>F-9DP*OE5EDha-YlVS)N7XFAjSG_7UNN4*38Q0D1>RJBn+^k4-CLqHis$htVl\-9j1j[3*h-P[8'KGlK\+f-=NW$CUJ,,eaPMu9hN(<\')R9qaj0."=R+kL:O'IkT_i'SR1bjeZVX%cP11D@%=ii#I;"q3;q2ep]IQc>DZ]_Er!=;1EnJ`c^?~>
 endstream
 endobj
 49 0 obj
@@ -563,19 +563,19 @@
 21 0 obj
 <<
 /S /GoTo
-/D [43 0 R /XYZ 85.0 298.137 null]
+/D [43 0 R /XYZ 85.0 311.337 null]
 >>
 endobj
 23 0 obj
 <<
 /S /GoTo
-/D [49 0 R /XYZ 85.0 628.6 null]
+/D [49 0 R /XYZ 85.0 641.8 null]
 >>
 endobj
 25 0 obj
 <<
 /S /GoTo
-/D [49 0 R /XYZ 85.0 391.066 null]
+/D [49 0 R /XYZ 85.0 404.266 null]
 >>
 endobj
 50 0 obj
@@ -622,31 +622,31 @@
 xref
 0 79
 0000000000 65535 f 
-0000024333 00000 n 
-0000024433 00000 n 
-0000024525 00000 n 
+0000024206 00000 n 
+0000024306 00000 n 
+0000024398 00000 n 
 0000000015 00000 n 
 0000000071 00000 n 
 0000000825 00000 n 
 0000000945 00000 n 
 0000001026 00000 n 
-0000024670 00000 n 
+0000024543 00000 n 
 0000001161 00000 n 
-0000024733 00000 n 
+0000024606 00000 n 
 0000001298 00000 n 
-0000024799 00000 n 
+0000024672 00000 n 
 0000001433 00000 n 
-0000024865 00000 n 
+0000024738 00000 n 
 0000001570 00000 n 
-0000024931 00000 n 
+0000024804 00000 n 
 0000001707 00000 n 
-0000024997 00000 n 
+0000024870 00000 n 
 0000001844 00000 n 
-0000025061 00000 n 
+0000024934 00000 n 
 0000001981 00000 n 
-0000025127 00000 n 
+0000025000 00000 n 
 0000002117 00000 n 
-0000025191 00000 n 
+0000025064 00000 n 
 0000002254 00000 n 
 0000004485 00000 n 
 0000004608 00000 n 
@@ -664,42 +664,42 @@
 0000012056 00000 n 
 0000014816 00000 n 
 0000014924 00000 n 
-0000017739 00000 n 
-0000017862 00000 n 
-0000017903 00000 n 
-0000018087 00000 n 
-0000018275 00000 n 
-0000018502 00000 n 
-0000020065 00000 n 
-0000025257 00000 n 
-0000020173 00000 n 
-0000020306 00000 n 
-0000020495 00000 n 
-0000020673 00000 n 
-0000020897 00000 n 
-0000021112 00000 n 
-0000025308 00000 n 
-0000021361 00000 n 
-0000025374 00000 n 
-0000021761 00000 n 
-0000025440 00000 n 
-0000022104 00000 n 
-0000025504 00000 n 
-0000022407 00000 n 
-0000025568 00000 n 
-0000022569 00000 n 
-0000025632 00000 n 
-0000022763 00000 n 
-0000022960 00000 n 
-0000023190 00000 n 
-0000023379 00000 n 
-0000023560 00000 n 
-0000023673 00000 n 
-0000023783 00000 n 
-0000023894 00000 n 
-0000024002 00000 n 
-0000024108 00000 n 
-0000024224 00000 n 
+0000017694 00000 n 
+0000017817 00000 n 
+0000017858 00000 n 
+0000018042 00000 n 
+0000018230 00000 n 
+0000018457 00000 n 
+0000019938 00000 n 
+0000025130 00000 n 
+0000020046 00000 n 
+0000020179 00000 n 
+0000020368 00000 n 
+0000020546 00000 n 
+0000020770 00000 n 
+0000020985 00000 n 
+0000025181 00000 n 
+0000021234 00000 n 
+0000025247 00000 n 
+0000021634 00000 n 
+0000025313 00000 n 
+0000021977 00000 n 
+0000025377 00000 n 
+0000022280 00000 n 
+0000025441 00000 n 
+0000022442 00000 n 
+0000025505 00000 n 
+0000022636 00000 n 
+0000022833 00000 n 
+0000023063 00000 n 
+0000023252 00000 n 
+0000023433 00000 n 
+0000023546 00000 n 
+0000023656 00000 n 
+0000023767 00000 n 
+0000023875 00000 n 
+0000023981 00000 n 
+0000024097 00000 n 
 trailer
 <<
 /Size 79
@@ -707,5 +707,5 @@
 /Info 4 0 R
 >>
 startxref
-25698
+25571
 %%EOF

Modified: hadoop/core/trunk/docs/hadoop-default.html
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/docs/hadoop-default.html?rev=637723&r1=637722&r2=637723&view=diff
==============================================================================
--- hadoop/core/trunk/docs/hadoop-default.html (original)
+++ hadoop/core/trunk/docs/hadoop-default.html Sun Mar 16 22:19:52 2008
@@ -315,12 +315,6 @@
   </td>
 </tr>
 <tr>
-<td><a name="mapred.job.history.http.bindAddress">mapred.job.history.http.bindAddress</a></td><td>0.0.0.0:0</td><td>
-    The job history http server bind address and port.
-    If the port is 0 then the server will start on a free port.
-  </td>
-</tr>
-<tr>
 <td><a name="mapred.job.tracker.handler.count">mapred.job.tracker.handler.count</a></td><td>10</td><td>
     The number of server threads for the JobTracker. This should be roughly
     4% of the number of tasktracker nodes.

Modified: hadoop/core/trunk/docs/mapred_tutorial.html
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/docs/mapred_tutorial.html?rev=637723&r1=637722&r2=637723&view=diff
==============================================================================
--- hadoop/core/trunk/docs/mapred_tutorial.html (original)
+++ hadoop/core/trunk/docs/mapred_tutorial.html Sun Mar 16 22:19:52 2008
@@ -289,7 +289,7 @@
 <a href="#Example%3A+WordCount+v2.0">Example: WordCount v2.0</a>
 <ul class="minitoc">
 <li>
-<a href="#Source+Code-N10BDE">Source Code</a>
+<a href="#Source+Code-N10BE0">Source Code</a>
 </li>
 <li>
 <a href="#Sample+Runs">Sample Runs</a>
@@ -1579,23 +1579,24 @@
 <p> Job history files are also logged to user specified directory
         <span class="codefrag">hadoop.job.history.user.location</span> 
         which defaults to job output directory. The files are stored in
-        "_logs/history/" in the specified directory. Hence, by default they will
-        be in mapred.output.dir/_logs/history. User can stop
+        "_logs/history/" in the specified directory. Hence, by default they
+        will be in mapred.output.dir/_logs/history. User can stop
         logging by giving the value <span class="codefrag">none</span> for 
         <span class="codefrag">hadoop.job.history.user.location</span>
 </p>
-<p> User can view logs in specified directory using 
-        the following command <br>
+<p> User can view the history logs summary in specified directory 
+        using the following command <br>
         
 <span class="codefrag">$ bin/hadoop job -history output-dir</span>
+<br> 
+        This command will print job details, failed and killed tip
+        details. <br>
+        More details about the job such as successful tasks and 
+        task attempts made for each task can be viewed using the  
+        following command <br>
+       
+<span class="codefrag">$ bin/hadoop job -history all output-dir</span>
 <br>
-        This will start a stand alone jetty on the client and 
-        load history jsp's. 
-        It will display the port where the server is up at. The server will
-        be up for 30 minutes. User has to use 
-        <span class="codefrag"> http://hostname:port </span> to view the history. User can 
-        also provide http bind address using 
-        <span class="codefrag">mapred.job.history.http.bindAddress</span>
 </p>
 <p> User can use 
         <a href="api/org/apache/hadoop/mapred/OutputLogFilter.html">OutputLogFilter</a>
@@ -1603,7 +1604,7 @@
 <p>Normally the user creates the application, describes various facets 
         of the job via <span class="codefrag">JobConf</span>, and then uses the 
         <span class="codefrag">JobClient</span> to submit the job and monitor its progress.</p>
-<a name="N10917"></a><a name="Job+Control"></a>
+<a name="N10919"></a><a name="Job+Control"></a>
 <h4>Job Control</h4>
 <p>Users may need to chain map-reduce jobs to accomplish complex
           tasks which cannot be done via a single map-reduce job. This is fairly
@@ -1639,7 +1640,7 @@
             </li>
           
 </ul>
-<a name="N10941"></a><a name="Job+Input"></a>
+<a name="N10943"></a><a name="Job+Input"></a>
 <h3 class="h4">Job Input</h3>
 <p>
 <a href="api/org/apache/hadoop/mapred/InputFormat.html">
@@ -1687,7 +1688,7 @@
         appropriate <span class="codefrag">CompressionCodec</span>. However, it must be noted that
         compressed files with the above extensions cannot be <em>split</em> and 
         each compressed file is processed in its entirety by a single mapper.</p>
-<a name="N109AB"></a><a name="InputSplit"></a>
+<a name="N109AD"></a><a name="InputSplit"></a>
 <h4>InputSplit</h4>
 <p>
 <a href="api/org/apache/hadoop/mapred/InputSplit.html">
@@ -1701,7 +1702,7 @@
           FileSplit</a> is the default <span class="codefrag">InputSplit</span>. It sets 
           <span class="codefrag">map.input.file</span> to the path of the input file for the
           logical split.</p>
-<a name="N109D0"></a><a name="RecordReader"></a>
+<a name="N109D2"></a><a name="RecordReader"></a>
 <h4>RecordReader</h4>
 <p>
 <a href="api/org/apache/hadoop/mapred/RecordReader.html">
@@ -1713,7 +1714,7 @@
           for processing. <span class="codefrag">RecordReader</span> thus assumes the 
           responsibility of processing record boundaries and presents the tasks 
           with keys and values.</p>
-<a name="N109F3"></a><a name="Job+Output"></a>
+<a name="N109F5"></a><a name="Job+Output"></a>
 <h3 class="h4">Job Output</h3>
 <p>
 <a href="api/org/apache/hadoop/mapred/OutputFormat.html">
@@ -1738,7 +1739,7 @@
 <p>
 <span class="codefrag">TextOutputFormat</span> is the default 
         <span class="codefrag">OutputFormat</span>.</p>
-<a name="N10A1C"></a><a name="Task+Side-Effect+Files"></a>
+<a name="N10A1E"></a><a name="Task+Side-Effect+Files"></a>
 <h4>Task Side-Effect Files</h4>
 <p>In some applications, component tasks need to create and/or write to
           side-files, which differ from the actual job-output files.</p>
@@ -1764,7 +1765,7 @@
           JobConf.getOutputPath()</a>, and the framework will promote them 
           similarly for succesful task-attempts, thus eliminating the need to 
           pick unique paths per task-attempt.</p>
-<a name="N10A51"></a><a name="RecordWriter"></a>
+<a name="N10A53"></a><a name="RecordWriter"></a>
 <h4>RecordWriter</h4>
 <p>
 <a href="api/org/apache/hadoop/mapred/RecordWriter.html">
@@ -1772,9 +1773,9 @@
           pairs to an output file.</p>
 <p>RecordWriter implementations write the job outputs to the 
           <span class="codefrag">FileSystem</span>.</p>
-<a name="N10A68"></a><a name="Other+Useful+Features"></a>
+<a name="N10A6A"></a><a name="Other+Useful+Features"></a>
 <h3 class="h4">Other Useful Features</h3>
-<a name="N10A6E"></a><a name="Counters"></a>
+<a name="N10A70"></a><a name="Counters"></a>
 <h4>Counters</h4>
 <p>
 <span class="codefrag">Counters</span> represent global counters, defined either by 
@@ -1788,7 +1789,7 @@
           Reporter.incrCounter(Enum, long)</a> in the <span class="codefrag">map</span> and/or 
           <span class="codefrag">reduce</span> methods. These counters are then globally 
           aggregated by the framework.</p>
-<a name="N10A99"></a><a name="DistributedCache"></a>
+<a name="N10A9B"></a><a name="DistributedCache"></a>
 <h4>DistributedCache</h4>
 <p>
 <a href="api/org/apache/hadoop/filecache/DistributedCache.html">
@@ -1821,7 +1822,7 @@
           <a href="api/org/apache/hadoop/filecache/DistributedCache.html#createSymlink(org.apache.hadoop.conf.Configuration)">
           DistributedCache.createSymlink(Path, Configuration)</a> api. Files 
           have <em>execution permissions</em> set.</p>
-<a name="N10AD7"></a><a name="Tool"></a>
+<a name="N10AD9"></a><a name="Tool"></a>
 <h4>Tool</h4>
 <p>The <a href="api/org/apache/hadoop/util/Tool.html">Tool</a> 
           interface supports the handling of generic Hadoop command-line options.
@@ -1861,7 +1862,7 @@
             </span>
           
 </p>
-<a name="N10B09"></a><a name="IsolationRunner"></a>
+<a name="N10B0B"></a><a name="IsolationRunner"></a>
 <h4>IsolationRunner</h4>
 <p>
 <a href="api/org/apache/hadoop/mapred/IsolationRunner.html">
@@ -1885,13 +1886,13 @@
 <p>
 <span class="codefrag">IsolationRunner</span> will run the failed task in a single 
           jvm, which can be in the debugger, over precisely the same input.</p>
-<a name="N10B3C"></a><a name="JobControl"></a>
+<a name="N10B3E"></a><a name="JobControl"></a>
 <h4>JobControl</h4>
 <p>
 <a href="api/org/apache/hadoop/mapred/jobcontrol/package-summary.html">
           JobControl</a> is a utility which encapsulates a set of Map-Reduce jobs
           and their dependencies.</p>
-<a name="N10B49"></a><a name="Data+Compression"></a>
+<a name="N10B4B"></a><a name="Data+Compression"></a>
 <h4>Data Compression</h4>
 <p>Hadoop Map-Reduce provides facilities for the application-writer to
           specify compression for both intermediate map-outputs and the
@@ -1905,7 +1906,7 @@
           codecs for reasons of both performance (zlib) and non-availability of
           Java libraries (lzo). More details on their usage and availability are
           available <a href="native_libraries.html">here</a>.</p>
-<a name="N10B69"></a><a name="Intermediate+Outputs"></a>
+<a name="N10B6B"></a><a name="Intermediate+Outputs"></a>
 <h5>Intermediate Outputs</h5>
 <p>Applications can control compression of intermediate map-outputs
             via the 
@@ -1926,7 +1927,7 @@
             <a href="api/org/apache/hadoop/mapred/JobConf.html#setMapOutputCompressionType(org.apache.hadoop.io.SequenceFile.CompressionType)">
             JobConf.setMapOutputCompressionType(SequenceFile.CompressionType)</a> 
             api.</p>
-<a name="N10B95"></a><a name="Job+Outputs"></a>
+<a name="N10B97"></a><a name="Job+Outputs"></a>
 <h5>Job Outputs</h5>
 <p>Applications can control compression of job-outputs via the
             <a href="api/org/apache/hadoop/mapred/OutputFormatBase.html#setCompressOutput(org.apache.hadoop.mapred.JobConf,%20boolean)">
@@ -1946,7 +1947,7 @@
 </div>
 
     
-<a name="N10BC4"></a><a name="Example%3A+WordCount+v2.0"></a>
+<a name="N10BC6"></a><a name="Example%3A+WordCount+v2.0"></a>
 <h2 class="h3">Example: WordCount v2.0</h2>
 <div class="section">
 <p>Here is a more complete <span class="codefrag">WordCount</span> which uses many of the
@@ -1956,7 +1957,7 @@
       <a href="quickstart.html#SingleNodeSetup">pseudo-distributed</a> or
       <a href="quickstart.html#Fully-Distributed+Operation">fully-distributed</a> 
       Hadoop installation.</p>
-<a name="N10BDE"></a><a name="Source+Code-N10BDE"></a>
+<a name="N10BE0"></a><a name="Source+Code-N10BE0"></a>
 <h3 class="h4">Source Code</h3>
 <table class="ForrestTable" cellspacing="1" cellpadding="4">
           
@@ -3166,7 +3167,7 @@
 </tr>
         
 </table>
-<a name="N11340"></a><a name="Sample+Runs"></a>
+<a name="N11342"></a><a name="Sample+Runs"></a>
 <h3 class="h4">Sample Runs</h3>
 <p>Sample text-files as input:</p>
 <p>
@@ -3334,7 +3335,7 @@
 <br>
         
 </p>
-<a name="N11414"></a><a name="Highlights"></a>
+<a name="N11416"></a><a name="Highlights"></a>
 <h3 class="h4">Highlights</h3>
 <p>The second version of <span class="codefrag">WordCount</span> improves upon the 
         previous one by using some features offered by the Map-Reduce framework:

Modified: hadoop/core/trunk/docs/mapred_tutorial.pdf
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/docs/mapred_tutorial.pdf?rev=637723&r1=637722&r2=637723&view=diff
==============================================================================
--- hadoop/core/trunk/docs/mapred_tutorial.pdf (original)
+++ hadoop/core/trunk/docs/mapred_tutorial.pdf Sun Mar 16 22:19:52 2008
@@ -1199,10 +1199,10 @@
 >>
 endobj
 143 0 obj
-<< /Length 2712 /Filter [ /ASCII85Decode /FlateDecode ]
+<< /Length 2635 /Filter [ /ASCII85Decode /FlateDecode ]
  >>
 stream
-Gatm>gQ((/&UiQ?+H-;_!GsL%H.SO&=I+oa:3P67M1VCem$VRE_kq\3p&9As[p^#,bo%IO+CU^L<EIf6e1";Q5Ml>5Gk:T_4+$c"dnSgWf*T>+rq1p3pMJj-9SgK`Pk0"gc^XAV$@#.ZU.?DX]D$lo"l+I[h71.g$\,ZBa9I6MY4"IoSU\EMffX]KgkoFcRtIj_;n^HmFui^t]Te[JMnCt>.Q@7<b3!i!m^9I&k#G]e<:;Ym37F-bLRnJ*4T5cO-R\Mbl!D7)an150N%*Fmo>F,9=go'f9e%*!MNb@(9UR7jk#6+M#Fuot8STX-]W'LYoGN##Zq]43/#QhI[E!n<c(X;4+VU%C>L)YsLHVMmq=ciJ"m=K]UCZe,=b!G3W9[oCXT"lM<5"jS:TnC@+rd30007FATV=oXG_1:dSNLnV-(L81Nb)VO*B>=Ln2HM6W@WkK8t)B`fWUoYYFip"ii?^[[q[-BrI?\"ciXuZ:`JZ^`ua7KOe&I(AUa]&*rd=fIo$p"1C@s.nQDn/il6:-^UD=3L'"u^G:(Y\$b;muj&$TF7E:(t>f_Bqn^&:cXGa^,:\1ufFN;AfN-OBJ+m3aZoSX1^(d6F;,:KiDJ)/0QeVPPq$KNk.pM"[M):^RbFiIJ+Bs(He2o%Iq,LPcbi'5iT?Y`V:*uir`10p6WI'qU'RdQ!pAK:ch\hh8SN+iug<:u2k&A,8;FY'".WnK;^&I@^gbALL.mels\MmcKj'l+<-dMnhd0BT;@:a%blq<m\=@-XVN8+dA&E\7$=c^CFEnF!YgM2VBFHID4f<p!Ot=;Mg6S?#4"B3D(rH8Q^&U)kNp=C&oAO?5jDC^h6T3a5^!_KTO#CZ5*rT@%,c"`kf5r\\A.mXK7pGi1C(NBrc_6bq(]Sm]Zf+a70,#EeW;4:HmZ'X%/TI(YXUABqAAr;=h+hs7pP'*_kP-bNj>#ta(YfStIgZ)rCk=.,g0=feD'Ba4T)"PHtOrQ(P
 GnJ>+EAY.Y\K@7ZE;R4,.P%S?/k'C][LmMYP:+Q9rV4M7p9oI=I;haJ+fiba+h!S=gXk"SJ/;WP!JPcASMVU`7#!i$JL"+jW-O.s,ScmT[@%+#eJ(]`0$9GU^<-K...@OdCC>LMf5l5SaRP5>dWu)PIqqo'f)O[P\*4$E-TfJRP>Yc#4+StjLH85Lbie$>4p_hR=HXWt:IdQ7*$$(Jd!GW=Pr"RkhccQaVGU\_")ZR(JF9&,VV=Q%N%jciN8#PmGOpq=qT`Y&"afR8<*+2fkAZ7=NmKJl;DLd:l'JZ5;;_Hgf11Dh4WXW&KgsU8nEJ)fMur?p_Yp2&?OVU,PAt4\MZWE!nR$\n/2ZrqQ';I`IRJ".)90Go`"_qmgf%i4AT=`nHX)3QmU!p8Q_1Ec`W&FK@VunSD,C:0on;EhO2GM`"%cmA^U=Kuo=C%n"oufXj;0p>V[$nU3Pid1V7Q*1iF4-k(^fTo9^J?oH`)-/G]4j%=8lGOMAS4;1&Y4V\odkTaF5k'hMhWTDtb><]m4;&<j8$b*mC++4\-e%$8Oi-QP-5AU(-Y1GVqa+?g,BI6i+iJX9^
 \Fk!0"DCii-UeR^k3`-Rm=X[UA;B'`;E3O!MaO$ifR0P'-r%&Sgo6O%W1jgPR2og)&d(.:_q.03cTd?'F3"H,OT5=TSWqej2cenQQ$Q3&SCi]6bD*T(H]j%,OKMYc4]dGD&^g2h!Ogu7V[HUG3CAQR!KH83qd$t9>)Y`]=Yn0`ag-U+rrgjuLJ9HYAO6Lm_MD(ZG.*eRV3W0O[u1PXWAZ-U))MLW`QB<neIrloD"S_t^YEL>XYa_GgLeIdSJ)4uD/;6@CJ(pZ`K@9*@HMM\5`)Z'-/Ro6[]%mK<J0<q74I;d8/k_%r$;A\0$l]gm7,r%L#^0M/9Rb$?dHdLuZ+C`I"SZHV?05Q7sSp#4<l"?QpgWo:n(5>$/"0/P(>AsI/jKWqYFZE.rF1j"s]IVD5Sj.eV5F1bD0>-'H"6a,9eT%cj;e>\qB%s3k!p2(GFk<FL0Tdp`=`''#%/Me#,a#.aQL-VmCUr@HfX[O<h"QQmH:sd?Pj<qAkE5u?bK=q5T_27CG(;^=7%qBBIkgIU#]%);RPRMp>mF]u?,X>kG2pI^VN5+/l>eftb@Yh,r7Ep)dP@^?DEkLJmRHQmq->oV+,/P#\bW6GM9k0[3m0\BN<eY]g`?8UMac\3Ul@`JLOfOE`W[H+rF!e"9(*:=G"#]&!;?os]Fq@dc)\+s%ZBdJJ1@pT0Y?IYHjqc7H#MC\l][n`Z!jdP(B*)I>-%W)e7ik(ci"B4"9SB~>
+Gau0E=c_>r&q6H[+U!(0!"VSqm_!Cg3-LC)As/LO`I]>>],@Y<=[u9MhktQtW?l[7]7FD.Pf.%%?[]omlLF#+a.#j`gtK['Df6hhfp;+(n?;.8s1e5sO9slKKsI:gI_#:>iW^cf=:bbFiU37LE<5MLG;j]Wc\=[>lLoOUq6k'%*j'q,q9D*P@GQfAd$oY?.oX)U%sC!6jg:8+6Ar3-djd6P]h5/EL#1@V.]-46X'"gIr]&;ZMS#)P@m,8Gl:7WG<-9Qtm\'DB-H>-5GOs%)`E@6p92-rfD6d$+G:(NG%WeVVQ\1;/80t[-F]t'R(B'9,B'LkLWs*:ko^)+@H?H/L_Im?)@*@5_H.ZUfS,WH@J&1sB?13EXc^)>G%dLVB+KaP%/66\ep%.L;K?+)WonuRDZr'\s(-KDRAe1ImarIKoSRWONG"54o)ZE?,/!_&m=\eQeBDZ7EpA.26&RV)>>-!Y$F?Rnh5n1n/VDclK%3<H$>BZmRPl2ZQH2)m1r_?Q,:+3@*`fjFV/"SbRM^CtC!;5k1nS=YTgF+a!a*M!6-;D_mGomtb0hX.qdJ'NLruAKEG*`h&]Be654mOT2?eeBlpJ8@7]`lq>qAW7T/G2LE[%R,5n`5DL:n83b8Q7!Y>+uqk_&@Zc#lY*]#tjO]9GUh^:Cj+=9[t3nL8KD=3q:nQeOn2Enn!dm=-D'i@o\i,)S)Q)ji;D\Y3$aN<E8=OA*cD%KObQbFsXC+<)?d)$dK=?</2=ERTdPqrJ/gfU]15,V6=_,ln@P-)tBnX2R!8`W>F)?Qqi6Us/gF]4dS0P#a4P_.ZTg6"H0,^WF`E_$`k01TqaICk$i7g\kGGP3^:#QHuJ`IFBYZ['MO+3j)l`C?+@H6qMbd2<Z0;f0ZTFYYL3It)<(Z7YZPm_?r)%;UFD37X4NW3.)CcK,qH&Bq,ZX'Wk0$t:a8hK>j,h5lQcjTC2Br0'eUuB];EYF>&H4D^h6tEN<MrN[E3S
 28a$1lMIsKW/iqAf-J5&KEDSJhkPeXbR0R'Imtp6tj]&8bVH9#uDVOg*5+He-rI!*ri?g;?BJNk%5gq\^H"$CM+=S6E$W]l'"=su!:+`*KN$FWWL'3<,<L=I:2Ao4o!DsHf0UssgC9NRC<!1lf+ZOWJ'nY<koeX!6BZ(.43<mjO>MtWd9HC[\l"(:u$aij+G0iEMJ#kQ%g!B`WQPgK(g`)E5&e_OAaC5'UG$?Mm67=bm,uSm51,<fVL7q0VlN/;fP3"2S#n,QW[-I^bHHcJ/#^G`rRK[-90;6YG_(Q)p-,<GbiQ*rSHs==Nd-M@-c)Z5p55LM&7s0CQOL29*$FQ7+XeDiALo1O=+<P4\O(o`R"_W&l'8&*tRhp3F`EOl*#FH"`7jTscV-LnTP@:gWa8mb@l[hqJ>"3%%m:rLb:S%bSGLqr*\kl10i#^P3_Lfn4cg8L/nl$!1i"Rq^UM\fGE/@3G"!0O=>-EJRDc[(4ofo\V2BKYPe0Alm.J:ZH,M;QuW,kAJ>i+6/,_WYHP3fFO3bT,bB^md.f&Mcdc:AS]pGjTp]tA:cg7ljmh4KG!gI11_0_%^+a.eQ#:*r34SSP>Aelu4LR]7SmLI3V:KSHT:4im1Vr=X9.$J].NqC$<l`?Scjd&<BC5!8Y1;1R+ch!]Rh`BX\jl;'lr;':<3r[TXUKk<NKP2,!kPAKPoL,&/t0u^g4pXG$kXZ4kn/ZqBS2]%pujbG3bnc)pMQl<qlSRhe#/R^Zc@mB8)5Wc3]T5')EH!/7h?5kncnftkFM\"0p;hC-gMKRU&_o5:`+V`MXRFln([n_!(nDHir:]=ZIM@_Y355eT_RWSHS97A^_hXpugYP0-1hE4b3.p1"kLI]$A80S6F1-a:PmK%<u$lt-g`iBT,gHtAk(t(WpZNAWAWWNRgalnDrWl]KBM7#1n=c/FObh*q<ZF@i95BQ:BKcpiEB.uc5$lRQ;kI:C<lJ3MtR287cV0I7`=$t
 gP'.e[YXVn7Dn!X9`>,iIdR;Z2p(!jG:D1BA\O!A(".-a"sUV1#F[-,q(fU_F.<LVX-Z7G!5]pRe(#-j22XHJFjiD0GXO'J^$\H<XtEe&8mZ^k/1BQK;4n.=+?HE!EOW6s-"%pe<+>5T^GS]GQ*ol#S+bCYYVa'jE-#-GiZ=pH&4VZ6Vr7^_LsA&ottLKOXk-mkNHX6B"Rc'5LKHLP(dI#/VUls!Ju6LRA+4*:bp"_K@15NuH!p?c\Ce?nr3\!4S<]RWGgEO%.g5$Jtrn[,'t7%\R!D`bkARI@c[%&gmFD-\3?^gZ2LfB"gib]SXsENVhGpjN@HT*kb@b]A.0El[pIKDAKd]o#OHgaee/>N^:URIA[#P<-QK1@Jl;=tF(.Vr8bT>dV(Yh`rgUJqIIoEisK8l,)c1EUH`-$Qbo.,bUbGVO7ThIuN+lO9;&pGA$J"-Ct<J%Hk@#9G1CTJ`h:pnE.Tjs7lP3YqCVTGC;2a2WNi<\h\u*.HPOGY)RKa?t8B/=naHHDWol';3/e0iL%o2:J^!lZ@^hZhe/Vg>`Z73aD9\>!5l>)SZK"hr`\OaU!9a5aV*O'*hQKM[Y&;E2qi0>Y8j2BXWPciHKhU6\gQr?SL&.$1UO9R/%S/rG)=eM<5iPigq[u\!fLgG('~>
 endstream
 endobj
 144 0 obj
@@ -2650,53 +2650,53 @@
 xref
 0 296
 0000000000 65535 f 
-0000106277 00000 n 
-0000106542 00000 n 
-0000106635 00000 n 
+0000106200 00000 n 
+0000106465 00000 n 
+0000106558 00000 n 
 0000000015 00000 n 
 0000000071 00000 n 
 0000001295 00000 n 
 0000001415 00000 n 
 0000001573 00000 n 
-0000106787 00000 n 
+0000106710 00000 n 
 0000001708 00000 n 
-0000106850 00000 n 
+0000106773 00000 n 
 0000001845 00000 n 
-0000106916 00000 n 
+0000106839 00000 n 
 0000001982 00000 n 
-0000106982 00000 n 
+0000106905 00000 n 
 0000002119 00000 n 
-0000107046 00000 n 
+0000106969 00000 n 
 0000002255 00000 n 
-0000107112 00000 n 
+0000107035 00000 n 
 0000002392 00000 n 
-0000107178 00000 n 
+0000107101 00000 n 
 0000002529 00000 n 
-0000107242 00000 n 
+0000107165 00000 n 
 0000002665 00000 n 
-0000107306 00000 n 
+0000107229 00000 n 
 0000002802 00000 n 
-0000107370 00000 n 
+0000107293 00000 n 
 0000002939 00000 n 
-0000107434 00000 n 
+0000107357 00000 n 
 0000003074 00000 n 
-0000107501 00000 n 
+0000107424 00000 n 
 0000003211 00000 n 
-0000107566 00000 n 
+0000107489 00000 n 
 0000003348 00000 n 
-0000107631 00000 n 
+0000107554 00000 n 
 0000003483 00000 n 
-0000107698 00000 n 
+0000107621 00000 n 
 0000003620 00000 n 
-0000107765 00000 n 
+0000107688 00000 n 
 0000003757 00000 n 
-0000107832 00000 n 
+0000107755 00000 n 
 0000003893 00000 n 
-0000107897 00000 n 
+0000107820 00000 n 
 0000004030 00000 n 
-0000107964 00000 n 
+0000107887 00000 n 
 0000004167 00000 n 
-0000108029 00000 n 
+0000107952 00000 n 
 0000004304 00000 n 
 0000006935 00000 n 
 0000007058 00000 n 
@@ -2786,165 +2786,165 @@
 0000050786 00000 n 
 0000050912 00000 n 
 0000050981 00000 n 
-0000108094 00000 n 
+0000108017 00000 n 
 0000051117 00000 n 
 0000051408 00000 n 
 0000051694 00000 n 
 0000051936 00000 n 
 0000052170 00000 n 
 0000052365 00000 n 
-0000055171 00000 n 
-0000055297 00000 n 
-0000055366 00000 n 
-0000055565 00000 n 
-0000055802 00000 n 
-0000056042 00000 n 
-0000056239 00000 n 
-0000056476 00000 n 
-0000056671 00000 n 
-0000059312 00000 n 
-0000059438 00000 n 
-0000059507 00000 n 
-0000059704 00000 n 
-0000059901 00000 n 
-0000060097 00000 n 
-0000060292 00000 n 
-0000060489 00000 n 
-0000060687 00000 n 
-0000063236 00000 n 
-0000063362 00000 n 
-0000063399 00000 n 
-0000063610 00000 n 
-0000063808 00000 n 
-0000066559 00000 n 
-0000066685 00000 n 
-0000066754 00000 n 
-0000066978 00000 n 
-0000067183 00000 n 
-0000067440 00000 n 
-0000067630 00000 n 
-0000067835 00000 n 
-0000068082 00000 n 
-0000070694 00000 n 
-0000070820 00000 n 
-0000070953 00000 n 
-0000071154 00000 n 
-0000071365 00000 n 
-0000071573 00000 n 
-0000071748 00000 n 
-0000071943 00000 n 
-0000072117 00000 n 
-0000072293 00000 n 
-0000072516 00000 n 
-0000072757 00000 n 
-0000072953 00000 n 
-0000073166 00000 n 
-0000073383 00000 n 
-0000073602 00000 n 
-0000073874 00000 n 
-0000076603 00000 n 
+0000055094 00000 n 
+0000055220 00000 n 
+0000055289 00000 n 
+0000055488 00000 n 
+0000055725 00000 n 
+0000055965 00000 n 
+0000056162 00000 n 
+0000056399 00000 n 
+0000056594 00000 n 
+0000059235 00000 n 
+0000059361 00000 n 
+0000059430 00000 n 
+0000059627 00000 n 
+0000059824 00000 n 
+0000060020 00000 n 
+0000060215 00000 n 
+0000060412 00000 n 
+0000060610 00000 n 
+0000063159 00000 n 
+0000063285 00000 n 
+0000063322 00000 n 
+0000063533 00000 n 
+0000063731 00000 n 
+0000066482 00000 n 
+0000066608 00000 n 
+0000066677 00000 n 
+0000066901 00000 n 
+0000067106 00000 n 
+0000067363 00000 n 
+0000067553 00000 n 
+0000067758 00000 n 
+0000068005 00000 n 
+0000070617 00000 n 
+0000070743 00000 n 
+0000070876 00000 n 
+0000071077 00000 n 
+0000071288 00000 n 
+0000071496 00000 n 
+0000071671 00000 n 
+0000071866 00000 n 
+0000072040 00000 n 
+0000072216 00000 n 
+0000072439 00000 n 
+0000072680 00000 n 
+0000072876 00000 n 
+0000073089 00000 n 
+0000073306 00000 n 
+0000073525 00000 n 
+0000073797 00000 n 
+0000076526 00000 n 
+0000076652 00000 n 
 0000076729 00000 n 
-0000076806 00000 n 
-0000077067 00000 n 
-0000077343 00000 n 
-0000077552 00000 n 
-0000077873 00000 n 
-0000078191 00000 n 
-0000078377 00000 n 
-0000078575 00000 n 
-0000080840 00000 n 
-0000080950 00000 n 
-0000083311 00000 n 
-0000083421 00000 n 
-0000085753 00000 n 
-0000085863 00000 n 
-0000088238 00000 n 
-0000088348 00000 n 
-0000090563 00000 n 
-0000090673 00000 n 
-0000092770 00000 n 
-0000092880 00000 n 
-0000094092 00000 n 
-0000094202 00000 n 
-0000096078 00000 n 
-0000108154 00000 n 
-0000096188 00000 n 
-0000096324 00000 n 
-0000096517 00000 n 
-0000096675 00000 n 
-0000096891 00000 n 
-0000097175 00000 n 
-0000097345 00000 n 
-0000097495 00000 n 
-0000097671 00000 n 
-0000097987 00000 n 
-0000108208 00000 n 
-0000098177 00000 n 
-0000108275 00000 n 
-0000098371 00000 n 
-0000108340 00000 n 
-0000098563 00000 n 
-0000108407 00000 n 
-0000098778 00000 n 
-0000108474 00000 n 
-0000098946 00000 n 
-0000108541 00000 n 
-0000099153 00000 n 
-0000108606 00000 n 
-0000099357 00000 n 
-0000108672 00000 n 
-0000099534 00000 n 
-0000108739 00000 n 
-0000099774 00000 n 
-0000108806 00000 n 
-0000099971 00000 n 
-0000108872 00000 n 
-0000100168 00000 n 
-0000108940 00000 n 
-0000100347 00000 n 
-0000100553 00000 n 
-0000100774 00000 n 
-0000101058 00000 n 
-0000109008 00000 n 
-0000101391 00000 n 
-0000101557 00000 n 
-0000109074 00000 n 
-0000101772 00000 n 
-0000109140 00000 n 
-0000101948 00000 n 
-0000102136 00000 n 
-0000109208 00000 n 
-0000102357 00000 n 
-0000109274 00000 n 
-0000102602 00000 n 
-0000102790 00000 n 
-0000109342 00000 n 
-0000103061 00000 n 
-0000109410 00000 n 
-0000103225 00000 n 
-0000109476 00000 n 
-0000103452 00000 n 
-0000109544 00000 n 
-0000103607 00000 n 
-0000109610 00000 n 
-0000103828 00000 n 
-0000109678 00000 n 
-0000104019 00000 n 
-0000109746 00000 n 
-0000104272 00000 n 
-0000109814 00000 n 
-0000104517 00000 n 
-0000104708 00000 n 
-0000104977 00000 n 
-0000105147 00000 n 
-0000105332 00000 n 
-0000105497 00000 n 
-0000105611 00000 n 
-0000105722 00000 n 
-0000105834 00000 n 
-0000105943 00000 n 
-0000106050 00000 n 
-0000106167 00000 n 
+0000076990 00000 n 
+0000077266 00000 n 
+0000077475 00000 n 
+0000077796 00000 n 
+0000078114 00000 n 
+0000078300 00000 n 
+0000078498 00000 n 
+0000080763 00000 n 
+0000080873 00000 n 
+0000083234 00000 n 
+0000083344 00000 n 
+0000085676 00000 n 
+0000085786 00000 n 
+0000088161 00000 n 
+0000088271 00000 n 
+0000090486 00000 n 
+0000090596 00000 n 
+0000092693 00000 n 
+0000092803 00000 n 
+0000094015 00000 n 
+0000094125 00000 n 
+0000096001 00000 n 
+0000108077 00000 n 
+0000096111 00000 n 
+0000096247 00000 n 
+0000096440 00000 n 
+0000096598 00000 n 
+0000096814 00000 n 
+0000097098 00000 n 
+0000097268 00000 n 
+0000097418 00000 n 
+0000097594 00000 n 
+0000097910 00000 n 
+0000108131 00000 n 
+0000098100 00000 n 
+0000108198 00000 n 
+0000098294 00000 n 
+0000108263 00000 n 
+0000098486 00000 n 
+0000108330 00000 n 
+0000098701 00000 n 
+0000108397 00000 n 
+0000098869 00000 n 
+0000108464 00000 n 
+0000099076 00000 n 
+0000108529 00000 n 
+0000099280 00000 n 
+0000108595 00000 n 
+0000099457 00000 n 
+0000108662 00000 n 
+0000099697 00000 n 
+0000108729 00000 n 
+0000099894 00000 n 
+0000108795 00000 n 
+0000100091 00000 n 
+0000108863 00000 n 
+0000100270 00000 n 
+0000100476 00000 n 
+0000100697 00000 n 
+0000100981 00000 n 
+0000108931 00000 n 
+0000101314 00000 n 
+0000101480 00000 n 
+0000108997 00000 n 
+0000101695 00000 n 
+0000109063 00000 n 
+0000101871 00000 n 
+0000102059 00000 n 
+0000109131 00000 n 
+0000102280 00000 n 
+0000109197 00000 n 
+0000102525 00000 n 
+0000102713 00000 n 
+0000109265 00000 n 
+0000102984 00000 n 
+0000109333 00000 n 
+0000103148 00000 n 
+0000109399 00000 n 
+0000103375 00000 n 
+0000109467 00000 n 
+0000103530 00000 n 
+0000109533 00000 n 
+0000103751 00000 n 
+0000109601 00000 n 
+0000103942 00000 n 
+0000109669 00000 n 
+0000104195 00000 n 
+0000109737 00000 n 
+0000104440 00000 n 
+0000104631 00000 n 
+0000104900 00000 n 
+0000105070 00000 n 
+0000105255 00000 n 
+0000105420 00000 n 
+0000105534 00000 n 
+0000105645 00000 n 
+0000105757 00000 n 
+0000105866 00000 n 
+0000105973 00000 n 
+0000106090 00000 n 
 trailer
 <<
 /Size 296
@@ -2952,5 +2952,5 @@
 /Info 4 0 R
 >>
 startxref
-109882
+109805
 %%EOF

Modified: hadoop/core/trunk/src/docs/src/documentation/content/xdocs/cluster_setup.xml
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/docs/src/documentation/content/xdocs/cluster_setup.xml?rev=637723&r1=637722&r2=637723&view=diff
==============================================================================
--- hadoop/core/trunk/src/docs/src/documentation/content/xdocs/cluster_setup.xml (original)
+++ hadoop/core/trunk/src/docs/src/documentation/content/xdocs/cluster_setup.xml Sun Mar 16 22:19:52 2008
@@ -373,8 +373,7 @@
             <p> The job history files are stored in central location 
             <code> hadoop.job.history.location </code> which can be on DFS also,
             whose default value is <code>${HADOOP_LOG_DIR}/history</code>. 
-            Job history server is started on job tracker. The history 
-            web UI is accessible from job tracker web UI.</p>
+            The history web UI is accessible from job tracker web UI.</p>
             
             <p> The history files are also logged to user specified directory
             <code>hadoop.job.history.user.location</code> 
@@ -384,16 +383,15 @@
             logging by giving the value <code>none</code> for 
             <code>hadoop.job.history.user.location</code> </p>
             
-            <p> User can view logs in specified directory using 
-            the following command <br/>
-            <code>$ bin/hadoop job -history output-dir</code><br/>
-            This will start a stand alone jetty on the client and 
-            load history jsp's. 
-            It will display the port where the server is up at. The server will
-            be up for 30 minutes. User has to use 
-            <code> http://hostname:port </code> to view the history. User can 
-            also provide http bind address using 
-            <code>mapred.job.history.http.bindAddress</code></p>
+            <p> User can view the history logs summary in specified directory 
+            using the following command <br/>
+            <code>$ bin/hadoop job -history output-dir</code><br/> 
+            This command will print job details, failed and killed tip
+            details. <br/>
+            More details about the job such as successful tasks and 
+            task attempts made for each task can be viewed using the  
+            following command <br/>
+            <code>$ bin/hadoop job -history all output-dir</code><br/></p> 
           </section>
         </section>
       </section>

Modified: hadoop/core/trunk/src/docs/src/documentation/content/xdocs/mapred_tutorial.xml
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/docs/src/documentation/content/xdocs/mapred_tutorial.xml?rev=637723&r1=637722&r2=637723&view=diff
==============================================================================
--- hadoop/core/trunk/src/docs/src/documentation/content/xdocs/mapred_tutorial.xml (original)
+++ hadoop/core/trunk/src/docs/src/documentation/content/xdocs/mapred_tutorial.xml Sun Mar 16 22:19:52 2008
@@ -1113,21 +1113,20 @@
         <p> Job history files are also logged to user specified directory
         <code>hadoop.job.history.user.location</code> 
         which defaults to job output directory. The files are stored in
-        "_logs/history/" in the specified directory. Hence, by default they will
-        be in mapred.output.dir/_logs/history. User can stop
+        "_logs/history/" in the specified directory. Hence, by default they
+        will be in mapred.output.dir/_logs/history. User can stop
         logging by giving the value <code>none</code> for 
         <code>hadoop.job.history.user.location</code></p>
 
-        <p> User can view logs in specified directory using 
-        the following command <br/>
-        <code>$ bin/hadoop job -history output-dir</code><br/>
-        This will start a stand alone jetty on the client and 
-        load history jsp's. 
-        It will display the port where the server is up at. The server will
-        be up for 30 minutes. User has to use 
-        <code> http://hostname:port </code> to view the history. User can 
-        also provide http bind address using 
-        <code>mapred.job.history.http.bindAddress</code></p>
+        <p> User can view the history logs summary in specified directory 
+        using the following command <br/>
+        <code>$ bin/hadoop job -history output-dir</code><br/> 
+        This command will print job details, failed and killed tip
+        details. <br/>
+        More details about the job such as successful tasks and 
+        task attempts made for each task can be viewed using the  
+        following command <br/>
+       <code>$ bin/hadoop job -history all output-dir</code><br/></p> 
             
         <p> User can use 
         <a href="ext:api/org/apache/hadoop/mapred/outputlogfilter">OutputLogFilter</a>

Modified: hadoop/core/trunk/src/java/org/apache/hadoop/mapred/DefaultJobHistoryParser.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/java/org/apache/hadoop/mapred/DefaultJobHistoryParser.java?rev=637723&r1=637722&r2=637723&view=diff
==============================================================================
--- hadoop/core/trunk/src/java/org/apache/hadoop/mapred/DefaultJobHistoryParser.java (original)
+++ hadoop/core/trunk/src/java/org/apache/hadoop/mapred/DefaultJobHistoryParser.java Sun Mar 16 22:19:52 2008
@@ -127,20 +127,20 @@
   }
 
   // call this only for jobs that succeeded for better results. 
-  static class FailedOnNodesFilter implements JobHistory.Listener {
+  abstract static class NodesFilter implements JobHistory.Listener {
     private Map<String, Set<String>> badNodesToNumFailedTasks =
       new HashMap<String, Set<String>>();
     
     Map<String, Set<String>> getValues(){
       return badNodesToNumFailedTasks; 
     }
+    String failureType;
+    
     public void handle(JobHistory.RecordTypes recType, Map<Keys, String> values)
       throws IOException {
-      
       if (recType.equals(JobHistory.RecordTypes.MapAttempt) || 
           recType.equals(JobHistory.RecordTypes.ReduceAttempt)) {
-        
-        if (Values.FAILED.name().equals(values.get(Keys.TASK_STATUS)) ){
+        if (failureType.equals(values.get(Keys.TASK_STATUS)) ) {
           String hostName = values.get(Keys.HOSTNAME);
           String taskid = values.get(Keys.TASKID); 
           Set<String> tasks = badNodesToNumFailedTasks.get(hostName); 
@@ -154,33 +154,23 @@
         }
       }      
     }
+    abstract void setFailureType();
+    String getFailureType() {
+      return failureType;
+    }
+    NodesFilter() {
+      setFailureType();
+    }
   }
-  static class KilledOnNodesFilter implements JobHistory.Listener {
-    private Map<String, Set<String>> badNodesToNumFailedTasks =
-      new HashMap<String, Set<String>>();
-    
-    Map<String, Set<String>> getValues(){
-      return badNodesToNumFailedTasks; 
+ 
+  static class FailedOnNodesFilter extends NodesFilter {
+    void setFailureType() {
+      failureType = Values.FAILED.name();
     }
-    public void handle(JobHistory.RecordTypes recType, Map<Keys, String> values)
-      throws IOException {
-      
-      if (recType.equals(JobHistory.RecordTypes.MapAttempt) || 
-          recType.equals(JobHistory.RecordTypes.ReduceAttempt)) {
-        
-        if (Values.KILLED.name().equals(values.get(Keys.TASK_STATUS)) ){
-          String hostName = values.get(Keys.HOSTNAME);
-          String taskid = values.get(Keys.TASKID); 
-          Set<String> tasks = badNodesToNumFailedTasks.get(hostName); 
-          if (null == tasks ){
-            tasks = new TreeSet<String>(); 
-            tasks.add(taskid);
-            badNodesToNumFailedTasks.put(hostName, tasks);
-          }else{
-            tasks.add(taskid);
-          }
-        }
-      }      
+  }
+  static class KilledOnNodesFilter extends NodesFilter {
+    void setFailureType() {
+      failureType = Values.KILLED.name();
     }
   }
 }
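
The hunk above collapses the two copy-pasted listeners into one abstract NodesFilter; each subclass only installs the TASK_STATUS value it matches via setFailureType(). A minimal hypothetical sketch using just the members shown above (the demo class name is invented; the filters are package-private nested classes, and handle() is normally invoked by the JobHistory log parser rather than called directly):

  package org.apache.hadoop.mapred;

  import java.util.Map;
  import java.util.Set;

  class NodesFilterDemo {  // hypothetical name, for illustration only
    public static void main(String[] args) {
      // FailedOnNodesFilter matches FAILED attempts, KilledOnNodesFilter KILLED;
      // everything else (handle(), getValues()) is shared in NodesFilter.
      DefaultJobHistoryParser.NodesFilter failed =
          new DefaultJobHistoryParser.FailedOnNodesFilter();
      DefaultJobHistoryParser.NodesFilter killed =
          new DefaultJobHistoryParser.KilledOnNodesFilter();
      // After a history log has been parsed with a filter as the listener,
      // getValues() maps each host name to the task ids that failed or were
      // killed on that node.
      Map<String, Set<String>> failedNodes = failed.getValues();
      Map<String, Set<String>> killedNodes = killed.getValues();
    }
  }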

Added: hadoop/core/trunk/src/java/org/apache/hadoop/mapred/HistoryViewer.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/java/org/apache/hadoop/mapred/HistoryViewer.java?rev=637723&view=auto
==============================================================================
--- hadoop/core/trunk/src/java/org/apache/hadoop/mapred/HistoryViewer.java (added)
+++ hadoop/core/trunk/src/java/org/apache/hadoop/mapred/HistoryViewer.java Sun Mar 16 22:19:52 2008
@@ -0,0 +1,486 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapred;
+
+import java.io.IOException;
+import java.text.SimpleDateFormat;
+import java.util.Arrays;
+import java.util.Comparator;
+import java.util.Date;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.mapred.DefaultJobHistoryParser.*;
+import org.apache.hadoop.mapred.JobHistory.*;
+import org.apache.hadoop.util.StringUtils;
+
+/**
+ * A utility for parsing and printing job history files.
+ */
+class HistoryViewer {
+  private static final Log LOG = LogFactory.getLog(
+                                   "org.apache.hadoop.mapred.HistoryViewer");
+  private static final SimpleDateFormat dateFormat = new SimpleDateFormat(
+                                             "d-MMM-yyyy HH:mm:ss");
+  private FileSystem fs;
+  private Configuration conf;
+  private Path historyLogDir;
+  private String jobLogFile;
+  private JobHistory.JobInfo job;
+  private String trackerHostName;
+  private String trackerStartTime;
+  private String jobId;
+  private boolean printAll;
+  
+  private PathFilter jobLogFileFilter = new PathFilter() {
+    public boolean accept(Path path) {
+      return !(path.getName().endsWith(".xml"));
+    }
+  };
+
+  public HistoryViewer(String outputDir, Configuration conf, boolean printAll)
+  throws IOException {
+    this.conf = conf;
+    this.printAll = printAll;
+    Path output = new Path(outputDir);
+    historyLogDir = new Path(output, "_logs/history");
+    try {
+      fs = output.getFileSystem(this.conf);
+      if (!fs.exists(historyLogDir)) {
+        throw new IOException("History directory " + historyLogDir.toString()
+                              + " does not exist");
+      }
+      Path[] jobFiles = FileUtil.stat2Paths(fs.listStatus(historyLogDir,
+                                                          jobLogFileFilter));
+      if (jobFiles.length == 0) {
+        throw new IOException("Not a valid history directory " 
+                              + historyLogDir.toString());
+      }
+      jobLogFile = jobFiles[0].toString();
+      String[] jobDetails = jobFiles[0].getName().split("_");
+      trackerHostName = jobDetails[0];
+      trackerStartTime = jobDetails[1];
+      jobId = jobDetails[2] + "_" + jobDetails[3] + "_" + jobDetails[4];
+      job = new JobHistory.JobInfo(jobId); 
+      DefaultJobHistoryParser.parseJobTasks(jobFiles[0].toString(), job, fs);
+    } catch(Exception e) {
+      throw new IOException("Not able to initialize History viewer");
+    }
+  }
+  
+  public void print() throws IOException{
+    printJobDetails();
+    printTaskSummary();
+    printJobAnalysis();
+    printTasks("MAP", "FAILED");
+    printTasks("MAP", "KILLED");
+    printTasks("REDUCE", "FAILED");
+    printTasks("REDUCE", "KILLED");
+    if (printAll) {
+      printTasks("MAP", "SUCCESS");
+      printTasks("REDUCE", "SUCCESS");
+      printAllTaskAttempts("MAP");
+      printAllTaskAttempts("REDUCE");
+    }
+    NodesFilter filter = new FailedOnNodesFilter();
+    printFailedAttempts(filter);
+    filter = new KilledOnNodesFilter();
+    printFailedAttempts(filter);
+  }
+
+  private void printJobDetails() {
+    StringBuffer jobDetails = new StringBuffer();
+    jobDetails.append("\nHadoop job: " ).append(jobId);
+    jobDetails.append("\n=====================================");
+    jobDetails.append("\nJob tracker host name: ").append(trackerHostName);
+    jobDetails.append("\njob tracker start time: ").append( 
+                      new Date(Long.parseLong(trackerStartTime))); 
+    jobDetails.append("\nUser: ").append(job.get(Keys.USER)); 
+    jobDetails.append("\nJobName: ").append(job.get(Keys.JOBNAME)); 
+    jobDetails.append("\nJobConf: ").append(job.get(Keys.JOBCONF)); 
+    jobDetails.append("\nSubmitted At: ").append(StringUtils.
+                        getFormattedTimeWithDiff(dateFormat,
+                        job.getLong(Keys.SUBMIT_TIME), 0)); 
+    jobDetails.append("\nLaunched At: ").append(StringUtils.
+                        getFormattedTimeWithDiff(dateFormat,
+                        job.getLong(Keys.LAUNCH_TIME),
+                        job.getLong(Keys.SUBMIT_TIME)));
+    jobDetails.append("\nFinished At: ").append(StringUtils.
+                        getFormattedTimeWithDiff(dateFormat,
+                        job.getLong(Keys.FINISH_TIME),
+                        job.getLong(Keys.LAUNCH_TIME)));
+    jobDetails.append("\nStatus: ").append(((job.get(Keys.JOB_STATUS) == "") ? 
+                      "Incomplete" :job.get(Keys.JOB_STATUS)));
+    jobDetails.append("\n=====================================");
+    System.out.println(jobDetails.toString());
+  }
+  
+  private void printTasks(String taskType, String taskStatus) {
+    Map<String, JobHistory.Task> tasks = job.getAllTasks();
+    StringBuffer taskList = new StringBuffer();
+    taskList.append("\n").append(taskStatus).append(" ");
+    taskList.append(taskType).append(" task list for ").append(jobId);
+    taskList.append("\nTaskId\t\tStartTime\tFinishTime\tError");
+    taskList.append("\n====================================================");
+    for (JobHistory.Task task : tasks.values()) {
+      if (taskType.equals(task.get(Keys.TASK_TYPE))){
+        Map <String, TaskAttempt> taskAttempts = task.getTaskAttempts();
+        for (JobHistory.TaskAttempt attempt : taskAttempts.values()) {
+          if (taskStatus.equals(attempt.get(Keys.TASK_STATUS))
+              || taskStatus.equals("all")){
+            taskList.append("\n").append(attempt.get(Keys.TASKID));
+            taskList.append("\t").append(StringUtils.getFormattedTimeWithDiff(
+                       dateFormat, attempt.getLong(Keys.START_TIME), 0));
+            taskList.append("\t").append(StringUtils.getFormattedTimeWithDiff(
+                       dateFormat, attempt.getLong(Keys.FINISH_TIME),
+                       task.getLong(Keys.START_TIME))); 
+            taskList.append("\t").append(attempt.get(Keys.ERROR));
+          }
+        }
+      }
+    }
+    System.out.println(taskList.toString());
+  }
+  
+  private void printAllTaskAttempts(String taskType) {
+    Map<String, JobHistory.Task> tasks = job.getAllTasks();
+    StringBuffer taskList = new StringBuffer();
+    taskList.append("\n").append(taskType);
+    taskList.append(" task list for ").append(jobId);
+    taskList.append("\nTaskId\t\tStartTime");
+    if (Values.REDUCE.name().equals(taskType)) {
+      taskList.append("\tShuffleFinished\tSortFinished");
+    }
+    taskList.append("\tFinishTime\tHostName\tError");
+    taskList.append("\n====================================================");
+    for (JobHistory.Task task : tasks.values()) {
+      for (JobHistory.TaskAttempt attempt : task.getTaskAttempts().values()) {
+        if (taskType.equals(task.get(Keys.TASK_TYPE))){
+          taskList.append("\n"); 
+          taskList.append(attempt.get(Keys.TASK_ATTEMPT_ID)).append("\t");
+          taskList.append(StringUtils.getFormattedTimeWithDiff(dateFormat,
+                          attempt.getLong(Keys.START_TIME), 0)).append("\t");
+          if (Values.REDUCE.name().equals(taskType)) {
+            ReduceAttempt reduceAttempt = (ReduceAttempt)attempt; 
+            taskList.append(StringUtils.getFormattedTimeWithDiff(dateFormat,
+                            reduceAttempt.getLong(Keys.SHUFFLE_FINISHED),
+                            reduceAttempt.getLong(Keys.START_TIME)));
+            taskList.append("\t"); 
+            taskList.append(StringUtils.getFormattedTimeWithDiff(dateFormat, 
+                            reduceAttempt.getLong(Keys.SORT_FINISHED),
+                            reduceAttempt.getLong(Keys.SHUFFLE_FINISHED))); 
+          } 
+          taskList.append(StringUtils.getFormattedTimeWithDiff(dateFormat,
+                          attempt.getLong(Keys.FINISH_TIME),
+                          attempt.getLong(Keys.START_TIME))); 
+          taskList.append("\t"); 
+          taskList.append(attempt.get(Keys.HOSTNAME)).append("\t");
+          taskList.append(attempt.get(Keys.ERROR));
+        }
+      }
+    }
+    taskList.append("\n");
+    System.out.println(taskList.toString());
+  }
+  
+  private void printTaskSummary() {
+    Map<String, JobHistory.Task> tasks = job.getAllTasks();
+    int totalMaps = 0; 
+    int totalReduces = 0; 
+    int numFailedMaps = 0; 
+    int numKilledMaps = 0;
+    int numFailedReduces = 0; 
+    int numKilledReduces = 0;
+    long mapStarted = 0; 
+    long mapFinished = 0; 
+    long reduceStarted = 0; 
+    long reduceFinished = 0; 
+
+    Map <String, String> allHosts = new TreeMap<String, String>();
+
+    for (JobHistory.Task task : tasks.values()) {
+      Map<String, TaskAttempt> attempts = task.getTaskAttempts();
+      allHosts.put(task.get(Keys.HOSTNAME), "");
+      for (TaskAttempt attempt : attempts.values()) {
+        long startTime = attempt.getLong(Keys.START_TIME); 
+        long finishTime = attempt.getLong(Keys.FINISH_TIME); 
+        if (Values.MAP.name().equals(task.get(Keys.TASK_TYPE))) {
+          if (mapStarted==0 || mapStarted > startTime) {
+            mapStarted = startTime; 
+          }
+          if (mapFinished < finishTime) {
+            mapFinished = finishTime; 
+          }
+          totalMaps++; 
+          if (Values.FAILED.name().equals(attempt.get(Keys.TASK_STATUS))) {
+            numFailedMaps++; 
+          } else if (Values.KILLED.name().equals(
+                                            attempt.get(Keys.TASK_STATUS))) {
+            numKilledMaps++;
+          }
+        } else {
+          if (reduceStarted==0||reduceStarted > startTime) {
+            reduceStarted = startTime; 
+          }
+          if (reduceFinished < finishTime) {
+            reduceFinished = finishTime; 
+          }
+          totalReduces++; 
+          if (Values.FAILED.name().equals(attempt.get(Keys.TASK_STATUS))) {
+            numFailedReduces++;
+          } else if (Values.KILLED.name().equals(
+                                            attempt.get(Keys.TASK_STATUS))) {
+            numKilledReduces++;
+          }
+        }
+      }
+    }
+    
+    StringBuffer taskSummary = new StringBuffer();
+    taskSummary.append("\nTask Summary");
+    taskSummary.append("\n============================");
+    taskSummary.append("\nKind\tTotal\t");
+    taskSummary.append("Successful\tFailed\tKilled\tStartTime\tFinishTime");
+    taskSummary.append("\n");
+    taskSummary.append("\nMap\t").append(totalMaps);
+    taskSummary.append("\t").append(job.getInt(Keys.FINISHED_MAPS));
+    taskSummary.append("\t\t").append(numFailedMaps);
+    taskSummary.append("\t").append(numKilledMaps);
+    taskSummary.append("\t").append(StringUtils.getFormattedTimeWithDiff(
+                               dateFormat, mapStarted, 0));
+    taskSummary.append("\t").append(StringUtils.getFormattedTimeWithDiff(
+                               dateFormat, mapFinished, mapStarted));
+    taskSummary.append("\nReduce\t").append(totalReduces);
+    taskSummary.append("\t").append(job.getInt(Keys.FINISHED_REDUCES));
+    taskSummary.append("\t\t").append(numFailedReduces);
+    taskSummary.append("\t").append(numKilledReduces);
+    taskSummary.append("\t").append(StringUtils.getFormattedTimeWithDiff(
+                               dateFormat, reduceStarted, 0));
+    taskSummary.append("\t").append(StringUtils.getFormattedTimeWithDiff(
+                               dateFormat, reduceFinished, reduceStarted)); 
+    taskSummary.append("\n============================\n");
+    System.out.println(taskSummary.toString());
+  }
+  
+  private void printFailedAttempts(NodesFilter filter) throws IOException {
+    JobHistory.parseHistoryFromFS(jobLogFile, filter, fs); 
+    Map<String, Set<String>> badNodes = filter.getValues();
+    StringBuffer attempts = new StringBuffer(); 
+    if (badNodes.size() > 0) {
+      attempts.append("\n").append(filter.getFailureType());
+      attempts.append(" task attempts by nodes");
+      attempts.append("\nHostname\tFailedTasks");
+      attempts.append("\n===============================");
+      for (Map.Entry<String, Set<String>> entry : badNodes.entrySet()) {
+        String node = entry.getKey();
+        Set<String> failedTasks = entry.getValue();
+        attempts.append("\n").append(node).append("\t");
+        for (String t : failedTasks) {
+          attempts.append(t).append(", ");
+        }
+      }
+    }
+    System.out.println(attempts.toString());
+  }
+  
+  private void printJobAnalysis() {
+    if (!Values.SUCCESS.name().equals(job.get(Keys.JOB_STATUS))) {
+      System.out.println("No Analysis available as job did not finish");
+      return;
+    }
+    
+    Map<String, JobHistory.Task> tasks = job.getAllTasks();
+    int finishedMaps = job.getInt(Keys.FINISHED_MAPS);
+    int finishedReduces = job.getInt(Keys.FINISHED_REDUCES);
+    JobHistory.Task [] mapTasks = new JobHistory.Task[finishedMaps]; 
+    JobHistory.Task [] reduceTasks = new JobHistory.Task[finishedReduces]; 
+    int mapIndex = 0, reduceIndex = 0;
+    long avgMapTime = 0;
+    long avgReduceTime = 0;
+    long avgShuffleTime = 0;
+
+    for (JobHistory.Task task : tasks.values()) {
+      Map<String, TaskAttempt> attempts = task.getTaskAttempts();
+      for (JobHistory.TaskAttempt attempt : attempts.values()) {
+        if (attempt.get(Keys.TASK_STATUS).equals(Values.SUCCESS.name())) {
+          long attemptDuration = (attempt.getLong(Keys.FINISH_TIME) -
+                                  attempt.getLong(Keys.START_TIME));
+          if (Values.MAP.name().equals(task.get(Keys.TASK_TYPE))) {
+            mapTasks[mapIndex++] = attempt; 
+            avgMapTime += attemptDuration;
+          } else { 
+            reduceTasks[reduceIndex++] = attempt;
+            avgShuffleTime += (attempt.getLong(Keys.SHUFFLE_FINISHED) - 
+                               attempt.getLong(Keys.START_TIME));
+            avgReduceTime += (attempt.getLong(Keys.FINISH_TIME) -
+                              attempt.getLong(Keys.SHUFFLE_FINISHED));
+          }
+          break;
+        }
+      }
+    }
+    if (finishedMaps > 0) {
+      avgMapTime /= finishedMaps;
+    }
+    if (finishedReduces > 0) {
+      avgReduceTime /= finishedReduces;
+      avgShuffleTime /= finishedReduces;
+    }
+    System.out.println("\nAnalysis");
+    System.out.println("=========");
+    printAnalysis(mapTasks, cMap, "map", avgMapTime, 10);
+    printLast(mapTasks, "map", cFinishMapRed);
+
+    if (reduceTasks.length > 0) {
+      printAnalysis(reduceTasks, cShuffle, "shuffle", avgShuffleTime, 10);
+      printLast(reduceTasks, "shuffle", cFinishShuffle);
+
+      printAnalysis(reduceTasks, cReduce, "reduce", avgReduceTime, 10);
+      printLast(reduceTasks, "reduce", cFinishMapRed);
+    }
+    System.out.println("=========");
+  }
+  
+  private void printLast(JobHistory.Task [] tasks,
+                         String taskType,
+                         Comparator<JobHistory.Task> cmp
+                         ) {
+    Arrays.sort(tasks, cmp);
+    JobHistory.Task last = tasks[0];
+    StringBuffer lastBuf = new StringBuffer();
+    lastBuf.append("The last ").append(taskType);
+    lastBuf.append(" task ").append(last.get(Keys.TASKID));
+    long finishTime;
+    if ("shuffle".equals(taskType)) {
+      finishTime = last.getLong(Keys.SHUFFLE_FINISHED);
+    } else {
+      finishTime = last.getLong(Keys.FINISH_TIME);
+    }
+    lastBuf.append(" finished at (relative to the Job launch time): ");
+    lastBuf.append(StringUtils.getFormattedTimeWithDiff(dateFormat,
+                                 finishTime, job.getLong(Keys.LAUNCH_TIME)));
+    System.out.println(lastBuf.toString());
+  }
+
+  private void printAnalysis(JobHistory.Task [] tasks,
+                             Comparator<JobHistory.Task> cmp,
+                             String taskType,
+                             long avg,
+                             int showTasks) {
+    Arrays.sort(tasks, cmp);
+    JobHistory.Task min = tasks[tasks.length-1];
+    StringBuffer details = new StringBuffer();
+    details.append("\nTime taken by best performing ");
+    details.append(taskType).append(" task ");
+    details.append(min.get(Keys.TASKID)).append(": ");
+    if ("map".equals(taskType)) {
+      details.append(StringUtils.formatTimeDiff(
+                     min.getLong(Keys.FINISH_TIME),
+                     min.getLong(Keys.START_TIME)));
+    } else if ("shuffle".equals(taskType)) {
+      details.append(StringUtils.formatTimeDiff(
+                     min.getLong(Keys.SHUFFLE_FINISHED),
+                     min.getLong(Keys.START_TIME)));
+    } else {
+      details.append(StringUtils.formatTimeDiff(
+                min.getLong(Keys.FINISH_TIME),
+                min.getLong(Keys.SHUFFLE_FINISHED)));
+    }
+    details.append("\nAverage time taken by ");
+    details.append(taskType).append(" tasks: "); 
+    details.append(StringUtils.formatTimeDiff(avg, 0));
+    details.append("\nWorse performing ");
+    details.append(taskType).append(" tasks: ");
+    details.append("\nTaskId\t\tTimetaken");
+    for (int i = 0; i < showTasks && i < tasks.length; i++) {
+      details.append("\n").append(tasks[i].get(Keys.TASKID)).append(" ");
+      if ("map".equals(taskType)) {
+        details.append(StringUtils.formatTimeDiff(
+                       tasks[i].getLong(Keys.FINISH_TIME),
+                       tasks[i].getLong(Keys.START_TIME)));
+      } else if ("shuffle".equals(taskType)) {
+        details.append(StringUtils.formatTimeDiff(
+                       tasks[i].getLong(Keys.SHUFFLE_FINISHED),
+                       tasks[i].getLong(Keys.START_TIME)));
+      } else {
+        details.append(StringUtils.formatTimeDiff(
+                       tasks[i].getLong(Keys.FINISH_TIME),
+                       tasks[i].getLong(Keys.SHUFFLE_FINISHED)));
+      }
+    }
+    System.out.println(details.toString());
+  }
+  
+  private Comparator<JobHistory.Task> cMap = 
+                                        new Comparator<JobHistory.Task>() {
+    public int compare(JobHistory.Task t1, JobHistory.Task t2) {
+      long l1 = t1.getLong(Keys.FINISH_TIME) - t1.getLong(Keys.START_TIME);
+      long l2 = t2.getLong(Keys.FINISH_TIME) - t2.getLong(Keys.START_TIME);
+      return (l2 < l1 ? -1 : (l2 == l1 ? 0 : 1));
+    }
+  };
+  
+  private Comparator<JobHistory.Task> cShuffle = 
+    new Comparator<JobHistory.Task>() {
+    public int compare(JobHistory.Task t1, JobHistory.Task t2) {
+      long l1 = t1.getLong(Keys.SHUFFLE_FINISHED) - 
+                t1.getLong(Keys.START_TIME);
+      long l2 = t2.getLong(Keys.SHUFFLE_FINISHED) -
+                t2.getLong(Keys.START_TIME);
+      return (l2 < l1 ? -1 : (l2 == l1 ? 0 : 1));
+    }
+  };
+
+  private Comparator<JobHistory.Task> cFinishShuffle = 
+    new Comparator<JobHistory.Task>() {
+    public int compare(JobHistory.Task t1, JobHistory.Task t2) {
+      long l1 = t1.getLong(Keys.SHUFFLE_FINISHED); 
+      long l2 = t2.getLong(Keys.SHUFFLE_FINISHED);
+      return (l2 < l1 ? -1 : (l2 == l1 ? 0 : 1));
+    }
+  };
+
+  private Comparator<JobHistory.Task> cFinishMapRed = 
+    new Comparator<JobHistory.Task>() {
+    public int compare(JobHistory.Task t1, JobHistory.Task t2) {
+      long l1 = t1.getLong(Keys.FINISH_TIME); 
+      long l2 = t2.getLong(Keys.FINISH_TIME);
+      return (l2 < l1 ? -1 : (l2 == l1 ? 0 : 1));
+    }
+  };
+  
+  private Comparator<JobHistory.Task> cReduce = 
+    new Comparator<JobHistory.Task>() {
+    public int compare(JobHistory.Task t1, JobHistory.Task t2) {
+      long l1 = t1.getLong(Keys.FINISH_TIME) -
+                t1.getLong(Keys.SHUFFLE_FINISHED);
+      long l2 = t2.getLong(Keys.FINISH_TIME) -
+                t2.getLong(Keys.SHUFFLE_FINISHED);
+      return (l2 < l1 ? -1 : (l2 == l1 ? 0 : 1));
+    }
+  }; 
+}

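For reference, HistoryViewer is package-private and is driven from JobClient's
-history option (see below). A minimal usage sketch, assuming a job output
directory that contains the _logs/history subdirectory; the path here is
illustrative only:

    Configuration conf = new Configuration();
    // true => "all" mode: also print successful tasks and every task attempt
    HistoryViewer viewer =
      new HistoryViewer("/user/alice/wordcount-out", conf, true);
    viewer.print();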
Modified: hadoop/core/trunk/src/java/org/apache/hadoop/mapred/JobClient.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/java/org/apache/hadoop/mapred/JobClient.java?rev=637723&r1=637722&r2=637723&view=diff
==============================================================================
--- hadoop/core/trunk/src/java/org/apache/hadoop/mapred/JobClient.java (original)
+++ hadoop/core/trunk/src/java/org/apache/hadoop/mapred/JobClient.java Sun Mar 16 22:19:52 2008
@@ -61,6 +61,7 @@
 import org.apache.hadoop.io.retry.RetryProxy;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.mapred.TaskInProgress;
+import org.apache.hadoop.mapred.DefaultJobHistoryParser.*;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
@@ -1034,6 +1035,7 @@
     boolean killJob = false;
     boolean listEvents = false;
     boolean viewHistory = false;
+    boolean viewAllHistory = false;
     boolean listJobs = false;
     boolean listAllJobs = false;
     boolean killTask = false;
@@ -1064,10 +1066,15 @@
       nEvents = Integer.parseInt(argv[3]);
       listEvents = true;
     } else if ("-history".equals(argv[0])) {
-      if (argv.length != 2)
-        displayUsage();
-        outputDir = argv[1];
-        viewHistory = true;
+      if (argv.length != 2 && !(argv.length == 3 && "all".equals(argv[1])))
+        displayUsage();
+      viewHistory = true;
+      if (argv.length == 3 && "all".equals(argv[1])) {
+        viewAllHistory = true;
+        outputDir = argv[2];
+      } else {
+        outputDir = argv[1];
+      }
     } else if ("-list".equals(argv[0])) {
       if (argv.length != 1 && !(argv.length == 2 && "all".equals(argv[1])))
         displayUsage();
@@ -1125,8 +1132,7 @@
           exitCode = 0;
         }
       } else if (viewHistory) {
-    	// start http server
-        viewHistory(outputDir);
+        viewHistory(outputDir, viewAllHistory);
         exitCode = 0;
       } else if (listEvents) {
         listEvents(jobid, fromEvent, nEvents);
@@ -1160,43 +1166,11 @@
     return exitCode;
   }
 
-  private void viewHistory(String outputDir) 
+  private void viewHistory(String outputDir, boolean all) 
     throws IOException {
-
-    Path output = new Path(outputDir);
-    FileSystem fs = output.getFileSystem(getConf());
-
-    // start http server used to provide an HTML view on Job history
-    StatusHttpServer infoServer;
-    String infoAddr = new JobConf(getConf()).get(
-             "mapred.job.history.http.bindAddress", "0.0.0.0:0");
-    InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr);
-    String infoBindAddress = infoSocAddr.getHostName();
-    int tmpInfoPort = infoSocAddr.getPort();
-    infoServer = new StatusHttpServer("history", infoBindAddress, tmpInfoPort,
-                                       tmpInfoPort == 0);
-    infoServer.setAttribute("fileSys", fs);
-    infoServer.setAttribute("historyLogDir", outputDir + "/_logs/history");
-    infoServer.start();
-    int infoPort = infoServer.getPort();
-    getConf().set("mapred.job.history.http.bindAddress", 
-        infoBindAddress + ":" + infoPort);
-    LOG.info("JobHistory webserver up at: " + infoPort);
-
-    // let the server be up for 30 minutes.
-    try {
-      Thread.sleep(30 * 60 * 1000);
-    } catch (InterruptedException ie) {}
-      
-    // stop infoServer
-    if (infoServer != null) {
-      LOG.info("Stopping infoServer");
-      try {
-        infoServer.stop();
-      } catch (InterruptedException ex) {
-        ex.printStackTrace();
-      }
-    } 
+    HistoryViewer historyViewer = new HistoryViewer(outputDir,
+                                        getConf(), all);
+    historyViewer.print();
   }
   
   /**

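With this change, "-history" no longer starts a temporary HTTP server; the
parsed history is printed straight to standard output. Per the option parsing
above, the expected invocations are:

    bin/hadoop job -history <jobOutputDir>
    bin/hadoop job -history all <jobOutputDir>

where the "all" form additionally prints successful tasks and all task attempts.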
Modified: hadoop/core/trunk/src/java/org/apache/hadoop/mapred/JobHistory.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/java/org/apache/hadoop/mapred/JobHistory.java?rev=637723&r1=637722&r2=637723&view=diff
==============================================================================
--- hadoop/core/trunk/src/java/org/apache/hadoop/mapred/JobHistory.java (original)
+++ hadoop/core/trunk/src/java/org/apache/hadoop/mapred/JobHistory.java Sun Mar 16 22:19:52 2008
@@ -111,29 +111,33 @@
 
   /**
    * Initialize JobHistory files. 
-   *
+   * @param conf the JobConf of the job tracker
+   * @param hostname the job tracker's hostname
+   * @return true if initialized properly,
+   *         false otherwise
    */
-  public static void init(JobConf conf, String hostname){
-    if (!disableHistory){
-      try{
-        LOG_DIR = conf.get("hadoop.job.history.location" ,
-          "file:///" + new File(System.getProperty(
-          "hadoop.log.dir")).getAbsolutePath() + File.separator + "history");
-        JOBTRACKER_UNIQUE_STRING = hostname + "_" + 
+  public static boolean init(JobConf conf, String hostname){
+    try {
+      LOG_DIR = conf.get("hadoop.job.history.location" ,
+        "file:///" + new File(
+        System.getProperty("hadoop.log.dir")).getAbsolutePath()
+        + File.separator + "history");
+      JOBTRACKER_UNIQUE_STRING = hostname + "_" + 
                                    JOBTRACKER_START_TIME + "_";
-        Path logDir = new Path(LOG_DIR);
-        FileSystem fs = logDir.getFileSystem(conf);
-        if (!fs.exists(logDir)){
-          if (!fs.mkdirs(logDir)){
-            throw new IOException("Mkdirs failed to create " + logDir.toString());
-          }
+      Path logDir = new Path(LOG_DIR);
+      FileSystem fs = logDir.getFileSystem(conf);
+      if (!fs.exists(logDir)){
+        if (!fs.mkdirs(logDir)){
+          throw new IOException("Mkdirs failed to create " + logDir.toString());
         }
-        conf.set("hadoop.job.history.location", LOG_DIR);
-      }catch(IOException e){
-        LOG.error("Failed to initialize JobHistory log file", e); 
-        disableHistory = true; 
       }
+      conf.set("hadoop.job.history.location", LOG_DIR);
+      disableHistory = false;
+    } catch(IOException e) {
+        LOG.error("Failed to initialize JobHistory log file", e); 
+        disableHistory = true;
     }
+    return !disableHistory;
   }
 
   /**

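init() now reports whether history setup succeeded instead of only flipping the
static disableHistory flag. This lets the JobTracker defer initialization when
hadoop.job.history.location points at a DFS whose namenode is still in safe mode
(see the JobTracker change below). A caller sketch under the new contract:

    boolean historyInitialized = JobHistory.init(conf, hostname);
    if (historyInitialized) {
      // safe to read hadoop.job.history.location and publish it to the web UI
    } else {
      // retry init() later, e.g. once the namenode has left safe mode
    }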
Modified: hadoop/core/trunk/src/java/org/apache/hadoop/mapred/JobTracker.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/java/org/apache/hadoop/mapred/JobTracker.java?rev=637723&r1=637722&r2=637723&view=diff
==============================================================================
--- hadoop/core/trunk/src/java/org/apache/hadoop/mapred/JobTracker.java (original)
+++ hadoop/core/trunk/src/java/org/apache/hadoop/mapred/JobTracker.java Sun Mar 16 22:19:52 2008
@@ -611,7 +611,6 @@
 
   // Used to provide an HTML view on Job, Task, and TaskTracker structures
   StatusHttpServer infoServer;
-  StatusHttpServer historyServer;
   int infoPort;
 
   Server interTrackerServer;
@@ -676,6 +675,16 @@
     infoServer = new StatusHttpServer("job", infoBindAddress, tmpInfoPort, 
                                       tmpInfoPort == 0);
     infoServer.setAttribute("job.tracker", this);
+    // initialize history parameters.
+    boolean historyInitialized = JobHistory.init(conf, this.localMachine);
+    String historyLogDir = null;
+    FileSystem historyFS = null;
+    if (historyInitialized) {
+      historyLogDir = conf.get("hadoop.job.history.location");
+      infoServer.setAttribute("historyLogDir", historyLogDir);
+      historyFS = new Path(historyLogDir).getFileSystem(conf);
+      infoServer.setAttribute("fileSys", historyFS);
+    }
     infoServer.start();
 
     this.startTime = System.currentTimeMillis();
@@ -719,29 +728,19 @@
       }
       Thread.sleep(SYSTEM_DIR_CLEANUP_RETRY_PERIOD);
     }
-
-    // start history viewing server.
-    JobHistory.init(conf, this.localMachine); 
-    String histAddr = conf.get("mapred.job.history.http.bindAddress",
-                                  "0.0.0.0:0");
-    InetSocketAddress historySocAddr = NetUtils.createSocketAddr(histAddr);
-    String historyBindAddress = historySocAddr.getHostName();
-    int tmpHistoryPort = historySocAddr.getPort();
-    historyServer = new StatusHttpServer("history", historyBindAddress, 
-                       tmpHistoryPort, tmpHistoryPort == 0);
-    String historyLogDir = conf.get("hadoop.job.history.location");
-    historyServer.setAttribute("historyLogDir", historyLogDir);
-    FileSystem fileSys = new Path(historyLogDir).getFileSystem(conf);
-    historyServer.setAttribute("fileSys", fileSys);
-    historyServer.start();
-    this.conf.set("mapred.job.history.http.bindAddress", 
-                (this.localMachine + ":" + historyServer.getPort()));
-    LOG.info("JobHistory webserver on JobTracker up at: " +
-              historyServer.getPort());
-
-
     // Same with 'localDir' except it's always on the local disk.
     jobConf.deleteLocalFiles(SUBDIR);
+
+    // Initialize history again if it was not initialized earlier, e.g.
+    // because history is on DFS and the namenode was still in safe mode.
+    if (!historyInitialized) {
+      JobHistory.init(conf, this.localMachine); 
+      historyLogDir = conf.get("hadoop.job.history.location");
+      infoServer.setAttribute("historyLogDir", historyLogDir);
+      historyFS = new Path(historyLogDir).getFileSystem(conf);
+      infoServer.setAttribute("fileSys", historyFS);
+    }
+
     this.dnsToSwitchMapping = (DNSToSwitchMapping)ReflectionUtils.newInstance(
         conf.getClass("topology.node.switch.mapping.impl", ScriptBasedMapping.class,
             DNSToSwitchMapping.class), conf);
@@ -763,10 +762,6 @@
     return NetUtils.createSocketAddr(jobTrackerStr);
   }
 
-  public String getHistoryAddress() {
-    return conf.get("mapred.job.history.http.bindAddress");
-  }
-
   /**
    * Run forever
    */
@@ -798,14 +793,6 @@
       LOG.info("Stopping infoServer");
       try {
         this.infoServer.stop();
-      } catch (InterruptedException ex) {
-        ex.printStackTrace();
-      }
-    }
-    if (this.historyServer != null) {
-      LOG.info("Stopping historyServer");
-      try {
-        this.historyServer.stop();
       } catch (InterruptedException ex) {
         ex.printStackTrace();
       }