Posted to common-commits@hadoop.apache.org by cu...@apache.org on 2008/05/07 01:23:00 UTC

svn commit: r653951 [3/3] - in /hadoop/core/trunk: ./ docs/ docs/skin/images/ src/docs/src/documentation/content/xdocs/ src/java/ src/java/org/apache/hadoop/dfs/ src/java/org/apache/hadoop/net/ src/test/org/apache/hadoop/dfs/

Modified: hadoop/core/trunk/docs/streaming.pdf
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/docs/streaming.pdf?rev=653951&r1=653950&r2=653951&view=diff
==============================================================================
--- hadoop/core/trunk/docs/streaming.pdf (original)
+++ hadoop/core/trunk/docs/streaming.pdf Tue May  6 16:22:57 2008
 [binary diff elided: the regenerated streaming.pdf shortens one compressed content stream (/Length 1542 -> 1538), shifting the xref offsets that follow down by four bytes and moving startxref from 47593 to 47589]

Modified: hadoop/core/trunk/src/docs/src/documentation/content/xdocs/cluster_setup.xml
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/docs/src/documentation/content/xdocs/cluster_setup.xml?rev=653951&r1=653950&r2=653951&view=diff
==============================================================================
--- hadoop/core/trunk/src/docs/src/documentation/content/xdocs/cluster_setup.xml (original)
+++ hadoop/core/trunk/src/docs/src/documentation/content/xdocs/cluster_setup.xml Tue May  6 16:22:57 2008
@@ -147,12 +147,12 @@
 		    </tr>
   		    <tr>
 		      <td>fs.default.name</td>
-  		      <td>Hostname or IP address of <code>NameNode</code>.</td>
-		      <td><em>host:port</em> pair.</td>
+  		      <td>URI of <code>NameNode</code>.</td>
+		      <td><em>hdfs://hostname/</em></td>
 		    </tr>
 		    <tr>
 		      <td>mapred.job.tracker</td>
-		      <td>Hostname or IP address of <code>JobTracker</code>.</td>
+		      <td>Host or IP and port of <code>JobTracker</code>.</td>
 		      <td><em>host:port</em> pair.</td>
 		    </tr>
 		    <tr>
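
For illustration, the table change above means fs.default.name now carries a full URI rather than a bare host:port pair; client code reads it back through FileSystem.getDefaultUri. A minimal sketch (the hostname is hypothetical):

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    public class DefaultUriSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // fs.default.name is now a URI; the port may be omitted.
        FileSystem.setDefaultUri(conf, "hdfs://namenode.example.com/");
        URI uri = FileSystem.getDefaultUri(conf);
        System.out.println(uri.getScheme());    // hdfs
        System.out.println(uri.getAuthority()); // namenode.example.com
        System.out.println(uri.getPort());      // -1; callers fall back to the default port
      }
    }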

Modified: hadoop/core/trunk/src/docs/src/documentation/content/xdocs/hdfs_shell.xml
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/docs/src/documentation/content/xdocs/hdfs_shell.xml?rev=653951&r1=653950&r2=653951&view=diff
==============================================================================
--- hadoop/core/trunk/src/docs/src/documentation/content/xdocs/hdfs_shell.xml (original)
+++ hadoop/core/trunk/src/docs/src/documentation/content/xdocs/hdfs_shell.xml Tue May  6 16:22:57 2008
@@ -21,17 +21,28 @@
 	</header>
 	<body>
 		<section>
-			<title> DFShell </title>
+			<title> FS Shell </title>
 			<p>
-      The HDFS shell is invoked by 
-      <code>bin/hadoop dfs &lt;args&gt;</code>.
-      All the HDFS shell commands take path URIs as arguments. The URI format is <em>scheme://autority/path</em>. For HDFS the scheme is <em>hdfs</em>, and for the local filesystem the scheme is <em>file</em>. The scheme and authority are optional. If not specified, the default scheme specified in the configuration is used. An HDFS file or directory such as <em>/parent/child</em> can be specified as <em>hdfs://namenode:namenodeport/parent/child</em> or simply as <em>/parent/child</em> (given that your configuration is set to point to <em>namenode:namenodeport</em>). Most of the commands in HDFS shell behave like corresponding Unix commands. Differences are described with each of the commands. Error information is sent to <em>stderr</em> and the output is sent to <em>stdout</em>. 
+      The FileSystem (FS) shell is invoked by 
+      <code>bin/hadoop fs &lt;args&gt;</code>.
+      All the FS shell commands take path URIs as arguments. The URI
+      format is <em>scheme://authority/path</em>. For HDFS the scheme
+      is <em>hdfs</em>, and for the local filesystem the scheme
+      is <em>file</em>. The scheme and authority are optional. If not
+      specified, the default scheme specified in the configuration is
+      used. An HDFS file or directory such as <em>/parent/child</em>
+      can be specified as <em>hdfs://namenodehost/parent/child</em> or
+      simply as <em>/parent/child</em> (given that your configuration
+      is set to point to <em>hdfs://namenodehost</em>). Most of the
+      commands in FS shell behave like corresponding Unix
+      commands. Differences are described with each of the
+      commands. Error information is sent to <em>stderr</em> and the
+      output is sent to <em>stdout</em>.
   </p>
-		</section>
 		<section>
 			<title> cat </title>
 			<p>
-				<code>Usage: hadoop dfs -cat URI [URI &#x2026;]</code>
+				<code>Usage: hadoop fs -cat URI [URI &#x2026;]</code>
 			</p>
 			<p>
 		   Copies source paths to <em>stdout</em>. 
@@ -39,11 +50,11 @@
 			<p>Example:</p>
 			<ul>
 				<li>
-					<code> hadoop dfs -cat hdfs://host1:port1/file1 hdfs://host2:port2/file2 
+					<code> hadoop fs -cat hdfs://nn1.example.com/file1 hdfs://nn2.example.com/file2 
 		   </code>
 				</li>
 				<li>
-					<code>hadoop dfs -cat file:///file3 /user/hadoop/file4 </code>
+					<code>hadoop fs -cat file:///file3 /user/hadoop/file4 </code>
 				</li>
 			</ul>
 			<p>Exit Code:<br/>
@@ -52,7 +63,7 @@
 		<section>
 			<title> chgrp </title>
 			<p>
-				<code>Usage: hadoop dfs -chgrp [-R] GROUP URI [URI &#x2026;]</code>
+				<code>Usage: hadoop fs -chgrp [-R] GROUP URI [URI &#x2026;]</code>
 			</p>
 			<p>
 	    Change group association of files. With <code>-R</code>, make the change recursively through the directory structure. The user must be the owner of files, or else a super-user. Additional information is in the <a href="hdfs_permissions_guide.html">Permissions User Guide</a>.
@@ -61,7 +72,7 @@
 		<section>
 			<title> chmod </title>
 			<p>
-				<code>Usage: hadoop dfs -chmod [-R] &lt;MODE[,MODE]... | OCTALMODE&gt; URI [URI &#x2026;]</code>
+				<code>Usage: hadoop fs -chmod [-R] &lt;MODE[,MODE]... | OCTALMODE&gt; URI [URI &#x2026;]</code>
 			</p>
 			<p>
 	    Change the permissions of files. With <code>-R</code>, make the change recursively through the directory structure. The user must be the owner of the file, or else a super-user. Additional information is in the <a href="hdfs_permissions_guide.html">Permissions User Guide</a>.
@@ -70,7 +81,7 @@
 		<section>
 			<title> chown </title>
 			<p>
-				<code>Usage: hadoop dfs -chown [-R] [OWNER][:[GROUP]] URI [URI ]</code>
+				<code>Usage: hadoop fs -chown [-R] [OWNER][:[GROUP]] URI [URI ]</code>
 			</p>
 			<p>
 	    Change the owner of files. With <code>-R</code>, make the change recursively through the directory structure. The user must be a super-user. Additional information is in the <a href="hdfs_permissions_guide.html">Permissions User Guide</a>.
@@ -79,21 +90,21 @@
 		<section>
 			<title>copyFromLocal</title>
 			<p>
-				<code>Usage: hadoop dfs -copyFromLocal &lt;localsrc&gt; URI</code>
+				<code>Usage: hadoop fs -copyFromLocal &lt;localsrc&gt; URI</code>
 			</p>
 			<p>Similar to <a href="#putlink"><strong>put</strong></a> command, except that the source is restricted to a local file reference. </p>
 		</section>
 		<section>
 			<title> copyToLocal</title>
 			<p>
-				<code>Usage: hadoop dfs -copyToLocal [-ignorecrc] [-crc] URI &lt;localdst&gt;</code>
+				<code>Usage: hadoop fs -copyToLocal [-ignorecrc] [-crc] URI &lt;localdst&gt;</code>
 			</p>
 			<p> Similar to <a href="#getlink"><strong>get</strong></a> command, except that the destination is restricted to a local file reference.</p>
 		</section>
 		<section>
 			<title> cp </title>
 			<p>
-				<code>Usage: hadoop dfs -cp URI [URI &#x2026;] &lt;dest&gt;</code>
+				<code>Usage: hadoop fs -cp URI [URI &#x2026;] &lt;dest&gt;</code>
 			</p>
 			<p>
 	    Copy files from source to destination. This command allows multiple sources, in which case the destination must be a directory.
@@ -101,10 +112,10 @@
 	    Example:</p>
 			<ul>
 				<li>
-					<code> hadoop dfs -cp /user/hadoop/file1 /user/hadoop/file2</code>
+					<code> hadoop fs -cp /user/hadoop/file1 /user/hadoop/file2</code>
 				</li>
 				<li>
-					<code> hadoop dfs -cp /user/hadoop/file1 /user/hadoop/file2 /user/hadoop/dir </code>
+					<code> hadoop fs -cp /user/hadoop/file1 /user/hadoop/file2 /user/hadoop/dir </code>
 				</li>
 			</ul>
 			<p>Exit Code:</p>
@@ -115,17 +126,17 @@
 		<section>
 			<title>du</title>
 			<p>
-				<code>Usage: hadoop dfs -du URI [URI &#x2026;]</code>
+				<code>Usage: hadoop fs -du URI [URI &#x2026;]</code>
 			</p>
 			<p>
 	     Displays aggregate length of files contained in the directory, or the length of a file in case it's just a file.<br/>
-	     Example:<br/><code>hadoop dfs -du /user/hadoop/dir1 /user/hadoop/file1 hdfs://host:port/user/hadoop/dir1</code><br/>
+	     Example:<br/><code>hadoop fs -du /user/hadoop/dir1 /user/hadoop/file1 hdfs://nn.example.com/user/hadoop/dir1</code><br/>
 	     Exit Code:<br/><code> Returns 0 on success and -1 on error. </code><br/></p>
 		</section>
 		<section>
 			<title> dus </title>
 			<p>
-				<code>Usage: hadoop dfs -dus &lt;args&gt;</code>
+				<code>Usage: hadoop fs -dus &lt;args&gt;</code>
 			</p>
 			<p>
 	    Displays a summary of file lengths.
@@ -134,7 +145,7 @@
 		<section>
 			<title> expunge </title>
 			<p>
-				<code>Usage: hadoop dfs -expunge</code>
+				<code>Usage: hadoop fs -expunge</code>
 			</p>
 			<p>Empty the Trash. Refer to <a href="hdfs_design.html">HDFS Design</a> for more information on Trash feature.
 	   </p>
@@ -142,7 +153,7 @@
 		<section>
 			<title id="getlink"> get </title>
 			<p>
-				<code>Usage: hadoop dfs -get [-ignorecrc] [-crc] &lt;src&gt; &lt;localdst&gt;</code>
+				<code>Usage: hadoop fs -get [-ignorecrc] [-crc] &lt;src&gt; &lt;localdst&gt;</code>
 				<br/>
 			</p>
 			<p>
@@ -153,10 +164,10 @@
 			<p>Example:</p>
 			<ul>
 				<li>
-					<code> hadoop dfs -get /user/hadoop/file localfile </code>
+					<code> hadoop fs -get /user/hadoop/file localfile </code>
 				</li>
 				<li>
-					<code> hadoop dfs -get hdfs://host:port/user/hadoop/file localfile</code>
+					<code> hadoop fs -get hdfs://nn.example.com/user/hadoop/file localfile</code>
 				</li>
 			</ul>
 			<p>Exit Code:</p>
@@ -167,7 +178,7 @@
 		<section>
 			<title> getmerge </title>
 			<p>
-				<code>Usage: hadoop dfs -getmerge &lt;src&gt; &lt;localdst&gt; [addnl]</code>
+				<code>Usage: hadoop fs -getmerge &lt;src&gt; &lt;localdst&gt; [addnl]</code>
 			</p>
 			<p>
 	  Takes a source directory and a destination file as input and concatenates files in src into the destination local file. Optionally <code>addnl</code> can be set to enable adding a newline character at the end of each file.  
@@ -176,25 +187,25 @@
 		<section>
 			<title> ls </title>
 			<p>
-				<code>Usage: hadoop dfs -ls &lt;args&gt;</code>
+				<code>Usage: hadoop fs -ls &lt;args&gt;</code>
 			</p>
 			<p>
 		 For a file, returns stat on the file with the following format:<br/><code>filename &lt;number of replicas&gt; filesize modification_date modification_time permissions userid groupid</code><br/>
 	         For a directory, it returns a list of its direct children, as in Unix.
 	         A directory is listed as: <br/><code>dirname &lt;dir&gt; modification_date modification_time permissions userid groupid</code><br/>
-	         Example:<br/><code>hadoop dfs -ls /user/hadoop/file1 /user/hadoop/file2 hdfs://host:port/user/hadoop/dir1 /nonexistentfile</code><br/>
+	         Example:<br/><code>hadoop fs -ls /user/hadoop/file1 /user/hadoop/file2 hdfs://nn.example.com/user/hadoop/dir1 /nonexistentfile</code><br/>
 	         Exit Code:<br/><code> Returns 0 on success and -1 on error. </code><br/></p>
 		</section>
 		<section>
 			<title>lsr</title>
-			<p><code>Usage: hadoop dfs -lsr &lt;args&gt;</code><br/>
+			<p><code>Usage: hadoop fs -lsr &lt;args&gt;</code><br/>
 	      Recursive version of <code>ls</code>. Similar to Unix <code>ls -R</code>.
 	      </p>
 		</section>
 		<section>
 			<title> mkdir </title>
 			<p>
-				<code>Usage: hadoop dfs -mkdir &lt;paths&gt;</code>
+				<code>Usage: hadoop fs -mkdir &lt;paths&gt;</code>
 				<br/>
 			</p>
 			<p>
@@ -203,10 +214,10 @@
 			<p>Example:</p>
 			<ul>
 				<li>
-					<code>hadoop dfs -mkdir /user/hadoop/dir1 /user/hadoop/dir2 </code>
+					<code>hadoop fs -mkdir /user/hadoop/dir1 /user/hadoop/dir2 </code>
 				</li>
 				<li>
-					<code>hadoop dfs -mkdir hdfs://host1:port1/user/hadoop/dir hdfs://host2:port2/user/hadoop/dir
+					<code>hadoop fs -mkdir hdfs://nn1.example.com/user/hadoop/dir hdfs://nn2.example.com/user/hadoop/dir
 	  </code>
 				</li>
 			</ul>
@@ -226,7 +237,7 @@
 		<section>
 			<title> mv </title>
 			<p>
-				<code>Usage: hadoop dfs -mv URI [URI &#x2026;] &lt;dest&gt;</code>
+				<code>Usage: hadoop fs -mv URI [URI &#x2026;] &lt;dest&gt;</code>
 			</p>
 			<p>
 	    Moves files from source to destination. This command allows multiple sources, in which case the destination must be a directory. Moving files across filesystems is not permitted.
@@ -235,10 +246,10 @@
 	    </p>
 			<ul>
 				<li>
-					<code> hadoop dfs -mv /user/hadoop/file1 /user/hadoop/file2</code>
+					<code> hadoop fs -mv /user/hadoop/file1 /user/hadoop/file2</code>
 				</li>
 				<li>
-					<code> hadoop dfs -mv hdfs://host:port/file1 hdfs://host:port/file2 hdfs://host:port/file3 hdfs://host:port/dir1</code>
+					<code> hadoop fs -mv hdfs://nn.example.com/file1 hdfs://nn.example.com/file2 hdfs://nn.example.com/file3 hdfs://nn.example.com/dir1</code>
 				</li>
 			</ul>
 			<p>Exit Code:</p>
@@ -249,21 +260,21 @@
 		<section>
 			<title id="putlink"> put </title>
 			<p>
-				<code>Usage: hadoop dfs -put &lt;localsrc&gt; ... &lt;dst&gt;</code>
+				<code>Usage: hadoop fs -put &lt;localsrc&gt; ... &lt;dst&gt;</code>
 			</p>
 			<p>Copies a single src, or multiple srcs, from the local filesystem to the destination filesystem. Also reads input from stdin and writes to the destination filesystem.<br/>
 	   </p>
 			<ul>
 				<li>
-					<code> hadoop dfs -put localfile /user/hadoop/hadoopfile</code>
+					<code> hadoop fs -put localfile /user/hadoop/hadoopfile</code>
 				</li>
 				<li>
-					<code> hadoop dfs -put localfile1 localfile2 /user/hadoop/hadoopdir</code>
+					<code> hadoop fs -put localfile1 localfile2 /user/hadoop/hadoopdir</code>
 				</li>
 				<li>
-					<code> hadoop dfs -put localfile hdfs://host:port/hadoop/hadoopfile</code>
+					<code> hadoop fs -put localfile hdfs://nn.example.com/hadoop/hadoopfile</code>
 				</li>
-				<li><code>hadoop dfs -put - hdfs://host:port/hadoop/hadoopfile</code><br/>Reads the input from stdin.</li>
+				<li><code>hadoop fs -put - hdfs://nn.example.com/hadoop/hadoopfile</code><br/>Reads the input from stdin.</li>
 			</ul>
 			<p>Exit Code:</p>
 			<p>
@@ -273,7 +284,7 @@
 		<section>
 			<title> rm </title>
 			<p>
-				<code>Usage: hadoop dfs -rm URI [URI &#x2026;] </code>
+				<code>Usage: hadoop fs -rm URI [URI &#x2026;] </code>
 			</p>
 			<p>
 	   Delete files specified as args. Only deletes files and empty directories. Refer to rmr for recursive deletes.<br/>
@@ -281,7 +292,7 @@
 	   </p>
 			<ul>
 				<li>
-					<code> hadoop dfs -rm hdfs://host:port/file /user/hadoop/emptydir </code>
+					<code> hadoop fs -rm hdfs://nn.example.com/file /user/hadoop/emptydir </code>
 				</li>
 			</ul>
 			<p>Exit Code:</p>
@@ -292,17 +303,17 @@
 		<section>
 			<title> rmr </title>
 			<p>
-				<code>Usage: hadoop dfs -rmr URI [URI &#x2026;]</code>
+				<code>Usage: hadoop fs -rmr URI [URI &#x2026;]</code>
 			</p>
 			<p>Recursive version of delete.<br/>
 	   Example:
 	   </p>
 			<ul>
 				<li>
-					<code> hadoop dfs -rmr /user/hadoop/dir </code>
+					<code> hadoop fs -rmr /user/hadoop/dir </code>
 				</li>
 				<li>
-					<code> hadoop dfs -rmr hdfs://host:port/user/hadoop/dir </code>
+					<code> hadoop fs -rmr hdfs://nn.example.com/user/hadoop/dir </code>
 				</li>
 			</ul>
 			<p>Exit Code:</p>
@@ -313,7 +324,7 @@
 		<section>
 			<title> setrep </title>
 			<p>
-				<code>Usage: hadoop dfs -setrep [-R] &lt;path&gt;</code>
+				<code>Usage: hadoop fs -setrep [-R] [-w] &lt;rep&gt; &lt;path&gt;</code>
 			</p>
 			<p>
 	   Changes the replication factor of a file. The -R option recursively changes the replication factor of files within a directory.
@@ -321,7 +332,7 @@
 			<p>Example:</p>
 			<ul>
 				<li>
-					<code> hadoop dfs -setrep -w 3 -R /user/hadoop/dir1 </code>
+					<code> hadoop fs -setrep -w 3 -R /user/hadoop/dir1 </code>
 				</li>
 			</ul>
 			<p>Exit Code:</p>
@@ -332,7 +343,7 @@
 		<section>
 			<title> stat </title>
 			<p>
-				<code>Usage: hadoop dfs -stat URI [URI &#x2026;]</code>
+				<code>Usage: hadoop fs -stat URI [URI &#x2026;]</code>
 			</p>
 			<p>
 	   Returns the stat information on the path.
@@ -340,7 +351,7 @@
 			<p>Example:</p>
 			<ul>
 				<li>
-					<code> hadoop dfs -stat path </code>
+					<code> hadoop fs -stat path </code>
 				</li>
 			</ul>
 			<p>Exit Code:<br/>
@@ -349,7 +360,7 @@
 		<section>
 			<title> tail </title>
 			<p>
-				<code>Usage: hadoop dfs -tail [-f] URI </code>
+				<code>Usage: hadoop fs -tail [-f] URI </code>
 			</p>
 			<p>
 	   Displays the last kilobyte of the file to stdout. The -f option can be used as in Unix.
@@ -357,7 +368,7 @@
 			<p>Example:</p>
 			<ul>
 				<li>
-					<code> hadoop dfs -tail pathname </code>
+					<code> hadoop fs -tail pathname </code>
 				</li>
 			</ul>
 			<p>Exit Code: <br/>
@@ -366,7 +377,7 @@
 		<section>
 			<title> test </title>
 			<p>
-				<code>Usage: hadoop dfs -test -[ezd] URI</code>
+				<code>Usage: hadoop fs -test -[ezd] URI</code>
 			</p>
 			<p>
 	   Options: <br/>
@@ -376,14 +387,14 @@
 			<p>Example:</p>
 			<ul>
 				<li>
-					<code> hadoop dfs -test -e filename </code>
+					<code> hadoop fs -test -e filename </code>
 				</li>
 			</ul>
 		</section>
 		<section>
 			<title> text </title>
 			<p>
-				<code>Usage: hadoop dfs -text &lt;src&gt;</code>
+				<code>Usage: hadoop fs -text &lt;src&gt;</code>
 				<br/>
 			</p>
 			<p>
@@ -393,7 +404,7 @@
 		<section>
 			<title> touchz </title>
 			<p>
-				<code>Usage: hadoop dfs -touchz URI [URI &#x2026;]</code>
+				<code>Usage: hadoop fs -touchz URI [URI &#x2026;]</code>
 				<br/>
 			</p>
 			<p>
@@ -408,5 +419,6 @@
 			<p>Exit Code:<br/>
 	   <code> Returns 0 on success and -1 on error.</code></p>
 		</section>
+        </section>
 	</body>
 </document>
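
As a rough illustration of the scheme/authority defaulting described in the FS Shell intro above: a path with no scheme is qualified against the configured default filesystem. A minimal sketch (hostname hypothetical; needs a reachable NameNode to run):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class QualifySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem.setDefaultUri(conf, "hdfs://namenodehost/");
        FileSystem fs = FileSystem.get(conf); // opens the default filesystem
        // Scheme and authority are filled in from the configuration:
        System.out.println(new Path("/parent/child").makeQualified(fs));
        // prints hdfs://namenodehost/parent/child
      }
    }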

Modified: hadoop/core/trunk/src/docs/src/documentation/content/xdocs/site.xml
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/docs/src/documentation/content/xdocs/site.xml?rev=653951&r1=653950&r2=653951&view=diff
==============================================================================
--- hadoop/core/trunk/src/docs/src/documentation/content/xdocs/site.xml (original)
+++ hadoop/core/trunk/src/docs/src/documentation/content/xdocs/site.xml Tue May  6 16:22:57 2008
@@ -37,8 +37,8 @@
     <setup     label="Cluster Setup"      href="cluster_setup.html" />
     <hdfs      label="HDFS Architecture"  href="hdfs_design.html" />
     <hdfs      label="HDFS User Guide"    href="hdfs_user_guide.html" />
-    <hdfs      label="HDFS Shell Guide"   href="hdfs_shell.html" />
     <hdfs      label="HDFS Permissions Guide"    href="hdfs_permissions_guide.html" />
+    <fs        label="FS Shell Guide"     href="hdfs_shell.html" />
     <mapred    label="Map-Reduce Tutorial" href="mapred_tutorial.html" />
     <mapred    label="Native Hadoop Libraries" href="native_libraries.html" />
     <streaming label="Streaming"          href="streaming.html" />

Modified: hadoop/core/trunk/src/docs/src/documentation/content/xdocs/streaming.xml
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/docs/src/documentation/content/xdocs/streaming.xml?rev=653951&r1=653950&r2=653951&view=diff
==============================================================================
--- hadoop/core/trunk/src/docs/src/documentation/content/xdocs/streaming.xml (original)
+++ hadoop/core/trunk/src/docs/src/documentation/content/xdocs/streaming.xml Tue May  6 16:22:57 2008
@@ -163,7 +163,7 @@
                   -mapper "xargs cat"  \
                   -reducer "cat"  \
                   -output "/user/me/samples/cachefile/out" \  
-                  -cacheArchive 'hdfs://hadoop-nn1.example.com:8020/user/me/samples/cachefile/cachedir.jar#testlink' \  
+                  -cacheArchive 'hdfs://hadoop-nn1.example.com/user/me/samples/cachefile/cachedir.jar#testlink' \  
                   -jobconf mapred.map.tasks=1 \
                   -jobconf mapred.reduce.tasks=1 \ 
                   -jobconf mapred.job.name="Experiment"

Modified: hadoop/core/trunk/src/java/org/apache/hadoop/dfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/java/org/apache/hadoop/dfs/DFSClient.java?rev=653951&r1=653950&r2=653951&view=diff
==============================================================================
--- hadoop/core/trunk/src/java/org/apache/hadoop/dfs/DFSClient.java (original)
+++ hadoop/core/trunk/src/java/org/apache/hadoop/dfs/DFSClient.java Tue May  6 16:22:57 2008
@@ -81,9 +81,7 @@
     new TreeMap<String, OutputStream>();
  
   static ClientProtocol createNamenode(Configuration conf) throws IOException {
-    return createNamenode(NetUtils.createSocketAddr
-                          (FileSystem.getDefaultUri(conf).getAuthority()),
-                          conf);
+    return createNamenode(NameNode.getAddress(conf), conf);
   }
 
   static ClientProtocol createNamenode( InetSocketAddress nameNodeAddr,
@@ -132,9 +130,7 @@
    * Create a new DFSClient connected to the default namenode.
    */
   public DFSClient(Configuration conf) throws IOException {
-    this(NetUtils.createSocketAddr(FileSystem.getDefaultUri(conf)
-                                   .getAuthority()),
-         conf);
+    this(NameNode.getAddress(conf), conf);
   }
 
   /** 
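
For illustration, the removed code failed outright when fs.default.name held a port-less URI, since the single-argument createSocketAddr demands host:port. A minimal sketch of the difference (hostname hypothetical; NameNode.getAddress(conf) delegates to the two-argument form with DEFAULT_PORT):

    import java.net.InetSocketAddress;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.net.NetUtils;

    public class NamenodeAddrSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        FileSystem.setDefaultUri(conf, "hdfs://namenode.example.com/"); // no port
        String authority = FileSystem.getDefaultUri(conf).getAuthority();
        // Old path: NetUtils.createSocketAddr(authority) -> RuntimeException,
        // "Not a host:port pair". New path supplies the default:
        InetSocketAddress addr = NetUtils.createSocketAddr(authority, 8020);
        System.out.println(addr); // namenode.example.com:8020
      }
    }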

Modified: hadoop/core/trunk/src/java/org/apache/hadoop/dfs/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/java/org/apache/hadoop/dfs/DataNode.java?rev=653951&r1=653950&r2=653951&view=diff
==============================================================================
--- hadoop/core/trunk/src/java/org/apache/hadoop/dfs/DataNode.java (original)
+++ hadoop/core/trunk/src/java/org/apache/hadoop/dfs/DataNode.java Tue May  6 16:22:57 2008
@@ -210,8 +210,7 @@
                                      conf.get("dfs.datanode.dns.interface","default"),
                                      conf.get("dfs.datanode.dns.nameserver","default"));
     }
-    InetSocketAddress nameNodeAddr =
-      NetUtils.createSocketAddr(FileSystem.getDefaultUri(conf).getAuthority());
+    InetSocketAddress nameNodeAddr = NameNode.getAddress(conf);
     
     this.estimateBlockSize = conf.getLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
     this.socketTimeout =  conf.getInt("dfs.socket.timeout",

Modified: hadoop/core/trunk/src/java/org/apache/hadoop/dfs/DistributedFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/java/org/apache/hadoop/dfs/DistributedFileSystem.java?rev=653951&r1=653950&r2=653951&view=diff
==============================================================================
--- hadoop/core/trunk/src/java/org/apache/hadoop/dfs/DistributedFileSystem.java (original)
+++ hadoop/core/trunk/src/java/org/apache/hadoop/dfs/DistributedFileSystem.java Tue May  6 16:22:57 2008
@@ -47,10 +47,7 @@
   /** @deprecated */
   public DistributedFileSystem(InetSocketAddress namenode,
     Configuration conf) throws IOException {
-    initialize(URI.create("hdfs://"+
-                          namenode.getHostName()+":"+
-                          namenode.getPort()),
-                          conf);
+    initialize(NameNode.getUri(namenode), conf);
   }
 
   /** @deprecated */
@@ -60,14 +57,15 @@
 
   public void initialize(URI uri, Configuration conf) throws IOException {
     setConf(conf);
+
     String host = uri.getHost();
-    int port = uri.getPort();
-    if (host == null || port == -1) {
-      throw new IOException("Incomplete HDFS URI, no host/port: "+ uri);
-    }
-    this.dfs = new DFSClient(new InetSocketAddress(host, port), conf,
-                             statistics);
-    this.uri = URI.create("hdfs://"+host+":"+port);
+    if (host == null) {
+      throw new IOException("Incomplete HDFS URI, no host: "+ uri);
+    }
+
+    InetSocketAddress namenode = NameNode.getAddress(uri.getAuthority());
+    this.dfs = new DFSClient(namenode, conf, statistics);
+    this.uri = NameNode.getUri(namenode);
     this.workingDir = getHomeDirectory();
   }
 

Modified: hadoop/core/trunk/src/java/org/apache/hadoop/dfs/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/java/org/apache/hadoop/dfs/NameNode.java?rev=653951&r1=653950&r2=653951&view=diff
==============================================================================
--- hadoop/core/trunk/src/java/org/apache/hadoop/dfs/NameNode.java (original)
+++ hadoop/core/trunk/src/java/org/apache/hadoop/dfs/NameNode.java Tue May  6 16:22:57 2008
@@ -85,6 +85,8 @@
     }
   }
     
+  public static final int DEFAULT_PORT = 8020;
+
   public static final Log LOG = LogFactory.getLog("org.apache.hadoop.dfs.NameNode");
   public static final Log stateChangeLog = LogFactory.getLog("org.apache.hadoop.dfs.StateChange");
 
@@ -110,7 +112,20 @@
     return myMetrics;
   }
   
-    
+  static InetSocketAddress getAddress(String address) {
+    return NetUtils.createSocketAddr(address, DEFAULT_PORT);
+  }
+
+  static InetSocketAddress getAddress(Configuration conf) {
+    return getAddress(FileSystem.getDefaultUri(conf).getAuthority());
+  }
+
+  static URI getUri(InetSocketAddress namenode) {
+    int port = namenode.getPort();
+    String portString = port == DEFAULT_PORT ? "" : (":"+port);
+    return URI.create("hdfs://"+ namenode.getHostName()+portString);
+  }
+
   /**
    * Initialize the server
    * 
@@ -118,14 +133,14 @@
    * @param conf the configuration
    */
   private void initialize(String address, Configuration conf) throws IOException {
-    InetSocketAddress socAddr = NetUtils.createSocketAddr(address);
+    InetSocketAddress socAddr = NameNode.getAddress(address);
     this.handlerCount = conf.getInt("dfs.namenode.handler.count", 10);
     this.server = RPC.getServer(this, socAddr.getHostName(), socAddr.getPort(),
                                 handlerCount, false, conf);
 
     // The rpc-server port can be ephemeral... ensure we have the correct info
     this.nameNodeAddress = this.server.getListenerAddress(); 
-    FileSystem.setDefaultUri(conf, "hdfs://"+nameNodeAddress.getHostName() + ":" + nameNodeAddress.getPort());
+    FileSystem.setDefaultUri(conf, getUri(nameNodeAddress));
     LOG.info("Namenode up at: " + this.nameNodeAddress);
 
     myMetrics = new NameNodeMetrics(conf, this);
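
The two helpers above are symmetric: getAddress fills in DEFAULT_PORT when the authority omits one, and getUri elides the port again when it equals the default. A minimal round-trip sketch (the helpers are package-private, so this lives in org.apache.hadoop.dfs, as the new test below does; hostnames hypothetical):

    package org.apache.hadoop.dfs;

    import java.net.InetSocketAddress;
    import java.net.URI;

    public class UriRoundTripSketch {
      public static void main(String[] args) {
        InetSocketAddress addr = NameNode.getAddress("foo"); // no port given
        System.out.println(addr.getPort() == NameNode.DEFAULT_PORT);            // true
        System.out.println(NameNode.getUri(addr));                              // hdfs://foo
        System.out.println(NameNode.getUri(new InetSocketAddress("foo", 555))); // hdfs://foo:555
      }
    }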

Modified: hadoop/core/trunk/src/java/org/apache/hadoop/dfs/SecondaryNameNode.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/java/org/apache/hadoop/dfs/SecondaryNameNode.java?rev=653951&r1=653950&r2=653951&view=diff
==============================================================================
--- hadoop/core/trunk/src/java/org/apache/hadoop/dfs/SecondaryNameNode.java (original)
+++ hadoop/core/trunk/src/java/org/apache/hadoop/dfs/SecondaryNameNode.java Tue May  6 16:22:57 2008
@@ -121,8 +121,8 @@
     
     // Create connection to the namenode.
     shouldRun = true;
-    nameNodeAddr =
-      NetUtils.createSocketAddr(FileSystem.getDefaultUri(conf).getAuthority());
+    nameNodeAddr = NameNode.getAddress(conf);
+
     this.conf = conf;
     this.namenode =
         (ClientProtocol) RPC.waitForProxy(ClientProtocol.class,

Modified: hadoop/core/trunk/src/java/org/apache/hadoop/net/NetUtils.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/java/org/apache/hadoop/net/NetUtils.java?rev=653951&r1=653950&r2=653951&view=diff
==============================================================================
--- hadoop/core/trunk/src/java/org/apache/hadoop/net/NetUtils.java (original)
+++ hadoop/core/trunk/src/java/org/apache/hadoop/net/NetUtils.java Tue May  6 16:22:57 2008
@@ -116,22 +116,41 @@
    *   <fs>://<host>:<port>/<path>
    */
   public static InetSocketAddress createSocketAddr(String target) {
+    return createSocketAddr(target, -1);
+  }
+
+  /**
+   * Util method to build socket addr from either:
+   *   <host>
+   *   <host>:<port>
+   *   <fs>://<host>:<port>/<path>
+   */
+  public static InetSocketAddress createSocketAddr(String target,
+                                                   int defaultPort) {
     int colonIndex = target.indexOf(':');
-    if (colonIndex < 0) {
+    if (colonIndex < 0 && defaultPort == -1) {
       throw new RuntimeException("Not a host:port pair: " + target);
     }
     String hostname;
-    int port;
+    int port = -1;
     if (!target.contains("/")) {
-      // must be the old style <host>:<port>
-      hostname = target.substring(0, colonIndex);
-      port = Integer.parseInt(target.substring(colonIndex + 1));
+      if (colonIndex == -1) {
+        hostname = target;
+      } else {
+        // must be the old style <host>:<port>
+        hostname = target.substring(0, colonIndex);
+        port = Integer.parseInt(target.substring(colonIndex + 1));
+      }
     } else {
       // a new uri
       URI addr = new Path(target).toUri();
       hostname = addr.getHost();
       port = addr.getPort();
     }
+
+    if (port == -1) {
+      port = defaultPort;
+    }
   
     if (getStaticResolution(hostname) != null) {
       hostname = getStaticResolution(hostname);
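
For illustration, the new two-argument overload lets callers choose the fallback: -1 keeps the old fail-fast behavior, any other value fills in a missing port. A minimal sketch:

    import java.net.InetSocketAddress;
    import org.apache.hadoop.net.NetUtils;

    public class SocketAddrSketch {
      public static void main(String[] args) {
        // Bare host: defaultPort fills the gap.
        System.out.println(NetUtils.createSocketAddr("foo", 8020));              // foo:8020
        // An explicit port wins over the default.
        System.out.println(NetUtils.createSocketAddr("foo:555", 8020));          // foo:555
        // URI form: host and port come from the URI.
        System.out.println(NetUtils.createSocketAddr("hdfs://foo:555/x", 8020)); // foo:555
        // Single-argument form is unchanged:
        //   NetUtils.createSocketAddr("foo") still throws "Not a host:port pair".
      }
    }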

Modified: hadoop/core/trunk/src/java/overview.html
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/java/overview.html?rev=653951&r1=653950&r2=653951&view=diff
==============================================================================
--- hadoop/core/trunk/src/java/overview.html (original)
+++ hadoop/core/trunk/src/java/overview.html Tue May  6 16:22:57 2008
@@ -133,7 +133,7 @@
 <ol>
 
 <li>The {@link org.apache.hadoop.dfs.NameNode} (Distributed Filesystem
-master) host and port.  This is specified with the configuration
+master) host.  This is specified with the configuration
 property <tt><a
 href="../hadoop-default.html#fs.default.name">fs.default.name</a></tt>.
 </li>
@@ -158,7 +158,7 @@
 
   <property>
     <name>fs.default.name</name>
-    <value>localhost:9000</value>
+    <value>hdfs://localhost/</value>
   </property>
 
   <property>
@@ -173,7 +173,7 @@
 
 </configuration></xmp>
 
-<p>(We also set the DFS replication level to 1 in order to
+<p>(We also set the HDFS replication level to 1 in order to
 reduce warnings when running on a single node.)</p>
 
 <p>Now check that the command <br><tt>ssh localhost</tt><br> does not
@@ -198,7 +198,7 @@
 
 <p>Input files are copied into the distributed filesystem as follows:</p>
 
-<p><tt>bin/hadoop dfs -put input input</tt></p>
+<p><tt>bin/hadoop fs -put input input</tt></p>
 
 <h3>Distributed execution</h3>
 
@@ -207,7 +207,7 @@
 
 <tt>
 bin/hadoop jar hadoop-*-examples.jar grep input output 'dfs[a-z.]+'<br>
-bin/hadoop dfs -get output output
+bin/hadoop fs -get output output
 cat output/*
 </tt>
 
@@ -215,49 +215,49 @@
 
 <p><tt>bin/stop-all.sh</tt></p>
 
-<h2>Fully-distributed operation</h2>
+<h3>Fully-distributed operation</h3>
 
-<p>Distributed operation is just like the pseudo-distributed operation
-described above, except:</p>
+<p>Fully distributed operation is just like the pseudo-distributed operation
+described above, except, in <tt>conf/hadoop-site.xml</tt>, specify:</p>
 
 <ol>
 
-<li>Specify hostname or IP address of the master server in the values
+<li>The hostname or IP address of your master server in the value
 for <tt><a
-href="../hadoop-default.html#fs.default.name">fs.default.name</a></tt>
-and <tt><a
-href="../hadoop-default.html#mapred.job.tracker">mapred.job.tracker</a></tt>
-in <tt>conf/hadoop-site.xml</tt>.  These are specified as
-<tt><em>host</em>:<em>port</em></tt> pairs.</li>
+href="../hadoop-default.html#fs.default.name">fs.default.name</a></tt>,
+  as <tt><em>hdfs://master.example.com/</em></tt>.</li>
+
+<li>The host and port of your master server in the value
+of <tt><a href="../hadoop-default.html#mapred.job.tracker">mapred.job.tracker</a></tt>
+as <tt><em>master.example.com</em>:<em>port</em></tt>.</li>
 
-<li>Specify directories for <tt><a
+<li>Directories for <tt><a
 href="../hadoop-default.html#dfs.name.dir">dfs.name.dir</a></tt> and
-<tt><a
-href="../hadoop-default.html#dfs.data.dir">dfs.data.dir</a></tt> in
-<tt>conf/hadoop-site.xml</tt>.  These are used to hold distributed
-filesystem data on the master node and slave nodes respectively.  Note
+<tt><a href="../hadoop-default.html#dfs.data.dir">dfs.data.dir</a></tt>.
+These are local directories used to hold distributed filesystem
+data on the master node and slave nodes respectively.  Note
 that <tt>dfs.data.dir</tt> may contain a space- or comma-separated
-list of directory names, so that data may be stored on multiple
+list of directory names, so that data may be stored on multiple local
 devices.</li>
 
-<li>Specify <tt><a
-href="../hadoop-default.html#mapred.local.dir">mapred.local.dir</a></tt>
-in <tt>conf/hadoop-site.xml</tt>.  This determines where temporary
-MapReduce data is written.  It also may be a list of directories.</li>
+<li><tt><a href="../hadoop-default.html#mapred.local.dir">mapred.local.dir</a></tt>,
+  the local directory where temporary MapReduce data is stored.  It
+  also may be a list of directories.</li>
 
-<li>Specify <tt><a
+<li><tt><a
 href="../hadoop-default.html#mapred.map.tasks">mapred.map.tasks</a></tt>
 and <tt><a
-href="../hadoop-default.html#mapred.reduce.tasks">mapred.reduce.tasks</a></tt>
-in <tt>conf/hadoop-site.xml</tt>.  As a rule of thumb, use 10x the
+href="../hadoop-default.html#mapred.reduce.tasks">mapred.reduce.tasks</a></tt>.
+As a rule of thumb, use 10x the
 number of slave processors for <tt>mapred.map.tasks</tt>, and 2x the
 number of slave processors for <tt>mapred.reduce.tasks</tt>.</li>
 
-<li>List all slave hostnames or IP addresses in your
-<tt>conf/slaves</tt> file, one per line.</li>
-
 </ol>
 
+<p>Finally, list all slave hostnames or IP addresses in your
+<tt>conf/slaves</tt> file, one per line.  Then format your filesystem
+and start your cluster on your master node, as above.</p>
+
 </body>
 </html>
 

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestDFSShellGenericOptions.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestDFSShellGenericOptions.java?rev=653951&r1=653950&r2=653951&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestDFSShellGenericOptions.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestDFSShellGenericOptions.java Tue May  6 16:22:57 2008
@@ -100,8 +100,7 @@
     FileSystem fs=null;
     try {
       ToolRunner.run(shell, args);
-      fs = new DistributedFileSystem(
-                                     NetUtils.createSocketAddr(namenode), 
+      fs = new DistributedFileSystem(NameNode.getAddress(namenode), 
                                      shell.getConf());
       assertTrue("Directory does not get created", 
                  fs.isDirectory(new Path("/data")));

Added: hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestDefaultNameNodePort.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestDefaultNameNodePort.java?rev=653951&view=auto
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestDefaultNameNodePort.java (added)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestDefaultNameNodePort.java Tue May  6 16:22:57 2008
@@ -0,0 +1,59 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.dfs;
+
+import junit.framework.TestCase;
+import java.io.*;
+import java.net.*;
+import java.util.*;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+
+/** Test NameNode port defaulting code. */
+public class TestDefaultNameNodePort extends TestCase {
+
+  public void testGetAddressFromString() throws Exception {
+    assertEquals(NameNode.getAddress("foo").getPort(),
+                 NameNode.DEFAULT_PORT);
+    assertEquals(NameNode.getAddress("hdfs://foo/").getPort(),
+                 NameNode.DEFAULT_PORT);
+    assertEquals(NameNode.getAddress("hdfs://foo:555").getPort(),
+                 555);
+    assertEquals(NameNode.getAddress("foo:555").getPort(),
+                 555);
+  }
+
+  public void testGetAddressFromConf() throws Exception {
+    Configuration conf = new Configuration();
+    FileSystem.setDefaultUri(conf, "hdfs://foo/");
+    assertEquals(NameNode.getAddress(conf).getPort(), NameNode.DEFAULT_PORT);
+    FileSystem.setDefaultUri(conf, "hdfs://foo:555/");
+    assertEquals(NameNode.getAddress(conf).getPort(), 555);
+    FileSystem.setDefaultUri(conf, "foo");
+    assertEquals(NameNode.getAddress(conf).getPort(), NameNode.DEFAULT_PORT);
+  }
+
+  public void testGetUri() {
+    assertEquals(NameNode.getUri(new InetSocketAddress("foo", 555)),
+                 URI.create("hdfs://foo:555"));
+    assertEquals(NameNode.getUri(new InetSocketAddress("foo",
+                                                       NameNode.DEFAULT_PORT)),
+                 URI.create("hdfs://foo"));
+  }
+}