Author: ddas
Date: Tue Feb 19 22:04:44 2008
New Revision: 629369

URL: http://svn.apache.org/viewvc?rev=629369&view=rev
Log:
HADOOP-2178.  Job History on DFS. Contributed by Amareshwari Sri Ramadasu.

Added:
    hadoop/core/trunk/src/java/org/apache/hadoop/mapred/OutputLogFilter.java
    hadoop/core/trunk/src/webapps/history/
    hadoop/core/trunk/src/webapps/history/analysejobhistory.jsp
    hadoop/core/trunk/src/webapps/history/index.html
    hadoop/core/trunk/src/webapps/history/jobconf_history.jsp
    hadoop/core/trunk/src/webapps/history/jobdetailshistory.jsp
    hadoop/core/trunk/src/webapps/history/jobhistory.jsp
    hadoop/core/trunk/src/webapps/history/jobtaskshistory.jsp
    hadoop/core/trunk/src/webapps/history/loadhistory.jsp
    hadoop/core/trunk/src/webapps/history/taskdetailshistory.jsp
Removed:
    hadoop/core/trunk/src/webapps/job/analysejobhistory.jsp
    hadoop/core/trunk/src/webapps/job/jobdetailshistory.jsp
    hadoop/core/trunk/src/webapps/job/jobhistory.jsp
    hadoop/core/trunk/src/webapps/job/jobtaskshistory.jsp
    hadoop/core/trunk/src/webapps/job/loadhistory.jsp
    hadoop/core/trunk/src/webapps/job/taskdetailshistory.jsp
Modified:
    hadoop/core/trunk/CHANGES.txt
    hadoop/core/trunk/build.xml
    hadoop/core/trunk/conf/hadoop-default.xml
    hadoop/core/trunk/docs/cluster_setup.html
    hadoop/core/trunk/docs/cluster_setup.pdf
    hadoop/core/trunk/docs/hadoop-default.html
    hadoop/core/trunk/docs/mapred_tutorial.html
    hadoop/core/trunk/docs/mapred_tutorial.pdf
    hadoop/core/trunk/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestMultipleCachefiles.java
    hadoop/core/trunk/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestSymLink.java
    hadoop/core/trunk/src/docs/src/documentation/content/xdocs/cluster_setup.xml
    hadoop/core/trunk/src/docs/src/documentation/content/xdocs/mapred_tutorial.xml
    hadoop/core/trunk/src/docs/src/documentation/content/xdocs/site.xml
    hadoop/core/trunk/src/java/org/apache/hadoop/mapred/DefaultJobHistoryParser.java
    hadoop/core/trunk/src/java/org/apache/hadoop/mapred/JobClient.java
    hadoop/core/trunk/src/java/org/apache/hadoop/mapred/JobHistory.java
    hadoop/core/trunk/src/java/org/apache/hadoop/mapred/JobTracker.java
    hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestClusterMapReduceTestCase.java
    hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestMiniMRClasspath.java
    hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestMiniMRWithDFS.java
    hadoop/core/trunk/src/test/org/apache/hadoop/mapred/pipes/TestPipes.java
    hadoop/core/trunk/src/webapps/job/jobtracker.jsp

Modified: hadoop/core/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/CHANGES.txt?rev=629369&r1=629368&r2=629369&view=diff
==============================================================================
--- hadoop/core/trunk/CHANGES.txt (original)
+++ hadoop/core/trunk/CHANGES.txt Tue Feb 19 22:04:44 2008
@@ -14,6 +14,8 @@
 
     HADOOP-1398.  Add HBase in-memory block cache.  (tomwhite)
 
+    HADOOP-2178.  Job History on DFS. (Amareshwari Sri Ramadasu via ddas)
+
   OPTIMIZATIONS
 
   BUG FIXES

Modified: hadoop/core/trunk/build.xml
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/build.xml?rev=629369&r1=629368&r2=629369&view=diff
==============================================================================
--- hadoop/core/trunk/build.xml (original)
+++ hadoop/core/trunk/build.xml Tue Feb 19 22:04:44 2008
@@ -178,6 +178,7 @@
     <mkdir dir="${build.webapps}/job/WEB-INF"/>
     <mkdir dir="${build.webapps}/dfs/WEB-INF"/>
     <mkdir dir="${build.webapps}/datanode/WEB-INF"/>
+    <mkdir dir="${build.webapps}/history/WEB-INF"/>
     <mkdir dir="${build.examples}"/>
     <mkdir dir="${build.anttasks}"/>
     <mkdir dir="${build.dir}/c++"/>
@@ -270,6 +271,13 @@
      outputdir="${build.src}"
      package="org.apache.hadoop.dfs"
      webxml="${build.webapps}/dfs/WEB-INF/web.xml">
+    </jsp-compile>
+
+    <jsp-compile
+     uriroot="${src.webapps}/history"
+     outputdir="${build.src}"
+     package="org.apache.hadoop.mapred"
+     webxml="${build.webapps}/history/WEB-INF/web.xml">
     </jsp-compile>
 
     <jsp-compile

Modified: hadoop/core/trunk/conf/hadoop-default.xml
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/conf/hadoop-default.xml?rev=629369&r1=629368&r2=629369&view=diff
==============================================================================
--- hadoop/core/trunk/conf/hadoop-default.xml (original)
+++ hadoop/core/trunk/conf/hadoop-default.xml Tue Feb 19 22:04:44 2008
@@ -36,6 +36,25 @@
 </property>
 
 <property>
+  <name>hadoop.job.history.location</name>
+  <value>file://${hadoop.log.dir}/history</value>
+  <description>If the job tracker is static, the history files are stored
+  in this single well-known place. By default, they are kept in the local
+  file system at ${hadoop.log.dir}/history.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.job.history.user.location</name>
+  <value></value>
+  <description>A user-specified location for storing the history files of
+  a particular job. If nothing is specified, the logs are stored under
+  "_logs/history/" in the job's output directory. Setting the value to
+  "none" disables this per-job logging.
+  </description>
+</property>
+
+<property>
   <name>dfs.namenode.logging.level</name>
   <value>info</value>
   <description>The logging level for dfs namenode. Other values are "dir"(trac
@@ -502,6 +521,15 @@
   <value>0.0.0.0:50030</value>
   <description>
     The job tracker http server address and port the server will listen on.
+    If the port is 0 then the server will start on a free port.
+  </description>
+</property>
+
+<property>
+  <name>mapred.job.history.http.bindAddress</name>
+  <value>0.0.0.0:0</value>
+  <description>
+    The job history http server bind address and port.
     If the port is 0 then the server will start on a free port.
   </description>
 </property>
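
For illustration, a minimal sketch of how a client might drive the two new
history settings from job code. This is not part of the patch; the class,
job name, and paths are made up, and only the property names and their
semantics come from the descriptions above.

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.mapred.JobClient;
    import org.apache.hadoop.mapred.JobConf;

    public class HistoryConfigExample {
      public static void main(String[] args) throws Exception {
        JobConf conf = new JobConf(HistoryConfigExample.class);
        conf.setJobName("history-config-example");
        // Keep this job's history files somewhere other than the default
        // <output dir>/_logs/history/ location (the path is hypothetical):
        conf.set("hadoop.job.history.user.location",
                 "/user/alice/job-histories");
        // ...or disable the per-job history copy entirely:
        // conf.set("hadoop.job.history.user.location", "none");
        conf.setInputPath(new Path(args[0]));
        conf.setOutputPath(new Path(args[1]));
        JobClient.runJob(conf);
      }
    }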

Modified: hadoop/core/trunk/docs/cluster_setup.html
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/docs/cluster_setup.html?rev=629369&r1=629368&r2=629369&view=diff
==============================================================================
--- hadoop/core/trunk/docs/cluster_setup.html (original)
+++ hadoop/core/trunk/docs/cluster_setup.html Tue Feb 19 22:04:44 2008
@@ -622,13 +622,41 @@
           Commons Logging</a> framework for logging. Edit the 
           <span class="codefrag">conf/log4j.properties</span> file to 
customize the Hadoop 
           daemons' logging configuration (log-formats and so on).</p>
+<a name="N1030D"></a><a name="History+Logging"></a>
+<h5>History Logging</h5>
+<p> The job history files are stored in the central location
+            <span class="codefrag">hadoop.job.history.location</span>, which
+            can also be on DFS and whose default value is
+            <span class="codefrag">${HADOOP_LOG_DIR}/history</span>.
+            The job history server is started on the job tracker, and the
+            history web UI is accessible from the job tracker web UI.</p>
+<p> The history files are also logged to the user-specified directory
+            <span class="codefrag">hadoop.job.history.user.location</span>,
+            which defaults to the job output directory. The files are stored
+            under "_logs/history/" in the specified directory; hence, by
+            default they will be in "mapred.output.dir/_logs/history/". The
+            user can disable this logging by setting
+            <span class="codefrag">hadoop.job.history.user.location</span>
+            to <span class="codefrag">none</span>.
+</p>
+<p> The user can view the logs in the specified directory with the
+            following command: <br>
+
+<span class="codefrag">$ bin/hadoop job -history output-dir</span>
+<br>
+            This starts a standalone Jetty server on the client, loads the
+            history JSPs, and prints the port on which the server is
+            listening. The server stays up for 30 minutes; use
+            <span class="codefrag">http://hostname:port</span> to view the
+            history. The user can also set the http bind address via
+            <span class="codefrag">mapred.job.history.http.bindAddress</span>.
+</p>
 <p>Once all the necessary configuration is complete, distribute the files
       to the <span class="codefrag">HADOOP_CONF_DIR</span> directory on all 
the machines, 
       typically <span class="codefrag">${HADOOP_HOME}/conf</span>.</p>
 </div>
     
     
-<a name="N10319"></a><a name="Hadoop+Startup"></a>
+<a name="N10343"></a><a name="Hadoop+Startup"></a>
 <h2 class="h3">Hadoop Startup</h2>
 <div class="section">
 <p>To start a Hadoop cluster you will need to start both the HDFS and 
@@ -663,7 +691,7 @@
 </div>
     
     
-<a name="N1035F"></a><a name="Hadoop+Shutdown"></a>
+<a name="N10389"></a><a name="Hadoop+Shutdown"></a>
 <h2 class="h3">Hadoop Shutdown</h2>
 <div class="section">
 <p>
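
The bin/hadoop job command documented above dispatches to JobClient, so the
embedded history viewer should also be reachable programmatically. A hedged
sketch, assuming JobClient's main() accepts the same -history argument as
the shell wrapper (the output directory is made up):

    import org.apache.hadoop.mapred.JobClient;

    public class ViewHistory {
      public static void main(String[] args) throws Exception {
        // Equivalent to: bin/hadoop job -history /user/alice/wordcount-out
        // Starts the standalone Jetty viewer and prints its port.
        JobClient.main(new String[] { "-history", "/user/alice/wordcount-out" });
      }
    }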

Modified: hadoop/core/trunk/docs/cluster_setup.pdf
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/docs/cluster_setup.pdf?rev=629369&r1=629368&r2=629369&view=diff
==============================================================================
--- hadoop/core/trunk/docs/cluster_setup.pdf (original)
+++ hadoop/core/trunk/docs/cluster_setup.pdf Tue Feb 19 22:04:44 2008
@@ -5,10 +5,10 @@
 /Producer (FOP 0.20.5) >>
 endobj
 5 0 obj
-<< /Length 622 /Filter [ /ASCII85Decode /FlateDecode ]
+<< /Length 623 /Filter [ /ASCII85Decode /FlateDecode ]
  >>
 stream
-[ASCII85-encoded PDF stream data, mangled by the mail archiver; elided]
+[ASCII85-encoded PDF stream data, mangled by the mail archiver; elided]
 endstream
 endobj
 6 0 obj
@@ -284,10 +284,10 @@
 >>
 endobj
 43 0 obj
-<< /Length 1565 /Filter [ /ASCII85Decode /FlateDecode ]
+<< /Length 2107 /Filter [ /ASCII85Decode /FlateDecode ]
  >>
 stream
-[ASCII85-encoded PDF stream data, mangled by the mail archiver; elided]
+[ASCII85-encoded PDF stream data, mangled by the mail archiver; elided]
 endstream
 endobj
 44 0 obj
@@ -298,154 +298,178 @@
 /Contents 43 0 R
 >>
 endobj
+45 0 obj
+<< /Length 898 /Filter [ /ASCII85Decode /FlateDecode ]
+ >>
+stream
+[ASCII85-encoded PDF stream data, mangled by the mail archiver; elided]
+endstream
+endobj
 46 0 obj
+<< /Type /Page
+/Parent 1 0 R
+/MediaBox [ 0 0 612 792 ]
+/Resources 3 0 R
+/Contents 45 0 R
+>>
+endobj
+48 0 obj
 <<
  /Title (\376\377\0\61\0\40\0\120\0\165\0\162\0\160\0\157\0\163\0\145)
- /Parent 45 0 R
- /Next 47 0 R
+ /Parent 47 0 R
+ /Next 49 0 R
  /A 9 0 R
 >> endobj
-47 0 obj
+49 0 obj
 <<
  /Title 
(\376\377\0\62\0\40\0\120\0\162\0\145\0\55\0\162\0\145\0\161\0\165\0\151\0\163\0\151\0\164\0\145\0\163)
- /Parent 45 0 R
- /Prev 46 0 R
- /Next 48 0 R
+ /Parent 47 0 R
+ /Prev 48 0 R
+ /Next 50 0 R
  /A 11 0 R
 >> endobj
-48 0 obj
+50 0 obj
 <<
  /Title 
(\376\377\0\63\0\40\0\111\0\156\0\163\0\164\0\141\0\154\0\154\0\141\0\164\0\151\0\157\0\156)
- /Parent 45 0 R
- /Prev 47 0 R
- /Next 49 0 R
+ /Parent 47 0 R
+ /Prev 49 0 R
+ /Next 51 0 R
  /A 13 0 R
 >> endobj
-49 0 obj
+51 0 obj
 <<
  /Title 
(\376\377\0\64\0\40\0\103\0\157\0\156\0\146\0\151\0\147\0\165\0\162\0\141\0\164\0\151\0\157\0\156)
- /Parent 45 0 R
- /First 50 0 R
- /Last 51 0 R
- /Prev 48 0 R
- /Next 62 0 R
- /Count -7
+ /Parent 47 0 R
+ /First 52 0 R
+ /Last 53 0 R
+ /Prev 50 0 R
+ /Next 66 0 R
+ /Count -8
  /A 15 0 R
 >> endobj
-50 0 obj
+52 0 obj
 <<
  /Title 
(\376\377\0\64\0\56\0\61\0\40\0\103\0\157\0\156\0\146\0\151\0\147\0\165\0\162\0\141\0\164\0\151\0\157\0\156\0\40\0\106\0\151\0\154\0\145\0\163)
- /Parent 49 0 R
- /Next 51 0 R
+ /Parent 51 0 R
+ /Next 53 0 R
  /A 17 0 R
 >> endobj
-51 0 obj
-<<
- /Title 
(\376\377\0\64\0\56\0\62\0\40\0\123\0\151\0\164\0\145\0\40\0\103\0\157\0\156\0\146\0\151\0\147\0\165\0\162\0\141\0\164\0\151\0\157\0\156)
- /Parent 49 0 R
- /First 53 0 R
- /Last 61 0 R
- /Prev 50 0 R
- /Count -5
- /A 19 0 R
->> endobj
 53 0 obj
 <<
- /Title 
(\376\377\0\64\0\56\0\62\0\56\0\61\0\40\0\103\0\157\0\156\0\146\0\151\0\147\0\165\0\162\0\151\0\156\0\147\0\40\0\164\0\150\0\145\0\40\0\105\0\156\0\166\0\151\0\162\0\157\0\156\0\155\0\145\0\156\0\164\0\40\0\157\0\146\0\40\0\164\0\150\0\145\0\40\0\110\0\141\0\144\0\157\0\157\0\160\0\40\0\104\0\141\0\145\0\155\0\157\0\156\0\163)
+ /Title 
(\376\377\0\64\0\56\0\62\0\40\0\123\0\151\0\164\0\145\0\40\0\103\0\157\0\156\0\146\0\151\0\147\0\165\0\162\0\141\0\164\0\151\0\157\0\156)
  /Parent 51 0 R
- /Next 55 0 R
- /A 52 0 R
+ /First 55 0 R
+ /Last 63 0 R
+ /Prev 52 0 R
+ /Count -6
+ /A 19 0 R
 >> endobj
 55 0 obj
 <<
- /Title 
(\376\377\0\64\0\56\0\62\0\56\0\62\0\40\0\103\0\157\0\156\0\146\0\151\0\147\0\165\0\162\0\151\0\156\0\147\0\40\0\164\0\150\0\145\0\40\0\110\0\141\0\144\0\157\0\157\0\160\0\40\0\104\0\141\0\145\0\155\0\157\0\156\0\163)
- /Parent 51 0 R
- /First 57 0 R
- /Last 57 0 R
- /Prev 53 0 R
- /Next 59 0 R
- /Count -1
+ /Title 
(\376\377\0\64\0\56\0\62\0\56\0\61\0\40\0\103\0\157\0\156\0\146\0\151\0\147\0\165\0\162\0\151\0\156\0\147\0\40\0\164\0\150\0\145\0\40\0\105\0\156\0\166\0\151\0\162\0\157\0\156\0\155\0\145\0\156\0\164\0\40\0\157\0\146\0\40\0\164\0\150\0\145\0\40\0\110\0\141\0\144\0\157\0\157\0\160\0\40\0\104\0\141\0\145\0\155\0\157\0\156\0\163)
+ /Parent 53 0 R
+ /Next 57 0 R
  /A 54 0 R
 >> endobj
 57 0 obj
 <<
- /Title 
(\376\377\0\64\0\56\0\62\0\56\0\62\0\56\0\61\0\40\0\122\0\145\0\141\0\154\0\55\0\127\0\157\0\162\0\154\0\144\0\40\0\103\0\154\0\165\0\163\0\164\0\145\0\162\0\40\0\103\0\157\0\156\0\146\0\151\0\147\0\165\0\162\0\141\0\164\0\151\0\157\0\156\0\163)
- /Parent 55 0 R
+ /Title 
(\376\377\0\64\0\56\0\62\0\56\0\62\0\40\0\103\0\157\0\156\0\146\0\151\0\147\0\165\0\162\0\151\0\156\0\147\0\40\0\164\0\150\0\145\0\40\0\110\0\141\0\144\0\157\0\157\0\160\0\40\0\104\0\141\0\145\0\155\0\157\0\156\0\163)
+ /Parent 53 0 R
+ /First 59 0 R
+ /Last 59 0 R
+ /Prev 55 0 R
+ /Next 61 0 R
+ /Count -1
  /A 56 0 R
 >> endobj
 59 0 obj
 <<
- /Title 
(\376\377\0\64\0\56\0\62\0\56\0\63\0\40\0\123\0\154\0\141\0\166\0\145\0\163)
- /Parent 51 0 R
- /Prev 55 0 R
- /Next 61 0 R
+ /Title 
(\376\377\0\64\0\56\0\62\0\56\0\62\0\56\0\61\0\40\0\122\0\145\0\141\0\154\0\55\0\127\0\157\0\162\0\154\0\144\0\40\0\103\0\154\0\165\0\163\0\164\0\145\0\162\0\40\0\103\0\157\0\156\0\146\0\151\0\147\0\165\0\162\0\141\0\164\0\151\0\157\0\156\0\163)
+ /Parent 57 0 R
  /A 58 0 R
 >> endobj
 61 0 obj
 <<
- /Title 
(\376\377\0\64\0\56\0\62\0\56\0\64\0\40\0\114\0\157\0\147\0\147\0\151\0\156\0\147)
- /Parent 51 0 R
- /Prev 59 0 R
+ /Title 
(\376\377\0\64\0\56\0\62\0\56\0\63\0\40\0\123\0\154\0\141\0\166\0\145\0\163)
+ /Parent 53 0 R
+ /Prev 57 0 R
+ /Next 63 0 R
  /A 60 0 R
 >> endobj
-62 0 obj
+63 0 obj
+<<
+ /Title 
(\376\377\0\64\0\56\0\62\0\56\0\64\0\40\0\114\0\157\0\147\0\147\0\151\0\156\0\147)
+ /Parent 53 0 R
+ /First 65 0 R
+ /Last 65 0 R
+ /Prev 61 0 R
+ /Count -1
+ /A 62 0 R
+>> endobj
+65 0 obj
+<<
+ /Title 
(\376\377\0\64\0\56\0\62\0\56\0\64\0\56\0\61\0\40\0\110\0\151\0\163\0\164\0\157\0\162\0\171\0\40\0\114\0\157\0\147\0\147\0\151\0\156\0\147)
+ /Parent 63 0 R
+ /A 64 0 R
+>> endobj
+66 0 obj
 <<
  /Title 
(\376\377\0\65\0\40\0\110\0\141\0\144\0\157\0\157\0\160\0\40\0\123\0\164\0\141\0\162\0\164\0\165\0\160)
- /Parent 45 0 R
- /Prev 49 0 R
- /Next 63 0 R
+ /Parent 47 0 R
+ /Prev 51 0 R
+ /Next 67 0 R
  /A 21 0 R
 >> endobj
-63 0 obj
+67 0 obj
 <<
  /Title 
(\376\377\0\66\0\40\0\110\0\141\0\144\0\157\0\157\0\160\0\40\0\123\0\150\0\165\0\164\0\144\0\157\0\167\0\156)
- /Parent 45 0 R
- /Prev 62 0 R
+ /Parent 47 0 R
+ /Prev 66 0 R
  /A 23 0 R
 >> endobj
-64 0 obj
+68 0 obj
 << /Type /Font
 /Subtype /Type1
 /Name /F3
 /BaseFont /Helvetica-Bold
 /Encoding /WinAnsiEncoding >>
 endobj
-65 0 obj
+69 0 obj
 << /Type /Font
 /Subtype /Type1
 /Name /F5
 /BaseFont /Times-Roman
 /Encoding /WinAnsiEncoding >>
 endobj
-66 0 obj
+70 0 obj
 << /Type /Font
 /Subtype /Type1
 /Name /F6
 /BaseFont /Times-Italic
 /Encoding /WinAnsiEncoding >>
 endobj
-67 0 obj
+71 0 obj
 << /Type /Font
 /Subtype /Type1
 /Name /F1
 /BaseFont /Helvetica
 /Encoding /WinAnsiEncoding >>
 endobj
-68 0 obj
+72 0 obj
 << /Type /Font
 /Subtype /Type1
 /Name /F9
 /BaseFont /Courier
 /Encoding /WinAnsiEncoding >>
 endobj
-69 0 obj
+73 0 obj
 << /Type /Font
 /Subtype /Type1
 /Name /F2
 /BaseFont /Helvetica-Oblique
 /Encoding /WinAnsiEncoding >>
 endobj
-70 0 obj
+74 0 obj
 << /Type /Font
 /Subtype /Type1
 /Name /F7
@@ -454,19 +478,19 @@
 endobj
 1 0 obj
 << /Type /Pages
-/Count 6
-/Kids [6 0 R 25 0 R 33 0 R 35 0 R 39 0 R 44 0 R ] >>
+/Count 7
+/Kids [6 0 R 25 0 R 33 0 R 35 0 R 39 0 R 44 0 R 46 0 R ] >>
 endobj
 2 0 obj
 << /Type /Catalog
 /Pages 1 0 R
- /Outlines 45 0 R
+ /Outlines 47 0 R
  /PageMode /UseOutlines
  >>
 endobj
 3 0 obj
 << 
-/Font << /F3 64 0 R /F5 65 0 R /F1 67 0 R /F6 66 0 R /F9 68 0 R /F2 69 0 R /F7 
70 0 R >> 
+/Font << /F3 68 0 R /F5 69 0 R /F1 71 0 R /F6 70 0 R /F9 72 0 R /F2 73 0 R /F7 
74 0 R >> 
 /ProcSet [ /PDF /ImageC /Text ] >> 
 endobj
 9 0 obj
@@ -508,129 +532,139 @@
 21 0 obj
 <<
 /S /GoTo
-/D [44 0 R /XYZ 85.0 607.4 null]
+/D [44 0 R /XYZ 85.0 357.509 null]
 >>
 endobj
 23 0 obj
 <<
 /S /GoTo
-/D [44 0 R /XYZ 85.0 369.866 null]
+/D [46 0 R /XYZ 85.0 659.0 null]
 >>
 endobj
-45 0 obj
+47 0 obj
 <<
- /First 46 0 R
- /Last 63 0 R
+ /First 48 0 R
+ /Last 67 0 R
 >> endobj
-52 0 obj
+54 0 obj
 <<
 /S /GoTo
 /D [33 0 R /XYZ 85.0 573.347 null]
 >>
 endobj
-54 0 obj
+56 0 obj
 <<
 /S /GoTo
 /D [33 0 R /XYZ 85.0 408.775 null]
 >>
 endobj
-56 0 obj
+58 0 obj
 <<
 /S /GoTo
 /D [35 0 R /XYZ 85.0 399.6 null]
 >>
 endobj
-58 0 obj
+60 0 obj
 <<
 /S /GoTo
 /D [39 0 R /XYZ 85.0 282.85 null]
 >>
 endobj
-60 0 obj
+62 0 obj
 <<
 /S /GoTo
 /D [39 0 R /XYZ 85.0 185.078 null]
 >>
 endobj
+64 0 obj
+<<
+/S /GoTo
+/D [44 0 R /XYZ 85.0 641.8 null]
+>>
+endobj
 xref
-0 71
+0 75
 0000000000 65535 f 
-0000020841 00000 n 
-0000020934 00000 n 
-0000021026 00000 n 
+0000022719 00000 n 
+0000022819 00000 n 
+0000022911 00000 n 
 0000000015 00000 n 
 0000000071 00000 n 
-0000000784 00000 n 
-0000000904 00000 n 
-0000000978 00000 n 
-0000021171 00000 n 
-0000001113 00000 n 
-0000021234 00000 n 
-0000001250 00000 n 
-0000021300 00000 n 
-0000001385 00000 n 
-0000021366 00000 n 
-0000001522 00000 n 
-0000021432 00000 n 
-0000001659 00000 n 
-0000021498 00000 n 
-0000001796 00000 n 
-0000021562 00000 n 
-0000001932 00000 n 
-0000021626 00000 n 
-0000002069 00000 n 
-0000004300 00000 n 
-0000004423 00000 n 
-0000004478 00000 n 
-0000004647 00000 n 
-0000004824 00000 n 
-0000005000 00000 n 
-0000005213 00000 n 
-0000005412 00000 n 
-0000008375 00000 n 
-0000008483 00000 n 
-0000011507 00000 n 
-0000011630 00000 n 
-0000011657 00000 n 
-0000011864 00000 n 
-0000014853 00000 n 
-0000014976 00000 n 
-0000015010 00000 n 
-0000015194 00000 n 
-0000015382 00000 n 
-0000017040 00000 n 
-0000021692 00000 n 
-0000017148 00000 n 
-0000017281 00000 n 
-0000017470 00000 n 
-0000017648 00000 n 
-0000017872 00000 n 
-0000018087 00000 n 
-0000021743 00000 n 
-0000018336 00000 n 
-0000021809 00000 n 
-0000018736 00000 n 
-0000021875 00000 n 
-0000019079 00000 n 
-0000021939 00000 n 
-0000019382 00000 n 
-0000022004 00000 n 
-0000019544 00000 n 
-0000019698 00000 n 
-0000019887 00000 n 
-0000020068 00000 n 
-0000020181 00000 n 
-0000020291 00000 n 
-0000020402 00000 n 
-0000020510 00000 n 
-0000020616 00000 n 
-0000020732 00000 n 
+0000000785 00000 n 
+0000000905 00000 n 
+0000000979 00000 n 
+0000023056 00000 n 
+0000001114 00000 n 
+0000023119 00000 n 
+0000001251 00000 n 
+0000023185 00000 n 
+0000001386 00000 n 
+0000023251 00000 n 
+0000001523 00000 n 
+0000023317 00000 n 
+0000001660 00000 n 
+0000023383 00000 n 
+0000001797 00000 n 
+0000023447 00000 n 
+0000001933 00000 n 
+0000023513 00000 n 
+0000002070 00000 n 
+0000004301 00000 n 
+0000004424 00000 n 
+0000004479 00000 n 
+0000004648 00000 n 
+0000004825 00000 n 
+0000005001 00000 n 
+0000005214 00000 n 
+0000005413 00000 n 
+0000008376 00000 n 
+0000008484 00000 n 
+0000011508 00000 n 
+0000011631 00000 n 
+0000011658 00000 n 
+0000011865 00000 n 
+0000014854 00000 n 
+0000014977 00000 n 
+0000015011 00000 n 
+0000015195 00000 n 
+0000015383 00000 n 
+0000017583 00000 n 
+0000017691 00000 n 
+0000018681 00000 n 
+0000023577 00000 n 
+0000018789 00000 n 
+0000018922 00000 n 
+0000019111 00000 n 
+0000019289 00000 n 
+0000019513 00000 n 
+0000019728 00000 n 
+0000023628 00000 n 
+0000019977 00000 n 
+0000023694 00000 n 
+0000020377 00000 n 
+0000023760 00000 n 
+0000020720 00000 n 
+0000023824 00000 n 
+0000021023 00000 n 
+0000023889 00000 n 
+0000021185 00000 n 
+0000023955 00000 n 
+0000021379 00000 n 
+0000021576 00000 n 
+0000021765 00000 n 
+0000021946 00000 n 
+0000022059 00000 n 
+0000022169 00000 n 
+0000022280 00000 n 
+0000022388 00000 n 
+0000022494 00000 n 
+0000022610 00000 n 
 trailer
 <<
-/Size 71
+/Size 75
 /Root 2 0 R
 /Info 4 0 R
 >>
 startxref
-22070
+24019
 %%EOF

Modified: hadoop/core/trunk/docs/hadoop-default.html
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/docs/hadoop-default.html?rev=629369&r1=629368&r2=629369&view=diff
==============================================================================
--- hadoop/core/trunk/docs/hadoop-default.html (original)
+++ hadoop/core/trunk/docs/hadoop-default.html Tue Feb 19 22:04:44 2008
@@ -17,6 +17,19 @@
 <td><a 
name="hadoop.logfile.count">hadoop.logfile.count</a></td><td>10</td><td>The max 
number of log files</td>
 </tr>
 <tr>
+<td><a name="hadoop.job.history.location">hadoop.job.history.location</a></td><td>file://${hadoop.log.dir}/history</td><td>If the job tracker is static, the history files are stored
+  in this single well-known place. By default, they are kept in the local
+  file system at ${hadoop.log.dir}/history.
+  </td>
+</tr>
+<tr>
+<td><a name="hadoop.job.history.user.location">hadoop.job.history.user.location</a></td><td></td><td>A user-specified location for storing the history files of
+  a particular job. If nothing is specified, the logs are stored under
+  "_logs/history/" in the job's output directory. Setting the value to
+  "none" disables this per-job logging.
+  </td>
+</tr>
+<tr>
 <td><a 
name="dfs.namenode.logging.level">dfs.namenode.logging.level</a></td><td>info</td><td>The
 logging level for dfs namenode. Other values are "dir"(trac
 e namespace mutations), "block"(trace block under/over replications and block
 creations/deletions), or "all".</td>
@@ -62,10 +75,6 @@
   determine the host, port, etc. for a filesystem.</td>
 </tr>
 <tr>
-<td><a name="fs.trash.root">fs.trash.root</a></td><td>${hadoop.tmp.dir}/Trash</td><td>The trash directory, used by FsShell's 'rm' command.
-  </td>
-</tr>
-<tr>
 <td><a name="fs.trash.interval">fs.trash.interval</a></td><td>0</td><td>Number 
of minutes between trash checkpoints.
   If zero, the trash feature is disabled.
   </td>
@@ -106,25 +115,25 @@
   </td>
 </tr>
 <tr>
-<td><a name="dfs.secondary.http.bindAddress">dfs.secondary.http.bindAddress</a></td><td>0.0.0.0:50090</td><td>
-    The secondary namenode http server bind address and port.
+<td><a name="dfs.secondary.http.address">dfs.secondary.http.address</a></td><td>0.0.0.0:50090</td><td>
+    The secondary namenode http server address and port.
     If the port is 0 then the server will start on a free port.
   </td>
 </tr>
 <tr>
-<td><a name="dfs.datanode.bindAddress">dfs.datanode.bindAddress</a></td><td>0.0.0.0:50010</td><td>
-    The address where the datanode will listen to.
+<td><a name="dfs.datanode.address">dfs.datanode.address</a></td><td>0.0.0.0:50010</td><td>
+    The address where the datanode server will listen to.
     If the port is 0 then the server will start on a free port.
   </td>
 </tr>
 <tr>
-<td><a name="dfs.datanode.http.bindAddress">dfs.datanode.http.bindAddress</a></td><td>0.0.0.0:50075</td><td>
-    The datanode http server bind address and port.
+<td><a name="dfs.datanode.http.address">dfs.datanode.http.address</a></td><td>0.0.0.0:50075</td><td>
+    The datanode http server address and port.
     If the port is 0 then the server will start on a free port.
   </td>
 </tr>
 <tr>
-<td><a name="dfs.http.bindAddress">dfs.http.bindAddress</a></td><td>0.0.0.0:50070</td><td>
+<td><a name="dfs.http.address">dfs.http.address</a></td><td>0.0.0.0:50070</td><td>
     The address and the base port where the dfs namenode web ui will listen on.
     If the port is 0 then the server will start on a free port.
   </td>
@@ -163,6 +172,11 @@
       directories, for redundancy. </td>
 </tr>
 <tr>
+<td><a name="dfs.web.ugi">dfs.web.ugi</a></td><td>webuser,webgroup</td><td>The user account used by the web interface.
+    Syntax: USERNAME,GROUP1,GROUP2, ...
+  </td>
+</tr>
+<tr>
 <td><a name="dfs.permissions">dfs.permissions</a></td><td>true</td><td>
     If "true", enable permission checking in HDFS.
     If "false", permission checking is turned off,
@@ -267,6 +281,12 @@
   </td>
 </tr>
 <tr>
+<td><a name="dfs.namenode.decommission.interval">dfs.namenode.decommission.interval</a></td><td>300</td><td>Namenode periodicity in seconds to check if decommission is complete.</td>
+</tr>
+<tr>
+<td><a name="dfs.replication.interval">dfs.replication.interval</a></td><td>3</td><td>The periodicity in seconds with which the namenode computes replication work for datanodes.</td>
+</tr>
+<tr>
 <td><a 
name="fs.s3.block.size">fs.s3.block.size</a></td><td>67108864</td><td>Block 
size to use when writing files to S3.</td>
 </tr>
 <tr>
@@ -291,8 +311,14 @@
   </td>
 </tr>
 <tr>
-<td><a name="mapred.job.tracker.http.bindAddress">mapred.job.tracker.http.bindAddress</a></td><td>0.0.0.0:50030</td><td>
-    The job tracker http server bind address and port.
+<td><a name="mapred.job.tracker.http.address">mapred.job.tracker.http.address</a></td><td>0.0.0.0:50030</td><td>
+    The job tracker http server address and port the server will listen on.
+    If the port is 0 then the server will start on a free port.
+  </td>
+</tr>
+<tr>
+<td><a name="mapred.job.history.http.bindAddress">mapred.job.history.http.bindAddress</a></td><td>0.0.0.0:0</td><td>
+    The job history http server bind address and port.
     If the port is 0 then the server will start on a free port.
   </td>
 </tr>
@@ -303,8 +329,10 @@
   </td>
 </tr>
 <tr>
-<td><a name="mapred.task.tracker.report.bindAddress">mapred.task.tracker.report.bindAddress</a></td><td>127.0.0.1:0</td><td>The interface that task processes use to communicate
-  with their parent tasktracker process.</td>
+<td><a name="mapred.task.tracker.report.address">mapred.task.tracker.report.address</a></td><td>127.0.0.1:0</td><td>The interface and port that the task tracker server listens on.
+  Since it is only connected to by the tasks, it uses the local interface.
+  EXPERT ONLY. Should only be changed if your host does not have the loopback
+  interface.</td>
 </tr>
 <tr>
 <td><a 
name="mapred.local.dir">mapred.local.dir</a></td><td>${hadoop.tmp.dir}/mapred/local</td><td>The
 local directory where MapReduce stores intermediate
@@ -410,6 +438,15 @@
   </td>
 </tr>
 <tr>
+<td><a name="mapred.child.tmp">mapred.child.tmp</a></td><td>./tmp</td><td>Sets the tmp directory for map and reduce tasks.
+  If the value is an absolute path, it is used directly. Otherwise, it is
+  prepended with the task's working directory. Java tasks are executed with
+  the option -Djava.io.tmpdir='the absolute path of the tmp dir'. Pipes and
+  streaming tasks are given the environment variable
+  TMPDIR='the absolute path of the tmp dir'.
+  </td>
+</tr>
+<tr>
 <td><a 
name="mapred.inmem.merge.threshold">mapred.inmem.merge.threshold</a></td><td>1000</td><td>The
 threshold, in terms of the number of files 
   for the in-memory merge process. When we accumulate threshold number of files
   we initiate the in-memory merge and spill to disk. A value of 0 or less than
@@ -452,8 +489,8 @@
   </td>
 </tr>
 <tr>
-<td><a name="mapred.task.tracker.http.bindAddress">mapred.task.tracker.http.bindAddress</a></td><td>0.0.0.0:50060</td><td>
-    The task tracker http server bind address and port.
+<td><a name="mapred.task.tracker.http.address">mapred.task.tracker.http.address</a></td><td>0.0.0.0:50060</td><td>
+    The task tracker http server address and port.
     If the port is 0 then the server will start on a free port.
   </td>
 </tr>
@@ -564,6 +601,22 @@
     </td>
 </tr>
 <tr>
+<td><a name="mapred.task.profile">mapred.task.profile</a></td><td>false</td><td>Whether the system should collect profiler
+     information for some of the tasks in this job. The information is stored
+     in the user log directory. The value is "true" if task profiling
+     is enabled.</td>
+</tr>
+<tr>
+<td><a name="mapred.task.profile.maps">mapred.task.profile.maps</a></td><td>0-2</td><td>The ranges of map tasks to profile.
+    mapred.task.profile has to be set to true for this value to take effect.
+    </td>
+</tr>
+<tr>
+<td><a name="mapred.task.profile.reduces">mapred.task.profile.reduces</a></td><td>0-2</td><td>The ranges of reduce tasks to profile.
+    mapred.task.profile has to be set to true for this value to take effect.
+    </td>
+</tr>
+<tr>
 <td><a 
name="ipc.client.timeout">ipc.client.timeout</a></td><td>60000</td><td>Defines 
the timeout for IPC calls in milliseconds.</td>
 </tr>
 <tr>
@@ -593,6 +646,18 @@
 <tr>
 <td><a 
name="ipc.server.listen.queue.size">ipc.server.listen.queue.size</a></td><td>128</td><td>Indicates
 the length of the listen queue for servers accepting
                client connections.
+  </td>
+</tr>
+<tr>
+<td><a name="ipc.server.tcpnodelay">ipc.server.tcpnodelay</a></td><td>false</td><td>Turn on/off Nagle's algorithm for the TCP socket connection on
+  the server. Setting it to true disables the algorithm and may decrease
+  latency at the cost of more, smaller packets.
+  </td>
+</tr>
+<tr>
+<td><a name="ipc.client.tcpnodelay">ipc.client.tcpnodelay</a></td><td>false</td><td>Turn on/off Nagle's algorithm for the TCP socket connection on
+  the client. Setting it to true disables the algorithm and may decrease
+  latency at the cost of more, smaller packets.
   </td>
 </tr>
 <tr>
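
Several of the properties documented above are per-job knobs rather than
cluster settings. A minimal sketch of setting the new profiling properties
from client code; only the property names and values come from the table
above, and the surrounding setup is illustrative:

    import org.apache.hadoop.mapred.JobConf;

    public class ProfilingConfig {
      // Turn on task profiling for the first few map and reduce tasks.
      public static JobConf enableProfiling(JobConf conf) {
        conf.set("mapred.task.profile", "true");        // collect profiler info
        conf.set("mapred.task.profile.maps", "0-2");    // profile map tasks 0-2
        conf.set("mapred.task.profile.reduces", "0-2"); // profile reduce tasks 0-2
        return conf;
      }
    }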

Modified: hadoop/core/trunk/docs/mapred_tutorial.html
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/docs/mapred_tutorial.html?rev=629369&r1=629368&r2=629369&view=diff
==============================================================================
--- hadoop/core/trunk/docs/mapred_tutorial.html (original)
+++ hadoop/core/trunk/docs/mapred_tutorial.html Tue Feb 19 22:04:44 2008
@@ -283,7 +283,7 @@
 <a href="#Example%3A+WordCount+v2.0">Example: WordCount v2.0</a>
 <ul class="minitoc">
 <li>
-<a href="#Source+Code-N10BBE">Source Code</a>
+<a href="#Source+Code-N10BDE">Source Code</a>
 </li>
 <li>
 <a href="#Sample+Runs">Sample Runs</a>
@@ -1570,10 +1570,34 @@
           </li>
         
 </ol>
+<p> Job history files are also logged to the user-specified directory
+        <span class="codefrag">hadoop.job.history.user.location</span>,
+        which defaults to the job output directory. The files are stored
+        under "_logs/history/" in the specified directory; hence, by default
+        they will be in mapred.output.dir/_logs/history. The user can
+        disable this logging by setting
+        <span class="codefrag">hadoop.job.history.user.location</span>
+        to <span class="codefrag">none</span>.
+</p>
+<p> The user can view the logs in the specified directory with the
+        following command: <br>
+
+<span class="codefrag">$ bin/hadoop job -history output-dir</span>
+<br>
+        This starts a standalone Jetty server on the client, loads the
+        history JSPs, and prints the port on which the server is listening.
+        The server stays up for 30 minutes; use
+        <span class="codefrag">http://hostname:port</span> to view the
+        history. The user can also set the http bind address via
+        <span class="codefrag">mapred.job.history.http.bindAddress</span>.
+</p>
+<p> The user can use
+        <a href="api/org/apache/hadoop/mapred/OutputLogFilter.html">OutputLogFilter</a>
+        to filter log files from the output directory listing. </p>
 <p>Normally the user creates the application, describes various facets 
         of the job via <span class="codefrag">JobConf</span>, and then uses 
the 
         <span class="codefrag">JobClient</span> to submit the job and monitor 
its progress.</p>
-<a name="N108F7"></a><a name="Job+Control"></a>
+<a name="N10917"></a><a name="Job+Control"></a>
 <h4>Job Control</h4>
 <p>Users may need to chain map-reduce jobs to accomplish complex
           tasks which cannot be done via a single map-reduce job. This is 
fairly
@@ -1609,7 +1633,7 @@
             </li>
           
 </ul>
-<a name="N10921"></a><a name="Job+Input"></a>
+<a name="N10941"></a><a name="Job+Input"></a>
 <h3 class="h4">Job Input</h3>
 <p>
 <a href="api/org/apache/hadoop/mapred/InputFormat.html">
@@ -1657,7 +1681,7 @@
         appropriate <span class="codefrag">CompressionCodec</span>. However, 
it must be noted that
         compressed files with the above extensions cannot be <em>split</em> 
and 
         each compressed file is processed in its entirety by a single 
mapper.</p>
-<a name="N1098B"></a><a name="InputSplit"></a>
+<a name="N109AB"></a><a name="InputSplit"></a>
 <h4>InputSplit</h4>
 <p>
 <a href="api/org/apache/hadoop/mapred/InputSplit.html">
@@ -1671,7 +1695,7 @@
           FileSplit</a> is the default <span 
class="codefrag">InputSplit</span>. It sets 
           <span class="codefrag">map.input.file</span> to the path of the 
input file for the
           logical split.</p>
-<a name="N109B0"></a><a name="RecordReader"></a>
+<a name="N109D0"></a><a name="RecordReader"></a>
 <h4>RecordReader</h4>
 <p>
 <a href="api/org/apache/hadoop/mapred/RecordReader.html">
@@ -1683,7 +1707,7 @@
           for processing. <span class="codefrag">RecordReader</span> thus 
assumes the 
           responsibility of processing record boundaries and presents the 
tasks 
           with keys and values.</p>
-<a name="N109D3"></a><a name="Job+Output"></a>
+<a name="N109F3"></a><a name="Job+Output"></a>
 <h3 class="h4">Job Output</h3>
 <p>
 <a href="api/org/apache/hadoop/mapred/OutputFormat.html">
@@ -1708,7 +1732,7 @@
 <p>
 <span class="codefrag">TextOutputFormat</span> is the default 
         <span class="codefrag">OutputFormat</span>.</p>
-<a name="N109FC"></a><a name="Task+Side-Effect+Files"></a>
+<a name="N10A1C"></a><a name="Task+Side-Effect+Files"></a>
 <h4>Task Side-Effect Files</h4>
 <p>In some applications, component tasks need to create and/or write to
           side-files, which differ from the actual job-output files.</p>
@@ -1734,7 +1758,7 @@
           JobConf.getOutputPath()</a>, and the framework will promote them 
           similarly for succesful task-attempts, thus eliminating the need to 
           pick unique paths per task-attempt.</p>
-<a name="N10A31"></a><a name="RecordWriter"></a>
+<a name="N10A51"></a><a name="RecordWriter"></a>
 <h4>RecordWriter</h4>
 <p>
 <a href="api/org/apache/hadoop/mapred/RecordWriter.html">
@@ -1742,9 +1766,9 @@
           pairs to an output file.</p>
 <p>RecordWriter implementations write the job outputs to the 
           <span class="codefrag">FileSystem</span>.</p>
-<a name="N10A48"></a><a name="Other+Useful+Features"></a>
+<a name="N10A68"></a><a name="Other+Useful+Features"></a>
 <h3 class="h4">Other Useful Features</h3>
-<a name="N10A4E"></a><a name="Counters"></a>
+<a name="N10A6E"></a><a name="Counters"></a>
 <h4>Counters</h4>
 <p>
 <span class="codefrag">Counters</span> represent global counters, defined 
either by 
@@ -1758,7 +1782,7 @@
           Reporter.incrCounter(Enum, long)</a> in the <span 
class="codefrag">map</span> and/or 
           <span class="codefrag">reduce</span> methods. These counters are 
then globally 
           aggregated by the framework.</p>
-<a name="N10A79"></a><a name="DistributedCache"></a>
+<a name="N10A99"></a><a name="DistributedCache"></a>
 <h4>DistributedCache</h4>
 <p>
 <a href="api/org/apache/hadoop/filecache/DistributedCache.html">
@@ -1791,7 +1815,7 @@
           <a 
href="api/org/apache/hadoop/filecache/DistributedCache.html#createSymlink(org.apache.hadoop.conf.Configuration)">
           DistributedCache.createSymlink(Path, Configuration)</a> api. Files 
           have <em>execution permissions</em> set.</p>
-<a name="N10AB7"></a><a name="Tool"></a>
+<a name="N10AD7"></a><a name="Tool"></a>
 <h4>Tool</h4>
 <p>The <a href="api/org/apache/hadoop/util/Tool.html">Tool</a> 
           interface supports the handling of generic Hadoop command-line 
options.
@@ -1831,7 +1855,7 @@
             </span>
           
 </p>
-<a name="N10AE9"></a><a name="IsolationRunner"></a>
+<a name="N10B09"></a><a name="IsolationRunner"></a>
 <h4>IsolationRunner</h4>
 <p>
 <a href="api/org/apache/hadoop/mapred/IsolationRunner.html">
@@ -1855,13 +1879,13 @@
 <p>
 <span class="codefrag">IsolationRunner</span> will run the failed task in a 
single 
           jvm, which can be in the debugger, over precisely the same input.</p>
-<a name="N10B1C"></a><a name="JobControl"></a>
+<a name="N10B3C"></a><a name="JobControl"></a>
 <h4>JobControl</h4>
 <p>
 <a href="api/org/apache/hadoop/mapred/jobcontrol/package-summary.html">
           JobControl</a> is a utility which encapsulates a set of Map-Reduce 
jobs
           and their dependencies.</p>
-<a name="N10B29"></a><a name="Data+Compression"></a>
+<a name="N10B49"></a><a name="Data+Compression"></a>
 <h4>Data Compression</h4>
 <p>Hadoop Map-Reduce provides facilities for the application-writer to
           specify compression for both intermediate map-outputs and the
@@ -1875,7 +1899,7 @@
           codecs for reasons of both performance (zlib) and non-availability of
           Java libraries (lzo). More details on their usage and availability 
are
           available <a href="native_libraries.html">here</a>.</p>
-<a name="N10B49"></a><a name="Intermediate+Outputs"></a>
+<a name="N10B69"></a><a name="Intermediate+Outputs"></a>
 <h5>Intermediate Outputs</h5>
 <p>Applications can control compression of intermediate map-outputs
             via the 
@@ -1896,7 +1920,7 @@
             <a 
href="api/org/apache/hadoop/mapred/JobConf.html#setMapOutputCompressionType(org.apache.hadoop.io.SequenceFile.CompressionType)">
             
JobConf.setMapOutputCompressionType(SequenceFile.CompressionType)</a> 
             api.</p>
-<a name="N10B75"></a><a name="Job+Outputs"></a>
+<a name="N10B95"></a><a name="Job+Outputs"></a>
 <h5>Job Outputs</h5>
 <p>Applications can control compression of job-outputs via the
             <a 
href="api/org/apache/hadoop/mapred/OutputFormatBase.html#setCompressOutput(org.apache.hadoop.mapred.JobConf,%20boolean)">
@@ -1916,7 +1940,7 @@
 </div>
 
     
-<a name="N10BA4"></a><a name="Example%3A+WordCount+v2.0"></a>
+<a name="N10BC4"></a><a name="Example%3A+WordCount+v2.0"></a>
 <h2 class="h3">Example: WordCount v2.0</h2>
 <div class="section">
 <p>Here is a more complete <span class="codefrag">WordCount</span> which uses 
many of the
@@ -1926,7 +1950,7 @@
       <a href="quickstart.html#SingleNodeSetup">pseudo-distributed</a> or
       <a 
href="quickstart.html#Fully-Distributed+Operation">fully-distributed</a> 
       Hadoop installation.</p>
-<a name="N10BBE"></a><a name="Source+Code-N10BBE"></a>
+<a name="N10BDE"></a><a name="Source+Code-N10BDE"></a>
 <h3 class="h4">Source Code</h3>
 <table class="ForrestTable" cellspacing="1" cellpadding="4">
           
@@ -3136,7 +3160,7 @@
 </tr>
         
 </table>
-<a name="N11320"></a><a name="Sample+Runs"></a>
+<a name="N11340"></a><a name="Sample+Runs"></a>
 <h3 class="h4">Sample Runs</h3>
 <p>Sample text-files as input:</p>
 <p>
@@ -3304,7 +3328,7 @@
 <br>
         
 </p>
-<a name="N113F4"></a><a name="Highlights"></a>
+<a name="N11414"></a><a name="Highlights"></a>
 <h3 class="h4">Highlights</h3>
 <p>The second version of <span class="codefrag">WordCount</span> improves upon 
the 
         previous one by using some features offered by the Map-Reduce 
framework:
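
Since per-job history now lands under _logs/ inside the job output
directory, the tutorial points at the new OutputLogFilter class for output
listings. A hedged usage sketch, assuming OutputLogFilter implements
PathFilter and that FileSystem offers a listPaths(Path, PathFilter)
overload; the output path is made up:

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapred.OutputLogFilter;

    public class ListOutputs {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new JobConf());
        Path outDir = new Path("/user/alice/wordcount-out");
        // Lists only the real outputs (e.g. part-00000), skipping the
        // _logs/history/ files that this patch adds to the output dir.
        Path[] outputs = fs.listPaths(outDir, new OutputLogFilter());
        for (Path p : outputs) {
          System.out.println(p);
        }
      }
    }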

