Modified: flink/site/docs/0.6-incubating/cluster_setup.html
URL: 
http://svn.apache.org/viewvc/flink/site/docs/0.6-incubating/cluster_setup.html?rev=1657551&r1=1657550&r2=1657551&view=diff
==============================================================================
--- flink/site/docs/0.6-incubating/cluster_setup.html (original)
+++ flink/site/docs/0.6-incubating/cluster_setup.html Thu Feb  5 12:21:38 2015
@@ -5,110 +5,153 @@
     <meta http-equiv="X-UA-Compatible" content="IE=edge">
     <meta name="viewport" content="width=device-width, initial-scale=1">
     <title>Apache Flink (incubating): Cluster Setup</title>
-    <link rel="stylesheet" href="/css/bootstrap.css">
-    <link rel="stylesheet" href="/css/bootstrap-lumen-custom.css">
-    <link rel="stylesheet" href="/css/syntax.css">
-    <link rel="/css/custom.css">
-    <link rel="css/codetabs.css">
-    <link 
href="//maxcdn.bootstrapcdn.com/font-awesome/4.1.0/css/font-awesome.min.css" 
rel="stylesheet">
+    <link rel="shortcut icon" href="favicon.ico" type="image/x-icon">
+    <link rel="icon" href="favicon.ico" type="image/x-icon">
+    <link rel="stylesheet" href="css/bootstrap.css">
+    <link rel="stylesheet" href="css/bootstrap-lumen-custom.css">
+    <link rel="stylesheet" href="css/syntax.css">
+    <link rel="stylesheet" href="css/custom.css">
+    <link href="css/main/main.css" rel="stylesheet">
     <script 
src="https://ajax.googleapis.com/ajax/libs/jquery/1.11.0/jquery.min.js";></script>
-    <script src="/js/bootstrap.min.js"></script>
+    <script src="js/bootstrap.min.js"></script>
     <script src="js/codetabs.js"></script>
   </head>
   <body>
 
     <nav class="navbar navbar-default navbar-fixed-top" role="navigation">
   <div class="container">
-    <div class="navbar-header">
-      <button type="button" class="navbar-toggle" data-toggle="collapse" 
data-target=".navbar-collapse">
-        <span class="sr-only">Toggle navigation</span>
-        <span class="icon-bar"></span>
-        <span class="icon-bar"></span>
-        <span class="icon-bar"></span>
-      </button>
-      <a class="navbar-brand" href="http://flink.incubator.apache.org";>Apache 
Flink</a>
-    </div>
-
-    <div class="collapse navbar-collapse" id="navbar-collapse-1">
-      <ul class="nav navbar-nav">
+    <div class="row">
+      <div class="col-md-1 af-mobile-nav-bar">
+       <a href="index.html" title="Home">
+         <img class="hidden-xs hidden-sm img-responsive"
+              src="img/logo.png" alt="Apache Flink Logo">
+       </a>    
+       <div class="row visible-xs">
+         <div class="col-xs-3">
+           <a href="index.html" title="Home">  
+             <img class="hidden-x hidden-sm img-responsive"
+                  src="img/logo.png" alt="Apache Flink Logo">
+           </a>        
+         </div>
+         <div class="col-xs-5"></div>
+         <div class="col-xs-4">
+           <div class="af-mobile-btn">
+             <span class="glyphicon glyphicon-plus"></span>
+           </div>
+         </div>
+       </div>
+      </div>
+      <!-- Navigation -->
+      <div class="col-md-11">
+       <div class="collapse navbar-collapse" id="navbar-collapse-1">
+         <ul class="nav navbar-nav">
+
+           <li>
+             <a href="index.html" class="">Documentation</a>
+           </li>
+
+           <li>
+             <a href="api/java/index.html">Javadoc</a>
+           </li>
+
+           <li>
+             <a 
href="api/scala/index.html#org.apache.flink.api.scala.package">Scaladoc</a>
+           </li>
 
-        <li>
-          <a href="index.html" class="">Documentation</a>
-        </li>
-
-        <li>
-          <a href="api/java/index.html">Javadoc</a>
-        </li>
-      </ul>
+         </ul>
+       </div>
+      </div>
     </div>
   </div>
 </nav>
 
-    <div style="padding-top:70px" class="container">
+
+    <div style="padding-top:120px" class="container">
 
       <div class="row">
         <div class="col-md-3">
           <ul>
-    <li>Quickstart
-        <ul>
-            <li><a href="setup_quickstart.html">Install</a></li>
-            <li><a href="run_example_quickstart.html">Run Example</a></li>
-            <li><a href="java_api_quickstart.html">Java API</a></li>
-            <li><a href="scala_api_quickstart.html">Scala API</a></li>
-            <li><a href="faq.html">FAQ</a></li>
-        </ul>
-    </li>
-
-    <li>Setup &amp; Configuration
-        <ul>
-            <li><a href="building.html">Build Instructions</a></li>
-            <li><a href="local_setup.html">Local Setup</a></li>
-            <li><a href="cluster_setup.html">Cluster Setup</a></li>
-            <li><a href="yarn_setup.html">YARN Setup</a></li>
-            <li><a href="config.html">Configuration</a></li>
-        </ul>
-    </li>
-
-    <li>Programming Guides
-        <ul>
-            <li><a href="java_api_guide.html">Java API</a></li>
-            <li><a href="java_api_transformations.html">Java API 
Transformations</a></li>
-            <li><a href="scala_api_guide.html">Scala API</a></li>
-            <li><a href="iterations.html">Iterations</a></li>
-            <li><a href="spargel_guide.html">Spargel Graph API</a></li>
-        </ul>
-    </li>
-
-    <li>Examples
-        <ul>
-            <li><a href="java_api_examples.html">Java API</a></li>
-            <li><a href="scala_api_examples.html">Scala API</a></li>
-            <li><a href="example_connectors.html">Connecting to other 
systems</a></li>
-        </ul>
-    </li>
-
-    <li>Execution
-        <ul>
-            <li><a href="local_execution.html">Local/Debugging</a></li>
-            <li><a href="cluster_execution.html">Cluster</a></li>
-            <li><a href="cli.html">Command-Line Interface</a></li>
-            <li><a href="web_client.html">Web Interface</a></li>
-        </ul>
-    </li>
-
-    <li>Internals
-        <ul>
-            <li><a href="internal_overview.html">Overview</a></li>
-            <li><a href="internal_general_arch.html">General 
Architecture</a></li>
-            <li><a href="internal_add_operator.html">How-to: Adding a new 
Operator</a></li>
-        </ul>
-    </li>
+  <li><a href="faq.html">FAQ</a></li>
+  <li>Quickstart
+    <ul>
+      <li><a href="setup_quickstart.html">Setup</a></li>
+      <li><a href="run_example_quickstart.html">Run Example</a></li>
+      <li><a href="java_api_quickstart.html">Java API</a></li>
+      <li><a href="scala_api_quickstart.html">Scala API</a></li>
+    </ul>
+  </li>
+
+  <li>Setup &amp; Configuration
+    <ul>
+      <li><a href="local_setup.html">Local Setup</a></li>
+      <li><a href="building.html">Build Flink</a></li>
+      <li><a href="cluster_setup.html">Cluster Setup</a></li>
+      <li><a href="yarn_setup.html">YARN Setup</a></li>
+      <li><a href="config.html">Configuration</a></li>
+    </ul>
+  </li>
+
+  <li>Programming Guides
+    <ul>
+      <li><a href="programming_guide.html">Programming Guide</a></li>
+      <li><a href="dataset_transformations.html">DataSet 
Transformations</a></li>
+      <li><a href="java8_programming_guide.html">Java 8 Programming 
Guide</a></li>
+      <li><a href="streaming_guide.html">Streaming Guide</a></li>
+      <li><a href="iterations.html">Iterations</a></li>
+      <li><a href="spargel_guide.html">Spargel Graph API</a></li>
+      <li><a href="hadoop_compatibility.html">Hadoop Compatibility</a></li>
+    </ul>
+  </li>
+
+  <li>Examples
+    <ul>
+      <li><a href="examples.html">Bundled Examples</a></li>
+      <li><a href="example_connectors.html">Connecting to other 
systems</a></li>
+    </ul>
+  </li>
+
+  <li>Execution
+    <ul>
+      <li><a href="local_execution.html">Local/Debugging</a></li>
+      <li><a href="cluster_execution.html">Cluster</a></li>
+      <li><a href="cli.html">Command-Line Interface</a></li>
+      <li><a href="web_client.html">Web Interface</a></li>
+    </ul>
+  </li>
+
+  <li>Internals
+    <ul>
+      <li><a href="internal_overview.html">Overview</a></li>
+    </ul>
+  </li>
 </ul>
+
         </div>  
         <div class="col-md-9">
           <h1>Cluster Setup</h1>
+         
+          <ul id="markdown-toc">
+  <li><a href="#preparing-the-cluster">Preparing the Cluster</a>    <ul>
+      <li><a href="#software-requirements">Software Requirements</a></li>
+      <li><a href="#configuring-remote-access-with-ssh">Configuring Remote 
Access with ssh</a></li>
+      <li><a href="#setting-javahome-on-each-node">Setting JAVA_HOME on each 
Node</a></li>
+    </ul>
+  </li>
+  <li><a href="#hadoop-distributed-filesystem-hdfs-setup">Hadoop Distributed 
Filesystem (HDFS) Setup</a>    <ul>
+      <li><a href="#downloading-installing-and-configuring-hdfs">Downloading, 
Installing, and Configuring HDFS</a></li>
+      <li><a href="#starting-hdfs">Starting HDFS</a></li>
+    </ul>
+  </li>
+  <li><a href="#flink-setup">Flink Setup</a>    <ul>
+      <li><a href="#configuring-the-cluster">Configuring the Cluster</a></li>
+      <li><a href="#configuring-the-network-buffers">Configuring the Network 
Buffers</a></li>
+      <li><a href="#configuring-temporary-io-directories">Configuring 
Temporary I/O Directories</a></li>
+      <li><a href="#starting-flink">Starting Flink</a></li>
+    </ul>
+  </li>
+</ul>
 
-          <p>This documentation is intended to provide instructions on how to 
run
+<p>This documentation is intended to provide instructions on how to run
 Flink in a fully distributed fashion on a static (but possibly
 heterogeneous) cluster.</p>
 
@@ -116,9 +159,9 @@ heterogeneous) cluster.</p>
 second installing and configuring the <a 
href="http://hadoop.apache.org/";>Hadoop Distributed
 Filesystem</a> (HDFS).</p>
 
-<h1 id="preparing-the-cluster">Preparing the Cluster</h1>
+<h2 id="preparing-the-cluster">Preparing the Cluster</h2>
 
-<h2 id="software-requirements">Software Requirements</h2>
+<h3 id="software-requirements">Software Requirements</h3>
 
 <p>Flink runs on all <em>UNIX-like environments</em>, e.g. 
<strong>Linux</strong>, <strong>Mac OS X</strong>,
 and <strong>Cygwin</strong> (for Windows) and expects the cluster to consist 
of <strong>one master
@@ -126,8 +169,8 @@ node</strong> and <strong>one or more wo
 make sure you have the following software installed <strong>on each 
node</strong>:</p>
 
 <ul>
-<li><strong>Java 1.6.x</strong> or higher,</li>
-<li><strong>ssh</strong> (sshd must be running to use the Flink scripts that 
manage
+  <li><strong>Java 1.6.x</strong> or higher,</li>
+  <li><strong>ssh</strong> (sshd must be running to use the Flink scripts that 
manage
 remote components)</li>
 </ul>
 
@@ -136,29 +179,34 @@ install/upgrade it.</p>
 
 <p>For example, on Ubuntu Linux, type in the following commands to install 
Java and
 ssh:</p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">sudo 
apt-get install ssh 
-sudo apt-get install openjdk-7-jre
-</code></pre></div>
+
+<div class="highlight"><pre><code class="language-bash">sudo apt-get install 
ssh 
+sudo apt-get install openjdk-7-jre</code></pre></div>
+
 <p>You can check the correct installation of Java by issuing the following 
command:</p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">java 
-version
-</code></pre></div>
+
+<div class="highlight"><pre><code class="language-bash">java 
-version</code></pre></div>
+
 <p>The command should output something comparable to the following on every 
node of
 your cluster (depending on your Java version, there may be small 
differences):</p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">java 
version &quot;1.6.0_22&quot;
-Java(TM) SE Runtime Environment (build 1.6.0_22-b04)
-Java HotSpot(TM) 64-Bit Server VM (build 17.1-b03, mixed mode)
-</code></pre></div>
+
+<div class="highlight"><pre><code class="language-bash">java version <span 
class="s2">&quot;1.6.0_22&quot;</span>
+Java<span class="o">(</span>TM<span class="o">)</span> SE Runtime Environment 
<span class="o">(</span>build 1.6.0_22-b04<span class="o">)</span>
+Java HotSpot<span class="o">(</span>TM<span class="o">)</span> 64-Bit Server 
VM <span class="o">(</span>build 17.1-b03, mixed mode<span 
class="o">)</span></code></pre></div>
+
 <p>To make sure the ssh daemon is running properly, you can use the command</p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">ps 
aux | grep sshd
-</code></pre></div>
+
+<div class="highlight"><pre><code class="language-bash">ps aux <span 
class="p">|</span> grep sshd</code></pre></div>
+
 <p>Something comparable to the following line should appear in the output
 of the command on every host of your cluster:</p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">root  
     894  0.0  0.0  49260   320 ?        Ss   Jan09   0:13 /usr/sbin/sshd
-</code></pre></div>
-<h2 id="configuring-remote-access-with-ssh">Configuring Remote Access with 
ssh</h2>
+
+<div class="highlight"><pre><code class="language-bash">root       <span 
class="m">894</span>  0.0  0.0  <span class="m">49260</span>   <span 
class="m">320</span> ?        Ss   Jan09   0:13 
/usr/sbin/sshd</code></pre></div>
+
+<h3 id="configuring-remote-access-with-ssh">Configuring Remote Access with 
ssh</h3>
 
 <p>In order to start/stop the remote processes, the master node requires 
access via
-ssh to the worker nodes. It is most convenient to use ssh&#39;s public key
+ssh to the worker nodes. It is most convenient to use ssh’s public key
 authentication for this. To setup public key authentication, log on to the
 master as the user who will later execute all the Flink components. <strong>The
 same user (i.e. a user with the same user name) must also exist on all worker
@@ -171,14 +219,16 @@ new public/private key pair. The followi
 public/private key pair into the <em>.ssh</em> directory inside the home 
directory of
 the user <em>flink</em>. See the ssh-keygen man page for more details. Note 
that
 the private key is not protected by a passphrase.</p>
-<div class="highlight"><pre><code class="language-text" 
data-lang="text">ssh-keygen -b 2048 -P &#39;&#39; -f ~/.ssh/id_rsa
-</code></pre></div>
+
+<div class="highlight"><pre><code class="language-bash">ssh-keygen -b <span 
class="m">2048</span> -P <span class="s1">&#39;&#39;</span> -f 
~/.ssh/id_rsa</code></pre></div>
+
 <p>Next, copy/append the content of the file <em>.ssh/id_rsa.pub</em> to your
 authorized_keys file. The content of the authorized_keys file defines which
 public keys are considered trustworthy during the public key authentication
 process. On most systems the appropriate command is</p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">cat 
.ssh/id_rsa.pub &gt;&gt; .ssh/authorized_keys
-</code></pre></div>
+
+<div class="highlight"><pre><code class="language-bash">cat .ssh/id_rsa.pub 
&gt;&gt; .ssh/authorized_keys</code></pre></div>
+
 <p>On some Linux systems, the authorized keys file may also be expected by the 
ssh
 daemon under <em>.ssh/authorized_keys2</em>. In either case, you should make 
sure the
 file only contains those public keys which you consider trustworthy for each
@@ -186,13 +236,14 @@ node of cluster.</p>
 
 <p>Finally, the authorized keys file must be copied to every worker node of 
your
 cluster. You can do this by repeatedly typing in</p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">scp 
.ssh/authorized_keys &lt;worker&gt;:~/.ssh/
-</code></pre></div>
+
+<div class="highlight"><pre><code class="language-bash">scp 
.ssh/authorized_keys &lt;worker&gt;:~/.ssh/</code></pre></div>
+
 <p>and replacing <em>&lt;worker&gt;</em> with the host name of the respective 
worker node.
 After having finished the copy process, you should be able to log on to each
 worker node from your master node via ssh without a password.</p>
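+
+<p>If you keep the worker host names in a plain text file (a hypothetical
+<em>workers.txt</em>, one host name per line; it is not part of Flink), a short loop
+saves the repeated typing. This is only a sketch under that assumption:</p>
+
+<div class="highlight"><pre><code class="language-bash"># copy the authorized_keys file to every host listed in workers.txt
+for worker in $(cat workers.txt); do
+  scp .ssh/authorized_keys &quot;$worker&quot;:~/.ssh/
+done</code></pre></div>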
 
-<h2 id="setting-java_home-on-each-node">Setting JAVA_HOME on each Node</h2>
+<h3 id="setting-javahome-on-each-node">Setting JAVA_HOME on each Node</h3>
 
 <p>Flink requires the <code>JAVA_HOME</code> environment variable to be set on 
the
 master and all worker nodes and point to the directory of your Java
@@ -203,16 +254,18 @@ installation.</p>
 
 <p>Alternatively, add the following line to your shell profile. If you use the
 <em>bash</em> shell (probably the most common shell), the shell profile is 
located in
-<em>~/.bashrc</em>:</p>
-<div class="highlight"><pre><code class="language-text" 
data-lang="text">export JAVA_HOME=/path/to/java_home/
-</code></pre></div>
+<em>~/.bashrc</em>:</p>
+
+<div class="highlight"><pre><code class="language-bash"><span 
class="nb">export </span><span class="nv">JAVA_HOME</span><span 
class="o">=</span>/path/to/java_home/</code></pre></div>
+
 <p>If your ssh daemon supports user environments, you can also add 
<code>JAVA_HOME</code> to
-<em>.~/.ssh/environment</em>. As super user <em>root</em> you can enable ssh 
user
+<em>~/.ssh/environment</em>. As super user <em>root</em> you can enable ssh 
user
 environments with the following commands:</p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">echo 
&quot;PermitUserEnvironment yes&quot; &gt;&gt; /etc/ssh/sshd_config
-/etc/init.d/ssh restart
-</code></pre></div>
-<h1 id="hadoop-distributed-filesystem-(hdfs)-setup">Hadoop Distributed 
Filesystem (HDFS) Setup</h1>
+
+<div class="highlight"><pre><code class="language-bash"><span 
class="nb">echo</span> <span class="s2">&quot;PermitUserEnvironment 
yes&quot;</span> &gt;&gt; /etc/ssh/sshd_config
+/etc/init.d/ssh restart</code></pre></div>
+
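+<p>As a quick sanity check (a suggestion, not part of the original guide), you can
+print the variable on a worker from the master node, replacing
+<em>&lt;worker&gt;</em> as before. Whether the value shows up this way depends on
+which startup files the remote shell reads for non-interactive commands:</p>
+
+<div class="highlight"><pre><code class="language-bash">ssh &lt;worker&gt; &#39;echo $JAVA_HOME&#39;</code></pre></div>
+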
+<h2 id="hadoop-distributed-filesystem-hdfs-setup">Hadoop Distributed 
Filesystem (HDFS) Setup</h2>
 
 <p>The Flink system currently uses the Hadoop Distributed Filesystem (HDFS)
 to read and write data in a distributed fashion.</p>
@@ -221,15 +274,15 @@ to read and write data in a distributed
 just a general overview of some required settings. Please consult one of the
 many installation guides available online for more detailed instructions.</p>
 
-<p>*<em>Note that the following instructions are based on Hadoop 1.2 and might 
differ
-*</em>for Hadoop 2.</p>
+<p><strong>Note that the following instructions are based on Hadoop 1.2 and might
+differ for Hadoop 2.</strong></p>
 
-<h2 id="downloading,-installing,-and-configuring-hdfs">Downloading, 
Installing, and Configuring HDFS</h2>
+<h3 id="downloading-installing-and-configuring-hdfs">Downloading, Installing, 
and Configuring HDFS</h3>
 
 <p>Similar to the Flink system, HDFS runs in a distributed fashion. HDFS
-consists of a <strong>NameNode</strong> which manages the distributed file 
system&#39;s meta
+consists of a <strong>NameNode</strong> which manages the distributed file 
system’s meta
 data. The actual data is stored by one or more <strong>DataNodes</strong>. For 
the remainder
-of this instruction we assume the HDFS&#39;s NameNode component runs on the 
master
+of this instruction we assume the HDFS’s NameNode component runs on the 
master
 node while all the worker nodes run an HDFS DataNode.</p>
 
 <p>To start, log on to your master node and download Hadoop (which includes  
HDFS)
@@ -239,13 +292,15 @@ from the Apache <a href="http://hadoop.a
 
 <p>After having extracted the Hadoop archive, change into the Hadoop directory 
and
 edit the Hadoop environment configuration file:</p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">cd 
hadoop-*
-vi conf/hadoop-env.sh
-</code></pre></div>
+
+<div class="highlight"><pre><code class="language-bash"><span class="nb">cd 
</span>hadoop-*
+vi conf/hadoop-env.sh</code></pre></div>
+
 <p>Uncomment and modify the following line in the file according to the path of
 your Java installation.</p>
 
-<p><code>export JAVA_HOME=/path/to/java_home/</code></p>
+<div class="highlight"><pre><code>export JAVA_HOME=/path/to/java_home/
+</code></pre></div>
 
 <p>Save the changes and open the HDFS configuration file 
<em>conf/hdfs-site.xml</em>. HDFS
 offers multiple configuration parameters which affect the behavior of the
@@ -253,7 +308,8 @@ distributed file system in various ways.
 configuration which is required to make HDFS work. More information on how to
 configure HDFS can be found in the <a 
href="http://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html";>HDFS User
 Guide</a>.</p>
-<div class="highlight"><pre><code class="language-xml" data-lang="xml"><span 
class="nt">&lt;configuration&gt;</span>
+
+<div class="highlight"><pre><code class="language-xml"><span 
class="nt">&lt;configuration&gt;</span>
   <span class="nt">&lt;property&gt;</span>
     <span class="nt">&lt;name&gt;</span>fs.default.name<span 
class="nt">&lt;/name&gt;</span>
     <span class="nt">&lt;value&gt;</span>hdfs://MASTER:50040/<span 
class="nt">&lt;/value&gt;</span>
@@ -262,8 +318,8 @@ Guide</a> guide.</p>
     <span class="nt">&lt;name&gt;</span>dfs.data.dir<span 
class="nt">&lt;/name&gt;</span>
     <span class="nt">&lt;value&gt;</span>DATAPATH<span 
class="nt">&lt;/value&gt;</span>
   <span class="nt">&lt;/property&gt;</span>
-<span class="nt">&lt;/configuration&gt;</span>
-</code></pre></div>
+<span class="nt">&lt;/configuration&gt;</span></code></pre></div>
+
 <p>Replace <em>MASTER</em> with the IP/host name of your master node which 
runs the
 <em>NameNode</em>. <em>DATAPATH</em> must be replaced with the path to the 
directory in which the
 actual HDFS data shall be stored on each worker node. Make sure that the
@@ -271,21 +327,24 @@ actual HDFS data shall be stored on each
 directory.</p>
 
 <p>After having saved the HDFS configuration file, open the file 
<em>conf/slaves</em> and
-enter the IP/host name of those worker nodes which shall act as *DataNode*s.
+enter the IP/host name of those worker nodes which shall act as 
<em>DataNode</em>s.
 Each entry must be separated by a line break.</p>
-<div class="highlight"><pre><code class="language-text" 
data-lang="text">&lt;worker 1&gt;
+
+<div class="highlight"><pre><code>&lt;worker 1&gt;
 &lt;worker 2&gt;
 .
 .
 .
 &lt;worker n&gt;
 </code></pre></div>
+
 <p>Initialize the HDFS by typing in the following command. Note that the
 command will <strong>delete all data</strong> which has been previously stored 
in the
 HDFS. However, since we have just installed a fresh HDFS, it should be
 safe to answer the confirmation with <em>yes</em>.</p>
-<div class="highlight"><pre><code class="language-text" 
data-lang="text">bin/hadoop namenode -format
-</code></pre></div>
+
+<div class="highlight"><pre><code class="language-bash">bin/hadoop namenode 
-format</code></pre></div>
+
 <p>Finally, we need to make sure that the Hadoop directory is available to
 all worker nodes which are intended to act as DataNodes and that all nodes
 <strong>find the directory under the same path</strong>. We recommend using a 
shared network
@@ -293,32 +352,34 @@ directory (e.g. an NFS share) for that.
 directory to all nodes (with the disadvantage that all configuration and
 code updates need to be synced to all nodes).</p>
 
-<h2 id="starting-hdfs">Starting HDFS</h2>
+<h3 id="starting-hdfs">Starting HDFS</h3>
 
 <p>To start HDFS, log on to the master and type in the following
 commands</p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">cd 
hadoop-*
-binn/start-dfs.sh
-</code></pre></div>
+
+<div class="highlight"><pre><code class="language-bash"><span class="nb">cd 
</span>hadoop-*
+bin/start-dfs.sh</code></pre></div>
+
 <p>If your HDFS setup is correct, you should be able to open the HDFS
-status website at <em><a 
href="http://MASTER:50070";>http://MASTER:50070</a></em>. In a matter of a 
seconds,
+status website at <em>http://MASTER:50070</em>. In a matter of seconds,
 all DataNodes should appear as live nodes. For troubleshooting we would
 like to point you to the <a 
href="http://wiki.apache.org/hadoop/QuickStart";>Hadoop Quick
 Start</a>
 guide.</p>
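+
+<p>If you prefer the command line over the status website, the standard Hadoop 1.x
+tools give a similar picture (shown purely as an illustration, run from the Hadoop
+directory):</p>
+
+<div class="highlight"><pre><code class="language-bash"># list the connected DataNodes and the reported capacity
+bin/hadoop dfsadmin -report
+# list the (still empty) root directory of the fresh HDFS
+bin/hadoop fs -ls /</code></pre></div>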
 
-<h1 id="flink-setup">Flink Setup</h1>
+<h2 id="flink-setup">Flink Setup</h2>
 
-<p>Go to the <a href="downloads/">downloads page</a> and get the ready to run
+<p>Go to the <a href="/downloads.html">downloads page</a> and get the ready-to-run
 package. Make sure to pick the Flink package <strong>matching your Hadoop
 version</strong>.</p>
 
 <p>After downloading the latest release, copy the archive to your master node 
and
 extract it:</p>
-<div class="highlight"><pre><code class="language-text" data-lang="text">tar 
xzf flink-*.tgz
-cd flink-*
-</code></pre></div>
-<h2 id="configuring-the-cluster">Configuring the Cluster</h2>
+
+<div class="highlight"><pre><code class="language-bash">tar xzf flink-*.tgz
+<span class="nb">cd </span>flink-*</code></pre></div>
+
+<h3 id="configuring-the-cluster">Configuring the Cluster</h3>
 
 <p>After having extracted the system files, you need to configure Flink for
 the cluster by editing <em>conf/flink-conf.yaml</em>.</p>
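+
+<p>For orientation, a minimal sketch of the entries most cluster setups touch in
+<em>conf/flink-conf.yaml</em>. The address is a placeholder and the values shown are
+just the documented defaults; see the <a href="config.html">configuration page</a>
+for the meaning of each key:</p>
+
+<div class="highlight"><pre><code>jobmanager.rpc.address: MASTER
+jobmanager.heap.mb: 256
+taskmanager.heap.mb: 512
+taskmanager.numberOfTaskSlots: 1
+parallelization.degree.default: 1
+</code></pre></div>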
@@ -338,18 +399,20 @@ as worker nodes. Therefore, similar to t
 will later run a TaskManager.</p>
 
 <p>Each entry must be separated by a new line, as in the following example:</p>
-<div class="highlight"><pre><code class="language-text" 
data-lang="text">192.168.0.100
+
+<div class="highlight"><pre><code>192.168.0.100
 192.168.0.101
 .
 .
 .
 192.168.0.150
 </code></pre></div>
+
 <p>The Flink directory must be available on every worker under the same
 path. Similarly to HDFS, you can use a shared NFS directory, or copy the
 entire Flink directory to every worker node.</p>
 
-<h2 id="configuring-the-network-buffers">Configuring the Network Buffers</h2>
+<h3 id="configuring-the-network-buffers">Configuring the Network Buffers</h3>
 
 <p>Network buffers are a critical resource for the communication layers. They 
are
 used to buffer records before transmission over a network, and to buffer
@@ -370,7 +433,7 @@ you expect to be active at the same time
 
 <p>Since the <em>intra-node-parallelism</em> is typically the number of cores, 
and more
 than 4 repartitioning or broadcasting channels are rarely active in parallel, 
it
-frequently boils down to <em>#cores^2^</em> * <em>#machines</em> * 4. To 
support for
+frequently boils down to <em>#cores^2</em> * <em>#machines</em> * 4. To support for
 example a cluster of 20 8-core machines, you should use roughly 5000 network
 buffers for optimal throughput.</p>
 
@@ -381,20 +444,20 @@ system would allocate roughly 300 MiByte
 parameters:</p>
 
 <ul>
-<li><code>taskmanager.network.numberOfBuffers</code>, and</li>
-<li><code>taskmanager.network.bufferSizeInBytes</code>.</li>
+  <li><code>taskmanager.network.numberOfBuffers</code>, and</li>
+  <li><code>taskmanager.network.bufferSizeInBytes</code>.</li>
 </ul>
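+
+<p>As a worked example of this rule of thumb (an illustration, not a tuning
+recommendation): for 20 machines with 8 cores each, 8^2 * 20 * 4 = 5120, which
+matches the figure of roughly 5000 buffers given above and would be configured as</p>
+
+<div class="highlight"><pre><code>taskmanager.network.numberOfBuffers: 5120
+</code></pre></div>
+
+<p>in <em>conf/flink-conf.yaml</em>.</p>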
 
-<h2 id="configuring-temporary-i/o-directories">Configuring Temporary I/O 
Directories</h2>
+<h3 id="configuring-temporary-io-directories">Configuring Temporary I/O 
Directories</h3>
 
 <p>Although Flink aims to process as much data in main memory as possible,
 it is not uncommon that  more data needs to be processed than memory is
-available. Flink&#39;s runtime is designed to  write temporary data to disk
+available. Flink’s runtime is designed to  write temporary data to disk
 to handle these situations.</p>
 
 <p>The <code>taskmanager.tmp.dirs</code> parameter specifies a list of 
directories into which
 Flink writes temporary files. The paths of the directories need to be
-separated by &#39;:&#39; (colon character).  Flink will concurrently write (or
+separated by ‘:’ (colon character).  Flink will concurrently write (or
 read) one temporary file to (from) each configured directory.  This way,
 temporary I/O can be evenly distributed over multiple independent I/O devices
 such as hard disks to improve performance.  To leverage fast I/O devices (e.g.,
@@ -407,7 +470,7 @@ system, such as <em>/tmp</em> in Linux s
 <p>Please see the <a href="config.html">configuration page</a> for details and 
additional
 configuration options.</p>
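+
+<p>For illustration, a possible value for the colon-separated list described above,
+assuming two dedicated disks mounted at these (made-up) paths:</p>
+
+<div class="highlight"><pre><code>taskmanager.tmp.dirs: /data1/tmp:/data2/tmp
+</code></pre></div>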
 
-<h2 id="starting-flink">Starting Flink</h2>
+<h3 id="starting-flink">Starting Flink</h3>
 
 <p>The following script starts a JobManager on the local node and connects via
 SSH to all worker nodes listed in the <em>slaves</em> file to start the
@@ -416,9 +479,11 @@ running. The JobManager running on the l
 at the configured RPC port.</p>
 
 <p>Assuming that you are on the master node and inside the Flink directory:</p>
-<div class="highlight"><pre><code class="language-text" 
data-lang="text">bin/start-cluster.sh
-</code></pre></div>
 
+<div class="highlight"><pre><code 
class="language-bash">bin/start-cluster.sh</code></pre></div>
+
+         
+        <!-- Disqus Area -->
           <div style="padding-top:30px" id="disqus_thread"></div>
       
             <script type="text/javascript">
@@ -433,14 +498,12 @@ at the configured RPC port.</p>
                 })();
             </script>
             <noscript>Please enable JavaScript to view the <a 
href="http://disqus.com/?ref_noscript";>comments powered by 
Disqus.</a></noscript>
-            <a href="http://disqus.com"; class="dsq-brlink">comments powered by 
<span class="logo-disqus">Disqus</span></a>
-
           </div>
         </div>
 
         <div class="footer">
           
-          <p><hr class="divider"></p>
+          <hr class="divider" />
 
 <p><small>Apache Flink is an effort undergoing incubation at The Apache 
Software
 Foundation (ASF), sponsored by the Apache Incubator PMC. Incubation is
@@ -451,9 +514,10 @@ incubation status is not necessarily a r
 stability of the code, it does indicate that the project has yet to be fully
 endorsed by the ASF.</small></p>
 
-<p><a href="http://incubator.apache.org/";><img 
src="/img/apache-incubator-logo.png" alt="Incubator Logo"></a></p>
+<p><a href="http://incubator.apache.org/";><img 
src="/img/apache-incubator-logo.png" alt="Incubator Logo" /></a></p>
 
-<p class="text-center"><a href="/privacy-policy.html">Privacy Policy<a></p>
+<p class="text-center"><a href="privacy-policy.html">Privacy Policy<a>
+</a></a></p>
 
         </div>
       </div>

Modified: flink/site/docs/0.6-incubating/coding_guidelines.html
URL: 
http://svn.apache.org/viewvc/flink/site/docs/0.6-incubating/coding_guidelines.html?rev=1657551&r1=1657550&r2=1657551&view=diff
==============================================================================
--- flink/site/docs/0.6-incubating/coding_guidelines.html (original)
+++ flink/site/docs/0.6-incubating/coding_guidelines.html Thu Feb  5 12:21:38 
2015
@@ -5,112 +5,135 @@
     <meta http-equiv="X-UA-Compatible" content="IE=edge">
     <meta name="viewport" content="width=device-width, initial-scale=1">
     <title>Apache Flink (incubating): Coding Guidelines</title>
-    <link rel="stylesheet" href="/css/bootstrap.css">
-    <link rel="stylesheet" href="/css/bootstrap-lumen-custom.css">
-    <link rel="stylesheet" href="/css/syntax.css">
-    <link rel="/css/custom.css">
-    <link rel="css/codetabs.css">
-    <link 
href="//maxcdn.bootstrapcdn.com/font-awesome/4.1.0/css/font-awesome.min.css" 
rel="stylesheet">
+    <link rel="shortcut icon" href="favicon.ico" type="image/x-icon">
+    <link rel="icon" href="favicon.ico" type="image/x-icon">
+    <link rel="stylesheet" href="css/bootstrap.css">
+    <link rel="stylesheet" href="css/bootstrap-lumen-custom.css">
+    <link rel="stylesheet" href="css/syntax.css">
+    <link rel="stylesheet" href="css/custom.css">
+    <link href="css/main/main.css" rel="stylesheet">
     <script 
src="https://ajax.googleapis.com/ajax/libs/jquery/1.11.0/jquery.min.js";></script>
-    <script src="/js/bootstrap.min.js"></script>
+    <script src="js/bootstrap.min.js"></script>
     <script src="js/codetabs.js"></script>
   </head>
   <body>
 
     <nav class="navbar navbar-default navbar-fixed-top" role="navigation">
   <div class="container">
-    <div class="navbar-header">
-      <button type="button" class="navbar-toggle" data-toggle="collapse" 
data-target=".navbar-collapse">
-        <span class="sr-only">Toggle navigation</span>
-        <span class="icon-bar"></span>
-        <span class="icon-bar"></span>
-        <span class="icon-bar"></span>
-      </button>
-      <a class="navbar-brand" href="http://flink.incubator.apache.org";>Apache 
Flink</a>
-    </div>
-
-    <div class="collapse navbar-collapse" id="navbar-collapse-1">
-      <ul class="nav navbar-nav">
+    <div class="row">
+      <div class="col-md-1 af-mobile-nav-bar">
+       <a href="index.html" title="Home">
+         <img class="hidden-xs hidden-sm img-responsive"
+              src="img/logo.png" alt="Apache Flink Logo">
+       </a>    
+       <div class="row visible-xs">
+         <div class="col-xs-3">
+           <a href="index.html" title="Home">  
+             <img class="hidden-x hidden-sm img-responsive"
+                  src="img/logo.png" alt="Apache Flink Logo">
+           </a>        
+         </div>
+         <div class="col-xs-5"></div>
+         <div class="col-xs-4">
+           <div class="af-mobile-btn">
+             <span class="glyphicon glyphicon-plus"></span>
+           </div>
+         </div>
+       </div>
+      </div>
+      <!-- Navigation -->
+      <div class="col-md-11">
+       <div class="collapse navbar-collapse" id="navbar-collapse-1">
+         <ul class="nav navbar-nav">
+
+           <li>
+             <a href="index.html" class="">Documentation</a>
+           </li>
+
+           <li>
+             <a href="api/java/index.html">Javadoc</a>
+           </li>
+
+           <li>
+             <a 
href="api/scala/index.html#org.apache.flink.api.scala.package">Scaladoc</a>
+           </li>
 
-        <li>
-          <a href="index.html" class="">Documentation</a>
-        </li>
-
-        <li>
-          <a href="api/java/index.html">Javadoc</a>
-        </li>
-      </ul>
+         </ul>
+       </div>
+      </div>
     </div>
   </div>
 </nav>
 
-    <div style="padding-top:70px" class="container">
+
+    <div style="padding-top:120px" class="container">
 
       <div class="row">
         <div class="col-md-3">
           <ul>
-    <li>Quickstart
-        <ul>
-            <li><a href="setup_quickstart.html">Install</a></li>
-            <li><a href="run_example_quickstart.html">Run Example</a></li>
-            <li><a href="java_api_quickstart.html">Java API</a></li>
-            <li><a href="scala_api_quickstart.html">Scala API</a></li>
-            <li><a href="faq.html">FAQ</a></li>
-        </ul>
-    </li>
-
-    <li>Setup &amp; Configuration
-        <ul>
-            <li><a href="building.html">Build Instructions</a></li>
-            <li><a href="local_setup.html">Local Setup</a></li>
-            <li><a href="cluster_setup.html">Cluster Setup</a></li>
-            <li><a href="yarn_setup.html">YARN Setup</a></li>
-            <li><a href="config.html">Configuration</a></li>
-        </ul>
-    </li>
-
-    <li>Programming Guides
-        <ul>
-            <li><a href="java_api_guide.html">Java API</a></li>
-            <li><a href="java_api_transformations.html">Java API 
Transformations</a></li>
-            <li><a href="scala_api_guide.html">Scala API</a></li>
-            <li><a href="iterations.html">Iterations</a></li>
-            <li><a href="spargel_guide.html">Spargel Graph API</a></li>
-        </ul>
-    </li>
-
-    <li>Examples
-        <ul>
-            <li><a href="java_api_examples.html">Java API</a></li>
-            <li><a href="scala_api_examples.html">Scala API</a></li>
-            <li><a href="example_connectors.html">Connecting to other 
systems</a></li>
-        </ul>
-    </li>
-
-    <li>Execution
-        <ul>
-            <li><a href="local_execution.html">Local/Debugging</a></li>
-            <li><a href="cluster_execution.html">Cluster</a></li>
-            <li><a href="cli.html">Command-Line Interface</a></li>
-            <li><a href="web_client.html">Web Interface</a></li>
-        </ul>
-    </li>
-
-    <li>Internals
-        <ul>
-            <li><a href="internal_overview.html">Overview</a></li>
-            <li><a href="internal_general_arch.html">General 
Architecture</a></li>
-            <li><a href="internal_add_operator.html">How-to: Adding a new 
Operator</a></li>
-        </ul>
-    </li>
+  <li><a href="faq.html">FAQ</a></li>
+  <li>Quickstart
+    <ul>
+      <li><a href="setup_quickstart.html">Setup</a></li>
+      <li><a href="run_example_quickstart.html">Run Example</a></li>
+      <li><a href="java_api_quickstart.html">Java API</a></li>
+      <li><a href="scala_api_quickstart.html">Scala API</a></li>
+    </ul>
+  </li>
+
+  <li>Setup &amp; Configuration
+    <ul>
+      <li><a href="local_setup.html">Local Setup</a></li>
+      <li><a href="building.html">Build Flink</a></li>
+      <li><a href="cluster_setup.html">Cluster Setup</a></li>
+      <li><a href="yarn_setup.html">YARN Setup</a></li>
+      <li><a href="config.html">Configuration</a></li>
+    </ul>
+  </li>
+
+  <li>Programming Guides
+    <ul>
+      <li><a href="programming_guide.html">Programming Guide</a></li>
+      <li><a href="dataset_transformations.html">DataSet 
Transformations</a></li>
+      <li><a href="java8_programming_guide.html">Java 8 Programming 
Guide</a></li>
+      <li><a href="streaming_guide.html">Streaming Guide</a></li>
+      <li><a href="iterations.html">Iterations</a></li>
+      <li><a href="spargel_guide.html">Spargel Graph API</a></li>
+      <li><a href="hadoop_compatibility.html">Hadoop Compatibility</a></li>
+    </ul>
+  </li>
+
+  <li>Examples
+    <ul>
+      <li><a href="examples.html">Bundled Examples</a></li>
+      <li><a href="example_connectors.html">Connecting to other 
systems</a></li>
+    </ul>
+  </li>
+
+  <li>Execution
+    <ul>
+      <li><a href="local_execution.html">Local/Debugging</a></li>
+      <li><a href="cluster_execution.html">Cluster</a></li>
+      <li><a href="cli.html">Command-Line Interface</a></li>
+      <li><a href="web_client.html">Web Interface</a></li>
+    </ul>
+  </li>
+
+  <li>Internals
+    <ul>
+      <li><a href="internal_overview.html">Overview</a></li>
+    </ul>
+  </li>
 </ul>
+
         </div>  
         <div class="col-md-9">
           <h1>Coding Guidelines</h1>
-
+         
           <p>The coding guidelines are now located <a 
href="http://flink.incubator.apache.org/coding_guidelines.html";>on the project 
website</a>.</p>
 
-
+         
+        <!-- Disqus Area -->
           <div style="padding-top:30px" id="disqus_thread"></div>
       
             <script type="text/javascript">
@@ -125,14 +148,12 @@
                 })();
             </script>
             <noscript>Please enable JavaScript to view the <a 
href="http://disqus.com/?ref_noscript";>comments powered by 
Disqus.</a></noscript>
-            <a href="http://disqus.com"; class="dsq-brlink">comments powered by 
<span class="logo-disqus">Disqus</span></a>
-
           </div>
         </div>
 
         <div class="footer">
           
-          <p><hr class="divider"></p>
+          <hr class="divider" />
 
 <p><small>Apache Flink is an effort undergoing incubation at The Apache 
Software
 Foundation (ASF), sponsored by the Apache Incubator PMC. Incubation is
@@ -143,9 +164,10 @@ incubation status is not necessarily a r
 stability of the code, it does indicate that the project has yet to be fully
 endorsed by the ASF.</small></p>
 
-<p><a href="http://incubator.apache.org/";><img 
src="/img/apache-incubator-logo.png" alt="Incubator Logo"></a></p>
+<p><a href="http://incubator.apache.org/";><img 
src="/img/apache-incubator-logo.png" alt="Incubator Logo" /></a></p>
 
-<p class="text-center"><a href="/privacy-policy.html">Privacy Policy<a></p>
+<p class="text-center"><a href="privacy-policy.html">Privacy Policy<a>
+</a></a></p>
 
         </div>
       </div>

Modified: flink/site/docs/0.6-incubating/config.html
URL: 
http://svn.apache.org/viewvc/flink/site/docs/0.6-incubating/config.html?rev=1657551&r1=1657550&r2=1657551&view=diff
==============================================================================
--- flink/site/docs/0.6-incubating/config.html (original)
+++ flink/site/docs/0.6-incubating/config.html Thu Feb  5 12:21:38 2015
@@ -5,110 +5,148 @@
     <meta http-equiv="X-UA-Compatible" content="IE=edge">
     <meta name="viewport" content="width=device-width, initial-scale=1">
     <title>Apache Flink (incubating): Configuration</title>
-    <link rel="stylesheet" href="/css/bootstrap.css">
-    <link rel="stylesheet" href="/css/bootstrap-lumen-custom.css">
-    <link rel="stylesheet" href="/css/syntax.css">
-    <link rel="/css/custom.css">
-    <link rel="css/codetabs.css">
-    <link 
href="//maxcdn.bootstrapcdn.com/font-awesome/4.1.0/css/font-awesome.min.css" 
rel="stylesheet">
+    <link rel="shortcut icon" href="favicon.ico" type="image/x-icon">
+    <link rel="icon" href="favicon.ico" type="image/x-icon">
+    <link rel="stylesheet" href="css/bootstrap.css">
+    <link rel="stylesheet" href="css/bootstrap-lumen-custom.css">
+    <link rel="stylesheet" href="css/syntax.css">
+    <link rel="stylesheet" href="css/custom.css">
+    <link href="css/main/main.css" rel="stylesheet">
     <script 
src="https://ajax.googleapis.com/ajax/libs/jquery/1.11.0/jquery.min.js";></script>
-    <script src="/js/bootstrap.min.js"></script>
+    <script src="js/bootstrap.min.js"></script>
     <script src="js/codetabs.js"></script>
   </head>
   <body>
 
     <nav class="navbar navbar-default navbar-fixed-top" role="navigation">
   <div class="container">
-    <div class="navbar-header">
-      <button type="button" class="navbar-toggle" data-toggle="collapse" 
data-target=".navbar-collapse">
-        <span class="sr-only">Toggle navigation</span>
-        <span class="icon-bar"></span>
-        <span class="icon-bar"></span>
-        <span class="icon-bar"></span>
-      </button>
-      <a class="navbar-brand" href="http://flink.incubator.apache.org";>Apache 
Flink</a>
-    </div>
-
-    <div class="collapse navbar-collapse" id="navbar-collapse-1">
-      <ul class="nav navbar-nav">
+    <div class="row">
+      <div class="col-md-1 af-mobile-nav-bar">
+       <a href="index.html" title="Home">
+         <img class="hidden-xs hidden-sm img-responsive"
+              src="img/logo.png" alt="Apache Flink Logo">
+       </a>    
+       <div class="row visible-xs">
+         <div class="col-xs-3">
+           <a href="index.html" title="Home">  
+             <img class="hidden-x hidden-sm img-responsive"
+                  src="img/logo.png" alt="Apache Flink Logo">
+           </a>        
+         </div>
+         <div class="col-xs-5"></div>
+         <div class="col-xs-4">
+           <div class="af-mobile-btn">
+             <span class="glyphicon glyphicon-plus"></span>
+           </div>
+         </div>
+       </div>
+      </div>
+      <!-- Navigation -->
+      <div class="col-md-11">
+       <div class="collapse navbar-collapse" id="navbar-collapse-1">
+         <ul class="nav navbar-nav">
+
+           <li>
+             <a href="index.html" class="">Documentation</a>
+           </li>
+
+           <li>
+             <a href="api/java/index.html">Javadoc</a>
+           </li>
+
+           <li>
+             <a 
href="api/scala/index.html#org.apache.flink.api.scala.package">Scaladoc</a>
+           </li>
 
-        <li>
-          <a href="index.html" class="">Documentation</a>
-        </li>
-
-        <li>
-          <a href="api/java/index.html">Javadoc</a>
-        </li>
-      </ul>
+         </ul>
+       </div>
+      </div>
     </div>
   </div>
 </nav>
 
-    <div style="padding-top:70px" class="container">
+
+    <div style="padding-top:120px" class="container">
 
       <div class="row">
         <div class="col-md-3">
           <ul>
-    <li>Quickstart
-        <ul>
-            <li><a href="setup_quickstart.html">Install</a></li>
-            <li><a href="run_example_quickstart.html">Run Example</a></li>
-            <li><a href="java_api_quickstart.html">Java API</a></li>
-            <li><a href="scala_api_quickstart.html">Scala API</a></li>
-            <li><a href="faq.html">FAQ</a></li>
-        </ul>
-    </li>
-
-    <li>Setup &amp; Configuration
-        <ul>
-            <li><a href="building.html">Build Instructions</a></li>
-            <li><a href="local_setup.html">Local Setup</a></li>
-            <li><a href="cluster_setup.html">Cluster Setup</a></li>
-            <li><a href="yarn_setup.html">YARN Setup</a></li>
-            <li><a href="config.html">Configuration</a></li>
-        </ul>
-    </li>
-
-    <li>Programming Guides
-        <ul>
-            <li><a href="java_api_guide.html">Java API</a></li>
-            <li><a href="java_api_transformations.html">Java API 
Transformations</a></li>
-            <li><a href="scala_api_guide.html">Scala API</a></li>
-            <li><a href="iterations.html">Iterations</a></li>
-            <li><a href="spargel_guide.html">Spargel Graph API</a></li>
-        </ul>
-    </li>
-
-    <li>Examples
-        <ul>
-            <li><a href="java_api_examples.html">Java API</a></li>
-            <li><a href="scala_api_examples.html">Scala API</a></li>
-            <li><a href="example_connectors.html">Connecting to other 
systems</a></li>
-        </ul>
-    </li>
-
-    <li>Execution
-        <ul>
-            <li><a href="local_execution.html">Local/Debugging</a></li>
-            <li><a href="cluster_execution.html">Cluster</a></li>
-            <li><a href="cli.html">Command-Line Interface</a></li>
-            <li><a href="web_client.html">Web Interface</a></li>
-        </ul>
-    </li>
-
-    <li>Internals
-        <ul>
-            <li><a href="internal_overview.html">Overview</a></li>
-            <li><a href="internal_general_arch.html">General 
Architecture</a></li>
-            <li><a href="internal_add_operator.html">How-to: Adding a new 
Operator</a></li>
-        </ul>
-    </li>
+  <li><a href="faq.html">FAQ</a></li>
+  <li>Quickstart
+    <ul>
+      <li><a href="setup_quickstart.html">Setup</a></li>
+      <li><a href="run_example_quickstart.html">Run Example</a></li>
+      <li><a href="java_api_quickstart.html">Java API</a></li>
+      <li><a href="scala_api_quickstart.html">Scala API</a></li>
+    </ul>
+  </li>
+
+  <li>Setup &amp; Configuration
+    <ul>
+      <li><a href="local_setup.html">Local Setup</a></li>
+      <li><a href="building.html">Build Flink</a></li>
+      <li><a href="cluster_setup.html">Cluster Setup</a></li>
+      <li><a href="yarn_setup.html">YARN Setup</a></li>
+      <li><a href="config.html">Configuration</a></li>
+    </ul>
+  </li>
+
+  <li>Programming Guides
+    <ul>
+      <li><a href="programming_guide.html">Programming Guide</a></li>
+      <li><a href="dataset_transformations.html">DataSet 
Transformations</a></li>
+      <li><a href="java8_programming_guide.html">Java 8 Programming 
Guide</a></li>
+      <li><a href="streaming_guide.html">Streaming Guide</a></li>
+      <li><a href="iterations.html">Iterations</a></li>
+      <li><a href="spargel_guide.html">Spargel Graph API</a></li>
+      <li><a href="hadoop_compatibility.html">Hadoop Compatibility</a></li>
+    </ul>
+  </li>
+
+  <li>Examples
+    <ul>
+      <li><a href="examples.html">Bundled Examples</a></li>
+      <li><a href="example_connectors.html">Connecting to other 
systems</a></li>
+    </ul>
+  </li>
+
+  <li>Execution
+    <ul>
+      <li><a href="local_execution.html">Local/Debugging</a></li>
+      <li><a href="cluster_execution.html">Cluster</a></li>
+      <li><a href="cli.html">Command-Line Interface</a></li>
+      <li><a href="web_client.html">Web Interface</a></li>
+    </ul>
+  </li>
+
+  <li>Internals
+    <ul>
+      <li><a href="internal_overview.html">Overview</a></li>
+    </ul>
+  </li>
 </ul>
+
         </div>  
         <div class="col-md-9">
           <h1>Configuration</h1>
+         
+          <ul id="markdown-toc">
+  <li><a href="#overview">Overview</a></li>
+  <li><a href="#common-options">Common Options</a></li>
+  <li><a href="#advanced-options">Advanced Options</a></li>
+  <li><a href="#full-reference">Full Reference</a>    <ul>
+      <li><a href="#hdfs">HDFS</a></li>
+      <li><a href="#jobmanager-amp-taskmanager">JobManager &amp; 
TaskManager</a></li>
+      <li><a href="#jobmanager-web-frontend">JobManager Web Frontend</a></li>
+      <li><a href="#webclient">Webclient</a></li>
+      <li><a href="#file-systems">File Systems</a></li>
+      <li><a href="#compileroptimizer">Compiler/Optimizer</a></li>
+    </ul>
+  </li>
+  <li><a href="#yarn">YARN</a></li>
+</ul>
 
-          <h1 id="overview">Overview</h1>
+<h2 id="overview">Overview</h2>
 
 <p>The default configuration parameters allow Flink to run out-of-the-box
 in single node setups.</p>
@@ -124,95 +162,125 @@ with format <code>key: value</code>.</p>
 <p>The system and run scripts parse the config at startup time. Changes to the 
configuration
 file require restarting the Flink JobManager and TaskManagers.</p>
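+
+<p>For example (the values shown are just the documented defaults), entries in
+<em>conf/flink-conf.yaml</em> look like this:</p>
+
+<div class="highlight"><pre><code>jobmanager.rpc.address: localhost
+jobmanager.rpc.port: 6123
+</code></pre></div>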
 
-<h1 id="common-options">Common Options</h1>
+<h2 id="common-options">Common Options</h2>
 
 <ul>
-<li><p><code>env.java.home</code>: The path to the Java installation to use 
(DEFAULT: system&#39;s
+  <li>
+    <p><code>env.java.home</code>: The path to the Java installation to use 
(DEFAULT: system’s
 default Java installation, if found). Needs to be specified if the startup
 scipts fail to automatically resolve the java home directory. Can be specified
 to point to a specific java installation or version. If this option is not
-specified, the startup scripts also evaluate the <code>$JAVA_HOME</code> 
environment variable.</p></li>
-<li><p><code>jobmanager.rpc.address</code>: The IP address of the JobManager, 
which is the
-master/coordinator of the distributed system (DEFAULT: localhost).</p></li>
-<li><p><code>jobmanager.rpc.port</code>: The port number of the JobManager 
(DEFAULT: 6123).</p></li>
-<li><p><code>jobmanager.heap.mb</code>: JVM heap size (in megabytes) for the 
JobManager
-(DEFAULT: 256).</p></li>
-<li><p><code>taskmanager.heap.mb</code>: JVM heap size (in megabytes) for the 
TaskManagers,
+specified, the startup scripts also evaluate the <code>$JAVA_HOME</code> 
environment variable.</p>
+  </li>
+  <li>
+    <p><code>jobmanager.rpc.address</code>: The IP address of the JobManager, 
which is the
+master/coordinator of the distributed system (DEFAULT: localhost).</p>
+  </li>
+  <li>
+    <p><code>jobmanager.rpc.port</code>: The port number of the JobManager 
(DEFAULT: 6123).</p>
+  </li>
+  <li>
+    <p><code>jobmanager.heap.mb</code>: JVM heap size (in megabytes) for the 
JobManager
+(DEFAULT: 256).</p>
+  </li>
+  <li>
+    <p><code>taskmanager.heap.mb</code>: JVM heap size (in megabytes) for the 
TaskManagers,
 which are the parallel workers of the system. In
 contrast to Hadoop, Flink runs operators (e.g., join, aggregate) and
 user-defined functions (e.g., Map, Reduce, CoGroup) inside the TaskManager
 (including sorting/hashing/caching), so this value should be as
 large as possible (DEFAULT: 512). On YARN setups, this value is automatically
-configured to the size of the TaskManager&#39;s YARN container, minus a
-certain tolerance value.</p></li>
-<li><p><code>taskmanager.numberOfTaskSlots</code>: The number of parallel 
operator or
+configured to the size of the TaskManager’s YARN container, minus a
+certain tolerance value.</p>
+  </li>
+  <li>
+    <p><code>taskmanager.numberOfTaskSlots</code>: The number of parallel 
operator or
 UDF instances that a single TaskManager can run (DEFAULT: 1).
 If this value is larger than 1, a single TaskManager takes multiple instances 
of
 a function or operator. That way, the TaskManager can utilize multiple CPU 
cores,
 but at the same time, the available memory is divided between the different
 operator or function instances.
 This value is typically proportional to the number of physical CPU cores that
-the TaskManager&#39;s machine has (e.g., equal to the number of cores, or half 
the
-number of cores).</p></li>
-<li><p><code>parallelization.degree.default</code>: The default degree of 
parallelism to use for
+the TaskManager’s machine has (e.g., equal to the number of cores, or half 
the
+number of cores).</p>
+  </li>
+  <li>
+    <p><code>parallelization.degree.default</code>: The default degree of 
parallelism to use for
 programs that have no degree of parallelism specified. (DEFAULT: 1). For
 setups that have no concurrent jobs running, setting this value to
 NumTaskManagers * NumSlotsPerTaskManager will cause the system to use all
-available execution resources for the program&#39;s execution.</p></li>
-<li><p><code>fs.hdfs.hadoopconf</code>: The absolute path to the Hadoop File 
System&#39;s (HDFS)
+available execution resources for the program’s execution.</p>
+  </li>
+  <li>
+    <p><code>fs.hdfs.hadoopconf</code>: The absolute path to the Hadoop File 
System’s (HDFS)
 configuration directory (OPTIONAL VALUE).
 Specifying this value allows programs to reference HDFS files using short URIs
 (<code>hdfs:///path/to/files</code>, without including the address and port of 
the NameNode
 in the file URI). Without this option, HDFS files can be accessed, but require
 fully qualified URIs like <code>hdfs://address:port/path/to/files</code>.
-This option also causes file writers to pick up the HDFS&#39;s default values 
for block sizes
-and replication factors. Flink will look for the &quot;core-site.xml&quot; and
-&quot;hdfs-site.xml&quot; files in teh specified directory.</p></li>
+This option also causes file writers to pick up the HDFS’s default values 
for block sizes
+and replication factors. Flink will look for the “core-site.xml” and
+“hdfs-site.xml” files in the specified directory.</p>
+  </li>
 </ul>
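+
+<p>To make the slot and parallelism options above concrete (the numbers are purely
+illustrative): a cluster with 10 TaskManagers and 4 slots each offers 10 * 4 = 40
+execution slots, so a setup that should use all of them for a single program could
+set</p>
+
+<div class="highlight"><pre><code>taskmanager.numberOfTaskSlots: 4
+parallelization.degree.default: 40
+</code></pre></div>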
 
-<h1 id="advanced-options">Advanced Options</h1>
+<h2 id="advanced-options">Advanced Options</h2>
 
 <ul>
-<li><p><code>taskmanager.tmp.dirs</code>: The directory for temporary files, 
or a list of
-directories separated by the systems directory delimiter (for example 
&#39;:&#39;
+  <li>
+    <p><code>taskmanager.tmp.dirs</code>: The directory for temporary files, 
or a list of
+directories separated by the system’s directory delimiter (for example ‘:’
 (colon) on Linux/Unix). If multiple directories are specified, then the 
temporary
 files will be distributed across the directories in a round-robin fashion. The
 I/O manager component will spawn one reading and one writing thread per
 directory. A directory may be listed multiple times to have the I/O manager use
 multiple threads for it (for example if it is physically stored on a very fast
-disc or RAID) (DEFAULT: The system&#39;s tmp dir).</p></li>
-<li><p><code>jobmanager.web.port</code>: Port of the JobManager&#39;s web 
interface (DEFAULT: 8081).</p></li>
-<li><p><code>fs.overwrite-files</code>: Specifies whether file output writers 
should overwrite
+disc or RAID) (DEFAULT: The system’s tmp dir).</p>
+  </li>
+  <li>
+    <p><code>jobmanager.web.port</code>: Port of the JobManager’s web 
interface (DEFAULT: 8081).</p>
+  </li>
+  <li>
+    <p><code>fs.overwrite-files</code>: Specifies whether file output writers 
should overwrite
 existing files by default. Set to <em>true</em> to overwrite by default, 
<em>false</em> otherwise.
-(DEFAULT: false)</p></li>
-<li><p><code>fs.output.always-create-directory</code>: File writers running 
with a parallelism
+(DEFAULT: false)</p>
+  </li>
+  <li>
+    <p><code>fs.output.always-create-directory</code>: File writers running 
with a parallelism
 larger than one create a directory for the output file path and put the 
different
 result files (one per parallel writer task) into that directory. If this option
 is set to <em>true</em>, writers with a parallelism of 1 will also create a 
directory
 and place a single result file into it. If the option is set to 
<em>false</em>, the
 writer will directly create the file directly at the output path, without
-creating a containing directory. (DEFAULT: false)</p></li>
-<li><p><code>taskmanager.network.numberOfBuffers</code>: The number of buffers 
available to the
+creating a containing directory. (DEFAULT: false)</p>
+  </li>
+  <li>
+    <p><code>taskmanager.network.numberOfBuffers</code>: The number of buffers 
available to the
 network stack. This number determines how many streaming data exchange channels
 a TaskManager can have at the same time and how well buffered the channels are.
 If a job is rejected or you get a warning that the system does not have enough 
buffers
-available, increase this value (DEFAULT: 2048).</p></li>
-<li><p><code>taskmanager.memory.size</code>: The amount of memory (in 
megabytes) that the task
-manager reserves on the JVM&#39;s heap space for sorting, hash tables, and 
caching
+available, increase this value (DEFAULT: 2048).</p>
+  </li>
+  <li>
+    <p><code>taskmanager.memory.size</code>: The amount of memory (in 
megabytes) that the task
+manager reserves on the JVM’s heap space for sorting, hash tables, and 
caching
 of intermediate results. If unspecified (-1), the memory manager will take a 
fixed
 ratio of the heap memory available to the JVM, as specified by
-<code>taskmanager.memory.fraction</code>. (DEFAULT: -1)</p></li>
-<li><p><code>taskmanager.memory.fraction</code>: The relative amount of memory 
that the task
+<code>taskmanager.memory.fraction</code>. (DEFAULT: -1)</p>
+  </li>
+  <li>
+    <p><code>taskmanager.memory.fraction</code>: The relative amount of memory 
that the task
 manager reserves for sorting, hash tables, and caching of intermediate results.
 For example, a value of 0.8 means that TaskManagers reserve 80% of the
-JVM&#39;s heap space for internal data buffers, leaving 20% of the JVM&#39;s 
heap space
+JVM’s heap space for internal data buffers, leaving 20% of the JVM’s heap 
space
 free for objects created by user-defined functions. (DEFAULT: 0.7)
-This parameter is only evaluated, if <code>taskmanager.memory.size</code> is 
not set.</p></li>
+This parameter is only evaluated if <code>taskmanager.memory.size</code> is not set.
+(A combined example follows this list.)</p>
+  </li>
 </ul>
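+
+<p>A hedged sketch combining a few of the advanced options above in
+<code>conf/flink-conf.yaml</code>; the directories and numbers are illustrative
+assumptions, not recommendations:</p>
+
+<pre><code># Two temporary directories, separated by ':' as described above
+taskmanager.tmp.dirs: /data1/tmp:/data2/tmp
+# More network buffers for setups with many parallel channels
+taskmanager.network.numberOfBuffers: 4096
+# Reserve 60% of the heap for sorting, hash tables, and caching
+taskmanager.memory.fraction: 0.6
+</code></pre>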
 
-<h1 id="full-reference">Full Reference</h1>
+<h2 id="full-reference">Full Reference</h2>
 
-<h2 id="hdfs">HDFS</h2>
+<h3 id="hdfs">HDFS</h3>
 
 <p>These parameters configure the default HDFS used by Flink. Setups that do 
not
 specify a HDFS configuration have to specify the full path to 
@@ -220,116 +288,116 @@ HDFS files (<code>hdfs://address:port/pa
 with default HDFS parameters (block size, replication factor).</p>
 
 <ul>
-<li><code>fs.hdfs.hadoopconf</code>: The absolute path to the Hadoop 
configuration directory.
-The system will look for the &quot;core-site.xml&quot; and 
&quot;hdfs-site.xml&quot; files in that
+  <li><code>fs.hdfs.hadoopconf</code>: The absolute path to the Hadoop 
configuration directory.
+The system will look for the “core-site.xml” and “hdfs-site.xml” files 
in that
 directory (DEFAULT: null).</li>
-<li><code>fs.hdfs.hdfsdefault</code>: The absolute path of Hadoop&#39;s own 
configuration file
-&quot;hdfs-default.xml&quot; (DEFAULT: null).</li>
-<li><code>fs.hdfs.hdfssite</code>: The absolute path of Hadoop&#39;s own 
configuration file
-&quot;hdfs-site.xml&quot; (DEFAULT: null).</li>
+  <li><code>fs.hdfs.hdfsdefault</code>: The absolute path of Hadoop’s own 
configuration file
+“hdfs-default.xml” (DEFAULT: null).</li>
+  <li><code>fs.hdfs.hdfssite</code>: The absolute path of Hadoop’s own 
configuration file
+“hdfs-site.xml” (DEFAULT: null).</li>
 </ul>
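+
+<p>For illustration, instead of pointing to a whole Hadoop configuration directory, the
+two files can be referenced individually (the paths below are assumed examples):</p>
+
+<pre><code># Example only: reference Hadoop's configuration files directly
+fs.hdfs.hdfsdefault: /path/to/hadoop/conf/hdfs-default.xml
+fs.hdfs.hdfssite: /path/to/hadoop/conf/hdfs-site.xml
+</code></pre>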
 
-<h2 id="jobmanager-&amp;-taskmanager">JobManager &amp; TaskManager</h2>
+<h3 id="jobmanager-amp-taskmanager">JobManager &amp; TaskManager</h3>
 
-<p>The following parameters configure Flink&#39;s JobManager and 
TaskManagers.</p>
+<p>The following parameters configure Flink’s JobManager and 
TaskManagers.</p>
 
 <ul>
-<li><code>jobmanager.rpc.address</code>: The IP address of the JobManager, 
which is the
+  <li><code>jobmanager.rpc.address</code>: The IP address of the JobManager, 
which is the
 master/coordinator of the distributed system (DEFAULT: localhost).</li>
-<li><code>jobmanager.rpc.port</code>: The port number of the JobManager 
(DEFAULT: 6123).</li>
-<li><code>taskmanager.rpc.port</code>: The task manager&#39;s IPC port 
(DEFAULT: 6122).</li>
-<li><code>taskmanager.data.port</code>: The task manager&#39;s port used for 
data exchange
+  <li><code>jobmanager.rpc.port</code>: The port number of the JobManager 
(DEFAULT: 6123).</li>
+  <li><code>taskmanager.rpc.port</code>: The task manager’s IPC port 
(DEFAULT: 6122).</li>
+  <li><code>taskmanager.data.port</code>: The task manager’s port used for 
data exchange
 operations (DEFAULT: 6121).</li>
-<li><code>jobmanager.heap.mb</code>: JVM heap size (in megabytes) for the 
JobManager
+  <li><code>jobmanager.heap.mb</code>: JVM heap size (in megabytes) for the 
JobManager
 (DEFAULT: 256).</li>
-<li><code>taskmanager.heap.mb</code>: JVM heap size (in megabytes) for the 
TaskManagers,
+  <li><code>taskmanager.heap.mb</code>: JVM heap size (in megabytes) for the 
TaskManagers,
 which are the parallel workers of the system. In
 contrast to Hadoop, Flink runs operators (e.g., join, aggregate) and
 user-defined functions (e.g., Map, Reduce, CoGroup) inside the TaskManager
 (including sorting/hashing/caching), so this value should be as
 large as possible (DEFAULT: 512). On YARN setups, this value is automatically
-configured to the size of the TaskManager&#39;s YARN container, minus a
+configured to the size of the TaskManager’s YARN container, minus a
 certain tolerance value.</li>
-<li><code>taskmanager.numberOfTaskSlots</code>: The number of parallel 
operator or
+  <li><code>taskmanager.numberOfTaskSlots</code>: The number of parallel 
operator or
 UDF instances that a single TaskManager can run (DEFAULT: 1).
 If this value is larger than 1, a single TaskManager takes multiple instances 
of
 a function or operator. That way, the TaskManager can utilize multiple CPU 
cores,
 but at the same time, the available memory is divided between the different
 operator or function instances.
 This value is typically proportional to the number of physical CPU cores that
-the TaskManager&#39;s machine has (e.g., equal to the number of cores, or half 
the
+the TaskManager’s machine has (e.g., equal to the number of cores, or half 
the
 number of cores).</li>
-<li><code>taskmanager.tmp.dirs</code>: The directory for temporary files, or a 
list of
-directories separated by the systems directory delimiter (for example 
&#39;:&#39;
+  <li><code>taskmanager.tmp.dirs</code>: The directory for temporary files, or 
a list of
+directories separated by the system’s directory delimiter (for example ‘:’
 (colon) on Linux/Unix). If multiple directories are specified, then the 
temporary
 files will be distributed across the directories in a round robin fashion. The
 I/O manager component will spawn one reading and one writing thread per
 directory. A directory may be listed multiple times to have the I/O manager use
 multiple threads for it (for example if it is physically stored on a very fast
-disc or RAID) (DEFAULT: The system&#39;s tmp dir).</li>
-<li><code>taskmanager.network.numberOfBuffers</code>: The number of buffers 
available to the
+disc or RAID) (DEFAULT: The system’s tmp dir).</li>
+  <li><code>taskmanager.network.numberOfBuffers</code>: The number of buffers 
available to the
 network stack. This number determines how many streaming data exchange channels
 a TaskManager can have at the same time and how well buffered the channels are.
 If a job is rejected or you get a warning that the system does not have enough buffers
 available, increase this value (DEFAULT: 2048).</li>
-<li><code>taskmanager.network.bufferSizeInBytes</code>: The size of the 
network buffers, in
+  <li><code>taskmanager.network.bufferSizeInBytes</code>: The size of the 
network buffers, in
 bytes (DEFAULT: 32768 (= 32 KiBytes)).</li>
-<li><code>taskmanager.memory.size</code>: The amount of memory (in megabytes) 
that the task
-manager reserves on the JVM&#39;s heap space for sorting, hash tables, and 
caching
+  <li><code>taskmanager.memory.size</code>: The amount of memory (in 
megabytes) that the task
+manager reserves on the JVM’s heap space for sorting, hash tables, and 
caching
 of intermediate results. If unspecified (-1), the memory manager will take a 
fixed
 ratio of the heap memory available to the JVM, as specified by
 <code>taskmanager.memory.fraction</code>. (DEFAULT: -1)</li>
-<li><code>taskmanager.memory.fraction</code>: The relative amount of memory 
that the task
+  <li><code>taskmanager.memory.fraction</code>: The relative amount of memory 
that the task
 manager reserves for sorting, hash tables, and caching of intermediate results.
 For example, a value of 0.8 means that TaskManagers reserve 80% of the
-JVM&#39;s heap space for internal data buffers, leaving 20% of the JVM&#39;s 
heap space
+JVM’s heap space for internal data buffers, leaving 20% of the JVM’s heap 
space
 free for objects created by user-defined functions. (DEFAULT: 0.7)
 This parameter is only evaluated if <code>taskmanager.memory.size</code> is not set.</li>
-<li><code>jobclient.polling.interval</code>: The interval (in seconds) in 
which the client
+  <li><code>jobclient.polling.interval</code>: The interval (in seconds) in 
which the client
 polls the JobManager for the status of its job (DEFAULT: 2).</li>
-<li><code>taskmanager.runtime.max-fan</code>: The maximal fan-in for external 
merge joins and
+  <li><code>taskmanager.runtime.max-fan</code>: The maximal fan-in for 
external merge joins and
 fan-out for spilling hash tables. Limits the number of file handles per 
operator,
 but may cause intermediate merging/partitioning, if set too small (DEFAULT: 
128).</li>
-<li><code>taskmanager.runtime.sort-spilling-threshold</code>: A sort operation 
starts spilling
+  <li><code>taskmanager.runtime.sort-spilling-threshold</code>: A sort 
operation starts spilling
 when this fraction of its memory budget is full (DEFAULT: 0.8).</li>
 </ul>
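+
+<p>As an illustration of the core entries for a small setup (the host name, heap sizes,
+and slot count below are assumed example values, not recommendations):</p>
+
+<pre><code># JobManager (master) location; the port shown is the documented default
+jobmanager.rpc.address: master-node
+jobmanager.rpc.port: 6123
+jobmanager.heap.mb: 512
+# TaskManager (worker) resources
+taskmanager.heap.mb: 2048
+taskmanager.numberOfTaskSlots: 4
+</code></pre>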
 
-<h2 id="jobmanager-web-frontend">JobManager Web Frontend</h2>
+<h3 id="jobmanager-web-frontend">JobManager Web Frontend</h3>
 
 <ul>
-<li><code>jobmanager.web.port</code>: Port of the JobManager&#39;s web 
interface that displays
+  <li><code>jobmanager.web.port</code>: Port of the JobManager’s web 
interface that displays
 status of running jobs and execution time breakdowns of finished jobs
 (DEFAULT: 8081).</li>
-<li><code>jobmanager.web.history</code>: The number of latest jobs that the 
JobManager&#39;s web
+  <li><code>jobmanager.web.history</code>: The number of latest jobs that the 
JobManager’s web
 front-end keeps in its history (DEFAULT: 5).</li>
 </ul>
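+
+<p>For example, to move the web interface to another port and keep a longer job history
+(both values are arbitrary examples):</p>
+
+<pre><code>jobmanager.web.port: 8082
+jobmanager.web.history: 20
+</code></pre>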
 
-<h2 id="webclient">Webclient</h2>
+<h3 id="webclient">Webclient</h3>
 
 <p>These parameters configure the web interface that can be used to submit 
jobs and
-review the compiler&#39;s execution plans.</p>
+review the compiler’s execution plans.</p>
 
 <ul>
-<li><code>webclient.port</code>: The port of the webclient server (DEFAULT: 
8080).</li>
-<li><code>webclient.tempdir</code>: The temp directory for the web server. 
Used for example
-for caching file fragments during file-uploads (DEFAULT: The system&#39;s temp
+  <li><code>webclient.port</code>: The port of the webclient server (DEFAULT: 
8080).</li>
+  <li><code>webclient.tempdir</code>: The temp directory for the web server. 
Used for example
+for caching file fragments during file-uploads (DEFAULT: The system’s temp
 directory).</li>
-<li><code>webclient.uploaddir</code>: The directory into which the web server 
will store
+  <li><code>webclient.uploaddir</code>: The directory into which the web 
server will store
 uploaded programs (DEFAULT: ${webclient.tempdir}/webclient-jobs/).</li>
-<li><code>webclient.plandump</code>: The directory into which the web server 
will dump
+  <li><code>webclient.plandump</code>: The directory into which the web server 
will dump
 temporary JSON files describing the execution plans
 (DEFAULT: ${webclient.tempdir}/webclient-plans/).</li>
 </ul>
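+
+<p>A short sketch with assumed example values; the upload and plan-dump directories then
+default to subdirectories of the configured <code>webclient.tempdir</code>:</p>
+
+<pre><code>webclient.port: 8090
+webclient.tempdir: /tmp/flink-webclient
+</code></pre>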
 
-<h2 id="file-systems">File Systems</h2>
+<h3 id="file-systems">File Systems</h3>
 
 <p>These parameters define the behavior of tasks that create result files.</p>
 
 <ul>
-<li><code>fs.overwrite-files</code>: Specifies whether file output writers 
should overwrite
+  <li><code>fs.overwrite-files</code>: Specifies whether file output writers 
should overwrite
 existing files by default. Set to <em>true</em> to overwrite by default, 
<em>false</em> otherwise.
 (DEFAULT: false)</li>
-<li><code>fs.output.always-create-directory</code>: File writers running with 
a parallelism
+  <li><code>fs.output.always-create-directory</code>: File writers running 
with a parallelism
 larger than one create a directory for the output file path and put the 
different
 result files (one per parallel writer task) into that directory. If this option
 is set to <em>true</em>, writers with a parallelism of 1 will also create a 
directory
@@ -338,25 +406,38 @@ writer will directly create the file dir
 creating a containing directory. (DEFAULT: false)</li>
 </ul>
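+
+<p>For example, to let writers overwrite existing files and always create an output
+directory, even at a parallelism of 1 (illustrative settings only):</p>
+
+<pre><code>fs.overwrite-files: true
+fs.output.always-create-directory: true
+</code></pre>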
 
-<h2 id="compiler/optimizer">Compiler/Optimizer</h2>
+<h3 id="compileroptimizer">Compiler/Optimizer</h3>
 
 <ul>
-<li><code>compiler.delimited-informat.max-line-samples</code>: The maximum 
number of line
+  <li><code>compiler.delimited-informat.max-line-samples</code>: The maximum 
number of line
 samples taken by the compiler for delimited inputs. The samples are used to
 estimate the number of records. This value can be overridden for a specific
-input with the input format&#39;s parameters (DEFAULT: 10).</li>
-<li><code>compiler.delimited-informat.min-line-samples</code>: The minimum 
number of line
+input with the input format’s parameters (DEFAULT: 10).</li>
+  <li><code>compiler.delimited-informat.min-line-samples</code>: The minimum 
number of line
 samples taken by the compiler for delimited inputs. The samples are used to
 estimate the number of records. This value can be overridden for a specific
-input with the input format&#39;s parameters (DEFAULT: 2).</li>
-<li><code>compiler.delimited-informat.max-sample-len</code>: The maximal 
length of a line
+input with the input format’s parameters (DEFAULT: 2).</li>
+  <li><code>compiler.delimited-informat.max-sample-len</code>: The maximal 
length of a line
 sample that the compiler takes for delimited inputs. If the length of a single
 sample exceeds this value (possible because of misconfiguration of the parser),
 the sampling aborts. This value can be overridden for a specific input with the
-input format&#39;s parameters (DEFAULT: 2097152 (= 2 MiBytes)).</li>
+input format’s parameters (DEFAULT: 2097152 (= 2 MiBytes)).</li>
 </ul>
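+
+<p>A hedged example of adjusting the sampling behavior described above; the numbers are
+chosen arbitrarily for illustration:</p>
+
+<pre><code>compiler.delimited-informat.max-line-samples: 20
+compiler.delimited-informat.min-line-samples: 4
+compiler.delimited-informat.max-sample-len: 1048576
+</code></pre>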
 
+<h2 id="yarn">YARN</h2>
 
+<p>Please note that all ports used by Flink in a YARN session are offset by the YARN application ID
+to avoid duplicate port allocations when running multiple YARN sessions in parallel.</p>
+
+<p>So if <code>yarn.am.rpc.port</code> is configured to <code>10245</code> and the session’s application ID is <code>application_1406629969999_0002</code>, then the actual port being used is 10245 + 2 = 10247.</p>
+
+<ul>
+  <li><code>yarn.am.rpc.port</code>: The port opened by the Application Master (AM) to
+let the YARN client connect for an RPC service. (DEFAULT: Port 10245)</li>
+</ul>
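+
+<p>For example, with the (assumed) base port below and the application ID from the text
+above, the Application Master would actually listen on 11000 + 2 = 11002:</p>
+
+<pre><code># Assumed example; for application_1406629969999_0002 the effective port is 11002
+yarn.am.rpc.port: 11000
+</code></pre>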
+
+         
+        <!-- Disqus Area -->
           <div style="padding-top:30px" id="disqus_thread"></div>
       
             <script type="text/javascript">
@@ -371,14 +452,12 @@ input format&#39;s parameters (DEFAULT:
                 })();
             </script>
             <noscript>Please enable JavaScript to view the <a 
href="http://disqus.com/?ref_noscript";>comments powered by 
Disqus.</a></noscript>
-            <a href="http://disqus.com"; class="dsq-brlink">comments powered by 
<span class="logo-disqus">Disqus</span></a>
-
           </div>
         </div>
 
         <div class="footer">
           
-          <p><hr class="divider"></p>
+          <hr class="divider" />
 
 <p><small>Apache Flink is an effort undergoing incubation at The Apache 
Software
 Foundation (ASF), sponsored by the Apache Incubator PMC. Incubation is
@@ -389,9 +468,10 @@ incubation status is not necessarily a r
 stability of the code, it does indicate that the project has yet to be fully
 endorsed by the ASF.</small></p>
 
-<p><a href="http://incubator.apache.org/";><img 
src="/img/apache-incubator-logo.png" alt="Incubator Logo"></a></p>
+<p><a href="http://incubator.apache.org/";><img 
src="/img/apache-incubator-logo.png" alt="Incubator Logo" /></a></p>
 
-<p class="text-center"><a href="/privacy-policy.html">Privacy Policy<a></p>
+<p class="text-center"><a href="privacy-policy.html">Privacy Policy<a>
+</a></a></p>
 
         </div>
       </div>

Modified: flink/site/docs/0.6-incubating/css/custom.css
URL: 
http://svn.apache.org/viewvc/flink/site/docs/0.6-incubating/css/custom.css?rev=1657551&r1=1657550&r2=1657551&view=diff
==============================================================================
--- flink/site/docs/0.6-incubating/css/custom.css (original)
+++ flink/site/docs/0.6-incubating/css/custom.css Thu Feb  5 12:21:38 2015
@@ -1,9 +1,167 @@
 .extLink {
-       display:inline !important; 
-       padding-right:3px !important;
+       display: inline !important; 
+       padding-right: 3px !important;
 }
 
 .small-font-awesome {
-       font-size:10px;
+       font-size: 10px;
        padding-right: 10px;
+}
+
+#logo-element {
+    display: inline-block;
+    float: left;
+    width: 40px;
+    margin-top: 5px;
+    margin-right: 5px;
+}
+
+/*** Web Trust CSS ***/
+#af-upfooter {
+       margin-top: 50px;
+}
+
+/* Navigation Bar */
+.navbar {
+    padding-top: 20px;
+    padding-bottom: 20px;
+}
+
+.navbar {
+       border:none !important;
+}
+
+.navbar-default {
+    background: #08131F !important;
+}
+
+.navbar-default .navbar-nav > li > a {
+    color: #fff !important;
+    font-weight: normal;
+}
+
+.navbar-brand img {
+       margin-top: -15px;
+}
+
+@media (min-width: 768px) {
+       .navbar-nav {
+               margin-left: 5%;
+       }
+}
+
+/* Side Bar */
+.af-sidebar ul {
+       list-style-type: none;
+}
+
+.af-sidebar-item.active a {
+       color: #555;
+       text-decoration: underline;
+}
+
+.af-label {
+       background: #08131F;
+       border-radius: 5px;
+       color: #fff;
+       display: block;
+       font-size: 0.95em;
+    margin: 10px 0;
+    padding: 0.4em 0.8em;
+}
+
+.af-sidebar ul ul {
+       background: #fff;
+       list-style-type: disc;
+}
+
+/* Downloads Page */
+.af-download-row {
+    margin-top: 40px;
+    margin-bottom: 40px;       
+}
+
+.af-download-row h3 {
+       font-size: 1.8em;
+       margin: 0 0 15px 0;
+       padding: 0;
+}
+
+.af-download-row h3 span {
+       font-size: 0.7em;
+}
+
+.af-blue-color {
+       color: #3795c6;
+}
+
+.af-download-usage {
+       min-height: 115px;
+}
+
+.af-download-usage p {
+       color: #959595;
+       font-size: 0.9em;
+}
+
+.af-download-button {
+       background: #3795c6;
+       color: #fff !important;
+       font-size: 0.9em;
+       font-weight: bold;
+       display: block;
+       margin: 8px auto;
+       text-align:center;
+       width: 200px;
+}
+
+.af-download-button:hover {
+       background: #006599 !important;
+}
+
+.af-small-download-heading {
+       text-align: center;
+}
+
+.af-small-download-area {
+    margin-top: 40px;
+    margin-bottom: 40px;
+}
+
+/* Community Page */
+
+.af-team-member-inner {
+    padding-top: 20px; 
+}
+
+.af-team-member-img {
+       margin:10px auto;
+       width: 70%;
+}
+
+.af-team-member-img img {
+       border-radius: 100%;
+       margin: auto;
+}
+
+.af-team-member-info {
+       font-size: 0.9em;
+       margin: 15px 0;
+       text-align: center;
+}
+
+.af-mailinglist-item {
+       margin: 40px auto; 
+       text-align: center;
+}
+
+.af-mailinglist-item-inner {
+       background: #ececec;
+       border-radius: 6px;
+       padding: 10px 2% 40px 2%;
+}
+
+.af-mailinglist-item-inner p {
+       font-weight: bold;
+       margin: 20px 0;
 }
\ No newline at end of file

